Reference documentation for deal.II version 8.1.0
#include <petsc_parallel_vector.h>
Public Types | |
typedef types::global_dof_index | size_type |
Public Types inherited from PETScWrappers::VectorBase | |
typedef PetscScalar | value_type |
typedef PetscReal | real_type |
typedef types::global_dof_index | size_type |
typedef internal::VectorReference | reference |
typedef const internal::VectorReference | const_reference |
Public Member Functions | |
Vector () | |
Vector (const MPI_Comm &communicator, const size_type n, const size_type local_size) | |
template<typename Number > | |
Vector (const MPI_Comm &communicator, const ::Vector< Number > &v, const size_type local_size) | |
Vector (const MPI_Comm &communicator, const VectorBase &v, const size_type local_size) | |
Vector (const MPI_Comm &communicator, const IndexSet &local, const IndexSet &ghost) DEAL_II_DEPRECATED | |
Vector (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator) | |
Vector (const MPI_Comm &communicator, const IndexSet &local) DEAL_II_DEPRECATED | |
Vector (const IndexSet &local, const MPI_Comm &communicator) | |
Vector & | operator= (const Vector &v) |
Vector & | operator= (const PETScWrappers::Vector &v) |
Vector & | operator= (const PetscScalar s) |
template<typename number > | |
Vector & | operator= (const ::Vector< number > &v) |
void | reinit (const MPI_Comm &communicator, const size_type N, const size_type local_size, const bool fast=false) |
void | reinit (const Vector &v, const bool fast=false) |
void | reinit (const MPI_Comm &communicator, const IndexSet &local, const IndexSet &ghost) DEAL_II_DEPRECATED |
void | reinit (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator) |
void | reinit (const MPI_Comm &communicator, const IndexSet &local) DEAL_II_DEPRECATED |
void | reinit (const IndexSet &local, const MPI_Comm &communicator) |
const MPI_Comm & | get_mpi_communicator () const |
void | print (std::ostream &out, const unsigned int precision=3, const bool scientific=true, const bool across=true) const |
Public Member Functions inherited from PETScWrappers::VectorBase | |
VectorBase () | |
VectorBase (const VectorBase &v) | |
VectorBase (const Vec &v) | |
virtual | ~VectorBase () |
void | compress (::VectorOperation::values operation) |
void | compress () DEAL_II_DEPRECATED |
VectorBase & | operator= (const PetscScalar s) |
bool | operator== (const VectorBase &v) const |
bool | operator!= (const VectorBase &v) const |
size_type | size () const |
size_type | local_size () const |
std::pair< size_type, size_type > | local_range () const |
bool | in_local_range (const size_type index) const |
IndexSet | locally_owned_elements () const |
bool | has_ghost_elements () const |
reference | operator() (const size_type index) |
PetscScalar | operator() (const size_type index) const |
reference | operator[] (const size_type index) |
PetscScalar | operator[] (const size_type index) const |
void | set (const std::vector< size_type > &indices, const std::vector< PetscScalar > &values) |
void | extract_subvector_to (const std::vector< size_type > &indices, std::vector< PetscScalar > &values) const |
template<typename ForwardIterator , typename OutputIterator > | |
void | extract_subvector_to (const ForwardIterator indices_begin, const ForwardIterator indices_end, OutputIterator values_begin) const |
void | add (const std::vector< size_type > &indices, const std::vector< PetscScalar > &values) |
void | add (const std::vector< size_type > &indices, const ::Vector< PetscScalar > &values) |
void | add (const size_type n_elements, const size_type *indices, const PetscScalar *values) |
PetscScalar | operator* (const VectorBase &vec) const |
real_type | norm_sqr () const |
PetscScalar | mean_value () const |
real_type | l1_norm () const |
real_type | l2_norm () const |
real_type | lp_norm (const real_type p) const |
real_type | linfty_norm () const |
real_type | normalize () const |
real_type | min () const |
real_type | max () const |
VectorBase & | abs () |
VectorBase & | conjugate () |
VectorBase & | mult () |
VectorBase & | mult (const VectorBase &v) |
VectorBase & | mult (const VectorBase &u, const VectorBase &v) |
bool | all_zero () const |
bool | is_non_negative () const |
VectorBase & | operator*= (const PetscScalar factor) |
VectorBase & | operator/= (const PetscScalar factor) |
VectorBase & | operator+= (const VectorBase &V) |
VectorBase & | operator-= (const VectorBase &V) |
void | add (const PetscScalar s) |
void | add (const VectorBase &V) |
void | add (const PetscScalar a, const VectorBase &V) |
void | add (const PetscScalar a, const VectorBase &V, const PetscScalar b, const VectorBase &W) |
void | sadd (const PetscScalar s, const VectorBase &V) |
void | sadd (const PetscScalar s, const PetscScalar a, const VectorBase &V) |
void | sadd (const PetscScalar s, const PetscScalar a, const VectorBase &V, const PetscScalar b, const VectorBase &W) |
void | sadd (const PetscScalar s, const PetscScalar a, const VectorBase &V, const PetscScalar b, const VectorBase &W, const PetscScalar c, const VectorBase &X) |
void | scale (const VectorBase &scaling_factors) |
void | equ (const PetscScalar a, const VectorBase &V) |
void | equ (const PetscScalar a, const VectorBase &V, const PetscScalar b, const VectorBase &W) |
void | ratio (const VectorBase &a, const VectorBase &b) |
void | update_ghost_values () const DEAL_II_DEPRECATED |
void | write_ascii (const PetscViewerFormat format=PETSC_VIEWER_DEFAULT) |
void | print (std::ostream &out, const unsigned int precision=3, const bool scientific=true, const bool across=true) const |
void | swap (VectorBase &v) |
operator const Vec & () const | |
std::size_t | memory_consumption () const |
Public Member Functions inherited from Subscriptor | |
Subscriptor () | |
Subscriptor (const Subscriptor &) | |
virtual | ~Subscriptor () |
Subscriptor & | operator= (const Subscriptor &) |
void | subscribe (const char *identifier=0) const |
void | unsubscribe (const char *identifier=0) const |
unsigned int | n_subscriptions () const |
void | list_subscribers () const |
DeclException3 (ExcInUse, int, char *, std::string &,<< "Object of class "<< arg2<< " is still used by "<< arg1<< " other objects.\n"<< "(Additional information: "<< arg3<< ")\n"<< "Note the entry in the Frequently Asked Questions of "<< "deal.II (linked to from http://www.dealii.org/) for "<< "more information on what this error means.") | |
DeclException2 (ExcNoSubscriber, char *, char *,<< "No subscriber with identifier \""<< arg2<< "\" did subscribe to this object of class "<< arg1) | |
template<class Archive > | |
void | serialize (Archive &ar, const unsigned int version) |
Static Public Attributes | |
static const bool | supports_distributed_data = true |
Protected Member Functions | |
virtual void | create_vector (const size_type n, const size_type local_size) |
virtual void | create_vector (const size_type n, const size_type local_size, const IndexSet &ghostnodes) |
Protected Member Functions inherited from PETScWrappers::VectorBase | |
void | do_set_add_operation (const size_type n_elements, const size_type *indices, const PetscScalar *values, const bool add_values) |
Private Attributes | |
MPI_Comm | communicator |
Related Functions | |
(Note that these are not member functions.) | |
void | swap (Vector &u, Vector &v) |
Related Functions inherited from PETScWrappers::VectorBase | |
void | swap (VectorBase &u, VectorBase &v) |
Additional Inherited Members | |
Protected Attributes inherited from PETScWrappers::VectorBase | |
Vec | vector |
bool | ghosted |
IndexSet | ghost_indices |
mutable ::VectorOperation::values | last_action
bool | attained_ownership |
Implementation of a parallel vector class based on PETSc and using MPI communication to synchronise distributed operations. All the functionality is actually in the base class, except for the calls to generate a parallel vector. This is possible since PETSc only works on an abstract vector type and internally distributes to functions that do the actual work depending on the actual vector type (much like using virtual functions). Only the functions creating a vector of specific type differ, and are implemented in this particular class.
The parallel functionality of PETSc is built on top of the Message Passing Interface (MPI). MPI's communication model is built on collective communications: if one process wants something from another, that other process has to be willing to accept this communication. A process cannot query data from another process by calling a remote function, without that other process expecting such a transaction. The consequence is that most of the operations in the base class of this class have to be called collectively. For example, if you want to compute the l2 norm of a parallel vector, all processes across which this vector is shared have to call the l2_norm
function. If you don't do this, but instead only call the l2_norm
function on one process, then the following happens: this one process will call one of the collective MPI functions and wait for all the other processes to join in. Since the other processes don't call this function, you will either get a time-out on the first process, or, worse, by the time the next call to a PETSc function generates an MPI message on the other processes, you will get a cryptic message that only a subset of processes attempted a communication. These bugs can be very hard to figure out, unless you are well-acquainted with the communication model of MPI and know which functions may generate MPI messages.
One particular case, where an MPI message may be generated unexpectedly is discussed below.
PETSc does allow read access to individual elements of a vector, but in the distributed case only to elements that are stored locally. We implement this through calls like d=vec(i)
. However, if you access an element outside the locally stored range, an exception is generated.
In contrast to read access, PETSc (and the respective deal.II wrapper classes) allows writing (or adding) to individual elements of vectors, even if they are stored on a different process. You can do this by writing, for example, vec(i)=d
or vec(i)+=d
, or similar operations. There is one catch, however, that may lead to very confusing error messages: PETSc requires application programs to call the compress() function when they switch from adding to elements to writing to elements. The reasoning is that all processes might accumulate addition operations to elements, even if multiple processes write to the same elements. By the time compress() is next called, all these additions are executed. However, if one process adds to an element and another overwrites it, the order of execution would yield non-deterministic behavior if we don't make sure that a synchronisation with compress() happens in between.
In order to make sure these calls to compress() happen at the appropriate time, the deal.II wrappers keep a state variable that stores which kind of operation is presently allowed: additions or writes. If they encounter an operation of the opposite kind, they call compress() and flip the state. This can sometimes lead to very confusing behavior, in code that may for example look like the sketch below:
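The code the paragraph above refers to would look roughly like the following; this is a minimal sketch in which the sizes n and local_size and the predicate some_condition() are placeholders, not part of the class interface.

```cpp
// set up a parallel vector; n, local_size and some_condition() are
// purely illustrative names
PETScWrappers::MPI::Vector v (MPI_COMM_WORLD, n, local_size);

// first phase: write (insert) into the locally owned elements
for (unsigned int i=v.local_range().first; i<v.local_range().second; ++i)
  v(i) = 1.0;

// second phase: add to elements, but only where some condition holds.
// A process for which the condition never holds never switches from
// "insert" to "add" mode and therefore never performs the implicit
// compress() that the wrappers issue on that switch.
for (unsigned int i=v.local_range().first; i<v.local_range().second; ++i)
  if (some_condition (i))
    v(i) += 1.0;

// collective operation: every process must reach this call
const double norm = v.l2_norm();
```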
This code can run into trouble: by the time we see the first addition operation, we need to flush the overwrite buffers for the vector, and the deal.II library will do so by calling compress(). However, it will only do so on those processes that actually perform an addition; if the condition is never true for one of the processes, then that process never reaches the compress() call, whereas all the other ones do. This gets us into trouble, since all the other processes hang in the call that flushes the write buffers, while the remaining process advances to the call that computes the l2 norm. At this point, you will get an error that some operation was attempted by only a subset of processes. This behavior may seem surprising, unless you know that write/addition operations on single elements may trigger it.
This problem can be avoided by placing additional calls to compress(), or by making sure that all processes do the same type of operation at the same time, for example by placing zero additions if necessary, as sketched below.
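Under the same illustrative assumptions as in the sketch above, the two remedies could look like this:

```cpp
// variant 1: make every process perform the same kind of operation by
// adding zero where the condition does not hold
for (unsigned int i=v.local_range().first; i<v.local_range().second; ++i)
  v(i) += some_condition (i) ? 1.0 : 0.0;

// variant 2: synchronise explicitly on all processes before the next
// collective operation
v.compress (VectorOperation::add);

const double norm = v.l2_norm();
```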
Definition at line 158 of file petsc_parallel_vector.h.
Declare type for container size.
Definition at line 164 of file petsc_parallel_vector.h.
PETScWrappers::MPI::Vector::Vector ()
Default constructor. Initialize the vector as empty.
PETScWrappers::MPI::Vector::Vector (const MPI_Comm &communicator, const size_type n, const size_type local_size) [explicit]
Constructor. Set dimension to n
and initialize all elements with zero.
The constructor is made explicit to avoid accidents like this: v=0;
. Presumably, the user wants to set every element of the vector to zero, but instead, what happens is this call: v=Vector<number>(0);
, i.e. the vector is replaced by one of length zero.
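A minimal usage sketch; the global size and the even split across processes are illustrative assumptions, not requirements of the constructor.

```cpp
// global size 1000, split evenly across the processes in MPI_COMM_WORLD
// (assumes 1000 is divisible by the number of processes)
const unsigned int n_procs =
  Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
PETScWrappers::MPI::Vector v (MPI_COMM_WORLD, 1000, 1000 / n_procs);
```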
template<typename Number>
PETScWrappers::MPI::Vector::Vector (const MPI_Comm &communicator, const ::Vector< Number > &v, const size_type local_size) [explicit]
Copy-constructor from deal.II vectors. Sets the dimension to that of the given vector, and copies all elements.
PETScWrappers::MPI::Vector::Vector (const MPI_Comm &communicator, const VectorBase &v, const size_type local_size) [explicit]
Copy-constructor that copies the values from a PETSc wrapper vector class.
PETScWrappers::MPI::Vector::Vector (const MPI_Comm &communicator, const IndexSet &local, const IndexSet &ghost) [explicit, deprecated]
Constructs a new parallel ghosted PETSc vector from IndexSets. Note that local
must be contiguous and the global size of the vector is determined by local.size(). The global indices in ghost
are supplied as ghost indices that can also be read locally.
Note that the ghost
IndexSet may be empty and that any indices already contained in local
are ignored during construction. That way, the ghost parameter can equal the set of locally relevant degrees of freedom, see step-32.
PETScWrappers::MPI::Vector::Vector (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator)
Constructs a new parallel ghosted PETSc vector from IndexSets. Note that local
must be contiguous and the global size of the vector is determined by local.size(). The global indices in ghost
are supplied as ghost indices that can also be read locally.
Note that the ghost
IndexSet may be empty and that any indices already contained in local
are ignored during construction. That way, the ghost parameter can equal the set of locally relevant degrees of freedom, see step-32.
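The two index sets are typically obtained from a distributed DoFHandler. The following sketch assumes such a dof_handler object exists; it is not part of this class.

```cpp
// index sets describing locally owned and locally relevant degrees of
// freedom (dof_handler is an assumed, pre-existing object)
IndexSet locally_owned = dof_handler.locally_owned_dofs ();
IndexSet locally_relevant;
DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant);

// ghosted vector: owns the entries in locally_owned and can additionally
// read the ghost entries contained in locally_relevant
PETScWrappers::MPI::Vector ghosted (locally_owned,
                                    locally_relevant,
                                    MPI_COMM_WORLD);
```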
PETScWrappers::MPI::Vector::Vector (const MPI_Comm &communicator, const IndexSet &local) [explicit, deprecated]
Constructs a new parallel PETSc vector from an IndexSet. This creates a non-ghosted vector.
PETScWrappers::MPI::Vector::Vector (const IndexSet &local, const MPI_Comm &communicator) [explicit]
Constructs a new parallel PETSc vector from an IndexSet. This creates a non-ghosted vector.
Vector& PETScWrappers::MPI::Vector::operator= (const Vector &v)
Copy the given vector. Resize the present vector if necessary. Also take over the MPI communicator of v
.
Vector& PETScWrappers::MPI::Vector::operator= (const PETScWrappers::Vector &v)
Copy the given sequential (non-distributed) vector into the present parallel vector. It is assumed that they have the same size, and this operation does not change the partitioning of the parallel vector by which its elements are distributed across several MPI processes. What this operation therefore does is copy the chunk of the given vector v
that corresponds to elements of the target vector that are stored locally. Elements that are not stored locally are not touched.
This being a parallel vector, you must make sure that all processes call this function at the same time. It is not possible to change the local part of a parallel vector on only one process, independent of what other processes do, with this function.
Vector& PETScWrappers::MPI::Vector::operator= (const PetscScalar s)
Set all components of the vector to the given number s
. Simply pass this down to the base class, but we still need to declare this function to make the example given in the discussion about making the constructor explicit work.
template<typename number>
Vector& PETScWrappers::MPI::Vector::operator= (const ::Vector< number > &v)
Copy the values of a deal.II vector (as opposed to those of the PETSc vector wrapper class) into this object.
Contrary to the case of sequential vectors, this operator requires that the present vector already has the correct size, since we need to have a partition and a communicator present, which we otherwise can't get from the source vector.
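A sketch of the intended use, assuming the parallel vector v has already been given its final size and partitioning and that the deal.II namespace is in scope:

```cpp
// a serial deal.II vector of the same global size, filled with the same
// values on every process
Vector<double> serial (v.size());
// ... fill 'serial' identically on all processes ...
v = serial;   // each process copies only its locally stored range
```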
void PETScWrappers::MPI::Vector::reinit (const MPI_Comm &communicator, const size_type N, const size_type local_size, const bool fast = false)
Change the dimension of the vector to N. It is unspecified how resizing the vector affects the memory allocation of this object; i.e., it is not guaranteed that resizing it to a smaller size actually also reduces memory consumption, or if for efficiency the same amount of memory is used for less data.
local_size denotes how many of the N values shall be stored locally on the present process.
communicator denotes the MPI communicator henceforth to be used for this vector.
If fast is false, the vector is filled with zeros. Otherwise, the elements are left in an unspecified state.
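A brief usage sketch; the sizes and the even split via the n_procs variable from the earlier sketch are illustrative assumptions:

```cpp
// resize to 1000 global elements; fast defaults to false, so the vector
// is filled with zeros
v.reinit (MPI_COMM_WORLD, 1000, 1000 / n_procs);

// resize again with fast == true: the elements are left in an
// unspecified state and must be assigned before being read
v.reinit (MPI_COMM_WORLD, 2000, 2000 / n_procs, true);
```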
void PETScWrappers::MPI::Vector::reinit (const Vector &v, const bool fast = false)
Change the dimension to that of the vector v
, and also take over the partitioning into local sizes as well as the MPI communicator. The same applies as for the other reinit
function.
The elements of v
are not copied, i.e. this function is the same as calling reinit(v.size(), v.local_size(), fast)
.
void PETScWrappers::MPI::Vector::reinit (const MPI_Comm &communicator, const IndexSet &local, const IndexSet &ghost)
Reinit as a ghosted vector. See constructor with same signature for more details.
void PETScWrappers::MPI::Vector::reinit (const IndexSet &local, const IndexSet &ghost, const MPI_Comm &communicator)
Reinit as a ghosted vector. See the constructor with the same signature for more details.
void PETScWrappers::MPI::Vector::reinit (const MPI_Comm &communicator, const IndexSet &local)
Reinit as a vector without ghost elements. See the constructor with the same signature for more details.
void PETScWrappers::MPI::Vector::reinit (const IndexSet &local, const MPI_Comm &communicator)
Reinit as a vector without ghost elements. See the constructor with the same signature for more details.
const MPI_Comm& PETScWrappers::MPI::Vector::get_mpi_communicator () const [virtual]
Return a reference to the MPI communicator object in use with this vector.
Reimplemented from PETScWrappers::VectorBase.
void PETScWrappers::MPI::Vector::print (std::ostream &out, const unsigned int precision = 3, const bool scientific = true, const bool across = true) const
Print to a stream. precision
denotes the desired precision with which values shall be printed, scientific
whether scientific notation shall be used. If across
is true
then the vector is printed in a line, while if false
then the elements are printed on a separate line each.
void PETScWrappers::MPI::Vector::create_vector (const size_type n, const size_type local_size) [protected, virtual]
Create a vector of length n
. For this class, we create a parallel vector. n
denotes the total size of the vector to be created. local_size
denotes how many of these elements shall be stored locally.
void PETScWrappers::MPI::Vector::create_vector (const size_type n, const size_type local_size, const IndexSet &ghostnodes) [protected, virtual]
Create a vector of global length n
, local size local_size
and with the specified ghost indices. Note that you need to call update_ghost_values() before accessing those.
void swap (Vector &u, Vector &v)
Global function swap that overloads the default implementation of the C++ standard library, which uses a temporary object. The function simply exchanges the data of the two vectors.
Definition at line 554 of file petsc_parallel_vector.h.
const bool PETScWrappers::MPI::Vector::supports_distributed_data = true [static]
A variable that indicates whether this vector supports distributed data storage. If true, then this vector also needs an appropriate compress() function that allows recent set or add operations on individual elements to be communicated to other processors.
For the current class, the variable equals true, since it does support parallel data storage.
Definition at line 177 of file petsc_parallel_vector.h.
MPI_Comm PETScWrappers::MPI::Vector::communicator [private]
Copy of the communicator object to be used for this parallel vector.
Definition at line 538 of file petsc_parallel_vector.h.