#ifndef __deal2__constraint_matrix_templates_h
#define __deal2__constraint_matrix_templates_h

#include <deal.II/lac/constraint_matrix.h>

#include <deal.II/base/table.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/block_sparse_matrix.h>
#include <deal.II/lac/parallel_vector.h>
#include <deal.II/lac/parallel_block_vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_vector.h>

DEAL_II_NAMESPACE_OPEN
// condense() for a matrix alone: forward to the variant that also handles
// vectors, using dummy vectors.
template <typename number>
  Vector<number> dummy (0);
  condense (uncondensed, dummy, condensed, dummy);

template <typename number>

template <typename number>

template <class VectorType>
  // Build the mapping from old to condensed row numbers: -1 marks a
  // constrained row, all other rows are shifted down accordingly.
  std::vector<int> new_line;

  new_line.reserve (uncondensed.size());

  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();

  if (next_constraint == lines.end())
      new_line.push_back (row);

      if (row == next_constraint->line)
          new_line.push_back (-1);

          if (next_constraint == lines.end())
              new_line.push_back (i-shift);

        new_line.push_back (row-shift);

  next_constraint = lines.begin();

  // Copy unconstrained entries and distribute the entries of constrained
  // rows onto the rows they depend on.
  for (size_type row=0; row<uncondensed.size(); ++row)
    if (new_line[row] != -1)
      condensed(new_line[row]) += uncondensed(row);

        for (size_type q=0; q!=next_constraint->entries.size(); ++q)
          condensed(new_line[next_constraint->entries[q].first])
            += uncondensed(row) * next_constraint->entries[q].second;
template <class VectorType>

  for (std::vector<ConstraintLine>::const_iterator
       constraint_line = lines.begin();
       constraint_line != lines.end(); ++constraint_line)
    {
      Assert (constraint_line->inhomogeneity == 0.,
              ExcMessage ("Inhomogeneous constraint cannot be condensed "
                          "without any matrix specified."));

      const typename VectorType::value_type old_value = vec(constraint_line->line);
      for (size_type q=0; q!=constraint_line->entries.size(); ++q)
        if (vec.in_local_range(constraint_line->entries[q].first) == true)
          vec(constraint_line->entries[q].first)
            += (static_cast<typename VectorType::value_type>
                (old_value) *
                constraint_line->entries[q].second);
    }

  vec.compress(VectorOperation::add);

  for (std::vector<ConstraintLine>::const_iterator
       constraint_line = lines.begin();
       constraint_line != lines.end(); ++constraint_line)
    if (vec.in_local_range(constraint_line->line) == true)
      vec(constraint_line->line) = 0.;

  vec.compress(VectorOperation::insert);
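// As an illustration (not part of the original file): for the single
// constraint x_3 = 0.5*x_1 + 0.5*x_2, condensing a right hand side vector b
// adds 0.5*b_3 to both b_1 and b_2 and then zeroes b_3, so that b matches a
// condensed matrix. Assuming an already closed ConstraintMatrix named
// `constraints`, the call is simply
//
// @code
//   constraints.condense (b);
// @endcode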
template <typename number, class VectorType>

  const bool use_vectors = (uncondensed_vector.size() == 0 &&
                            condensed_vector.size() == 0) ? false : true;

  Assert (condensed.n() == condensed.m(),
          ExcNotQuadratic());

  if (use_vectors == true)
    AssertDimension (condensed_vector.size() + n_constraints(),
                     uncondensed_vector.size());
  // Build the mapping from old to condensed row numbers, as in the vector
  // version above: -1 marks a constrained row.
  std::vector<int> new_line;

  new_line.reserve (uncondensed_struct.n_rows());

  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();

  if (next_constraint == lines.end())
    for (size_type row=0; row!=n_rows; ++row)
      new_line.push_back (row);

    for (size_type row=0; row!=n_rows; ++row)
      if (row == next_constraint->line)
          new_line.push_back (-1);

          if (next_constraint == lines.end())
              new_line.push_back (i-shift);

        new_line.push_back (row-shift);

  next_constraint = lines.begin();
    // Unconstrained row: copy entries over, resolving constrained columns
    // in place.
    if (new_line[row] != -1)
      {
        for (typename SparseMatrix<number>::const_iterator
             p = uncondensed.begin(row);
             p != uncondensed.end(row); ++p)
          if (new_line[p->column()] != -1)
            condensed.add (new_line[row],
                           new_line[p->column()],
                           p->value());
          else
            {
              // let c point to the constraint of this column
              std::vector<ConstraintLine>::const_iterator c = lines.begin();
              while (c->line != p->column())
                ++c;

              for (size_type q=0; q!=c->entries.size(); ++q)
                condensed.add (new_line[row], new_line[c->entries[q].first],
                               p->value() * c->entries[q].second);

              if (use_vectors == true)
                condensed_vector(new_line[row]) -= p->value() *
                                                   c->inhomogeneity;
            }

        if (use_vectors == true)
          condensed_vector(new_line[row]) += uncondensed_vector(row);
      }
      // Constrained row: distribute its entries onto the rows of the
      // constraint it belongs to.
      for (typename SparseMatrix<number>::const_iterator
           p = uncondensed.begin(row);
           p != uncondensed.end(row); ++p)
        {
          if (new_line[p->column()] != -1)
            for (size_type q=0; q!=next_constraint->entries.size(); ++q)
              condensed.add (new_line[next_constraint->entries[q].first],
                             new_line[p->column()],
                             p->value() *
                             next_constraint->entries[q].second);
          else
            {
              // let c point to the constraint of this column
              std::vector<ConstraintLine>::const_iterator c = lines.begin();
              while (c->line != p->column())
                ++c;

              for (size_type r=0; r!=c->entries.size(); ++r)
                for (size_type q=0; q!=next_constraint->entries.size(); ++q)
                  condensed.add (new_line[next_constraint->entries[q].first],
                                 new_line[c->entries[r].first],
                                 p->value() *
                                 next_constraint->entries[q].second *
                                 c->entries[r].second);

              if (use_vectors == true)
                for (size_type q=0; q!=next_constraint->entries.size(); ++q)
                  condensed_vector (new_line[next_constraint->entries[q].first])
                    -= p->value() *
                       next_constraint->entries[q].second *
                       c->inhomogeneity;
            }
        }

      if (use_vectors == true)
        for (size_type q=0; q!=next_constraint->entries.size(); ++q)
          condensed_vector(new_line[next_constraint->entries[q].first])
            += uncondensed_vector(row) * next_constraint->entries[q].second;
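// A hedged usage sketch (the names `dof_handler`, `hanging_node_constraints`,
// `system_matrix`, and `system_rhs` are assumptions): the classic condense
// workflow closes the constraints and then condenses matrix and right hand
// side in place before solving.
//
// @code
//   ConstraintMatrix hanging_node_constraints;
//   DoFTools::make_hanging_node_constraints (dof_handler,
//                                            hanging_node_constraints);
//   hanging_node_constraints.close ();
//
//   hanging_node_constraints.condense (system_matrix, system_rhs);
// @endcode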
template <typename number, class VectorType>

  const bool use_vectors = vec.size() == 0 ? false : true;

  if (use_vectors == true)
    AssertDimension (vec.size(), uncondensed.m());

  // Compute the average diagonal so that the rows and columns of
  // constrained DoFs can later be replaced by an entry of comparable
  // magnitude.
  double average_diagonal = 0;
  for (size_type i=0; i<uncondensed.m(); ++i)
    average_diagonal += std::fabs (uncondensed.diag_element(i));
  average_diagonal /= uncondensed.m();

  for (typename SparseMatrix<number>::iterator
       entry = uncondensed.begin(row);
       entry != uncondensed.end(row); ++entry)
      const size_type column = entry->column();

              ExcMatrixNotClosed());

          uncondensed.add (row,

      if (use_vectors == true)

  for (typename SparseMatrix<number>::iterator
       entry = uncondensed.begin(row);
       entry != uncondensed.end(row); ++entry)
      const size_type column = entry->column();

              ExcMatrixNotClosed());

      if (use_vectors == true)

      entry->value() = (row == column ? average_diagonal : 0. );

  if (use_vectors == true)
template <typename number, class BlockVectorType>
void
ConstraintMatrix::condense (BlockSparseMatrix<number> &uncondensed,
                            BlockVectorType           &vec) const

  const bool use_vectors = vec.n_blocks() == 0 ? false : true;

  if (use_vectors == true)

  // as above, compute the average diagonal of the diagonal blocks
  double average_diagonal = 0;
  for (size_type b=0; b<uncondensed.n_block_rows(); ++b)
    for (size_type i=0; i<uncondensed.block(b,b).m(); ++i)
      average_diagonal += std::fabs (uncondensed.block(b,b).diag_element(i));
  average_diagonal /= uncondensed.m();

      // get the block and in-block row number of the current row
      const std::pair<size_type,size_type>
      block_index = index_mapping.global_to_local(row);
      const size_type block_row = block_index.first;

          for (size_type block_col=0; block_col<blocks; ++block_col)
            for (typename SparseMatrix<number>::iterator
                 entry = uncondensed.block(block_row, block_col).begin(block_index.second);
                 entry != uncondensed.block(block_row, block_col).end(block_index.second);
                 ++entry)
              {
                const double old_value = entry->value ();

                    uncondensed.add (row,

                if (use_vectors == true)
                  vec(row) -= entry->value() *

          for (size_type block_col=0; block_col<blocks; ++block_col)
            for (typename SparseMatrix<number>::iterator
                 entry = uncondensed.block(block_row, block_col).begin(block_index.second);
                 entry != uncondensed.block(block_row, block_col).end(block_index.second);
                 ++entry)
              {
                const double old_value = entry->value();

                const double old_value = entry->value ();

                if (use_vectors == true)

                entry->value() = (row == global_col ? average_diagonal : 0. );

  if (use_vectors == true)
      template <class VEC>
      void set_zero_parallel (const std::vector<size_type> &cm,
                              VEC &vec, size_type shift = 0)
      {
        IndexSet locally_owned = vec.locally_owned_elements();
        for (typename std::vector<size_type>::const_iterator it = cm.begin();
             it != cm.end(); ++it)
          {
            // If shift is nonzero, we are looking at one block of a block
            // vector, so subtract the start index of that block.
            size_type idx = *it - shift;
            if (idx < vec.size() && locally_owned.is_element(idx))
              vec(idx) = 0.;
          }
      }

      template <typename Number>
      void set_zero_parallel (const std::vector<size_type> &cm,
                              parallel::distributed::Vector<Number> &vec,
                              size_type shift = 0)
      {
        for (typename std::vector<size_type>::const_iterator it = cm.begin();
             it != cm.end(); ++it)
          {
            size_type idx = *it - shift;
            if (idx < vec.size() && vec.in_local_range(idx))
              vec(idx) = 0.;
          }
      }

      template <class VEC>
      void set_zero_in_parallel (const std::vector<size_type> &cm, VEC &vec)
      {
        set_zero_parallel(cm, vec, 0);
      }

      // the same for block vectors: shift by the start index of each block
      template <class VEC>
      void set_zero_in_parallel_block (const std::vector<size_type> &cm, VEC &vec)
      {
        size_type start_shift = 0;
        for (size_type j=0; j<vec.n_blocks(); ++j)
          {
            set_zero_parallel(cm, vec.block(j), start_shift);
            start_shift += vec.block(j).size();
          }
      }

      template <class VEC>
      void set_zero_serial (const std::vector<size_type> &cm, VEC &vec)
      {
        for (typename std::vector<size_type>::const_iterator it = cm.begin();
             it != cm.end(); ++it)
          vec(*it) = 0.;
      }

      template <class VEC>
      void set_zero_all (const std::vector<size_type> &cm, VEC &vec)
      {
        vec.compress(VectorOperation::insert);
      }

      template <class T>
      void set_zero_all (const std::vector<size_type> &cm, ::dealii::Vector<T> &vec)
      {
        set_zero_serial(cm, vec);
      }

      template <class T>
      void set_zero_all (const std::vector<size_type> &cm, ::dealii::BlockVector<T> &vec)
      {
        set_zero_serial(cm, vec);
      }
template <class VectorType>
void
ConstraintMatrix::set_zero (VectorType &vec) const
{
  // copy the constrained DoF indices into a plain array, since the member
  // variable lines cannot be handed to the helper functions directly
  std::vector<size_type> constrained_lines(lines.size());
  for (unsigned int i=0; i<lines.size(); ++i)
    constrained_lines[i] = lines[i].line;
  internal::ConstraintMatrix::set_zero_all(constrained_lines, vec);
}
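// Usage sketch (assumed names): set_zero() is typically applied to residual
// vectors in Newton or time-stepping loops, so that entries belonging to
// constrained degrees of freedom do not pollute norms.
//
// @code
//   constraints.set_zero (residual);
//   const double norm = residual.l2_norm ();
// @endcode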
template <typename VectorType>
void
ConstraintMatrix::distribute_local_to_global (
  const Vector<double>         &local_vector,
  const std::vector<size_type> &local_dof_indices,
  VectorType                   &global_vector,
  const FullMatrix<double>     &local_matrix) const
{
  const size_type n_local_dofs = local_vector.size();

  // fast path if no constraints are stored at all
  if (lines.empty())
    global_vector.add(local_dof_indices, local_vector);
  else
    for (size_type i=0; i<n_local_dofs; ++i)
      {
        // unconstrained entries are simply copied over
        if (is_constrained(local_dof_indices[i]) == false)
          {
            global_vector(local_dof_indices[i]) += local_vector(i);
            continue;
          }

        const ConstraintLine *position =
          &lines[lines_cache[calculate_line_index(local_dof_indices[i])]];

        // Gauss elimination of the inhomogeneity: subtract the respective
        // matrix column times the inhomogeneous value from the right hand
        // side
        const double val = position->inhomogeneity;
        if (val != 0)
          for (size_type j=0; j<n_local_dofs; ++j)
            if (is_constrained(local_dof_indices[j]) == false)
              global_vector(local_dof_indices[j]) -= val * local_matrix(j,i);
            else
              {
                const double matrix_entry = local_matrix(j,i);
                if (matrix_entry == 0)
                  continue;

                const ConstraintLine &position_j =
                  lines[lines_cache[calculate_line_index(local_dof_indices[j])]];
                for (size_type q=0; q<position_j.entries.size(); ++q)
                  {
                    Assert (is_constrained(position_j.entries[q].first) == false,
                            ExcMessage ("Tried to distribute to a fixed dof."));
                    global_vector(position_j.entries[q].first)
                      -= val * position_j.entries[q].second * matrix_entry;
                  }
              }

        // now distribute the constraint, but avoid fixed dofs
        for (size_type j=0; j<position->entries.size(); ++j)
          {
            Assert (is_constrained(position->entries[j].first) == false,
                    ExcMessage ("Tried to distribute to a fixed dof."));
            global_vector(position->entries[j].first)
              += local_vector(i) * position->entries[j].second;
          }
      }
}
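// Usage sketch for the vector variant, with assumed names `cell_rhs`,
// `local_dof_indices`, and `system_rhs`; inside an assembly loop one would
// write
//
// @code
//   cell->get_dof_indices (local_dof_indices);
//   constraints.distribute_local_to_global (cell_rhs, local_dof_indices,
//                                           system_rhs);
// @endcode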
template <class VectorType>
void
ConstraintMatrix::distribute (const VectorType &condensed,
                              VectorType       &uncondensed) const
{
  // Build the mapping from uncondensed to condensed row numbers; -1 marks
  // a constrained row.
  std::vector<int> old_line;

  old_line.reserve (uncondensed.size());

  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();

  size_type n_rows = uncondensed.size();

  if (next_constraint == lines.end())
    for (size_type row=0; row!=n_rows; ++row)
      old_line.push_back (row);

    for (size_type row=0; row!=n_rows; ++row)
      if (row == next_constraint->line)
          old_line.push_back (-1);

          if (next_constraint == lines.end())
            {
              for (size_type i=row+1; i<n_rows; ++i)
                old_line.push_back (i-shift);
            }

        old_line.push_back (row-shift);

  next_constraint = lines.begin();

  // Copy unconstrained values, and compute each constrained value from the
  // condensed vector plus the constraint's inhomogeneity.
  for (size_type line=0; line<uncondensed.size(); ++line)
    if (old_line[line] != -1)
      uncondensed(line) = condensed(old_line[line]);
    else
      {
        uncondensed(line) = next_constraint->inhomogeneity;
        for (size_type i=0; i<next_constraint->entries.size(); ++i)
          uncondensed(line) += (condensed(old_line[next_constraint->entries[i].first]) *
                                next_constraint->entries[i].second);
      }
}
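// A worked example (not in the original file): with the constraint
// x_3 = 0.2 + 0.5*x_1 + 0.5*x_2, after solving the condensed system this
// function sets
//
//   uncondensed(3) = 0.2 + 0.5*condensed(old_line[1])
//                        + 0.5*condensed(old_line[2]),
//
// i.e. the inhomogeneity plus the weighted values of the DoFs the
// constraint depends on, exactly as in the loop above.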
#ifdef DEAL_II_WITH_TRILINOS
    // Create a ghosted copy of a Trilinos vector that also stores the
    // off-processor elements needed to resolve constraints.
#ifdef DEAL_II_WITH_MPI
        const Epetra_MpiComm *mpi_comm
          = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());

        output.reinit (needed_elements, mpi_comm->GetMpiComm());
#else
        output.reinit (needed_elements, MPI_COMM_WORLD);
#endif
#ifdef DEAL_II_WITH_PETSC
    void
    import_vector_with_ghost_elements (const PETScWrappers::MPI::Vector &vec,
                                       const IndexSet &locally_owned_elements,

    template <typename number>
    void
    import_vector_with_ghost_elements (const parallel::distributed::Vector<number> &vec,
                                       const IndexSet &locally_owned_elements,
    // generic fallback for sequential vector types; it must never end up
    // being called
    template <typename Vector>
    void
    import_vector_with_ghost_elements (const Vector &,

    // and a version for block vectors that imports block by block
    template <class VectorType>
    void
    import_vector_with_ghost_elements (const VectorType &vec,
                                       const IndexSet &locally_owned_elements,
    {
      output.reinit (vec.n_blocks());

      size_type block_start = 0;
      for (unsigned int b=0; b<vec.n_blocks(); ++b)
        {
          import_vector_with_ghost_elements (vec.block(b),
                                             locally_owned_elements.get_view (block_start, block_start+vec.block(b).size()),
                                             needed_elements.get_view (block_start, block_start+vec.block(b).size()),
                                             output.block(b));
          block_start += vec.block(b).size();
        }

      output.collect_sizes ();
    }
template <class VectorType>
void
ConstraintMatrix::distribute (VectorType &vec) const
{
  const IndexSet vec_owned_elements = vec.locally_owned_elements();
  if (vec.supports_distributed_data == true)
    {
      // This is a parallel vector: gather all ghost entries that enter a
      // constraint, then evaluate each locally owned constrained entry.
      IndexSet needed_elements = vec_owned_elements;

      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
      for (constraint_iterator it = lines.begin();
           it != lines.end(); ++it)
        if (vec_owned_elements.is_element(it->line))
          for (unsigned int i=0; i<it->entries.size(); ++i)
            if (!vec_owned_elements.is_element(it->entries[i].first))
              needed_elements.add_index(it->entries[i].first);

      VectorType ghosted_vector;
      internal::import_vector_with_ghost_elements (vec,
                                                   vec_owned_elements, needed_elements,
                                                   ghosted_vector);

      for (constraint_iterator it = lines.begin();
           it != lines.end(); ++it)
        if (vec_owned_elements.is_element(it->line))
          {
            typename VectorType::value_type
            new_value = it->inhomogeneity;
            for (unsigned int i=0; i<it->entries.size(); ++i)
              new_value += (static_cast<typename VectorType::value_type>
                            (ghosted_vector(it->entries[i].first)) *
                            it->entries[i].second);
            vec(it->line) = new_value;
          }

      vec.compress (VectorOperation::insert);
    }
  else
    {
      // Serial vector: evaluate each constraint directly.
      std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
      for (; next_constraint != lines.end(); ++next_constraint)
        {
          typename VectorType::value_type
          new_value = next_constraint->inhomogeneity;
          for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
            new_value += (static_cast<typename VectorType::value_type>
                          (vec(next_constraint->entries[i].first)) *
                          next_constraint->entries[i].second);
          vec(next_constraint->line) = new_value;
        }
    }
}
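// Usage sketch (assumed names): after solving, distribute() fills in the
// values of constrained DoFs from the unconstrained ones, in parallel
// computations via the ghost import above.
//
// @code
//   solver.solve (system_matrix, solution, system_rhs, preconditioner);
//   constraints.distribute (solution);
// @endcode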
      // Distributing: a pair of global and local row index plus a link to
      // the constraint that created it; comparison is by global row so that
      // arrays of these objects can be kept sorted.
        return global_row < in.global_row;

      size_type global_row;
      size_type local_row;
      mutable size_type constraint_position;

      Distributing::Distributing (const size_type global_row,
                                  const size_type local_row)
        :
        global_row (global_row),
        local_row (local_row),
        constraint_position (numbers::invalid_size_type) {}

      Distributing::Distributing (const Distributing &in)
        :
        constraint_position (numbers::invalid_size_type)
      {
        *this = in;
      }

      Distributing &Distributing::operator = (const Distributing &in)
      {
        global_row = in.global_row;
        local_row = in.local_row;

          constraint_position = in.constraint_position;
        return *this;
      }
      // DataCache: stores the constraint entries (local index, weight) of
      // all rows touched on one cell, in chunks of row_length entries.
      void reinit ()
      {
        individual_size.resize(0);
      }

      size_type insert_new_index (const std::pair<size_type,double> &pair)
      {
        const unsigned int index = individual_size.size();
        individual_size.push_back(1);
        data.resize(individual_size.size()*row_length);
        data[index*row_length] = pair;
        individual_size[index] = 1;
        return index;
      }

      void append_index (const size_type index,
                         const std::pair<size_type,double> &pair)
      {
        const size_type my_length = individual_size[index];
        if (my_length == row_length)
          {
            // This row's chunk is full: double all chunk widths and move
            // the existing chunks to their new places, back to front, so
            // that nothing is overwritten.
            data.resize(2*data.size());
            for (size_type i=individual_size.size()-1; i>0; --i)
              std::memmove(&data[i*row_length*2], &data[i*row_length],
                           individual_size[i]*
                           sizeof(std::pair<size_type,double>));
            row_length *= 2;
          }
        data[index*row_length+my_length] = pair;
        individual_size[index] = my_length + 1;
      }

      size_type
      get_size (const size_type index) const
      {
        return individual_size[index];
      }

      const std::pair<size_type,double> *
      get_entry (const size_type index) const
      {
        return &data[index*row_length];
      }

      size_type row_length;

      std::vector<std::pair<size_type,double> > data;

      std::vector<size_type> individual_size;
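// A minimal standalone sketch (illustration only, not deal.II code) of the
// growth strategy used by DataCache::append_index above: when one chunk
// overflows, all chunk widths double and existing chunks are moved back to
// front so they do not overwrite each other.
//
// @code
//   #include <cstring>
//   #include <utility>
//   #include <vector>
//
//   void grow (std::vector<std::pair<std::size_t,double> > &data,
//              const std::vector<std::size_t> &individual_size,
//              std::size_t &row_length)
//   {
//     data.resize (2*data.size());
//     for (std::size_t i=individual_size.size()-1; i>0; --i)
//       std::memmove (&data[i*row_length*2], &data[i*row_length],
//                     individual_size[i]*sizeof(std::pair<std::size_t,double>));
//     row_length *= 2;
//   }
// @endcode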
      // GlobalRowsFromLocal: collects all global rows a cell contributes
      // to, including rows reached only indirectly through constraints.
      GlobalRowsFromLocal ()
        :
        n_active_rows (0),
        n_inhomogeneous_rows (0)
      {}

      void reinit (const size_type n_local_rows)
      {
        total_row_indices.resize(n_local_rows);
        for (unsigned int i=0; i<n_local_rows; ++i)
          total_row_indices[i].constraint_position = numbers::invalid_size_type;
        n_active_rows = n_local_rows;
        n_inhomogeneous_rows = 0;
        data_cache.reinit();
      }

      void insert_index (const size_type global_row,
                         const size_type local_row,
                         const double    constraint_value);

      void print (std::ostream &os)
      {
        os << "Active rows " << n_active_rows << std::endl
           << "Constr rows " << n_constraints() << std::endl
           << "Inhom  rows " << n_inhomogeneous_rows << std::endl
           << "Local: ";
        for (size_type i=0 ; i<total_row_indices.size() ; ++i)
          os << ' ' << std::setw(4) << total_row_indices[i].local_row;
        os << std::endl << "Global:";
        for (size_type i=0 ; i<total_row_indices.size() ; ++i)
          os << ' ' << std::setw(4) << total_row_indices[i].global_row;
        os << std::endl << "ConPos:";
        for (size_type i=0 ; i<total_row_indices.size() ; ++i)
          os << ' ' << std::setw(4) << total_row_indices[i].constraint_position;
        os << std::endl;
      }

      // return the number of global indices in the struct
      size_type size () const
      {
        return n_active_rows;
      }

      // return the number of constraint entries behind the given index
      // (zero if the row is unconstrained)
      size_type size (const size_type counter_index) const
      {
        return (total_row_indices[counter_index].constraint_position ==
                numbers::invalid_size_type ?
                0 :
                data_cache.get_size(total_row_indices[counter_index].
                                    constraint_position));
      }

      size_type global_row (const size_type counter_index) const
      {
        return total_row_indices[counter_index].global_row;
      }

      size_type &global_row (const size_type counter_index)
      {
        return total_row_indices[counter_index].global_row;
      }

      size_type local_row (const size_type counter_index) const
      {
        return total_row_indices[counter_index].local_row;
      }

      size_type &local_row (const size_type counter_index)
      {
        return total_row_indices[counter_index].local_row;
      }

      // return the local row of the index_in_constraint-th entry in the
      // constraint behind counter_index
      size_type local_row (const size_type counter_index,
                           const size_type index_in_constraint) const
      {
        return (data_cache.get_entry(total_row_indices[counter_index].constraint_position)
                [index_in_constraint]).first;
      }

      double constraint_value (const size_type counter_index,
                               const size_type index_in_constraint) const
      {
        return (data_cache.get_entry(total_row_indices[counter_index].constraint_position)
                [index_in_constraint]).second;
      }

      bool have_indirect_rows () const
      {
        return data_cache.individual_size.empty() == false;
      }

      // append a constrained local dof; these are stashed at the end of
      // the array
      void insert_constraint (const size_type constrained_local_dof)
      {
        --n_active_rows;
        total_row_indices[n_active_rows].local_row = constrained_local_dof;
      }

      size_type n_constraints () const
      {
        return total_row_indices.size()-n_active_rows;
      }

      size_type n_inhomogeneities () const
      {
        return n_inhomogeneous_rows;
      }

      // mark the i-th constraint as inhomogeneous by swapping it to the
      // front of the constrained range
      void set_ith_constraint_inhomogeneous (const size_type i)
      {
        std::swap (total_row_indices[n_active_rows+i],
                   total_row_indices[n_active_rows+n_inhomogeneous_rows]);
        n_inhomogeneous_rows++;
      }

      size_type constraint_origin (size_type i) const
      {
        return total_row_indices[n_active_rows+i].local_row;
      }

      // the global and local indices as well as the links into the data
      // cache that describe how to resolve constraints
      std::vector<Distributing> total_row_indices;

      // the number of rows with at least one direct contribution
      size_type n_active_rows;

      // the number of rows with an inhomogeneous constraint
      size_type n_inhomogeneous_rows;
    void
    GlobalRowsFromLocal::insert_index (const size_type global_row,
                                       const size_type local_row,
                                       const double    constraint_value)
    {
      typedef std::vector<Distributing>::iterator index_iterator;
      index_iterator pos, pos1;
      Distributing row_value (global_row);
      std::pair<size_type,double> constraint (local_row, constraint_value);

      // check whether the list was really sorted before entering here
      for (size_type i=1; i<n_active_rows; ++i)
        Assert (total_row_indices[i-1] < total_row_indices[i], ExcInternalError());

      pos = Utilities::lower_bound (total_row_indices.begin(),
                                    total_row_indices.begin()+n_active_rows,
                                    row_value);
      if (pos->global_row == global_row)
        pos1 = pos;
      else
        {
          pos1 = total_row_indices.insert(pos, row_value);
          ++n_active_rows;
        }

      if (pos1->constraint_position == numbers::invalid_size_type)
        pos1->constraint_position = data_cache.insert_new_index (constraint);
      else
        data_cache.append_index (pos1->constraint_position, constraint);
    }
    void
    GlobalRowsFromLocal::sort ()
    {
      size_type i, j, j2, temp, templ, istep;
      size_type step;

      const size_type length = size();

      // check whether the function is called before any constraint
      // information has been inserted
      for (size_type i=0; i<length; ++i)
        Assert (total_row_indices[i].constraint_position ==
                numbers::invalid_size_type, ExcInternalError());

      // a shell sort on the (global_row, local_row) pairs, keyed on
      // global_row
      step = length/2;
      while (step > 0)
        {
          for (i=step; i < length; i++)
            {
              istep = step;
              j = i;
              j2 = j-istep;
              temp = total_row_indices[i].global_row;
              templ = total_row_indices[i].local_row;
              if (total_row_indices[j2].global_row > temp)
                {
                  while ((j >= istep) && (total_row_indices[j2].global_row > temp))
                    {
                      total_row_indices[j].global_row = total_row_indices[j2].global_row;
                      total_row_indices[j].local_row = total_row_indices[j2].local_row;
                      j = j2;
                      j2 -= istep;
                    }
                  total_row_indices[j].global_row = temp;
                  total_row_indices[j].local_row = templ;
                }
            }
          step = step/2;
        }
    }
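// The loop above is a shell sort on (global_row, local_row) pairs. A
// standalone equivalent (illustration only) looks like this:
//
// @code
//   #include <utility>
//   #include <vector>
//
//   void shell_sort (std::vector<std::pair<std::size_t,std::size_t> > &v)
//   {
//     for (std::size_t step = v.size()/2; step > 0; step /= 2)
//       for (std::size_t i = step; i < v.size(); ++i)
//         {
//           const std::pair<std::size_t,std::size_t> temp = v[i];
//           std::size_t j = i;
//           while (j >= step && v[j-step].first > temp.first)
//             {
//               v[j] = v[j-step];
//               j -= step;
//             }
//           v[j] = temp;
//         }
//   }
// @endcode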
    // RAII-style access to the thread-local scratch data used by the
    // local-to-global functions: the data is marked as in use for the
    // lifetime of the accessor.
    template <typename Number>

          Assert(my_scratch_data->in_use == false,
                 ExcMessage("Access to thread-local scratch data tried, but it is already "
                            "in use"));
          my_scratch_data->in_use = true;

          my_scratch_data->in_use = false;

          return *my_scratch_data;

          return my_scratch_data;
    // compute the block offsets within a sorted list of global rows
    template <class BlockType>
    inline
    void
    make_block_starts (const BlockType        &block_object,
                       GlobalRowsFromLocal    &global_rows,
                       std::vector<size_type> &block_starts)
    {
      typedef std::vector<Distributing>::iterator row_iterator;
      row_iterator block_indices = global_rows.total_row_indices.begin();

      const size_type num_blocks = block_object.n_block_rows();
      const size_type n_active_rows = global_rows.size();

      // find the end of the rows belonging to each block
      block_starts[0] = 0;
      for (size_type i=1; i<num_blocks; ++i)
        {
          row_iterator first_block =
            Utilities::lower_bound (block_indices,
                                    global_rows.total_row_indices.begin()+n_active_rows,
                                    Distributing(block_object.get_row_indices().block_start(i)));
          block_starts[i] = first_block - global_rows.total_row_indices.begin();
          block_indices = first_block;
        }
      block_starts[num_blocks] = n_active_rows;

      // transform the row indices to block-local index space
      for (size_type i=block_starts[1]; i<n_active_rows; ++i)
        global_rows.global_row(i) = block_object.get_row_indices().
                                    global_to_local(global_rows.global_row(i)).second;
    }
    // same as before, but on a plain array of row indices
    template <class BlockType>
    inline
    void
    make_block_starts (const BlockType        &block_object,
                       std::vector<size_type> &row_indices,
                       std::vector<size_type> &block_starts)
    {
      typedef std::vector<size_type>::iterator row_iterator;
      row_iterator col_indices = row_indices.begin();

      const size_type num_blocks = block_object.n_block_rows();

      // find the end of the rows belonging to each block
      block_starts[0] = 0;
      for (size_type i=1; i<num_blocks; ++i)
        {
          row_iterator first_block =
            Utilities::lower_bound (col_indices,
                                    row_indices.end(),
                                    block_object.get_row_indices().block_start(i));
          block_starts[i] = first_block - row_indices.begin();
          col_indices = first_block;
        }
      block_starts[num_blocks] = row_indices.size();

      // transform the row indices to block-local index space
      for (size_type i=block_starts[1]; i<row_indices.size(); ++i)
        row_indices[i] = block_object.get_row_indices().
                         global_to_local(row_indices[i]).second;
    }
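// Illustration (not deal.II code): the lower_bound searches above simply
// split a sorted index list at the first row of each block. For block
// starts {0, 4, 9} and sorted rows {1, 3, 5, 8}, the result is
// block_starts = {0, 2, 4}: rows 1 and 3 belong to block 0, rows 5 and 8
// to block 1.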
    // resolve the entry that ends up at global position (i,j), taking into
    // account the constraint expansions of both the row and the column
    static inline
    double resolve_matrix_entry (const GlobalRowsFromLocal &global_rows,
                                 const GlobalRowsFromLocal &global_cols,
                                 const size_type            i,
                                 const size_type            j,
                                 const size_type            loc_row,
                                 const FullMatrix<double>  &local_matrix)
    {
      const size_type loc_col = global_cols.local_row(j);
      double col_val;

      // the row contributes directly to the local matrix
      if (loc_row != numbers::invalid_size_type)
        {
          col_val = ((loc_col != numbers::invalid_size_type) ?
                     local_matrix(loc_row, loc_col) : 0);

          // indirect contributions from constraints on the column
          for (size_type p=0; p<global_cols.size(j); ++p)
            col_val += (local_matrix(loc_row, global_cols.local_row(j,p)) *
                        global_cols.constraint_value(j,p));
        }
      else
        col_val = 0;

      // indirect contributions from constraints on the row
      for (size_type q=0; q<global_rows.size(i); ++q)
        {
          double add_this = (loc_col != numbers::invalid_size_type)
                            ? local_matrix(global_rows.local_row(i,q), loc_col) : 0;

          for (size_type p=0; p<global_cols.size(j); ++p)
            add_this += (local_matrix(global_rows.local_row(i,q),
                                      global_cols.local_row(j,p))
                         *
                         global_cols.constraint_value(j,p));
          col_val += add_this * global_rows.constraint_value(i,q);
        }
      return col_val;
    }
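// In formulas: if row i expands as u_i = sum_q c_{iq} u_{r(q)} and column j
// as u_j = sum_p c_{jp} u_{s(p)}, the value written to global position
// (i,j) is
//
//   A_ij = M(i,j) + sum_p c_{jp} M(i,s(p))
//        + sum_q c_{iq} ( M(r(q),j) + sum_p c_{jp} M(r(q),s(p)) ),
//
// where M is the local matrix and terms without a direct local
// contribution drop out; this is exactly the sum accumulated in col_val
// above.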
    template <typename number>
    static inline
    void
    resolve_matrix_row (const GlobalRowsFromLocal &global_rows,
                        const GlobalRowsFromLocal &global_cols,
                        const size_type            i,
                        const size_type            column_start,
                        const size_type            column_end,
                        const FullMatrix<double>  &local_matrix,
                        size_type                 *&col_ptr,
                        number                    *&val_ptr)
    {
      if (column_end == column_start)
        return;

      const size_type loc_row = global_rows.local_row(i);

      // fast function if there are no indirect references to any of the
      // local rows at all on this set of dofs
      if (global_rows.have_indirect_rows() == false &&
          global_cols.have_indirect_rows() == false)
        {
          const double *matrix_ptr = &local_matrix(loc_row, 0);

          for (size_type j=column_start; j<column_end; ++j)
            {
              const size_type loc_col = global_cols.local_row(j);

              const double col_val = matrix_ptr[loc_col];
              if (col_val != 0.)
                {
                  *val_ptr++ = static_cast<number> (col_val);
                  *col_ptr++ = global_cols.global_row(j);
                }
            }
        }
      // more difficult part when there are indirect references and we need
      // to do some more checks
      else
        {
          for (size_type j=column_start; j<column_end; ++j)
            {
              double col_val = resolve_matrix_entry (global_rows, global_cols, i, j,
                                                     loc_row, local_matrix);

              // if we got some nontrivial value, append it to the array of
              // values
              if (col_val != 0.)
                {
                  *val_ptr++ = static_cast<number> (col_val);
                  *col_ptr++ = global_cols.global_row(j);
                }
            }
        }
    }
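// The (cols, vals) arrays filled here form one fully resolved matrix row;
// the callers below hand them to the global matrix in a single call such
// as
//
// @code
//   global_matrix.add (row, n_values, &cols[0], &vals[0],
//                      false, true); // elide_zero_values=false,
//                                    // col_indices_are_sorted=true
// @endcode
//
// (see the distribute_local_to_global functions further down).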
    // add a value to a row of a deal.II SparseMatrix, advancing the given
    // row iterator to the requested column
    namespace dealiiSparseMatrix
    {
      template <typename SparseMatrixIterator>
      static inline
      void add_value (const double          value,
                      const size_type       row,
                      const size_type       column,
                      SparseMatrixIterator &matrix_values)
      {
        if (value != 0.)
          {
            while (matrix_values->column() < column)
              ++matrix_values;
            Assert (matrix_values->column() == column,
                    ExcInternalError());
            matrix_values->value() += value;
          }
      }
    }
    // specialized function that can write into the rows of a deal.II
    // SparseMatrix directly, walking a row iterator instead of collecting
    // (column, value) arrays first
    template <typename number>
    static inline
    void
    resolve_matrix_row (const GlobalRowsFromLocal &global_rows,
                        const size_type            i,
                        const size_type            column_start,
                        const size_type            column_end,
                        const FullMatrix<double>  &local_matrix,
                        SparseMatrix<number>      *sparse_matrix)
    {
      if (column_end == column_start)
        return;

      const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern();

      if (sparsity.n_nonzero_elements() == 0)
        return;

      const size_type row = global_rows.global_row(i);
      const size_type loc_row = global_rows.local_row(i);

      typename SparseMatrix<number>::iterator
      matrix_values = sparse_matrix->begin(row);
      const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols();

      // distinguish three cases: no special treatment of the diagonal, the
      // diagonal sits inside the column range, or it does not
      if (!optimize_diagonal)
        {
          if (global_rows.have_indirect_rows() == false)
            {
              const double *matrix_ptr = &local_matrix(loc_row, 0);

              for (size_type j=column_start; j<column_end; ++j)
                {
                  const size_type loc_col = global_rows.local_row(j);
                  const double col_val = matrix_ptr[loc_col];
                  dealiiSparseMatrix::add_value (col_val, row,
                                                 global_rows.global_row(j),
                                                 matrix_values);
                }
            }
          else
            {
              for (size_type j=column_start; j<column_end; ++j)
                {
                  double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
                                                         loc_row, local_matrix);
                  dealiiSparseMatrix::add_value (col_val, row,
                                                 global_rows.global_row(j),
                                                 matrix_values);
                }
            }
        }
      else if (i>=column_start && i<column_end)
        {
          // the diagonal of the row is inside the column range, so handle
          // it separately; the first entry of the row is the diagonal
          ++matrix_values;
          if (global_rows.have_indirect_rows() == false)
            {
              const double *matrix_ptr = &local_matrix(loc_row, 0);

              sparse_matrix->begin(row)->value() += matrix_ptr[loc_row];
              for (size_type j=column_start; j<i; ++j)
                {
                  const size_type loc_col = global_rows.local_row(j);
                  const double col_val = matrix_ptr[loc_col];
                  dealiiSparseMatrix::add_value(col_val, row,
                                                global_rows.global_row(j),
                                                matrix_values);
                }
              for (size_type j=i+1; j<column_end; ++j)
                {
                  const size_type loc_col = global_rows.local_row(j);
                  const double col_val = matrix_ptr[loc_col];
                  dealiiSparseMatrix::add_value(col_val, row,
                                                global_rows.global_row(j),
                                                matrix_values);
                }
            }
          else
            {
              sparse_matrix->begin(row)->value() +=
                resolve_matrix_entry (global_rows, global_rows, i, i,
                                      loc_row, local_matrix);
              for (size_type j=column_start; j<i; ++j)
                {
                  double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
                                                         loc_row, local_matrix);
                  dealiiSparseMatrix::add_value (col_val, row,
                                                 global_rows.global_row(j),
                                                 matrix_values);
                }
              for (size_type j=i+1; j<column_end; ++j)
                {
                  double col_val = resolve_matrix_entry (global_rows, global_rows, i, j,
                                                         loc_row, local_matrix);
                  dealiiSparseMatrix::add_value (col_val, row,
                                                 global_rows.global_row(j),
                                                 matrix_values);
                }
            }
        }
      // the diagonal might appear anywhere in the column range, so test
      // each column index against the row
      else if (global_rows.have_indirect_rows() == false)
        {
          ++matrix_values;
          const double *matrix_ptr = &local_matrix(loc_row, 0);

          for (size_type j=column_start; j<column_end; ++j)
            {
              const size_type loc_col = global_rows.local_row(j);
              const double col_val = matrix_ptr[loc_col];
              if (row==global_rows.global_row(j))
                sparse_matrix->begin(row)->value() += col_val;
              else
                dealiiSparseMatrix::add_value(col_val, row,
                                              global_rows.global_row(j),
                                              matrix_values);
            }
        }
      else
        {
          ++matrix_values;
          for (size_type j=column_start; j<column_end; ++j)
            {
              double col_val = resolve_matrix_entry (global_rows, global_rows, i,
                                                     j, loc_row, local_matrix);
              if (row==global_rows.global_row(j))
                sparse_matrix->begin(row)->value() += col_val;
              else
                dealiiSparseMatrix::add_value (col_val, row,
                                               global_rows.global_row(j),
                                               matrix_values);
            }
        }
    }
    // the same function as before, now used to resolve the rows of a
    // sparsity pattern: only the column indices that pass the dof_mask are
    // collected
    static inline
    void
    resolve_matrix_row (const GlobalRowsFromLocal        &global_rows,
                        const size_type                   i,
                        const size_type                   column_start,
                        const size_type                   column_end,
                        const Table<2,bool>              &dof_mask,
                        std::vector<size_type>::iterator &col_ptr)
    {
      if (column_end == column_start)
        return;

      const size_type loc_row = global_rows.local_row(i);

      // fast function if there are no indirect references to any of the
      // local rows at all on this set of dofs
      if (global_rows.have_indirect_rows() == false)
        {
          Assert(loc_row < dof_mask.n_rows(),
                 ExcInternalError());

          for (size_type j=column_start; j<column_end; ++j)
            {
              const size_type loc_col = global_rows.local_row(j);

              if (dof_mask(loc_row,loc_col) == true)
                *col_ptr++ = global_rows.global_row(j);
            }
        }
      // slower functions when there are indirect references and we need to
      // do some more checks
      else
        {
          for (size_type j=column_start; j<column_end; ++j)
            {
              const size_type loc_col = global_rows.local_row(j);

              if (loc_row != numbers::invalid_size_type)
                {
                  if (loc_col != numbers::invalid_size_type)
                    {
                      if (dof_mask(loc_row,loc_col) == true)
                        goto add_this_index;
                    }

                  for (size_type p=0; p<global_rows.size(j); ++p)
                    if (dof_mask(loc_row,global_rows.local_row(j,p)) == true)
                      goto add_this_index;
                }

              for (size_type q=0; q<global_rows.size(i); ++q)
                {
                  if (loc_col != numbers::invalid_size_type)
                    {
                      if (dof_mask(global_rows.local_row(i,q),loc_col) == true)
                        goto add_this_index;
                    }

                  for (size_type p=0; p<global_rows.size(j); ++p)
                    if (dof_mask(global_rows.local_row(i,q),
                                 global_rows.local_row(j,p)) == true)
                      goto add_this_index;
                }

              continue;
              // if we got some nontrivial value, append it to the array of
              // values
add_this_index:
              *col_ptr++ = global_rows.global_row(j);
            }
        }
    }
    // add the diagonal entries for constrained rows: either the absolute
    // local diagonal or, if that is zero, the average diagonal of the local
    // matrix, so that the global matrix stays regular
    template <typename MatrixType, typename VectorType>
    inline void
    set_matrix_diagonals (const internals::GlobalRowsFromLocal &global_rows,
                          const std::vector<size_type>         &local_dof_indices,
                          const FullMatrix<double>             &local_matrix,
                          const ConstraintMatrix               &constraints,
                          MatrixType                           &global_matrix,
                          VectorType                           &global_vector,
                          bool                                  use_inhomogeneities_for_rhs)
    {
      if (global_rows.n_constraints() > 0)
        {
          double average_diagonal = 0;
          for (size_type i=0; i<local_matrix.m(); ++i)
            average_diagonal += std::fabs (local_matrix(i,i));
          average_diagonal /= static_cast<double>(local_matrix.m());

          for (size_type i=0; i<global_rows.n_constraints(); i++)
            {
              const size_type local_row = global_rows.constraint_origin(i);
              const size_type global_row = local_dof_indices[local_row];
              const typename MatrixType::value_type new_diagonal
                = (std::fabs(local_matrix(local_row,local_row)) != 0 ?
                   std::fabs(local_matrix(local_row,local_row)) : average_diagonal);
              global_matrix.add(global_row, global_row, new_diagonal);

              // if the flag is set, also account for the inhomogeneity in
              // the right hand side
              if (use_inhomogeneities_for_rhs == true)
                global_vector(global_row)
                  += constraints.get_inhomogeneity(global_row) * new_diagonal;
            }
        }
    }
    // add the diagonal (and, if requested, the full rows and columns) of
    // constrained entries to a sparsity pattern
    template <typename SparsityType>
    inline void
    set_sparsity_diagonals (const internals::GlobalRowsFromLocal &global_rows,
                            const std::vector<size_type>         &local_dof_indices,
                            const Table<2,bool>                  &dof_mask,
                            const bool                            keep_constrained_entries,
                            SparsityType                         &sparsity_pattern)
    {
      if (global_rows.n_constraints() > 0)
        {
          for (size_type i=0; i<global_rows.n_constraints(); i++)
            {
              const size_type local_row = global_rows.constraint_origin(i);
              const size_type global_row = local_dof_indices[local_row];
              if (keep_constrained_entries == true)
                {
                  for (size_type j=0; j<local_dof_indices.size(); ++j)
                    {
                      if (dof_mask(local_row,j) == true)
                        sparsity_pattern.add(global_row,
                                             local_dof_indices[j]);
                      if (dof_mask(j,local_row) == true)
                        sparsity_pattern.add(local_dof_indices[j],
                                             global_row);
                    }
                }
              else
                // don't keep constrained entries: just add the diagonal
                sparsity_pattern.add(global_row,global_row);
            }
        }
    }
void
ConstraintMatrix::
make_sorted_row_list (const std::vector<size_type>   &local_dof_indices,
                      internals::GlobalRowsFromLocal &global_rows) const
{
  const size_type n_local_dofs = local_dof_indices.size();

  // First add the direct contributions in an unsorted way and stash the
  // constrained rows at the end of the array; they are resolved in a
  // second step.
  size_type added_rows = 0;

  for (size_type i = 0; i<n_local_dofs; ++i)
    {
      if (is_constrained(local_dof_indices[i]) == false)
        {
          global_rows.global_row(added_rows)  = local_dof_indices[i];
          global_rows.local_row(added_rows++) = i;
        }
      else
        global_rows.insert_constraint(i);
    }
  global_rows.sort();

  const size_type n_constrained_rows = n_local_dofs-added_rows;
  for (size_type i=0; i<n_constrained_rows; ++i)
    {
      const size_type local_row = global_rows.constraint_origin(i);

      const size_type global_row = local_dof_indices[local_row];
      const ConstraintLine &position =
        lines[lines_cache[calculate_line_index(global_row)]];
      if (position.inhomogeneity != 0)
        global_rows.set_ith_constraint_inhomogeneous (i);
      for (size_type q=0; q<position.entries.size(); ++q)
        global_rows.insert_index (position.entries[q].first,
                                  local_row,
                                  position.entries[q].second);
    }
}
void
ConstraintMatrix::
make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
                      std::vector<size_type>       &active_dofs) const
{
  const size_type n_local_dofs = local_dof_indices.size();
  size_type added_rows = 0;
  for (size_type i = 0; i<n_local_dofs; ++i)
    {
      if (is_constrained(local_dof_indices[i]) == false)
        {
          active_dofs[added_rows++] = local_dof_indices[i];
          continue;
        }

      active_dofs[n_local_dofs-i+added_rows-1] = i;
    }
  std::sort (active_dofs.begin(), active_dofs.begin()+added_rows);

  const size_type n_constrained_dofs = n_local_dofs-added_rows;
  for (size_type i=n_constrained_dofs; i>0; --i)
    {
      const size_type local_row = active_dofs.back();

      // remove the constrained entry, since we resolve it in place
      active_dofs.pop_back();
      const size_type global_row = local_dof_indices[local_row];
      const ConstraintLine &position =
        lines[lines_cache[calculate_line_index(global_row)]];
      for (size_type q=0; q<position.entries.size(); ++q)
        {
          const size_type new_index = position.entries[q].first;
          if (active_dofs[active_dofs.size()-i] < new_index)
            active_dofs.insert(active_dofs.end()-i+1,new_index);
          else
            {
              // keep the list sorted while inserting
              std::vector<size_type>::iterator it =
                Utilities::lower_bound(active_dofs.begin(),
                                       active_dofs.end()-i+1,
                                       new_index);
              if (*it != new_index)
                active_dofs.insert(it, new_index);
            }
        }
    }
}
double
ConstraintMatrix::
resolve_vector_entry (const size_type                       i,
                      const internals::GlobalRowsFromLocal &global_rows,
                      const Vector<double>                 &local_vector,
                      const std::vector<size_type>         &local_dof_indices,
                      const FullMatrix<double>             &local_matrix) const
{
  const size_type loc_row = global_rows.local_row(i);
  const size_type n_inhomogeneous_rows = global_rows.n_inhomogeneities();
  double val = 0;
  // the current row has a direct contribution; if there are inhomogeneous
  // constraints, also subtract their contribution from the right hand side
  if (loc_row != numbers::invalid_size_type)
    {
      val = local_vector(loc_row);
      for (size_type i=0; i<n_inhomogeneous_rows; ++i)
        val -= (lines[lines_cache[calculate_line_index(local_dof_indices
                                                       [global_rows.constraint_origin(i)])]].
                inhomogeneity *
                local_matrix(loc_row, global_rows.constraint_origin(i)));
    }

  // go through the indirect contributions
  for (size_type q=0; q<global_rows.size(i); ++q)
    {
      const size_type loc_row_q = global_rows.local_row(i,q);
      double add_this = local_vector (loc_row_q);
      for (size_type k=0; k<n_inhomogeneous_rows; ++k)
        add_this -= (lines[lines_cache[calculate_line_index(local_dof_indices
                                                            [global_rows.constraint_origin(k)])]].
                     inhomogeneity *
                     local_matrix(loc_row_q,global_rows.constraint_origin(k)));
      val += add_this * global_rows.constraint_value(i,q);
    }
  return val;
}
template <typename MatrixType, typename VectorType>
void
ConstraintMatrix::distribute_local_to_global (
  const FullMatrix<double>     &local_matrix,
  const Vector<double>         &local_vector,
  const std::vector<size_type> &local_dof_indices,
  MatrixType                   &global_matrix,
  VectorType                   &global_vector,
  bool                          use_inhomogeneities_for_rhs,
  internal::bool2type<false>) const
{
  // check whether we work on real vectors or we just used a dummy when
  // calling the other function above
  const bool use_vectors = (local_vector.size() == 0 &&
                            global_vector.size() == 0) ? false : true;
  typedef typename MatrixType::value_type number;
  const bool use_dealii_matrix =
    types_are_equal<MatrixType,SparseMatrix<number> >::value;

  Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic());
  if (use_vectors == true)
    {
      AssertDimension (local_matrix.m(), local_vector.size());
      AssertDimension (global_matrix.m(), global_vector.size());
    }

  const size_type n_local_dofs = local_dof_indices.size();

  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
  global_rows.reinit(n_local_dofs);
  make_sorted_row_list (local_dof_indices, global_rows);

  const size_type n_actual_dofs = global_rows.size();

  // create arrays for the column data (indices and values) that will then
  // be written into the matrix. Shortcut for a deal.II sparse matrix: we
  // can write into its rows directly.
  std::vector<size_type> &cols = scratch_data->columns;
  std::vector<number>    &vals = scratch_data->values;
  SparseMatrix<number>   *sparse_matrix = 0;
  if (use_dealii_matrix == false)
    {
      cols.resize (n_actual_dofs);
      vals.resize (n_actual_dofs);
    }
  else
    sparse_matrix = dynamic_cast<SparseMatrix<number> *>(&global_matrix);

  // now do the actual job: go through all the global rows we touch and
  // resolve each of them
  for (size_type i=0; i<n_actual_dofs; ++i)
    {
      const size_type row = global_rows.global_row(i);

      // calculate all the data that will be written into the matrix row
      if (use_dealii_matrix == false)
        {
          size_type *col_ptr = &cols[0];

          number *val_ptr = &vals[0];
          internals::resolve_matrix_row (global_rows, global_rows, i, 0,
                                         n_actual_dofs,
                                         local_matrix, col_ptr, val_ptr);
          const size_type n_values = col_ptr - &cols[0];
          if (n_values > 0)
            global_matrix.add(row, n_values, &cols[0], &vals[0], false,
                              true);
        }
      else
        internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs,
                                       local_matrix, sparse_matrix);

      // now to the vectors: besides distributing the local vector, account
      // for the inhomogeneities
      if (use_vectors == true)
        {
          const double val = resolve_vector_entry (i, global_rows,
                                                   local_vector,
                                                   local_dof_indices,
                                                   local_matrix);

          if (val != 0)
            global_vector(row) += static_cast<typename VectorType::value_type>(val);
        }
    }

  internals::set_matrix_diagonals (global_rows, local_dof_indices,
                                   local_matrix, *this,
                                   global_matrix, global_vector, use_inhomogeneities_for_rhs);
}
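// The canonical assembly loop using this function (a sketch; `fe_values`,
// `cell_matrix`, `cell_rhs`, and the other names are assumed to be set up
// as usual):
//
// @code
//   for (; cell != endc; ++cell)
//     {
//       fe_values.reinit (cell);
//       // ... fill cell_matrix and cell_rhs ...
//       cell->get_dof_indices (local_dof_indices);
//       constraints.distribute_local_to_global (cell_matrix, cell_rhs,
//                                               local_dof_indices,
//                                               system_matrix, system_rhs);
//     }
// @endcode
//
// With this entry point no subsequent condense() call is needed, and
// constrained rows receive the artificial diagonal produced by
// set_matrix_diagonals above.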
template <typename MatrixType>
void
ConstraintMatrix::distribute_local_to_global (
  const FullMatrix<double>     &local_matrix,
  const std::vector<size_type> &row_indices,
  const std::vector<size_type> &col_indices,
  MatrixType                   &global_matrix) const
{
  typedef double number;

  const size_type n_local_row_dofs = row_indices.size();
  const size_type n_local_col_dofs = col_indices.size();

  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
  global_rows.reinit(n_local_row_dofs);
  internals::GlobalRowsFromLocal &global_cols = scratch_data->global_columns;
  global_cols.reinit(n_local_col_dofs);
  make_sorted_row_list (row_indices, global_rows);
  make_sorted_row_list (col_indices, global_cols);

  const size_type n_actual_row_dofs = global_rows.size();
  const size_type n_actual_col_dofs = global_cols.size();

  // create arrays for the column data that will be written into the matrix
  std::vector<size_type> &cols = scratch_data->columns;
  std::vector<number>    &vals = scratch_data->values;
  cols.resize(n_actual_col_dofs);
  vals.resize(n_actual_col_dofs);

  // now do the actual job
  for (size_type i=0; i<n_actual_row_dofs; ++i)
    {
      const size_type row = global_rows.global_row(i);

      // calculate all the data that will be written into the matrix row
      size_type *col_ptr = &cols[0];
      number    *val_ptr = &vals[0];
      internals::resolve_matrix_row (global_rows, global_cols, i, 0,
                                     n_actual_col_dofs,
                                     local_matrix, col_ptr, val_ptr);
      const size_type n_values = col_ptr - &cols[0];
      if (n_values > 0)
        global_matrix.add(row, n_values, &cols[0], &vals[0], false,
                          true);
    }
}
template <typename MatrixType, typename VectorType>
void
ConstraintMatrix::distribute_local_to_global (
  const FullMatrix<double>     &local_matrix,
  const Vector<double>         &local_vector,
  const std::vector<size_type> &local_dof_indices,
  MatrixType                   &global_matrix,
  VectorType                   &global_vector,
  bool                          use_inhomogeneities_for_rhs,
  internal::bool2type<true>) const
{
  const bool use_vectors = (local_vector.size() == 0 &&
                            global_vector.size() == 0) ? false : true;
  typedef typename MatrixType::value_type number;
  const bool use_dealii_matrix =
    types_are_equal<MatrixType,BlockSparseMatrix<number> >::value;

  Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic());
  Assert (global_matrix.n_block_rows() == global_matrix.n_block_cols(),
          ExcNotQuadratic());

  if (use_vectors == true)
    {
      AssertDimension (local_matrix.m(), local_vector.size());
      AssertDimension (global_matrix.m(), global_vector.size());
    }

  const size_type n_local_dofs = local_dof_indices.size();

  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
  global_rows.reinit(n_local_dofs);
  make_sorted_row_list (local_dof_indices, global_rows);

  const size_type n_actual_dofs = global_rows.size();

  std::vector<size_type> &global_indices = scratch_data->vector_indices;
  if (use_vectors == true)
    {
      global_indices.resize(n_actual_dofs);
      for (size_type i=0; i<n_actual_dofs; ++i)
        global_indices[i] = global_rows.global_row(i);
    }

  // additional construct that also takes care of block indices
  const size_type num_blocks = global_matrix.n_block_rows();
  std::vector<size_type> &block_starts = scratch_data->block_starts;
  block_starts.resize(num_blocks+1);
  internals::make_block_starts (global_matrix, global_rows, block_starts);

  std::vector<size_type> &cols = scratch_data->columns;
  std::vector<number>    &vals = scratch_data->values;
  if (use_dealii_matrix == false)
    {
      cols.resize (n_actual_dofs);
      vals.resize (n_actual_dofs);
    }

  // the basic difference to the non-block variant is that we go through
  // the blocks of the matrix separately, which allows us to set the block
  // entries individually
  for (size_type block=0; block<num_blocks; ++block)
    {
      const size_type next_block = block_starts[block+1];
      for (size_type i=block_starts[block]; i<next_block; ++i)
        {
          const size_type row = global_rows.global_row(i);

          for (size_type block_col=0; block_col<num_blocks; ++block_col)
            {
              const size_type start_block = block_starts[block_col],
                              end_block = block_starts[block_col+1];
              if (use_dealii_matrix == false)
                {
                  size_type *col_ptr = &cols[0];
                  number    *val_ptr = &vals[0];
                  internals::resolve_matrix_row (global_rows, global_rows, i,
                                                 start_block, end_block,
                                                 local_matrix, col_ptr, val_ptr);
                  const size_type n_values = col_ptr - &cols[0];
                  if (n_values > 0)
                    global_matrix.block(block, block_col).add(row, n_values,
                                                              &cols[0], &vals[0],
                                                              false, true);
                }
              else
                {
                  SparseMatrix<number> *sparse_matrix
                    = dynamic_cast<SparseMatrix<number> *>(&global_matrix.block(block,
                                                                                block_col));
                  internals::resolve_matrix_row (global_rows, i, start_block,
                                                 end_block, local_matrix, sparse_matrix);
                }
            }

          if (use_vectors == true)
            {
              const double val = resolve_vector_entry (i, global_rows,
                                                       local_vector,
                                                       local_dof_indices,
                                                       local_matrix);

              if (val != 0)
                global_vector(global_indices[i]) +=
                  static_cast<typename VectorType::value_type>(val);
            }
        }
    }

  internals::set_matrix_diagonals (global_rows, local_dof_indices,
                                   local_matrix, *this,
                                   global_matrix, global_vector, use_inhomogeneities_for_rhs);
}
template <typename SparsityType>
void
ConstraintMatrix::
add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
                             SparsityType                 &sparsity_pattern,
                             const bool                    keep_constrained_entries,
                             const Table<2,bool>          &dof_mask,
                             internal::bool2type<false>) const
{
  Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic());

  const size_type n_local_dofs = local_dof_indices.size();
  bool dof_mask_is_active = false;
  if (dof_mask.n_rows() == n_local_dofs)
    {
      dof_mask_is_active = true;
      AssertDimension (dof_mask.n_cols(), n_local_dofs);
    }

  // if the dof mask is not active, all we have to do is to add the
  // local dof indices plus the indices that come from constraints
  if (dof_mask_is_active == false)
    {
      std::vector<size_type> &actual_dof_indices = scratch_data->columns;
      actual_dof_indices.resize(n_local_dofs);
      make_sorted_row_list (local_dof_indices, actual_dof_indices);
      const size_type n_actual_dofs = actual_dof_indices.size();

      // now add the indices we collected above to the sparsity pattern;
      // very easy in this case
      for (size_type i=0; i<n_actual_dofs; ++i)
        sparsity_pattern.add_entries(actual_dof_indices[i],
                                     actual_dof_indices.begin(),
                                     actual_dof_indices.end(),
                                     true);

      // if requested, also add the rows and columns of constrained entries
      for (size_type i=0; i<n_local_dofs; i++)
        if (is_constrained(local_dof_indices[i]))
          {
            if (keep_constrained_entries == true)
              for (size_type j=0; j<n_local_dofs; j++)
                {
                  sparsity_pattern.add (local_dof_indices[i], local_dof_indices[j]);
                  sparsity_pattern.add (local_dof_indices[j], local_dof_indices[i]);
                }
            else
              sparsity_pattern.add (local_dof_indices[i], local_dof_indices[i]);
          }

      return;
    }

  // complicated case: we need to filter out some indices, so the function
  // gets similar to the one distributing matrix entries
  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
  global_rows.reinit(n_local_dofs);
  make_sorted_row_list (local_dof_indices, global_rows);
  const size_type n_actual_dofs = global_rows.size();

  // create an array for the column indices that will be written into the
  // sparsity pattern
  std::vector<size_type> &cols = scratch_data->columns;
  cols.resize(n_actual_dofs);

  for (size_type i=0; i<n_actual_dofs; ++i)
    {
      std::vector<size_type>::iterator col_ptr = cols.begin();
      const size_type row = global_rows.global_row(i);
      internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs,
                                     dof_mask, col_ptr);

      // write the accumulated entries into the sparsity pattern
      if (col_ptr != cols.begin())
        sparsity_pattern.add_entries(row, cols.begin(), col_ptr,
                                     true);
    }
  internals::set_sparsity_diagonals (global_rows, local_dof_indices,
                                     dof_mask, keep_constrained_entries,
                                     sparsity_pattern);
}
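// Usage sketch for building a sparsity pattern consistent with the
// assembly above (assumed names; CompressedSimpleSparsityPattern is the
// dynamic pattern class of this deal.II generation):
//
// @code
//   CompressedSimpleSparsityPattern csp (dof_handler.n_dofs());
//   for (; cell != endc; ++cell)
//     {
//       cell->get_dof_indices (local_dof_indices);
//       constraints.add_entries_local_to_global (local_dof_indices, csp,
//                                                /*keep_constrained_entries=*/false);
//     }
//   sparsity_pattern.copy_from (csp);
// @endcode
//
// The block version further down works the same way on a block sparsity
// pattern.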
template <typename SparsityType>
void
ConstraintMatrix::
add_entries_local_to_global (const std::vector<size_type> &row_indices,
                             const std::vector<size_type> &col_indices,
                             SparsityType                 &sparsity_pattern,
                             const bool                    keep_constrained_entries,
                             const Table<2,bool>          &dof_mask) const
{
  const size_type n_local_rows = row_indices.size();
  const size_type n_local_cols = col_indices.size();
  bool dof_mask_is_active = false;
  if (dof_mask.n_rows() == n_local_rows && dof_mask.n_cols() == n_local_cols)
    dof_mask_is_active = true;

  // if the dof mask is not active, all we have to do is to add the indices
  if (dof_mask_is_active == false)
    {
      std::vector<size_type> actual_row_indices (n_local_rows);
      std::vector<size_type> actual_col_indices (n_local_cols);
      make_sorted_row_list (row_indices, actual_row_indices);
      make_sorted_row_list (col_indices, actual_col_indices);
      const size_type n_actual_rows = actual_row_indices.size();

      // now add the indices we collected above to the sparsity pattern;
      // very easy in this case
      for (size_type i=0; i<n_actual_rows; ++i)
        sparsity_pattern.add_entries(actual_row_indices[i],
                                     actual_col_indices.begin(),
                                     actual_col_indices.end(),
                                     true);
    }

  // if constrained entries should be kept, need to add the rows and
  // columns of those to the sparsity pattern
  if (keep_constrained_entries == true)
    {
      for (size_type i=0; i<row_indices.size(); i++)
        if (is_constrained(row_indices[i]))
          for (size_type j=0; j<col_indices.size(); j++)
            sparsity_pattern.add (row_indices[i], col_indices[j]);
      for (size_type i=0; i<col_indices.size(); i++)
        if (is_constrained(col_indices[i]))
          for (size_type j=0; j<row_indices.size(); j++)
            sparsity_pattern.add (row_indices[j], col_indices[i]);
    }
}
template <typename SparsityType>
void
ConstraintMatrix::
add_entries_local_to_global (const std::vector<size_type> &local_dof_indices,
                             SparsityType                 &sparsity_pattern,
                             const bool                    keep_constrained_entries,
                             const Table<2,bool>          &dof_mask,
                             internal::bool2type<true>) const
{
  // just as the other add_entries_local_to_global function, but now
  // specialized for block matrices
  Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic());
  Assert (sparsity_pattern.n_block_rows() == sparsity_pattern.n_block_cols(),
          ExcNotQuadratic());

  const size_type n_local_dofs = local_dof_indices.size();
  const size_type num_blocks = sparsity_pattern.n_block_rows();

  bool dof_mask_is_active = false;
  if (dof_mask.n_rows() == n_local_dofs)
    {
      dof_mask_is_active = true;
      AssertDimension (dof_mask.n_cols(), n_local_dofs);
    }

  if (dof_mask_is_active == false)
    {
      std::vector<size_type> &actual_dof_indices = scratch_data->columns;
      actual_dof_indices.resize(n_local_dofs);
      make_sorted_row_list (local_dof_indices, actual_dof_indices);
      const size_type n_actual_dofs = actual_dof_indices.size();

      // additional construct that also takes care of block indices
      std::vector<size_type> &block_starts = scratch_data->block_starts;
      block_starts.resize(num_blocks+1);
      internals::make_block_starts (sparsity_pattern, actual_dof_indices,
                                    block_starts);

      for (size_type block=0; block<num_blocks; ++block)
        {
          const size_type next_block = block_starts[block+1];
          for (size_type i=block_starts[block]; i<next_block; ++i)
            {
              const size_type row = actual_dof_indices[i];
              Assert (row < sparsity_pattern.block(block,0).n_rows(),
                      ExcInternalError());
              std::vector<size_type>::iterator index_it = actual_dof_indices.begin();
              for (size_type block_col = 0; block_col<num_blocks; ++block_col)
                {
                  const size_type next_block_col = block_starts[block_col+1];
                  sparsity_pattern.block(block,block_col).
                  add_entries(row,
                              index_it,
                              actual_dof_indices.begin() + next_block_col,
                              true);
                  index_it = actual_dof_indices.begin() + next_block_col;
                }
            }
        }

      for (size_type i=0; i<n_local_dofs; i++)
        if (is_constrained(local_dof_indices[i]))
          {
            if (keep_constrained_entries == true)
              for (size_type j=0; j<n_local_dofs; j++)
                {
                  sparsity_pattern.add (local_dof_indices[i], local_dof_indices[j]);
                  sparsity_pattern.add (local_dof_indices[j], local_dof_indices[i]);
                }
            else
              sparsity_pattern.add (local_dof_indices[i], local_dof_indices[i]);
          }

      return;
    }

  // difficult case with a dof_mask, similar to the
  // distribute_local_to_global function for block matrices
  internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows;
  global_rows.reinit(n_local_dofs);
  make_sorted_row_list (local_dof_indices, global_rows);
  const size_type n_actual_dofs = global_rows.size();

  // additional construct that also takes care of block indices
  std::vector<size_type> &block_starts = scratch_data->block_starts;
  block_starts.resize(num_blocks+1);
  internals::make_block_starts(sparsity_pattern, global_rows, block_starts);

  std::vector<size_type> &cols = scratch_data->columns;
  cols.resize(n_actual_dofs);

  // the basic difference to the non-block variant is that we go through
  // the blocks of the matrix separately
  for (size_type block=0; block<num_blocks; ++block)
    {
      const size_type next_block = block_starts[block+1];
      for (size_type i=block_starts[block]; i<next_block; ++i)
        {
          const size_type row = global_rows.global_row(i);
          for (size_type block_col=0; block_col<num_blocks; ++block_col)
            {
              const size_type begin_block = block_starts[block_col],
                              end_block = block_starts[block_col+1];
              std::vector<size_type>::iterator col_ptr = cols.begin();
              internals::resolve_matrix_row (global_rows, i, begin_block,
                                             end_block, dof_mask, col_ptr);

              sparsity_pattern.block(block, block_col).add_entries(row,
                                                                   cols.begin(),
                                                                   col_ptr,
                                                                   true);
            }
        }
    }

  internals::set_sparsity_diagonals (global_rows, local_dof_indices,
                                     dof_mask, keep_constrained_entries,
                                     sparsity_pattern);
}
DEAL_II_NAMESPACE_CLOSE

#endif