Reference documentation for deal.II version Git 92ca227 2018-05-25 19:47:08 -0500
constraint_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 1998 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/base/memory_consumption.h>
17 
18 #include <deal.II/lac/block_sparse_matrix.h>
19 #include <deal.II/lac/block_sparse_matrix_ez.h>
20 #include <deal.II/lac/block_vector.h>
21 #include <deal.II/lac/chunk_sparse_matrix.h>
22 #include <deal.II/lac/constraint_matrix.h>
23 #include <deal.II/lac/constraint_matrix.templates.h>
24 #include <deal.II/lac/diagonal_matrix.h>
25 #include <deal.II/lac/dynamic_sparsity_pattern.h>
26 #include <deal.II/lac/la_parallel_block_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/la_vector.h>
29 #include <deal.II/lac/matrix_block.h>
30 #include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
31 #include <deal.II/lac/petsc_parallel_block_vector.h>
32 #include <deal.II/lac/petsc_parallel_sparse_matrix.h>
33 #include <deal.II/lac/petsc_parallel_vector.h>
34 #include <deal.II/lac/petsc_sparse_matrix.h>
35 #include <deal.II/lac/sparse_matrix_ez.h>
36 #include <deal.II/lac/trilinos_block_sparse_matrix.h>
37 #include <deal.II/lac/trilinos_parallel_block_vector.h>
38 #include <deal.II/lac/trilinos_sparse_matrix.h>
39 #include <deal.II/lac/trilinos_vector.h>
40 
41 #include <boost/serialization/utility.hpp>
42 
43 #include <algorithm>
44 #include <numeric>
45 #include <ostream>
46 #include <set>
47 
48 DEAL_II_NAMESPACE_OPEN
49 
50 
51 
52 // Static member variable
53 const Table<2, bool> ConstraintMatrix::default_empty_table = Table<2, bool>();
54 
55 
56 
57 void
58 ConstraintMatrix::copy_from(const ConstraintMatrix &other)
59 {
60  lines = other.lines;
61  lines_cache = other.lines_cache;
62  local_lines = other.local_lines;
63  sorted = other.sorted;
64 }
65 
66 
67 
68 bool
69 ConstraintMatrix::check_zero_weight(const std::pair<size_type, double> &p)
70 {
71  return (p.second == 0);
72 }
73 
74 
75 
76 bool
77 ConstraintMatrix::ConstraintLine::operator<(const ConstraintLine &a) const
78 {
79  return index < a.index;
80 }
81 
82 
83 
84 bool
85 ConstraintMatrix::ConstraintLine::operator==(const ConstraintLine &a) const
86 {
87  return index == a.index;
88 }
89 
90 
91 
92 std::size_t
93 ConstraintMatrix::ConstraintLine::memory_consumption() const
94 {
95  return (MemoryConsumption::memory_consumption(index) +
96  MemoryConsumption::memory_consumption(entries) +
97  MemoryConsumption::memory_consumption(inhomogeneity));
98 }
99 
100 
101 
102 const ConstraintMatrix::LineRange
103 ConstraintMatrix::get_lines() const
104 {
105  return boost::make_iterator_range(lines.begin(), lines.end());
106 }
107 
108 
109 
110 bool
111 ConstraintMatrix::is_consistent_in_parallel(
112  const std::vector<IndexSet> &locally_owned_dofs,
113  const IndexSet & locally_active_dofs,
114  const MPI_Comm mpi_communicator,
115  const bool verbose) const
116 {
117  ConstraintLine empty;
118  empty.inhomogeneity = 0.0;
119 
120  // Helper to return a reference to the ConstraintLine object that belongs to row @p row.
121  // We don't want to make copies, but to return a reference we need an empty
122  // object that we store above.
123  auto get_line = [&](const size_type row) -> const ConstraintLine & {
124  const size_type line_index = calculate_line_index(row);
125  if (line_index >= lines_cache.size() ||
126  lines_cache[line_index] == numbers::invalid_size_type)
127  {
128  empty.index = row;
129  return empty;
130  }
131  else
132  return lines[lines_cache[line_index]];
133  };
134 
135  // identify non-owned rows and send to owner:
136  std::map<unsigned int, std::vector<ConstraintLine>> to_send;
137 
138  const unsigned int myid =
139  ::Utilities::MPI::this_mpi_process(mpi_communicator);
140  const unsigned int nproc =
141  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
142 
143  // We will send all locally active dofs that are not locally owned for
144  // checking. Note that we allow constraints to differ on locally_relevant (and
145  // not active) DoFs.
146  IndexSet non_owned = locally_active_dofs;
147  non_owned.subtract_set(locally_owned_dofs[myid]);
148  for (unsigned int owner = 0; owner < nproc; ++owner)
149  {
150  // find all lines to send to @p owner
151  IndexSet indices_to_send = non_owned & locally_owned_dofs[owner];
152  for (const auto &row_idx : indices_to_send)
153  {
154  to_send[owner].push_back(get_line(row_idx));
155  }
156  }
157 
158  std::map<unsigned int, std::vector<ConstraintLine>> received =
159  Utilities::MPI::some_to_some(mpi_communicator, to_send);
160 
161  unsigned int inconsistent = 0;
162 
163  // from each processor:
164  for (const auto &kv : received)
165  {
166  // for each incoming line:
167  for (auto &lineit : kv.second)
168  {
169  const ConstraintLine &reference = get_line(lineit.index);
170 
171  if (lineit.inhomogeneity != reference.inhomogeneity)
172  {
173  ++inconsistent;
174 
175  if (verbose)
176  std::cout << "Proc " << myid << " got line " << lineit.index
177  << " from " << kv.first << " inhomogeneity "
178  << lineit.inhomogeneity
179  << " != " << reference.inhomogeneity << std::endl;
180  }
181  else if (lineit.entries != reference.entries)
182  {
183  ++inconsistent;
184  if (verbose)
185  std::cout << "Proc " << myid << " got line " << lineit.index
186  << " from " << kv.first << " wrong values!"
187  << std::endl;
188  }
189  }
190  }
191 
192  const unsigned int total =
193  Utilities::MPI::sum(inconsistent, mpi_communicator);
194  if (verbose && total > 0 && myid == 0)
195  std::cout << total << " inconsistent lines discovered!" << std::endl;
196  return total == 0;
197 }
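// A minimal usage sketch for the consistency check above (not part of the
// library source; `dof_handler`, `constraints` and `mpi_communicator` are
// assumed to exist):
//
//   const std::vector<IndexSet> owned =
//     dof_handler.locally_owned_dofs_per_processor();
//   IndexSet active;
//   DoFTools::extract_locally_active_dofs(dof_handler, active);
//   AssertThrow(constraints.is_consistent_in_parallel(owned,
//                                                     active,
//                                                     mpi_communicator,
//                                                     /*verbose=*/true),
//               ExcMessage("Constraints differ between processes."));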
198 
199 
200 
201 void
202 ConstraintMatrix::add_lines(const std::set<size_type> &lines)
203 {
204  for (std::set<size_type>::const_iterator i = lines.begin(); i != lines.end();
205  ++i)
206  add_line(*i);
207 }
208 
209 
210 
211 void
212 ConstraintMatrix::add_lines(const std::vector<bool> &lines)
213 {
214  for (size_type i = 0; i < lines.size(); ++i)
215  if (lines[i] == true)
216  add_line(i);
217 }
218 
219 
220 
221 void
222 ConstraintMatrix::add_lines(const IndexSet &lines)
223 {
224  for (size_type i = 0; i < lines.n_elements(); ++i)
225  add_line(lines.nth_index_in_set(i));
226 }
227 
228 
229 
230 void
231 ConstraintMatrix::add_entries(
232  const size_type line,
233  const std::vector<std::pair<size_type, double>> &col_val_pairs)
234 {
235  Assert(sorted == false, ExcMatrixIsClosed());
236  Assert(is_constrained(line), ExcLineInexistant(line));
237 
238  ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]];
239  Assert(line_ptr->index == line, ExcInternalError());
240 
241  // if in debug mode, check whether an entry for this column already
242  // exists and if it is the same as the one entered at present
243  //
244  // in any case: skip this entry if an entry for this column already
245  // exists, since we don't want to enter it twice
246  for (std::vector<std::pair<size_type, double>>::const_iterator col_val_pair =
247  col_val_pairs.begin();
248  col_val_pair != col_val_pairs.end();
249  ++col_val_pair)
250  {
251  Assert(line != col_val_pair->first,
252  ExcMessage("Can't constrain a degree of freedom to itself"));
253 
254  for (ConstraintLine::Entries::const_iterator p =
255  line_ptr->entries.begin();
256  p != line_ptr->entries.end();
257  ++p)
258  if (p->first == col_val_pair->first)
259  {
260  // entry exists, break innermost loop
261  Assert(
262  p->second == col_val_pair->second,
263  ExcEntryAlreadyExists(
264  line, col_val_pair->first, p->second, col_val_pair->second));
265  break;
266  }
267 
268  line_ptr->entries.push_back(*col_val_pair);
269  }
270 }
271 
272 
273 
274 void
275 ConstraintMatrix::add_selected_constraints(const ConstraintMatrix &constraints,
276  const IndexSet & filter)
277 {
278  if (constraints.n_constraints() == 0)
279  return;
280 
281  Assert(filter.size() > constraints.lines.back().index,
282  ExcMessage("Filter needs to be larger than constraint matrix size."));
283  for (std::vector<ConstraintLine>::const_iterator line =
284  constraints.lines.begin();
285  line != constraints.lines.end();
286  ++line)
287  if (filter.is_element(line->index))
288  {
289  const size_type row = filter.index_within_set(line->index);
290  add_line(row);
291  set_inhomogeneity(row, line->inhomogeneity);
292  for (size_type i = 0; i < line->entries.size(); ++i)
293  if (filter.is_element(line->entries[i].first))
294  add_entry(row,
295  filter.index_within_set(line->entries[i].first),
296  line->entries[i].second);
297  }
298 }
299 
300 
301 
302 void
303 ConstraintMatrix::close()
304 {
305  if (sorted == true)
306  return;
307 
308  // sort the lines
309  std::sort(lines.begin(), lines.end());
310 
311  // update list of pointers and give the vector a sharp size since we
312  // won't modify the size any more after this point.
313  {
314  std::vector<size_type> new_lines(lines_cache.size(),
315  numbers::invalid_size_type);
316  size_type counter = 0;
317  for (std::vector<ConstraintLine>::const_iterator line = lines.begin();
318  line != lines.end();
319  ++line, ++counter)
320  new_lines[calculate_line_index(line->index)] = counter;
321  std::swap(lines_cache, new_lines);
322  }
323 
324  // in debug mode: check whether we really set the pointers correctly.
325  for (size_type i = 0; i < lines_cache.size(); ++i)
326  Assert((lines_cache[i] == numbers::invalid_size_type) ||
327  (calculate_line_index(lines[lines_cache[i]].index) == i),
328  ExcInternalError());
329 
330  // first, strip zero entries, as we have to do that only once
331  for (std::vector<ConstraintLine>::iterator line = lines.begin();
332  line != lines.end();
333  ++line)
334  // first remove zero entries. that would mean that in the linear
335  // constraint for a node, x_i = ax_1 + bx_2 + ..., another node times 0
336  // appears. obviously, 0*something can be omitted
337  line->entries.erase(std::remove_if(line->entries.begin(),
338  line->entries.end(),
339  &check_zero_weight),
340  line->entries.end());
341 
342 
343 
344 #ifdef DEBUG
345  // In debug mode we are computing an estimate for the maximum number
346  // of constraints so that we can bail out if there is a cycle in the
347  // constraints (which is easier than searching for cycles in the graph).
348  //
349  // Let us figure out the largest dof index. This is an upper bound for the
350  // number of constraints because it is an approximation for the number of dofs
351  // in our system.
352  size_type largest_idx = 0;
353  for (std::vector<ConstraintLine>::iterator line = lines.begin();
354  line != lines.end();
355  ++line)
356  {
357  for (ConstraintLine::Entries::iterator it = line->entries.begin();
358  it != line->entries.end();
359  ++it)
360  {
361  largest_idx = std::max(largest_idx, it->first);
362  }
363  }
364 #endif
365 
366  // replace references to dofs that are themselves constrained. note that
367  // because we may replace references to other dofs that may themselves be
368  // constrained to third ones, we have to iterate over all this until we
369  // replace no chains of constraints any more
370  //
371  // the iteration replaces references to constrained degrees of freedom by
372  // second-order references. for example if x3=x0/2+x2/2 and x2=x0/2+x1/2,
373  // then the new list will be x3=x0/2+x0/4+x1/4. note that x0 appears
374  // twice. we will throw this duplicate out in the following step, where
375  // we sort the list so that throwing out duplicates becomes much more
376  // efficient. also, we have to do it only once, rather than in each
377  // iteration
378  size_type iteration = 0;
379  while (true)
380  {
381  bool chained_constraint_replaced = false;
382 
383  for (std::vector<ConstraintLine>::iterator line = lines.begin();
384  line != lines.end();
385  ++line)
386  {
387 #ifdef DEBUG
388  // we need to keep track of how many replacements we do in this line,
389  // because we can end up in a cycle A->B->C->A without the number of
390  // entries growing.
391  size_type n_replacements = 0;
392 #endif
393 
394  // loop over all entries of this line (including ones that we
395  // have appended in this go around) and see whether they are
396  // further constrained. ignore elements that we don't store on
397  // the current processor
398  size_type entry = 0;
399  while (entry < line->entries.size())
400  if (((local_lines.size() == 0) ||
401  (local_lines.is_element(line->entries[entry].first))) &&
402  is_constrained(line->entries[entry].first))
403  {
404  // ok, this entry is further constrained:
405  chained_constraint_replaced = true;
406 
407  // look up the chain of constraints for this entry
408  const size_type dof_index = line->entries[entry].first;
409  const double weight = line->entries[entry].second;
410 
411  Assert(dof_index != line->index,
412  ExcMessage("Cycle in constraints detected!"));
413 
414  const ConstraintLine *constrained_line =
415  &lines[lines_cache[calculate_line_index(dof_index)]];
416  Assert(constrained_line->index == dof_index,
417  ExcInternalError());
418 
419  // now we have to replace an entry by its expansion. we do
420  // that by overwriting the entry by the first entry of the
421  // expansion and adding the remaining ones to the end,
422  // where we will later process them once more
423  //
424  // we can of course only do that if the DoF that we are
425  // currently handling is constrained by a linear combination
426  // of other dofs:
427  if (constrained_line->entries.size() > 0)
428  {
429  for (size_type i = 0; i < constrained_line->entries.size();
430  ++i)
431  Assert(dof_index != constrained_line->entries[i].first,
432  ExcMessage("Cycle in constraints detected!"));
433 
434  // replace first entry, then tack the rest to the end
435  // of the list
436  line->entries[entry] = std::make_pair(
437  constrained_line->entries[0].first,
438  constrained_line->entries[0].second * weight);
439 
440  for (size_type i = 1; i < constrained_line->entries.size();
441  ++i)
442  line->entries.emplace_back(
443  constrained_line->entries[i].first,
444  constrained_line->entries[i].second * weight);
445 
446 #ifdef DEBUG
447  // keep track of how many entries we replace in this
448  // line. If we do more than there are constraints or
449  // dofs in our system, we must have a cycle.
450  ++n_replacements;
451  Assert(n_replacements / 2 < largest_idx,
452  ExcMessage("Cycle in constraints detected!"));
453  if (n_replacements / 2 >= largest_idx)
454  return; // this enables us to test for this Exception.
455 #endif
456  }
457  else
458  // the DoF that we encountered is not constrained by a
459  // linear combination of other dofs but is equal to just
460  // the inhomogeneity (i.e. its chain of entries is
461  // empty). in that case, we can't just overwrite the
462  // current entry, but we have to actually eliminate it
463  {
464  line->entries.erase(line->entries.begin() + entry);
465  }
466 
467  line->inhomogeneity += constrained_line->inhomogeneity * weight;
468 
469  // now that we're here, do not increase index by one but
470  // rather make another pass for the present entry because
471  // we have replaced the present entry by another one, or
472  // because we have deleted it and shifted all following
473  // ones one forward
474  }
475  else
476  // entry not further constrained. just move ahead by one
477  ++entry;
478  }
479 
480  // if we didn't do anything in this round, then quit the loop
481  if (chained_constraint_replaced == false)
482  break;
483 
484  // increase iteration count. note that we should not iterate more
485  // times than there are constraints, since this puts a natural upper
486  // bound on the length of constraint chains
487  ++iteration;
488  Assert(iteration <= lines.size(), ExcInternalError());
489  }
490 
491  // finally sort the entries and re-scale them if necessary. in this step,
492  // we also throw out duplicates as mentioned above. moreover, as some
493  // entries might have had zero weights, we replace them by a vector with
494  // sharp sizes.
495  for (std::vector<ConstraintLine>::iterator line = lines.begin();
496  line != lines.end();
497  ++line)
498  {
499  std::sort(line->entries.begin(), line->entries.end());
500 
501  // loop over the now sorted list and see whether any of the entries
502  // references the same dofs more than once in order to find how many
503  // non-duplicate entries we have. This lets us allocate the correct
504  // amount of memory for the constraint entries.
505  size_type duplicates = 0;
506  for (size_type i = 1; i < line->entries.size(); ++i)
507  if (line->entries[i].first == line->entries[i - 1].first)
508  duplicates++;
509 
510  if (duplicates > 0 || line->entries.size() < line->entries.capacity())
511  {
512  ConstraintLine::Entries new_entries;
513 
514  // if we have no duplicates, copy verbatim the entries. this way,
515  // the final size of the vector is correct.
516  if (duplicates == 0)
517  new_entries = line->entries;
518  else
519  {
520  // otherwise, we need to go through the list entry by entry and
521  // resolve the duplicates
522  new_entries.reserve(line->entries.size() - duplicates);
523  new_entries.push_back(line->entries[0]);
524  for (size_type j = 1; j < line->entries.size(); ++j)
525  if (line->entries[j].first == line->entries[j - 1].first)
526  {
527  Assert(new_entries.back().first == line->entries[j].first,
528  ExcInternalError());
529  new_entries.back().second += line->entries[j].second;
530  }
531  else
532  new_entries.push_back(line->entries[j]);
533 
534  Assert(new_entries.size() == line->entries.size() - duplicates,
535  ExcInternalError());
536 
537  // make sure there are really no duplicates left and that the
538  // list is still sorted
539  for (size_type j = 1; j < new_entries.size(); ++j)
540  {
541  Assert(new_entries[j].first != new_entries[j - 1].first,
542  ExcInternalError());
543  Assert(new_entries[j].first > new_entries[j - 1].first,
544  ExcInternalError());
545  }
546  }
547 
548  // replace old list of constraints for this dof by the new one
549  line->entries.swap(new_entries);
550  }
551 
552  // finally do the following check: if the sum of weights for the
553  // constraints is close to one, but not exactly one, then rescale all
554  // the weights so that they sum up to 1. this adds a little numerical
555  // stability and avoids all sorts of problems where the actual value
556  // is close to, but not quite what we expected
557  //
558  // the case where the weights don't quite sum up happens when we
559  // compute the interpolation weights "on the fly", i.e. not from
560  // precomputed tables. in this case, the interpolation weights are
561  // also subject to round-off
562  double sum = 0;
563  for (size_type i = 0; i < line->entries.size(); ++i)
564  sum += line->entries[i].second;
565  if ((sum != 1.0) && (std::fabs(sum - 1.) < 1.e-13))
566  {
567  for (size_type i = 0; i < line->entries.size(); ++i)
568  line->entries[i].second /= sum;
569  line->inhomogeneity /= sum;
570  }
571  } // end of loop over all constraint lines
572 
573 #ifdef DEBUG
574  // if in debug mode: check that no dof is constrained to another dof that
575  // is also constrained. exclude dofs from this check whose constraint
576  // lines are not stored on the local processor
577  for (std::vector<ConstraintLine>::const_iterator line = lines.begin();
578  line != lines.end();
579  ++line)
580  for (ConstraintLine::Entries::const_iterator entry = line->entries.begin();
581  entry != line->entries.end();
582  ++entry)
583  if ((local_lines.size() == 0) || (local_lines.is_element(entry->first)))
584  {
585  // make sure that entry->first is not the index of a line itself
586  const bool is_circle = is_constrained(entry->first);
587  Assert(is_circle == false,
588  ExcDoFConstrainedToConstrainedDoF(line->index, entry->first));
589  }
590 #endif
591 
592  sorted = true;
593 }
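// A small worked example of what close() computes for chained constraints
// (a sketch, not part of the library source). With x3 = 0.5*x0 + 0.5*x2 and
// x2 = 0.5*x0 + 0.5*x1, the chain resolution and duplicate merging above
// yield x3 = 0.75*x0 + 0.25*x1:
//
//   ConstraintMatrix c;
//   c.add_line(3); c.add_entry(3, 0, 0.5); c.add_entry(3, 2, 0.5);
//   c.add_line(2); c.add_entry(2, 0, 0.5); c.add_entry(2, 1, 0.5);
//   c.close();   // line 3 now stores the pairs (0, 0.75) and (1, 0.25)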
594 
595 
596 
597 void
598 ConstraintMatrix::merge(const ConstraintMatrix & other_constraints,
599  const MergeConflictBehavior merge_conflict_behavior,
600  const bool allow_different_local_lines)
601 {
602  (void)allow_different_local_lines;
603  Assert(
604  allow_different_local_lines || local_lines == other_constraints.local_lines,
605  ExcMessage("local_lines for this and the other objects are not the same "
606  "although allow_different_local_lines is false."));
607 
608  // store the previous state with respect to sorting
609  const bool object_was_sorted = sorted;
610  sorted = false;
611 
612  // first action is to fold into the present object possible constraints
613  // in the second object. we don't strictly need to do this any more since
614  // the ConstraintMatrix has learned to deal with chains of constraints in
615  // the close() function, but we have traditionally done this and it's not
616  // overly hard to do.
617  //
618  // for this, loop over all constraints and replace the constraint lines
619  // with a new one where constraints are replaced if necessary.
620  ConstraintLine::Entries tmp;
621  for (std::vector<ConstraintLine>::iterator line = lines.begin();
622  line != lines.end();
623  ++line)
624  {
625  tmp.clear();
626  for (size_type i = 0; i < line->entries.size(); ++i)
627  {
628  // if the present dof is not stored, or not constrained, or if we
629  // won't take the constraint from the other object, then simply copy
630  // it over
631  if ((other_constraints.local_lines.size() != 0 &&
632  other_constraints.local_lines.is_element(
633  line->entries[i].first) == false) ||
634  other_constraints.is_constrained(line->entries[i].first) ==
635  false ||
636  ((merge_conflict_behavior != right_object_wins) &&
637  other_constraints.is_constrained(line->entries[i].first) &&
638  this->is_constrained(line->entries[i].first)))
639  tmp.push_back(line->entries[i]);
640  else
641  // otherwise resolve further constraints by replacing the old
642  // entry by a sequence of new entries taken from the other
643  // object, but with multiplied weights
644  {
645  const ConstraintLine::Entries *other_line =
646  other_constraints.get_constraint_entries(
647  line->entries[i].first);
648  Assert(other_line != nullptr, ExcInternalError());
649 
650  const double weight = line->entries[i].second;
651 
652  for (ConstraintLine::Entries::const_iterator j =
653  other_line->begin();
654  j != other_line->end();
655  ++j)
656  tmp.emplace_back(j->first, j->second * weight);
657 
658  line->inhomogeneity +=
659  other_constraints.get_inhomogeneity(line->entries[i].first) *
660  weight;
661  }
662  }
663  // finally exchange old and newly resolved line
664  line->entries.swap(tmp);
665  }
666 
667  if (local_lines.size() != 0)
668  local_lines.add_indices(other_constraints.local_lines);
669 
670  {
671  // do not bother to resize the lines cache exactly since it is pretty
672  // cheap to adjust it along the way.
673  std::fill(
674  lines_cache.begin(), lines_cache.end(), numbers::invalid_size_type);
675 
676  // reset lines_cache for our own constraints
677  size_type index = 0;
678  for (std::vector<ConstraintLine>::const_iterator line = lines.begin();
679  line != lines.end();
680  ++line)
681  {
682  size_type local_line_no = calculate_line_index(line->index);
683  if (local_line_no >= lines_cache.size())
684  lines_cache.resize(local_line_no + 1, numbers::invalid_size_type);
685  lines_cache[local_line_no] = index++;
686  }
687 
688  // Add other_constraints to lines cache and our list of constraints
689  for (std::vector<ConstraintLine>::const_iterator line =
690  other_constraints.lines.begin();
691  line != other_constraints.lines.end();
692  ++line)
693  {
694  const size_type local_line_no = calculate_line_index(line->index);
695  if (local_line_no >= lines_cache.size())
696  {
697  lines_cache.resize(local_line_no + 1, numbers::invalid_size_type);
698  lines.push_back(*line);
699  lines_cache[local_line_no] = index++;
700  }
701  else if (lines_cache[local_line_no] == numbers::invalid_size_type)
702  {
703  // there are no constraints for that line yet
704  lines.push_back(*line);
705  AssertIndexRange(local_line_no, lines_cache.size());
706  lines_cache[local_line_no] = index++;
707  }
708  else
709  {
710  // we already store that line
711  switch (merge_conflict_behavior)
712  {
713  case no_conflicts_allowed:
714  AssertThrow(false,
715  ExcDoFIsConstrainedFromBothObjects(line->index));
716  break;
717 
718  case left_object_wins:
719  // ignore this constraint
720  break;
721 
722  case right_object_wins:
723  AssertIndexRange(local_line_no, lines_cache.size());
724  lines[lines_cache[local_line_no]] = *line;
725  break;
726 
727  default:
728  Assert(false, ExcNotImplemented());
729  }
730  }
731  }
732 
733  // check that we set the pointers correctly
734  for (size_type i = 0; i < lines_cache.size(); ++i)
735  Assert(lines_cache[i] == numbers::invalid_size_type ||
736  calculate_line_index(lines[lines_cache[i]].index) == i,
737  ExcInternalError());
738  }
739 
740  // if the object was sorted before, then make sure it is so afterward as
741  // well. otherwise leave everything in the unsorted state
742  if (object_was_sorted == true)
743  close();
744 }
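// A brief sketch of the conflict handling above (not part of the library
// source). If both objects `a` and `b` constrain the same dof:
//
//   a.merge(b);                                        // default: throws
//   a.merge(b, ConstraintMatrix::left_object_wins);    // keeps a's line
//   a.merge(b, ConstraintMatrix::right_object_wins);   // takes b's line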
745 
746 
747 
748 void
749 ConstraintMatrix::shift(const size_type offset)
750 {
751  if (local_lines.size() == 0)
752  lines_cache.insert(lines_cache.begin(), offset, numbers::invalid_size_type);
753  else
754  {
755  // shift local_lines
756  IndexSet new_local_lines(local_lines.size());
757  new_local_lines.add_indices(local_lines, offset);
758  std::swap(local_lines, new_local_lines);
759  }
760 
761  for (std::vector<ConstraintLine>::iterator i = lines.begin();
762  i != lines.end();
763  ++i)
764  {
765  i->index += offset;
766  for (ConstraintLine::Entries::iterator j = i->entries.begin();
767  j != i->entries.end();
768  ++j)
769  j->first += offset;
770  }
771 
772 #ifdef DEBUG
773  // make sure that lines, lines_cache and local_lines
774  // are still linked correctly
775  for (size_type i = 0; i < lines_cache.size(); ++i)
776  Assert(lines_cache[i] == numbers::invalid_size_type ||
777  calculate_line_index(lines[lines_cache[i]].index) == i,
778  ExcInternalError());
779 #endif
780 }
781 
782 
783 
784 void
785 ConstraintMatrix::clear()
786 {
787  {
788  std::vector<ConstraintLine> tmp;
789  lines.swap(tmp);
790  }
791 
792  {
793  std::vector<size_type> tmp;
794  lines_cache.swap(tmp);
795  }
796 
797  sorted = false;
798 }
799 
800 
801 
802 void
803 ConstraintMatrix::reinit(const IndexSet &local_constraints)
804 {
805  local_lines = local_constraints;
806 
807  // make sure the IndexSet is compressed. Otherwise this can lead to crashes
808  // that are hard to find (only happen in release mode).
809  // see tests/mpi/constraint_matrix_crash_01
810  local_lines.compress();
811 
812  clear();
813 }
814 
815 
816 
817 void
818 ConstraintMatrix::condense(SparsityPattern &sparsity) const
819 {
820  Assert(sorted == true, ExcMatrixNotClosed());
821  Assert(sparsity.is_compressed() == false, ExcMatrixIsClosed());
822  Assert(sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());
823 
824  // store for each index whether it must be distributed or not. If entry
825  // is numbers::invalid_size_type, no distribution is necessary.
826  // otherwise, the number states which line in the constraint matrix
827  // handles this index
828  std::vector<size_type> distribute(sparsity.n_rows(),
829  numbers::invalid_size_type);
830 
831  for (size_type c = 0; c < lines.size(); ++c)
832  distribute[lines[c].index] = c;
833 
834  const size_type n_rows = sparsity.n_rows();
835  for (size_type row = 0; row < n_rows; ++row)
836  {
837  if (distribute[row] == numbers::invalid_size_type)
838  {
839  // regular line. loop over all valid cols. note that this
840  // changes the line we are presently working on: we add additional
841  // entries. these are put to the end of the row. however, as
842  // constrained nodes cannot be constrained to other constrained
843  // nodes, nothing will happen if we run into these added nodes, as
844  // they can't be distributed further. we might store the position of
845  // the last old entry and stop work there, but since operating on
846  // the newly added ones only takes two comparisons (column index
847  // valid, distribute[column] necessarily
848  // ==numbers::invalid_size_type), it is cheaper to not do so and
849  // run right until the end of the line
850  for (SparsityPattern::iterator entry = sparsity.begin(row);
851  ((entry != sparsity.end(row)) && entry->is_valid_entry());
852  ++entry)
853  {
854  const size_type column = entry->column();
855 
856  if (distribute[column] != numbers::invalid_size_type)
857  {
858  // distribute entry at regular row @p{row} and irregular
859  // column sparsity.colnums[j]
860  for (size_type q = 0;
861  q != lines[distribute[column]].entries.size();
862  ++q)
863  sparsity.add(row,
864  lines[distribute[column]].entries[q].first);
865  }
866  }
867  }
868  else
869  // row must be distributed. note that here the present row is not
870  // touched (unlike above)
871  {
872  for (SparsityPattern::iterator entry = sparsity.begin(row);
873  (entry != sparsity.end(row)) && entry->is_valid_entry();
874  ++entry)
875  {
876  const size_type column = entry->column();
877  if (distribute[column] == numbers::invalid_size_type)
878  // distribute entry at irregular row @p{row} and regular
879  // column sparsity.colnums[j]
880  for (size_type q = 0;
881  q != lines[distribute[row]].entries.size();
882  ++q)
883  sparsity.add(lines[distribute[row]].entries[q].first, column);
884  else
885  // distribute entry at irregular row @p{row} and irregular
886  // column sparsity.get_column_numbers()[j]
887  for (size_type p = 0;
888  p != lines[distribute[row]].entries.size();
889  ++p)
890  for (size_type q = 0;
891  q != lines[distribute[column]].entries.size();
892  ++q)
893  sparsity.add(lines[distribute[row]].entries[p].first,
894  lines[distribute[column]].entries[q].first);
895  }
896  }
897  }
898 
899  sparsity.compress();
900 }
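// A typical calling sequence for the function above (a sketch; it assumes an
// existing DoFHandler `dof_handler` and closed `constraints`):
//
//   SparsityPattern sparsity(dof_handler.n_dofs(),
//                            dof_handler.n_dofs(),
//                            dof_handler.max_couplings_between_dofs());
//   DoFTools::make_sparsity_pattern(dof_handler, sparsity);
//   constraints.condense(sparsity);   // also compresses the pattern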
901 
902 
903 
904 void
905 ConstraintMatrix::condense(DynamicSparsityPattern &sparsity) const
906 {
907  Assert(sorted == true, ExcMatrixNotClosed());
908  Assert(sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());
909 
910  // store for each index whether it must be distributed or not. If entry
911  // is numbers::invalid_size_type, no distribution is necessary.
912  // otherwise, the number states which line in the constraint matrix
913  // handles this index
914  std::vector<size_type> distribute(sparsity.n_rows(),
915  numbers::invalid_size_type);
916 
917  for (size_type c = 0; c < lines.size(); ++c)
918  distribute[lines[c].index] = c;
919 
920  const size_type n_rows = sparsity.n_rows();
921  for (size_type row = 0; row < n_rows; ++row)
922  {
923  if (distribute[row] == numbers::invalid_size_type)
924  // regular line. loop over cols. note that as we proceed to
925  // distribute cols, the loop may get longer
926  for (size_type j = 0; j < sparsity.row_length(row); ++j)
927  {
928  const size_type column = sparsity.column_number(row, j);
929 
930  if (distribute[column] != numbers::invalid_size_type)
931  {
932  // distribute entry at regular row @p{row} and irregular
933  // column column. note that this changes the line we are
934  // presently working on: we add additional entries. if we
935  // add another entry at a column behind the present one, we
936  // will encounter it later on (but since it can't be
937  // further constrained, won't have to do anything about
938  // it). if we add it up front of the present column, we
939  // will find the present column later on again as it was
940  // shifted back (again nothing happens, in particular no
941  // endless loop, as when we encounter it the second time we
942  // won't be able to add more entries as they all already
943  // exist, but we do the same work more often than
944  // necessary, and the loop gets longer), so move the cursor
945  // one to the right in the case that we add an entry up
946  // front that did not exist before. check whether it
947  // existed before by tracking the length of this row
948  size_type old_rowlength = sparsity.row_length(row);
949  for (size_type q = 0;
950  q != lines[distribute[column]].entries.size();
951  ++q)
952  {
953  const size_type new_col =
954  lines[distribute[column]].entries[q].first;
955 
956  sparsity.add(row, new_col);
957 
958  const size_type new_rowlength = sparsity.row_length(row);
959  if ((new_col < column) && (old_rowlength != new_rowlength))
960  ++j;
961  old_rowlength = new_rowlength;
962  }
963  }
964  }
965  else
966  // row must be distributed
967  for (size_type j = 0; j < sparsity.row_length(row); ++j)
968  {
969  const size_type column = sparsity.column_number(row, j);
970 
971  if (distribute[column] == numbers::invalid_size_type)
972  // distribute entry at irregular row @p{row} and regular
973  // column sparsity.colnums[j]
974  for (size_type q = 0; q != lines[distribute[row]].entries.size();
975  ++q)
976  sparsity.add(lines[distribute[row]].entries[q].first, column);
977  else
978  // distribute entry at irregular row @p{row} and irregular
979  // column sparsity.get_column_numbers()[j]
980  for (size_type p = 0; p != lines[distribute[row]].entries.size();
981  ++p)
982  for (size_type q = 0;
983  q != lines[distribute[sparsity.column_number(row, j)]]
984  .entries.size();
985  ++q)
986  sparsity.add(lines[distribute[row]].entries[p].first,
987  lines[distribute[sparsity.column_number(row, j)]]
988  .entries[q]
989  .first);
990  }
991  }
992 }
993 
994 
995 
996 void
997 ConstraintMatrix::condense(BlockSparsityPattern &sparsity) const
998 {
999  Assert(sorted == true, ExcMatrixNotClosed());
1000  Assert(sparsity.is_compressed() == false, ExcMatrixIsClosed());
1001  Assert(sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());
1002  Assert(sparsity.n_block_rows() == sparsity.n_block_cols(), ExcNotQuadratic());
1003  Assert(sparsity.get_column_indices() == sparsity.get_row_indices(),
1004  ExcNotQuadratic());
1005 
1006  const BlockIndices &index_mapping = sparsity.get_column_indices();
1007 
1008  const size_type n_blocks = sparsity.n_block_rows();
1009 
1010  // store for each index whether it must be distributed or not. If entry
1011  // is numbers::invalid_size_type, no distribution is necessary.
1012  // otherwise, the number states which line in the constraint matrix
1013  // handles this index
1014  std::vector<size_type> distribute(sparsity.n_rows(),
1015  numbers::invalid_size_type);
1016 
1017  for (size_type c = 0; c < lines.size(); ++c)
1018  distribute[lines[c].index] = c;
1019 
1020  const size_type n_rows = sparsity.n_rows();
1021  for (size_type row = 0; row < n_rows; ++row)
1022  {
1023  // get index of this row within the blocks
1024  const std::pair<size_type, size_type> block_index =
1025  index_mapping.global_to_local(row);
1026  const size_type block_row = block_index.first;
1027 
1029  // regular line. loop over all columns and see whether this column
1030  // must be distributed
1031  {
1032  // to loop over all entries in this row, we have to loop over all
1033  // blocks in this blockrow and the corresponding row therein
1034  for (size_type block_col = 0; block_col < n_blocks; ++block_col)
1035  {
1036  const SparsityPattern &block_sparsity =
1037  sparsity.block(block_row, block_col);
1038 
1039  for (SparsityPattern::const_iterator entry =
1040  block_sparsity.begin(block_index.second);
1041  (entry != block_sparsity.end(block_index.second)) &&
1042  entry->is_valid_entry();
1043  ++entry)
1044  {
1045  const size_type global_col =
1046  index_mapping.local_to_global(block_col, entry->column());
1047 
1048  if (distribute[global_col] != numbers::invalid_size_type)
1049  // distribute entry at regular row @p{row} and
1050  // irregular column global_col
1051  {
1052  for (size_type q = 0;
1053  q != lines[distribute[global_col]].entries.size();
1054  ++q)
1055  sparsity.add(
1056  row, lines[distribute[global_col]].entries[q].first);
1057  }
1058  }
1059  }
1060  }
1061  else
1062  {
1063  // row must be distributed. split the whole row into the chunks
1064  // defined by the blocks
1065  for (size_type block_col = 0; block_col < n_blocks; ++block_col)
1066  {
1067  const SparsityPattern &block_sparsity =
1068  sparsity.block(block_row, block_col);
1069 
1070  for (SparsityPattern::const_iterator entry =
1071  block_sparsity.begin(block_index.second);
1072  (entry != block_sparsity.end(block_index.second)) &&
1073  entry->is_valid_entry();
1074  ++entry)
1075  {
1076  const size_type global_col =
1077  index_mapping.local_to_global(block_col, entry->column());
1078 
1079  if (distribute[global_col] == numbers::invalid_size_type)
1080  // distribute entry at irregular row @p{row} and
1081  // regular column global_col.
1082  {
1083  for (size_type q = 0;
1084  q != lines[distribute[row]].entries.size();
1085  ++q)
1086  sparsity.add(lines[distribute[row]].entries[q].first,
1087  global_col);
1088  }
1089  else
1090  // distribute entry at irregular row @p{row} and
1091  // irregular column @p{global_col}
1092  {
1093  for (size_type p = 0;
1094  p != lines[distribute[row]].entries.size();
1095  ++p)
1096  for (size_type q = 0;
1097  q != lines[distribute[global_col]].entries.size();
1098  ++q)
1099  sparsity.add(
1100  lines[distribute[row]].entries[p].first,
1101  lines[distribute[global_col]].entries[q].first);
1102  }
1103  }
1104  }
1105  }
1106  }
1107 
1108  sparsity.compress();
1109 }
1110 
1111 
1112 
1113 void
1114 ConstraintMatrix::condense(BlockDynamicSparsityPattern &sparsity) const
1115 {
1116  Assert(sorted == true, ExcMatrixNotClosed());
1117  Assert(sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());
1118  Assert(sparsity.n_block_rows() == sparsity.n_block_cols(), ExcNotQuadratic());
1119  Assert(sparsity.get_column_indices() == sparsity.get_row_indices(),
1120  ExcNotQuadratic());
1121 
1122  const BlockIndices &index_mapping = sparsity.get_column_indices();
1123 
1124  const size_type n_blocks = sparsity.n_block_rows();
1125 
1126  // store for each index whether it must be distributed or not. If entry
1127  // is numbers::invalid_size_type, no distribution is necessary.
1128  // otherwise, the number states which line in the constraint matrix
1129  // handles this index
1130  std::vector<size_type> distribute(sparsity.n_rows(),
1131  numbers::invalid_size_type);
1132 
1133  for (size_type c = 0; c < lines.size(); ++c)
1134  distribute[lines[c].index] = static_cast<signed int>(c);
1135 
1136  const size_type n_rows = sparsity.n_rows();
1137  for (size_type row = 0; row < n_rows; ++row)
1138  {
1139  // get index of this row within the blocks
1140  const std::pair<size_type, size_type> block_index =
1141  index_mapping.global_to_local(row);
1142  const size_type block_row = block_index.first;
1143  const size_type local_row = block_index.second;
1144 
1146  // regular line. loop over all columns and see whether this column
1147  // must be distributed. note that as we proceed to distribute cols,
1148  // the loop over cols may get longer.
1149  //
1150  // don't try to be clever here as in the algorithm for the
1151  // DynamicSparsityPattern, as that would be much more
1152  // complicated here. after all, we know that compressed patterns
1153  // are inefficient...
1154  {
1155  // to loop over all entries in this row, we have to loop over all
1156  // blocks in this blockrow and the corresponding row therein
1157  for (size_type block_col = 0; block_col < n_blocks; ++block_col)
1158  {
1159  const DynamicSparsityPattern &block_sparsity =
1160  sparsity.block(block_row, block_col);
1161 
1162  for (size_type j = 0; j < block_sparsity.row_length(local_row);
1163  ++j)
1164  {
1165  const size_type global_col = index_mapping.local_to_global(
1166  block_col, block_sparsity.column_number(local_row, j));
1167 
1168  if (distribute[global_col] != numbers::invalid_size_type)
1169  // distribute entry at regular row @p{row} and
1170  // irregular column global_col
1171  {
1172  for (size_type q = 0;
1173  q != lines[distribute[global_col]].entries.size();
1174  ++q)
1175  sparsity.add(
1176  row, lines[distribute[global_col]].entries[q].first);
1177  }
1178  }
1179  }
1180  }
1181  else
1182  {
1183  // row must be distributed. split the whole row into the chunks
1184  // defined by the blocks
1185  for (size_type block_col = 0; block_col < n_blocks; ++block_col)
1186  {
1187  const DynamicSparsityPattern &block_sparsity =
1188  sparsity.block(block_row, block_col);
1189 
1190  for (size_type j = 0; j < block_sparsity.row_length(local_row);
1191  ++j)
1192  {
1193  const size_type global_col = index_mapping.local_to_global(
1194  block_col, block_sparsity.column_number(local_row, j));
1195 
1196  if (distribute[global_col] == numbers::invalid_size_type)
1197  // distribute entry at irregular row @p{row} and
1198  // regular column global_col.
1199  {
1200  for (size_type q = 0;
1201  q != lines[distribute[row]].entries.size();
1202  ++q)
1203  sparsity.add(lines[distribute[row]].entries[q].first,
1204  global_col);
1205  }
1206  else
1207  // distribute entry at irregular row @p{row} and
1208  // irregular column @p{global_col}
1209  {
1210  for (size_type p = 0;
1211  p != lines[distribute[row]].entries.size();
1212  ++p)
1213  for (size_type q = 0;
1214  q != lines[distribute[global_col]].entries.size();
1215  ++q)
1216  sparsity.add(
1217  lines[distribute[row]].entries[p].first,
1218  lines[distribute[global_col]].entries[q].first);
1219  }
1220  }
1221  }
1222  }
1223  }
1224 }
1225 
1226 
1227 
1228 bool
1229 ConstraintMatrix::is_identity_constrained(const size_type index) const
1230 {
1231  if (is_constrained(index) == false)
1232  return false;
1233 
1235  Assert(p.index == index, ExcInternalError());
1236 
1237  // return if an entry for this line was found and if it has only one
1238  // entry equal to 1.0
1239  return ((p.entries.size() == 1) && (p.entries[0].second == 1.0));
1240 }
1241 
1242 
1243 bool
1244 ConstraintMatrix::are_identity_constrained(const size_type index1,
1245  const size_type index2) const
1246 {
1247  if (is_constrained(index1) == true)
1248  {
1249  const ConstraintLine &p =
1250  lines[lines_cache[calculate_line_index(index1)]];
1251  Assert(p.index == index1, ExcInternalError());
1252 
1253  // return if an entry for this line was found and if it has only one
1254  // entry equal to 1.0 and that one is index2
1255  return ((p.entries.size() == 1) && (p.entries[0].first == index2) &&
1256  (p.entries[0].second == 1.0));
1257  }
1258  else if (is_constrained(index2) == true)
1259  {
1260  const ConstraintLine &p =
1261  lines[lines_cache[calculate_line_index(index2)]];
1262  Assert(p.index == index2, ExcInternalError());
1263 
1264  // return if an entry for this line was found and if it has only one
1265  // entry equal to 1.0 and that one is index1
1266  return ((p.entries.size() == 1) && (p.entries[0].first == index1) &&
1267  (p.entries[0].second == 1.0));
1268  }
1269  else
1270  return false;
1271 }
1272 
1273 
1274 
1275 ConstraintMatrix::size_type
1276 ConstraintMatrix::max_constraint_indirections() const
1277 {
1278  size_type return_value = 0;
1279  for (std::vector<ConstraintLine>::const_iterator i = lines.begin();
1280  i != lines.end();
1281  ++i)
1282  // use static cast, since typeof(size)==std::size_t, which is !=
1283  // size_type on AIX
1284  return_value =
1285  std::max(return_value, static_cast<size_type>(i->entries.size()));
1286 
1287  return return_value;
1288 }
1289 
1290 
1291 
1292 bool
1293 ConstraintMatrix::has_inhomogeneities() const
1294 {
1295  for (std::vector<ConstraintLine>::const_iterator i = lines.begin();
1296  i != lines.end();
1297  ++i)
1298  if (i->inhomogeneity != 0.)
1299  return true;
1300 
1301  return false;
1302 }
1303 
1304 
1305 void
1306 ConstraintMatrix::print(std::ostream &out) const
1307 {
1308  for (size_type i = 0; i != lines.size(); ++i)
1309  {
1310  // output the list of constraints as pairs of dofs and their weights
1311  if (lines[i].entries.size() > 0)
1312  {
1313  for (size_type j = 0; j < lines[i].entries.size(); ++j)
1314  out << " " << lines[i].index << " " << lines[i].entries[j].first
1315  << ": " << lines[i].entries[j].second << "\n";
1316 
1317  // print out inhomogeneity.
1318  if (lines[i].inhomogeneity != 0)
1319  out << " " << lines[i].index << ": " << lines[i].inhomogeneity
1320  << "\n";
1321  }
1322  else
1323  // but also output something if the constraint simply reads
1324  // x[13]=0, i.e. where the right hand side is not a linear
1325  // combination of other dofs
1326  {
1327  if (lines[i].inhomogeneity != 0)
1328  out << " " << lines[i].index << " = " << lines[i].inhomogeneity
1329  << "\n";
1330  else
1331  out << " " << lines[i].index << " = 0\n";
1332  }
1333  }
1334 
1335  AssertThrow(out, ExcIO());
1336 }
1337 
1338 
1339 
1340 void
1341 ConstraintMatrix::write_dot(std::ostream &out) const
1342 {
1343  out << "digraph constraints {" << std::endl;
1344  for (size_type i = 0; i != lines.size(); ++i)
1345  {
1346  // same concept as in the previous function
1347  if (lines[i].entries.size() > 0)
1348  for (size_type j = 0; j < lines[i].entries.size(); ++j)
1349  out << " " << lines[i].index << "->" << lines[i].entries[j].first
1350  << "; // weight: " << lines[i].entries[j].second << "\n";
1351  else
1352  out << " " << lines[i].index << "\n";
1353  }
1354  out << "}" << std::endl;
1355 }
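// For a single constraint x3 = 0.5*x0 + 0.5*x2, the function above would
// produce output along these lines (illustrative; whitespace may differ):
//
//   digraph constraints {
//     3->0; // weight: 0.5
//     3->2; // weight: 0.5
//   }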
1356 
1357 
1358 
1359 std::size_t
1360 ConstraintMatrix::memory_consumption() const
1361 {
1362  return (MemoryConsumption::memory_consumption(lines) +
1363  MemoryConsumption::memory_consumption(lines_cache) +
1364  MemoryConsumption::memory_consumption(sorted) +
1365  MemoryConsumption::memory_consumption(local_lines));
1366 }
1367 
1368 
1369 
1370 void
1371 ConstraintMatrix::resolve_indices(
1372  std::vector<types::global_dof_index> &indices) const
1373 {
1374  const unsigned int indices_size = indices.size();
1375  const std::vector<std::pair<types::global_dof_index, double>> *line_ptr;
1376  for (unsigned int i = 0; i < indices_size; ++i)
1377  {
1378  line_ptr = get_constraint_entries(indices[i]);
1379  // if the index is constrained, the constraint's indices are added to the
1380  // indices vector
1381  if (line_ptr != nullptr)
1382  {
1383  const unsigned int line_size = line_ptr->size();
1384  for (unsigned int j = 0; j < line_size; ++j)
1385  indices.push_back((*line_ptr)[j].first);
1386  }
1387  }
1388 
1389  // keep only the unique elements
1390  std::sort(indices.begin(), indices.end());
1391  std::vector<types::global_dof_index>::iterator it;
1392  it = std::unique(indices.begin(), indices.end());
1393  indices.resize(it - indices.begin());
1394 }
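// A small sketch of resolve_indices() (not part of the library source). With
// the single constraint x3 = 0.5*x0 + 0.5*x1:
//
//   std::vector<types::global_dof_index> idx = {2, 3};
//   constraints.resolve_indices(idx);   // idx becomes {0, 1, 2, 3}
//
// i.e. constrained entries pull in the dofs they depend on, and the result is
// sorted with duplicates removed.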
1395 
1396 
1397 
1398 // explicit instantiations
1399 //
1400 // define a list of functions for vectors and matrices, respectively, where
1401 // the vector/matrix can be replaced using a preprocessor variable
1402 // VectorType/MatrixType. note that we need a space between "VectorType" and
1403 // ">" to disambiguate ">>" when VectorType trails in an angle bracket
1404 
1405 // TODO: The way we define all the instantiations is probably not the very
1406 // best one. Try to find a better description.
1407 
1408 #define VECTOR_FUNCTIONS(VectorType) \
1409  template void ConstraintMatrix::condense<VectorType>( \
1410  const VectorType &uncondensed, VectorType &condensed) const; \
1411  template void ConstraintMatrix::condense<VectorType>(VectorType & vec) \
1412  const; \
1413  template void ConstraintMatrix::distribute_local_to_global<VectorType>( \
1414  const Vector<VectorType::value_type> &, \
1415  const std::vector<ConstraintMatrix::size_type> &, \
1416  VectorType &, \
1417  const FullMatrix<VectorType::value_type> &) const; \
1418  template void ConstraintMatrix::distribute_local_to_global<VectorType>( \
1419  const Vector<VectorType::value_type> &, \
1420  const std::vector<ConstraintMatrix::size_type> &, \
1421  const std::vector<ConstraintMatrix::size_type> &, \
1422  VectorType &, \
1423  const FullMatrix<VectorType::value_type> &, \
1424  bool) const
1425 
1426 #define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
1427  template void ConstraintMatrix::distribute_local_to_global<VectorType>( \
1428  const Vector<VectorType::value_type> &, \
1429  const std::vector<ConstraintMatrix::size_type> &, \
1430  VectorType &, \
1431  const FullMatrix<VectorType::value_type> &) const; \
1432  template void ConstraintMatrix::distribute_local_to_global<VectorType>( \
1433  const Vector<VectorType::value_type> &, \
1434  const std::vector<ConstraintMatrix::size_type> &, \
1435  const std::vector<ConstraintMatrix::size_type> &, \
1436  VectorType &, \
1437  const FullMatrix<VectorType::value_type> &, \
1438  bool) const
1439 
1440 #ifdef DEAL_II_WITH_PETSC
1441 VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
1442 VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
1443 #endif
1444 
1445 #ifdef DEAL_II_WITH_TRILINOS
1446 PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::Vector);
1447 PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::BlockVector);
1448 #endif
1449 
1450 #define MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \
1451  template void \
1452  ConstraintMatrix::distribute_local_to_global<MatrixType, VectorType>( \
1453  const FullMatrix<MatrixType::value_type> &, \
1454  const Vector<VectorType::value_type> &, \
1455  const std::vector<ConstraintMatrix::size_type> &, \
1456  MatrixType &, \
1457  VectorType &, \
1458  bool, \
1459  std::integral_constant<bool, false>) const
1460 #define MATRIX_FUNCTIONS(MatrixType, VectorScalar) \
1461  template void \
1462  ConstraintMatrix::distribute_local_to_global<MatrixType, \
1463  Vector<VectorScalar>>( \
1464  const FullMatrix<MatrixType::value_type> &, \
1465  const Vector<VectorScalar> &, \
1466  const std::vector<ConstraintMatrix::size_type> &, \
1467  MatrixType &, \
1468  Vector<VectorScalar> &, \
1469  bool, \
1470  std::integral_constant<bool, false>) const
1471 #define BLOCK_MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \
1472  template void \
1473  ConstraintMatrix::distribute_local_to_global<MatrixType, VectorType>( \
1474  const FullMatrix<MatrixType::value_type> &, \
1475  const Vector<VectorType::value_type> &, \
1476  const std::vector<ConstraintMatrix::size_type> &, \
1477  MatrixType &, \
1478  VectorType &, \
1479  bool, \
1480  std::integral_constant<bool, true>) const
1481 #define BLOCK_MATRIX_FUNCTIONS(MatrixType) \
1482  template void ConstraintMatrix:: \
1483  distribute_local_to_global<MatrixType, Vector<MatrixType::value_type>>( \
1484  const FullMatrix<MatrixType::value_type> &, \
1485  const Vector<MatrixType::value_type> &, \
1486  const std::vector<ConstraintMatrix::size_type> &, \
1487  MatrixType &, \
1488  Vector<MatrixType::value_type> &, \
1489  bool, \
1490  std::integral_constant<bool, true>) const
1491 
1492 MATRIX_FUNCTIONS(FullMatrix<double>, double);
1493 MATRIX_FUNCTIONS(FullMatrix<float>, float);
1494 MATRIX_FUNCTIONS(FullMatrix<double>, std::complex<double>);
1495 MATRIX_FUNCTIONS(FullMatrix<std::complex<double>>, std::complex<double>);
1496 
1497 MATRIX_FUNCTIONS(SparseMatrix<double>, double);
1498 MATRIX_FUNCTIONS(SparseMatrix<float>, float);
1499 MATRIX_FUNCTIONS(SparseMatrix<double>, std::complex<double>);
1500 MATRIX_FUNCTIONS(SparseMatrix<float>, std::complex<float>);
1501 MATRIX_FUNCTIONS(SparseMatrix<std::complex<double>>, std::complex<double>);
1502 MATRIX_FUNCTIONS(SparseMatrix<std::complex<float>>, std::complex<float>);
1503 
1504 MATRIX_FUNCTIONS(SparseMatrixEZ<double>, double);
1505 MATRIX_FUNCTIONS(SparseMatrixEZ<float>, float);
1506 MATRIX_FUNCTIONS(ChunkSparseMatrix<double>, double);
1507 MATRIX_FUNCTIONS(ChunkSparseMatrix<float>, float);
1508 
1509 
1510 BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
1511 BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
1512 BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix<double>, BlockVector<double>);
1513 BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix<float>, BlockVector<float>);
1514 
1515 // BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrixEZ<double>);
1516 // BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrixEZ<float>, Vector<float>);
1517 
1518 #ifdef DEAL_II_WITH_PETSC
1519 MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix, PetscScalar);
1520 MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix, PetscScalar);
1521 BLOCK_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix);
1522 MATRIX_VECTOR_FUNCTIONS(PETScWrappers::MPI::SparseMatrix,
1523  PETScWrappers::MPI::Vector);
1524 MATRIX_VECTOR_FUNCTIONS(PETScWrappers::SparseMatrix,
1525  PETScWrappers::MPI::Vector);
1526 BLOCK_MATRIX_VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix,
1527  PETScWrappers::MPI::BlockVector);
1528 #endif
1529 
1530 #ifdef DEAL_II_WITH_TRILINOS
1531 MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix, double);
1532 BLOCK_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
1533 MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::SparseMatrix,
1534  TrilinosWrappers::MPI::Vector);
1535 BLOCK_MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix,
1536  TrilinosWrappers::MPI::BlockVector);
1537 #endif
1538 
1539 
1540 #define SPARSITY_FUNCTIONS(SparsityPatternType) \
1541  template void \
1542  ConstraintMatrix::add_entries_local_to_global<SparsityPatternType>( \
1543  const std::vector<ConstraintMatrix::size_type> &, \
1544  SparsityPatternType &, \
1545  const bool, \
1546  const Table<2, bool> &, \
1547  std::integral_constant<bool, false>) const; \
1548  template void \
1549  ConstraintMatrix::add_entries_local_to_global<SparsityPatternType>( \
1550  const std::vector<ConstraintMatrix::size_type> &, \
1551  const std::vector<ConstraintMatrix::size_type> &, \
1552  SparsityPatternType &, \
1553  const bool, \
1554  const Table<2, bool> &) const
1555 #define BLOCK_SPARSITY_FUNCTIONS(SparsityPatternType) \
1556  template void \
1557  ConstraintMatrix::add_entries_local_to_global<SparsityPatternType>( \
1558  const std::vector<ConstraintMatrix::size_type> &, \
1559  SparsityPatternType &, \
1560  const bool, \
1561  const Table<2, bool> &, \
1562  std::integral_constant<bool, true>) const; \
1563  template void \
1564  ConstraintMatrix::add_entries_local_to_global<SparsityPatternType>( \
1565  const std::vector<ConstraintMatrix::size_type> &, \
1566  const std::vector<ConstraintMatrix::size_type> &, \
1567  SparsityPatternType &, \
1568  const bool, \
1569  const Table<2, bool> &) const
1570 
1571 SPARSITY_FUNCTIONS(SparsityPattern);
1572 SPARSITY_FUNCTIONS(DynamicSparsityPattern);
1573 BLOCK_SPARSITY_FUNCTIONS(BlockSparsityPattern);
1574 BLOCK_SPARSITY_FUNCTIONS(BlockDynamicSparsityPattern);
1575 
1576 #ifdef DEAL_II_WITH_TRILINOS
1577 SPARSITY_FUNCTIONS(TrilinosWrappers::SparsityPattern);
1578 BLOCK_SPARSITY_FUNCTIONS(TrilinosWrappers::BlockSparsityPattern);
1579 #endif
1580 
1581 
1582 #define ONLY_MATRIX_FUNCTIONS(MatrixType) \
1583  template void ConstraintMatrix::distribute_local_to_global<MatrixType>( \
1584  const FullMatrix<MatrixType::value_type> &, \
1585  const std::vector<ConstraintMatrix::size_type> &, \
1586  const std::vector<ConstraintMatrix::size_type> &, \
1587  MatrixType &) const; \
1588  template void ConstraintMatrix::distribute_local_to_global<MatrixType>( \
1589  const FullMatrix<MatrixType::value_type> &, \
1590  const std::vector<ConstraintMatrix::size_type> &, \
1591  const ConstraintMatrix &, \
1592  const std::vector<ConstraintMatrix::size_type> &, \
1593  MatrixType &) const
1594 
1595 ONLY_MATRIX_FUNCTIONS(FullMatrix<float>);
1596 ONLY_MATRIX_FUNCTIONS(FullMatrix<double>);
1597 ONLY_MATRIX_FUNCTIONS(SparseMatrix<float>);
1598 ONLY_MATRIX_FUNCTIONS(SparseMatrix<double>);
1599 ONLY_MATRIX_FUNCTIONS(MatrixBlock<SparseMatrix<float>>);
1600 ONLY_MATRIX_FUNCTIONS(MatrixBlock<SparseMatrix<double>>);
1601 ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
1602 ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
1603 
1604 #ifdef DEAL_II_WITH_TRILINOS
1605 ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
1606 ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
1607 #endif
1608 
1609 #ifdef DEAL_II_WITH_PETSC
1610 ONLY_MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix);
1611 ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix);
1612 ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix);
1613 #endif
1614 
1615 #include "constraint_matrix.inst"
1616 
1617 // allocate scratch data. Cannot use the generic template instantiation
1618 // because we need to provide an initializer object of type
1619 // internals::ConstraintMatrixData<Number> that can be passed to the
1620 // constructor of scratch_data (it won't allow one to be constructed in place).
1621 namespace internals
1622 {
1623 #define SCRATCH_INITIALIZER(MatrixScalar, VectorScalar, Name) \
1624  ConstraintMatrixData<MatrixScalar, VectorScalar>::ScratchData \
1625  scratch_data_initializer_##Name; \
1626  template <> \
1627  Threads::ThreadLocalStorage< \
1628  ConstraintMatrixData<MatrixScalar, VectorScalar>::ScratchData> \
1629  ConstraintMatrixData<MatrixScalar, VectorScalar>::scratch_data( \
1630  scratch_data_initializer_##Name)
1631 
1632  SCRATCH_INITIALIZER(double, double, dd);
1633  SCRATCH_INITIALIZER(float, float, ff);
1634  SCRATCH_INITIALIZER(std::complex<double>, std::complex<double>, zz);
1635  SCRATCH_INITIALIZER(std::complex<float>, std::complex<float>, cc);
1636  SCRATCH_INITIALIZER(double, std::complex<double>, dz);
1637  SCRATCH_INITIALIZER(float, std::complex<float>, fc);
1638 #undef SCRATCH_INITIALIZER
1639 } // namespace internals
1640 
1641 
1642 DEAL_II_NAMESPACE_CLOSE