Reference documentation for deal.II version Git 05e4468 2017-09-21 10:18:23 +0200
constraint_matrix.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 1998 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/lac/constraint_matrix.h>
17 #include <deal.II/lac/constraint_matrix.templates.h>
18 
19 #include <deal.II/base/memory_consumption.h>
20 #include <deal.II/lac/dynamic_sparsity_pattern.h>
21 #include <deal.II/lac/block_vector.h>
22 #include <deal.II/lac/block_sparse_matrix.h>
23 #include <deal.II/lac/sparse_matrix_ez.h>
24 #include <deal.II/lac/chunk_sparse_matrix.h>
25 #include <deal.II/lac/block_sparse_matrix_ez.h>
26 #include <deal.II/lac/la_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/la_parallel_block_vector.h>
29 #include <deal.II/lac/petsc_sparse_matrix.h>
30 #include <deal.II/lac/petsc_parallel_vector.h>
31 #include <deal.II/lac/petsc_parallel_block_vector.h>
32 #include <deal.II/lac/petsc_parallel_sparse_matrix.h>
33 #include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
34 #include <deal.II/lac/trilinos_vector.h>
35 #include <deal.II/lac/trilinos_parallel_block_vector.h>
36 #include <deal.II/lac/trilinos_sparse_matrix.h>
37 #include <deal.II/lac/trilinos_block_sparse_matrix.h>
38 #include <deal.II/lac/matrix_block.h>
39 #include <deal.II/lac/diagonal_matrix.h>
40 
41 #include <algorithm>
42 #include <numeric>
43 #include <set>
44 #include <ostream>
45 
46 DEAL_II_NAMESPACE_OPEN
47 
48 
49 
50 // Static member variable
51 const Table<2,bool> ConstraintMatrix::default_empty_table = Table<2,bool>();
52 
53 
54 
55 void
56 ConstraintMatrix::copy_from (const ConstraintMatrix &other)
57 {
58  lines = other.lines;
59  lines_cache = other.lines_cache;
60  local_lines = other.local_lines;
61  sorted = other.sorted;
62 }
63 
64 
65 
66 bool
67 ConstraintMatrix::check_zero_weight (const std::pair<size_type, double> &p)
68 {
69  return (p.second == 0);
70 }
71 
72 
73 
74 bool
75 ConstraintMatrix::ConstraintLine::operator < (const ConstraintLine &a) const
76 {
77  return index < a.index;
78 }
79 
80 
81 
82 bool
83 ConstraintMatrix::ConstraintLine::operator == (const ConstraintLine &a) const
84 {
85  return index == a.index;
86 }
87 
88 
89 
90 std::size_t
91 ConstraintMatrix::ConstraintLine::memory_consumption () const
92 {
93  return (MemoryConsumption::memory_consumption (index) +
94  MemoryConsumption::memory_consumption (entries) +
95  MemoryConsumption::memory_consumption (inhomogeneity));
96 }
97 
98 
99 
100 const ConstraintMatrix::LineRange ConstraintMatrix::get_lines () const
101 {
102  return boost::make_iterator_range(lines.begin(), lines.end());
103 }
104 
105 
106 
107 void
108 ConstraintMatrix::add_lines (const std::set<size_type> &lines)
109 {
110  for (std::set<size_type>::const_iterator
111  i = lines.begin(); i != lines.end(); ++i)
112  add_line (*i);
113 }
114 
115 
116 
117 void
118 ConstraintMatrix::add_lines (const std::vector<bool> &lines)
119 {
120  for (size_type i=0; i<lines.size(); ++i)
121  if (lines[i] == true)
122  add_line (i);
123 }
124 
125 
126 
127 void
128 ConstraintMatrix::add_lines (const IndexSet &lines)
129 {
130  for (size_type i=0; i<lines.n_elements(); ++i)
131  add_line (lines.nth_index_in_set(i));
132 }
133 
134 
135 
136 void
137 ConstraintMatrix::add_entries
138 (const size_type line,
139  const std::vector<std::pair<size_type,double> > &col_val_pairs)
140 {
141  Assert (sorted==false, ExcMatrixIsClosed());
142  Assert (is_constrained(line), ExcLineInexistant(line));
143 
144  ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]];
145  Assert (line_ptr->index == line, ExcInternalError());
146 
147  // if in debug mode, check whether an entry for this column already
148 // exists and if it's the same as the one entered at present
149  //
150  // in any case: skip this entry if an entry for this column already
151  // exists, since we don't want to enter it twice
152  for (std::vector<std::pair<size_type,double> >::const_iterator
153  col_val_pair = col_val_pairs.begin();
154  col_val_pair!=col_val_pairs.end(); ++col_val_pair)
155  {
156  Assert (line != col_val_pair->first,
157  ExcMessage ("Can't constrain a degree of freedom to itself"));
158 
159  for (ConstraintLine::Entries::const_iterator
160  p=line_ptr->entries.begin();
161  p != line_ptr->entries.end(); ++p)
162  if (p->first == col_val_pair->first)
163  {
164  // entry exists, break innermost loop
165  Assert (p->second == col_val_pair->second,
166  ExcEntryAlreadyExists(line, col_val_pair->first,
167  p->second, col_val_pair->second));
168  break;
169  }
170 
171  line_ptr->entries.push_back (*col_val_pair);
172  }
173 }
174 
175 
176 
177 void ConstraintMatrix::add_selected_constraints
178 (const ConstraintMatrix &constraints,
179  const IndexSet &filter)
180 {
181  if (constraints.n_constraints() == 0)
182  return;
183 
184  Assert (filter.size() > constraints.lines.back().index,
185  ExcMessage ("Filter needs to be larger than constraint matrix size."));
186  for (std::vector<ConstraintLine>::const_iterator line=constraints.lines.begin();
187  line!=constraints.lines.end(); ++line)
188  if (filter.is_element(line->index))
189  {
190  const size_type row = filter.index_within_set (line->index);
191  add_line (row);
192  set_inhomogeneity (row, line->inhomogeneity);
193  for (size_type i=0; i<line->entries.size(); ++i)
194  if (filter.is_element(line->entries[i].first))
195  add_entry (row, filter.index_within_set (line->entries[i].first),
196  line->entries[i].second);
197  }
198 }
199 
200 
201 
202 void ConstraintMatrix::close ()
203 {
204  if (sorted == true)
205  return;
206 
207  // sort the lines
208  std::sort (lines.begin(), lines.end());
209 
210  // update list of pointers and give the vector a sharp size since we
211  // won't modify the size any more after this point.
212  {
213  std::vector<size_type> new_lines (lines_cache.size(),
214  numbers::invalid_size_type);
215  size_type counter = 0;
216  for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
217  line!=lines.end(); ++line, ++counter)
218  new_lines[calculate_line_index(line->index)] = counter;
219  std::swap (lines_cache, new_lines);
220  }
221 
222  // in debug mode: check whether we really set the pointers correctly.
223  for (size_type i=0; i<lines_cache.size(); ++i)
224  if (lines_cache[i] != numbers::invalid_size_type)
225  Assert (i == calculate_line_index(lines[lines_cache[i]].index),
226  ExcInternalError());
227 
228  // first, strip zero entries, as we have to do that only once
229  for (std::vector<ConstraintLine>::iterator line = lines.begin();
230  line!=lines.end(); ++line)
231  // first remove zero entries. that would mean that in the linear
232  // constraint for a node, x_i = ax_1 + bx_2 + ..., another node times 0
233  // appears. obviously, 0*something can be omitted
234  line->entries.erase (std::remove_if (line->entries.begin(),
235  line->entries.end(),
236  &check_zero_weight),
237  line->entries.end());
238 
239 
240 
241 #ifdef DEBUG
242  // In debug mode we are computing an estimate for the maximum number
243  // of constraints so that we can bail out if there is a cycle in the
244  // constraints (which is easier than searching for cycles in the graph).
245  //
246  // Let us figure out the largest dof index. This is an upper bound for the
247  // number of constraints because it is an approximation for the number of dofs
248  // in our system.
249  size_type largest_idx = 0;
250  for (std::vector<ConstraintLine>::iterator line = lines.begin();
251  line!=lines.end(); ++line)
252  {
253  for (ConstraintLine::Entries::iterator it = line->entries.begin(); it!=line->entries.end(); ++it)
254  {
255  largest_idx=std::max(largest_idx, it->first);
256  }
257  }
258 #endif
259 
260  // replace references to dofs that are themselves constrained. note that
261  // because we may replace references to other dofs that may themselves be
262  // constrained to third ones, we have to iterate over all this until we
263  // replace no chains of constraints any more
264  //
265  // the iteration replaces references to constrained degrees of freedom by
266  // second-order references. for example if x3=x0/2+x2/2 and x2=x0/2+x1/2,
267 // then the new list will be x3=x0/2+x0/4+x1/4. note that x0 appears
268  // twice. we will throw this duplicate out in the following step, where
269  // we sort the list so that throwing out duplicates becomes much more
270  // efficient. also, we have to do it only once, rather than in each
271  // iteration
272  size_type iteration = 0;
273  while (true)
274  {
275  bool chained_constraint_replaced = false;
276 
277  for (std::vector<ConstraintLine>::iterator line = lines.begin();
278  line!=lines.end(); ++line)
279  {
280 #ifdef DEBUG
281  // we need to keep track of how many replacements we do in this line, because we can
282  // end up in a cycle A->B->C->A without the number of entries growing.
283  size_type n_replacements = 0;
284 #endif
285 
286  // loop over all entries of this line (including ones that we
287  // have appended in this go around) and see whether they are
288  // further constrained. ignore elements that we don't store on
289  // the current processor
290  size_type entry = 0;
291  while (entry < line->entries.size())
292  if (((local_lines.size() == 0)
293  ||
294  (local_lines.is_element(line->entries[entry].first)))
295  &&
296  is_constrained (line->entries[entry].first))
297  {
298  // ok, this entry is further constrained:
299  chained_constraint_replaced = true;
300 
301  // look up the chain of constraints for this entry
302  const size_type dof_index = line->entries[entry].first;
303  const double weight = line->entries[entry].second;
304 
305  Assert (dof_index != line->index,
306  ExcMessage ("Cycle in constraints detected!"));
307 
308  const ConstraintLine *constrained_line =
309  &lines[lines_cache[calculate_line_index(dof_index)]];
310  Assert (constrained_line->index == dof_index,
311  ExcInternalError());
312 
313  // now we have to replace an entry by its expansion. we do
314  // that by overwriting the entry by the first entry of the
315  // expansion and adding the remaining ones to the end,
316  // where we will later process them once more
317  //
318  // we can of course only do that if the DoF that we are
319 // currently handling is constrained by a linear combination
320  // of other dofs:
321  if (constrained_line->entries.size() > 0)
322  {
323  for (size_type i=0; i<constrained_line->entries.size(); ++i)
324  Assert (dof_index != constrained_line->entries[i].first,
325  ExcMessage ("Cycle in constraints detected!"));
326 
327  // replace first entry, then tack the rest to the end
328  // of the list
329  line->entries[entry] =
330  std::make_pair (constrained_line->entries[0].first,
331  constrained_line->entries[0].second *
332  weight);
333 
334  for (size_type i=1; i<constrained_line->entries.size(); ++i)
335  line->entries.emplace_back (constrained_line->entries[i].first,
336  constrained_line->entries[i].second
337  * weight);
338 
339 #ifdef DEBUG
340  // keep track of how many entries we replace in this
341  // line. If we do more than there are constraints or
342  // dofs in our system, we must have a cycle.
343  ++n_replacements;
344  Assert(n_replacements/2<largest_idx, ExcMessage("Cycle in constraints detected!"));
345  if (n_replacements/2>=largest_idx)
346  return; // this enables us to test for this Exception.
347 #endif
348  }
349  else
350  // the DoF that we encountered is not constrained by a
351  // linear combination of other dofs but is equal to just
352  // the inhomogeneity (i.e. its chain of entries is
353  // empty). in that case, we can't just overwrite the
354  // current entry, but we have to actually eliminate it
355  {
356  line->entries.erase (line->entries.begin()+entry);
357  }
358 
359  line->inhomogeneity += constrained_line->inhomogeneity *
360  weight;
361 
362  // now that we're here, do not increase index by one but
363  // rather make another pass for the present entry because
364  // we have replaced the present entry by another one, or
365  // because we have deleted it and shifted all following
366  // ones one forward
367  }
368  else
369  // entry not further constrained. just move ahead by one
370  ++entry;
371  }
372 
373  // if we didn't do anything in this round, then quit the loop
374  if (chained_constraint_replaced == false)
375  break;
376 
377  // increase iteration count. note that we should not iterate more
378  // times than there are constraints, since this puts a natural upper
379  // bound on the length of constraint chains
380  ++iteration;
381  Assert (iteration <= lines.size(), ExcInternalError());
382  }
383 
384  // finally sort the entries and re-scale them if necessary. in this step,
385  // we also throw out duplicates as mentioned above. moreover, as some
386  // entries might have had zero weights, we replace them by a vector with
387  // sharp sizes.
388  for (std::vector<ConstraintLine>::iterator line = lines.begin();
389  line!=lines.end(); ++line)
390  {
391  std::sort (line->entries.begin(), line->entries.end());
392 
393  // loop over the now sorted list and see whether any of the entries
394  // references the same dofs more than once in order to find how many
395  // non-duplicate entries we have. This lets us allocate the correct
396  // amount of memory for the constraint entries.
397  size_type duplicates = 0;
398  for (size_type i=1; i<line->entries.size(); ++i)
399  if (line->entries[i].first == line->entries[i-1].first)
400  duplicates++;
401 
402  if (duplicates > 0 || line->entries.size() < line->entries.capacity())
403  {
404  ConstraintLine::Entries new_entries;
405 
406  // if we have no duplicates, copy verbatim the entries. this way,
407 // the final size of the vector is correct.
408  if (duplicates == 0)
409  new_entries = line->entries;
410  else
411  {
412 // otherwise, we need to go through the list by hand and
413  // resolve the duplicates
414  new_entries.reserve (line->entries.size() - duplicates);
415  new_entries.push_back(line->entries[0]);
416  for (size_type j=1; j<line->entries.size(); ++j)
417  if (line->entries[j].first == line->entries[j-1].first)
418  {
419  Assert (new_entries.back().first == line->entries[j].first,
420  ExcInternalError());
421  new_entries.back().second += line->entries[j].second;
422  }
423  else
424  new_entries.push_back (line->entries[j]);
425 
426  Assert (new_entries.size() == line->entries.size() - duplicates,
427  ExcInternalError());
428 
429  // make sure there are really no duplicates left and that the
430  // list is still sorted
431  for (size_type j=1; j<new_entries.size(); ++j)
432  {
433  Assert (new_entries[j].first != new_entries[j-1].first,
434  ExcInternalError());
435  Assert (new_entries[j].first > new_entries[j-1].first,
436  ExcInternalError());
437  }
438  }
439 
440  // replace old list of constraints for this dof by the new one
441  line->entries.swap (new_entries);
442  }
443 
444  // finally do the following check: if the sum of weights for the
445  // constraints is close to one, but not exactly one, then rescale all
446  // the weights so that they sum up to 1. this adds a little numerical
447  // stability and avoids all sorts of problems where the actual value
448  // is close to, but not quite what we expected
449  //
450  // the case where the weights don't quite sum up happens when we
451  // compute the interpolation weights "on the fly", i.e. not from
452  // precomputed tables. in this case, the interpolation weights are
453  // also subject to round-off
454  double sum = 0;
455  for (size_type i=0; i<line->entries.size(); ++i)
456  sum += line->entries[i].second;
457  if ((sum != 1.0) && (std::fabs (sum-1.) < 1.e-13))
458  {
459  for (size_type i=0; i<line->entries.size(); ++i)
460  line->entries[i].second /= sum;
461  line->inhomogeneity /= sum;
462  }
463  } // end of loop over all constraint lines
464 
465 #ifdef DEBUG
466  // if in debug mode: check that no dof is constrained to another dof that
467  // is also constrained. exclude dofs from this check whose constraint
468  // lines are not stored on the local processor
469  for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
470  line!=lines.end(); ++line)
471  for (ConstraintLine::Entries::const_iterator
472  entry=line->entries.begin();
473  entry!=line->entries.end(); ++entry)
474  if ((local_lines.size() == 0)
475  ||
476  (local_lines.is_element(entry->first)))
477  {
478  // make sure that entry->first is not the index of a line itself
479  const bool is_circle = is_constrained(entry->first);
480  Assert (is_circle == false,
481  ExcDoFConstrainedToConstrainedDoF(line->index, entry->first));
482  }
483 #endif
484 
485  sorted = true;
486 }
487 
488 
489 
490 void
491 ConstraintMatrix::merge (const ConstraintMatrix &other_constraints,
492  const MergeConflictBehavior merge_conflict_behavior,
493  const bool allow_different_local_lines)
494 {
495  (void) allow_different_local_lines;
496  Assert(allow_different_local_lines ||
497  local_lines == other_constraints.local_lines,
498  ExcMessage("local_lines for this and the other objects are not the same "
499  "although allow_different_local_lines is false."));
500 
501  // store the previous state with respect to sorting
502  const bool object_was_sorted = sorted;
503  sorted = false;
504 
505  // first action is to fold into the present object possible constraints
506  // in the second object. we don't strictly need to do this any more since
507  // the ConstraintMatrix has learned to deal with chains of constraints in
508  // the close() function, but we have traditionally done this and it's not
509  // overly hard to do.
510  //
511  // for this, loop over all constraints and replace the constraint lines
512 // with a new one where constraints are replaced if necessary.
513  ConstraintLine::Entries tmp;
514  for (std::vector<ConstraintLine>::iterator line=lines.begin();
515  line!=lines.end(); ++line)
516  {
517  tmp.clear ();
518  for (size_type i=0; i<line->entries.size(); ++i)
519  {
520  // if the present dof is not stored, or not constrained, or if we won't take the
521  // constraint from the other object, then simply copy it over
522  if ((other_constraints.local_lines.size() != 0
523  && other_constraints.local_lines.is_element(line->entries[i].first) == false)
524  ||
525  other_constraints.is_constrained(line->entries[i].first) == false
526  ||
527  ((merge_conflict_behavior != right_object_wins)
528  && other_constraints.is_constrained(line->entries[i].first)
529  && this->is_constrained(line->entries[i].first)))
530  tmp.push_back(line->entries[i]);
531  else
532  // otherwise resolve further constraints by replacing the old
533  // entry by a sequence of new entries taken from the other
534  // object, but with multiplied weights
535  {
536  const ConstraintLine::Entries *other_line
537  = other_constraints.get_constraint_entries (line->entries[i].first);
538  Assert (other_line != nullptr,
539  ExcInternalError());
540 
541  const double weight = line->entries[i].second;
542 
543  for (ConstraintLine::Entries::const_iterator j=other_line->begin();
544  j!=other_line->end(); ++j)
545  tmp.emplace_back(j->first, j->second*weight);
546 
547  line->inhomogeneity
548  += other_constraints.get_inhomogeneity(line->entries[i].first) *
549  weight;
550  }
551  }
552  // finally exchange old and newly resolved line
553  line->entries.swap (tmp);
554  }
555 
556  if (local_lines.size() != 0)
557  local_lines.add_indices(other_constraints.local_lines);
558 
559  {
560  // do not bother to resize the lines cache exactly since it is pretty
561  // cheap to adjust it along the way.
562  std::fill(lines_cache.begin(), lines_cache.end(), numbers::invalid_size_type);
563 
564  // reset lines_cache for our own constraints
565  size_type index = 0;
566  for (std::vector<ConstraintLine>::const_iterator line = lines.begin();
567  line != lines.end(); ++line)
568  {
569  size_type local_line_no = calculate_line_index(line->index);
570  if (local_line_no >= lines_cache.size())
571  lines_cache.resize(local_line_no+1, numbers::invalid_size_type);
572  lines_cache[local_line_no] = index++;
573  }
574 
575  // Add other_constraints to lines cache and our list of constraints
576  for (std::vector<ConstraintLine>::const_iterator line = other_constraints.lines.begin();
577  line != other_constraints.lines.end(); ++line)
578  {
579  const size_type local_line_no = calculate_line_index(line->index);
580  if (local_line_no >= lines_cache.size())
581  {
582  lines_cache.resize(local_line_no+1, numbers::invalid_size_type);
583  lines.push_back(*line);
584  lines_cache[local_line_no] = index++;
585  }
586  else if (lines_cache[local_line_no] == numbers::invalid_size_type)
587  {
588  // there are no constraints for that line yet
589  lines.push_back(*line);
590  AssertIndexRange(local_line_no, lines_cache.size());
591  lines_cache[local_line_no] = index++;
592  }
593  else
594  {
595  // we already store that line
596  switch (merge_conflict_behavior)
597  {
598  case no_conflicts_allowed:
599  AssertThrow (false,
600  ExcDoFIsConstrainedFromBothObjects (line->index));
601  break;
602 
603  case left_object_wins:
604  // ignore this constraint
605  break;
606 
607  case right_object_wins:
608  AssertIndexRange(local_line_no, lines_cache.size());
609  lines[lines_cache[local_line_no]] = *line;
610  break;
611 
612  default:
613  Assert (false, ExcNotImplemented());
614  }
615  }
616  }
617 
618  // check that we set the pointers correctly
619  for (size_type i=0; i<lines_cache.size(); ++i)
620  if (lines_cache[i] != numbers::invalid_size_type)
621  Assert (i == calculate_line_index(lines[lines_cache[i]].index),
622  ExcInternalError());
623  }
624 
625  // if the object was sorted before, then make sure it is so afterward as
626  // well. otherwise leave everything in the unsorted state
627  if (object_was_sorted == true)
628  close ();
629 }
630 
631 
632 
633 void ConstraintMatrix::shift (const size_type offset)
634 {
635  if (local_lines.size() == 0)
636  lines_cache.insert (lines_cache.begin(), offset,
637  numbers::invalid_size_type);
638  else
639  {
640  // shift local_lines
641  IndexSet new_local_lines(local_lines.size());
642  new_local_lines.add_indices(local_lines, offset);
643  std::swap(local_lines, new_local_lines);
644  }
645 
646  for (std::vector<ConstraintLine>::iterator i = lines.begin();
647  i != lines.end(); ++i)
648  {
649  i->index += offset;
650  for (ConstraintLine::Entries::iterator
651  j = i->entries.begin();
652  j != i->entries.end(); ++j)
653  j->first += offset;
654  }
655 
656 #ifdef DEBUG
657  // make sure that lines, lines_cache and local_lines
658  // are still linked correctly
659  for (size_type i=0; i<lines_cache.size(); ++i)
660  Assert(lines_cache[i] == numbers::invalid_size_type ||
661  calculate_line_index(lines[lines_cache[i]].index) == i,
662  ExcInternalError());
663 #endif
664 }
665 
666 
667 
668 void ConstraintMatrix::clear ()
669 {
670  {
671  std::vector<ConstraintLine> tmp;
672  lines.swap (tmp);
673  }
674 
675  {
676  std::vector<size_type> tmp;
677  lines_cache.swap (tmp);
678  }
679 
680  sorted = false;
681 }
682 
683 
684 
685 void ConstraintMatrix::reinit (const IndexSet &local_constraints)
686 {
687  local_lines = local_constraints;
688 
689  // make sure the IndexSet is compressed. Otherwise this can lead to crashes
690  // that are hard to find (only happen in release mode).
691 // see tests/mpi/constraint_matrix_crash_01
692  local_lines.compress();
693 
694  clear();
695 }
696 
697 
698 
699 void ConstraintMatrix::condense (SparsityPattern &sparsity) const
700 {
701  Assert (sorted == true, ExcMatrixNotClosed());
702  Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
703  Assert (sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());
704 
705  // store for each index whether it must be distributed or not. If entry
706 // is numbers::invalid_size_type, no distribution is necessary.
707  // otherwise, the number states which line in the constraint matrix
708  // handles this index
709  std::vector<size_type> distribute(sparsity.n_rows(),
710  numbers::invalid_size_type);
711 
712  for (size_type c=0; c<lines.size(); ++c)
713  distribute[lines[c].index] = c;
714 
715  const size_type n_rows = sparsity.n_rows();
716  for (size_type row=0; row<n_rows; ++row)
717  {
718  if (distribute[row] == numbers::invalid_size_type)
719  {
720 // regular line. loop over all valid cols. note that this
721  // changes the line we are presently working on: we add additional
722  // entries. these are put to the end of the row. however, as
723  // constrained nodes cannot be constrained to other constrained
724  // nodes, nothing will happen if we run into these added nodes, as
725  // they can't be distributed further. we might store the position of
726  // the last old entry and stop work there, but since operating on
727  // the newly added ones only takes two comparisons (column index
728  // valid, distribute[column] necessarily
729  // ==numbers::invalid_size_type), it is cheaper to not do so and
730  // run right until the end of the line
731  for (SparsityPattern::iterator entry = sparsity.begin(row);
732  ((entry != sparsity.end(row)) &&
733  entry->is_valid_entry());
734  ++entry)
735  {
736  const size_type column = entry->column();
737 
738  if (distribute[column] != numbers::invalid_size_type)
739  {
740  // distribute entry at regular row @p{row} and irregular
741  // column sparsity.colnums[j]
742  for (size_type q=0;
743  q!=lines[distribute[column]].entries.size();
744  ++q)
745  sparsity.add (row,
746  lines[distribute[column]].entries[q].first);
747  }
748  }
749  }
750  else
751  // row must be distributed. note that here the present row is not
752  // touched (unlike above)
753  {
754  for (SparsityPattern::iterator entry = sparsity.begin(row);
755  (entry != sparsity.end(row)) && entry->is_valid_entry(); ++entry)
756  {
757  const size_type column = entry->column();
758  if (distribute[column] == numbers::invalid_size_type)
759  // distribute entry at irregular row @p{row} and regular
760  // column sparsity.colnums[j]
761  for (size_type q=0;
762  q!=lines[distribute[row]].entries.size(); ++q)
763  sparsity.add (lines[distribute[row]].entries[q].first,
764  column);
765  else
766  // distribute entry at irregular row @p{row} and irregular
767  // column sparsity.get_column_numbers()[j]
768  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
769  for (size_type q=0;
770  q!=lines[distribute[column]].entries.size(); ++q)
771  sparsity.add (lines[distribute[row]].entries[p].first,
772  lines[distribute[column]].entries[q].first);
773  }
774  }
775  }
776 
777  sparsity.compress();
778 }
779 
780 
781 
782 
783 void ConstraintMatrix::condense (DynamicSparsityPattern &sparsity) const
784 {
785  Assert (sorted == true, ExcMatrixNotClosed());
786  Assert (sparsity.n_rows() == sparsity.n_cols(),
787  ExcNotQuadratic());
788 
789  // store for each index whether it must be distributed or not. If entry
790 // is numbers::invalid_size_type, no distribution is necessary.
791  // otherwise, the number states which line in the constraint matrix
792  // handles this index
793  std::vector<size_type> distribute(sparsity.n_rows(),
794  numbers::invalid_size_type);
795 
796  for (size_type c=0; c<lines.size(); ++c)
797  distribute[lines[c].index] = c;
798 
799  const size_type n_rows = sparsity.n_rows();
800  for (size_type row=0; row<n_rows; ++row)
801  {
802  if (distribute[row] == numbers::invalid_size_type)
803 // regular line. loop over cols. note that as we proceed to
804  // distribute cols, the loop may get longer
805  for (size_type j=0; j<sparsity.row_length(row); ++j)
806  {
807  const size_type column = sparsity.column_number(row,j);
808 
809  if (distribute[column] != numbers::invalid_size_type)
810  {
811  // distribute entry at regular row @p{row} and irregular
812  // column column. note that this changes the line we are
813  // presently working on: we add additional entries. if we
814  // add another entry at a column behind the present one, we
815  // will encounter it later on (but since it can't be
816  // further constrained, won't have to do anything about
817  // it). if we add it up front of the present column, we
818  // will find the present column later on again as it was
819  // shifted back (again nothing happens, in particular no
820  // endless loop, as when we encounter it the second time we
821  // won't be able to add more entries as they all already
822  // exist, but we do the same work more often than
823  // necessary, and the loop gets longer), so move the cursor
824  // one to the right in the case that we add an entry up
825  // front that did not exist before. check whether it
826  // existed before by tracking the length of this row
827  size_type old_rowlength = sparsity.row_length(row);
828  for (size_type q=0;
829  q!=lines[distribute[column]].entries.size();
830  ++q)
831  {
832  const size_type
833  new_col = lines[distribute[column]].entries[q].first;
834 
835  sparsity.add (row, new_col);
836 
837  const size_type new_rowlength = sparsity.row_length(row);
838  if ((new_col < column) && (old_rowlength != new_rowlength))
839  ++j;
840  old_rowlength = new_rowlength;
841  };
842  };
843  }
844  else
845  // row must be distributed
846  for (size_type j=0; j<sparsity.row_length(row); ++j)
847  {
848  const size_type column = sparsity.column_number(row,j);
849 
850  if (distribute[column] == numbers::invalid_size_type)
851  // distribute entry at irregular row @p{row} and regular
852  // column sparsity.colnums[j]
853  for (size_type q=0;
854  q!=lines[distribute[row]].entries.size(); ++q)
855  sparsity.add (lines[distribute[row]].entries[q].first,
856  column);
857  else
858  // distribute entry at irregular row @p{row} and irregular
859  // column sparsity.get_column_numbers()[j]
860  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
861  for (size_type q=0;
862  q!=lines[distribute[sparsity.column_number(row,j)]]
863  .entries.size(); ++q)
864  sparsity.add (lines[distribute[row]].entries[p].first,
865  lines[distribute[sparsity.column_number(row,j)]]
866  .entries[q].first);
867  };
868  };
869 }
870 
871 
872 
873 void ConstraintMatrix::condense (BlockSparsityPattern &sparsity) const
874 {
875  Assert (sorted == true, ExcMatrixNotClosed());
876  Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
877  Assert (sparsity.n_rows() == sparsity.n_cols(),
878  ExcNotQuadratic());
879  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
880  ExcNotQuadratic());
881  Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
882  ExcNotQuadratic());
883 
884  const BlockIndices &
885  index_mapping = sparsity.get_column_indices();
886 
887  const size_type n_blocks = sparsity.n_block_rows();
888 
889  // store for each index whether it must be distributed or not. If entry
890 // is numbers::invalid_size_type, no distribution is necessary.
891  // otherwise, the number states which line in the constraint matrix
892  // handles this index
893  std::vector<size_type> distribute (sparsity.n_rows(),
894  numbers::invalid_size_type);
895 
896  for (size_type c=0; c<lines.size(); ++c)
897  distribute[lines[c].index] = c;
898 
899  const size_type n_rows = sparsity.n_rows();
900  for (size_type row=0; row<n_rows; ++row)
901  {
902  // get index of this row within the blocks
903  const std::pair<size_type,size_type>
904  block_index = index_mapping.global_to_local(row);
905  const size_type block_row = block_index.first;
906 
907  if (distribute[row] == numbers::invalid_size_type)
908 // regular line. loop over all columns and see whether this column
909  // must be distributed
910  {
911 
912  // to loop over all entries in this row, we have to loop over all
913  // blocks in this blockrow and the corresponding row therein
914  for (size_type block_col=0; block_col<n_blocks; ++block_col)
915  {
916  const SparsityPattern &
917  block_sparsity = sparsity.block(block_row, block_col);
918 
919  for (SparsityPattern::iterator
920  entry = block_sparsity.begin(block_index.second);
921  (entry != block_sparsity.end(block_index.second)) &&
922  entry->is_valid_entry();
923  ++entry)
924  {
925  const size_type global_col
926  = index_mapping.local_to_global(block_col, entry->column());
927 
928  if (distribute[global_col] != numbers::invalid_size_type)
929  // distribute entry at regular row @p{row} and
930  // irregular column global_col
931  {
932  for (size_type q=0;
933  q!=lines[distribute[global_col]].entries.size(); ++q)
934  sparsity.add (row,
935  lines[distribute[global_col]].entries[q].first);
936  }
937  }
938  }
939  }
940  else
941  {
942  // row must be distributed. split the whole row into the chunks
943  // defined by the blocks
944  for (size_type block_col=0; block_col<n_blocks; ++block_col)
945  {
946  const SparsityPattern &
947  block_sparsity = sparsity.block(block_row,block_col);
948 
949  for (SparsityPattern::iterator
950  entry = block_sparsity.begin(block_index.second);
951  (entry != block_sparsity.end(block_index.second)) &&
952  entry->is_valid_entry();
953  ++entry)
954  {
955  const size_type global_col
956  = index_mapping.local_to_global (block_col, entry->column());
957 
958  if (distribute[global_col] == numbers::invalid_size_type)
959  // distribute entry at irregular row @p{row} and
960  // regular column global_col.
961  {
962  for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
963  sparsity.add (lines[distribute[row]].entries[q].first, global_col);
964  }
965  else
966  // distribute entry at irregular row @p{row} and
967  // irregular column @p{global_col}
968  {
969  for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
970  for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
971  sparsity.add (lines[distribute[row]].entries[p].first,
972  lines[distribute[global_col]].entries[q].first);
973  }
974  }
975  }
976  }
977  }
978 
979  sparsity.compress();
980 }
981 
982 
983 
984 
985 void ConstraintMatrix::condense (BlockDynamicSparsityPattern &sparsity) const
986 {
987  Assert (sorted == true, ExcMatrixNotClosed());
988  Assert (sparsity.n_rows() == sparsity.n_cols(),
989  ExcNotQuadratic());
990  Assert (sparsity.n_block_rows() == sparsity.n_block_cols(),
991  ExcNotQuadratic());
992  Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
993  ExcNotQuadratic());
994 
995  const BlockIndices &
996  index_mapping = sparsity.get_column_indices();
997 
998  const size_type n_blocks = sparsity.n_block_rows();
999 
1000  // store for each index whether it must be distributed or not. If entry
1001 // is numbers::invalid_size_type, no distribution is necessary.
1002  // otherwise, the number states which line in the constraint matrix
1003  // handles this index
1004  std::vector<size_type> distribute (sparsity.n_rows(),
1005  numbers::invalid_size_type);
1006 
1007  for (size_type c=0; c<lines.size(); ++c)
1008  distribute[lines[c].index] = static_cast<signed int>(c);
1009 
1010  const size_type n_rows = sparsity.n_rows();
1011  for (size_type row=0; row<n_rows; ++row)
1012  {
1013  // get index of this row within the blocks
1014  const std::pair<size_type,size_type>
1015  block_index = index_mapping.global_to_local(row);
1016  const size_type block_row = block_index.first;
1017  const size_type local_row = block_index.second;
1018 
1019  if (distribute[row] == numbers::invalid_size_type)
1020 // regular line. loop over all columns and see whether this column
1021  // must be distributed. note that as we proceed to distribute cols,
1022  // the loop over cols may get longer.
1023  //
1024  // don't try to be clever here as in the algorithm for the
1025  // DynamicSparsityPattern, as that would be much more
1026  // complicated here. after all, we know that compressed patterns
1027  // are inefficient...
1028  {
1029 
1030  // to loop over all entries in this row, we have to loop over all
1031  // blocks in this blockrow and the corresponding row therein
1032  for (size_type block_col=0; block_col<n_blocks; ++block_col)
1033  {
1034  const DynamicSparsityPattern &
1035  block_sparsity = sparsity.block(block_row, block_col);
1036 
1037  for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
1038  {
1039  const size_type global_col
1040  = index_mapping.local_to_global(block_col,
1041  block_sparsity.column_number(local_row,j));
1042 
1043  if (distribute[global_col] != numbers::invalid_size_type)
1044  // distribute entry at regular row @p{row} and
1045  // irregular column global_col
1046  {
1047  for (size_type q=0;
1048  q!=lines[distribute[global_col]]
1049  .entries.size(); ++q)
1050  sparsity.add (row,
1051  lines[distribute[global_col]].entries[q].first);
1052  };
1053  };
1054  };
1055  }
1056  else
1057  {
1058  // row must be distributed. split the whole row into the chunks
1059  // defined by the blocks
1060  for (size_type block_col=0; block_col<n_blocks; ++block_col)
1061  {
1062  const DynamicSparsityPattern &
1063  block_sparsity = sparsity.block(block_row,block_col);
1064 
1065  for (size_type j=0; j<block_sparsity.row_length(local_row); ++j)
1066  {
1067  const size_type global_col
1068  = index_mapping.local_to_global (block_col,
1069  block_sparsity.column_number(local_row,j));
1070 
1071  if (distribute[global_col] == numbers::invalid_size_type)
1072  // distribute entry at irregular row @p{row} and
1073  // regular column global_col.
1074  {
1075  for (size_type q=0;
1076  q!=lines[distribute[row]].entries.size(); ++q)
1077  sparsity.add (lines[distribute[row]].entries[q].first,
1078  global_col);
1079  }
1080  else
1081  // distribute entry at irregular row @p{row} and
1082  // irregular column @p{global_col}
1083  {
1084  for (size_type p=0;
1085  p!=lines[distribute[row]].entries.size(); ++p)
1086  for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
1087  sparsity.add (lines[distribute[row]].entries[p].first,
1088  lines[distribute[global_col]].entries[q].first);
1089  };
1090  };
1091  };
1092  };
1093  };
1094 }
1095 
1096 
1097 
1098 bool ConstraintMatrix::is_identity_constrained (const size_type index) const
1099 {
1100  if (is_constrained(index) == false)
1101  return false;
1102 
1103  const ConstraintLine &p = lines[lines_cache[calculate_line_index(index)]];
1104  Assert (p.index == index, ExcInternalError());
1105 
1106  // return if an entry for this line was found and if it has only one
1107  // entry equal to 1.0
1108  return ((p.entries.size() == 1) &&
1109  (p.entries[0].second == 1.0));
1110 }
1111 
1112 
1113 bool ConstraintMatrix::are_identity_constrained (const size_type index1,
1114  const size_type index2) const
1115 {
1116  if (is_constrained(index1) == true)
1117  {
1118  const ConstraintLine &p = lines[lines_cache[calculate_line_index(index1)]];
1119  Assert (p.index == index1, ExcInternalError());
1120 
1121  // return if an entry for this line was found and if it has only one
1122  // entry equal to 1.0 and that one is index2
1123  return ((p.entries.size() == 1) &&
1124  (p.entries[0].first == index2) &&
1125  (p.entries[0].second == 1.0));
1126  }
1127  else if (is_constrained(index2) == true)
1128  {
1129  const ConstraintLine &p = lines[lines_cache[calculate_line_index(index2)]];
1130  Assert (p.index == index2, ExcInternalError());
1131 
1132  // return if an entry for this line was found and if it has only one
1133  // entry equal to 1.0 and that one is index1
1134  return ((p.entries.size() == 1) &&
1135  (p.entries[0].first == index1) &&
1136  (p.entries[0].second == 1.0));
1137  }
1138  else
1139  return false;
1140 }
1141 
1142 
1143 
1144 ConstraintMatrix::size_type
1145 ConstraintMatrix::max_constraint_indirections () const
1146 {
1147  size_type return_value = 0;
1148  for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
1149  i!=lines.end(); ++i)
1150  // use static cast, since typeof(size)==std::size_t, which is !=
1151  // size_type on AIX
1152  return_value = std::max(return_value,
1153  static_cast<size_type>(i->entries.size()));
1154 
1155  return return_value;
1156 }
1157 
1158 
1159 
1160 bool ConstraintMatrix::has_inhomogeneities () const
1161 {
1162  for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
1163  i!=lines.end(); ++i)
1164  if (i->inhomogeneity != 0.)
1165  return true;
1166 
1167  return false;
1168 }
1169 
1170 
1171 void ConstraintMatrix::print (std::ostream &out) const
1172 {
1173  for (size_type i=0; i!=lines.size(); ++i)
1174  {
1175  // output the list of constraints as pairs of dofs and their weights
1176  if (lines[i].entries.size() > 0)
1177  {
1178  for (size_type j=0; j<lines[i].entries.size(); ++j)
1179  out << " " << lines[i].index
1180  << " " << lines[i].entries[j].first
1181  << ": " << lines[i].entries[j].second << "\n";
1182 
1183  // print out inhomogeneity.
1184  if (lines[i].inhomogeneity != 0)
1185  out << " " << lines[i].index
1186  << ": " << lines[i].inhomogeneity << "\n";
1187  }
1188  else
1189  // but also output something if the constraint simply reads
1190  // x[13]=0, i.e. where the right hand side is not a linear
1191  // combination of other dofs
1192  {
1193  if (lines[i].inhomogeneity != 0)
1194  out << " " << lines[i].index
1195  << " = " << lines[i].inhomogeneity
1196  << "\n";
1197  else
1198  out << " " << lines[i].index << " = 0\n";
1199  }
1200  }
1201 
1202  AssertThrow (out, ExcIO());
1203 }
1204 
1205 
1206 
1207 void
1208 ConstraintMatrix::write_dot (std::ostream &out) const
1209 {
1210  out << "digraph constraints {"
1211  << std::endl;
1212  for (size_type i=0; i!=lines.size(); ++i)
1213  {
1214  // same concept as in the previous function
1215  if (lines[i].entries.size() > 0)
1216  for (size_type j=0; j<lines[i].entries.size(); ++j)
1217  out << " " << lines[i].index << "->" << lines[i].entries[j].first
1218  << "; // weight: "
1219  << lines[i].entries[j].second
1220  << "\n";
1221  else
1222  out << " " << lines[i].index << "\n";
1223  }
1224  out << "}" << std::endl;
1225 }
1226 
1227 
1228 
1229 std::size_t
1230 ConstraintMatrix::memory_consumption () const
1231 {
1232  return (MemoryConsumption::memory_consumption (lines) +
1233  MemoryConsumption::memory_consumption (lines_cache) +
1234  MemoryConsumption::memory_consumption (sorted) +
1235  MemoryConsumption::memory_consumption (local_lines));
1236 }
1237 
1238 
1239 
1240 void
1241 ConstraintMatrix::resolve_indices (std::vector<types::global_dof_index> &indices) const
1242 {
1243  const unsigned int indices_size = indices.size();
1244  const std::vector<std::pair<types::global_dof_index,double> > *line_ptr;
1245  for (unsigned int i=0; i<indices_size; ++i)
1246  {
1247  line_ptr = get_constraint_entries(indices[i]);
1248 // if the index is constrained, the constraint's indices are added to the
1249  // indices vector
1250  if (line_ptr!=nullptr)
1251  {
1252  const unsigned int line_size = line_ptr->size();
1253  for (unsigned int j=0; j<line_size; ++j)
1254  indices.push_back((*line_ptr)[j].first);
1255  }
1256  }
1257 
1258  // keep only the unique elements
1259  std::sort(indices.begin(),indices.end());
1260  std::vector<types::global_dof_index>::iterator it;
1261  it = std::unique(indices.begin(),indices.end());
1262  indices.resize(it-indices.begin());
1263 }
1264 
1265 
1266 
1267 // explicit instantiations
1268 //
1269 // define a list of functions for vectors and matrices, respectively, where
1270 // the vector/matrix can be replaced using a preprocessor variable
1271 // VectorType/MatrixType. note that we need a space between "VectorType" and
1272 // ">" to disambiguate ">>" when VectorType trails in an angle bracket
1273 
1274 // TODO: The way we define all the instantiations is probably not the very
1275 // best one. Try to find a better description.
1276 
1277 #define VECTOR_FUNCTIONS(VectorType) \
1278  template void ConstraintMatrix::condense<VectorType >(const VectorType &uncondensed,\
1279  VectorType &condensed) const;\
1280  template void ConstraintMatrix::condense<VectorType >(VectorType &vec) const;\
1281  template void ConstraintMatrix:: \
1282  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type> &, \
1283  const std::vector<ConstraintMatrix::size_type> &, \
1284  VectorType &, \
1285  const FullMatrix<VectorType::value_type> &) const;\
1286  template void ConstraintMatrix:: \
1287  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type> &, \
1288  const std::vector<ConstraintMatrix::size_type> &, \
1289  const std::vector<ConstraintMatrix::size_type> &, \
1290  VectorType &, \
1291  const FullMatrix<VectorType::value_type> &, \
1292  bool) const
1293 
1294 #define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
1295  template void ConstraintMatrix:: \
1296  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type> &, \
1297  const std::vector<ConstraintMatrix::size_type> &, \
1298  VectorType &, \
1299  const FullMatrix<VectorType::value_type> &) const;\
1300  template void ConstraintMatrix:: \
1301  distribute_local_to_global<VectorType > (const Vector<VectorType::value_type> &, \
1302  const std::vector<ConstraintMatrix::size_type> &, \
1303  const std::vector<ConstraintMatrix::size_type> &, \
1304  VectorType &, \
1305  const FullMatrix<VectorType::value_type> &, \
1306  bool) const
1307 
1308 #ifdef DEAL_II_WITH_PETSC
1309 VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
1310 VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
1311 #endif
1312 
1313 #ifdef DEAL_II_WITH_TRILINOS
1314 PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::Vector);
1315 PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::BlockVector);
1316 #endif
1317 
1318 #define MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \
1319  template void ConstraintMatrix:: \
1320  distribute_local_to_global<MatrixType,VectorType > (const FullMatrix<MatrixType::value_type> &, \
1321  const Vector<VectorType::value_type> &, \
1322  const std::vector<ConstraintMatrix::size_type> &, \
1323  MatrixType &, \
1324  VectorType &, \
1325  bool , \
1326  std::integral_constant<bool, false>) const
1327 #define MATRIX_FUNCTIONS(MatrixType) \
1328  template void ConstraintMatrix:: \
1329  distribute_local_to_global<MatrixType,Vector<MatrixType::value_type> > (const FullMatrix<MatrixType::value_type> &, \
1330  const Vector<MatrixType::value_type> &, \
1331  const std::vector<ConstraintMatrix::size_type> &, \
1332  MatrixType &, \
1333  Vector<MatrixType::value_type> &, \
1334  bool , \
1335  std::integral_constant<bool, false>) const
1336 #define BLOCK_MATRIX_VECTOR_FUNCTIONS(MatrixType, VectorType) \
1337  template void ConstraintMatrix:: \
1338  distribute_local_to_global<MatrixType,VectorType > (const FullMatrix<MatrixType::value_type> &, \
1339  const Vector<VectorType::value_type> &, \
1340  const std::vector<ConstraintMatrix::size_type> &, \
1341  MatrixType &, \
1342  VectorType &, \
1343  bool , \
1344  std::integral_constant<bool, true>) const
1345 #define BLOCK_MATRIX_FUNCTIONS(MatrixType) \
1346  template void ConstraintMatrix:: \
1347  distribute_local_to_global<MatrixType,Vector<MatrixType::value_type> > (const FullMatrix<MatrixType::value_type> &, \
1348  const Vector<MatrixType::value_type> &, \
1349  const std::vector<ConstraintMatrix::size_type> &, \
1350  MatrixType &, \
1351  Vector<MatrixType::value_type> &, \
1352  bool , \
1353  std::integral_constant<bool, true>) const
1354 
1355 MATRIX_FUNCTIONS(SparseMatrix<double>);
1356 MATRIX_FUNCTIONS(SparseMatrix<float>);
1357 MATRIX_FUNCTIONS(FullMatrix<double>);
1358 MATRIX_FUNCTIONS(FullMatrix<float>);
1359 MATRIX_FUNCTIONS(FullMatrix<std::complex<double> >);
1360 MATRIX_FUNCTIONS(SparseMatrix<std::complex<double> >);
1361 MATRIX_FUNCTIONS(SparseMatrix<std::complex<float> >);
1362 
1363 BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
1364 BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
1365 BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix<double>, BlockVector<double>);
1366 BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrix<float>, BlockVector<float>);
1367 
1368 MATRIX_FUNCTIONS(SparseMatrixEZ<double>);
1369 MATRIX_FUNCTIONS(SparseMatrixEZ<float>);
1370 MATRIX_FUNCTIONS(ChunkSparseMatrix<double>);
1371 MATRIX_FUNCTIONS(ChunkSparseMatrix<float>);
1372 
1373 // BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrixEZ<double>);
1374 // BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrixEZ<float>, Vector<float>);
1375 
1376 #ifdef DEAL_II_WITH_PETSC
1377 MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix);
1378 MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix);
1379 BLOCK_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix);
1382 #endif
1383 
1384 #ifdef DEAL_II_WITH_TRILINOS
1385 MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
1386 BLOCK_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
1389 #endif
1390 
1391 
1392 #define SPARSITY_FUNCTIONS(SparsityPatternType) \
1393  template void ConstraintMatrix::add_entries_local_to_global<SparsityPatternType> ( \
1394  const std::vector<ConstraintMatrix::size_type> &, \
1395  SparsityPatternType &, \
1396  const bool, \
1397  const Table<2,bool> &, \
1398  std::integral_constant<bool, false>) const; \
1399  template void ConstraintMatrix::add_entries_local_to_global<SparsityPatternType> ( \
1400  const std::vector<ConstraintMatrix::size_type> &, \
1401  const std::vector<ConstraintMatrix::size_type> &, \
1402  SparsityPatternType &, \
1403  const bool, \
1404  const Table<2,bool> &) const
1405 #define BLOCK_SPARSITY_FUNCTIONS(SparsityPatternType) \
1406  template void ConstraintMatrix::add_entries_local_to_global<SparsityPatternType> ( \
1407  const std::vector<ConstraintMatrix::size_type> &, \
1408  SparsityPatternType &, \
1409  const bool, \
1410  const Table<2,bool> &, \
1411  std::integral_constant<bool, true>) const; \
1412  template void ConstraintMatrix::add_entries_local_to_global<SparsityPatternType> ( \
1413  const std::vector<ConstraintMatrix::size_type> &, \
1414  const std::vector<ConstraintMatrix::size_type> &, \
1415  SparsityPatternType &, \
1416  const bool, \
1417  const Table<2,bool> &) const
1418 
1419 SPARSITY_FUNCTIONS(SparsityPattern);
1420 SPARSITY_FUNCTIONS(DynamicSparsityPattern);
1421 BLOCK_SPARSITY_FUNCTIONS(BlockSparsityPattern);
1422 BLOCK_SPARSITY_FUNCTIONS(BlockDynamicSparsityPattern);
1423 
1424 #ifdef DEAL_II_WITH_TRILINOS
1425 SPARSITY_FUNCTIONS(TrilinosWrappers::SparsityPattern);
1426 BLOCK_SPARSITY_FUNCTIONS(TrilinosWrappers::BlockSparsityPattern);
1427 #endif
1428 
1429 
1430 #define ONLY_MATRIX_FUNCTIONS(MatrixType) \
1431  template void ConstraintMatrix::distribute_local_to_global<MatrixType > (\
1432  const FullMatrix<MatrixType::value_type> &, \
1433  const std::vector<ConstraintMatrix::size_type> &, \
1434  const std::vector<ConstraintMatrix::size_type> &, \
1435  MatrixType &) const
1436 
1437 ONLY_MATRIX_FUNCTIONS(FullMatrix<float>);
1438 ONLY_MATRIX_FUNCTIONS(FullMatrix<double>);
1439 ONLY_MATRIX_FUNCTIONS(SparseMatrix<float>);
1440 ONLY_MATRIX_FUNCTIONS(SparseMatrix<double>);
1441 ONLY_MATRIX_FUNCTIONS(MatrixBlock<SparseMatrix<float> >);
1442 ONLY_MATRIX_FUNCTIONS(MatrixBlock<SparseMatrix<double> >);
1443 ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
1444 ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
1445 
1446 #ifdef DEAL_II_WITH_TRILINOS
1447 ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
1448 ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
1449 #endif
1450 
1451 #ifdef DEAL_II_WITH_PETSC
1452 ONLY_MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix);
1453 ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix);
1454 ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix);
1455 #endif
1456 
1457 #include "constraint_matrix.inst"
1458 
1459 // allocate scratch data. Cannot use the generic template instantiation
1460 // because we need to provide an initializer object of type
1461 // internals::ConstraintMatrixData<Number> that can be passed to the
1462 // constructor of scratch_data (it won't allow one to be constructed in place).
1463 namespace internals
1464 {
1465 #define SCRATCH_INITIALIZER(Number,Name) \
1466  ConstraintMatrixData<Number>::ScratchData scratch_data_initializer_##Name; \
1467  template <> Threads::ThreadLocalStorage<ConstraintMatrixData<Number>::ScratchData> \
1468  ConstraintMatrixData<Number>::scratch_data(scratch_data_initializer_##Name)
1469 
1470  SCRATCH_INITIALIZER(double,double);
1471  SCRATCH_INITIALIZER(float,float);
1472  SCRATCH_INITIALIZER(std::complex<double>,cdouble);
1473  SCRATCH_INITIALIZER(std::complex<float>,cfloat);
1474 #undef SCRATCH_INITIALIZER
1475 }
1476 
1477 
1478 DEAL_II_NAMESPACE_CLOSE
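
What follows is a small, self-contained usage sketch and not part of constraint_matrix.cc itself: it illustrates the life cycle the member functions above implement, namely entering constraints with add_line(), add_entry() and set_inhomogeneity(), resolving and sorting them with close(), and extending a sparsity pattern with condense(). The indices, weights, and the 10x10 diagonal pattern are invented for illustration only.

#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>

#include <iostream>

int main ()
{
  dealii::ConstraintMatrix constraints;

  // constrain x_3 = 0.5*x_1 + 0.25*x_2 + 1.0 ...
  constraints.add_line (3);
  constraints.add_entry (3, 1, 0.5);
  constraints.add_entry (3, 2, 0.25);
  constraints.set_inhomogeneity (3, 1.0);

  // ... and x_7 = 0 (a line with no entries and zero inhomogeneity)
  constraints.add_line (7);

  // sort the lines, resolve chains of constraints, strip zero weights
  constraints.close ();

  // extend a (here: trivial, diagonal) sparsity pattern by the entries
  // needed to eliminate the constrained rows and columns
  dealii::DynamicSparsityPattern dsp (10, 10);
  for (unsigned int i = 0; i < 10; ++i)
    dsp.add (i, i);
  constraints.condense (dsp);

  // write the constraints in the format produced by print() above
  constraints.print (std::cout);
}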