
fastamg.hh — a fast (sequential) algebraic multigrid preconditioner based on agglomeration that saves memory bandwidth (Dune::Amg::FastAMG).
1 // SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
2 // SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3 // -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4 // vi: set et ts=4 sw=2 sts=2:
5 #ifndef DUNE_ISTL_FASTAMG_HH
6 #define DUNE_ISTL_FASTAMG_HH
7 
8 #include <memory>
14 #include <dune/istl/solvers.hh>
16 #include <dune/istl/superlu.hh>
17 #include <dune/istl/umfpack.hh>
18 #include <dune/istl/solvertype.hh>
19 #include <dune/istl/io.hh>
21 
22 #include "fastamgsmoother.hh"
23 
32 namespace Dune
33 {
34  namespace Amg
35  {
58  template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
59  class FastAMG : public Preconditioner<X,X>
60  {
61  public:
63  typedef M Operator;
70  typedef PI ParallelInformation;
72  typedef MatrixHierarchy<M,ParallelInformation,A> OperatorHierarchy;
74  typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
75 
77  typedef X Domain;
79  typedef X Range;
81  typedef InverseOperator<X,X> CoarseSolver;
82 
90  FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
91  const Parameters& parms,
92  bool symmetric=true);
93 
105  template<class C>
106  FastAMG(std::shared_ptr<const Operator> fineOperator,
107  const C& criterion,
108  const Parameters& parms=Parameters(),
109  bool symmetric=true,
110  const ParallelInformation& pinfo=ParallelInformation());
111 
124  template<class C>
125  FastAMG(const Operator& fineOperator,
126  const C& criterion,
127  const Parameters& parms=Parameters(),
128  bool symmetric=true,
129  const ParallelInformation& pinfo=ParallelInformation())
130  : FastAMG(stackobject_to_shared_ptr(fineOperator),
131  criterion, parms, symmetric, pinfo)
132  {}
133 
137  FastAMG(const FastAMG& amg);
138 
140  void pre(Domain& x, Range& b);
141 
143  void apply(Domain& v, const Range& d);
144 
146  virtual SolverCategory::Category category() const
147  {
148  return SolverCategory::sequential;
149  }
150 
152  void post(Domain& x);
153 
158  template<class A1>
159  void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);
160 
161  std::size_t levels();
162 
163  std::size_t maxlevels();
164 
173  void recalculateHierarchy()
174  {
175  matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
176  }
177 
182  bool usesDirectCoarseLevelSolver() const;
183 
184  private:
191  template<class C>
192  void createHierarchies(C& criterion,
193  std::shared_ptr<const Operator> fineOperator,
194  const PI& pinfo);
195 
202  struct LevelContext
203  {
207  typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
211  typename ParallelInformationHierarchy::Iterator pinfo;
215  typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
219  typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
223  typename Hierarchy<Domain,A>::Iterator lhs;
227  typename Hierarchy<Domain,A>::Iterator residual;
231  typename Hierarchy<Range,A>::Iterator rhs;
235  std::size_t level;
236  };
237 
239  void mgc(LevelContext& levelContext, Domain& x, const Range& b);
240 
247  void presmooth(LevelContext& levelContext, Domain& x, const Range& b);
248 
255  void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);
256 
263  void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
264  Domain& fineX);
265 
270  bool moveToCoarseLevel(LevelContext& levelContext);
271 
276  void initIteratorsWithFineLevel(LevelContext& levelContext);
277 
279  std::shared_ptr<OperatorHierarchy> matrices_;
281  std::shared_ptr<CoarseSolver> solver_;
283  std::shared_ptr<Hierarchy<Range,A>> rhs_;
285  std::shared_ptr<Hierarchy<Domain,A>> lhs_;
287  std::shared_ptr<Hierarchy<Domain,A>> residual_;
288 
292  std::shared_ptr<ScalarProduct> scalarProduct_;
294  std::size_t gamma_;
296  std::size_t preSteps_;
298  std::size_t postSteps_;
299  std::size_t level;
300  bool buildHierarchy_;
301  bool symmetric;
302  bool coarsesolverconverged;
304  typedef std::shared_ptr<Smoother> SmootherPointer;
305  SmootherPointer coarseSmoother_;
307  std::size_t verbosity_;
308  };
309 
310  template<class M, class X, class PI, class A>
311  FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
312  : matrices_(amg.matrices_), solver_(amg.solver_),
313  rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
314  gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
315  symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
316  coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
317  {}
318 
319  template<class M, class X, class PI, class A>
320  FastAMG<M,X,PI,A>::FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
321  const Parameters& parms, bool symmetric_)
322  : matrices_(stackobject_to_shared_ptr(matrices)), solver_(&coarseSolver),
323  rhs_(), lhs_(), residual_(), scalarProduct_(),
324  gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
325  postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
326  symmetric(symmetric_), coarsesolverconverged(true),
327  coarseSmoother_(), verbosity_(parms.debugLevel())
328  {
329  if(preSteps_>1||postSteps_>1)
330  {
331  std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
332  preSteps_=postSteps_=1;
333  }
334  assert(matrices_->isBuilt());
335  static_assert(std::is_same<PI,SequentialInformation>::value,
336  "Currently only sequential runs are supported");
337  }
338  template<class M, class X, class PI, class A>
339  template<class C>
340  FastAMG<M,X,PI,A>::FastAMG(std::shared_ptr<const Operator> fineOperator,
341  const C& criterion,
342  const Parameters& parms,
343  bool symmetric_,
344  const PI& pinfo)
345  : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
346  preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
347  buildHierarchy_(true),
348  symmetric(symmetric_), coarsesolverconverged(true),
349  coarseSmoother_(), verbosity_(criterion.debugLevel())
350  {
351  if(preSteps_>1||postSteps_>1)
352  {
353  std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
354  preSteps_=postSteps_=1;
355  }
356  static_assert(std::is_same<PI,SequentialInformation>::value,
357  "Currently only sequential runs are supported");
358  // TODO: reestablish compile time checks.
359  //static_assert(static_cast<int>(PI::category)==static_cast<int>(S::category),
360  // "Matrix and Solver must match in terms of category!");
361  createHierarchies(criterion, std::move(fineOperator), pinfo);
362  }
363 
364  template<class M, class X, class PI, class A>
365  template<class C>
366  void FastAMG<M,X,PI,A>::createHierarchies(C& criterion,
367  std::shared_ptr<const Operator> fineOperator,
368  const PI& pinfo)
369  {
370  Timer watch;
371  matrices_ = std::make_shared<OperatorHierarchy>(
372  std::const_pointer_cast<Operator>(std::move(fineOperator)),
373  stackobject_to_shared_ptr(const_cast<PI&>(pinfo)));
374 
375  matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);
376 
377  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
378  std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
379 
380  if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
381  // We have the coarsest level. Create the coarse solver.
382  typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
383  SmootherArgs sargs;
384  sargs.iterations = 1;
385 
386  typename ConstructionTraits<Smoother>::Arguments cargs;
387  cargs.setArgs(sargs);
388  if(matrices_->redistributeInformation().back().isSetup()) {
389  // Solve on the redistributed partitioning
390  cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
391  cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
392  }else{
393  cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
394  cargs.setComm(*matrices_->parallelInformation().coarsest());
395  }
396 
397  coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
398  scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());
399 
400 #if HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
401 #if HAVE_SUITESPARSE_UMFPACK
402 #define DIRECTSOLVER UMFPack
403 #else
404 #define DIRECTSOLVER SuperLU
405 #endif
406  // Use the direct solver if we are purely sequential or have only one process on the coarsest level.
407  if(std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
408  || matrices_->parallelInformation().coarsest()->communicator().size()==1 //parallel mode and only one processor
409  || (matrices_->parallelInformation().coarsest().isRedistributed()
410  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
411  && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) { // redistribute and 1 proc
412  if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
413  std::cout<<"Using superlu"<<std::endl;
414  if(matrices_->parallelInformation().coarsest().isRedistributed())
415  {
416  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
417  // We are still participating on this level
418  solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
419  else
420  solver_.reset();
421  }else
422  solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
423  }else
424 #undef DIRECTSOLVER
425 #endif // HAVE_SUPERLU|| HAVE_SUITESPARSE_UMFPACK
426  {
427  if(matrices_->parallelInformation().coarsest().isRedistributed())
428  {
429  if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
430  // We are still participating on this level
431  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
432  *scalarProduct_,
433  *coarseSmoother_, 1E-2, 1000, 0));
434  else
435  solver_.reset();
436  }else
437  solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
438  *scalarProduct_,
439  *coarseSmoother_, 1E-2, 1000, 0));
440  }
441  }
442 
443  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
444  std::cout<<"Building hierarchy and coarse solver took "<<watch.elapsed()<<" seconds in total."<<std::endl;
445  }
446 
447 
448  template<class M, class X, class PI, class A>
449  void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
450  {
451  Timer watch, watch1;
452  // Detect matrix rows where all off-diagonal entries are
453  // zero and set x such that A_dd*x_d=b_d.
454  // This relieves users from treating such Dirichlet rows
455  // specially when setting up their linear systems.
456  typedef typename M::matrix_type Matrix;
457  typedef typename Matrix::ConstRowIterator RowIter;
458  typedef typename Matrix::ConstColIterator ColIter;
459  typedef typename Matrix::block_type Block;
460  Block zero;
461  zero=typename Matrix::field_type();
462 
463  const Matrix& mat=matrices_->matrices().finest()->getmat();
464  for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
465  bool isDirichlet = true;
466  bool hasDiagonal = false;
467  ColIter diag;
468  for(ColIter col=row->begin(); col!=row->end(); ++col) {
469  if(row.index()==col.index()) {
470  diag = col;
471  hasDiagonal = (*col != zero);
472  }else{
473  if(*col!=zero)
474  isDirichlet = false;
475  }
476  }
477  if(isDirichlet && hasDiagonal)
478  diag->solve(x[row.index()], b[row.index()]);
479  }
480  if (verbosity_>0)
481  std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;
482  watch1.reset();
483  // No smoother to make x consistent! Do it by hand
484  matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
485  rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
486  lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
487  residual_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
488  matrices_->coarsenVector(*rhs_);
489  matrices_->coarsenVector(*lhs_);
490  matrices_->coarsenVector(*residual_);
491 
492  // The preconditioner might change x and b. So we have to
493  // copy the changes to the original vectors.
494  x = *lhs_->finest();
495  b = *rhs_->finest();
496  }
497  template<class M, class X, class PI, class A>
498  std::size_t FastAMG<M,X,PI,A>::levels()
499  {
500  return matrices_->levels();
501  }
502  template<class M, class X, class PI, class A>
503  std::size_t FastAMG<M,X,PI,A>::maxlevels()
504  {
505  return matrices_->maxlevels();
506  }
507 
509  template<class M, class X, class PI, class A>
510  void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
511  {
512  LevelContext levelContext;
513  // Init all iterators for the current level
514  initIteratorsWithFineLevel(levelContext);
515 
516  assert(v.two_norm()==0);
517 
518  level=0;
519  if(matrices_->maxlevels()==1){
520  // The coarse solver might modify the d!
521  Range b(d);
522  mgc(levelContext, v, b);
523  }else
524  mgc(levelContext, v, d);
525  if(postSteps_==0||matrices_->maxlevels()==1)
526  levelContext.pinfo->copyOwnerToAll(v, v);
527  }
528 
529  template<class M, class X, class PI, class A>
530  void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
531  {
532  levelContext.matrix = matrices_->matrices().finest();
533  levelContext.pinfo = matrices_->parallelInformation().finest();
534  levelContext.redist =
535  matrices_->redistributeInformation().begin();
536  levelContext.aggregates = matrices_->aggregatesMaps().begin();
537  levelContext.lhs = lhs_->finest();
538  levelContext.residual = residual_->finest();
539  levelContext.rhs = rhs_->finest();
540  levelContext.level=0;
541  }
542 
543  template<class M, class X, class PI, class A>
544  bool FastAMG<M,X,PI,A>
545  ::moveToCoarseLevel(LevelContext& levelContext)
546  {
547  bool processNextLevel=true;
548 
549  if(levelContext.redist->isSetup()) {
550  DUNE_THROW(NotImplemented, "Redistribution to a subset of the processes is not implemented for FastAMG");
551  levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
552  levelContext.residual.getRedistributed());
553  processNextLevel = levelContext.residual.getRedistributed().size()>0;
554  if(processNextLevel) {
555  //restrict defect to coarse level right hand side.
556  ++levelContext.pinfo;
557  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
558  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
559  static_cast<const Range&>(levelContext.residual.getRedistributed()),
560  *levelContext.pinfo);
561  }
562  }else{
563  //restrict defect to coarse level right hand side.
564  ++levelContext.rhs;
565  ++levelContext.pinfo;
566  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
567  ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
568  static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
569  }
570 
571  if(processNextLevel) {
572  // prepare coarse system
573  ++levelContext.residual;
574  ++levelContext.lhs;
575  ++levelContext.matrix;
576  ++levelContext.level;
577  ++levelContext.redist;
578 
579  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
580  // next level is not the globally coarsest one
581  ++levelContext.aggregates;
582  }
583  // prepare the lhs on the next level
584  *levelContext.lhs=0;
585  *levelContext.residual=0;
586  }
587  return processNextLevel;
588  }
589 
590  template<class M, class X, class PI, class A>
591  void FastAMG<M,X,PI,A>
592  ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
593  {
594  if(processNextLevel) {
595  if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
596  // previous level is not the globally coarsest one
597  --levelContext.aggregates;
598  }
599  --levelContext.redist;
600  --levelContext.level;
601  //prolongate and add the correction (update is in coarse left hand side)
602  --levelContext.matrix;
603  --levelContext.residual;
604 
605  }
606 
607  typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
608  if(levelContext.redist->isSetup()) {
609 
610  // Need to redistribute during prolongate
611  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
612  ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
613  levelContext.lhs.getRedistributed(),
614  matrices_->getProlongationDampingFactor(),
615  *levelContext.pinfo, *levelContext.redist);
616  }else{
617  Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
618  ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
619  matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
620 
621  // printvector(std::cout, *lhs, "prolongated coarse grid correction", "lhs", 10, 10, 10);
622  }
623 
624 
625  if(processNextLevel) {
626  --levelContext.rhs;
627  }
628 
629  }
630 
631 
632  template<class M, class X, class PI, class A>
633  void FastAMG<M,X,PI,A>
634  ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
635  {
636  constexpr auto bl = blockLevel<typename M::matrix_type>();
637  GaussSeidelPresmoothDefect<bl>::apply(levelContext.matrix->getmat(),
638  x,
639  *levelContext.residual,
640  b);
641  }
642 
643  template<class M, class X, class PI, class A>
644  void FastAMG<M,X,PI,A>
645  ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
646  {
647  constexpr auto bl = blockLevel<typename M::matrix_type>();
648  GaussSeidelPostsmoothDefect<bl>
649  ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
650  }
651 
652 
653  template<class M, class X, class PI, class A>
654  bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
655  {
656  return IsDirectSolver<CoarseSolver>::value;
657  }
658 
659  template<class M, class X, class PI, class A>
660  void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b){
661 
662  if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
663  // Solve directly
664  InverseOperatorResult res;
665  res.converged=true; // processes that do not participate in the coarse solve would otherwise never update this flag
666  if(levelContext.redist->isSetup()) {
667  levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
668  if(levelContext.rhs.getRedistributed().size()>0) {
669  // We are still participating in the computation
670  levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
671  levelContext.rhs.getRedistributed());
672  solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
673  }
674  levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
675  levelContext.pinfo->copyOwnerToAll(v, v);
676  }else{
677  levelContext.pinfo->copyOwnerToAll(b, b);
678  solver_->apply(v, const_cast<Range&>(b), res);
679  }
680 
681  // printvector(std::cout, *lhs, "coarse level update", "u", 10, 10, 10);
682  // printvector(std::cout, *rhs, "coarse level rhs", "rhs", 10, 10, 10);
683  if (!res.converged)
684  coarsesolverconverged = false;
685  }else{
686  // presmoothing
687  presmooth(levelContext, v, b);
688  // printvector(std::cout, *lhs, "update", "u", 10, 10, 10);
689  // printvector(std::cout, *residual, "post presmooth residual", "r", 10);
690 #ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
691  bool processNextLevel = moveToCoarseLevel(levelContext);
692 
693  if(processNextLevel) {
694  // next level
695  for(std::size_t i=0; i<gamma_; i++)
696  mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
697  }
698 
699  moveToFineLevel(levelContext, processNextLevel, v);
700 #else
701  *levelContext.lhs=0;
702 #endif
703 
704  if(levelContext.matrix == matrices_->matrices().finest()) {
705  coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
706  if(!coarsesolverconverged)
707  DUNE_THROW(MathError, "Coarse solver did not converge");
708  }
709 
710  postsmooth(levelContext, v, b);
711  }
712  }
713 
714 
716  template<class M, class X, class PI, class A>
717  void FastAMG<M,X,PI,A>::post([[maybe_unused]] Domain& x)
718  {
719  lhs_=nullptr;
720  rhs_=nullptr;
721  residual_=nullptr;
722  }
723 
724  template<class M, class X, class PI, class A>
725  template<class A1>
726  void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
727  {
728  matrices_->getCoarsestAggregatesOnFinest(cont);
729  }
730 
731  } // end namespace Amg
732 } // end namespace Dune
733 
734 #endif
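
Usage sketch (not part of the header): the fragment below assembles an illustrative 1-D Laplacian, wraps it in a MatrixAdapter and uses FastAMG as the preconditioner of a CG solver. The matrix size, the CoarsenCriterion/SymmetricCriterion settings and the solver tolerances are arbitrary example values; any symmetric positive definite operator and any ISTL Krylov solver could be substituted.

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/matrixhierarchy.hh>
#include <dune/istl/paamg/fastamg.hh>

int main()
{
  constexpr int N = 1000;
  using Block  = Dune::FieldMatrix<double,1,1>;
  using Matrix = Dune::BCRSMatrix<Block>;
  using Vector = Dune::BlockVector<Dune::FieldVector<double,1>>;

  // Assemble a tridiagonal (SPD) 1-D Laplacian in implicit build mode.
  Matrix A(N, N, 3, 0.2, Matrix::implicit);
  for (int i = 0; i < N; ++i) {
    A.entry(i, i) = 2.0;
    if (i > 0)     A.entry(i, i-1) = -1.0;
    if (i < N - 1) A.entry(i, i+1) = -1.0;
  }
  A.compress();

  Vector x(N), b(N);
  x = 0.0;
  b = 1.0;

  using Operator = Dune::MatrixAdapter<Matrix,Vector,Vector>;
  Operator op(A);

  // Aggregation-based coarsening; SymmetricCriterion matches the SPD matrix above.
  using Criterion = Dune::Amg::CoarsenCriterion<
      Dune::Amg::SymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> >;
  Criterion criterion(15,      // at most 15 levels
                      2000);   // coarsen until <= 2000 rows remain

  Dune::Amg::Parameters parms;
  parms.setNoPreSmoothSteps(1);   // FastAMG supports exactly one smoothing step
  parms.setNoPostSmoothSteps(1);

  Dune::Amg::FastAMG<Operator,Vector> amg(op, criterion, parms);

  // Use the AMG as preconditioner of a conjugate gradient solver.
  Dune::CGSolver<Vector> cg(op, amg, 1e-8, 200, 2);
  Dune::InverseOperatorResult res;
  cg.apply(x, b, res);

  return res.converged ? 0 : 1;
}

FastAMG is sequential only (PI must be SequentialInformation) and applies exactly one Gauss-Seidel pre- and post-smoothing step per level, so requesting more steps in the parameters only triggers the warning seen in the constructors above.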