#ifndef DUNE_ISTL_FASTAMG_HH
#define DUNE_ISTL_FASTAMG_HH

#include "fastamgsmoother.hh"

    template<class M, class X, class PI=SequentialInformation, class A=std::allocator<X> >
    class FastAMG : public Preconditioner<X,X>
 
      std::size_t levels();

      std::size_t maxlevels();

      template<class C>
      void createHierarchies(C& criterion, Operator& matrix, const PI& pinfo);
 
        typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;

        typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
 
      void mgc(LevelContext& levelContext, Domain& x, const Range& b);

      void presmooth(LevelContext& levelContext, Domain& x, const Range& b);

      void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);

      void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel, Domain& x);

      bool moveToCoarseLevel(LevelContext& levelContext);

      void initIteratorsWithFineLevel(LevelContext& levelContext);
 
      std::shared_ptr<OperatorHierarchy> matrices_;

      std::shared_ptr<CoarseSolver> solver_;

      typedef typename ScalarProductChooserType::ScalarProduct ScalarProduct;
      typedef std::shared_ptr<ScalarProduct> ScalarProductPointer;

      ScalarProductPointer scalarProduct_;

      std::size_t preSteps_;

      std::size_t postSteps_;

      bool buildHierarchy_;

      bool coarsesolverconverged;

      typedef std::shared_ptr<Smoother> SmootherPointer;
      SmootherPointer coarseSmoother_;

      std::size_t verbosity_;
 
    template<class M, class X, class PI, class A>
    FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
    : matrices_(amg.matrices_), solver_(amg.solver_),
      rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
      gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
      symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
      coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
 
    template<class M, class X, class PI, class A>
    FastAMG<M,X,PI,A>::FastAMG(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
                               const Parameters& parms, bool symmetric_)
      : matrices_(&matrices), solver_(&coarseSolver),
        rhs_(), lhs_(), residual_(), scalarProduct_(),
        gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
        postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
        symmetric(symmetric_), coarsesolverconverged(true),
        coarseSmoother_(), verbosity_(parms.debugLevel())
    {
      if(preSteps_>1||postSteps_>1)
      {
        std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
        preSteps_=postSteps_=0;
      }
      assert(matrices_->isBuilt());
      static_assert(is_same<PI,SequentialInformation>::value,
                    "Currently only sequential runs are supported");
    }
 
    template<class M, class X, class PI, class A>
    template<class C>
    FastAMG<M,X,PI,A>::FastAMG(const Operator& matrix, const C& criterion,
                               const Parameters& parms, bool symmetric_,
                               const ParallelInformation& pinfo)
      : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
        preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
        buildHierarchy_(true),
        symmetric(symmetric_), coarsesolverconverged(true),
        coarseSmoother_(), verbosity_(criterion.debugLevel())
    {
      if(preSteps_>1||postSteps_>1)
      {
        std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
        preSteps_=postSteps_=1;
      }
      static_assert(is_same<PI,SequentialInformation>::value,
                    "Currently only sequential runs are supported");

      createHierarchies(criterion, const_cast<Operator&>(matrix), pinfo);
    }
 
    template<class M, class X, class PI, class A>
    FastAMG<M,X,PI,A>::~FastAMG()
    {
      if(buildHierarchy_) {

          coarseSmoother_.reset();
 
    template<class M, class X, class PI, class A>
    template<class C>
    void FastAMG<M,X,PI,A>::createHierarchies(C& criterion, Operator& matrix,
                                              const PI& pinfo)
    {
      Timer watch;
      matrices_.reset(new OperatorHierarchy(matrix, pinfo));

      matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);

      if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
        std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;

      if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
        // set up the smoother and the solver for the coarsest level
        typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
        SmootherArgs sargs;
        sargs.iterations = 1;

        typename ConstructionTraits<Smoother>::Arguments cargs;
        cargs.setArgs(sargs);
        if(matrices_->redistributeInformation().back().isSetup()) {
          // use the redistributed coarse matrix and its communication
          cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
          cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
        }else{
          cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
          cargs.setComm(*matrices_->parallelInformation().coarsest());
        }

        coarseSmoother_.reset(ConstructionTraits<Smoother>::construct(cargs));
        scalarProduct_.reset(ScalarProductChooserType::construct(cargs.getComm()));
 
#if HAVE_SUPERLU || HAVE_UMFPACK
#if HAVE_UMFPACK
#define DIRECTSOLVER UMFPack
#else
#define DIRECTSOLVER SuperLU
#endif
        // use a direct solver on the coarsest level whenever it lives on a single process
        if(is_same<ParallelInformation,SequentialInformation>::value
           || matrices_->parallelInformation().coarsest()->communicator().size()==1
           || (matrices_->parallelInformation().coarsest().isRedistributed()
               && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
               && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) {
          if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
            std::cout<<"Using superlu"<<std::endl;
          if(matrices_->parallelInformation().coarsest().isRedistributed())
          {
            if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
              // this process still participates on the redistributed coarse level
              solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
          }else
            solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(matrices_->matrices().coarsest()->getmat(), false, false));
        }else
#endif
        {
          // otherwise fall back to a BiCGSTAB solver preconditioned with the coarse smoother
          if(matrices_->parallelInformation().coarsest().isRedistributed())
          {
            if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
              solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
                                                  *coarseSmoother_, 1E-2, 1000, 0));
          }else
            solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
                                                *coarseSmoother_, 1E-2, 1000, 0));
        }
      }

      if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
        std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
    }
 
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
    {
      // detect Dirichlet rows on the finest level and satisfy them exactly before iterating
      typedef typename M::matrix_type Matrix;

      const Matrix& mat=matrices_->matrices().finest()->getmat();
      for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
        bool isDirichlet = true;
        bool hasDiagonal = false;

        for(ColIter col=row->begin(); col!=row->end(); ++col) {
          if(row.index()==col.index()) {

        if(isDirichlet && hasDiagonal)
          diag->solve(x[row.index()], b[row.index()]);

      std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;

      matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);

      // build the hierarchies of right hand side, solution and residual vectors
      matrices_->coarsenVector(*rhs_);
      matrices_->coarsenVector(*lhs_);
      matrices_->coarsenVector(*residual_);
 
    template<class M, class X, class PI, class A>
    std::size_t FastAMG<M,X,PI,A>::levels()
    {
      return matrices_->levels();
    }

    template<class M, class X, class PI, class A>
    std::size_t FastAMG<M,X,PI,A>::maxlevels()
    {
      return matrices_->maxlevels();
    }
 
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
    {
      LevelContext levelContext;

      initIteratorsWithFineLevel(levelContext);

      assert(v.two_norm()==0);

      if(matrices_->maxlevels()==1){
        // the coarse solver may overwrite its right hand side, so work on a copy
        Range b(d);
        mgc(levelContext, v, b);
      }else
        mgc(levelContext, v, d);
      if(postSteps_==0||matrices_->maxlevels()==1)
        levelContext.pinfo->copyOwnerToAll(v, v);
    }
 
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
    {
      levelContext.matrix = matrices_->matrices().finest();
      levelContext.pinfo = matrices_->parallelInformation().finest();
      levelContext.redist =
        matrices_->redistributeInformation().begin();
      levelContext.aggregates = matrices_->aggregatesMaps().begin();
      levelContext.lhs = lhs_->finest();
      levelContext.residual = residual_->finest();
      levelContext.rhs = rhs_->finest();
      levelContext.level=0;
    }
 
    template<class M, class X, class PI, class A>
    bool FastAMG<M,X,PI,A>
    ::moveToCoarseLevel(LevelContext& levelContext)
    {
      bool processNextLevel=true;

      if(levelContext.redist->isSetup()) {
        // restrict the defect via the redistributed vectors
        levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
                                          levelContext.residual.getRedistributed());
        processNextLevel = levelContext.residual.getRedistributed().size()>0;
        if(processNextLevel) {

          ++levelContext.pinfo;
          Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
          ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                           static_cast<const Range&>(levelContext.residual.getRedistributed()),
                           *levelContext.pinfo);
        }
      }else{

        ++levelContext.pinfo;
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                         static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
      }

      if(processNextLevel) {
        // advance all iterators to the next coarser level
        ++levelContext.residual;

        ++levelContext.matrix;
        ++levelContext.level;
        ++levelContext.redist;

        if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
          ++levelContext.aggregates;
        }

        *levelContext.residual=0;
      }
      return processNextLevel;
    }
 
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
    {
      if(processNextLevel) {
        if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
          // move the aggregates map back to the finer level
          --levelContext.aggregates;
        }
        --levelContext.redist;
        --levelContext.level;

        --levelContext.matrix;
        --levelContext.residual;
      }

      if(levelContext.redist->isSetup()) {
        // prolongate the coarse correction via the redistributed vectors
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                           levelContext.lhs.getRedistributed(),
                           matrices_->getProlongationDampingFactor(),
                           *levelContext.pinfo, *levelContext.redist);
      }else{
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                           matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
      }

      if(processNextLevel) {
 
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
    {
      GaussSeidelPresmoothDefect<M::matrix_type::blocklevel>::apply(levelContext.matrix->getmat(),
                                                                    x,
                                                                    *levelContext.residual,
                                                                    b);
    }

    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
    {
      GaussSeidelPostsmoothDefect<M::matrix_type::blocklevel>
      ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
    }
 
    template<class M, class X, class PI, class A>
    bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
    {
      return IsDirectSolver<CoarseSolver>::value;
    }
 
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b)
    {
      if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
        // solve directly on the coarsest level
        InverseOperatorResult res;

        if(levelContext.redist->isSetup()) {
          levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
          if(levelContext.rhs.getRedistributed().size()>0) {
            // this process still participates on the redistributed coarse level
            levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
                                                                 levelContext.rhs.getRedistributed());
            solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
          }
          levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
          levelContext.pinfo->copyOwnerToAll(v, v);
        }else{
          levelContext.pinfo->copyOwnerToAll(b, b);
          solver_->apply(v, const_cast<Range&>(b), res);
        }

        if(!res.converged)
          coarsesolverconverged = false;
      }else{
        // pre-smooth, restrict the defect, recurse, prolongate and post-smooth

#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
        bool processNextLevel = moveToCoarseLevel(levelContext);

        if(processNextLevel) {
          // recurse gamma_ times on the coarser level (V- or W-cycle)
          for(std::size_t i=0; i<gamma_; i++)
            mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
        }

        moveToFineLevel(levelContext, processNextLevel, v);
#endif

        if(levelContext.matrix == matrices_->matrices().finest()) {
          coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
          if(!coarsesolverconverged)
            DUNE_THROW(MathError, "Coarse solver did not converge");
        }
 
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::post(Domain& x)

    template<class M, class X, class PI, class A>
    template<class A1>
    void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
    {
      matrices_->getCoarsestAggregatesOnFinest(cont);
    }
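
To make the FastAMG interface shown above concrete, here is a minimal usage sketch (not part of fastamg.hh itself). It assembles a small 1-D Laplacian, builds a FastAMG preconditioner from a coarsening criterion, and uses it inside a CG solver; the grid size, criterion parameters and solver settings are illustrative values only, not recommendations from this header.

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/paamg/aggregates.hh>
#include <dune/istl/paamg/fastamg.hh>

int main()
{
  const int N = 5000;
  typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > Matrix;
  typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;

  // assemble the standard 1-D Laplacian stencil [-1 2 -1]
  Matrix mat(N, N, 3*N, Matrix::row_wise);
  for(auto row = mat.createbegin(); row != mat.createend(); ++row) {
    if(row.index() > 0)   row.insert(row.index()-1);
    row.insert(row.index());
    if(row.index() < N-1) row.insert(row.index()+1);
  }
  for(int i = 0; i < N; ++i) {
    mat[i][i] = 2.0;
    if(i > 0)   mat[i][i-1] = -1.0;
    if(i < N-1) mat[i][i+1] = -1.0;
  }

  Vector x(N), b(N);
  x = 0.0;
  b = 1.0;

  // wrap the matrix as a linear operator and set up the coarsening criterion
  typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;
  Operator fop(mat);
  typedef Dune::Amg::CoarsenCriterion<
    Dune::Amg::SymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;
  Criterion criterion(15, 100);        // max. levels, coarsen target (illustrative)
  Dune::Amg::Parameters parms;

  // build the (sequential) FastAMG preconditioner and use it inside CG;
  // the Krylov solver calls pre(), apply() and post() internally
  typedef Dune::Amg::FastAMG<Operator,Vector> AMG;
  AMG amg(fop, criterion, parms);
  Dune::CGSolver<Vector> cg(fop, amg, 1e-8, 80, 1);
  Dune::InverseOperatorResult res;
  cg.apply(x, b, res);
  return 0;
}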
 
A fast (sequential) algebraic multigrid based on agglomeration that saves memory bandwidth.
Definition: fastamg.hh:59
 
LevelIterator< Hierarchy< ParallelInformation, Allocator >, ParallelInformation > Iterator
Type of the mutable iterator.
Definition: hierarchy.hh:257
 
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition: hierarchy.hh:260
 
The hierarchies built by the coarsening process.
Definition: hierarchy.hh:317
 
All parameters for AMG.
Definition: parameters.hh:391
 
A generic dynamic dense matrix.
Definition: matrix.hh:25
 
T::field_type field_type
Export the type representing the underlying field.
Definition: matrix.hh:29
 
row_type::const_iterator ConstColIterator
Const iterator for the entries of each row.
Definition: matrix.hh:53
 
T block_type
Export the type representing the components.
Definition: matrix.hh:32
 
The negation of a set. An item is contained in the set if and only if it is not contained in the nega...
Definition: enumset.hh:95
 
Base class for matrix free definition of preconditioners.
Definition: preconditioner.hh:26
 
Sequential SSOR preconditioner.
Definition: preconditioners.hh:127
 
A simple stop watch.
Definition: timer.hh:52
 
void reset()
Reset timer while keeping the running/stopped state.
Definition: timer.hh:66
 
double elapsed() const
Get elapsed user-time from last reset until now/last stop in seconds.
Definition: timer.hh:86
 
ConstIterator class for sequential access.
Definition: vbvector.hh:647
 
A few common exception classes.
 
#define DUNE_THROW(E, m)
Definition: exceptions.hh:243
 
OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix
The iterator over the matrices.
Definition: fastamg.hh:183
 
void getCoarsestAggregateNumbers(std::vector< std::size_t, A1 > &cont)
Get the aggregate number of each unknown on the coarsest level.
Definition: fastamg.hh:741
 
ParallelInformationHierarchy::Iterator pinfo
The iterator over the parallel information.
Definition: fastamg.hh:187
 
void recalculateHierarchy()
Recalculate the matrix hierarchy.
Definition: fastamg.hh:150
 
void post(Domain &x)
Clean up.
Definition: fastamg.hh:728
 
X Domain
The domain type.
Definition: fastamg.hh:76
 
Hierarchy< Domain, A >::Iterator residual
The iterator over the residuals.
Definition: fastamg.hh:203
 
MatrixHierarchy< M, ParallelInformation, A > OperatorHierarchy
The operator hierarchy type.
Definition: fastamg.hh:71
 
OperatorHierarchy::RedistributeInfoList::const_iterator redist
The iterator over the redistribution information.
Definition: fastamg.hh:191
 
X Range
The range type.
Definition: fastamg.hh:78
 
PI ParallelInformation
The type of the parallel information. Either OwnerOverlapCommunication or another type describing the...
Definition: fastamg.hh:69
 
M Operator
The matrix operator type.
Definition: fastamg.hh:62
 
InverseOperator< X, X > CoarseSolver
the type of the coarse solver.
Definition: fastamg.hh:80
 
bool usesDirectCoarseLevelSolver() const
Check whether the coarse solver used is a direct solver.
Definition: fastamg.hh:661
 
Hierarchy< Domain, A >::Iterator lhs
The iterator over the left hand side.
Definition: fastamg.hh:199
 
Hierarchy< Range, A >::Iterator rhs
The iterator over the right hand side.
Definition: fastamg.hh:207
 
std::size_t level
The level index.
Definition: fastamg.hh:211
 
void apply(Domain &v, const Range &d)
Apply one step of the preconditioner to the system A(v)=d.
Definition: fastamg.hh:519
 
FastAMG(const OperatorHierarchy &matrices, CoarseSolver &coarseSolver, const Parameters &parms, bool symmetric=true)
Construct a new amg with a specific coarse solver.
Definition: fastamg.hh:306
 
OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy
The parallel data distribution hierarchy type.
Definition: fastamg.hh:73
 
void pre(Domain &x, Range &b)
Prepare the preconditioner.
Definition: fastamg.hh:452
 
OperatorHierarchy::AggregatesMapList::const_iterator aggregates
The iterator over the aggregates maps.
Definition: fastamg.hh:195
 
@ category
The solver category.
Definition: fastamg.hh:84
 
void presmooth(LevelContext &levelContext, size_t steps)
Apply pre smoothing on the current level.
Definition: smoother.hh:408
 
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition: construction.hh:44
 
void postsmooth(LevelContext &levelContext, size_t steps)
Apply post smoothing on the current level.
Definition: smoother.hh:430
 
static T * construct(Arguments &args)
Construct an object with the specified arguments.
Definition: construction.hh:52
 
Provides a classes representing the hierarchies in AMG.
 
Some generic functions for pretty printing vectors and matrices.
 
Dune namespace.
Definition: alignment.hh:10
 
Define general preconditioner interface.
 
Define base class for scalar product and norm.
 
Classes for the generic construction and application of the smoothers.
 
Implementations of the inverse operator interface.
 
Templates characterizing the type of a solver.
 
Statistics about the application of an inverse operator.
Definition: solver.hh:32
 
bool converged
True if convergence criterion has been met.
Definition: solver.hh:56
 
Choose the appropriate scalar product for a solver category.
Definition: scalarproducts.hh:77
 
@ sequential
Category for sequential solvers.
Definition: solvercategory.hh:21
 
Classes for using SuperLU with ISTL matrices.
 
Prolongation and restriction for amg.
 
Traits for type conversions and type information.
 
Classes for using UMFPack with ISTL matrices.
 
Definition of the DUNE_UNUSED macro for the case that config.h is not available.
 
#define DUNE_UNUSED_PARAMETER(parm)
A macro to mark intentionally unused function parameters with.
Definition: unused.hh:18