#ifndef DUNE_ISTL_FASTAMG_HH
#define DUNE_ISTL_FASTAMG_HH
#include <cassert>
#include <iostream>
#include <memory>

#include <dune/common/exceptions.hh>
#include <dune/common/typetraits.hh>
#include <dune/common/unused.hh>
#include <dune/common/shared_ptr.hh>
#include <dune/common/timer.hh>
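
    /**
     * @brief A fast (sequential) algebraic multigrid preconditioner.
     *
     * Summary of the template parameters as they are used in this file:
     *   M  - the linear operator (fine level matrix) type,
     *   X  - the vector type for domain and range,
     *   PI - the parallel information; only SequentialInformation is
     *        supported (see the static asserts in the constructors),
     *   A  - the allocator used for the level hierarchies.
     *
     * One cycle applies a single Gauss-Seidel pre- and postsmoothing step per
     * level, combined with the defect computation to save memory bandwidth.
     */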
    template<class M, class X, class PI=SequentialInformation,
             class A=std::allocator<X> >
      // Recompute the Galerkin (coarse level) operators of the existing hierarchy.
      matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
      /** @brief Build the operator and parallel information hierarchies. */
      template<class C>
      void createHierarchies(C& criterion, Operator& matrix,
                             const PI& pinfo);
      /** @brief The iterator over the redistribution information. */
      typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
      /** @brief The iterator over the aggregates maps. */
      typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
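      /**
       * @brief Apply one multigrid cycle on the current level.
       *
       * Outline, following the definitions further below: presmooth the
       * iterate, restrict the resulting defect to the next coarser level
       * (moveToCoarseLevel), recurse gamma_ times on the coarse system
       * (gamma_==1 gives a V-cycle, gamma_==2 a W-cycle), prolongate and add
       * the damped coarse correction (moveToFineLevel), and postsmooth. On
       * the coarsest level the coarse solver is applied instead.
       */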
      void mgc(LevelContext& levelContext, Domain& x, const Range& b);

      /** @brief Apply one presmoothing step; the defect is updated in the same sweep. */
      void presmooth(LevelContext& levelContext, Domain& x, const Range& b);

      /** @brief Apply one postsmoothing step; the defect is updated in the same sweep. */
      void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);

      /** @brief Prolongate the coarse correction and move the level iterators back to the finer level. */
      void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
                           Domain& x);

      /** @brief Restrict the defect and move the level iterators to the next coarser level. */
      bool moveToCoarseLevel(LevelContext& levelContext);

      /** @brief Initialize the level iterators with the finest level. */
      void initIteratorsWithFineLevel(LevelContext& levelContext);
      /** @brief The hierarchy of operators (matrices). */
      Dune::shared_ptr<OperatorHierarchy> matrices_;
      /** @brief The solver of the coarsest level. */
      Dune::shared_ptr<CoarseSolver> solver_;

      typedef typename ScalarProductChooserType::ScalarProduct ScalarProduct;
      typedef Dune::shared_ptr<ScalarProduct> ScalarProductPointer;
      /** @brief The scalar product used on the coarsest level. */
      ScalarProductPointer scalarProduct_;

      /** @brief The number of presmoothing steps (at most one is supported). */
      std::size_t preSteps_;
      /** @brief The number of postsmoothing steps (at most one is supported). */
      std::size_t postSteps_;
      /** @brief Whether the hierarchy was built (and is owned) by this instance. */
      bool buildHierarchy_;
      /** @brief Whether the coarse solver converged. */
      bool coarsesolverconverged;

      typedef Dune::shared_ptr<Smoother> SmootherPointer;
      /** @brief The smoother used on the coarsest level. */
      SmootherPointer coarseSmoother_;
      /** @brief The verbosity level. */
      std::size_t verbosity_;
    template<class M, class X, class PI, class A>
    FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
      : matrices_(amg.matrices_), solver_(amg.solver_),
        rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
        gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
        buildHierarchy_(amg.buildHierarchy_),
        symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
        coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
    template<class M, class X, class PI, class A>
    // Constructor taking an externally built operator hierarchy and coarse
    // solver; the hierarchy is not owned by this instance (buildHierarchy_ == false).
      : matrices_(&matrices), solver_(&coarseSolver),
        rhs_(), lhs_(), residual_(), scalarProduct_(),
        gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
        postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
        symmetric(symmetric_), coarsesolverconverged(true),
        coarseSmoother_(), verbosity_(parms.debugLevel())
    {
      if(preSteps_>1 || postSteps_>1)
      {
        std::cerr<<"WARNING: only one step of smoothing is supported!"<<std::endl;
        preSteps_=postSteps_=1;
      }
      assert(matrices_->isBuilt());
      dune_static_assert((is_same<PI,SequentialInformation>::value),
                         "Currently only sequential runs are supported");
    }
    template<class M, class X, class PI, class A>
    // Constructor that builds the hierarchy itself from the fine level
    // operator, steered by the given coarsening criterion.
      : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
        preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
        buildHierarchy_(true),
        symmetric(symmetric_), coarsesolverconverged(true),
        coarseSmoother_(), verbosity_(criterion.debugLevel())
    {
      if(preSteps_>1 || postSteps_>1)
      {
        std::cerr<<"WARNING: only one step of smoothing is supported!"<<std::endl;
        preSteps_=postSteps_=1;
      }
      dune_static_assert((is_same<PI,SequentialInformation>::value),
                         "Currently only sequential runs are supported");

      createHierarchies(criterion, const_cast<Operator&>(matrix), pinfo);
    }
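
    /*
      A minimal usage sketch (not part of this header). The matrix assembly,
      the chosen criterion and the solver parameters below are assumptions for
      illustration only:

        typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > Matrix;
        typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
        typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;
        typedef Dune::Amg::FastAMG<Operator,Vector> AMG;

        Matrix A = assembleProblem();          // hypothetical user-provided assembly
        Vector x(A.N()), b(A.N());
        Operator fineOperator(A);

        // Coarsening criterion and parameters steering the setup.
        typedef Dune::Amg::CoarsenCriterion<
          Dune::Amg::UnSymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;
        Criterion criterion(15, 2000);         // max levels, coarsen target
        Dune::Amg::Parameters parms;

        AMG amg(fineOperator, criterion, parms);   // builds the hierarchy

        // Use the AMG as preconditioner of a Krylov solver.
        Dune::GeneralizedPCGSolver<Vector> solver(fineOperator, amg, 1e-8, 80, 2);
        Dune::InverseOperatorResult res;
        solver.apply(x, b, res);
    */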
    template<class M, class X, class PI, class A>
    FastAMG<M,X,PI,A>::~FastAMG()
    {
      if(buildHierarchy_) {
        // Only release what was created by this instance.
        coarseSmoother_.reset();
      }
    }
    template<class M, class X, class PI, class A>
    template<class C>
    void FastAMG<M,X,PI,A>::createHierarchies(C& criterion, Operator& matrix,
                                              const PI& pinfo)
    {
      Timer watch;
      matrices_.reset(new OperatorHierarchy(matrix, pinfo));

      matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);

      if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
        std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()
                 <<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
      if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
        // The hierarchy is complete; set up the smoother and the solver for
        // the coarsest level.
        typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
        SmootherArgs sargs;
        sargs.iterations = 1;

        typename ConstructionTraits<Smoother>::Arguments cargs;
        cargs.setArgs(sargs);
        if(matrices_->redistributeInformation().back().isSetup()) {
          // Solve on the redistributed partitioning.
          cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
          cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
        }else{
          cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
          cargs.setComm(*matrices_->parallelInformation().coarsest());
        }

        coarseSmoother_.reset(ConstructionTraits<Smoother>::construct(cargs));
        scalarProduct_.reset(ScalarProductChooserType::construct(cargs.getComm()));
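
        // Coarse solver selection (see the branches below): use a direct
        // solver (UMFPack or SuperLU) if one was found at configure time and
        // the coarsest level lives on a single process; otherwise fall back
        // to a BiCGSTAB iteration preconditioned with the coarse smoother as
        // an approximate coarse solver.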
#if HAVE_SUPERLU || HAVE_UMFPACK
        // Prefer UMFPack if available, otherwise SuperLU.
#if HAVE_UMFPACK
#define DIRECTSOLVER UMFPack
#else
#define DIRECTSOLVER SuperLU
#endif
        // Use the direct solver if we run sequentially or if only one process
        // participates on the coarsest level.
        if(is_same<ParallelInformation,SequentialInformation>::value
           || matrices_->parallelInformation().coarsest()->communicator().size()==1
           || (matrices_->parallelInformation().coarsest().isRedistributed()
               && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1)) {
          if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
            std::cout<<"Using a direct solver on the coarsest level"<<std::endl;
          if(matrices_->parallelInformation().coarsest().isRedistributed())
          {
            if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
              // This process still participates on the coarsest level.
              solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(
                              matrices_->matrices().coarsest().getRedistributed().getmat(),
                              false, false));
            else
              solver_.reset();
          }else
            solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(
                            matrices_->matrices().coarsest()->getmat(),
                            false, false));
        }else
#endif // HAVE_SUPERLU || HAVE_UMFPACK
        {
          // Fall back to BiCGSTAB preconditioned with the coarse smoother.
          if(matrices_->parallelInformation().coarsest().isRedistributed())
          {
            if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
              solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
                                                  *coarseSmoother_, 1E-2, 1000, 0)); // reduction, max iterations, verbosity
            else
              solver_.reset();
          }else
            solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
                                                *coarseSmoother_, 1E-2, 1000, 0));
        }
      }

      if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
        std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()
                 <<" levels (including the coarse solver setup) took "
                 <<watch.elapsed()<<" seconds."<<std::endl;
    }
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
    {
      Timer watch1;
      // Detect Dirichlet rows, i.e. rows whose off-diagonal entries are all
      // zero, and solve A_dd * x_d = b_d for them directly. This way users
      // may be less careful when setting up their linear systems.
      typedef typename M::matrix_type Matrix;
      typedef typename Matrix::ConstRowIterator RowIter;
      typedef typename Matrix::ConstColIterator ColIter;
      typedef typename Matrix::block_type Block;
      Block zero;
      zero=typename Matrix::field_type();

      const Matrix& mat=matrices_->matrices().finest()->getmat();
      for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
        bool isDirichlet = true;
        bool hasDiagonal = false;
        ColIter diag;
        for(ColIter col=row->begin(); col!=row->end(); ++col) {
          if(row.index()==col.index()) {
            diag = col;
            hasDiagonal = (*col != zero);
          }else if(*col != zero)
            isDirichlet = false;
        }
        if(isDirichlet && hasDiagonal)
          diag->solve(x[row.index()], b[row.index()]);
      }
      std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;

      // There is no smoother to make x consistent, do it by hand.
      matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);

      // Build the coarse level representations of the right hand side,
      // solution and residual hierarchies.
      matrices_->coarsenVector(*rhs_);
      matrices_->coarsenVector(*lhs_);
      matrices_->coarsenVector(*residual_);
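
    /*
      Orientation only: how an ISTL iterative solver typically drives this
      preconditioner (schematic sketch, names are placeholders):

        amg.pre(x, b);       // fix Dirichlet rows, set up the vector hierarchies
        while(not converged)
          amg.apply(v, d);   // one cycle approximating A*v = d; v must be zero on entry
        amg.post(x);         // clean up

      pre(), apply() and post() are the Preconditioner interface implemented
      by this class.
    */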
    template<class M, class X, class PI, class A>
    std::size_t FastAMG<M,X,PI,A>::levels()
    {
      return matrices_->levels();
    }
    template<class M, class X, class PI, class A>
    std::size_t FastAMG<M,X,PI,A>::maxlevels()
    {
      return matrices_->maxlevels();
    }
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
    {
      LevelContext levelContext;
      // Initialize all level iterators with the finest level.
      initIteratorsWithFineLevel(levelContext);

      // The preconditioner expects a zero initial guess.
      assert(v.two_norm()==0);

      if(matrices_->maxlevels()==1){
        // The coarse solver may modify the right hand side, so pass a copy.
        Range b(d);
        mgc(levelContext, v, b);
      }else
        mgc(levelContext, v, d);
      if(postSteps_==0 || matrices_->maxlevels()==1)
        levelContext.pinfo->copyOwnerToAll(v, v);
    }
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
    {
      levelContext.matrix = matrices_->matrices().finest();
      levelContext.pinfo = matrices_->parallelInformation().finest();
      levelContext.redist = matrices_->redistributeInformation().begin();
      levelContext.aggregates = matrices_->aggregatesMaps().begin();
      levelContext.lhs = lhs_->finest();
      levelContext.residual = residual_->finest();
      levelContext.rhs = rhs_->finest();
      levelContext.level = 0;
    }
    template<class M, class X, class PI, class A>
    bool FastAMG<M,X,PI,A>
    ::moveToCoarseLevel(LevelContext& levelContext)
    {
      bool processNextLevel = true;

      if(levelContext.redist->isSetup()) {
        // Redistribute the defect before restricting it.
        levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
                                          levelContext.residual.getRedistributed());
        processNextLevel = levelContext.residual.getRedistributed().size()>0;
        if(processNextLevel) {
          // Restrict the redistributed defect to the coarse right hand side.
          ++levelContext.pinfo;
                           static_cast<const Range&>(levelContext.residual.getRedistributed()),
                           *levelContext.pinfo);
        }
      }else{
        // Restrict the defect to the coarse right hand side.
        ++levelContext.pinfo;
                         static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
      }

      if(processNextLevel) {
        // Move the level iterators to the next coarser level.
        ++levelContext.residual;
        ++levelContext.matrix;
        ++levelContext.level;
        ++levelContext.redist;

        if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
          ++levelContext.aggregates;
        }
        // Prepare the coarse system.
        *levelContext.residual=0;
      }
      return processNextLevel;
    }
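
    /*
      moveToFineLevel is the counterpart of moveToCoarseLevel: it moves the
      level iterators back to the finer level and adds the prolongated coarse
      correction, damped by matrices_->getProlongationDampingFactor(), to the
      fine level solution x.
    */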
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::moveToFineLevel(LevelContext& levelContext,
                      bool processNextLevel, Domain& x)
    {
      if(processNextLevel) {
        if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
          --levelContext.aggregates;
        }
        // Move the level iterators back to the finer level.
        --levelContext.redist;
        --levelContext.level;
        --levelContext.matrix;
        --levelContext.residual;
      }

      if(levelContext.redist->isSetup()) {
        // Prolongate the redistributed coarse correction and add the damped
        // update to x.
                      levelContext.lhs.getRedistributed(),
                      matrices_->getProlongationDampingFactor(),
                      *levelContext.pinfo, *levelContext.redist);
      }else{
        // Prolongate the coarse correction and add the damped update to x.
                    matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
      }

      if(processNextLevel) {
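
    /*
      presmooth() and postsmooth() below are where the "fast" in FastAMG comes
      from: the single Gauss-Seidel sweep is combined with the computation of
      the defect (passed in as *levelContext.residual), so smoothing and
      defect calculation share one traversal of the matrix instead of two,
      which keeps the memory bandwidth requirements low.
    */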
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::presmooth(LevelContext& levelContext, Domain& x,
                const Range& b)
    {
      // One fused Gauss-Seidel sweep that also updates the defect.
                     *levelContext.residual,
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>
    ::postsmooth(LevelContext& levelContext, Domain& x,
                 const Range& b)
    {
      // One fused Gauss-Seidel sweep that also updates the defect.
        ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
    }
    template<class M, class X, class PI, class A>
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b)
    {
      if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
        // We have reached the coarsest level: apply the coarse solver.
        InverseOperatorResult res;
        if(levelContext.redist->isSetup()) {
          levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
          if(levelContext.rhs.getRedistributed().size()>0) {
            // Make the right hand side consistent and solve on the
            // redistributed partitioning.
            levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
                                                                 levelContext.rhs.getRedistributed());
            solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
          }
          levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
          levelContext.pinfo->copyOwnerToAll(v, v);
        }else{
          levelContext.pinfo->copyOwnerToAll(b, b);
          solver_->apply(v, const_cast<Range&>(b), res);
        }

        if(!res.converged)
          coarsesolverconverged = false;
      }else{
        // Finer levels: smoothing and coarse grid correction.
#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
        bool processNextLevel = moveToCoarseLevel(levelContext);

        if(processNextLevel) {
          // Solve the coarse system gamma_ times (gamma_==1: V-cycle, gamma_==2: W-cycle).
          for(std::size_t i=0; i<gamma_; i++)
            mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
        }

        moveToFineLevel(levelContext, processNextLevel, v);
#endif

        if(levelContext.matrix == matrices_->matrices().finest()) {
          coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
          if(!coarsesolverconverged)
            DUNE_THROW(MathError,
                       "Coarse solver did not converge");
        }
      }
    }
    template<class M, class X, class PI, class A>
    void FastAMG<M,X,PI,A>::post(Domain& x)
    {
      DUNE_UNUSED_PARAMETER(x);
    template<class M, class X, class PI, class A>
    // Write the aggregate number of each unknown on the coarsest level,
    // mapped to the finest level, into cont.
      matrices_->getCoarsestAggregatesOnFinest(cont);