   3  #ifndef DUNE_ISTL_REPARTITION_HH
   4  #define DUNE_ISTL_REPARTITION_HH
  57    template<class G, class T1, class T2>
  61      typedef typename IndexSet::LocalIndex::Attribute Attribute;
  67      typedef typename G::ConstVertexIterator VertexIterator;
  70      std::size_t sum=0, needed = graph.noVertices()-indexSet.size();
  71      std::vector<std::size_t> neededall(oocomm.communicator().size(), 0);
  74      for(int i=0; i<oocomm.communicator().size(); ++i)
  83      typedef typename IndexSet::const_iterator Iterator;
  86      for(Iterator it = indexSet.begin(); it != end; ++it)
  87        maxgi=std::max(maxgi,it->global());
  92      maxgi=oocomm.communicator().max(maxgi);
  95      for(int i=0; i<oocomm.communicator().rank(); ++i)
  96        maxgi=maxgi+neededall[i];
  99      std::map<int,SLList<std::pair<T1,Attribute> > > globalIndices;
 101      indexSet.beginResize();
 103      for(VertexIterator vertex = graph.begin(), vend=graph.end(); vertex != vend; ++vertex) {
 104        const typename IndexSet::IndexPair* pair=lookup.pair(*vertex);
 107          indexSet.add(maxgi, typename IndexSet::LocalIndex(*vertex, OwnerOverlapCopyAttributeSet::owner, false));
 112      indexSet.endResize();
 116      oocomm.freeGlobalLookup();
 117      oocomm.buildGlobalLookup();
 119      std::cout<<"Holes are filled!"<<std::endl;
 120      std::cout<<oocomm.communicator().rank()<<": "<<oocomm.indexSet()<<std::endl;
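The fragment above is the core of fillIndexSetHoles: vertices that are not yet in the parallel index set receive fresh global indices, numbered consistently behind the current per-process maximum. A minimal usage sketch, assuming a matrix wrapped in a Dune::Amg::MatrixGraph (the wrapper, the attribute parameters and the function name are illustrative assumptions, not prescribed by this header):

#include <dune/istl/repartition.hh>
#include <dune/istl/owneroverlapcopy.hh>
#include <dune/istl/paamg/graph.hh>

template<class Matrix>
void assignMissingGlobalIndices(Matrix& A,
                                Dune::OwnerOverlapCopyCommunication<int,int>& oocomm)
{
  // Every matrix row becomes a graph vertex.
  Dune::Amg::MatrixGraph<Matrix> graph(A);
  // Vertices missing from oocomm.indexSet() get new global indices,
  // so the ParMETIS data structures later see a complete numbering.
  Dune::fillIndexSetHoles(graph, oocomm);
}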
 
 127    class ParmetisDuneIndexMap
 132  #if PARMETIS_MAJOR_VERSION > 3
 133      typedef idx_t idxtype;
 138      typedef std::size_t idxtype;
 141      template<class Graph, class OOComm>
 142      ParmetisDuneIndexMap(const Graph& graph, const OOComm& com);
 143      int toParmetis(int i) const
 145        return duneToParmetis[i];
 147      int toLocalParmetis(int i) const
 149        return duneToParmetis[i]-base_;
 151      int operator[](int i) const
 153        return duneToParmetis[i];
 155      int toDune(int i) const
 157        return parmetisToDune[i];
 159      std::vector<int>::size_type numOfOwnVtx() const
 161        return parmetisToDune.size();
 167      int globalOwnerVertices;
 170      std::vector<int> duneToParmetis;
 171      std::vector<int> parmetisToDune;
 173      std::vector<idxtype> vtxDist_;
 176    template<class G, class OOComm>
 177    ParmetisDuneIndexMap::ParmetisDuneIndexMap(const G& graph, const OOComm& oocomm)
 178      : duneToParmetis(graph.noVertices(), -1), vtxDist_(oocomm.communicator().size()+1)
 180      int npes=oocomm.communicator().size(), mype=oocomm.communicator().rank();
 182      typedef typename OOComm::ParallelIndexSet::const_iterator Iterator;
 183      typedef typename OOComm::OwnerSet OwnerSet;
 186      Iterator end = oocomm.indexSet().end();
 187      for(Iterator index = oocomm.indexSet().begin(); index != end; ++index) {
 188        if (OwnerSet::contains(index->local().attribute())) {
 192      parmetisToDune.resize(numOfOwnVtx);
 193      std::vector<int> globalNumOfVtx(npes);
 195      MPI_Allgather(&numOfOwnVtx, 1, MPI_INT, &(globalNumOfVtx[0]), 1, MPI_INT, oocomm.communicator());
 199      for(int i=0; i<npes; i++) {
 201          base += globalNumOfVtx[i];
 203        vtxDist_[i+1] = vtxDist_[i] + globalNumOfVtx[i];
 205      globalOwnerVertices=vtxDist_[npes];
 209      typedef typename G::ConstVertexIterator VertexIterator;
 211      std::cout << oocomm.communicator().rank()<<" vtxDist: ";
 212      for(int i=0; i<= npes; ++i)
 213        std::cout << vtxDist_[i]<<" ";
 214      std::cout<<std::endl;
 221      VertexIterator vend = graph.end();
 222      for(VertexIterator vertex = graph.begin(); vertex != vend; ++vertex) {
 223        const typename OOComm::ParallelIndexSet::IndexPair* index=oocomm.globalLookup().pair(*vertex);
 225        if (OwnerSet::contains(index->local().attribute())) {
 227          parmetisToDune[base-base_]=index->local();
 228          duneToParmetis[index->local()] = base++;
 238      std::cout <<oocomm.communicator().rank()<<": before ";
 239      for(std::size_t i=0; i<duneToParmetis.size(); ++i)
 240        std::cout<<duneToParmetis[i]<<" ";
 241      std::cout<<std::endl;
 243      oocomm.copyOwnerToAll(duneToParmetis,duneToParmetis);
 245      std::cout <<oocomm.communicator().rank()<<": after ";
 246      for(std::size_t i=0; i<duneToParmetis.size(); ++i)
 247        std::cout<<duneToParmetis[i]<<" ";
 248      std::cout<<std::endl;
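The constructor above establishes the ParMETIS vertex distribution: vtxDist_ is the exclusive prefix sum of the number of owner vertices per process, so process p owns the consecutive ParMETIS vertex numbers vtxDist_[p] to vtxDist_[p+1]-1. A small standalone illustration of that computation (the counts are made-up example values):

#include <iostream>
#include <vector>

int main()
{
  // Owner-vertex counts per process, e.g. gathered with MPI_Allgather.
  std::vector<int> globalNumOfVtx;
  globalNumOfVtx.push_back(4);
  globalNumOfVtx.push_back(3);
  globalNumOfVtx.push_back(5);

  // Exclusive prefix sum, as in the constructor above.
  std::vector<int> vtxDist(globalNumOfVtx.size()+1, 0);
  for(std::size_t i=0; i<globalNumOfVtx.size(); ++i)
    vtxDist[i+1] = vtxDist[i] + globalNumOfVtx[i];

  // Prints 0 4 7 12: process p owns ParMETIS vertices vtxDist[p]..vtxDist[p+1]-1.
  for(std::size_t i=0; i<vtxDist.size(); ++i)
    std::cout << vtxDist[i] << " ";
  std::cout << std::endl;
  return 0;
}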
 
 253  struct RedistributeInterface
 256    void setCommunicator(MPI_Comm comm)
 260    template<class Flags, class IS>
 261    void buildSendInterface(const std::vector<int>& toPart, const IS& idxset)
 263      std::map<int,int> sizes;
 265      typedef typename IS::const_iterator IIter;
 266      for(IIter i=idxset.begin(), end=idxset.end(); i!=end; ++i)
 267        if(Flags::contains(i->local().attribute()))
 268          ++sizes[toPart[i->local()]];
 271      typedef std::map<int,int>::const_iterator MIter;
 272      for(MIter i=sizes.begin(), end=sizes.end(); i!=end; ++i)
 273        interfaces()[i->first].first.reserve(i->second);
 276      typedef typename IS::const_iterator IIter;
 277      for(IIter i=idxset.begin(), end=idxset.end(); i!=end; ++i)
 278        if(Flags::contains(i->local().attribute()))
 279          interfaces()[toPart[i->local()]].first.add(i->local());
 282    void reserveSpaceForReceiveInterface(int proc, int size)
 286    void addReceiveIndex(int proc, std::size_t idx)
 290    template<typename TG>
 291    void buildReceiveInterface(std::vector<std::pair<TG,int> >& indices)
 293      typedef typename std::vector<std::pair<TG,int> >::const_iterator VIter;
 295      for(VIter idx=indices.begin(); idx!= indices.end(); ++idx) {
 300    ~RedistributeInterface()
 308    typedef ParmetisDuneIndexMap::idxtype idxtype;
 320    void createSendBuf(std::vector<GI>& ownerVec, std::set<GI>& overlapVec, std::set<int>& neighbors, char *sendBuf, int buffersize, MPI_Comm comm) {
 322      std::size_t s=ownerVec.size();
 326      MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
 327      MPI_Pack(&(ownerVec[0]), s, MPITraits<GI>::getType(), sendBuf, buffersize, &pos, comm);
 328      s = overlapVec.size();
 329      MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
 330      typedef typename std::set<GI>::iterator Iter;
 331      for(Iter i=overlapVec.begin(), end= overlapVec.end(); i != end; ++i)
 332        MPI_Pack(const_cast<GI*>(&(*i)), 1, MPITraits<GI>::getType(), sendBuf, buffersize, &pos, comm);
 335      MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
 336      typedef typename std::set<int>::iterator IIter;
 338      for(IIter i=neighbors.begin(), end= neighbors.end(); i != end; ++i)
 339        MPI_Pack(const_cast<int*>(&(*i)), 1, MPI_INT, sendBuf, buffersize, &pos, comm);
 350    void saveRecvBuf(char *recvBuf, int bufferSize, std::vector<std::pair<GI,int> >& ownerVec,
 351                     std::set<GI>& overlapVec, std::set<int>& neighbors, RedistributeInterface& inf, int from, MPI_Comm comm) {
 355      MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
 356      inf.reserveSpaceForReceiveInterface(from, size);
 357      ownerVec.reserve(ownerVec.size()+size);
 358      for(; size!=0; --size) {
 360        MPI_Unpack(recvBuf, bufferSize, &pos, &gi, 1, MPITraits<GI>::getType(), comm);
 361        ownerVec.push_back(std::make_pair(gi,from));
 364      MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
 365      typename std::set<GI>::iterator ipos = overlapVec.begin();
 366      Dune::dverb << "unpacking "<<size<<" overlap"<<std::endl;
 367      for(; size!=0; --size) {
 369        MPI_Unpack(recvBuf, bufferSize, &pos, &gi, 1, MPITraits<GI>::getType(), comm);
 370        ipos=overlapVec.insert(ipos, gi);
 373      MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
 374      Dune::dverb << "unpacking "<<size<<" neighbors"<<std::endl;
 375      typename std::set<int>::iterator npos = neighbors.begin();
 376      for(; size!=0; --size) {
 378        MPI_Unpack(recvBuf, bufferSize, &pos, &n, 1, MPI_INT, comm);
 379        npos=neighbors.insert(npos, n);
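createSendBuf and saveRecvBuf are mirror images of each other: every block of data is preceded by its length, so the receiver can unpack the buffer without knowing the sizes in advance. A minimal sketch of that pattern with plain ints (buffer sizing via MPI_Pack_size; the function names are chosen only for illustration):

#include <mpi.h>
#include <vector>

// Pack a length-prefixed block of ints into buf (resized to fit).
void packInts(const std::vector<int>& data, std::vector<char>& buf, MPI_Comm comm)
{
  int bytes=0, tmp=0, pos=0;
  MPI_Pack_size(1, MPI_INT, comm, &tmp);                              bytes += tmp;
  MPI_Pack_size(static_cast<int>(data.size()), MPI_INT, comm, &tmp);  bytes += tmp;
  buf.resize(bytes);

  int count = static_cast<int>(data.size());
  MPI_Pack(&count, 1, MPI_INT, &buf[0], bytes, &pos, comm);
  if(count>0)
    MPI_Pack(const_cast<int*>(&data[0]), count, MPI_INT, &buf[0], bytes, &pos, comm);
}

// Unpack the length prefix first, then exactly that many ints.
void unpackInts(std::vector<char>& buf, std::vector<int>& data, MPI_Comm comm)
{
  int pos=0, count=0;
  MPI_Unpack(&buf[0], static_cast<int>(buf.size()), &pos, &count, 1, MPI_INT, comm);
  data.resize(count);
  if(count>0)
    MPI_Unpack(&buf[0], static_cast<int>(buf.size()), &pos, &data[0], count, MPI_INT, comm);
}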
 
 397    void getDomain(const MPI_Comm& comm, T *part, int numOfOwnVtx, int nparts, int *myDomain, std::vector<int> &domainMapping) {
 399      MPI_Comm_size(comm, &npes);
 400      MPI_Comm_rank(comm, &mype);
 407      std::vector<int> domain(nparts, 0);
 408      std::vector<int> assigned(npes, 0);
 410      domainMapping.assign(domainMapping.size(), -1);
 413      for (i=0; i<numOfOwnVtx; i++) {
 417      std::vector<int> domainMatrix(npes * nparts, -1);
 420      int *buf = new int[nparts];
 421      for (i=0; i<nparts; i++) {
 423        domainMatrix[mype*nparts+i] = domain[i];
 426      int src = (mype-1+npes)%npes;
 427      int dest = (mype+1)%npes;
 429      for (i=0; i<npes-1; i++) {
 430        MPI_Sendrecv_replace(buf, nparts, MPI_INT, dest, 0, src, 0, comm, &status);
 432        pe = ((mype-1-i)+npes)%npes;
 433        for(j=0; j<nparts; j++) {
 435          domainMatrix[pe*nparts+j] = buf[j];
 443      int maxOccurance = 0;
 445      std::set<std::size_t> unassigned;
 447      for(i=0; i<nparts; i++) {
 448        for(j=0; j<npes; j++) {
 450          if (assigned[j]==0) {
 451            if (maxOccurance < domainMatrix[j*nparts+i]) {
 452              maxOccurance = domainMatrix[j*nparts+i];
 460          domainMapping[i] = pe;
 470          unassigned.insert(i);
 475      typename std::vector<int>::iterator next_free = assigned.begin();
 477      for(typename std::set<std::size_t>::iterator domain = unassigned.begin(),
 478            end = unassigned.end(); domain != end; ++domain)
 480        next_free = std::find_if(next_free, assigned.end(), std::bind2nd(std::less<int>(), 1));
 481        assert(next_free !=  assigned.end());
 482        domainMapping[*domain] = next_free-assigned.begin();
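getDomain maps ParMETIS partition numbers to processes: after the domainMatrix (how many vertices of each domain every process currently owns) has been circulated around the ring, each domain is greedily assigned to a still-unassigned process that holds most of its vertices, and leftover domains are handed to the remaining free processes (lines 475-482 above). A self-contained toy version of the greedy step, with made-up sizes and the leftover handling omitted:

#include <iostream>
#include <vector>

int main()
{
  const int npes=2, nparts=2;
  // domainMatrix[p*nparts+d]: vertices of domain d currently owned by process p.
  std::vector<int> domainMatrix;
  domainMatrix.push_back(5); domainMatrix.push_back(1);  // process 0
  domainMatrix.push_back(2); domainMatrix.push_back(4);  // process 1

  std::vector<int> domainMapping(nparts, -1), assigned(npes, 0);
  for(int d=0; d<nparts; ++d) {
    int best=-1, maxOccurance=0;
    for(int p=0; p<npes; ++p)
      if(assigned[p]==0 && domainMatrix[p*nparts+d]>maxOccurance) {
        maxOccurance = domainMatrix[p*nparts+d];
        best = p;
      }
    if(best>=0) { domainMapping[d]=best; assigned[best]=1; }
  }

  // Prints "domain 0 -> process 0" and "domain 1 -> process 1": each process
  // keeps the partition it already owns most of, minimising data movement.
  for(int d=0; d<nparts; ++d)
    std::cout << "domain " << d << " -> process " << domainMapping[d] << std::endl;
  return 0;
}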
 
 490      bool operator()(const T& t1, const T& t2) const
 508    void mergeVec(std::vector<std::pair<GI, int> >& ownerVec, std::set<GI>& overlapSet) {
 510      typedef typename std::vector<std::pair<GI,int> >::const_iterator VIter;
 513      if(ownerVec.size()>0)
 515        VIter old=ownerVec.begin();
 516        for(VIter i=old+1, end=ownerVec.end(); i != end; old=i++)
 518          if(i->first==old->first)
 520            std::cerr<<"Value at index "<<old-ownerVec.begin()<<" is the same as at index "
 521                     <<i-ownerVec.begin()<<" ["<<old->first<<","<<old->second<<"]==["
 522                     <<i->first<<","<<i->second<<"]"<<std::endl;
 530      typedef typename std::set<GI>::iterator SIter;
 531      VIter v=ownerVec.begin(), vend=ownerVec.end();
 532      for(SIter s=overlapSet.begin(), send=overlapSet.end(); s!=send;)
 534        while(v!=vend && v->first<*s) ++v;
 535        if(v!=vend && v->first==*s) {
 540          overlapSet.erase(tmp);
 
 560    template<class OwnerSet, class Graph, class IS, class GI>
 561    void getNeighbor(const Graph& g, std::vector<int>& part,
 562                     typename Graph::VertexDescriptor vtx, const IS& indexSet,
 563                     int toPe, std::set<GI>& neighbor, std::set<int>& neighborProcs) {
 564      typedef typename Graph::ConstEdgeIterator Iter;
 565      for(Iter edge=g.beginEdges(vtx), end=g.endEdges(vtx); edge!=end; ++edge)
 567        const typename IS::IndexPair* pindex = indexSet.pair(edge.target());
 569        if(part[pindex->local()]!=toPe || !OwnerSet::contains(pindex->local().attribute()))
 572          neighbor.insert(pindex->global());
 573          neighborProcs.insert(part[pindex->local()]);
 578    template<class T, class I>
 579    void my_push_back(std::vector<T>& ownerVec, const I& index, int proc)
 582      ownerVec.push_back(index);
 585    template<class T, class I>
 586    void my_push_back(std::vector<std::pair<T,int> >& ownerVec, const I& index, int proc)
 588      ownerVec.push_back(std::make_pair(index,proc));
 591    void reserve(std::vector<T>&, RedistributeInterface&, int)
 594    void reserve(std::vector<std::pair<T,int> >& ownerVec, RedistributeInterface& redist, int proc)
 596      redist.reserveSpaceForReceiveInterface(proc, ownerVec.size());
 617    template<class OwnerSet, class G, class IS, class T, class GI>
 618    void getOwnerOverlapVec(const G& graph, std::vector<int>& part, IS& indexSet,
 619                            int myPe, int toPe, std::vector<T>& ownerVec, std::set<GI>& overlapSet,
 620                            RedistributeInterface& redist, std::set<int>& neighborProcs) {
 623      typedef typename IS::const_iterator Iterator;
 624      for(Iterator index = indexSet.begin(); index != indexSet.end(); ++index) {
 626        if(OwnerSet::contains(index->local().attribute()))
 628          if(part[index->local()]==toPe)
 630            getNeighbor<OwnerSet>(graph, part, index->local(), indexSet,
 631                                  toPe, overlapSet, neighborProcs);
 632            my_push_back(ownerVec, index->global(), toPe);
 636      reserve(ownerVec, redist, toPe);
 647    template<class F, class IS>
 648    inline bool isOwner(IS& indexSet, int index) {
 650      const typename IS::IndexPair* pindex=indexSet.pair(index);
 653      return F::contains(pindex->local().attribute());
 657    class BaseEdgeFunctor
 660      BaseEdgeFunctor(idxtype* adj, const ParmetisDuneIndexMap& data)
 661        : i_(), adj_(adj), data_(data)
 665      void operator()(const T& edge)
 669        adj_[i_] = data_.toParmetis(edge.target());
 680      const ParmetisDuneIndexMap& data_;
 685      : public BaseEdgeFunctor
 687      EdgeFunctor(idxtype* adj, const ParmetisDuneIndexMap& data, std::size_t)
 688        : BaseEdgeFunctor(adj, data)
 691      idxtype* getWeights()
 698    template<class G, class V, class E, class VM, class EM>
 699    class EdgeFunctor<Dune::Amg::PropertiesGraph<G,V,E,VM,EM> >
 700      : public BaseEdgeFunctor
 703      EdgeFunctor(idxtype* adj, const ParmetisDuneIndexMap& data, std::size_t s)
 704        : BaseEdgeFunctor(adj, data)
 706        weight_=new idxtype[s];
 710      void operator()(const T& edge)
 712        weight_[index()]=edge.properties().depends() ? 3 : 1;
 713        BaseEdgeFunctor::operator()(edge);
 715      idxtype* getWeights()
 744    template<class F, class G, class IS, class EW>
 745    void getAdjArrays(G& graph, IS& indexSet, idxtype *xadj,
 751      typedef typename G::ConstVertexIterator VertexIterator;
 753      typedef typename IS::const_iterator Iterator;
 755      VertexIterator vend = graph.end();
 758      for(VertexIterator vertex = graph.begin(); vertex != vend; ++vertex) {
 759        if (isOwner<F>(indexSet,*vertex)) {
 761          typedef typename G::ConstEdgeIterator EdgeIterator;
 762          EdgeIterator eend = vertex.end();
 763          xadj[j] = ew.index();
 765          for(EdgeIterator edge = vertex.begin(); edge != eend; ++edge) {
 770      xadj[j] = ew.index();
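getAdjArrays writes the graph into the compressed (CSR) form that both METIS and ParMETIS expect: xadj[v] points into adjncy where the neighbour list of vertex v starts, and xadj[v+1] marks its end. A hand-written example for the triangle graph with edges 0-1, 0-2 and 1-2:

#include <cstddef>
#include <iostream>

int main()
{
  const std::size_t noVtx = 3;
  // xadj has noVtx+1 entries; the neighbours of v are adjncy[xadj[v]] .. adjncy[xadj[v+1]-1].
  int xadj[4]   = { 0, 2, 4, 6 };
  int adjncy[6] = { 1, 2,   0, 2,   0, 1 };

  for(std::size_t v=0; v<noVtx; ++v) {
    std::cout << "vertex " << v << ":";
    for(int e=xadj[v]; e<xadj[v+1]; ++e)
      std::cout << " " << adjncy[e];
    std::cout << std::endl;
  }
  return 0;
}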
 
 774  template<class G, class T1, class T2>
 775  bool buildCommunication(const G& graph, std::vector<int>& realparts,
 778                          RedistributeInterface& redistInf,
 781  #ifndef METIS_VER_MAJOR
 785    void METIS_PartGraphKway(int *nvtxs, idxtype *xadj, idxtype *adjncy, idxtype *vwgt,
 786                             idxtype *adjwgt, int *wgtflag, int *numflag, int *nparts,
 787                             int *options, int *edgecut, idxtype *part);
 789    void METIS_PartGraphRecursive(int *nvtxs, idxtype *xadj, idxtype *adjncy, idxtype *vwgt,
 790                                  idxtype *adjwgt, int *wgtflag, int *numflag, int *nparts,
 791                                  int *options, int *edgecut, idxtype *part);
 796  template<class S, class T>
 797  inline void print_carray(S& os, T* array, std::size_t l)
 799    for(T *cur=array, *end=array+l; cur!=end; ++cur)
 803  inline bool isValidGraph(std::size_t noVtx, std::size_t gnoVtx, idxtype noEdges, idxtype* xadj,
 804                           idxtype* adjncy, bool checkSymmetry)
 808    for(idxtype vtx=0; vtx<(idxtype)noVtx; ++vtx) {
 809      if(xadj[vtx]>noEdges||xadj[vtx]<0) {
 810        std::cerr <<"Check graph: xadj["<<vtx<<"]="<<xadj[vtx]<<" (>"
 811                  <<noEdges<<") out of range!"<<std::endl;
 814      if(xadj[vtx+1]>noEdges||xadj[vtx+1]<0) {
 815        std::cerr <<"Check graph: xadj["<<vtx+1<<"]="<<xadj[vtx+1]<<" (>"
 816                  <<noEdges<<") out of range!"<<std::endl;
 820      for(idxtype i=xadj[vtx]; i< xadj[vtx+1]; ++i) {
 821        if(adjncy[i]<0||((std::size_t)adjncy[i])>gnoVtx) {
 822          std::cerr<<" Edge "<<adjncy[i]<<" out of range ["<<0<<","<<noVtx<<")"
 828        for(idxtype i=xadj[vtx]; i< xadj[vtx+1]; ++i) {
 829          idxtype target=adjncy[i];
 832          for(idxtype j=xadj[target]; j< xadj[target+1]; ++j)
 836            std::cerr<<"Edge ("<<target<<","<<vtx<<") "<<i<<" time"<<std::endl;
 
 845  template<class M, class T1, class T2>
 849                            RedistributeInterface& redistInf,
 852    if(verbose && oocomm.communicator().rank()==0)
 853      std::cout<<"Repartitioning from "<<oocomm.communicator().size()
 854               <<" to "<<nparts<<" parts"<<std::endl;
 856    int rank = oocomm.communicator().rank();
 858    int* part = new int[1];
 861    idxtype* part = new idxtype[1];
 873        typedef typename RemoteIndices::const_iterator
 888        idxtype *xadj=new idxtype[2];
 889        idxtype *vtxdist=new idxtype[oocomm.communicator().size()+1];
 890        idxtype *adjncy=new idxtype[noNeighbours];
 897        for(int i=0; i<oocomm.communicator().size(); ++i)
 899        vtxdist[oocomm.communicator().size()]=oocomm.communicator().size();
 902        xadj[1]=noNeighbours;
 932        typedef typename RemoteIndices::const_iterator NeighbourIterator;
 934        idxtype* adjp=adjncy;
 937        vwgt   = new idxtype[1];
 940        adjwgt = new idxtype[noNeighbours];
 941        idxtype* adjwp=adjwgt;
 946          if(n->first != rank) {
 954        assert(isValidGraph(vtxdist[rank+1]-vtxdist[rank],
 955                            vtxdist[oocomm.communicator().size()],
 956                            noNeighbours, xadj, adjncy, false));
 958        idxtype wgtflag=0, numflag=0;
 963        float *tpwgts = new float[nparts];
 964        for(int i=0; i<nparts; ++i)
 965          tpwgts[i]=1.0/nparts;
 966        int options[5] ={ 0,1,15,0,0};
 967        MPI_Comm comm=oocomm.communicator();
 983        oocomm.communicator().barrier();
 984        if(verbose && oocomm.communicator().rank()==0)
 985          std::cout<<"Creating comm graph took "<<time.elapsed()<<std::endl;
 988  #ifdef PARALLEL_PARTITION
 995        ParMETIS_V3_PartKway(vtxdist, xadj, adjncy,
 996                             vwgt, adjwgt, &wgtflag,
 997                             &numflag, &ncon, &nparts, tpwgts, &ubvec, options, &edgecut, part,
 999        if(verbose && oocomm.communicator().rank()==0)
1000          std::cout<<"ParMETIS took "<<time.elapsed()<<std::endl;
1004        std::size_t gnoedges=0;
1006        noedges = new int[oocomm.communicator().size()];
1007        Dune::dverb<<"noNeighbours: "<<noNeighbours<<std::endl;
1009        MPI_Allgather(&noNeighbours,1,MPI_INT,noedges,1, MPI_INT,oocomm.communicator());
1011        if(verbose && oocomm.communicator().rank()==0)
1012          std::cout<<"Gathering noedges took "<<time1.elapsed()<<std::endl;
1015        idxtype noVertices = vtxdist[oocomm.communicator().size()];
1018        idxtype *gadjncy = 0;
1019        idxtype *gadjwgt = 0;
1027        std::size_t localNoVtx=vtxdist[rank+1]-vtxdist[rank];
1029        std::size_t gxadjlen = vtxdist[oocomm.communicator().size()]-vtxdist[0]+oocomm.communicator().size();
1035          displ = new int[oocomm.communicator().size()];
1036          xdispl = new int[oocomm.communicator().size()];
1037          noxs = new int[oocomm.communicator().size()];
1038          vdispl = new int[oocomm.communicator().size()];
1039          novs = new int[oocomm.communicator().size()];
1041          for(int i=0; i < oocomm.communicator().size(); ++i) {
1042            noxs[i]=vtxdist[i+1]-vtxdist[i]+1;
1043            novs[i]=vtxdist[i+1]-vtxdist[i];
1046          idxtype *so= vtxdist;
1048          for(int *xcurr = xdispl, *vcurr = vdispl, *end=vdispl+oocomm.communicator().size();
1049              vcurr!=end; ++vcurr, ++xcurr, ++so, ++offset) {
1051            *xcurr = offset + *so;
1057          for(int *curr=noedges, *end=noedges+oocomm.communicator().size()-1;
1058              curr!=end; ++curr) {
1069          for(int *curr=noedges, *end=noedges+oocomm.communicator().size();
1074          Dune::dinfo<<"gxadjlen: "<<gxadjlen<<" noVertices: "<<noVertices
1075                     <<" gnoedges: "<<gnoedges<<std::endl;
1076          gxadj = new idxtype[gxadjlen];
1077          gpart = new idxtype[noVertices];
1079          gvwgt = new idxtype[noVertices];
1080          gadjwgt = new idxtype[gnoedges];
1082          gadjncy = new idxtype[gnoedges];
1085        if(verbose && oocomm.communicator().rank()==0)
1086          std::cout<<"Preparing global graph took "<<time1.elapsed()<<std::endl;
1090        MPI_Allgatherv(xadj,2,MPITraits<idxtype>::getType(),
1091                       gxadj,noxs,xdispl,MPITraits<idxtype>::getType(),
1093        MPI_Allgatherv(adjncy,noNeighbours,MPITraits<idxtype>::getType(),
1094                       gadjncy,noedges,displ,MPITraits<idxtype>::getType(),
1097        MPI_Allgatherv(adjwgt,noNeighbours,MPITraits<idxtype>::getType(),
1098                       gadjwgt,noedges,displ,MPITraits<idxtype>::getType(),
1100        MPI_Allgatherv(vwgt,localNoVtx,MPITraits<idxtype>::getType(),
1101                       gvwgt,novs,vdispl,MPITraits<idxtype>::getType(),
1104        if(verbose && oocomm.communicator().rank()==0)
1105          std::cout<<"Gathering global graph data took "<<time1.elapsed()<<std::endl;
1115          idxtype increment = vtxdist[1];
1116          idxtype *start=gxadj+1;
1117          for(int i=1; i<oocomm.communicator().size(); ++i) {
1119            int lprev = vtxdist[i]-vtxdist[i-1];
1120            int l = vtxdist[i+1]-vtxdist[i];
1122            assert((start+l+offset)-gxadj<=static_cast<idxtype>(gxadjlen));
1123            increment = *(start-1);
1124            std::transform(start+offset, start+l+offset, start, std::bind2nd(std::plus<idxtype>(), increment));
1138          if(verbose && oocomm.communicator().rank()==0)
1139            std::cout<<"Postprocessing global graph data took "<<time1.elapsed()<<std::endl;
1142          assert(isValidGraph(noVertices, noVertices, gnoedges,
1143                              gxadj, gadjncy, true));
1146          if(verbose && oocomm.communicator().rank()==0)
1147            std::cout<<"Creating graph on 1 process took "<<time.elapsed()<<std::endl;
1149          options[0]=0; options[1]=1; options[2]=1; options[3]=3; options[4]=3;
1150  #if METIS_VER_MAJOR >= 5
1152          idxtype moptions[METIS_NOPTIONS];
1153          METIS_SetDefaultOptions(moptions);
1154          moptions[METIS_OPTION_NUMBERING] = numflag;
1155          METIS_PartGraphRecursive(&noVertices, &ncon, gxadj, gadjncy, gvwgt, NULL, gadjwgt,
1156                                   &nparts, NULL, NULL, moptions, &edgecut, gpart);
1159          METIS_PartGraphRecursive(&noVertices, gxadj, gadjncy, gvwgt, gadjwgt, &wgtflag,
1160                                   &numflag, &nparts, options, &edgecut, gpart);
1163          if(verbose && oocomm.communicator().rank()==0)
1164            std::cout<<"METIS took "<<time.elapsed()<<std::endl;
1178        MPI_Scatter(gpart, 1, MPITraits<idxtype>::getType(), part, 1,
1179                    MPITraits<idxtype>::getType(), 0, comm);
1203    Dune::dinfo<<" repart "<<rank <<" -> "<< part[0]<<std::endl;
1205    std::vector<int> realpart(mat.N(), part[0]);
1210    if(verbose && oocomm.communicator().rank()==0)
1211      std::cout<<"Scattering repartitioning took "<<time.elapsed()<<std::endl;
1215    oocomm.buildGlobalLookup(mat.N());
1218    if(verbose && oocomm.communicator().rank()==0)
1219      std::cout<<"Filling index set took "<<time.elapsed()<<std::endl;
1224      noNeighbours = oocomm.communicator().sum(noNeighbours)
1225                     / oocomm.communicator().size();
1226      if(oocomm.communicator().rank()==0)
1227        std::cout<<"Average no neighbours was "<<noNeighbours<<std::endl;
1229    bool ret = buildCommunication(graph, realpart, oocomm, outcomm, redistInf,
1231    if(verbose && oocomm.communicator().rank()==0)
1232      std::cout<<"Building index sets took "<<time.elapsed()<<std::endl;
 
1254  template<class G, class T1, class T2>
1257                        RedistributeInterface& redistInf,
1262    MPI_Comm comm=oocomm.communicator();
1263    oocomm.buildGlobalLookup(graph.noVertices());
1266    if(verbose && oocomm.communicator().rank()==0)
1267      std::cout<<"Filling holes took "<<time.elapsed()<<std::endl;
1274    double t1=0.0, t2=0.0, t3=0.0, t4=0.0, tSum=0.0;
1279    int mype = oocomm.communicator().rank();
1281    assert(nparts<=oocomm.communicator().size());
1302    typedef typename OOComm::OwnerSet OwnerSet;
1307    ParmetisDuneIndexMap indexMap(graph,oocomm);
1309    idxtype *part = new idxtype[indexMap.numOfOwnVtx()];
1311    std::size_t *part = new std::size_t[indexMap.numOfOwnVtx()];
1313    for(std::size_t i=0; i < indexMap.numOfOwnVtx(); ++i)
1317    if(oocomm.communicator().rank()==0 && nparts>1)
1318      std::cerr<<"ParMETIS not activated. Will repartition to 1 domain instead of requested "
1319               <<nparts<<" domains."<<std::endl;
1326      idxtype *xadj = new idxtype[indexMap.numOfOwnVtx()+1];
1327      idxtype *adjncy = new idxtype[graph.noEdges()];
1328      EdgeFunctor<G> ef(adjncy, indexMap, graph.noEdges());
1329      getAdjArrays<OwnerSet>(graph, oocomm.globalLookup(), xadj, ef);
1335      idxtype numflag=0, wgtflag=0, options[3], edgecut=0, ncon=1;
1337      float *tpwgts = new float[nparts];
1338      for(int i=0; i<nparts; ++i)
1339        tpwgts[i]=1.0/nparts;
1348      wgtflag = (ef.getWeights()!=NULL) ? 1 : 0;
1356        std::cout<<std::endl;
1357        std::cout<<"Testing ParMETIS_V3_PartKway with options[1-2] = {"
1358                 <<options[1]<<" "<<options[2]<<"}, Ncon: "
1359                 <<ncon<<", Nparts: "<<nparts<<std::endl;
1370        oocomm.communicator().barrier();
1371        if(oocomm.communicator().rank()==0)
1372          std::cout<<"Preparing for parmetis took "<<time.elapsed()<<std::endl;
1379      ParMETIS_V3_PartKway(indexMap.vtxDist(), xadj, adjncy,
1380                           NULL, ef.getWeights(), &wgtflag,
1381                           &numflag, &ncon, &nparts, tpwgts, ubvec, options, &edgecut, part, &const_cast<MPI_Comm&>(comm));
1392        std::cout<<std::endl;
1393        std::cout<<"ParMETIS_V3_PartKway reported a cut of "<<edgecut<<std::endl;
1394        std::cout<<std::endl;
1396      std::cout<<mype<<": PARMETIS-Result: ";
1397      for(int i=0; i < indexMap.vtxDist()[mype+1]-indexMap.vtxDist()[mype]; ++i) {
1398        std::cout<<part[i]<<" ";
1400      std::cout<<std::endl;
1401      std::cout<<"Testing ParMETIS_V3_PartKway with options[1-2] = {"
1402               <<options[1]<<" "<<options[2]<<"}, Ncon: "
1403               <<ncon<<", Nparts: "<<nparts<<std::endl;
1414        oocomm.communicator().barrier();
1415        if(oocomm.communicator().rank()==0)
1416          std::cout<<"Parmetis took "<<time.elapsed()<<std::endl;
1423      for(std::size_t i=0; i<indexMap.numOfOwnVtx(); ++i)
1433    std::vector<int> domainMapping(nparts);
1435      getDomain(comm, part, indexMap.numOfOwnVtx(), nparts, &myDomain, domainMapping);
1440    std::cout<<mype<<": myDomain: "<<myDomain<<std::endl;
1441    std::cout<<mype<<": DomainMapping: ";
1442    for(int j=0; j<nparts; j++) {
1443      std::cout<<" do: "<<j<<" pe: "<<domainMapping[j]<<" ";
1445    std::cout<<std::endl;
1452    std::vector<int> setPartition(oocomm.indexSet().size(), -1);
1454    typedef typename OOComm::ParallelIndexSet::const_iterator Iterator;
1457      if(OwnerSet::contains(index->local().attribute())) {
1458        setPartition[index->local()]=domainMapping[part[i++]];
1468    bool ret = buildCommunication(graph, setPartition, oocomm, outcomm, redistInf,
1471      oocomm.communicator().barrier();
1472      if(oocomm.communicator().rank()==0)
1473        std::cout<<"Creating indexsets took "<<time.elapsed()<<std::endl;
 
1480  template<class G, class T1, class T2>
1481  bool buildCommunication(const G& graph,
1484                          RedistributeInterface& redistInf,
1488    typedef typename OOComm::OwnerSet OwnerSet;
1493    redistInf.buildSendInterface<OwnerSet>(setPartition, oocomm.indexSet());
1519    int npes = oocomm.communicator().size();
1522    std::set<int> recvFrom;
1528    typedef typename std::vector<int>::const_iterator VIter;
1529    int mype = oocomm.communicator().rank();
1532      std::set<int> tsendTo;
1533      for(VIter i=setPartition.begin(), iend = setPartition.end(); i!=iend; ++i)
1536      noSendTo = tsendTo.size();
1537      sendTo = new int[noSendTo];
1538      typedef std::set<int>::const_iterator iterator;
1540      for(iterator i=tsendTo.begin(); i != tsendTo.end(); ++i, ++idx)
1545    int* gnoSend= new int[oocomm.communicator().size()];
1546    int* gsendToDispl = new int[oocomm.communicator().size()+1];
1548    MPI_Allgather(&noSendTo, 1, MPI_INT, gnoSend, 1,
1549                  MPI_INT, oocomm.communicator());
1552    int totalNoRecv = 0;
1553    for(int i=0; i<npes; ++i)
1554      totalNoRecv += gnoSend[i];
1556    int *gsendTo = new int[totalNoRecv];
1560    for(int i=0; i<npes; ++i)
1561      gsendToDispl[i+1]=gsendToDispl[i]+gnoSend[i];
1564    MPI_Allgatherv(sendTo, noSendTo, MPI_INT, gsendTo, gnoSend, gsendToDispl,
1565                   MPI_INT, oocomm.communicator());
1568    for(int proc=0; proc < npes; ++proc)
1569      for(int i=gsendToDispl[proc]; i < gsendToDispl[proc+1]; ++i)
1570        if(gsendTo[i]==mype)
1571          recvFrom.insert(proc);
1573    bool existentOnNextLevel = recvFrom.size()>0;
1577    delete[] gsendToDispl;
1582    if(recvFrom.size()) {
1583      std::cout<<mype<<": recvFrom: ";
1584      typedef typename std::set<int>::const_iterator siter;
1585      for(siter i=recvFrom.begin(); i!= recvFrom.end(); ++i) {
1590    std::cout<<std::endl<<std::endl;
1591    std::cout<<mype<<": sendTo: ";
1592    for(int i=0; i<noSendTo; i++) {
1593      std::cout<<sendTo[i]<<" ";
1595    std::cout<<std::endl<<std::endl;
1599      if(oocomm.communicator().rank()==0)
1600        std::cout<<" Communicating the receive information took "<<
1601        time.elapsed()<<std::endl;
1615    typedef typename OOComm::ParallelIndexSet::GlobalIndex GI;
1616    typedef std::vector<GI> GlobalVector;
1617    std::vector<std::pair<GI,int> > myOwnerVec;
1618    std::set<GI> myOverlapSet;
1619    GlobalVector sendOwnerVec;
1620    std::set<GI> sendOverlapSet;
1621    std::set<int> myNeighbors;
1626    char **sendBuffers=new char*[noSendTo];
1627    MPI_Request *requests = new MPI_Request[noSendTo];
1630    for(int i=0; i < noSendTo; ++i) {
1632      sendOwnerVec.clear();
1633      sendOverlapSet.clear();
1636      std::set<int> neighbors;
1637      getOwnerOverlapVec<OwnerSet>(graph, setPartition, oocomm.globalLookup(),
1638                                   mype, sendTo[i], sendOwnerVec, sendOverlapSet, redistInf,
1644      MPI_Pack_size(1, MPITraits<std::size_t>::getType(), oocomm.communicator(), &buffersize);
1645      MPI_Pack_size(sendOwnerVec.size(), MPITraits<GI>::getType(), oocomm.communicator(), &tsize);
1647      MPI_Pack_size(1, MPITraits<std::size_t>::getType(), oocomm.communicator(), &tsize);
1649      MPI_Pack_size(sendOverlapSet.size(), MPITraits<GI>::getType(), oocomm.communicator(), &tsize);
1650      buffersize += tsize;
1651      MPI_Pack_size(1, MPITraits<std::size_t>::getType(), oocomm.communicator(), &tsize);
1652      buffersize += tsize;
1653      MPI_Pack_size(neighbors.size(), MPI_INT, oocomm.communicator(), &tsize);
1654      buffersize += tsize;
1656      sendBuffers[i] = new char[buffersize];
1659      std::cout<<mype<<" sending "<<sendOwnerVec.size()<<" owner and "<<
1660      sendOverlapSet.size()<<" overlap to "<<sendTo[i]<<" buffersize="<<buffersize<<std::endl;
1662      createSendBuf(sendOwnerVec, sendOverlapSet, neighbors, sendBuffers[i], buffersize, oocomm.communicator());
1663      MPI_Issend(sendBuffers[i], buffersize, MPI_PACKED, sendTo[i], 99, oocomm.communicator(), requests+i);
1667      oocomm.communicator().barrier();
1668      if(oocomm.communicator().rank()==0)
1669        std::cout<<" Creating sends took "<<
1670        time.elapsed()<<std::endl;
1675    int noRecv = recvFrom.size();
1676    int oldbuffersize=0;
1681      MPI_Probe(MPI_ANY_SOURCE, 99, oocomm.communicator(), &stat);
1683      MPI_Get_count(&stat, MPI_PACKED, &buffersize);
1685      if(oldbuffersize<buffersize) {
1688        recvBuf = new char[buffersize];
1689        oldbuffersize = buffersize;
1691      MPI_Recv(recvBuf, buffersize, MPI_PACKED, stat.MPI_SOURCE, 99, oocomm.communicator(), &stat);
1692      saveRecvBuf(recvBuf, buffersize, myOwnerVec, myOverlapSet, myNeighbors, redistInf,
1693                  stat.MPI_SOURCE, oocomm.communicator());
1702    MPI_Status *statuses = new MPI_Status[noSendTo];
1703    int send = MPI_Waitall(noSendTo, requests, statuses);
1706    if(send==MPI_ERR_IN_STATUS) {
1707      std::cerr<<mype<<": Error in sending :"<<std::endl;
1709      for(int i=0; i< noSendTo; i++)
1710        if(statuses[i].MPI_ERROR!=MPI_SUCCESS) {
1713          MPI_Error_string(statuses[i].MPI_ERROR, message, &messageLength);
1714          std::cerr<<" source="<<statuses[i].MPI_SOURCE<<" message: ";
1715          for(int j = 0; j < messageLength; j++)
1716            std::cout<<message[j];
1718      std::cerr<<std::endl;
1722      oocomm.communicator().barrier();
1723      if(oocomm.communicator().rank()==0)
1724        std::cout<<" Receiving and saving took "<<
1725        time.elapsed()<<std::endl;
1729    for(int i=0; i < noSendTo; ++i)
1730      delete[] sendBuffers[i];
1732    delete[] sendBuffers;
1736    redistInf.setCommunicator(oocomm.communicator());
1747    if (!existentOnNextLevel) {
1749      color= MPI_UNDEFINED;
1751    MPI_Comm outputComm;
1753    MPI_Comm_split(oocomm.communicator(), color, oocomm.communicator().rank(), &outputComm);
1757    int newrank=outcomm->communicator().rank();
1758    int *newranks=new int[oocomm.communicator().size()];
1759    std::vector<int> tneighbors;
1760    tneighbors.reserve(myNeighbors.size());
1762    typename OOComm::ParallelIndexSet& outputIndexSet = outcomm->indexSet();
1764    MPI_Allgather(&newrank, 1, MPI_INT, newranks, 1,
1765                  MPI_INT, oocomm.communicator());
1766    typedef typename std::set<int>::const_iterator IIter;
1769    std::cout<<oocomm.communicator().rank()<<" ";
1770    for(IIter i=myNeighbors.begin(), end=myNeighbors.end();
1772      assert(newranks[*i]>=0);
1773      std::cout<<*i<<"->"<<newranks[*i]<<" ";
1774      tneighbors.push_back(newranks[*i]);
1776    std::cout<<std::endl;
1778    for(IIter i=myNeighbors.begin(), end=myNeighbors.end();
1780      tneighbors.push_back(newranks[*i]);
1784    myNeighbors.clear();
1787      oocomm.communicator().barrier();
1788      if(oocomm.communicator().rank()==0)
1789        std::cout<<" Calculating new neighbours ("<<tneighbors.size()<<") took "<<
1790        time.elapsed()<<std::endl;
1795    outputIndexSet.beginResize();
1798    std::sort(myOwnerVec.begin(), myOwnerVec.end(), SortFirst());
1802    typedef typename OOComm::ParallelIndexSet::LocalIndex LocalIndex;
1803    typedef typename std::vector<std::pair<GI,int> >::const_iterator VPIter;
1805    for(VPIter g=myOwnerVec.begin(), end =myOwnerVec.end(); g!=end; ++g, ++i ) {
1806      outputIndexSet.add(g->first,LocalIndex(i, OwnerOverlapCopyAttributeSet::owner, true));
1807      redistInf.addReceiveIndex(g->second, i);
1811      oocomm.communicator().barrier();
1812      if(oocomm.communicator().rank()==0)
1813        std::cout<<" Adding owner indices took "<<
1814        time.elapsed()<<std::endl;
1823    mergeVec(myOwnerVec, myOverlapSet);
1827    myOwnerVec.swap(myOwnerVec);
1830      oocomm.communicator().barrier();
1831      if(oocomm.communicator().rank()==0)
1832        std::cout<<" Merging indices took "<<
1833        time.elapsed()<<std::endl;
1839    typedef typename std::set<GI>::const_iterator SIter;
1840    for(SIter g=myOverlapSet.begin(), end=myOverlapSet.end(); g!=end; ++g, i++) {
1841      outputIndexSet.add(*g,LocalIndex(i, OwnerOverlapCopyAttributeSet::copy, true));
1843    myOverlapSet.clear();
1844    outputIndexSet.endResize();
1846  #ifdef DUNE_ISTL_WITH_CHECKING
1848    typedef typename OOComm::ParallelIndexSet::const_iterator Iterator;
1849    Iterator end = outputIndexSet.end();
1850    for(Iterator index = outputIndexSet.begin(); index != end; ++index) {
1851      if (OwnerSet::contains(index->local().attribute())) {
1855    numOfOwnVtx = oocomm.communicator().sum(numOfOwnVtx);
1862    Iterator index=outputIndexSet.begin();
1865      for(Iterator old = outputIndexSet.begin(); index != end; old=index++) {
1866        if(old->global()>index->global())
1867          DUNE_THROW(ISTLError, "Index set's globalindex not sorted correctly");
1872      oocomm.communicator().barrier();
1873      if(oocomm.communicator().rank()==0)
1874        std::cout<<" Adding overlap indices took "<<
1875        time.elapsed()<<std::endl;
1880    if(color != MPI_UNDEFINED) {
1890      oocomm.communicator().barrier();
1891      if(oocomm.communicator().rank()==0)
1892        std::cout<<" Storing indexsets took "<<
1893        time.elapsed()<<std::endl;
1899    tSum = t1 + t2 + t3 + t4;
1900    std::cout<<std::endl
1901             <<mype<<": WTime for step 1): "<<t1
1909    return color!=MPI_UNDEFINED;
 
1913  template<class G, class P, class T1, class T2, class R>
1919    if(nparts!=oocomm.size())
1920      DUNE_THROW(NotImplemented, "only available for MPI programs");
1924  template<class G, class P, class T1, class T2, class R>
1925  bool commGraphRepartition(const G& graph, P& oocomm, int nparts,
1930    if(nparts!=oocomm.size())
1931      DUNE_THROW(NotImplemented, "only available for MPI programs");
 
bool graphRepartition(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, idxtype nparts, Dune::OwnerOverlapCopyCommunication< T1, T2 > *&outcomm, RedistributeInterface &redistInf, bool verbose=false)
Execute a graph repartition for a given graph and index set.
Definition: repartition.hh:1255

void fillIndexSetHoles(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm)
Fills the holes in an index set.
Definition: repartition.hh:58
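A hedged end-to-end sketch of how the two functions documented above are typically combined; the matrix wrapper, the attribute template parameters and the handling of the newly created communication object are illustrative assumptions, and error handling is omitted:

#include <dune/istl/repartition.hh>
#include <dune/istl/owneroverlapcopy.hh>
#include <dune/istl/paamg/graph.hh>

template<class Matrix>
bool repartitionSketch(Matrix& A,
                       Dune::OwnerOverlapCopyCommunication<int,int>& oocomm,
                       int parts)
{
  typedef Dune::OwnerOverlapCopyCommunication<int,int> Comm;

  Dune::Amg::MatrixGraph<Matrix> graph(A);   // one vertex per matrix row
  Comm* newComm = 0;                         // created by graphRepartition
  Dune::RedistributeInterface redistInf;     // records where each index moves

  // Let ParMETIS (or the sequential fallbacks above) compute the new
  // partition and build the index set of newComm; redistInf can afterwards
  // drive the actual redistribution of matrix and vector data.
  bool participates = Dune::graphRepartition(graph, oocomm, parts,
                                             newComm, redistInf, false);

  // newComm is only meaningful on processes where participates is true;
  // the caller is responsible for eventually deleting it.
  return participates;
}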
 