
yaspgrid.hh
1// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
2// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4// vi: set et ts=4 sw=2 sts=2:
5#ifndef DUNE_GRID_YASPGRID_HH
6#define DUNE_GRID_YASPGRID_HH
7
8#include <cstdint>
9#include <iostream>
10#include <iterator>
11#include <vector>
12#include <algorithm>
13#include <stack>
14#include <type_traits>
15
16#include <dune/grid/common/backuprestore.hh>
17#include <dune/grid/common/grid.hh> // the grid base classes
18#include <dune/grid/common/capabilities.hh> // the capabilities
19#include <dune/common/hybridutilities.hh>
21#include <dune/common/math.hh>
27#include <dune/geometry/type.hh>
30
31
32#if HAVE_MPI
33#include <mpi.h>
34#endif
35
43namespace Dune {
44
45 /* some sizes for building global ids
46 */
47 const int yaspgrid_dim_bits = 24; // bits for encoding each dimension
48 const int yaspgrid_level_bits = 5; // bits for encoding level number
49
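 // Example: for dim = 3 the persistent ids used below take
 // dim*yaspgrid_dim_bits + yaspgrid_level_bits + dim = 3*24 + 5 + 3 = 80 bits,
 // which is why the grid family uses bigunsignedint<80> as its id type.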
50
51 //************************************************************************
52 // forward declaration of templates
53
54 template<int dim, class Coordinates> class YaspGrid;
55 template<int mydim, int cdim, class GridImp> class YaspGeometry;
56 template<int codim, int dim, class GridImp> class YaspEntity;
57 template<int codim, class GridImp> class YaspEntitySeed;
58 template<int codim, PartitionIteratorType pitype, class GridImp> class YaspLevelIterator;
59 template<class GridImp> class YaspIntersectionIterator;
60 template<class GridImp> class YaspIntersection;
61 template<class GridImp> class YaspHierarchicIterator;
62 template<class GridImp, bool isLeafIndexSet> class YaspIndexSet;
63 template<class GridImp> class YaspGlobalIdSet;
64 template<class GridImp> class YaspPersistentContainerIndex;
65
66} // namespace Dune
67
79#include <dune/grid/yaspgrid/yaspgrididset.hh>
81
82namespace Dune {
83
84#if HAVE_MPI
85 using YaspCommunication = Communication<MPI_Comm>;
86#else
87 using YaspCommunication = Communication<No_Comm>;
88#endif
89
90 template<int dim, class Coordinates>
91 struct YaspGridFamily
92 {
93 typedef YaspCommunication CCType;
94
95 typedef GridTraits<dim, // dimension of the grid
96 dim, // dimension of the world space
97 Dune::YaspGrid<dim, Coordinates>,
98 YaspGeometry,YaspEntity,
99 YaspLevelIterator, // type used for the level iterator
100 YaspIntersection, // leaf intersection
101 YaspIntersection, // level intersection
102 YaspIntersectionIterator, // leaf intersection iter
103 YaspIntersectionIterator, // level intersection iter
104 YaspHierarchicIterator,
105 YaspLevelIterator, // type used for the leaf(!) iterator
106 YaspIndexSet< const YaspGrid< dim, Coordinates >, false >, // level index set
107 YaspIndexSet< const YaspGrid< dim, Coordinates >, true >, // leaf index set
108 YaspGlobalIdSet<const YaspGrid<dim, Coordinates> >,
109 bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim>,
110 YaspGlobalIdSet<const YaspGrid<dim, Coordinates> >,
111 bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim>,
112 CCType,
113 DefaultLevelGridViewTraits, DefaultLeafGridViewTraits,
114 YaspEntitySeed,
115 YaspGeometry,
116 unsigned int,
117 std::array<GeometryType,1>>
118 Traits;
119 };
120
121#ifndef DOXYGEN
122 template<int dim, int codim>
123 struct YaspCommunicateMeta {
124 template<class G, class DataHandle>
125 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
126 {
127 if (data.contains(dim,codim))
128 {
129 g.template communicateCodim<DataHandle,codim>(data,iftype,dir,level);
130 }
131 YaspCommunicateMeta<dim,codim-1>::comm(g,data,iftype,dir,level);
132 }
133 };
134
135 template<int dim>
136 struct YaspCommunicateMeta<dim,0> {
137 template<class G, class DataHandle>
138 static void comm (const G& g, DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level)
139 {
140 if (data.contains(dim,0))
141 g.template communicateCodim<DataHandle,0>(data,iftype,dir,level);
142 }
143 };
144#endif
145
146 //************************************************************************
163 template<int dim, class Coordinates = EquidistantCoordinates<double, dim> >
164 class YaspGrid
165 : public GridDefaultImplementation<dim,dim,typename Coordinates::ctype,YaspGridFamily<dim, Coordinates> >
166 {
167
168 template<int, PartitionIteratorType, typename>
169 friend class YaspLevelIterator;
170
171 template<typename>
172 friend class YaspHierarchicIterator;
173
174 using Base = GridDefaultImplementation<dim,dim,typename Coordinates::ctype,YaspGridFamily<dim, Coordinates> >;
175 protected:
176
177 public:
179 typedef typename Coordinates::ctype ctype;
180
181 using Communication = typename Base::Communication;
182
183#ifndef DOXYGEN
184 typedef typename Dune::YGrid<Coordinates> YGrid;
185 typedef typename Dune::YGridList<Coordinates>::Intersection Intersection;
186
189 struct YGridLevel {
190
192 int level() const
193 {
194 return level_;
195 }
196
197 Coordinates coords;
198
199 std::array<YGrid, dim+1> overlapfront;
200 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> overlapfront_data;
201 std::array<YGrid, dim+1> overlap;
202 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> overlap_data;
203 std::array<YGrid, dim+1> interiorborder;
204 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> interiorborder_data;
205 std::array<YGrid, dim+1> interior;
206 std::array<YGridComponent<Coordinates>, Dune::power(2,dim)> interior_data;
207
208 std::array<YGridList<Coordinates>,dim+1> send_overlapfront_overlapfront;
209 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_overlapfront_overlapfront_data;
210 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_overlapfront;
211 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_overlapfront_data;
212
213 std::array<YGridList<Coordinates>,dim+1> send_overlap_overlapfront;
214 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_overlap_overlapfront_data;
215 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_overlap;
216 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_overlap_data;
217
218 std::array<YGridList<Coordinates>,dim+1> send_interiorborder_interiorborder;
219 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_interiorborder_interiorborder_data;
220 std::array<YGridList<Coordinates>,dim+1> recv_interiorborder_interiorborder;
221 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_interiorborder_interiorborder_data;
222
223 std::array<YGridList<Coordinates>,dim+1> send_interiorborder_overlapfront;
224 std::array<std::deque<Intersection>, Dune::power(2,dim)> send_interiorborder_overlapfront_data;
225 std::array<YGridList<Coordinates>,dim+1> recv_overlapfront_interiorborder;
226 std::array<std::deque<Intersection>, Dune::power(2,dim)> recv_overlapfront_interiorborder_data;
227
228 // general
229 YaspGrid<dim,Coordinates>* mg; // each grid level knows its multigrid
230 int overlapSize; // in mesh cells on this level
231 bool keepOverlap;
232
234 int level_;
235 };
236
238 typedef std::array<int, dim> iTupel;
239 typedef FieldVector<ctype, dim> fTupel;
240
241 // communication tag used by multigrid
242 enum { tag = 17 };
243#endif
244
246 const Torus<Communication, dim>& torus () const
247 {
248 return _torus;
249 }
250
252 int globalSize(int i) const
253 {
254 return levelSize(maxLevel(),i);
255 }
256
258 iTupel globalSize() const
259 {
260 return levelSize(maxLevel());
261 }
262
264 int levelSize(int l, int i) const
265 {
266 return _coarseSize[i] * (1 << l);
267 }
268
270 iTupel levelSize(int l) const
271 {
272 iTupel s;
273 for (int i=0; i<dim; ++i)
274 s[i] = levelSize(l,i);
275 return s;
276 }
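 // Example: with a coarse size of 8 cells per direction, levelSize(2,i) returns
 // 8 * 2^2 = 32; every global refinement doubles the number of cells per direction.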
277
279 bool isPeriodic(int i) const
280 {
281 return _periodic[i];
282 }
283
284 bool getRefineOption() const
285 {
286 return keep_ovlp;
287 }
288
291 typedef typename ReservedVector<YGridLevel,32>::const_iterator YGridLevelIterator;
292
293 YGridLevelIterator begin () const
294 {
295 return _levels.begin();
296 }
297
299 YGridLevelIterator begin (int i) const
300 {
301 if (i<0 || i>maxLevel())
302 DUNE_THROW(GridError, "level not existing");
303 return std::next(_levels.begin(), i);
304 }
305
307 YGridLevelIterator end () const
308 {
309 return _levels.end();
310 }
311
312 // static method to create the default partitioning strategy
313 static const Yasp::Partitioning<dim>* defaultPartitioner()
314 {
315 static Yasp::DefaultPartitioning<dim> lb;
316 return & lb;
317 }
318
319 protected:
327 void makelevel (const Coordinates& coords, std::bitset<dim> periodic, iTupel o_interior, int overlap)
328 {
329 YGridLevel& g = _levels.back();
330 g.overlapSize = overlap;
331 g.mg = this;
332 g.level_ = maxLevel();
333 g.coords = coords;
334 g.keepOverlap = keep_ovlp;
335
336 using Dune::power;
337
338 // set the inserting positions in the corresponding arrays of YGridLevelStructure
339 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator overlapfront_it = g.overlapfront_data.begin();
340 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator overlap_it = g.overlap_data.begin();
341 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator interiorborder_it = g.interiorborder_data.begin();
342 typename std::array<YGridComponent<Coordinates>, power(2,dim)>::iterator interior_it = g.interior_data.begin();
343
344 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
345 send_overlapfront_overlapfront_it = g.send_overlapfront_overlapfront_data.begin();
346 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
347 recv_overlapfront_overlapfront_it = g.recv_overlapfront_overlapfront_data.begin();
348
349 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
350 send_overlap_overlapfront_it = g.send_overlap_overlapfront_data.begin();
351 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
352 recv_overlapfront_overlap_it = g.recv_overlapfront_overlap_data.begin();
353
354 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
355 send_interiorborder_interiorborder_it = g.send_interiorborder_interiorborder_data.begin();
356 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
357 recv_interiorborder_interiorborder_it = g.recv_interiorborder_interiorborder_data.begin();
358
359 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
360 send_interiorborder_overlapfront_it = g.send_interiorborder_overlapfront_data.begin();
361 typename std::array<std::deque<Intersection>, power(2,dim)>::iterator
362 recv_overlapfront_interiorborder_it = g.recv_overlapfront_interiorborder_data.begin();
363
364 // have a null array for constructor calls around
365 std::array<int,dim> n;
366 std::fill(n.begin(), n.end(), 0);
367
368 // determine origin of the grid with overlap and store whether an overlap area exists in direction i.
369 std::bitset<dim> ovlp_low(0ULL);
370 std::bitset<dim> ovlp_up(0ULL);
371
372 iTupel o_overlap;
373 iTupel s_overlap;
374
375 // determine where we have overlap and how big the overlap partition is
376 for (int i=0; i<dim; i++)
377 {
378 // the coordinate container has been constructed to hold the entire grid on
379 // this processor, including overlap. this is the element size.
380 s_overlap[i] = g.coords.size(i);
381
382 //in the periodic case there is always overlap
383 if (periodic[i])
384 {
385 o_overlap[i] = o_interior[i]-overlap;
386 ovlp_low[i] = true;
387 ovlp_up[i] = true;
388 }
389 else
390 {
391 //check lower boundary
392 if (o_interior[i] - overlap < 0)
393 o_overlap[i] = 0;
394 else
395 {
396 o_overlap[i] = o_interior[i] - overlap;
397 ovlp_low[i] = true;
398 }
399
400 //check upper boundary
401 if (o_overlap[i] + g.coords.size(i) < globalSize(i))
402 ovlp_up[i] = true;
403 }
404 }
405
406 for (unsigned int codim = 0; codim < dim + 1; codim++)
407 {
408 // set the begin iterator for the corresponding ygrids
409 g.overlapfront[codim].setBegin(&*overlapfront_it);
410 g.overlap[codim].setBegin(&*overlap_it);
411 g.interiorborder[codim].setBegin(&*interiorborder_it);
412 g.interior[codim].setBegin(&*interior_it);
413 g.send_overlapfront_overlapfront[codim].setBegin(send_overlapfront_overlapfront_it);
414 g.recv_overlapfront_overlapfront[codim].setBegin(recv_overlapfront_overlapfront_it);
415 g.send_overlap_overlapfront[codim].setBegin(send_overlap_overlapfront_it);
416 g.recv_overlapfront_overlap[codim].setBegin(recv_overlapfront_overlap_it);
417 g.send_interiorborder_interiorborder[codim].setBegin(send_interiorborder_interiorborder_it);
418 g.recv_interiorborder_interiorborder[codim].setBegin(recv_interiorborder_interiorborder_it);
419 g.send_interiorborder_overlapfront[codim].setBegin(send_interiorborder_overlapfront_it);
420 g.recv_overlapfront_interiorborder[codim].setBegin(recv_overlapfront_interiorborder_it);
421
422 // find all combinations of unit vectors that span entities of the given codimension
423 for (unsigned int index = 0; index < (1<<dim); index++)
424 {
425 // check whether the given shift is of our codimension
426 std::bitset<dim> r(index);
427 if (r.count() != dim-codim)
428 continue;
429
430 // get an origin and a size array for subsequent modification
431 std::array<int,dim> origin(o_overlap);
432 std::array<int,dim> size(s_overlap);
433
434 // build overlapfront
435 // we have to extend the element size by one in all directions without shift.
436 for (int i=0; i<dim; i++)
437 if (!r[i])
438 size[i]++;
439 *overlapfront_it = YGridComponent<Coordinates>(origin, r, &g.coords, size, n, size);
440
441 // build overlap
442 for (int i=0; i<dim; i++)
443 {
444 if (!r[i])
445 {
446 if (ovlp_low[i])
447 {
448 origin[i]++;
449 size[i]--;
450 }
451 if (ovlp_up[i])
452 size[i]--;
453 }
454 }
455 *overlap_it = YGridComponent<Coordinates>(origin,size,*overlapfront_it);
456
457 // build interiorborder
458 for (int i=0; i<dim; i++)
459 {
460 if (ovlp_low[i])
461 {
462 origin[i] += overlap;
463 size[i] -= overlap;
464 if (!r[i])
465 {
466 origin[i]--;
467 size[i]++;
468 }
469 }
470 if (ovlp_up[i])
471 {
472 size[i] -= overlap;
473 if (!r[i])
474 size[i]++;
475 }
476 }
477 *interiorborder_it = YGridComponent<Coordinates>(origin,size,*overlapfront_it);
478
479 // build interior
480 for (int i=0; i<dim; i++)
481 {
482 if (!r[i])
483 {
484 if (ovlp_low[i])
485 {
486 origin[i]++;
487 size[i]--;
488 }
489 if (ovlp_up[i])
490 size[i]--;
491 }
492 }
493 *interior_it = YGridComponent<Coordinates>(origin, size, *overlapfront_it);
494
495 intersections(*overlapfront_it,*overlapfront_it,*send_overlapfront_overlapfront_it, *recv_overlapfront_overlapfront_it);
496 intersections(*overlap_it,*overlapfront_it,*send_overlap_overlapfront_it, *recv_overlapfront_overlap_it);
497 intersections(*interiorborder_it,*interiorborder_it,*send_interiorborder_interiorborder_it,*recv_interiorborder_interiorborder_it);
498 intersections(*interiorborder_it,*overlapfront_it,*send_interiorborder_overlapfront_it,*recv_overlapfront_interiorborder_it);
499
500 // advance all iterators pointing to the next insertion point
501 ++overlapfront_it;
502 ++overlap_it;
503 ++interiorborder_it;
504 ++interior_it;
505 ++send_overlapfront_overlapfront_it;
506 ++recv_overlapfront_overlapfront_it;
507 ++send_overlap_overlapfront_it;
508 ++recv_overlapfront_overlap_it;
509 ++send_interiorborder_interiorborder_it;
510 ++recv_interiorborder_interiorborder_it;
511 ++send_interiorborder_overlapfront_it;
512 ++recv_overlapfront_interiorborder_it;
513 }
514
515 // set end iterators in the corresponding ygrids
516 g.overlapfront[codim].finalize(&*overlapfront_it);
517 g.overlap[codim].finalize(&*overlap_it);
518 g.interiorborder[codim].finalize(&*interiorborder_it);
519 g.interior[codim].finalize(&*interior_it);
520 g.send_overlapfront_overlapfront[codim].finalize(send_overlapfront_overlapfront_it,g.overlapfront[codim]);
521 g.recv_overlapfront_overlapfront[codim].finalize(recv_overlapfront_overlapfront_it,g.overlapfront[codim]);
522 g.send_overlap_overlapfront[codim].finalize(send_overlap_overlapfront_it,g.overlapfront[codim]);
523 g.recv_overlapfront_overlap[codim].finalize(recv_overlapfront_overlap_it,g.overlapfront[codim]);
524 g.send_interiorborder_interiorborder[codim].finalize(send_interiorborder_interiorborder_it,g.overlapfront[codim]);
525 g.recv_interiorborder_interiorborder[codim].finalize(recv_interiorborder_interiorborder_it,g.overlapfront[codim]);
526 g.send_interiorborder_overlapfront[codim].finalize(send_interiorborder_overlapfront_it,g.overlapfront[codim]);
527 g.recv_overlapfront_interiorborder[codim].finalize(recv_overlapfront_interiorborder_it,g.overlapfront[codim]);
528 }
529 }
530
531#ifndef DOXYGEN
540 struct mpifriendly_ygrid {
541 mpifriendly_ygrid ()
542 {
543 std::fill(origin.begin(), origin.end(), 0);
544 std::fill(size.begin(), size.end(), 0);
545 }
546 mpifriendly_ygrid (const YGridComponent<Coordinates>& grid)
547 : origin(grid.origin()), size(grid.size())
548 {}
549 iTupel origin;
550 iTupel size;
551 };
552#endif
553
561 void intersections(const YGridComponent<Coordinates>& sendgrid, const YGridComponent<Coordinates>& recvgrid,
562 std::deque<Intersection>& sendlist, std::deque<Intersection>& recvlist)
563 {
564 iTupel size = globalSize();
565
566 // the exchange buffers
567 std::vector<YGridComponent<Coordinates> > send_recvgrid(_torus.neighbors());
568 std::vector<YGridComponent<Coordinates> > recv_recvgrid(_torus.neighbors());
569 std::vector<YGridComponent<Coordinates> > send_sendgrid(_torus.neighbors());
570 std::vector<YGridComponent<Coordinates> > recv_sendgrid(_torus.neighbors());
571
572 // new exchange buffers to send a simple struct without virtual functions
573 std::vector<mpifriendly_ygrid> mpifriendly_send_recvgrid(_torus.neighbors());
574 std::vector<mpifriendly_ygrid> mpifriendly_recv_recvgrid(_torus.neighbors());
575 std::vector<mpifriendly_ygrid> mpifriendly_send_sendgrid(_torus.neighbors());
576 std::vector<mpifriendly_ygrid> mpifriendly_recv_sendgrid(_torus.neighbors());
577
578 // fill send buffers; iterate over all neighboring processes
579 // non-periodic case is handled automatically because intersection will be zero
580 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
581 {
582 // determine if we communicate with this neighbor (and what)
583 bool skip = false;
584 iTupel coord = _torus.coord(); // my coordinates
585 iTupel delta = i.delta(); // delta to neighbor
586 iTupel nb = coord; // the neighbor
587 for (int k=0; k<dim; k++) nb[k] += delta[k];
588 iTupel v; // grid movement
589 std::fill(v.begin(), v.end(), 0);
590
591 for (int k=0; k<dim; k++)
592 {
593 if (nb[k]<0)
594 {
595 if (_periodic[k])
596 v[k] += size[k];
597 else
598 skip = true;
599 }
600 if (nb[k]>=_torus.dims(k))
601 {
602 if (_periodic[k])
603 v[k] -= size[k];
604 else
605 skip = true;
606 }
607 // if neither is true, v stays zero
608 }
609
610 // store moved grids in send buffers
611 if (!skip)
612 {
613 send_sendgrid[i.index()] = sendgrid.move(v);
614 send_recvgrid[i.index()] = recvgrid.move(v);
615 }
616 else
617 {
618 send_sendgrid[i.index()] = YGridComponent<Coordinates>();
619 send_recvgrid[i.index()] = YGridComponent<Coordinates>();
620 }
621 }
622
623 // issue send requests for sendgrid being sent to all neighbors
624 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
625 {
626 mpifriendly_send_sendgrid[i.index()] = mpifriendly_ygrid(send_sendgrid[i.index()]);
627 _torus.send(i.rank(), &mpifriendly_send_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
628 }
629
630 // issue recv requests for sendgrids of neighbors
631 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
632 _torus.recv(i.rank(), &mpifriendly_recv_sendgrid[i.index()], sizeof(mpifriendly_ygrid));
633
634 // exchange the sendgrids
635 _torus.exchange();
636
637 // issue send requests for recvgrid being sent to all neighbors
638 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.sendbegin(); i!=_torus.sendend(); ++i)
639 {
640 mpifriendly_send_recvgrid[i.index()] = mpifriendly_ygrid(send_recvgrid[i.index()]);
641 _torus.send(i.rank(), &mpifriendly_send_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
642 }
643
644 // issue recv requests for recvgrid of neighbors
645 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
646 _torus.recv(i.rank(), &mpifriendly_recv_recvgrid[i.index()], sizeof(mpifriendly_ygrid));
647
648 // exchange the recvgrid
649 _torus.exchange();
650
651 // process receive buffers and compute intersections
652 for (typename Torus<Communication,dim>::ProcListIterator i=_torus.recvbegin(); i!=_torus.recvend(); ++i)
653 {
654 // what must be sent to this neighbor
655 Intersection send_intersection{};
656 mpifriendly_ygrid yg = mpifriendly_recv_recvgrid[i.index()];
657 recv_recvgrid[i.index()] = YGridComponent<Coordinates>(yg.origin,yg.size);
658 send_intersection.grid = sendgrid.intersection(recv_recvgrid[i.index()]);
659 send_intersection.rank = i.rank();
660 send_intersection.distance = i.distance();
661 if (!send_intersection.grid.empty()) sendlist.push_front(send_intersection);
662
663 Intersection recv_intersection{};
664 yg = mpifriendly_recv_sendgrid[i.index()];
665 recv_sendgrid[i.index()] = YGridComponent<Coordinates>(yg.origin,yg.size);
666 recv_intersection.grid = recvgrid.intersection(recv_sendgrid[i.index()]);
667 recv_intersection.rank = i.rank();
668 recv_intersection.distance = i.distance();
669 if(!recv_intersection.grid.empty()) recvlist.push_back(recv_intersection);
670 }
671 }
672
673 protected:
674
675 typedef const YaspGrid<dim,Coordinates> GridImp;
676
677 void init()
678 {
679 indexsets.push_back( std::make_shared< YaspIndexSet<const YaspGrid<dim, Coordinates>, false > >(*this,0) );
680 boundarysegmentssize();
681 }
682
683 void boundarysegmentssize()
684 {
685 // sizes of local macro grid
686 std::array<int, dim> sides;
687 {
688 for (int i=0; i<dim; i++)
689 {
690 sides[i] =
691 ((begin()->overlap[0].dataBegin()->origin(i) == 0)+
692 (begin()->overlap[0].dataBegin()->origin(i) + begin()->overlap[0].dataBegin()->size(i)
693 == levelSize(0,i)));
694 }
695 }
696 nBSegments = 0;
697 for (int k=0; k<dim; k++)
698 {
699 int offset = 1;
700 for (int l=0; l<dim; l++)
701 {
702 if (l==k) continue;
703 offset *= begin()->overlap[0].dataBegin()->size(l);
704 }
705 nBSegments += sides[k]*offset;
706 }
707 }
708
709 public:
710
711 // define the persistent index type
712 typedef bigunsignedint<dim*yaspgrid_dim_bits+yaspgrid_level_bits+dim> PersistentIndexType;
713
715 typedef YaspGridFamily<dim, Coordinates> GridFamily;
716 // the Traits
717 typedef typename YaspGridFamily<dim, Coordinates>::Traits Traits;
718
719 // need for friend declarations in entity
720 typedef YaspIndexSet<YaspGrid<dim, Coordinates>, false > LevelIndexSetType;
721 typedef YaspIndexSet<YaspGrid<dim, Coordinates>, true > LeafIndexSetType;
722 typedef YaspGlobalIdSet<YaspGrid<dim, Coordinates> > GlobalIdSetType;
723
731 YaspGrid (const Coordinates& coordinates,
732 std::bitset<dim> periodic = std::bitset<dim>(0ULL),
733 int overlap = 1,
734 Communication comm = Communication(),
735 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
736 : ccobj(comm)
737 , leafIndexSet_(*this)
738 , _periodic(periodic)
739 , _overlap(overlap)
740 , keep_ovlp(true)
741 , adaptRefCount(0)
742 , adaptActive(false)
743 {
744 _levels.resize(1);
745
746 // Number of elements per coordinate direction on the coarsest level
747 for (std::size_t i=0; i<dim; i++)
748 _coarseSize[i] = coordinates.size(i);
749
750 // Construct the communication torus
751 _torus = decltype(_torus)(comm,tag,_coarseSize,overlap,partitioner);
752
753 iTupel o;
754 std::fill(o.begin(), o.end(), 0);
755 iTupel o_interior(o);
756 iTupel s_interior(_coarseSize);
757
758 _torus.partition(_torus.rank(),o,_coarseSize,o_interior,s_interior);
759
760 // Set domain size
761 if (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value
762 || std::is_same<Coordinates,EquidistantOffsetCoordinates<ctype,dim> >::value)
763 {
764 for (std::size_t i=0; i<dim; i++)
765 _L[i] = coordinates.size(i) * coordinates.meshsize(i,0);
766 }
767 if (std::is_same<Coordinates,TensorProductCoordinates<ctype,dim> >::value)
768 {
769 //determine sizes of vector to correctly construct torus structure and store for later size requests
770 for (int i=0; i<dim; i++)
771 _L[i] = coordinates.coordinate(i,_coarseSize[i]) - coordinates.coordinate(i,0);
772 }
773
774#if HAVE_MPI
775 // TODO: Settle on a single value for all coordinate types
776 int mysteryFactor = (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value) ? 1 : 2;
777
778 // check whether the grid is large enough to be overlapping
779 for (int i=0; i<dim; i++)
780 {
781 // find out whether the grid is too small to be overlapping
782 int toosmall = (s_interior[i] <= mysteryFactor * overlap) && // interior is very small
783 (periodic[i] || (s_interior[i] != _coarseSize[i])); // there is an overlap in that direction
784 // communicate the result to all those processes to have all processors error out if one process failed.
785 int global = 0;
786 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
787 if (global)
788 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
789 " Note that this also holds for DOFs on subdomain boundaries."
790 " Increase grid elements or decrease overlap accordingly.");
791 }
792#endif // #if HAVE_MPI
793
794 if (std::is_same<Coordinates,EquidistantCoordinates<ctype,dim> >::value
795 || std::is_same<Coordinates,EquidistantOffsetCoordinates<ctype,dim> >::value)
796 {
797 iTupel s_overlap(s_interior);
798 for (int i=0; i<dim; i++)
799 {
800 if ((o_interior[i] - overlap > 0) || (periodic[i]))
801 s_overlap[i] += overlap;
802 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
803 s_overlap[i] += overlap;
804 }
805
806 FieldVector<ctype,dim> upperRightWithOverlap;
807 for (int i=0; i<dim; i++)
808 upperRightWithOverlap[i] = coordinates.coordinate(i,0) + coordinates.meshsize(i,0) * s_overlap[i];
809
810 if constexpr (std::is_same_v<Coordinates,EquidistantCoordinates<ctype,dim>>)
811 {
812 // New coordinate object that additionally contains the overlap elements
813 EquidistantCoordinates<ctype,dim> coordinatesWithOverlap(upperRightWithOverlap,s_overlap);
814
815 // add level (the this-> is needed to make g++-6 happy)
816 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
817 }
818
819 if constexpr (std::is_same_v<Coordinates,EquidistantOffsetCoordinates<ctype,dim>>)
820 {
821 Dune::FieldVector<ctype,dim> lowerleft;
822 for (int i=0; i<dim; i++)
823 lowerleft[i] = coordinates.origin(i);
824
825 // New coordinate object that additionally contains the overlap elements
826 EquidistantOffsetCoordinates<ctype,dim> coordinatesWithOverlap(lowerleft,upperRightWithOverlap,s_overlap);
827
828 // add level (the this-> is needed to make g++-6 happy)
829 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
830 }
831 }
832
833 if constexpr (std::is_same_v<Coordinates,TensorProductCoordinates<ctype,dim>>)
834 {
835 std::array<std::vector<ctype>,dim> newCoords;
836 std::array<int, dim> offset(o_interior);
837
838 // find the relevant part of the coords vector for this processor and copy it to newCoords
839 for (int i=0; i<dim; ++i)
840 {
841 //define the coordinate range to be used
842 std::size_t begin = o_interior[i];
843 std::size_t end = begin + s_interior[i] + 1;
844
845 // check whether we are not at the physical boundary. In that case overlap is a simple
846 // extension of the coordinate range to be used
847 if (o_interior[i] - overlap > 0)
848 {
849 begin = begin - overlap;
850 offset[i] -= overlap;
851 }
852 if (o_interior[i] + s_interior[i] + overlap < _coarseSize[i])
853 end = end + overlap;
854
855 //copy the selected part in the new coord vector
856 newCoords[i].resize(end-begin);
857 auto newCoordsIt = newCoords[i].begin();
858 for (std::size_t j=begin; j<end; j++)
859 {
860 *newCoordsIt = coordinates.coordinate(i, j);
861 newCoordsIt++;
862 }
863
864 // Check whether we are at the physical boundary and have a periodic grid.
865 // In this case the coordinate vector has to be tweaked manually.
866 if ((periodic[i]) && (o_interior[i] + s_interior[i] + overlap >= _coarseSize[i]))
867 {
868 // we need to add the first <overlap> cells to the end of newcoords
869 for (int j=0; j<overlap; ++j)
870 newCoords[i].push_back(newCoords[i].back() - coordinates.coordinate(i,j) + coordinates.coordinate(i,j+1));
871 }
872
873 if ((periodic[i]) && (o_interior[i] - overlap <= 0))
874 {
875 offset[i] -= overlap;
876
877 // we need to add the last <overlap> cells to the begin of newcoords
878 std::size_t reverseCounter = coordinates.size(i);
879 for (int j=0; j<overlap; ++j, --reverseCounter)
880 newCoords[i].insert(newCoords[i].begin(), newCoords[i].front()
881 - coordinates.coordinate(i,reverseCounter) + coordinates.coordinate(i,reverseCounter-1));
882 }
883 }
884
885 TensorProductCoordinates<ctype,dim> coordinatesWithOverlap(newCoords, offset);
886
887 // add level (the this-> is needed to make g++-6 happy)
888 this->makelevel(coordinatesWithOverlap,periodic,o_interior,overlap);
889 }
890
891 init();
892 }
893
902 template<class C = Coordinates,
903 typename std::enable_if_t< std::is_same_v<C, EquidistantCoordinates<ctype,dim> >, int> = 0>
904 YaspGrid (Dune::FieldVector<ctype, dim> L,
905 std::array<int, std::size_t{dim}> s,
906 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>{0ULL},
907 int overlap = 1,
908 Communication comm = Communication(),
909 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
910 : ccobj(comm), _torus(comm,tag,s,overlap,partitioner), leafIndexSet_(*this),
911 _L(L), _periodic(periodic), _coarseSize(s), _overlap(overlap),
912 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
913 {
914 _levels.resize(1);
915
916 iTupel o;
917 std::fill(o.begin(), o.end(), 0);
918 iTupel o_interior(o);
919 iTupel s_interior(s);
920
921 _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
922
923#if HAVE_MPI
924 // check whether the grid is large enough to be overlapping
925 for (int i=0; i<dim; i++)
926 {
927 // find out whether the grid is too small to be overlapping
928 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
929 (periodic[i] || (s_interior[i] != s[i])); // there is an overlap in that direction
930 // communicate the result to all those processes to have all processors error out if one process failed.
931 int global = 0;
932 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
933 if (global)
934 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
935 " Note that this also holds for DOFs on subdomain boundaries."
936 " Increase grid elements or decrease overlap accordingly.");
937 }
938#endif // #if HAVE_MPI
939
940 iTupel s_overlap(s_interior);
941 for (int i=0; i<dim; i++)
942 {
943 if ((o_interior[i] - overlap > 0) || (periodic[i]))
944 s_overlap[i] += overlap;
945 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
946 s_overlap[i] += overlap;
947 }
948
949 FieldVector<ctype,dim> upperRightWithOverlap;
950
951 for (int i=0; i<dim; i++)
952 upperRightWithOverlap[i] = (L[i] / s[i]) * s_overlap[i];
953
954 // New coordinate object that additionally contains the overlap elements
955 EquidistantCoordinates<ctype,dim> cc(upperRightWithOverlap,s_overlap);
956
957 // add level
958 makelevel(cc,periodic,o_interior,overlap);
959
960 init();
961 }
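 /* Usage sketch (not part of this header; assumes a configured dune-grid setup):
  * \code
  * Dune::FieldVector<double,2> len = {1.0, 1.0};   // upper right corner, origin at 0
  * std::array<int,2> cells = {8, 8};               // coarse cells per direction
  * Dune::YaspGrid<2> grid(len, cells);             // equidistant grid with overlap 1
  * \endcode
  */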
962
972 template<class C = Coordinates,
973 typename std::enable_if_t< std::is_same_v<C, EquidistantOffsetCoordinates<ctype,dim> >, int> = 0>
974 YaspGrid (Dune::FieldVector<ctype, dim> lowerleft,
975 Dune::FieldVector<ctype, dim> upperright,
976 std::array<int, std::size_t{dim}> s,
977 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>(0ULL),
978 int overlap = 1,
979 Communication comm = Communication(),
980 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
981 : ccobj(comm), _torus(comm,tag,s,overlap,partitioner), leafIndexSet_(*this),
982 _L(upperright - lowerleft),
983 _periodic(periodic), _coarseSize(s), _overlap(overlap),
984 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
985 {
986 _levels.resize(1);
987
988 iTupel o;
989 std::fill(o.begin(), o.end(), 0);
990 iTupel o_interior(o);
991 iTupel s_interior(s);
992
993 _torus.partition(_torus.rank(),o,s,o_interior,s_interior);
994
995#if HAVE_MPI
996 // check whether the grid is large enough to be overlapping
997 for (int i=0; i<dim; i++)
998 {
999 // find out whether the grid is too small to be overlapping
1000 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
1001 (periodic[i] || (s_interior[i] != s[i])); // there is an overlap in that direction
1002 // communicate the result to all those processes to have all processors error out if one process failed.
1003 int global = 0;
1004 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
1005 if (global)
1006 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
1007 " Note that this also holds for DOFs on subdomain boundaries."
1008 " Increase grid elements or decrease overlap accordingly.");
1009 }
1010#endif // #if HAVE_MPI
1011
1012 iTupel s_overlap(s_interior);
1013 for (int i=0; i<dim; i++)
1014 {
1015 if ((o_interior[i] - overlap > 0) || (periodic[i]))
1016 s_overlap[i] += overlap;
1017 if ((o_interior[i] + s_interior[i] + overlap <= _coarseSize[i]) || (periodic[i]))
1018 s_overlap[i] += overlap;
1019 }
1020
1021 FieldVector<ctype,dim> upperRightWithOverlap;
1022 for (int i=0; i<dim; i++)
1023 upperRightWithOverlap[i] = lowerleft[i]
1024 + s_overlap[i] * (upperright[i]-lowerleft[i]) / s[i];
1025
1026 EquidistantOffsetCoordinates<ctype,dim> cc(lowerleft,upperRightWithOverlap,s_overlap);
1027
1028 // add level
1029 makelevel(cc,periodic,o_interior,overlap);
1030
1031 init();
1032 }
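 /* Usage sketch (not part of this header): the same kind of grid on a shifted domain.
  * \code
  * using Grid = Dune::YaspGrid<2, Dune::EquidistantOffsetCoordinates<double,2> >;
  * Dune::FieldVector<double,2> lower = {-1.0, 0.0};
  * Dune::FieldVector<double,2> upper = { 1.0, 2.0};
  * std::array<int,2> cells = {16, 32};
  * Grid grid(lower, upper, cells);                 // covers [-1,1] x [0,2]
  * \endcode
  */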
1033
1041 template<class C = Coordinates,
1042 typename std::enable_if_t< std::is_same_v<C, TensorProductCoordinates<ctype,dim> >, int> = 0>
1043 YaspGrid (std::array<std::vector<ctype>, std::size_t{dim}> coords,
1044 std::bitset<std::size_t{dim}> periodic = std::bitset<std::size_t{dim}>(0ULL),
1045 int overlap = 1,
1046 Communication comm = Communication(),
1047 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
1048 : ccobj(comm), _torus(comm,tag,Dune::Yasp::sizeArray<dim>(coords),overlap,partitioner),
1049 leafIndexSet_(*this), _periodic(periodic), _overlap(overlap),
1050 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
1051 {
1052 if (!Dune::Yasp::checkIfMonotonous(coords))
1053 DUNE_THROW(Dune::GridError,"Setup of a tensorproduct grid requires monotonous sequences of coordinates.");
1054
1055 _levels.resize(1);
1056
1057 //determine sizes of vector to correctly construct torus structure and store for later size requests
1058 for (int i=0; i<dim; i++) {
1059 _coarseSize[i] = coords[i].size() - 1;
1060 _L[i] = coords[i][_coarseSize[i]] - coords[i][0];
1061 }
1062
1063 iTupel o;
1064 std::fill(o.begin(), o.end(), 0);
1065 iTupel o_interior(o);
1066 iTupel s_interior(_coarseSize);
1067
1068 _torus.partition(_torus.rank(),o,_coarseSize,o_interior,s_interior);
1069
1070#if HAVE_MPI
1071 // check whether the grid is large enough to be overlapping
1072 for (int i=0; i<dim; i++)
1073 {
1074 // find out whether the grid is too small to be overlapping
1075 int toosmall = (s_interior[i] < 2*overlap) && // interior is very small
1076 (periodic[i] || (s_interior[i] != _coarseSize[i])); // there is an overlap in that direction
1077 // communicate the result to all those processes to have all processors error out if one process failed.
1078 int global = 0;
1079 MPI_Allreduce(&toosmall, &global, 1, MPI_INT, MPI_LOR, comm);
1080 if (global)
1081 DUNE_THROW(Dune::GridError,"YaspGrid does not support degrees of freedom shared by more than immediately neighboring subdomains."
1082 " Note that this also holds for DOFs on subdomain boundaries."
1083 " Increase grid elements or decrease overlap accordingly.");
1084 }
1085#endif // #if HAVE_MPI
1086
1087
1088 std::array<std::vector<ctype>,dim> newcoords;
1089 std::array<int, dim> offset(o_interior);
1090
1091 // find the relevant part of the coords vector for this processor and copy it to newcoords
1092 for (int i=0; i<dim; ++i)
1093 {
1094 //define iterators on coords that specify the coordinate range to be used
1095 typename std::vector<ctype>::iterator begin = coords[i].begin() + o_interior[i];
1096 typename std::vector<ctype>::iterator end = begin + s_interior[i] + 1;
1097
1098 // check whether we are not at the physical boundary. In that case overlap is a simple
1099 // extension of the coordinate range to be used
1100 if (o_interior[i] - overlap > 0)
1101 {
1102 begin = begin - overlap;
1103 offset[i] -= overlap;
1104 }
1105 if (o_interior[i] + s_interior[i] + overlap < _coarseSize[i])
1106 end = end + overlap;
1107
1108 //copy the selected part in the new coord vector
1109 newcoords[i].resize(end-begin);
1110 std::copy(begin, end, newcoords[i].begin());
1111
1112 // check whether we are at the physical boundary and have a periodic grid.
1113 // In this case the coordinate vector has to be tweaked manually.
1114 if ((periodic[i]) && (o_interior[i] + s_interior[i] + overlap >= _coarseSize[i]))
1115 {
1116 // we need to add the first <overlap> cells to the end of newcoords
1117 typename std::vector<ctype>::iterator it = coords[i].begin();
1118 for (int j=0; j<overlap; ++j)
1119 newcoords[i].push_back(newcoords[i].back() - *it + *(++it));
1120 }
1121
1122 if ((periodic[i]) && (o_interior[i] - overlap <= 0))
1123 {
1124 offset[i] -= overlap;
1125
1126 // we need to add the last <overlap> cells to the begin of newcoords
1127 typename std::vector<ctype>::iterator it = coords[i].end() - 1;
1128 for (int j=0; j<overlap; ++j)
1129 newcoords[i].insert(newcoords[i].begin(), newcoords[i].front() - *it + *(--it));
1130 }
1131 }
1132
1133 TensorProductCoordinates<ctype,dim> cc(newcoords, offset);
1134
1135 // add level
1136 makelevel(cc,periodic,o_interior,overlap);
1137 init();
1138 }
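 /* Usage sketch (not part of this header): one strictly increasing vector of vertex
  * coordinates per direction defines the tensor product mesh.
  * \code
  * std::array<std::vector<double>,2> coords;
  * coords[0] = {0.0, 0.1, 0.3, 0.6, 1.0};          // 4 cells in x, graded towards 0
  * coords[1] = {0.0, 0.5, 1.0};                    // 2 cells in y
  * Dune::YaspGrid<2, Dune::TensorProductCoordinates<double,2> > grid(coords);
  * \endcode
  */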
1139
1140 private:
1141
1156 YaspGrid (std::array<std::vector<ctype>, std::size_t{dim}> coords,
1157 std::bitset<std::size_t{dim}> periodic,
1158 int overlap,
1159 Communication comm,
1160 std::array<int,dim> coarseSize,
1161 const Yasp::Partitioning<dim>* partitioner = defaultPartitioner())
1162 : ccobj(comm), _torus(comm,tag,coarseSize,overlap,partitioner), leafIndexSet_(*this),
1163 _periodic(periodic), _coarseSize(coarseSize), _overlap(overlap),
1164 keep_ovlp(true), adaptRefCount(0), adaptActive(false)
1165 {
1166 // check whether YaspGrid has been given the correct template parameter
1167 static_assert(std::is_same<Coordinates,TensorProductCoordinates<ctype,dim> >::value,
1168 "YaspGrid coordinate container template parameter and given constructor values do not match!");
1169
1170 if (!Dune::Yasp::checkIfMonotonous(coords))
1171 DUNE_THROW(Dune::GridError,"Setup of a tensorproduct grid requires monotonous sequences of coordinates.");
1172
1173 for (int i=0; i<dim; i++)
1174 _L[i] = coords[i][coords[i].size() - 1] - coords[i][0];
1175
1176 _levels.resize(1);
1177
1178 std::array<int,dim> o;
1179 std::fill(o.begin(), o.end(), 0);
1180 std::array<int,dim> o_interior(o);
1181 std::array<int,dim> s_interior(coarseSize);
1182
1183 _torus.partition(_torus.rank(),o,coarseSize,o_interior,s_interior);
1184
1185 // get offset by modifying o_interior according to overlap
1186 std::array<int,dim> offset(o_interior);
1187 for (int i=0; i<dim; i++)
1188 if ((periodic[i]) || (o_interior[i] > 0))
1189 offset[i] -= overlap;
1190
1191 TensorProductCoordinates<ctype,dim> cc(coords, offset);
1192
1193 // add level
1194 makelevel(cc,periodic,o_interior,overlap);
1195
1196 init();
1197 }
1198
1199 // the backup restore facility needs to be able to use above constructor
1200 friend struct BackupRestoreFacility<YaspGrid<dim,Coordinates> >;
1201
1202 // do not copy this class
1203 YaspGrid(const YaspGrid&);
1204
1205 public:
1206
1210 int maxLevel() const
1211 {
1212 return _levels.size()-1;
1213 }
1214
1216 void globalRefine (int refCount)
1217 {
1218 if (refCount < -maxLevel())
1219 DUNE_THROW(GridError, "Only " << maxLevel() << " levels left. " <<
1220 "Coarsening " << -refCount << " levels requested!");
1221
1222 // If refCount is negative then coarsen the grid
1223 for (int k=refCount; k<0; k++)
1224 {
1225 // create an empty grid level
1226 YGridLevel empty;
1227 _levels.back() = empty;
1228 // reduce maxlevel
1229 _levels.pop_back();
1230
1231 indexsets.pop_back();
1232 }
1233
1234 // If refCount is positive refine the grid
1235 for (int k=0; k<refCount; k++)
1236 {
1237 // access to coarser grid level
1238 YGridLevel& cg = _levels[maxLevel()];
1239
1240 std::bitset<dim> ovlp_low(0ULL), ovlp_up(0ULL);
1241 for (int i=0; i<dim; i++)
1242 {
1243 if (cg.overlap[0].dataBegin()->origin(i) > 0 || _periodic[i])
1244 ovlp_low[i] = true;
1245 if (cg.overlap[0].dataBegin()->max(i) + 1 < globalSize(i) || _periodic[i])
1246 ovlp_up[i] = true;
1247 }
1248
1249 Coordinates newcont(cg.coords.refine(ovlp_low, ovlp_up, cg.overlapSize, keep_ovlp));
1250
1251 int overlap = (keep_ovlp) ? 2*cg.overlapSize : cg.overlapSize;
1252
1253 //determine new origin
1254 iTupel o_interior;
1255 for (int i=0; i<dim; i++)
1256 o_interior[i] = 2*cg.interior[0].dataBegin()->origin(i);
1257
1258 // add level
1259 _levels.resize(_levels.size() + 1);
1260 makelevel(newcont,_periodic,o_interior,overlap);
1261
1262 indexsets.push_back( std::make_shared<YaspIndexSet<const YaspGrid<dim,Coordinates>, false > >(*this,maxLevel()) );
1263 }
1264 }
1265
1270 void refineOptions (bool keepPhysicalOverlap)
1271 {
1272 keep_ovlp = keepPhysicalOverlap;
1273 }
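 /* Usage sketch (not part of this header): choose the overlap policy before refining.
  * \code
  * grid.refineOptions(false);   // do not keep the physical overlap size on refinement
  * grid.globalRefine(2);        // two uniform refinement steps
  * \endcode
  */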
1274
1286 bool mark( int refCount, const typename Traits::template Codim<0>::Entity & e )
1287 {
1288 assert(adaptActive == false);
1289 if (e.level() != maxLevel()) return false;
1290 adaptRefCount = std::max(adaptRefCount, refCount);
1291 return true;
1292 }
1293
1300 int getMark ( const typename Traits::template Codim<0>::Entity &e ) const
1301 {
1302 return ( e.level() == maxLevel() ) ? adaptRefCount : 0;
1303 }
1304
1306 bool adapt ()
1307 {
1308 globalRefine(adaptRefCount);
1309 return (adaptRefCount > 0);
1310 }
1311
1313 bool preAdapt ()
1314 {
1315 adaptActive = true;
1316 adaptRefCount = comm().max(adaptRefCount);
1317 return (adaptRefCount < 0);
1318 }
1319
1321 void postAdapt()
1322 {
1323 adaptActive = false;
1324 adaptRefCount = 0;
1325 }
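 /* Usage sketch (not part of this header; assumes <dune/grid/common/rangegenerators.hh>
  * for elements()): YaspGrid refines only uniformly, so marking entities of the finest
  * level makes adapt() perform a globalRefine.
  * \code
  * for (const auto& e : elements(grid.leafGridView()))
  *   grid.mark(1, e);           // only entities on maxLevel() are accepted
  * grid.preAdapt();
  * grid.adapt();                // here equivalent to globalRefine(1)
  * grid.postAdapt();
  * \endcode
  */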
1326
1328 template<int cd, PartitionIteratorType pitype>
1329 typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lbegin (int level) const
1330 {
1331 return levelbegin<cd,pitype>(level);
1332 }
1333
1335 template<int cd, PartitionIteratorType pitype>
1336 typename Traits::template Codim<cd>::template Partition<pitype>::LevelIterator lend (int level) const
1337 {
1338 return levelend<cd,pitype>(level);
1339 }
1340
1342 template<int cd>
1343 typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lbegin (int level) const
1344 {
1345 return levelbegin<cd,All_Partition>(level);
1346 }
1347
1349 template<int cd>
1350 typename Traits::template Codim<cd>::template Partition<All_Partition>::LevelIterator lend (int level) const
1351 {
1352 return levelend<cd,All_Partition>(level);
1353 }
1354
1356 template<int cd, PartitionIteratorType pitype>
1357 typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafbegin () const
1358 {
1359 return levelbegin<cd,pitype>(maxLevel());
1360 }
1361
1363 template<int cd, PartitionIteratorType pitype>
1364 typename Traits::template Codim<cd>::template Partition<pitype>::LeafIterator leafend () const
1365 {
1366 return levelend<cd,pitype>(maxLevel());
1367 }
1368
1370 template<int cd>
1371 typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafbegin () const
1372 {
1373 return levelbegin<cd,All_Partition>(maxLevel());
1374 }
1375
1377 template<int cd>
1378 typename Traits::template Codim<cd>::template Partition<All_Partition>::LeafIterator leafend () const
1379 {
1380 return levelend<cd,All_Partition>(maxLevel());
1381 }
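 /* Usage sketch (not part of this header): iterate over interior leaf cells directly
  * through the grid; grid views offer the same iteration more conveniently.
  * \code
  * for (auto it = grid.leafbegin<0, Dune::Interior_Partition>();
  *      it != grid.leafend<0, Dune::Interior_Partition>(); ++it)
  * {
  *   const auto& cell = *it;    // a codim-0 interior leaf entity
  * }
  * \endcode
  */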
1382
1383 /** \brief obtain Entity from EntitySeed. */
1384 template <typename Seed>
1385 typename Traits::template Codim<Seed::codimension>::Entity
1386 entity(const Seed& seed) const
1387 {
1388 const int codim = Seed::codimension;
1389 YGridLevelIterator g = begin(seed.impl().level());
1390
1391 typedef typename Traits::template Codim<Seed::codimension>::Entity Entity;
1392 typedef YaspEntity<codim,dim,const YaspGrid> EntityImp;
1393 typedef typename YGrid::Iterator YIterator;
1394
1395 return Entity(EntityImp(g,YIterator(g->overlapfront[codim],seed.impl().coord(),seed.impl().offset())));
1396 }
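 /* Usage sketch (not part of this header): entity seeds are cheap, persistent handles
  * that can be turned back into full entities with entity().
  * \code
  * const auto element = *grid.leafGridView().begin<0>();
  * const auto seed = element.seed();
  * const auto sameElement = grid.entity(seed);     // recovers the codim-0 entity
  * \endcode
  */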
1397
1399 int overlapSize (int level, [[maybe_unused]] int codim) const
1400 {
1401 YGridLevelIterator g = begin(level);
1402 return g->overlapSize;
1403 }
1404
1406 int overlapSize ([[maybe_unused]] int codim) const
1407 {
1408 YGridLevelIterator g = begin(maxLevel());
1409 return g->overlapSize;
1410 }
1411
1413 int ghostSize ([[maybe_unused]] int level, [[maybe_unused]] int codim) const
1414 {
1415 return 0;
1416 }
1417
1419 int ghostSize ([[maybe_unused]] int codim) const
1420 {
1421 return 0;
1422 }
1423
1425 int size (int level, int codim) const
1426 {
1427 YGridLevelIterator g = begin(level);
1428
1429 // sum over all components of the codimension
1430 int count = 0;
1431 for (auto it = g->overlapfront[codim].dataBegin(); it != g->overlapfront[codim].dataEnd(); ++it)
1432 count += it->totalsize();
1433
1434 return count;
1435 }
1436
1438 int size (int codim) const
1439 {
1440 return size(maxLevel(),codim);
1441 }
1442
1444 int size (int level, GeometryType type) const
1445 {
1446 return (type.isCube()) ? size(level,dim-type.dim()) : 0;
1447 }
1448
1450 int size (GeometryType type) const
1451 {
1452 return size(maxLevel(),type);
1453 }
1454
1456 size_t numBoundarySegments () const
1457 {
1458 return nBSegments;
1459 }
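 // Example: on a single process a 2d level-0 grid with N x M cells has
 // 2*N + 2*M boundary segments, e.g. 8 x 8 cells give 32 segments.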
1460
1463 return _L;
1464 }
1465
1470 template<class DataHandleImp, class DataType>
1471 void communicate (CommDataHandleIF<DataHandleImp,DataType> & data, InterfaceType iftype, CommunicationDirection dir, int level) const
1472 {
1473 YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,level);
1474 }
1475
1480 template<class DataHandleImp, class DataType>
1481 void communicate (CommDataHandleIF<DataHandleImp,DataType> & data, InterfaceType iftype, CommunicationDirection dir) const
1482 {
1483 YaspCommunicateMeta<dim,dim>::comm(*this,data,iftype,dir,this->maxLevel());
1484 }
1485
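 /* Usage sketch (not part of this header; assumes <dune/grid/common/datahandleif.hh>
  * and the YaspGrid<2> "grid" from the sketches above): a minimal fixed-size data
  * handle that accumulates one double per codim-0 entity across process boundaries.
  * \code
  * using LeafView = Dune::YaspGrid<2>::LeafGridView;
  * struct CellSum : Dune::CommDataHandleIF<CellSum, double>
  * {
  *   const LeafView& gv; std::vector<double>& val;   // val indexed by the leaf index set
  *   CellSum(const LeafView& g, std::vector<double>& v) : gv(g), val(v) {}
  *   bool contains (int dim, int codim) const { return codim == 0; }
  *   bool fixedSize (int dim, int codim) const { return true; }
  *   template<class Entity> std::size_t size (const Entity&) const { return 1; }
  *   template<class Buffer, class Entity>
  *   void gather (Buffer& b, const Entity& e) const { b.write(val[gv.indexSet().index(e)]); }
  *   template<class Buffer, class Entity>
  *   void scatter (Buffer& b, const Entity& e, std::size_t) { double x; b.read(x); val[gv.indexSet().index(e)] += x; }
  * };
  * const auto gv = grid.leafGridView();
  * std::vector<double> values(gv.size(0), 1.0);
  * CellSum handle(gv, values);
  * grid.communicate(handle, Dune::InteriorBorder_All_Interface, Dune::ForwardCommunication);
  * \endcode
  */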
1490 template<class DataHandle, int codim>
1491 void communicateCodim (DataHandle& data, InterfaceType iftype, CommunicationDirection dir, int level) const
1492 {
1493 // check input
1494 if (!data.contains(dim,codim)) return; // should have been checked outside
1495
1496 // data types
1497 typedef typename DataHandle::DataType DataType;
1498
1499 // access to grid level
1500 YGridLevelIterator g = begin(level);
1501
1502 // find send/recv lists or throw error
1503 const YGridList<Coordinates>* sendlist = 0;
1504 const YGridList<Coordinates>* recvlist = 0;
1505
1506 if (iftype==InteriorBorder_InteriorBorder_Interface)
1507 {
1508 sendlist = &g->send_interiorborder_interiorborder[codim];
1509 recvlist = &g->recv_interiorborder_interiorborder[codim];
1510 }
1511 if (iftype==InteriorBorder_All_Interface)
1512 {
1513 sendlist = &g->send_interiorborder_overlapfront[codim];
1514 recvlist = &g->recv_overlapfront_interiorborder[codim];
1515 }
1516 if (iftype==Overlap_OverlapFront_Interface || iftype==Overlap_All_Interface)
1517 {
1518 sendlist = &g->send_overlap_overlapfront[codim];
1519 recvlist = &g->recv_overlapfront_overlap[codim];
1520 }
1521 if (iftype==All_All_Interface)
1522 {
1523 sendlist = &g->send_overlapfront_overlapfront[codim];
1524 recvlist = &g->recv_overlapfront_overlapfront[codim];
1525 }
1526
1527 // change communication direction?
1528 if (dir==BackwardCommunication)
1529 std::swap(sendlist,recvlist);
1530
1531 int cnt;
1532
1533 // Size computation (requires communication if variable size)
1534 std::vector<int> send_size(sendlist->size(),-1); // map rank to total number of objects (of type DataType) to be sent
1535 std::vector<int> recv_size(recvlist->size(),-1); // map rank to total number of objects (of type DataType) to be recvd
1536 std::vector<size_t*> send_sizes(sendlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be sent
1537 std::vector<size_t*> recv_sizes(recvlist->size(),static_cast<size_t*>(0)); // map rank to array giving number of objects per entity to be recvd
1538
1539 // define type to iterate over send and recv lists
1540 typedef typename YGridList<Coordinates>::Iterator ListIt;
1541
1542 if (data.fixedSize(dim,codim))
1543 {
1544 // fixed size: just take a dummy entity, size can be computed without communication
1545 cnt=0;
1546 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1547 {
1548 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1549 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1550 send_size[cnt] = is->grid.totalsize() * data.size(*it);
1551 cnt++;
1552 }
1553 cnt=0;
1554 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1555 {
1556 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1557 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1558 recv_size[cnt] = is->grid.totalsize() * data.size(*it);
1559 cnt++;
1560 }
1561 }
1562 else
1563 {
1564 // variable size case: sender side determines the size
1565 cnt=0;
1566 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1567 {
1568 // allocate send buffer for sizes per entity
1569 size_t *buf = new size_t[is->grid.totalsize()];
1570 send_sizes[cnt] = buf;
1571
1572 // loop over entities and ask for size
1573 int i=0; size_t n=0;
1574 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1575 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1576 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1577 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1578 for ( ; it!=itend; ++it)
1579 {
1580 buf[i] = data.size(*it);
1581 n += buf[i];
1582 i++;
1583 }
1584
1585 // now we know the size for this rank
1586 send_size[cnt] = n;
1587
1588 // hand over send request to torus class
1589 torus().send(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1590 cnt++;
1591 }
1592
1593 // allocate recv buffers for sizes and store receive request
1594 cnt=0;
1595 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1596 {
1597 // allocate recv buffer
1598 size_t *buf = new size_t[is->grid.totalsize()];
1599 recv_sizes[cnt] = buf;
1600
1601 // hand over recv request to torus class
1602 torus().recv(is->rank,buf,is->grid.totalsize()*sizeof(size_t));
1603 cnt++;
1604 }
1605
1606 // exchange all size buffers now
1607 torus().exchange();
1608
1609 // release send size buffers
1610 cnt=0;
1611 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1612 {
1613 delete[] send_sizes[cnt];
1614 send_sizes[cnt] = 0;
1615 cnt++;
1616 }
1617
1618 // process receive size buffers
1619 cnt=0;
1620 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1621 {
1622 // get recv buffer
1623 size_t *buf = recv_sizes[cnt];
1624
1625 // compute total size
1626 size_t n=0;
1627 for (int i=0; i<is->grid.totalsize(); ++i)
1628 n += buf[i];
1629
1630 // ... and store it
1631 recv_size[cnt] = n;
1632 ++cnt;
1633 }
1634 }
1635
1636
1637 // allocate & fill the send buffers & store send request
1638 std::vector<DataType*> sends(sendlist->size(), static_cast<DataType*>(0)); // store pointers to send buffers
1639 cnt=0;
1640 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1641 {
1642 // allocate send buffer
1643 DataType *buf = new DataType[send_size[cnt]];
1644
1645 // remember send buffer
1646 sends[cnt] = buf;
1647
1648 // make a message buffer
1649 MessageBuffer<DataType> mb(buf);
1650
1651 // fill send buffer; iterate over cells in intersection
1652 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1653 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1654 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1655 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1656 for ( ; it!=itend; ++it)
1657 data.gather(mb,*it);
1658
1659 // hand over send request to torus class
1660 torus().send(is->rank,buf,send_size[cnt]*sizeof(DataType));
1661 cnt++;
1662 }
1663
1664 // allocate recv buffers and store receive request
1665 std::vector<DataType*> recvs(recvlist->size(),static_cast<DataType*>(0)); // store pointers to recv buffers
1666 cnt=0;
1667 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1668 {
1669 // allocate recv buffer
1670 DataType *buf = new DataType[recv_size[cnt]];
1671
1672 // remember recv buffer
1673 recvs[cnt] = buf;
1674
1675 // hand over recv request to torus class
1676 torus().recv(is->rank,buf,recv_size[cnt]*sizeof(DataType));
1677 cnt++;
1678 }
1679
1680 // exchange all buffers now
1681 torus().exchange();
1682
1683 // release send buffers
1684 cnt=0;
1685 for (ListIt is=sendlist->begin(); is!=sendlist->end(); ++is)
1686 {
1687 delete[] sends[cnt];
1688 sends[cnt] = 0;
1689 cnt++;
1690 }
1691
1692 // process receive buffers and delete them
1693 cnt=0;
1694 for (ListIt is=recvlist->begin(); is!=recvlist->end(); ++is)
1695 {
1696 // get recv buffer
1697 DataType *buf = recvs[cnt];
1698
1699 // make a message buffer
1700 MessageBuffer<DataType> mb(buf);
1701
1702 // copy data from receive buffer; iterate over cells in intersection
1703 if (data.fixedSize(dim,codim))
1704 {
1705 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1706 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1707 size_t n=data.size(*it);
1708 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1709 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1710 for ( ; it!=itend; ++it)
1711 data.scatter(mb,*it,n);
1712 }
1713 else
1714 {
1715 int i=0;
1716 size_t *sbuf = recv_sizes[cnt];
1717 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1718 it(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg)));
1719 typename Traits::template Codim<codim>::template Partition<All_Partition>::LevelIterator
1720 itend(YaspLevelIterator<codim,All_Partition,GridImp>(g, typename YGrid::Iterator(is->yg,true)));
1721 for ( ; it!=itend; ++it)
1722 data.scatter(mb,*it,sbuf[i++]);
1723 delete[] sbuf;
1724 }
1725
1726 // delete buffer
1727 delete[] buf; // this is where it crashes!
1728 cnt++;
1729 }
1730 }
1731
1732 // The new index sets from DDM 11.07.2005
1733 const typename Traits::GlobalIdSet& globalIdSet() const
1734 {
1735 return theglobalidset;
1736 }
1737
1738 const typename Traits::LocalIdSet& localIdSet() const
1739 {
1740 return theglobalidset;
1741 }
1742
1743 const typename Traits::LevelIndexSet& levelIndexSet(int level) const
1744 {
1745 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1746 return *(indexsets[level]);
1747 }
1748
1749 const typename Traits::LeafIndexSet& leafIndexSet() const
1750 {
1751 return leafIndexSet_;
1752 }
1753
1756 const Communication& comm () const
1757 {
1758 return ccobj;
1759 }
1760
1761 private:
1762
1763 // number of boundary segments of the level 0 grid
1764 int nBSegments;
1765
1766 // Index classes need access to the real entity
1767 friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim, Coordinates>, true >;
1768 friend class Dune::YaspIndexSet<const Dune::YaspGrid<dim, Coordinates>, false >;
1769 friend class Dune::YaspGlobalIdSet<const Dune::YaspGrid<dim, Coordinates> >;
1770 friend class Dune::YaspPersistentContainerIndex<const Dune::YaspGrid<dim, Coordinates> >;
1771
1772 friend class Dune::YaspIntersectionIterator<const Dune::YaspGrid<dim, Coordinates> >;
1773 friend class Dune::YaspIntersection<const Dune::YaspGrid<dim, Coordinates> >;
1774 friend class Dune::YaspEntity<0, dim, const Dune::YaspGrid<dim, Coordinates> >;
1775
1776 template<int codim_, int dim_, class GridImp_, template<int,int,class> class EntityImp_>
1777 friend class Entity;
1778
1779 template<class DT>
1780 class MessageBuffer {
1781 public:
1782 // Constructor
1783 MessageBuffer (DT *p)
1784 {
1785 a=p;
1786 i=0;
1787 j=0;
1788 }
1789
1790 // write data to message buffer, acts like a stream !
1791 template<class Y>
1792 void write (const Y& data)
1793 {
1794 static_assert(( std::is_same<DT,Y>::value ), "DataType mismatch");
1795 a[i++] = data;
1796 }
1797
1798 // read data from message buffer, acts like a stream !
1799 template<class Y>
1800 void read (Y& data) const
1801 {
1802 static_assert(( std::is_same<DT,Y>::value ), "DataType mismatch");
1803 data = a[j++];
1804 }
1805
1806 private:
1807 DT *a;
1808 int i;
1809 mutable int j;
1810 };
1811
1813 template<int cd, PartitionIteratorType pitype>
1814 YaspLevelIterator<cd,pitype,GridImp> levelbegin (int level) const
1815 {
1816 YGridLevelIterator g = begin(level);
1817 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1818
1819 if (pitype==Interior_Partition)
1820 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interior[cd].begin());
1821 if (pitype==InteriorBorder_Partition)
1822 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interiorborder[cd].begin());
1823 if (pitype==Overlap_Partition)
1824 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlap[cd].begin());
1825 if (pitype<=All_Partition)
1826 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlapfront[cd].begin());
1827 if (pitype==Ghost_Partition)
1828 return levelend <cd, pitype> (level);
1829
1830 DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1831 }
1832
1834 template<int cd, PartitionIteratorType pitype>
1835 YaspLevelIterator<cd,pitype,GridImp> levelend (int level) const
1836 {
1837 if (level<0 || level>maxLevel()) DUNE_THROW(RangeError, "level out of range");
1838 YGridLevelIterator g = begin(level);
1839
1840 if (pitype==Interior_Partition)
1841 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interior[cd].end());
1842 if (pitype==InteriorBorder_Partition)
1843 return YaspLevelIterator<cd,pitype,GridImp>(g,g->interiorborder[cd].end());
1844 if (pitype==Overlap_Partition)
1845 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlap[cd].end());
1846 if (pitype<=All_Partition || pitype == Ghost_Partition)
1847 return YaspLevelIterator<cd,pitype,GridImp>(g,g->overlapfront[cd].end());
1848
1849 DUNE_THROW(GridError, "YaspLevelIterator with this codim or partition type not implemented");
1850 }
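// Note (editorial): for Ghost_Partition, levelbegin() simply returns
// levelend(), i.e. the ghost iterator range is empty; YaspGrid has no ghost
// entities and realizes its parallel halo through overlap and front entities.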
1851
1852 Communication ccobj;
1853
1854 Torus<Communication,dim> _torus;
1855
1856 std::vector< std::shared_ptr< YaspIndexSet<const YaspGrid<dim,Coordinates>, false > > > indexsets;
1857 YaspIndexSet<const YaspGrid<dim,Coordinates>, true> leafIndexSet_;
1858 YaspGlobalIdSet<const YaspGrid<dim,Coordinates> > theglobalidset;
1859
1861 iTupel _s;
1862 std::bitset<dim> _periodic;
1863 iTupel _coarseSize;
1864 ReservedVector<YGridLevel,32> _levels;
1865 int _overlap;
1866 bool keep_ovlp;
1867 int adaptRefCount;
1868 bool adaptActive;
1869 };
1870
1871 // Class template deduction guides
1872 template<typename ctype, int dim>
1873 YaspGrid(FieldVector<ctype, dim>,
1874 std::array<int, std::size_t{dim}>,
1875 std::bitset<std::size_t{dim}> = std::bitset<std::size_t{dim}>{0ULL},
1876 int = 1,
1877 YaspCommunication = YaspCommunication(),
1878 const Yasp::Partitioning<dim>* = YaspGrid<dim, EquidistantCoordinates<ctype, dim>>::defaultPartitioner())
1879 -> YaspGrid< dim, EquidistantCoordinates<ctype, dim> >;
1880
1881 template<typename ctype, int dim>
1882 YaspGrid(FieldVector<ctype, dim>,
1883 FieldVector<ctype, dim>,
1884 std::array<int, std::size_t{dim}>,
1885 std::bitset<std::size_t{dim}> = std::bitset<std::size_t{dim}>{0ULL},
1886 int = 1,
1887 YaspCommunication = YaspCommunication(),
1888 const Yasp::Partitioning<dim>* = YaspGrid<dim, EquidistantOffsetCoordinates<ctype, dim>>::defaultPartitioner())
1889 -> YaspGrid< dim, EquidistantOffsetCoordinates<ctype, dim> >;
1890
1891 template<typename ctype, std::size_t dim>
1892 YaspGrid(std::array<std::vector<ctype>, dim>,
1893 std::bitset<dim> = std::bitset<dim>{0ULL},
1894 int = 1,
1895 YaspCommunication = YaspCommunication(),
1896 const Yasp::Partitioning<dim>* = YaspGrid<int{dim}, TensorProductCoordinates<ctype, int{dim}>>::defaultPartitioner())
1897 -> YaspGrid< int{dim}, TensorProductCoordinates<ctype, int{dim}> >;
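// Editorial example (not part of the original header): with the deduction
// guides above, the Coordinates template parameter is deduced from the
// constructor arguments, e.g.
//
//   Dune::FieldVector<double, 2> upperright{1.0, 1.0};
//   std::array<int, 2>           cells{8, 8};
//   Dune::YaspGrid grid(upperright, cells);
//   // deduces YaspGrid<2, EquidistantCoordinates<double, 2>>
//
//   std::array<std::vector<double>, 2> coords{{{0.0, 0.5, 1.0}, {0.0, 1.0}}};
//   Dune::YaspGrid tpgrid(coords);
//   // deduces YaspGrid<2, TensorProductCoordinates<double, 2>>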
1898
1900 template <int d, class CC>
1901 std::ostream& operator<< (std::ostream& s, const YaspGrid<d,CC>& grid)
1902 {
1903 int rank = grid.torus().rank();
1904
1905 s << "[" << rank << "]:" << " YaspGrid maxlevel=" << grid.maxLevel() << std::endl;
1906
1907 s << "Printing the torus: " <<std::endl;
1908 s << grid.torus() << std::endl;
1909
1910 for (typename YaspGrid<d,CC>::YGridLevelIterator g=grid.begin(); g!=grid.end(); ++g)
1911 {
1912 s << "[" << rank << "]: " << std::endl;
1913 s << "[" << rank << "]: " << "==========================================" << std::endl;
1914 s << "[" << rank << "]: " << "level=" << g->level() << std::endl;
1915
1916 for (int codim = 0; codim < d + 1; ++codim)
1917 {
1918 s << "[" << rank << "]: " << "overlapfront[" << codim << "]: " << g->overlapfront[codim] << std::endl;
1919 s << "[" << rank << "]: " << "overlap[" << codim << "]: " << g->overlap[codim] << std::endl;
1920 s << "[" << rank << "]: " << "interiorborder[" << codim << "]: " << g->interiorborder[codim] << std::endl;
1921 s << "[" << rank << "]: " << "interior[" << codim << "]: " << g->interior[codim] << std::endl;
1922
1923 typedef typename YGridList<CC>::Iterator I;
1924 for (I i=g->send_overlapfront_overlapfront[codim].begin();
1925 i!=g->send_overlapfront_overlapfront[codim].end(); ++i)
1926 s << "[" << rank << "]: " << " s_of_of[" << codim << "] to rank "
1927 << i->rank << " " << i->grid << std::endl;
1928
1929 for (I i=g->recv_overlapfront_overlapfront[codim].begin();
1930 i!=g->recv_overlapfront_overlapfront[codim].end(); ++i)
1931 s << "[" << rank << "]: " << " r_of_of[" << codim << "] to rank "
1932 << i->rank << " " << i->grid << std::endl;
1933
1934 for (I i=g->send_overlap_overlapfront[codim].begin();
1935 i!=g->send_overlap_overlapfront[codim].end(); ++i)
1936 s << "[" << rank << "]: " << " s_o_of[" << codim << "] to rank "
1937 << i->rank << " " << i->grid << std::endl;
1938
1939 for (I i=g->recv_overlapfront_overlap[codim].begin();
1940 i!=g->recv_overlapfront_overlap[codim].end(); ++i)
1941 s << "[" << rank << "]: " << " r_of_o[" << codim << "] to rank "
1942 << i->rank << " " << i->grid << std::endl;
1943
1944 for (I i=g->send_interiorborder_interiorborder[codim].begin();
1945 i!=g->send_interiorborder_interiorborder[codim].end(); ++i)
1946 s << "[" << rank << "]: " << " s_ib_ib[" << codim << "] to rank "
1947 << i->rank << " " << i->grid << std::endl;
1948
1949 for (I i=g->recv_interiorborder_interiorborder[codim].begin();
1950 i!=g->recv_interiorborder_interiorborder[codim].end(); ++i)
1951 s << "[" << rank << "]: " << " r_ib_ib[" << codim << "] to rank "
1952 << i->rank << " " << i->grid << std::endl;
1953
1954 for (I i=g->send_interiorborder_overlapfront[codim].begin();
1955 i!=g->send_interiorborder_overlapfront[codim].end(); ++i)
1956 s << "[" << rank << "]: " << " s_ib_of[" << codim << "] to rank "
1957 << i->rank << " " << i->grid << std::endl;
1958
1959 for (I i=g->recv_overlapfront_interiorborder[codim].begin();
1960 i!=g->recv_overlapfront_interiorborder[codim].end(); ++i)
1961 s << "[" << rank << "]: " << " r_of_ib[" << codim << "] to rank "
1962 << i->rank << " " << i->grid << std::endl;
1963 }
1964 }
1965
1966 s << std::endl;
1967
1968 return s;
1969 }
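// Editorial note: streaming a grid, e.g. std::cout << grid;, prints the torus
// and, per level and codimension, the partition components and send/receive
// lists of the calling rank; mainly useful for debugging the parallel
// decomposition.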
1970
1971 namespace Capabilities
1972 {
1973
1981 template<int dim, class Coordinates>
1982 struct hasBackupRestoreFacilities< YaspGrid<dim, Coordinates> >
1983 {
1984 static const bool v = true;
1985 };
1986
1990 template<int dim, class Coordinates>
1991 struct hasSingleGeometryType< YaspGrid<dim, Coordinates> >
1992 {
1993 static const bool v = true;
1994 static const unsigned int topologyId = GeometryTypes::cube(dim).id();
1995 };
1996
2000 template<int dim, class Coordinates>
2001 struct isCartesian< YaspGrid<dim, Coordinates> >
2002 {
2003 static const bool v = true;
2004 };
2005
2009 template<int dim, class Coordinates, int codim>
2010 struct hasEntity< YaspGrid<dim, Coordinates>, codim>
2011 {
2012 static const bool v = true;
2013 };
2014
2019 template<int dim, class Coordinates, int codim>
2020 struct hasEntityIterator<YaspGrid<dim, Coordinates>, codim>
2021 {
2022 static const bool v = true;
2023 };
2024
2028 template<int dim, int codim, class Coordinates>
2029 struct canCommunicate< YaspGrid< dim, Coordinates>, codim >
2030 {
2031 static const bool v = true;
2032 };
2033
2037 template<int dim, class Coordinates>
2038 struct isLevelwiseConforming< YaspGrid<dim, Coordinates> >
2039 {
2040 static const bool v = true;
2041 };
2042
2046 template<int dim, class Coordinates>
2047 struct isLeafwiseConforming< YaspGrid<dim, Coordinates> >
2048 {
2049 static const bool v = true;
2050 };
2051
2055 template<int dim, class Coordinates>
2056 struct viewThreadSafe< YaspGrid<dim, Coordinates> >
2057 {
2058 static const bool v = true;
2059 };
2060
2061 }
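// Editorial example: generic code can query the capability specializations
// above at compile time, for instance
//
//   using Grid = Dune::YaspGrid<2, Dune::EquidistantCoordinates<double, 2> >;
//   static_assert(Dune::Capabilities::isCartesian<Grid>::v,
//                 "YaspGrid is a Cartesian grid");
//   static_assert(Dune::Capabilities::hasSingleGeometryType<Grid>::v,
//                 "every YaspGrid element is a cube");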
2062
2063} // end namespace
2064
2065// Include the specialization of the StructuredGridFactory class for YaspGrid
2066#include <dune/grid/yaspgrid/structuredyaspgridfactory.hh>
2067// Include the specialization of the BackupRestoreFacility class for YaspGrid
2068#include <dune/grid/yaspgrid/backuprestore.hh>
2069
2070#endif