dune-composites (2.5.1)

subdomainprojectedcoarsespace.hh
#ifndef DUNE_GENEO_SUBDOMAINPROJECTEDCOARSESPACE_HH
#define DUNE_GENEO_SUBDOMAINPROJECTEDCOARSESPACE_HH


#if HAVE_ARPACKPP

#include <dune/pdelab/boilerplate/pdelab.hh>

#include <dune/common/timer.hh>

#include "multicommdatahandle.hh"

#include "coarsespace.hh"
#include "subdomainbasis.hh"

namespace Dune {
  namespace Geneo {

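    /** \brief GenEO-type coarse space assembled from per-subdomain basis vectors.
     *
     *  The basis vectors stored in the SubdomainBasis<X> object span the coarse space;
     *  restrict_defect() and prolongate_defect() apply the corresponding restriction and
     *  prolongation, and setup_coarse_system() assembles the Galerkin coarse matrix with
     *  entries phi_i^T A phi_j, stored redundantly on every rank.
     *
     *  A minimal usage sketch (object names such as gfs, AF, basis, d and the vector
     *  type V are placeholders; how the GenEO basis and the assembled fine matrix are
     *  obtained is up to the application):
     *  \code
     *  auto coarse_space = std::make_shared<
     *      Dune::Geneo::SubdomainProjectedCoarseSpace<GFS,M,V,V,dim> >(gfs, AF, basis, verbosity);
     *  auto coarse_defect = coarse_space->restrict_defect(d);               // d_H = R_H d
     *  // ... solve the coarse problem using coarse_space->get_coarse_system() ...
     *  auto correction = coarse_space->prolongate_defect(*coarse_defect);   // back to the fine space
     *  \endcode
     */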
    template<class GFS, class M, class X, class Y, int dim>
    class SubdomainProjectedCoarseSpace : public CoarseSpace<M,X>
    {

    public:
      typedef typename CoarseSpace<M,X>::COARSE_V COARSE_V;
      typedef typename CoarseSpace<M,X>::COARSE_M COARSE_M;

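      /** \brief Sets up the coarse space.
       *
       *  Determines the neighbouring subdomains by communicating rank numbers on shared
       *  border entities (following parallelhelper.hh in PDELab) and then assembles the
       *  coarse system matrix.
       */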
      SubdomainProjectedCoarseSpace (const GFS& gfs_, const M& AF_, std::shared_ptr<SubdomainBasis<X> > subdomainbasis, int verbosity)
       : gfs(gfs_), AF(AF_),
         ranks(gfs.gridView().comm().size()),
         my_rank(gfs.gridView().comm().rank()),
         subdomainbasis_(subdomainbasis),
         _verbosity(verbosity)
      {

        // Find neighbors (based on parallelhelper.hh in PDELab)

        using RankVector = Dune::PDELab::Backend::Vector<GFS,int>;
        RankVector rank_partition(gfs, my_rank); // vector to identify unique decomposition

        Dune::InterfaceType _interiorBorder_all_interface;

        //Dune::InterfaceType _all_all_interface;

        // TODO: The following shortcut may never be fulfilled because we have no overlap?
        // Let's try to be clever and reduce the communication overhead by picking the smallest
        // possible communication interface depending on the overlap structure of the GFS.
        // FIXME: Switch to simple comparison as soon as dune-grid:1b3e83ec0 is reliably available!
        if (gfs.entitySet().partitions().value == Dune::Partitions::interiorBorder.value)
        {
          // The GFS only spans the interior and border partitions, so we can skip sending or
          // receiving anything else.
          _interiorBorder_all_interface = Dune::InteriorBorder_InteriorBorder_Interface;
          //_all_all_interface = Dune::InteriorBorder_InteriorBorder_Interface;
        }
        else
        {
          // In general, we have to transmit more.
          _interiorBorder_all_interface = Dune::InteriorBorder_All_Interface;
          //_all_all_interface = Dune::All_All_Interface;
        }
        Dune::PDELab::DisjointPartitioningDataHandle<GFS,RankVector> pdh(gfs,rank_partition);
        gfs.gridView().communicate(pdh,_interiorBorder_all_interface,Dune::ForwardCommunication);

        std::set<int> rank_set;
        for (int rank : rank_partition)
          if (rank != my_rank)
            rank_set.insert(rank);

        for (int rank : rank_set)
          neighbor_ranks.push_back(rank);

        setup_coarse_system();
      }

    private:
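      /** \brief Assembles the coarse system matrix with entries phi_i^T A phi_j.
       *
       *  Two assembly variants are provided: a legacy dense variant ("OldMatrixSetup")
       *  that gathers every matrix row on every rank, and the default sparse variant
       *  that only couples basis functions of neighbouring subdomains.
       */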
      void setup_coarse_system () {
        using Dune::PDELab::Backend::native;

        // Get local basis vectors
        auto local_basis = subdomainbasis_->local_basis;

        // Rescale basis vectors by the inverse of their squared Euclidean norm
        for (unsigned int i = 0; i < local_basis.size(); i++) {
          native(*(local_basis[i])) *= 1.0 / (native(*(local_basis[i])) * native(*(local_basis[i])));
        }

        MPI_Barrier(gfs.gridView().comm());
        if (my_rank == 0) std::cout << "Matrix setup" << std::endl;
        Dune::Timer timer_setup;

        // Communicate local coarse space dimensions
        int buf_basis_sizes[ranks];
        int local_size = local_basis.size();
        MPI_Allgather(&local_size, 1, MPI_INT, &buf_basis_sizes, 1, MPI_INT, gfs.gridView().comm());
        local_basis_sizes = std::vector<int>(buf_basis_sizes, buf_basis_sizes + ranks);

        // Count coarse space dimensions
        global_basis_size = 0;
        for (int n : local_basis_sizes) {
          global_basis_size += n;
        }
        my_basis_array_offset = 0;
        for (int i = 0; i < my_rank; i++) {
          my_basis_array_offset += local_basis_sizes[i];
        }

        if (my_rank == 0) std::cout << "Global basis size B=" << global_basis_size << std::endl;

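        // NOTE: `configuration` is assumed to be a Dune::ParameterTree that is visible at
        // this point (e.g. provided globally by the including application); it only selects
        // between the legacy dense assembly below and the sparse default.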
        if (configuration.get<bool>("OldMatrixSetup",false)) { // Setup version switch

          coarse_system = std::make_shared<COARSE_M>(global_basis_size, global_basis_size, COARSE_M::random);
          for (int row = 0; row < global_basis_size; row++) {
            coarse_system->setrowsize(row, global_basis_size);
          }
          coarse_system->endrowsizes();
          for (int row = 0; row < global_basis_size; row++) {
            for (int col = 0; col < global_basis_size; col++) {
              coarse_system->addindex(row, col);
            }
          }
          coarse_system->endindices();

          int recvcounts[ranks];
          int displs[ranks];
          for (int rank = 0; rank < ranks; rank++) {
            displs[rank] = 0;
          }
          for (int rank = 0; rank < ranks; rank++) {
            recvcounts[rank] = local_basis_sizes[rank];
            for (int i = rank+1; i < ranks; i++)
              displs[i] += local_basis_sizes[rank];
          }

          double buf_coarse_mat_row[global_basis_size];
          double buf_coarse_mat_row_local[local_basis_sizes[my_rank]];

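          // Legacy dense assembly: each basis vector is made globally consistent via an
          // AddDataHandle, every rank computes its block of entries phi_j^T A phi_i for the
          // current row, and MPI_Allgatherv collects the full row on all ranks.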
          int row_id = 0;
          for (int rank = 0; rank < ranks; rank++) {

            for (int basis_index = 0; basis_index < local_basis_sizes[rank]; basis_index++) {

              X basis_vector(gfs,0.0);
              if (rank == my_rank) {
                basis_vector = *local_basis[basis_index];
              }
              Dune::PDELab::AddDataHandle<GFS,X> adddh(gfs,basis_vector);
              gfs.gridView().communicate(adddh,Dune::All_All_Interface,Dune::ForwardCommunication);


              for (int basis_index2 = 0; basis_index2 < local_basis_sizes[my_rank]; basis_index2++) {

                double entry;
                X Atimesv(gfs,0.0);
                native(AF).mv(native(basis_vector), native(Atimesv));
                entry = *local_basis[basis_index2]*Atimesv;

                buf_coarse_mat_row_local[basis_index2] = entry;
              }

              MPI_Allgatherv(&buf_coarse_mat_row_local, local_basis_sizes[my_rank], MPI_DOUBLE, &buf_coarse_mat_row, recvcounts, displs, MPI_DOUBLE, gfs.gridView().comm());

              for (int i = 0; i < global_basis_size; i++) {
                (*coarse_system)[row_id][i] = buf_coarse_mat_row[i];
              }

              row_id++;
            }
          }

        } else { // Setup version switch

          int max_local_basis_size = 0;
          for (int rank = 0; rank < ranks; rank++) {
            if (local_basis_sizes[rank] > max_local_basis_size)
              max_local_basis_size = local_basis_sizes[rank];
          }


          coarse_system = std::make_shared<COARSE_M>(global_basis_size, global_basis_size, COARSE_M::row_wise);

          std::vector<std::vector<std::vector<double> > > local_rows(local_basis_sizes[my_rank]);
          for (int basis_index = 0; basis_index < local_basis_sizes[my_rank]; basis_index++) {
            local_rows[basis_index].resize(neighbor_ranks.size()+1);
          }

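          // Exchange basis vectors with all neighbouring subdomains: in iteration k every rank
          // sends its k-th basis vector (or a zero dummy once its own basis is exhausted, to
          // keep the communication pattern synchronous) and receives the k-th basis vector of
          // each neighbour via MultiCommDataHandle. The products phi_i^T A phi_j are
          // accumulated per local row in local_rows.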
          for (int basis_index_remote = 0; basis_index_remote < max_local_basis_size; basis_index_remote++) {

            std::vector<std::shared_ptr<X> > neighbor_basis(neighbor_ranks.size()); // Current basis vector of each neighboring subdomain
            for (unsigned int i = 0; i < neighbor_basis.size(); i++) {
              neighbor_basis[i] = std::make_shared<X>(gfs, 0.0);
            }

            if (basis_index_remote < local_basis_sizes[my_rank]) {
              MultiCommDataHandle<GFS,X,int,dim> commdh(gfs, *local_basis[basis_index_remote], neighbor_basis, neighbor_ranks);
              gfs.gridView().communicate(commdh,Dune::All_All_Interface,Dune::ForwardCommunication);
            } else {
              X dummy(gfs, 0.0);
              MultiCommDataHandle<GFS,X,int,dim> commdh(gfs, dummy, neighbor_basis, neighbor_ranks);
              gfs.gridView().communicate(commdh,Dune::All_All_Interface,Dune::ForwardCommunication);
            }


            if (basis_index_remote < local_basis_sizes[my_rank]) {
              auto basis_vector = *local_basis[basis_index_remote];
              X Atimesv(gfs,0.0);
              native(AF).mv(native(basis_vector), native(Atimesv));
              for (int basis_index = 0; basis_index < local_basis_sizes[my_rank]; basis_index++) {
                double entry = *local_basis[basis_index]*Atimesv;
                local_rows[basis_index][neighbor_ranks.size()].push_back(entry);
              }
            }

            for (unsigned int neighbor_id = 0; neighbor_id < neighbor_ranks.size(); neighbor_id++) {
              if (basis_index_remote >= local_basis_sizes[neighbor_ranks[neighbor_id]])
                continue;

              auto basis_vector = *neighbor_basis[neighbor_id];
              X Atimesv(gfs,0.0);
              native(AF).mv(native(basis_vector), native(Atimesv));

              for (int basis_index = 0; basis_index < local_basis_sizes[my_rank]; basis_index++) {

                double entry = *local_basis[basis_index]*Atimesv;
                local_rows[basis_index][neighbor_id].push_back(entry);
              }
            }

          }

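          // Build the coarse matrix row by row in global (rank-major) ordering: the rank owning
          // a row broadcasts its sparsity pattern and entries, and every rank inserts them, so
          // the coarse matrix ends up stored redundantly on all ranks.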
          auto setup_row = coarse_system->createbegin();
          int row_id = 0;
          for (int rank = 0; rank < ranks; rank++) {
            for (int basis_index = 0; basis_index < local_basis_sizes[rank]; basis_index++) {

              // Communicate number of entries in this row
              int couplings = 0;
              if (rank == my_rank) {
                couplings = local_basis_sizes[my_rank];
                for (int neighbor_id : neighbor_ranks) {
                  couplings += local_basis_sizes[neighbor_id];
                }
              }
              MPI_Bcast(&couplings, 1, MPI_INT, rank, gfs.gridView().comm());

              // Communicate row's pattern
              int entries_pos[couplings];
              if (rank == my_rank) {
                int cnt = 0;
                for (int basis_index2 = 0; basis_index2 < local_basis_sizes[my_rank]; basis_index2++) {
                  entries_pos[cnt] = my_basis_array_offset + basis_index2;
                  cnt++;
                }
                for (unsigned int neighbor_id = 0; neighbor_id < neighbor_ranks.size(); neighbor_id++) {
                  int neighbor_offset = basis_array_offset (neighbor_ranks[neighbor_id]);
                  for (int basis_index2 = 0; basis_index2 < local_basis_sizes[neighbor_ranks[neighbor_id]]; basis_index2++) {
                    entries_pos[cnt] = neighbor_offset + basis_index2;
                    cnt++;
                  }
                }
              }
              MPI_Bcast(&entries_pos, couplings, MPI_INT, rank, gfs.gridView().comm());

              // Communicate actual entries
              double entries[couplings];
              if (rank == my_rank) {
                int cnt = 0;
                for (int basis_index2 = 0; basis_index2 < local_basis_sizes[my_rank]; basis_index2++) {
                  entries[cnt] = local_rows[basis_index][neighbor_ranks.size()][basis_index2];
                  cnt++;
                }
                for (unsigned int neighbor_id = 0; neighbor_id < neighbor_ranks.size(); neighbor_id++) {
                  for (int basis_index2 = 0; basis_index2 < local_basis_sizes[neighbor_ranks[neighbor_id]]; basis_index2++) {
                    entries[cnt] = local_rows[basis_index][neighbor_id][basis_index2];
                    cnt++;
                  }
                }
              }
              MPI_Bcast(&entries, couplings, MPI_DOUBLE, rank, gfs.gridView().comm());

              // Build matrix row based on pattern
              for (int i = 0; i < couplings; i++)
                setup_row.insert(entries_pos[i]);
              ++setup_row;

              // Set matrix entries
              for (int i = 0; i < couplings; i++) {
                (*coarse_system)[row_id][entries_pos[i]] = entries[i];
              }

              row_id++;
            }
          }

        } // Setup version switch

        if (my_rank == 0) std::cout << "Matrix setup finished: M=" << timer_setup.elapsed() << std::endl;
      }

      double coarse_time = 0.0;

    public:
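      // Offset of the given rank's basis functions in the consecutive global coarse ordering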
      int basis_array_offset (int rank) override {
        int offset = 0;
        for (int i = 0; i < rank; i++) {
          offset += local_basis_sizes[i];
        }
        return offset;
      }

    public:

      /*
       * Restrict defect: the i-th coarse component is the inner product of the i-th
       * subdomain basis vector with the local defect; the contributions of all ranks
       * are then gathered redundantly via MPI_Allgatherv.
       */
      std::shared_ptr<COARSE_V> restrict_defect (const X& d) const override {

        auto local_basis = subdomainbasis_->local_basis;

        using Dune::PDELab::Backend::native;
        std::shared_ptr<COARSE_V> coarse_defect = std::make_shared<COARSE_V>(global_basis_size,global_basis_size);

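        // Per-rank counts and offsets in the global coarse ordering, needed for MPI_Allgatherv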
        int recvcounts[ranks];
        int displs[ranks];
        for (int rank = 0; rank < ranks; rank++) {
          displs[rank] = 0;
        }
        for (int rank = 0; rank < ranks; rank++) {
          recvcounts[rank] = local_basis_sizes[rank];
          for (int i = rank+1; i < ranks; i++)
            displs[i] += local_basis_sizes[rank];
        }

        double buf_defect[global_basis_size];
        double buf_defect_local[local_basis_sizes[my_rank]];

        for (int basis_index = 0; basis_index < local_basis_sizes[my_rank]; basis_index++) {
          buf_defect_local[basis_index] = 0.0;
          for (unsigned int i = 0; i < native(d).N(); i++)
            buf_defect_local[basis_index] += native(*local_basis[basis_index])[i] * native(d)[i];
        }

        MPI_Allgatherv(&buf_defect_local, local_basis_sizes[my_rank], MPI_DOUBLE, &buf_defect, recvcounts, displs, MPI_DOUBLE, gfs.gridView().comm());
        for (int basis_index = 0; basis_index < global_basis_size; basis_index++) {
          (*coarse_defect)[basis_index] = buf_defect[basis_index];
        }
        return coarse_defect;
      }

      /*
       * Prolongate defect: scale each local basis vector by its coarse coefficient and
       * sum the results, yielding this subdomain's part of the fine-space correction.
       */

      std::shared_ptr<X> prolongate_defect (const COARSE_V& v0) const override {
        auto local_basis = subdomainbasis_->local_basis;

        using Dune::PDELab::Backend::native;
        auto v = std::make_shared<X>(gfs, 0.0);

        // Prolongate result
        for (int basis_index = 0; basis_index < local_basis_sizes[my_rank]; basis_index++) {
          X local_result(*local_basis[basis_index]);
          native(local_result) *= v0[my_basis_array_offset + basis_index];
          *v += local_result;
        }
        return v;
      }

      /*
       * Get coarse matrix
       */

      std::shared_ptr<COARSE_M> get_coarse_system () override {
        return coarse_system;
      }

      int basis_size() override {
        return global_basis_size;
      }

      int get_local_basis_sizes(int rank) override { // Dimension of the local coarse space of the given subdomain
        return local_basis_sizes[rank];
      }

    private:

      const GFS& gfs;                       // Grid function space of the fine problem
      const M& AF;                          // Assembled fine-level system matrix
      int ranks, my_rank;                   // Communicator size and own rank
      std::shared_ptr<SubdomainBasis<X> > subdomainbasis_;
      int _verbosity;

      std::vector<int> neighbor_ranks;      // Ranks of neighbouring subdomains

      std::vector<int> local_basis_sizes;   // Dimensions of local coarse space per subdomain
      int my_basis_array_offset;            // Start of local basis functions in a consecutive global ordering
      int global_basis_size;                // Dimension of entire coarse space

      std::shared_ptr<COARSE_M> coarse_system; // Coarse space matrix
    };

  } // namespace Geneo
} // namespace Dune

#endif // HAVE_ARPACKPP

#endif // DUNE_GENEO_SUBDOMAINPROJECTEDCOARSESPACE_HH