/*
 * Copyright (c) 2011-2015: G-CSC, Goethe University Frankfurt
 * Author: Sebastian Reiter
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__UG__domain_distribution_impl__
#define __H__UG__domain_distribution_impl__

#include "domain_distribution.h"
#include "lib_grid/algorithms/attachment_util.h"
#include "lib_grid/parallelization/deprecated/load_balancing.h"
#include "common/serialization.h"

#ifdef UG_PARALLEL
	#include "pcl/pcl.h"
	#include "lib_grid/parallelization/distribution.h"
#endif


namespace ug
{

///	partitions a domain by sorting all elements into a regular grid
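///	Each element of the grid's highest dimensional element type is assigned to
///	the partition of the regular-grid cell (numCellsX x numCellsY x numCellsZ)
///	that contains it, based on the domain's vertex positions. If surfaceOnly is
///	true, only regular surface elements (i.e. no ghosts) are considered and all
///	remaining elements stay on the local process. Only available in parallel builds.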
template <typename TDomain>
static bool PartitionDomain_RegularGrid(TDomain& domain, PartitionMap& partitionMap,
										int numCellsX, int numCellsY, int numCellsZ,
										bool surfaceOnly)
{
	PROFILE_FUNC_GROUP("parallelization");
//	prepare the partition map and a vertex position attachment accessor
	SmartPtr<MultiGrid> pMG = domain.grid();
	partitionMap.assign_grid(*pMG);

#ifdef UG_PARALLEL

	SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

//	a distributed grid manager is required
	if(!domain.distributed_grid_manager()){
		UG_LOG("A distributed grid manager is required in the given domain.\n");
		return false;
	}

	typedef typename TDomain::position_attachment_type TAPos;
	Grid::AttachmentAccessor<Vertex, TAPos> aaPos(*pMG,
												  domain.position_attachment());

//	this callback allows us to only distribute surface elements which are not ghosts
	IsRegularSurfaceElem cbConsiderElem(*domain.distributed_grid_manager());

//	we need a process to which elements that are not considered will be sent.
//	Those elements should stay on the current process.
	int localProc = pcl::ProcRank();

	int bucketSubset = partitionMap.find_target_proc(localProc);
	if(bucketSubset == -1)
		bucketSubset = (int)partitionMap.num_target_procs();

//	partition the grid
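//	note: only the grid's highest dimensional element type is partitioned here;
//	lower dimensional elements are assigned implicitly during distribution,
//	together with the elements they belong to.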
	if(pMG->num<Volume>() > 0){
		if(!surfaceOnly)
			PartitionElements_RegularGrid<Volume>(
								partitionHandler,
								pMG->begin<Volume>(), pMG->end<Volume>(),
								numCellsX, numCellsY, numCellsZ, aaPos,
								ConsiderAll(), bucketSubset);
		else
			PartitionElements_RegularGrid<Volume>(
								partitionHandler,
								pMG->begin<Volume>(), pMG->end<Volume>(),
								numCellsX, numCellsY, numCellsZ, aaPos,
								cbConsiderElem, bucketSubset);
	}
	else if(pMG->num<Face>() > 0){
		if(!surfaceOnly)
			PartitionElements_RegularGrid<Face>(
								partitionHandler,
								pMG->begin<Face>(), pMG->end<Face>(),
								numCellsX, numCellsY, numCellsZ, aaPos,
								ConsiderAll(), bucketSubset);
		else
			PartitionElements_RegularGrid<Face>(
								partitionHandler,
								pMG->begin<Face>(), pMG->end<Face>(),
								numCellsX, numCellsY, numCellsZ, aaPos,
								cbConsiderElem, bucketSubset);
	}
	else if(pMG->num<Edge>() > 0){
		if(!surfaceOnly)
			PartitionElements_RegularGrid<Edge>(
								partitionHandler,
								pMG->begin<Edge>(), pMG->end<Edge>(),
								numCellsX, numCellsY, numCellsZ, aaPos,
								ConsiderAll(), bucketSubset);
		else
			PartitionElements_RegularGrid<Edge>(
								partitionHandler,
								pMG->begin<Edge>(), pMG->end<Edge>(),
								numCellsX, numCellsY, numCellsZ, aaPos,
								cbConsiderElem, bucketSubset);
	}
	else if(pMG->num<Vertex>() > 0){
		if(!surfaceOnly)
			PartitionElements_RegularGrid<Vertex>(
								partitionHandler,
								pMG->begin<Vertex>(), pMG->end<Vertex>(),
								numCellsX, numCellsY, numCellsZ, aaPos,
								ConsiderAll(), bucketSubset);
		else
			PartitionElements_RegularGrid<Vertex>(
								partitionHandler,
								pMG->begin<Vertex>(), pMG->end<Vertex>(),
								numCellsX, numCellsY, numCellsZ, aaPos,
								cbConsiderElem, bucketSubset);
	}
	else{
		LOG("partitioning could not be performed - "
			<< "grid doesn't contain any elements!\n");
		return false;
	}

//	if elements have been assigned to the bucket subset, we have to make sure
//	that the local process is also present in the process-map
	if(!partitionHandler.empty(bucketSubset)){
		if(bucketSubset >= (int)partitionMap.num_target_procs())
			partitionMap.add_target_proc(localProc);
	}

	return true;
#else
	UG_LOG("WARNING: PartitionDomain_RegularGrid is currently only implemented for");
	UG_LOG(" parallel environments.\n");
	return false;
#endif
}

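///	partitions a domain using the METIS k-way graph partitioner
/**	The grid's highest dimensional element type is partitioned into numPartitions
 *	parts via PartitionMultiGrid_MetisKway. hWeight and vWeight are passed on as
 *	weights for horizontal and vertical connections in the multigrid hierarchy.
 *	Elements below baseLevel stay on the local process.
 *	Only available in parallel builds.
 */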
template <typename TDomain>
static bool
PartitionDomain_MetisKWay(TDomain& domain, PartitionMap& partitionMap,
						  int numPartitions, size_t baseLevel,
						  int hWeight, int vWeight)
{
	PROFILE_FUNC_GROUP("parallelization");
//	prepare the partition map
	SmartPtr<MultiGrid> pMG = domain.grid();
	partitionMap.assign_grid(*pMG);

#ifdef UG_PARALLEL

	SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

//	we need a process to which elements that are not considered will be sent.
//	Those elements should stay on the current process.
	int localProc = pcl::ProcRank();

	int bucketSubset = partitionMap.find_target_proc(localProc);
	if(bucketSubset == -1)
		bucketSubset = (int)partitionMap.num_target_procs();

//	call the actual partitioning routine
	if(pMG->num<Volume>() > 0){
		PartitionMultiGrid_MetisKway<Volume>(partitionHandler, *pMG, numPartitions,
											 baseLevel, hWeight, vWeight);
	//	assign all elements below baseLevel to bucketSubset
		for(size_t lvl = 0; lvl < baseLevel; ++lvl)
			partitionHandler.assign_subset(pMG->begin<Volume>(lvl), pMG->end<Volume>(lvl),
										   bucketSubset);
	}
	else if(pMG->num<Face>() > 0){
		PartitionMultiGrid_MetisKway<Face>(partitionHandler, *pMG, numPartitions,
										   baseLevel, hWeight, vWeight);
	//	assign all elements below baseLevel to bucketSubset
		for(size_t lvl = 0; lvl < baseLevel; ++lvl)
			partitionHandler.assign_subset(pMG->begin<Face>(lvl), pMG->end<Face>(lvl),
										   bucketSubset);
	}
	else if(pMG->num<Edge>() > 0){
		PartitionMultiGrid_MetisKway<Edge>(partitionHandler, *pMG, numPartitions,
										   baseLevel, hWeight, vWeight);
	//	assign all elements below baseLevel to bucketSubset
		for(size_t lvl = 0; lvl < baseLevel; ++lvl)
			partitionHandler.assign_subset(pMG->begin<Edge>(lvl), pMG->end<Edge>(lvl),
										   bucketSubset);
	}

	if(!partitionHandler.empty(bucketSubset)){
		if(bucketSubset >= (int)partitionMap.num_target_procs())
			partitionMap.add_target_proc(localProc);
	}

	return true;
#else
	UG_LOG("WARNING in PartitionDomain_MetisKWay: Only available in parallel builds.\n");
	return false;
#endif
}

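///	partitions a domain using the METIS k-way partitioner and a custom weighting
/**	Behaves like the overload above, but the edge weights of the partitioning
 *	graph are obtained from the given PartitionWeighting object, which is
 *	evaluated for pairs of neighboring elements. Elements below baseLevel stay
 *	on the local process. Only available in parallel builds.
 */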
template <typename TDomain>
static bool
PartitionDomain_MetisKWay(TDomain& domain, PartitionMap& partitionMap,
						  int numPartitions, size_t baseLevel,
						  SmartPtr<PartitionWeighting> weightFct)
{
	PROFILE_FUNC_GROUP("parallelization");
//	prepare the partition map
	SmartPtr<MultiGrid> pMG = domain.grid();
	partitionMap.assign_grid(*pMG);

#ifdef UG_PARALLEL

	SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

	PartitionWeighting& wFct = *weightFct;
	wFct.set_subset_handler(domain.subset_handler().operator->());
//	we need a process to which elements that are not considered will be sent.
//	Those elements should stay on the current process.
	int localProc = pcl::ProcRank();

	int bucketSubset = partitionMap.find_target_proc(localProc);
	if(bucketSubset == -1)
		bucketSubset = (int)partitionMap.num_target_procs();

//	call the actual partitioning routine
	if(pMG->num<Volume>() > 0){
	//	do not use boost::function<...> f = wFct, since this leads to slicing
	//	of wFct and losing properties of derived objects
		boost::function<int (Volume*, Volume*)> f = boost::ref(wFct);
		PartitionMultiGrid_MetisKway<Volume>(partitionHandler, *pMG, numPartitions, baseLevel, f);
	//	assign all elements below baseLevel to bucketSubset
		for(size_t lvl = 0; lvl < baseLevel; ++lvl)
			partitionHandler.assign_subset(pMG->begin<Volume>(lvl), pMG->end<Volume>(lvl),
										   bucketSubset);
	}
	else if(pMG->num<Face>() > 0){
		boost::function<int (Face*, Face*)> f = boost::ref(wFct);
		PartitionMultiGrid_MetisKway<Face>(partitionHandler, *pMG, numPartitions, baseLevel, f);
	//	assign all elements below baseLevel to bucketSubset
		for(size_t lvl = 0; lvl < baseLevel; ++lvl)
			partitionHandler.assign_subset(pMG->begin<Face>(lvl), pMG->end<Face>(lvl),
										   bucketSubset);
	}
	else if(pMG->num<Edge>() > 0){
		boost::function<int (Edge*, Edge*)> f = boost::ref(wFct);
		PartitionMultiGrid_MetisKway<Edge>(partitionHandler, *pMG, numPartitions, baseLevel, f);
	//	assign all elements below baseLevel to bucketSubset
		for(size_t lvl = 0; lvl < baseLevel; ++lvl)
			partitionHandler.assign_subset(pMG->begin<Edge>(lvl), pMG->end<Edge>(lvl),
										   bucketSubset);
	}

	if(!partitionHandler.empty(bucketSubset)){
		if(bucketSubset >= (int)partitionMap.num_target_procs())
			partitionMap.add_target_proc(localProc);
	}

	return true;
#else
	UG_LOG("WARNING in PartitionDomain_MetisKWay: Only available in parallel builds.\n");
	return false;
#endif
}


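///	partitions a domain based on the elements of the given grid level
/**	Uses PartitionMultiGridLevel_MetisKway on the grid's element type (taken
 *	from domain.domain_info()); the partition found on that level is also used
 *	to assign the elements of the other levels of the hierarchy.
 *	Throws if the element type is not EDGE, FACE or VOLUME.
 */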
template <typename TDomain>
static bool
PartitionDomain_LevelBased(TDomain& domain, PartitionMap& partitionMap,
						   int numPartitions, size_t level)
{
	PROFILE_FUNC_GROUP("parallelization");
//	prepare the partition map
	SmartPtr<MultiGrid> pMG = domain.grid();
	partitionMap.assign_grid(*pMG);
	SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

//	call the actual partitioning routine
	switch(domain.domain_info().element_type()){
		case VOLUME:
			PartitionMultiGridLevel_MetisKway<Volume>(partitionHandler, *pMG, numPartitions, level);
			break;

		case FACE:
			PartitionMultiGridLevel_MetisKway<Face>(partitionHandler, *pMG, numPartitions, level);
			break;

		case EDGE:
			PartitionMultiGridLevel_MetisKway<Edge>(partitionHandler, *pMG, numPartitions, level);
			break;

		default:
			UG_THROW("Partitioning only works for element types EDGE, FACE, and VOLUME!");
			break;
	}

	return true;
}


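///	partitions an already distributed domain based on the elements of a grid level
/**	Same as PartitionDomain_LevelBased, but uses ParMETIS
 *	(PartitionMultiGridLevel_ParmetisKway), so that grids which are already
 *	distributed over several processes can be repartitioned in parallel.
 *	Throws if the element type is not EDGE, FACE or VOLUME.
 */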
template <typename TDomain>
static bool
PartitionDistributedDomain_LevelBased(TDomain& domain, PartitionMap& partitionMap,
									  int numPartitions, size_t level)
{
	PROFILE_FUNC_GROUP("parallelization");
//	prepare the partition map
	SmartPtr<MultiGrid> pMG = domain.grid();
	partitionMap.assign_grid(*pMG);
	SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

//	call the actual partitioning routine
	switch(domain.domain_info().element_type()){
		case VOLUME:
			PartitionMultiGridLevel_ParmetisKway<Volume>(partitionHandler, *pMG, numPartitions, level);
			break;

		case FACE:
			PartitionMultiGridLevel_ParmetisKway<Face>(partitionHandler, *pMG, numPartitions, level);
			break;

		case EDGE:
			PartitionMultiGridLevel_ParmetisKway<Edge>(partitionHandler, *pMG, numPartitions, level);
			break;

		default:
			UG_THROW("Partitioning only works for element types EDGE, FACE, and VOLUME!");
			break;
	}

	return true;
}


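///	distributes a partitioned domain to the target processes of the partition map
/**	Each partition (subset) of partitionMap is sent to its associated target
 *	process. Vertex positions and subset assignments (including additional
 *	subset handlers) are serialized along with the grid. If
 *	createVerticalInterfaces is true, vertical interfaces are created during
 *	distribution, as required, e.g., for parallel geometric multigrid.
 *	In serial builds nothing is distributed and true is returned.
 */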
template <typename TDomain>
static bool DistributeDomain(TDomain& domainOut,
							 PartitionMap& partitionMap,
							 bool createVerticalInterfaces)
{
	PROFILE_FUNC_GROUP("parallelization");
//todo	Use a process-communicator to restrict communication

//	make sure that the input is fine
	typedef typename TDomain::grid_type GridType;
	SmartPtr<GridType> pGrid = domainOut.grid();
	SubsetHandler& partitionHandler = *partitionMap.get_partition_handler();

	if(partitionHandler.grid() != pGrid.get()){
		partitionMap.assign_grid(*pGrid);
	}

#ifdef UG_PARALLEL

	typedef typename TDomain::position_attachment_type position_attachment_type;

//	used to check whether all processes are correctly prepared for redistribution
	//bool performDistribution = true;

//	make sure that the number of subsets and target processes match
//	THIS MAKES NO SENSE FOR PARALLEL REDISTRIBUTION - IT IS CLEAR THAT SOME
//	PROCS WON'T DELIVER TO ALL PROCS IN THE MAP.
/*	const int numSubs = partitionHandler.num_subsets();
	const int numTargetProcs = (int)partitionMap.num_target_procs();
	if(numSubs > numTargetProcs){
		UG_LOG("ERROR in RedistributeDomain: More partitions than target processes.\n");
		performDistribution = false;
	}
	else if(numSubs < numTargetProcs){
		UG_LOG("ERROR in RedistributeDomain: More target processes than partitions.\n");
		performDistribution = false;
	}
*/

//todo:	check whether all target-processes in partitionMap are in the valid range.

	PCL_PROFILE(RedistributeDomain);

//todo	Use a process-communicator to restrict communication
/*
	if(!pcl::AllProcsTrue(performDistribution))
		return false;
*/

//	data serialization
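//	vertex positions and subset assignments have to travel with the grid parts.
//	They are therefore packed into serializers, which DistributeGrid invokes
//	when packing and unpacking the redistributed grid parts.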
	SPVertexDataSerializer posSerializer =
			GeomObjAttachmentSerializer<Vertex, position_attachment_type>::
					create(*pGrid, domainOut.position_attachment());

	SPGridDataSerializer shSerializer = SubsetHandlerSerializer::
					create(*domainOut.subset_handler());

	GridDataSerializationHandler serializer;
	serializer.add(posSerializer);
	serializer.add(shSerializer);

	std::vector<std::string> additionalSHNames = domainOut.additional_subset_handler_names();
	for(size_t i = 0; i < additionalSHNames.size(); ++i){
		SmartPtr<ISubsetHandler> sh = domainOut.additional_subset_handler(additionalSHNames[i]);
		if(sh.valid()){
			SPGridDataSerializer shSerializer = SubsetHandlerSerializer::create(*sh);
			serializer.add(shSerializer);
		}
	}

//	now call redistribution
	DistributeGrid(*pGrid, partitionHandler, serializer, createVerticalInterfaces,
				   &partitionMap.get_target_proc_vec());

	PCL_PROFILE_END();
#endif

//	in the serial case there's nothing to do.
	return true;
}
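
/*	Usage sketch (illustrative only; 'dom' stands for a previously loaded domain
 *	object, and all calls are executed on every process of the parallel run):
 *
 *		PartitionMap pmap;
 *		for(int p = 0; p < pcl::NumProcs(); ++p)
 *			pmap.add_target_proc(p);
 *
 *		if(PartitionDomain_LevelBased(dom, pmap, pcl::NumProcs(), 0))
 *			DistributeDomain(dom, pmap, true);
 */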

}//	end of namespace

#endif