Line data Source code
1 : /*
2 : * Copyright (c) 2010-2015: G-CSC, Goethe University Frankfurt
3 : * Author: Andreas Vogel
4 : *
5 : * This file is part of UG4.
6 : *
7 : * UG4 is free software: you can redistribute it and/or modify it under the
8 : * terms of the GNU Lesser General Public License version 3 (as published by the
9 : * Free Software Foundation) with the following additional attribution
10 : * requirements (according to LGPL/GPL v3 §7):
11 : *
12 : * (1) The following notice must be displayed in the Appropriate Legal Notices
13 : * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
14 : *
15 : * (2) The following notice must be displayed at a prominent place in the
16 : * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
17 : *
18 : * (3) The following bibliography is recommended for citation and must be
19 : * preserved in all covered files:
20 : * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
21 : * parallel geometric multigrid solver on hierarchically distributed grids.
22 : * Computing and visualization in science 16, 4 (2013), 151-164"
23 : * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
24 : * flexible software system for simulating pde based models on high performance
25 : * computers. Computing and visualization in science 16, 4 (2013), 165-179"
26 : *
27 : * This program is distributed in the hope that it will be useful,
28 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 : * GNU Lesser General Public License for more details.
31 : */
32 :
33 : #ifndef __H__UG__LIB_DISC__MULTI_GRID_SOLVER__MG_SOLVER_IMPL__
34 : #define __H__UG__LIB_DISC__MULTI_GRID_SOLVER__MG_SOLVER_IMPL__
35 :
36 : #include <iostream>
37 : #include <sstream>
38 : #include <string>
39 : #include "common/profiler/profiler.h"
40 : #include "common/error.h"
41 : #include "lib_disc/function_spaces/grid_function_util.h"
42 : #include "lib_disc/operator/linear_operator/std_transfer.h"
43 : #include "lib_disc/operator/linear_operator/std_injection.h"
44 : #include "lib_grid/tools/periodic_boundary_manager.h"
45 : #include "lib_disc/operator/linear_operator/level_preconditioner_interface.h"
46 : #include "mg_solver.h"
47 :
48 : #ifdef UG_PARALLEL
49 : #include "lib_algebra/parallelization/parallelization.h"
50 : #include "pcl/pcl_util.h"
51 : // the debug barrier is used to eliminate synchronization overhead from
52 : // profiling stats. Only used for parallel builds.
53 : // PCL_DEBUG_BARRIER only has an effect if PCL_DEBUG_BARRIER_ENABLED is defined.
54 : #define GMG_PARALLEL_DEBUG_BARRIER(comm) PCL_DEBUG_BARRIER(comm)
55 :
56 : #else
57 : #define GMG_PARALLEL_DEBUG_BARRIER(comm)
58 : #endif
59 :
60 : #define PROFILE_GMG
61 : #ifdef PROFILE_GMG
62 : #define GMG_PROFILE_FUNC() PROFILE_FUNC_GROUP("gmg")
63 : #define GMG_PROFILE_BEGIN(name) PROFILE_BEGIN_GROUP(name, "gmg")
64 : #define GMG_PROFILE_END() PROFILE_END()
65 : #else
66 : #define GMG_PROFILE_FUNC()
67 : #define GMG_PROFILE_BEGIN(name)
68 : #define GMG_PROFILE_END()
69 : #endif
70 :
71 : namespace ug{
72 :
73 : ////////////////////////////////////////////////////////////////////////////////
74 : // Constructor
75 : ////////////////////////////////////////////////////////////////////////////////
76 :
///	Default constructor.
/**	Sets up the geometric multigrid with its default configuration:
 *	V-cycle, two pre- and two post-smoothing steps with a Jacobi smoother
 *	(pre- and post-smoother share the same prototype object), standard
 *	transfer operators for prolongation/restriction (also shared), and an
 *	LU base solver on level 0. No approximation space is available yet, so
 *	the projection prototype stays empty until set_approximation_space()
 *	is called. */
template <typename TDomain, typename TAlgebra>
AssembledMultiGridCycle<TDomain, TAlgebra>::
AssembledMultiGridCycle() :
	m_spSurfaceMat(NULL), m_pSurfaceSol(nullptr), m_spAss(NULL), m_spApproxSpace(SPNULL),
	m_topLev(GridLevel::TOP), m_surfaceLev(GridLevel::TOP),
	m_baseLev(0), m_cycleType(_V_),
	m_numPreSmooth(2), m_numPostSmooth(2),
	m_LocalFullRefLevel(0), m_GridLevelType(GridLevel::LEVEL),
	m_bUseRAP(false), m_bSmoothOnSurfaceRim(false),
	m_bCommCompOverlap(false),
	m_spPreSmootherPrototype(new Jacobi<TAlgebra>()),
	m_spPostSmootherPrototype(m_spPreSmootherPrototype),
	m_spProjectionPrototype(SPNULL),
	m_spProlongationPrototype(new StdTransfer<TDomain,TAlgebra>()),
	m_spRestrictionPrototype(m_spProlongationPrototype),
	m_spBaseSolver(new LU<TAlgebra>()),
	m_bGatheredBaseIfAmbiguous(true),
	m_bGatheredBaseUsed(true),
	m_ignoreInitForBaseSolver(false),
	m_bMatrixStructureIsConst(false),
	m_pC(nullptr),
	m_spDebugWriter(NULL), m_dbgIterCnt(0)
{};
100 :
101 :
///	Constructor taking the approximation space to operate on.
/**	Identical defaults to the default constructor (V-cycle, 2 pre-/post
 *	Jacobi steps, standard transfers, LU base solver), but since the
 *	approximation space is known, the projection prototype (StdInjection)
 *	can be created immediately.
 *	@param approxSpace  approximation space defining the grid hierarchy */
template <typename TDomain, typename TAlgebra>
AssembledMultiGridCycle<TDomain, TAlgebra>::
AssembledMultiGridCycle(SmartPtr<ApproximationSpace<TDomain> > approxSpace) :
	m_spSurfaceMat(NULL), m_pSurfaceSol(nullptr), m_spAss(NULL), m_spApproxSpace(approxSpace),
	m_topLev(GridLevel::TOP), m_surfaceLev(GridLevel::TOP),
	m_baseLev(0), m_cycleType(_V_),
	m_numPreSmooth(2), m_numPostSmooth(2),
	m_LocalFullRefLevel(0), m_GridLevelType(GridLevel::LEVEL),
	m_bUseRAP(false), m_bSmoothOnSurfaceRim(false),
	m_bCommCompOverlap(false),
	m_spPreSmootherPrototype(new Jacobi<TAlgebra>()),
	m_spPostSmootherPrototype(m_spPreSmootherPrototype),
	m_spProjectionPrototype(new StdInjection<TDomain,TAlgebra>(m_spApproxSpace)),
	m_spProlongationPrototype(new StdTransfer<TDomain,TAlgebra>()),
	m_spRestrictionPrototype(m_spProlongationPrototype),
	m_spBaseSolver(new LU<TAlgebra>()),
	m_bGatheredBaseIfAmbiguous(true),
	m_bGatheredBaseUsed(true),
	m_ignoreInitForBaseSolver(false),
	m_bMatrixStructureIsConst(false),
	m_pC(nullptr),
	m_spDebugWriter(NULL), m_dbgIterCnt(0)
{};
125 :
126 : template <typename TDomain, typename TAlgebra>
127 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
128 : set_approximation_space(SmartPtr<ApproximationSpace<TDomain> > approxSpace)
129 : {
130 0 : m_spApproxSpace = approxSpace;
131 0 : m_spProjectionPrototype = make_sp(new StdInjection<TDomain,TAlgebra>(m_spApproxSpace));
132 0 : }
133 :
134 : template <typename TDomain, typename TAlgebra>
135 : SmartPtr<ILinearIterator<typename TAlgebra::vector_type> >
136 0 : AssembledMultiGridCycle<TDomain, TAlgebra>::
137 : clone()
138 : {
139 0 : SmartPtr<AssembledMultiGridCycle<TDomain, TAlgebra> > clone(
140 0 : new AssembledMultiGridCycle<TDomain, TAlgebra>(m_spApproxSpace));
141 :
142 0 : clone->set_base_level(m_baseLev);
143 0 : clone->set_base_solver(m_spBaseSolver);
144 0 : clone->set_cycle_type(m_cycleType);
145 0 : clone->set_debug(m_spDebugWriter);
146 0 : clone->set_discretization(m_spAss);
147 0 : clone->set_num_postsmooth(m_numPostSmooth);
148 0 : clone->set_num_presmooth(m_numPreSmooth);
149 0 : clone->set_projection(m_spProjectionPrototype);
150 0 : clone->set_prolongation(m_spProlongationPrototype);
151 0 : clone->set_restriction(m_spRestrictionPrototype);
152 0 : clone->set_presmoother(m_spPreSmootherPrototype);
153 0 : clone->set_postsmoother(m_spPostSmootherPrototype);
154 0 : clone->set_surface_level(m_surfaceLev);
155 :
156 0 : for(size_t i = 0; i < m_vspProlongationPostProcess.size(); ++i)
157 0 : clone->add_prolongation_post_process(m_vspProlongationPostProcess[i]);
158 :
159 0 : for(size_t i = 0; i < m_vspRestrictionPostProcess.size(); ++i)
160 0 : clone->add_restriction_post_process(m_vspRestrictionPostProcess[i]);
161 :
162 0 : return clone;
163 : }
164 :
///	Destructor. No manual cleanup needed: smart-pointer members release
///	their resources themselves; m_pC / m_pSurfaceSol are non-owning.
template <typename TDomain, typename TAlgebra>
AssembledMultiGridCycle<TDomain, TAlgebra>::
~AssembledMultiGridCycle()
{};
169 :
170 : ////////////////////////////////////////////////////////////////////////////////
171 : // apply and init
172 : ////////////////////////////////////////////////////////////////////////////////
173 :
174 : template <typename TDomain, typename TAlgebra>
175 0 : bool AssembledMultiGridCycle<TDomain, TAlgebra>::
176 : apply(vector_type& rC, const vector_type& rD)
177 : {
178 : GMG_PROFILE_FUNC();
179 0 : GF* pC = dynamic_cast<GF*>(&rC);
180 0 : if(!pC) UG_THROW("GMG::apply: Expect Correction to be grid based.")
181 0 : const GF* pD = dynamic_cast<const GF*>(&rD);
182 0 : if(!pD) UG_THROW("GMG::apply: Expect Defect to be grid based.")
183 : GF& c = *pC;
184 0 : m_pC = pC;
185 : const GF& d = *pD;
186 :
187 : try{
188 : // Check if surface level has been chosen correctly
189 : // Please note, that the approximation space returns the global number of levels,
190 : // i.e. the maximum of levels among all processes.
191 0 : if(m_topLev >= (int)m_spApproxSpace->num_levels())
192 0 : UG_THROW("GMG::apply: SurfaceLevel "<<m_topLev<<" does not exist.");
193 :
194 : // Check if base level has been choose correctly
195 0 : if(m_baseLev > m_topLev)
196 0 : UG_THROW("GMG::apply: Base level must be smaller or equal to surface Level.");
197 :
198 : // debug output
199 0 : write_debug(d, "Defect_In");
200 0 : write_debug(*m_spSurfaceMat, "SurfaceStiffness", c, c);
201 0 : for(int lev = m_baseLev; lev <= m_topLev; ++lev)
202 : {
203 0 : LevData& ld = *m_vLevData[lev];
204 0 : ld.n_pre_calls = ld.n_post_calls = ld.n_base_calls
205 0 : = ld.n_restr_calls = ld.n_prolong_calls = 0;
206 : }
207 :
208 : // project defect from surface to level
209 : GMG_PROFILE_BEGIN(GMG_Apply_CopyDefectFromSurface);
210 : try{
211 0 : for(int lev = m_baseLev; lev <= m_topLev; ++lev){
212 0 : const std::vector<SurfLevelMap>& vMap = m_vLevData[lev]->vSurfLevelMap;
213 : LevData& ld = *m_vLevData[lev];
214 0 : for(size_t i = 0; i < vMap.size(); ++i){
215 0 : (*ld.sd)[vMap[i].levIndex] = d[vMap[i].surfIndex];
216 : }
217 : }
218 : #ifdef UG_PARALLEL
219 : for(int lev = m_baseLev; lev <= m_topLev; ++lev)
220 : m_vLevData[lev]->sd->set_storage_type(d.get_storage_mask());
221 : #endif
222 : }
223 : UG_CATCH_THROW("GMG::apply: Project d Surf -> Level failed.");
224 : GMG_PROFILE_END();
225 :
226 : // Perform one multigrid cycle
227 : UG_DLOG(LIB_DISC_MULTIGRID, 4, "gmg-apply lmgc (on level " << m_topLev << ")... \n");
228 : try{
229 : GMG_PROFILE_BEGIN(GMG_Apply_ResetCorrection);
230 : // reset surface correction
231 : c.set(0.0);
232 :
233 : // reset corr on top level
234 0 : m_vLevData[m_topLev]->sc->set(0.0);
235 : GMG_PROFILE_END();
236 :
237 : // start mg-cycle
238 : GMG_PROFILE_BEGIN(GMG_Apply_lmgc);
239 0 : lmgc(m_topLev, m_cycleType);
240 : GMG_PROFILE_END();
241 :
242 : // project top lev to surface
243 : GMG_PROFILE_BEGIN(GMG_Apply_AddCorrectionToSurface);
244 0 : const std::vector<SurfLevelMap>& vMap = m_vLevData[m_topLev]->vSurfLevelMap;
245 : LevData& ld = *m_vLevData[m_topLev];
246 0 : for(size_t i = 0; i < vMap.size(); ++i){
247 0 : c[vMap[i].surfIndex] += (*ld.sc)[vMap[i].levIndex];
248 : }
249 : #ifdef UG_PARALLEL
250 : c.set_storage_type(PST_CONSISTENT);
251 : #endif
252 : GMG_PROFILE_END();
253 : }
254 0 : UG_CATCH_THROW("GMG: lmgc failed.");
255 :
256 : // apply scaling
257 : GMG_PROFILE_BEGIN(GMG_Apply_Scaling);
258 : try{
259 0 : const number kappa = this->damping()->damping(c, d, m_spSurfaceMat.template cast_dynamic<ILinearOperator<vector_type> >());
260 0 : if(kappa != 1.0) c *= kappa;
261 : }
262 0 : UG_CATCH_THROW("GMG: Damping failed.")
263 : GMG_PROFILE_END();
264 :
265 : // debug output
266 0 : write_debug(c, "Correction_Out");
267 :
268 : // increase dbg counter
269 0 : if(m_spDebugWriter.valid()) m_dbgIterCnt++;
270 :
271 0 : } UG_CATCH_THROW("GMG::apply: Application failed.");
272 :
273 : UG_DLOG(LIB_DISC_MULTIGRID, 4, "gmg-apply done. \n");
274 0 : return true;
275 : }
276 :
277 : template <typename TDomain, typename TAlgebra>
278 0 : bool AssembledMultiGridCycle<TDomain, TAlgebra>::
279 : apply_update_defect(vector_type &c, vector_type& rD)
280 : {
281 : GMG_PROFILE_FUNC();
282 :
283 : // NOTE: This is the implementation of a multiplicative Multigrid. Thus, the
284 : // defect is kept up to date when traversing the grid. At the end of
285 : // the iteration the updated defect is stored in the level defects and
286 : // could be projected back to the surface in order to get an updated
287 : // surface defect. This is, however, not done. For these reasons:
288 : // a) A Matrix-Vector operation is not very expensive
289 : // b) In a 2d adaptive case, the update is difficult, but can be
290 : // performed. But if the implementation is incorrect, this will
291 : // hardly be detected.
292 : // c) In a 3d adaptive case, it is impossible to ensure, that assembled
293 : // level matrices and the surface matrix have the same couplings
294 : // (not even to inner points). This is due to the fact, that e.g.
295 : // finite volume geometries are computed using different
296 : // integration points. (Hanging fv used triangles as scvf in 3d,
297 : // while normal fv use quads). Therefore, the updated defect is
298 : // only approximately the correct defect. In order to return the
299 : // correct defect, we must recompute the defect in the
300 : // adaptive case.
301 : // d) If scaling of the correction is performed, the defect must be
302 : // recomputed anyway. Thus, only scale=1.0 allows optimizing.
303 : // e) Updated defects are only needed in LinearIterativeSolvers. In
304 : // Krylov-Methods (CG, BiCGStab, ...) only the correction is
305 : // needed. We optimize for that case.
306 :
307 : // compute correction
308 0 : if(!apply(c, rD)) return false;
309 :
310 : // update defect: d = d - A*c
311 : m_spSurfaceMat->matmul_minus(rD, c);
312 :
313 : // write for debugging
314 0 : const GF* pD = dynamic_cast<const GF*>(&rD);
315 0 : if(!pD) UG_THROW("GMG::apply: Expect Defect to be grid based.")
316 : const GF& d = *pD;
317 0 : write_debug(d, "Defect_Out");
318 :
319 0 : return true;
320 : }
321 :
322 : template <typename TDomain, typename TAlgebra>
323 0 : bool AssembledMultiGridCycle<TDomain, TAlgebra>::
324 : init(SmartPtr<ILinearOperator<vector_type> > J, const vector_type& u)
325 : {
326 : GMG_PROFILE_FUNC();
327 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - init(J, u)\n");
328 :
329 : // try to extract assembling routine
330 0 : SmartPtr<AssembledLinearOperator<TAlgebra> > spALO =
331 : J.template cast_dynamic<AssembledLinearOperator<TAlgebra> >();
332 0 : if(spALO.valid()){
333 0 : m_spAss = spALO->discretization();
334 : }
335 :
336 : // Store Surface Matrix
337 0 : m_spSurfaceMat = J.template cast_dynamic<matrix_type>();
338 :
339 : // Store Surface Solution
340 0 : m_pSurfaceSol = &u;
341 :
342 : // call init
343 0 : init();
344 :
345 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - init(J, u)\n");
346 0 : return true;
347 : }
348 :
349 : template <typename TDomain, typename TAlgebra>
350 0 : bool AssembledMultiGridCycle<TDomain, TAlgebra>::
351 : init(SmartPtr<ILinearOperator<vector_type> > L)
352 : {
353 : GMG_PROFILE_FUNC();
354 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - init(L)\n");
355 :
356 : // try to extract assembling routine
357 0 : SmartPtr<AssembledLinearOperator<TAlgebra> > spALO =
358 : L.template cast_dynamic<AssembledLinearOperator<TAlgebra> >();
359 0 : if(spALO.valid()){
360 0 : m_spAss = spALO->discretization();
361 : }
362 :
363 : // Store Surface Matrix
364 0 : m_spSurfaceMat = L.template cast_dynamic<matrix_type>();
365 :
366 : // Store Surface Solution
367 0 : m_pSurfaceSol = NULL;
368 :
369 : // call init
370 0 : init();
371 :
372 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - init(L)\n");
373 0 : return true;
374 : }
375 :
376 :
377 :
378 : template <typename TDomain, typename TAlgebra>
379 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
380 : init()
381 : {
382 : GMG_PROFILE_FUNC();
383 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_common\n");
384 :
385 : try{
386 :
387 : // Cast Operator
388 0 : if(m_spSurfaceMat.invalid())
389 0 : UG_THROW("GMG:init: Can not cast Operator to Matrix.");
390 :
391 : // Check Approx Space
392 0 : if(m_spApproxSpace.invalid())
393 0 : UG_THROW("GMG::init: Approximation Space not set.");
394 :
395 : // check that grid given
396 0 : if(m_spApproxSpace->num_levels() == 0)
397 0 : UG_THROW("GMG:init: No grid levels");
398 :
399 0 : if(m_spAss.invalid())
400 0 : UG_THROW("GMG::init: Discretization not set.");
401 :
402 0 : if(m_spBaseSolver.invalid())
403 0 : UG_THROW("GMG::init: Base Solver not set.");
404 :
405 0 : if(m_spPreSmootherPrototype.invalid())
406 0 : UG_THROW("GMG::init: PreSmoother not set.");
407 :
408 0 : if(m_spPostSmootherPrototype.invalid())
409 0 : UG_THROW("GMG::init: PostSmoother not set.");
410 :
411 0 : if(m_spProlongationPrototype.invalid())
412 0 : UG_THROW("GMG::init: Prolongation not set.");
413 :
414 0 : if(m_spRestrictionPrototype.invalid())
415 0 : UG_THROW("GMG::init: Restriction not set.");
416 :
417 : // get current toplevel
418 0 : const GF* pSol = dynamic_cast<const GF*>(m_pSurfaceSol);
419 0 : if(pSol){
420 0 : m_surfaceLev = pSol->dof_distribution()->grid_level().level();
421 : }
422 :
423 : int topLev = 0;
424 0 : if(m_surfaceLev != GridLevel::TOP) topLev = m_surfaceLev;
425 0 : else topLev = m_spApproxSpace->num_levels() - 1;
426 :
427 0 : if(m_baseLev > topLev)
428 0 : UG_THROW("GMG::init: Base Level greater than Surface level.");
429 :
430 : if(m_ApproxSpaceRevision != m_spApproxSpace->revision()
431 0 : || topLev != m_topLev)
432 : {
433 : // remember new top level
434 0 : m_topLev = topLev;
435 :
436 : // Allocate memory for given top level
437 : GMG_PROFILE_BEGIN(GMG_Init_LevelMemory);
438 : try{
439 0 : init_level_memory(m_baseLev, m_topLev);
440 : }
441 0 : UG_CATCH_THROW("GMG::init: Cannot allocate memory.");
442 : GMG_PROFILE_END();
443 :
444 : // init mapping from surface level to top level in case of full refinement
445 : GMG_PROFILE_BEGIN(GMG_Init_IndexMappings);
446 : try{
447 0 : init_index_mappings();
448 : }
449 0 : UG_CATCH_THROW("GMG: Cannot create SurfaceToLevelMap.")
450 : GMG_PROFILE_END();
451 :
452 : // Create Interpolation
453 : GMG_PROFILE_BEGIN(GMG_Init_Transfers);
454 : try{
455 0 : init_transfer();
456 : }
457 0 : UG_CATCH_THROW("GMG:init: Cannot init Transfer (Prolongation/Restriction).");
458 : GMG_PROFILE_END();
459 :
460 : // remember revision counter of approx space
461 0 : m_ApproxSpaceRevision = m_spApproxSpace->revision();
462 : }
463 :
464 : // Assemble coarse grid operators
465 : GMG_PROFILE_BEGIN(GMG_Init_CreateLevelMatrices);
466 : try{
467 0 : if(m_bUseRAP){
468 0 : init_rap_operator();
469 : } else {
470 0 : assemble_level_operator();
471 : }
472 : }
473 0 : UG_CATCH_THROW("GMG: Initialization of Level Operator failed.");
474 : GMG_PROFILE_END();
475 :
476 : // Init smoother for coarse grid operators
477 : GMG_PROFILE_BEGIN(GMG_Init_Smoother);
478 : try{
479 0 : init_smoother();
480 : }
481 0 : UG_CATCH_THROW("GMG:init: Cannot init Smoother.");
482 : GMG_PROFILE_END();
483 :
484 : // Init base solver
485 0 : if(!ignore_init_for_base_solver()){
486 : GMG_PROFILE_BEGIN(GMG_Init_BaseSolver);
487 : try{
488 0 : init_base_solver();
489 : }
490 0 : UG_CATCH_THROW("GMG:init: Cannot init Base Solver.");
491 : GMG_PROFILE_END();
492 : }
493 0 : } UG_CATCH_THROW("GMG: Init failure for init(u)");
494 :
495 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_common\n");
496 0 : }
497 :
498 :
499 : template <typename TDomain, typename TAlgebra>
500 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
501 : ignore_init_for_base_solver(bool ignore)
502 : {
503 : #ifdef UG_PARALLEL
504 : UG_COND_THROW(ignore && (pcl::NumProcs() > 1),
505 : "ignore_init_for_base_solver currently only works for serial runs.")
506 : #endif
507 0 : m_ignoreInitForBaseSolver = ignore;
508 0 : }
509 :
510 : template <typename TDomain, typename TAlgebra>
511 0 : bool AssembledMultiGridCycle<TDomain, TAlgebra>::
512 : ignore_init_for_base_solver() const
513 : {
514 0 : return m_ignoreInitForBaseSolver;
515 : }
516 :
517 :
///	Forces a complete reinitialization on the next init() call.
/**	Invalidates the stored approximation-space revision, so the next
 *	init() rebuilds level memory, index mappings and transfer operators
 *	even if the grid did not change. */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
force_reinit()
{
	m_ApproxSpaceRevision.invalidate();
}
524 :
525 :
///	Creates the level operators by assembling on each grid level.
/**	Top-down over the levels: the linearization point is projected from
 *	the surface to each level (with vertical-master communication in
 *	parallel, optionally overlapped with computation), then the level
 *	Jacobian is either assembled or — on a fully refined top level —
 *	copied directly from the surface matrix. If a gathered base solver is
 *	used, the base matrix is additionally assembled on the whole base
 *	level on the gathering process. Finally the rim couplings for the
 *	adaptive case are computed via assemble_rim_cpl(). */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
assemble_level_operator()
{
	GMG_PROFILE_FUNC();
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start assemble_level_operator\n");

//	NOTE(review): typos in this message ("poosible", "assembing") are kept,
//	as string literals must not be changed in a documentation-only pass
	if(m_GridLevelType == GridLevel::SURFACE)
		UG_THROW("GMG: emulate_full_refined_grid currently only implemented "
				"for set_rap(true) - since assembling of on surface-level with "
				" lev < topLev is currently not supported by constraints and "
				" elem-disc loop (only top-lev or level-view poosible). It is "
				"necessary to rework that part of the assembing procedure.")

//	Create Projection: copy the surface solution entries into the level
//	solution vectors (needed as linearization point for assemble_jacobian)
	try{
		if(m_pSurfaceSol) {
			UG_DLOG(LIB_DISC_MULTIGRID, 4, " start assemble_level_operator: project sol\n");
			GMG_PROFILE_BEGIN(GMG_ProjectSolution_CopyFromSurface);
#ifdef UG_PARALLEL
			if(!m_pSurfaceSol->has_storage_type(PST_CONSISTENT))
				UG_THROW("GMG::init: Can only project "
						"a consistent solution. Make sure to pass a consistent on.");
#endif

			init_projection();

			for(int lev = m_baseLev; lev <= m_topLev; ++lev){
				const std::vector<SurfLevelMap>& vMap = m_vLevData[lev]->vSurfLevelMap;
				LevData& ld = *m_vLevData[lev];
				for(size_t i = 0; i < vMap.size(); ++i){
					(*ld.st)[vMap[i].levIndex] = (*m_pSurfaceSol)[vMap[i].surfIndex];
				}
			}
			GMG_PROFILE_END();
			UG_DLOG(LIB_DISC_MULTIGRID, 4, " end assemble_level_operator: project sol\n");
		}
	}
	UG_CATCH_THROW("GMG::init: Projection of Surface Solution failed.");

//	Create coarse level operators (top-down, so each level can restrict
//	its solution to the next coarser one after assembling)
	GMG_PROFILE_BEGIN(GMG_AssembleLevelMat_AllLevelMat);
	for(int lev = m_topLev; lev >= m_baseLev; --lev)
	{
		LevData& ld = *m_vLevData[lev];

	//	send solution to v-master
		SmartPtr<GF> spT = ld.st;
#ifdef UG_PARALLEL
		ComPol_VecCopy<vector_type> cpVecCopy(ld.t.get());
		if(m_pSurfaceSol && lev > m_baseLev)
		{
			if( !ld.t->layouts()->vertical_slave().empty() ||
				!ld.t->layouts()->vertical_master().empty())
			{
				UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - send sol to v-master\n");

				// todo: if no ghosts present on proc one could use ld.st directly
				GMG_PROFILE_BEGIN(GMG_ProjectSolution_CopyNoghostToGhost);
				copy_noghost_to_ghost(ld.t, ld.st, ld.vMapPatchToGlobal);
				GMG_PROFILE_END();

				GMG_PROFILE_BEGIN(GMG_ProjectSolution_CollectAndSend);
				m_Com.receive_data(ld.t->layouts()->vertical_master(), cpVecCopy);
				m_Com.send_data(ld.t->layouts()->vertical_slave(), cpVecCopy);
				m_Com.communicate_and_resume();
				GMG_PROFILE_END();
			//	if overlap of communication and computation is enabled, the
			//	wait() is deferred until just before the restriction below
				if(!m_bCommCompOverlap){
					GMG_PROFILE_BEGIN(GMG_ProjectSolution_RevieveAndExtract_NoOverlap);
					m_Com.wait();
					GMG_PROFILE_END();
				}

				spT = ld.t;
				UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - send sol to v-master\n");
			}
		}
#endif

	//	In Full-Ref case we can copy the Matrix from the surface
		bool bCpyFromSurface = ((lev == m_topLev) && (lev <= m_LocalFullRefLevel));
		if(!bCpyFromSurface)
		{
			UG_DLOG(LIB_DISC_MULTIGRID, 4, " start assemble_level_operator: assemble on lev "<<lev<<"\n");
			GMG_PROFILE_BEGIN(GMG_AssembleLevelMat_AssembleOnLevel);
			try{
				if(m_GridLevelType == GridLevel::LEVEL)
					m_spAss->ass_tuner()->set_force_regular_grid(true);
				m_spAss->assemble_jacobian(*ld.A, *ld.st, GridLevel(lev, m_GridLevelType, false));
				m_spAss->ass_tuner()->set_force_regular_grid(false);
			}
			UG_CATCH_THROW("GMG:init: Cannot init operator for level "<<lev);
			GMG_PROFILE_END();
			UG_DLOG(LIB_DISC_MULTIGRID, 4, " end assemble_level_operator: assemble on lev "<<lev<<"\n");
		}
		else
		{
		//	in case of full refinement we simply copy the matrix (with correct numbering)
			UG_DLOG(LIB_DISC_MULTIGRID, 4, " start assemble_level_operator: copy mat on lev "<<lev<<"\n");
			GMG_PROFILE_BEGIN(GMG_AssembleLevelMat_CopyFromTopSurface);

		//	loop all mapped indices
			UG_ASSERT(m_spSurfaceMat->num_rows() == m_vSurfToLevelMap.size(),
					  "Surface Matrix rows != Surf Level Indices")

		//	if the sparsity pattern is known to be unchanged, only reset values
			if (m_bMatrixStructureIsConst)
				ld.A->clear_retain_structure();
			else
				ld.A->resize_and_clear(m_spSurfaceMat->num_rows(), m_spSurfaceMat->num_cols());
			for(size_t srfRow = 0; srfRow < m_vSurfToLevelMap.size(); ++srfRow)
			{
			//	get mapped level index
				UG_ASSERT(m_vSurfToLevelMap[srfRow].level == m_topLev,
						  "All surface Indices must be on top level for full-ref.")
				const size_t lvlRow = m_vSurfToLevelMap[srfRow].index;

			//	loop all connections of the surface dof to other surface dofs
			//	and copy the matrix coupling into the level matrix
				typedef typename matrix_type::const_row_iterator const_row_iterator;
				const_row_iterator conn = m_spSurfaceMat->begin_row(srfRow);
				const_row_iterator connEnd = m_spSurfaceMat->end_row(srfRow);
				for( ;conn != connEnd; ++conn){
				//	get corresponding level connection index
					UG_ASSERT(m_vSurfToLevelMap[conn.index()].level == m_topLev,
							  "All surface Indices must be on top level for full-ref.")
					const size_t lvlCol = m_vSurfToLevelMap[conn.index()].index;

				//	copy connection to level matrix
					(*ld.A)(lvlRow, lvlCol) = conn.value();
				}
			}

#ifdef UG_PARALLEL
			ld.A->set_storage_type(m_spSurfaceMat->get_storage_mask());
			ld.A->set_layouts(ld.st->layouts());
#endif
			GMG_PROFILE_END();
			UG_DLOG(LIB_DISC_MULTIGRID, 4, " end assemble_level_operator: copy mat on lev "<<lev<<"\n");
		}

	//	restrict the linearization point to the next coarser level
		if(m_pSurfaceSol && lev > m_baseLev)
		{
#ifdef UG_PARALLEL
			if(m_bCommCompOverlap){
				UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - recieve sol at v-master\n");
				GMG_PROFILE_BEGIN(GMG_ProjectSolution_RecieveAndExtract_WithOverlap);
				m_Com.wait();
				GMG_PROFILE_END();
				UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - recieve sol at v-master\n");
			}
#endif

			GMG_PROFILE_BEGIN(GMG_ProjectSolution_Transfer);
			LevData& lc = *m_vLevData[lev-1];
			try{
				ld.Projection->do_restrict(*lc.st, *spT);
#ifdef UG_PARALLEL
				lc.st->set_storage_type(m_pSurfaceSol->get_storage_mask());
#endif
			} UG_CATCH_THROW("GMG::init: Cannot project solution to coarse grid "
							 "function of level "<<lev-1);
			GMG_PROFILE_END();
		}
	}
	GMG_PROFILE_END();

//	write computed level matrices for debug purpose
	for(int lev = m_baseLev; lev <= m_topLev; ++lev){
		LevData& ld = *m_vLevData[lev];
		write_debug(*ld.A, "LevelMatrix", *ld.st, *ld.st);
	}

//	if no ghosts are present we can simply use the whole grid. If the base
//	solver is carried out in serial (gathering to some processes), we have
//	to assemble the coarse grid matrix on the whole grid as well
	if(m_bGatheredBaseUsed)
	{
		UG_DLOG(LIB_DISC_MULTIGRID, 4, " start assemble_level_operator: ass gathered on lev "<<m_baseLev<<"\n");
		LevData& ld = *m_vLevData[m_baseLev];

		if(m_pSurfaceSol){

#ifdef UG_PARALLEL
			if( !ld.t->layouts()->vertical_slave().empty() ||
				!ld.t->layouts()->vertical_master().empty())
			{
				UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - copy sol to gathered master\n");

				GMG_PROFILE_BEGIN(GMG_ProjectSolution_CopyToGatheredMaster);
				copy_noghost_to_ghost(ld.t, ld.st, ld.vMapPatchToGlobal);

				ComPol_VecCopy<vector_type> cpVecCopy(ld.t.get());
				m_Com.receive_data(ld.t->layouts()->vertical_master(), cpVecCopy);
				m_Com.send_data(ld.t->layouts()->vertical_slave(), cpVecCopy);
				m_Com.communicate();
				GMG_PROFILE_END();

				UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - copy sol to gathered master\n");
			}
#endif
		}

	//	only assemble on gathering proc
		if(gathered_base_master())
		{
			GMG_PROFILE_BEGIN(GMG_AssembleLevelMat_GatheredBase);
			try{
				if(m_GridLevelType == GridLevel::LEVEL)
					m_spAss->ass_tuner()->set_force_regular_grid(true);
				m_spAss->assemble_jacobian(*spGatheredBaseMat, *ld.t, GridLevel(m_baseLev, m_GridLevelType, true));
				m_spAss->ass_tuner()->set_force_regular_grid(false);
			}
			UG_CATCH_THROW("GMG:init: Cannot init operator base level operator");
			GMG_PROFILE_END();
		}
		UG_DLOG(LIB_DISC_MULTIGRID, 4, " end assemble_level_operator: ass gathered on lev "<<m_baseLev<<"\n");
	}

//	assemble missing coarse grid matrix contribution (only in adaptive case)
	try{
		assemble_rim_cpl(m_pSurfaceSol);
	}
	UG_CATCH_THROW("GMG:init: Cannot init missing coarse grid coupling.");

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop assemble_level_operator\n");
}
753 :
///	Computes the rim couplings between level matrices in the adaptive case.
/**	On levels above the locally fully refined level, shadowing dofs on the
 *	fine level couple to dofs living on the coarser level. Those couplings
 *	are not part of the level matrices; they are extracted here from the
 *	surface matrix into the RimCpl_Fine_Coarse (and, if smoothing on the
 *	surface rim is enabled, RimCpl_Coarse_Fine) matrices. Rows of the fine
 *	level matrix belonging to shadowing dofs are rebuilt from the surface
 *	matrix as well.
 *	@param u  surface solution; not referenced in this routine as visible
 *	          here (kept for interface symmetry — TODO confirm callers) */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
assemble_rim_cpl(const vector_type* u)
{
	GMG_PROFILE_FUNC();
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - assemble_rim_cpl " << "\n");

//	clear matrices
	for(int lev = m_baseLev; lev <= m_topLev; ++lev){
		LevData& ld = *m_vLevData[lev];
		ld.RimCpl_Fine_Coarse.resize_and_clear(0,0);
		ld.RimCpl_Coarse_Fine.resize_and_clear(0,0);
	}

//	loop all levels to compute the missing contribution
//	(levels <= m_LocalFullRefLevel have no rim and need no coupling)
	for(int lev = m_topLev; lev > m_LocalFullRefLevel; --lev)
	{
		LevData& lc = *m_vLevData[lev-1];	// coarse level data
		LevData& lf = *m_vLevData[lev];		// fine level data

		lc.RimCpl_Fine_Coarse.resize_and_clear(lf.sd->size(), lc.sc->size());
		if(m_bSmoothOnSurfaceRim)
			lc.RimCpl_Coarse_Fine.resize_and_clear(lc.sd->size(), lf.sc->size());

	//	rebuild each shadowing row from the corresponding surface matrix row
		for(size_t i = 0; i< lf.vShadowing.size(); ++i)
		{
			const size_t lvlRow = lf.vShadowing[i];
			const size_t srfRow = lf.vSurfShadowing[i];

			SetRow((*lf.A), lvlRow, 0.0);

			typedef typename matrix_type::const_row_iterator row_iterator;
			row_iterator conn = m_spSurfaceMat->begin_row(srfRow);
			row_iterator connEnd = m_spSurfaceMat->end_row(srfRow);
			for( ;conn != connEnd; ++conn)
			{
				const size_t srfCol = conn.index();

			//	coupling to a dof on the same level -> level matrix
				if(m_vSurfToLevelMap[srfCol].level == lev) {
					const size_t lvlCol = m_vSurfToLevelMap[srfCol].index;
					(*lf.A)(lvlRow, lvlCol) = conn.value();
				}
			//	coupling to a finer dof that also exists on this level
			//	(levelLower/indexLower) -> level matrix as well
				if(m_vSurfToLevelMap[srfCol].level == lev+1) {
					if(m_vSurfToLevelMap[srfCol].levelLower == lev) {
						const size_t lvlCol = m_vSurfToLevelMap[srfCol].indexLower;
						(*lf.A)(lvlRow, lvlCol) = conn.value();
					}
				}
			//	coupling to a coarser dof -> rim coupling matrices
				if(m_vSurfToLevelMap[srfCol].level == lev-1){
					const size_t lvlCol = m_vSurfToLevelMap[srfCol].index;
					(lc.RimCpl_Fine_Coarse)(lvlRow, lvlCol) = conn.value();

				//	transposed entry of the surface matrix for coarse->fine
					if(m_bSmoothOnSurfaceRim){
						lc.RimCpl_Coarse_Fine(lvlCol, lvlRow) = (*m_spSurfaceMat)(srfCol, srfRow);
					}
				}

			}
		}

#ifdef UG_PARALLEL
		lc.RimCpl_Fine_Coarse.set_storage_type(m_spSurfaceMat->get_storage_mask());
		if(m_bSmoothOnSurfaceRim)
			lc.RimCpl_Coarse_Fine.set_storage_type(m_spSurfaceMat->get_storage_mask());
#endif
		write_debug(*lf.A, "LevelMatrix", *lf.st, *lf.st);
		write_debug(lc.RimCpl_Fine_Coarse, "RimCpl", *lf.sd, *lc.sc);
		if(m_bSmoothOnSurfaceRim)
			write_debug(lc.RimCpl_Coarse_Fine, "RimCpl", *lc.sd, *lf.sc);
	}

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - assemble_rim_cpl " << "\n");
}
827 :
///	assembles all level operators algebraically via Galerkin products (RAP)
/**	Instead of re-assembling the PDE on every grid level, the surface matrix
 * is first copied into the level matrices (each surface index has a unique
 * level counterpart, cached by init_index_mappings) and coarse-level
 * matrices are then accumulated recursively as A_c += R * A_f * P.
 * In parallel builds the fine matrix is first gathered on vertical masters,
 * since transfer operators act on the ghosted (element-wise) distributions.
 * Finally the gathered base matrix (if used) and the rim couplings for the
 * adaptive case are set up.
 */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_rap_operator()
{
	GMG_PROFILE_FUNC();
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_rap_operator\n");

//	prepare the level matrices: either keep the sparsity pattern (cheap
//	re-init) or resize from scratch
	GMG_PROFILE_BEGIN(GMG_BuildRAP_ResizeLevelMat);
	for(int lev = m_topLev; lev >= m_baseLev; --lev)
	{
		LevData& ld = *m_vLevData[lev];
		if (m_bMatrixStructureIsConst)
			ld.A->clear_retain_structure();
		else
			ld.A->resize_and_clear(ld.st->size(), ld.st->size());
		#ifdef UG_PARALLEL
	//	level matrices inherit the parallel storage state of the surface matrix
		ld.A->set_storage_type(m_spSurfaceMat->get_storage_mask());
		ld.A->set_layouts(ld.st->layouts());
		#endif
	}
	GMG_PROFILE_END();

	UG_DLOG(LIB_DISC_MULTIGRID, 4, " start init_rap_operator: copy from surface\n");
	GMG_PROFILE_BEGIN(GMG_BuildRAP_CopyFromSurfaceMat);
	for(size_t srfRow = 0; srfRow < m_vSurfToLevelMap.size(); ++srfRow)
	{
	//	loop all connections of the surface dof to other surface dofs
	//	and copy the matrix coupling into the level matrix
		typedef typename matrix_type::const_row_iterator const_row_iterator;
		const_row_iterator conn = m_spSurfaceMat->begin_row(srfRow);
		const_row_iterator connEnd = m_spSurfaceMat->end_row(srfRow);
		for( ;conn != connEnd; ++conn)
		{
		//	get corresponding surface connection index
			const size_t srfCol = conn.index();

		//	get level
			int rowLevel = m_vSurfToLevelMap[srfRow].level;
			int colLevel = m_vSurfToLevelMap[srfCol].level;

		//	get corresponding indices
			size_t lvlRow = m_vSurfToLevelMap[srfRow].index;
			size_t lvlCol = m_vSurfToLevelMap[srfCol].index;

		//	if connection between different level -> adjust: a coupling can
		//	only be stored in one level matrix, so the index living on the
		//	higher level is replaced by its shadow-copy on the lower level
		//	(if present); otherwise the coupling is handled via the rim
		//	coupling matrices and skipped here
			UG_ASSERT(colLevel >= 0, "Invalid colLevel: "<<colLevel)
			UG_ASSERT(rowLevel >= 0, "Invalid rowLevel: "<<rowLevel)
			if(colLevel > rowLevel){
				if(m_vSurfToLevelMap[srfCol].levelLower == rowLevel){
					lvlCol = m_vSurfToLevelMap[srfCol].indexLower;
					colLevel = rowLevel;
				} else {
					continue;
				}
			} else if(colLevel < rowLevel){
				if(m_vSurfToLevelMap[srfRow].levelLower == colLevel){
					lvlRow = m_vSurfToLevelMap[srfRow].indexLower;
					rowLevel = colLevel;
				} else {
					continue;
				}
			}

		//	copy connection to level matrix
			(*(m_vLevData[colLevel]->A))(lvlRow, lvlCol) = conn.value();
		}
	}
	GMG_PROFILE_END();
	UG_DLOG(LIB_DISC_MULTIGRID, 4, " end init_rap_operator: copy from surface\n");

//	write computed level matrices for debug purpose
	for(int lev = m_baseLev; lev <= m_topLev; ++lev){
		LevData& ld = *m_vLevData[lev];
		write_debug(*ld.A, "LevelMatrixFromSurf", *ld.st, *ld.st);
	}

//	Create coarse level operators: descend from the top level, so that each
//	coarse matrix receives the already completed fine matrix
	UG_DLOG(LIB_DISC_MULTIGRID, 4, " start init_rap_operator: build rap\n");
	GMG_PROFILE_BEGIN(GMG_BuildRAP_AllLevelMat);
	for(int lev = m_topLev; lev > m_baseLev; --lev)
	{
		UG_DLOG(LIB_DISC_MULTIGRID, 4, " start init_rap_operator: build rap on lev "<<lev<<"\n");
		LevData& lf = *m_vLevData[lev];
		LevData& lc = *m_vLevData[lev-1];

	//	by default the triple product uses the no-ghost matrix; in parallel
	//	it may be replaced by a ghosted, vertically accumulated copy below
		SmartPtr<matrix_type> spA = lf.A;
		#ifdef UG_PARALLEL
		SmartPtr<matrix_type> spGhostA = SmartPtr<matrix_type>(new matrix_type);
		ComPol_MatAddSetZeroInnerInterfaceCouplings<matrix_type> cpMatAdd(*spGhostA);
		if( !lf.t->layouts()->vertical_master().empty() ||
			!lf.t->layouts()->vertical_slave().empty())
		{
		//	copy the matrix into the ghosted pattern (identity copy if this
		//	proc holds no vertical masters)
			GMG_PROFILE_BEGIN(GMG_BuildRAP_CopyNoghostToGhost);
			spGhostA->resize_and_clear(lf.t->size(), lf.t->size());
			spGhostA->set_layouts(lf.t->layouts());
			if(!lf.t->layouts()->vertical_master().empty()){
				copy_noghost_to_ghost(spGhostA, lf.A, lf.vMapPatchToGlobal);
			} else {
				*spGhostA = *lf.A;
			}
			GMG_PROFILE_END();

		//	accumulate vertical-slave couplings on the vertical masters;
		//	communication is started asynchronously so it can overlap with
		//	the transfer-operator assembly below (if m_bCommCompOverlap)
			GMG_PROFILE_BEGIN(GMG_BuildRAP_CollectAndSend);
			m_Com.send_data(spGhostA->layouts()->vertical_slave(), cpMatAdd);
			m_Com.receive_data(spGhostA->layouts()->vertical_master(), cpMatAdd);
			m_Com.communicate_and_resume();
			GMG_PROFILE_END();
			if(!m_bCommCompOverlap){
				GMG_PROFILE_BEGIN(GMG_BuildRAP_RecieveAndExtract_NoOverlap);
				m_Com.wait();
				GMG_PROFILE_END();
			}

			spA = spGhostA;
		}
		#endif

	//	assemble restriction R (fine -> coarse) and prolongation P
	//	(coarse -> fine) as explicit matrices
		GMG_PROFILE_BEGIN(GMG_BuildRAP_CreateRestrictionAndProlongation);
		SmartPtr<matrix_type> R = lf.Restriction->restriction(lc.st->grid_level(), lf.t->grid_level(), m_spApproxSpace);
		SmartPtr<matrix_type> P = lf.Prolongation->prolongation(lf.t->grid_level(), lc.st->grid_level(), m_spApproxSpace);
		GMG_PROFILE_END();

		#ifdef UG_PARALLEL
	//	finish the overlapped communication before the matrix is used
		if(m_bCommCompOverlap){
			GMG_PROFILE_BEGIN(GMG_BuildRAP_RecieveAndExtract_WithOverlap);
			m_Com.wait();
			GMG_PROFILE_END();
		}
		#endif

	//	coarse matrix += R * A_fine * P (adds to the part copied from the
	//	surface matrix above)
		GMG_PROFILE_BEGIN(GMG_BuildRAP_MultiplyRAP);
		AddMultiplyOf(*lc.A, *R, *spA, *P);
		GMG_PROFILE_END();
		UG_DLOG(LIB_DISC_MULTIGRID, 4, " end init_rap_operator: build rap on lev "<<lev<<"\n");
	}
	GMG_PROFILE_END();
	UG_DLOG(LIB_DISC_MULTIGRID, 4, " end init_rap_operator: build rap\n");

//	write computed level matrices for debug purpose
	for(int lev = m_baseLev; lev <= m_topLev; ++lev){
		LevData& ld = *m_vLevData[lev];
		write_debug(*ld.A, "LevelMatrix", *ld.st, *ld.st);
	}

//	if a gathered base solver is used, collect the complete base matrix on
//	the gathering process (v-master side); other procs release their copy
	if(m_bGatheredBaseUsed)
	{
		#ifdef UG_PARALLEL
		GMG_PROFILE_BEGIN(GMG_BuildRAP_CopyNoghostToGhost_GatheredBase);
		LevData& ld = *m_vLevData[m_baseLev];
		if(gathered_base_master()){

			if (m_bMatrixStructureIsConst)
				spGatheredBaseMat->clear_retain_structure();
			else
				spGatheredBaseMat->resize_and_clear(ld.t->size(), ld.t->size());
			copy_noghost_to_ghost(spGatheredBaseMat.template cast_dynamic<matrix_type>(), ld.A, ld.vMapPatchToGlobal);
		} else {
		//	non-master procs only provide their local contribution for sending
			spGatheredBaseMat = SmartPtr<MatrixOperator<matrix_type, vector_type> >(new MatrixOperator<matrix_type, vector_type>);
			*spGatheredBaseMat = *ld.A;
		}
		spGatheredBaseMat->set_storage_type(m_spSurfaceMat->get_storage_mask());
		spGatheredBaseMat->set_layouts(ld.t->layouts());
		GMG_PROFILE_END();

		GMG_PROFILE_BEGIN(GMG_BuildRAP_SendAndRevieve_GatheredBase);
		ComPol_MatAddSetZeroInnerInterfaceCouplings<matrix_type> cpMatAdd(*spGatheredBaseMat);
		m_Com.send_data(spGatheredBaseMat->layouts()->vertical_slave(), cpMatAdd);
		if(gathered_base_master())
			m_Com.receive_data(spGatheredBaseMat->layouts()->vertical_master(), cpMatAdd);
		m_Com.communicate();
		GMG_PROFILE_END();

	//	after sending, only the gathering master keeps the matrix
		if(!gathered_base_master()){
			spGatheredBaseMat = SPNULL;
		}
		#endif
	}

//	coarse grid contributions (couplings between levels at the surface rim)
	try{
		init_rap_rim_cpl();
	}
	UG_CATCH_THROW("GMG:init: Cannot init missing coarse grid coupling.");

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_rap_operator\n");
}
1014 :
///	assembles the rim coupling matrices in the adaptive (RAP) case
/**	In adaptive refinement the surface grid spreads over several levels, and
 * couplings between a shadowing dof on level `lev` and a dof on level
 * `lev-1` cannot be stored in any single level matrix. These couplings are
 * extracted from the surface matrix and cached in
 * RimCpl_Fine_Coarse (fine defect <- coarse correction) and — only when
 * smoothing across the surface rim is enabled — RimCpl_Coarse_Fine
 * (the transposed-pattern coupling).
 */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_rap_rim_cpl()
{
	GMG_PROFILE_FUNC();
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - init_rap_rim_cpl " << "\n");

//	clear matrices
	for(int lev = m_baseLev; lev <= m_topLev; ++lev){
		LevData& ld = *m_vLevData[lev];
		ld.RimCpl_Fine_Coarse.resize_and_clear(0,0);
		ld.RimCpl_Coarse_Fine.resize_and_clear(0,0);
	}

//	loop all levels to compute the missing contribution
//	(levels at or below m_LocalFullRefLevel are fully refined and have no rim)
	for(int lev = m_topLev; lev > m_LocalFullRefLevel; --lev)
	{
		LevData& lc = *m_vLevData[lev-1];
		LevData& lf = *m_vLevData[lev];

		lc.RimCpl_Fine_Coarse.resize_and_clear(lf.sd->size(), lc.sc->size());
		if(m_bSmoothOnSurfaceRim)
			lc.RimCpl_Coarse_Fine.resize_and_clear(lc.sd->size(), lf.sc->size());

	//	only rows of shadowing dofs can couple to the next coarser level
		for(size_t i = 0; i< lf.vShadowing.size(); ++i)
		{
			const size_t lvlRow = lf.vShadowing[i];
			const size_t srfRow = lf.vSurfShadowing[i];

		//	scan the surface-matrix row and pick the couplings whose column
		//	lives exactly one level below
			typedef typename matrix_type::const_row_iterator const_row_iterator;
			const_row_iterator conn = m_spSurfaceMat->begin_row(srfRow);
			const_row_iterator connEnd = m_spSurfaceMat->end_row(srfRow);
			for( ;conn != connEnd; ++conn)
			{
				const size_t srfCol = conn.index();

				if(m_vSurfToLevelMap[srfCol].level == lev-1){
					const size_t lvlCol = m_vSurfToLevelMap[srfCol].index;
					(lc.RimCpl_Fine_Coarse)(lvlRow, lvlCol) = conn.value();

				//	reverse coupling is read at the transposed surface entry
					if(m_bSmoothOnSurfaceRim){
						lc.RimCpl_Coarse_Fine(lvlCol, lvlRow) = (*m_spSurfaceMat)(srfCol, srfRow);
					}
				}
			}
		}

		#ifdef UG_PARALLEL
		lc.RimCpl_Fine_Coarse.set_storage_type(m_spSurfaceMat->get_storage_mask());
		if(m_bSmoothOnSurfaceRim)
			lc.RimCpl_Coarse_Fine.set_storage_type(m_spSurfaceMat->get_storage_mask());
		#endif
		write_debug(lc.RimCpl_Fine_Coarse, "RimCpl", *lf.sd, *lc.sc);
		if(m_bSmoothOnSurfaceRim)
			write_debug(lc.RimCpl_Coarse_Fine, "RimCpl", *lc.sd, *lf.sc);
	}

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - init_rap_rim_cpl " << "\n");
}
1074 :
1075 :
1076 : template <typename TDomain, typename TAlgebra>
1077 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1078 : init_transfer()
1079 : {
1080 : GMG_PROFILE_FUNC();
1081 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_transfer\n");
1082 :
1083 : // loop all levels
1084 0 : for(int lev = m_baseLev+1; lev <= m_topLev; ++lev)
1085 : {
1086 0 : LevData& lf = *m_vLevData[lev];
1087 0 : LevData& lc = *m_vLevData[lev-1];
1088 :
1089 : // check if same operator for prolongation and restriction used
1090 : bool bOneOperator = false;
1091 0 : if(lf.Prolongation == lf.Restriction) bOneOperator = true;
1092 :
1093 0 : lf.Prolongation->set_levels(lc.st->grid_level(), lf.t->grid_level());
1094 0 : lf.Prolongation->clear_constraints();
1095 0 : for(size_t i = 0; i < m_spAss->num_constraints(); ++i){
1096 0 : SmartPtr<IConstraint<TAlgebra> > pp = m_spAss->constraint(i);
1097 0 : lf.Prolongation->add_constraint(pp);
1098 : }
1099 0 : lf.Prolongation->init();
1100 :
1101 0 : if(!bOneOperator){
1102 0 : lf.Restriction->set_levels(lc.st->grid_level(), lf.t->grid_level());
1103 0 : lf.Restriction->clear_constraints();
1104 0 : for(size_t i = 0; i < m_spAss->num_constraints(); ++i){
1105 0 : SmartPtr<IConstraint<TAlgebra> > pp = m_spAss->constraint(i);
1106 0 : lf.Restriction->add_constraint(pp);
1107 : }
1108 0 : lf.Restriction->init();
1109 : }
1110 : }
1111 :
1112 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_transfer\n");
1113 0 : }
1114 :
1115 : template <typename TDomain, typename TAlgebra>
1116 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1117 : init_projection()
1118 : {
1119 : GMG_PROFILE_FUNC();
1120 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_projection\n");
1121 :
1122 0 : for(int lev = m_baseLev+1; lev <= m_topLev; ++lev)
1123 : {
1124 0 : LevData& lf = *m_vLevData[lev];
1125 0 : LevData& lc = *m_vLevData[lev-1];
1126 0 : GridLevel gw_gl; enter_debug_writer_section(gw_gl, "ProjectionInit", lev);
1127 0 : lf.Projection->set_levels(lc.st->grid_level(), lf.t->grid_level());
1128 0 : lf.Projection->init();
1129 0 : leave_debug_writer_section(gw_gl);
1130 : }
1131 :
1132 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_projection\n");
1133 0 : }
1134 :
1135 : template <typename TDomain, typename TAlgebra>
1136 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1137 : init_smoother()
1138 : {
1139 : GMG_PROFILE_FUNC();
1140 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_smoother\n");
1141 :
1142 : // Init smoother
1143 0 : for(int lev = m_baseLev+1; lev <= m_topLev; ++lev)
1144 : {
1145 0 : LevData& ld = *m_vLevData[lev];
1146 :
1147 : UG_DLOG(LIB_DISC_MULTIGRID, 4, " init_smoother: initializing pre-smoother on lev "<<lev<<"\n");
1148 : bool success;
1149 0 : GridLevel gw_gl; enter_debug_writer_section(gw_gl, "PreSmootherInit", lev);
1150 0 : try {success = ld.PreSmoother->init(ld.A, *ld.sc);}
1151 0 : UG_CATCH_THROW("GMG::init: Cannot init pre-smoother for level "<<lev);
1152 0 : leave_debug_writer_section(gw_gl);
1153 0 : if (!success)
1154 0 : UG_THROW("GMG::init: Cannot init pre-smoother for level "<<lev);
1155 :
1156 : UG_DLOG(LIB_DISC_MULTIGRID, 4, " init_smoother: initializing post-smoother on lev "<<lev<<"\n");
1157 0 : if(ld.PreSmoother != ld.PostSmoother)
1158 : {
1159 0 : GridLevel gw_gl; enter_debug_writer_section(gw_gl, "PostSmootherInit", lev);
1160 0 : try {success = ld.PostSmoother->init(ld.A, *ld.sc);}
1161 0 : UG_CATCH_THROW("GMG::init: Cannot init post-smoother for level "<<lev);
1162 0 : leave_debug_writer_section(gw_gl);
1163 0 : if (!success)
1164 0 : UG_THROW("GMG::init: Cannot init post-smoother for level "<<lev);
1165 : }
1166 : }
1167 :
1168 : UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_smoother\n");
1169 0 : }
1170 :
///	initializes the base solver on the base level
/**	Two modes: (a) gathered base — the whole base-level system was collected
 * on one gathering process (see init_rap_operator/assemble); only that
 * process initializes the solver, on a layout stripped down to its vertical
 * masters. (b) distributed base — every process initializes the solver with
 * its local part; in this case a parallel-capable solver is required as soon
 * as horizontal interfaces exist.
 */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
init_base_solver()
{
	GMG_PROFILE_FUNC();
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start init_base_solver\n");
	LevData& ld = *m_vLevData[m_baseLev];

//	check, if a gathering base solver is required:
	if(m_bGatheredBaseUsed)
	{
	//	only init on gathering proc (all other procs do not hold the matrix)
		if(!gathered_base_master()) return;

	//	create layout with only v-master (v-slave do not exist on gathering proc)
	//	especially: remove h-layouts and set proc-comm to process-only
		#ifdef UG_PARALLEL
		SmartPtr<AlgebraLayouts> spOneProcLayout =
			SmartPtr<AlgebraLayouts>(new AlgebraLayouts);
		spOneProcLayout->clear();
		spOneProcLayout->vertical_master() = ld.t->layouts()->vertical_master();
		spOneProcLayout->proc_comm() = pcl::ProcessCommunicator(pcl::PCD_LOCAL);

		spGatheredBaseMat->set_layouts(spOneProcLayout);
		spGatheredBaseCorr->set_layouts(spOneProcLayout);

		ld.t->set_layouts(spOneProcLayout);
		#endif

	//	we init the base solver with the whole grid matrix
	//	(ld.t is zeroed only if no surface solution is available as a start
	//	value for solvers whose init inspects the vector)
		if(!m_pSurfaceSol)
			*ld.t = 0;
		GridLevel gw_gl; enter_debug_writer_section(gw_gl, "GatheredBaseSolverInit", m_baseLev);
		if(!m_spBaseSolver->init(spGatheredBaseMat, *ld.t))
			UG_THROW("GMG::init: Cannot init base solver on baselevel "<< m_baseLev);
		leave_debug_writer_section(gw_gl);
	}
	else
	{
		#ifdef UG_PARALLEL
	//	a horizontally distributed base level requires a parallel base solver
		if(!ld.st->layouts()->master().empty() || !ld.st->layouts()->slave().empty())
			if(!m_spBaseSolver->supports_parallel())
				UG_THROW("GMG: Base level is distributed onto more than process, "
						"but the chosen base solver "<<m_spBaseSolver->name()<<
						" does not support parallel solving. Choose a parallel"
						" base solver or construct a situation, where a single"
						" process stores the whole base grid, to cure this issue.");
		#endif

		if(!m_pSurfaceSol)
			*ld.st = 0;
		GridLevel gw_gl; enter_debug_writer_section(gw_gl, "BaseSolverInit", m_baseLev);
		if(!m_spBaseSolver->init(ld.A, *ld.st))
			UG_THROW("GMG::init: Cannot init base solver on baselevel "<< m_baseLev);
		leave_debug_writer_section(gw_gl);
	}

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop init_base_solver\n");
}
1230 :
1231 : template <typename TDomain, typename TAlgebra>
1232 : template <typename TElem>
1233 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1234 : init_index_mappings()
1235 : {
1236 : /* This Method is used to create a caching the transfer between surface and
1237 : * level grid functions. Note, that in the adaptive case the surface grid is
1238 : * distributed over several levels (all elements that do not have children). But
1239 : * even in the full refinement case the surface level and the top level may
1240 : * not have the same index pattern, since a different sorting may be used (however
1241 : * the number of indices must be equal in the full-ref case).
1242 : * In every case, each surface index has a corresponding index on some level
1243 : * of the level grid functions. In this method this index is looked up and
1244 : * stored in a vector (for fast access). I.e. for every surface index i, the
1245 : * corresponding index in the level grid function is stored in m_vSurfToLevelMap[i]
1246 : * together with the level of the grid function. Using this mapping copying
1247 : * between surface <-> levels is implemented. Note: When Shadow-Copy are present
1248 : * the dof manager usually assigns the same surface index to both, shadowing and
1249 : * shadow-copy. Thus, there exist more than one corresponding level index for
1250 : * such a surface index. In this case the shadowing index is used in the mapping
1251 : * since this index will have the correct value at the end of the multigrid
1252 : * cycle and at startup the projection to the level is necessary only to shadowing,
1253 : * because shadows will get their value by transfer between the level. In order
1254 : * to get the map this way, the surface loop below is only performed on
1255 : * SURFACE_NONCOPY elements.
1256 : */
1257 :
1258 0 : PeriodicBoundaryManager* pbm = m_spApproxSpace->domain()->grid()->periodic_boundary_manager();
1259 : ConstSmartPtr<SurfaceView> spSurfView = m_spApproxSpace->surface_view();
1260 :
1261 0 : std::vector<ConstSmartPtr<DoFDistribution> > vLevelDD(m_topLev+1);
1262 0 : for(int lev = m_baseLev; lev <= m_topLev; ++lev)
1263 0 : vLevelDD[lev] = m_spApproxSpace->dof_distribution(GridLevel(lev, m_GridLevelType, false));
1264 :
1265 0 : ConstSmartPtr<DoFDistribution> surfDD =
1266 0 : m_spApproxSpace->dof_distribution(GridLevel(m_surfaceLev, GridLevel::SURFACE));
1267 :
1268 : // iterators for subset
1269 : // \todo: The loop below should only be on SURFACE_NONCOPY, since the
1270 : // copy-shadows have the same indices as their shadowing. In such a
1271 : // case the child index (shadowing) must win. This could be realized by
1272 : // looping only non-copy elements. But it seems, that on a process
1273 : // the shadow-copy may exist without its shadowing. In such a case
1274 : // the mapping is invalid. To fix this, the loop is extended temporarily
1275 : // below and doubling of appearance is checked.
1276 : typedef typename DoFDistribution::traits<TElem>::const_iterator iter_type;
1277 : iter_type iter = surfDD->begin<TElem>(SurfaceView::ALL);
1278 : iter_type iterEnd = surfDD->end<TElem>(SurfaceView::ALL);
1279 :
1280 : // vector of indices
1281 : std::vector<size_t> vSurfInd, vLevelInd;
1282 :
1283 : // loop all elements of type
1284 0 : for( ; iter != iterEnd; ++iter)
1285 : {
1286 : // get elem
1287 : TElem* elem = *iter;
1288 :
1289 : // ignore slave (otherwise they would appear twice in map)
1290 0 : if (pbm && pbm->is_slave(elem)) continue;
1291 :
1292 : // get elem level
1293 0 : int level = m_spApproxSpace->domain()->grid()->get_level(elem);
1294 :
1295 0 : if (m_GridLevelType == GridLevel::SURFACE)
1296 0 : level = m_topLev;
1297 :
1298 : // check that coarse grid covers whole domain. If this is not the case,
1299 : // some surface indices are mapped to grid levels below baseLev. We
1300 : // do not allow that.
1301 0 : if(level < m_baseLev)
1302 0 : UG_THROW("GMG::init: Some index of the surface grid is located on "
1303 : "level "<<level<<", which is below the chosen baseLev "<<
1304 : m_baseLev<<". This is not allowed, since otherwise the "
1305 : "gmg correction would only affect parts of the domain. Use"
1306 : "gmg:set_base_level(lvl) to cure this issue.");
1307 :
1308 : // remember minimal level, that contains a surface index on this proc
1309 0 : m_LocalFullRefLevel = std::min(m_LocalFullRefLevel, level);
1310 :
1311 : // extract all algebra indices for the element on surface and level
1312 0 : vLevelDD[level]->inner_algebra_indices(elem, vLevelInd);
1313 0 : surfDD->inner_algebra_indices(elem, vSurfInd);
1314 : UG_ASSERT(vSurfInd.size() == vLevelInd.size(), "Number of indices does not match.");
1315 :
1316 : // extract shadows
1317 0 : if(m_GridLevelType == GridLevel::LEVEL) {
1318 0 : if(spSurfView->surface_state(elem).contains(SurfaceView::MG_SURFACE_RIM)){
1319 0 : for(size_t i = 0; i < vSurfInd.size(); ++i){
1320 0 : m_vLevData[level]->vShadowing.push_back(vLevelInd[i]);
1321 0 : m_vLevData[level]->vSurfShadowing.push_back(vSurfInd[i]);
1322 : }
1323 : }
1324 : }
1325 :
1326 : // set mapping index
1327 0 : for(size_t i = 0; i < vSurfInd.size(); ++i){
1328 : if(spSurfView->surface_state(elem).contains(SurfaceView::MG_SHADOW_RIM_COPY)
1329 0 : && (level != m_topLev)) {
1330 0 : if(m_GridLevelType == GridLevel::LEVEL){
1331 0 : m_vSurfToLevelMap[vSurfInd[i]].indexLower = vLevelInd[i];
1332 0 : m_vSurfToLevelMap[vSurfInd[i]].levelLower = level;
1333 : }
1334 : } else {
1335 0 : m_vSurfToLevelMap[vSurfInd[i]].index = vLevelInd[i];
1336 0 : m_vSurfToLevelMap[vSurfInd[i]].level = level;
1337 0 : m_vLevData[level]->vSurfLevelMap.push_back(SurfLevelMap(vLevelInd[i], vSurfInd[i]));
1338 : }
1339 : }
1340 : }
1341 0 : }
1342 :
1343 :
1344 : template <typename TDomain, typename TAlgebra>
1345 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1346 : init_index_mappings()
1347 : {
1348 : GMG_PROFILE_FUNC();
1349 0 : ConstSmartPtr<DoFDistribution> surfDD =
1350 0 : m_spApproxSpace->dof_distribution(GridLevel(m_surfaceLev, GridLevel::SURFACE));
1351 :
1352 0 : m_vSurfToLevelMap.resize(0);
1353 0 : m_vSurfToLevelMap.resize(surfDD->num_indices());
1354 0 : for(int lev = m_baseLev; lev <= m_topLev; ++lev){
1355 0 : m_vLevData[lev]->vShadowing.clear();
1356 : m_vLevData[lev]->vSurfShadowing.clear();
1357 : m_vLevData[lev]->vSurfLevelMap.clear();
1358 : }
1359 0 : m_LocalFullRefLevel = std::numeric_limits<int>::max();
1360 :
1361 0 : if(surfDD->max_dofs(VERTEX)) init_index_mappings<Vertex>();
1362 0 : if(surfDD->max_dofs(EDGE)) init_index_mappings<Edge>();
1363 0 : if(surfDD->max_dofs(FACE)) init_index_mappings<Face>();
1364 0 : if(surfDD->max_dofs(VOLUME)) init_index_mappings<Volume>();
1365 :
1366 0 : if(m_baseLev > m_LocalFullRefLevel)
1367 0 : UG_THROW("GMG: Base level "<<m_baseLev<<" does not cover whole grid. "
1368 : <<"Highest full-ref level is "<<m_LocalFullRefLevel);
1369 0 : }
1370 :
1371 : template <typename TDomain, typename TAlgebra>
1372 : template <typename TElem>
1373 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1374 : init_noghost_to_ghost_mapping( std::vector<size_t>& vNoGhostToGhostMap,
1375 : ConstSmartPtr<DoFDistribution> spNoGhostDD,
1376 : ConstSmartPtr<DoFDistribution> spGhostDD)
1377 : {
1378 : typedef typename DoFDistribution::traits<TElem>::const_iterator iter_type;
1379 0 : iter_type iter = spNoGhostDD->begin<TElem>();
1380 0 : iter_type iterEnd = spNoGhostDD->end<TElem>();
1381 :
1382 : // vector of indices
1383 : std::vector<size_t> vGhostInd, vNoGhostInd;
1384 :
1385 : // loop all elements of type
1386 0 : for( ; iter != iterEnd; ++iter){
1387 : // get elem
1388 : TElem* elem = *iter;
1389 :
1390 : // extract all algebra indices for the element
1391 0 : spGhostDD->inner_algebra_indices(elem, vGhostInd);
1392 0 : spNoGhostDD->inner_algebra_indices(elem, vNoGhostInd);
1393 : UG_ASSERT(vGhostInd.size() == vNoGhostInd.size(), "Number of indices does not match.");
1394 :
1395 : // set mapping index
1396 0 : for(size_t i = 0; i < vNoGhostInd.size(); ++i){
1397 0 : vNoGhostToGhostMap[vNoGhostInd[i]] = vGhostInd[i];
1398 : }
1399 : }
1400 0 : }
1401 :
1402 :
1403 : template <typename TDomain, typename TAlgebra>
1404 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1405 : init_noghost_to_ghost_mapping(int lev)
1406 : {
1407 : GMG_PROFILE_FUNC();
1408 :
1409 0 : LevData& ld = *m_vLevData[lev];
1410 0 : std::vector<size_t>& vMapPatchToGlobal = ld.vMapPatchToGlobal;
1411 0 : vMapPatchToGlobal.resize(0);
1412 :
1413 0 : ConstSmartPtr<DoFDistribution> spNoGhostDD = ld.st->dd();
1414 0 : ConstSmartPtr<DoFDistribution> spGhostDD = ld.t->dd();
1415 :
1416 0 : if(spNoGhostDD == spGhostDD) return;
1417 :
1418 0 : vMapPatchToGlobal.resize(spNoGhostDD->num_indices());
1419 :
1420 0 : if(spNoGhostDD->max_dofs(VERTEX)) init_noghost_to_ghost_mapping<Vertex>(vMapPatchToGlobal, spNoGhostDD, spGhostDD);
1421 0 : if(spNoGhostDD->max_dofs(EDGE)) init_noghost_to_ghost_mapping<Edge>(vMapPatchToGlobal, spNoGhostDD, spGhostDD);
1422 0 : if(spNoGhostDD->max_dofs(FACE)) init_noghost_to_ghost_mapping<Face>(vMapPatchToGlobal, spNoGhostDD, spGhostDD);
1423 0 : if(spNoGhostDD->max_dofs(VOLUME)) init_noghost_to_ghost_mapping<Volume>(vMapPatchToGlobal, spNoGhostDD, spGhostDD);
1424 : }
1425 :
1426 :
1427 : template <typename TDomain, typename TAlgebra>
1428 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1429 : copy_ghost_to_noghost(SmartPtr<GF> spVecTo,
1430 : ConstSmartPtr<GF> spVecFrom,
1431 : const std::vector<size_t>& vMapPatchToGlobal)
1432 : {
1433 : GMG_PROFILE_FUNC();
1434 :
1435 : if(spVecTo == spVecFrom)
1436 : UG_THROW("GMG::copy_ghost_to_noghost: Should not be called on equal GF.");
1437 :
1438 : if(spVecTo->dd() == spVecFrom->dd())
1439 : {
1440 : for(size_t i = 0; i < spVecTo->size(); ++i)
1441 : (*spVecTo)[i] = (*spVecFrom)[i];
1442 : }
1443 : else
1444 : {
1445 : UG_ASSERT(vMapPatchToGlobal.size() == spVecTo->size(),
1446 : "Mapping range ("<<vMapPatchToGlobal.size()<<") != "
1447 : "To-Vec-Size ("<<spVecTo->size()<<")");
1448 :
1449 : for(size_t i = 0; i < vMapPatchToGlobal.size(); ++i)
1450 : (*spVecTo)[i] = (*spVecFrom)[ vMapPatchToGlobal[i] ];
1451 : }
1452 :
1453 : #ifdef UG_PARALLEL
1454 : spVecTo->set_storage_type(spVecFrom->get_storage_mask());
1455 : #endif
1456 : }
1457 :
1458 : template <typename TDomain, typename TAlgebra>
1459 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1460 : copy_noghost_to_ghost(SmartPtr<GF> spVecTo,
1461 : ConstSmartPtr<GF> spVecFrom,
1462 : const std::vector<size_t>& vMapPatchToGlobal)
1463 : {
1464 : GMG_PROFILE_FUNC();
1465 :
1466 : if(spVecTo == spVecFrom)
1467 : UG_THROW("GMG::copy_noghost_to_ghost: Should not be called on equal GF.");
1468 :
1469 : if(spVecTo->dd() == spVecFrom->dd())
1470 : {
1471 : for(size_t i = 0; i < spVecTo->size(); ++i)
1472 : (*spVecTo)[i] = (*spVecFrom)[i];
1473 : }
1474 : else
1475 : {
1476 : UG_ASSERT(vMapPatchToGlobal.size() == spVecFrom->size(),
1477 : "Mapping domain ("<<vMapPatchToGlobal.size()<<") != "
1478 : "From-Vec-Size ("<<spVecFrom->size()<<")");
1479 :
1480 : for(size_t i = 0; i < vMapPatchToGlobal.size(); ++i)
1481 : (*spVecTo)[ vMapPatchToGlobal[i] ] = (*spVecFrom)[i];
1482 : }
1483 : #ifdef UG_PARALLEL
1484 : spVecTo->set_storage_type(spVecFrom->get_storage_mask());
1485 : #endif
1486 : }
1487 :
1488 : template <typename TDomain, typename TAlgebra>
1489 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1490 : copy_noghost_to_ghost(SmartPtr<matrix_type> spMatTo,
1491 : ConstSmartPtr<matrix_type> spMatFrom,
1492 : const std::vector<size_t>& vMapPatchToGlobal)
1493 : {
1494 : GMG_PROFILE_FUNC();
1495 : UG_ASSERT(vMapPatchToGlobal.size() == spMatFrom->num_rows(),
1496 : "Mapping domain ("<<vMapPatchToGlobal.size()<<") != "
1497 : "From-Mat-Num-Rows ("<<spMatFrom->num_rows()<<")");
1498 : UG_ASSERT(vMapPatchToGlobal.size() == spMatFrom->num_cols(),
1499 : "Mapping domain ("<<vMapPatchToGlobal.size()<<") != "
1500 : "From-Mat-Num-Cols ("<<spMatFrom->num_cols()<<")");
1501 :
1502 : for(size_t noghostFrom = 0; noghostFrom < vMapPatchToGlobal.size(); ++noghostFrom)
1503 : {
1504 : typedef typename matrix_type::const_row_iterator row_iter;
1505 : row_iter conn = spMatFrom->begin_row(noghostFrom);
1506 : row_iter connEnd = spMatFrom->end_row(noghostFrom);
1507 : for(; conn != connEnd; ++conn)
1508 : {
1509 : const typename matrix_type::value_type& block = conn.value();
1510 : size_t noghostTo = conn.index();
1511 :
1512 : (*spMatTo)(vMapPatchToGlobal[noghostFrom], vMapPatchToGlobal[noghostTo])
1513 : = block;
1514 : }
1515 : }
1516 :
1517 : #ifdef UG_PARALLEL
1518 : spMatTo->set_storage_type(spMatFrom->get_storage_mask());
1519 : #endif
1520 : }
1521 :
1522 :
1523 : ////////////////////////////////////////////////////////////////////////////////
1524 : // Init Level Data
1525 : ////////////////////////////////////////////////////////////////////////////////
1526 :
1527 : template <typename TDomain, typename TAlgebra>
1528 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
1529 : init_level_memory(int baseLev, int topLev)
1530 : {
1531 : GMG_PROFILE_FUNC();
1532 :
1533 0 : m_vLevData.resize(0);
1534 0 : m_vLevData.resize(topLev+1);
1535 :
1536 0 : for(int lev = baseLev; lev <= topLev; ++lev)
1537 : {
1538 0 : m_vLevData[lev] = SmartPtr<LevData>(new LevData);
1539 : LevData& ld = *m_vLevData[lev];
1540 :
1541 0 : GridLevel gl = GridLevel(lev, m_GridLevelType, false);
1542 0 : ld.sc = SmartPtr<GF>(new GF(m_spApproxSpace, gl, false));
1543 0 : ld.sd = SmartPtr<GF>(new GF(m_spApproxSpace, gl, false));
1544 0 : ld.st = SmartPtr<GF>(new GF(m_spApproxSpace, gl, false));
1545 :
1546 : // TODO: all procs must call this, since a MPI-Group is created.
1547 : // Think about optimizing this
1548 : #ifdef UG_PARALLEL
1549 : GridLevel glGhosts = GridLevel(lev, m_GridLevelType, true);
1550 : ld.t = SmartPtr<GF>(new GF(m_spApproxSpace, glGhosts, false));
1551 : if( ld.t->layouts()->vertical_slave().empty() &&
1552 : ld.t->layouts()->vertical_master().empty())
1553 : #endif
1554 : {
1555 0 : ld.t = ld.st;
1556 : }
1557 :
1558 0 : ld.A = SmartPtr<MatrixOperator<matrix_type, vector_type> >(
1559 0 : new MatrixOperator<matrix_type, vector_type>);
1560 :
1561 0 : ld.PreSmoother = m_spPreSmootherPrototype->clone();
1562 0 : if(m_spPreSmootherPrototype == m_spPostSmootherPrototype)
1563 0 : ld.PostSmoother = ld.PreSmoother;
1564 : else
1565 0 : ld.PostSmoother = m_spPostSmootherPrototype->clone();
1566 :
1567 : // let the smoothers know the grid level they are on if they need to know
1568 0 : ILevelPreconditioner<TAlgebra>* lpre = dynamic_cast<ILevelPreconditioner<TAlgebra>*>(ld.PreSmoother.get());
1569 0 : ILevelPreconditioner<TAlgebra>* lpost = dynamic_cast<ILevelPreconditioner<TAlgebra>*>(ld.PostSmoother.get());
1570 0 : if (lpre) lpre->set_grid_level(gl);
1571 0 : if (lpost) lpost->set_grid_level(gl);
1572 :
1573 0 : ld.Projection = m_spProjectionPrototype->clone();
1574 :
1575 0 : ld.Prolongation = m_spProlongationPrototype->clone();
1576 0 : if(m_spProlongationPrototype == m_spRestrictionPrototype)
1577 0 : ld.Restriction = ld.Prolongation;
1578 : else
1579 0 : ld.Restriction = m_spRestrictionPrototype->clone();
1580 :
1581 0 : init_noghost_to_ghost_mapping(lev);
1582 : }
1583 :
1584 : bool bHasVertMaster = false;
1585 : bool bHasVertSlave = false;
1586 : bool bHasHorrMaster = false;
1587 : bool bHasHorrSlave = false;
1588 : #ifdef UG_PARALLEL
1589 : LevData& ld = *m_vLevData[baseLev];
1590 : if( !ld.t->layouts()->vertical_master().empty()) bHasVertMaster = true;
1591 : if( !ld.t->layouts()->vertical_slave().empty()) bHasVertSlave = true;
1592 : if( !ld.t->layouts()->master().empty()) bHasHorrMaster = true;
1593 : if( !ld.t->layouts()->slave().empty()) bHasHorrSlave = true;
1594 : #endif
1595 : bool bHasVertConn = bHasVertMaster || bHasVertSlave;
1596 : bool bHasHorrConn = bHasHorrMaster || bHasHorrSlave;
1597 :
1598 : m_bGatheredBaseUsed = m_bGatheredBaseIfAmbiguous;
1599 :
1600 : // if no vert-interfaces are present (especially in serial or on a proc that
1601 : // does not have parts of the base level), we cannot gather. In this case we
1602 : // overrule the choice of the user
1603 : // Note: levels not containing any dof, are skipped from computation anyway
1604 0 : if(!bHasVertConn) m_bGatheredBaseUsed = false;
1605 :
1606 : // check if parallel solver is available, if not, try to use gathered
1607 : if(!m_bGatheredBaseUsed
1608 : && bHasHorrConn
1609 : && !m_spBaseSolver->supports_parallel())
1610 : {
1611 : if(!bHasVertConn)
1612 : UG_THROW("GMG: base level distributed in parallel, without possibility"
1613 : " to gather on one process, but chosen base solver is not"
1614 : " parallel. Choose a parallel base solver.")
1615 :
1616 : m_bGatheredBaseUsed = true;
1617 : }
1618 :
1619 : // check if gathering base solver possible: If some horizontal layouts are
1620 : // given, we know, that still the grid is distributed. But, if no
1621 : // vertical layouts are present in addition, we can not gather the vectors
1622 : // to on proc.
1623 : if(m_bGatheredBaseUsed){
1624 : if(bHasVertMaster && bHasVertSlave)
1625 : UG_THROW("GMG: Gathered base solver expects one proc to have all "
1626 : "v-masters and no v-slaves. Use gmg:set_gathered_base_solver_if_ambiguous(false)"
1627 : " to avoid this error.");
1628 :
1629 : if(!bHasVertConn && bHasHorrConn){
1630 : UG_THROW("GMG: Base level distributed among processes and no possibility"
1631 : " of gathering (vert. interfaces) present, but a gathering"
1632 : " base solving is required. Use gmg:set_gathered_base_solver_if_ambiguous(false)"
1633 : " to avoid this error.");
1634 : }
1635 :
1636 : #ifdef UG_PARALLEL
1637 : if(bHasVertSlave && (ld.t->layouts()->vertical_slave().num_interfaces() != 1))
1638 : UG_THROW("GMG: Gathered base solver expects a v-slave containing "
1639 : "process to send all its values to exactly on master. But "
1640 : "more then one master detected. Use gmg:set_gathered_base_solver_if_ambiguous(false)"
1641 : " to avoid this error.");
1642 :
1643 : if(bHasVertSlave && (ld.t->layouts()->vertical_slave().num_interface_elements() != ld.t->size()))
1644 : UG_THROW("GMG: Gathered base solver expects that all indices "
1645 : "are sent to exactly one master. But not all indices are "
1646 : "v-slaves. Use gmg:set_gathered_base_solver_if_ambiguous(false)"
1647 : " to avoid this error.");
1648 : #endif
1649 : }
1650 :
1651 : if(m_bGatheredBaseUsed && gathered_base_master()){
1652 : // note: we can safely call this here, since the dd has already been
1653 : // created in the level loop
1654 : GridLevel glGhosts = GridLevel(baseLev, m_GridLevelType, true);
1655 : spGatheredBaseCorr = SmartPtr<GF>(new GF(m_spApproxSpace, glGhosts, false));
1656 :
1657 : spGatheredBaseMat = SmartPtr<MatrixOperator<matrix_type, vector_type> >(
1658 : new MatrixOperator<matrix_type, vector_type>);
1659 : } else {
1660 0 : spGatheredBaseCorr = SPNULL;
1661 0 : spGatheredBaseMat = SPNULL;
1662 : }
1663 0 : }
1664 :
1665 : template <typename TDomain, typename TAlgebra>
1666 0 : bool AssembledMultiGridCycle<TDomain, TAlgebra>::
1667 : gathered_base_master() const
1668 : {
1669 0 : if(!m_bGatheredBaseUsed)
1670 0 : UG_THROW("GMG: Should only be called when gather-base solver used.")
1671 :
1672 : #ifdef UG_PARALLEL
1673 : if(!m_vLevData[m_baseLev]->t->layouts()->vertical_master().empty())
1674 : return true;
1675 : #endif
1676 :
1677 0 : return false;
1678 : }
1679 :
1680 :
1681 : ////////////////////////////////////////////////////////////////////////////////
1682 : // Cycle - Methods
1683 : ////////////////////////////////////////////////////////////////////////////////
1684 :
///	applies pre-smoothing on level 'lev' and restricts the defect to 'lev-1'
/**
 * Performs m_numPreSmooth smoothing steps on the fine level, accumulating
 * the smoother output into the level correction lf.sc and keeping the level
 * defect lf.sd up to date. Afterwards the defect is restricted to the coarse
 * level defect lc.sd. In parallel runs the defect is first communicated to
 * vertical masters (ghost vector lf.t); this communication can be overlapped
 * with local computation when m_bCommCompOverlap is set.
 *
 * \param lev	fine grid level (must be > m_baseLev; lev-1 is the coarse level)
 */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
presmooth_and_restriction(int lev)
{
	GMG_PROFILE_FUNC();
//	lf: data of the fine level (lev), lc: data of the coarse level (lev-1)
	LevData& lf = *m_vLevData[lev];
	LevData& lc = *m_vLevData[lev-1];

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - presmooth on level "<<lev<<"\n");
	log_debug_data(lev, lf.n_restr_calls, "BeforePreSmooth");
	mg_stats_defect(*lf.sd, lev, mg_stats_type::BEFORE_PRE_SMOOTH);

//	PRESMOOTH
	GMG_PROFILE_BEGIN(GMG_PreSmooth);
	try{
	//	smooth several times
		for(int nu = 0; nu < m_numPreSmooth; ++nu)
		{
		//	a) Compute t = B*d with some iterator B
			GridLevel gw_gl; enter_debug_writer_section(gw_gl, "PreSmoother", lev, lf.n_pre_calls, nu);
			if(!lf.PreSmoother->apply(*lf.st, *lf.sd))
				UG_THROW("GMG: Smoothing step "<<nu+1<<" on level "<<lev<<" failed.");
			leave_debug_writer_section(gw_gl);

		//	b) handle patch rim: either suppress the correction on shadowing
		//	   indices, or propagate it to the coarse-level defect via the
		//	   rim coupling matrix
			if(!m_bSmoothOnSurfaceRim){
				const std::vector<size_t>& vShadowing = lf.vShadowing;
				for(size_t i = 0; i < vShadowing.size(); ++i)
					(*lf.st)[ vShadowing[i] ] = 0.0;
			} else {
				if(lev > m_LocalFullRefLevel)
					lc.RimCpl_Coarse_Fine.matmul_minus(*lc.sd, *lf.st);
			//	make sure each lc.sd has the same PST
			//	(if m_LocalFullRefLevel not equal on every proc)
#ifdef UG_PARALLEL
				else
					lc.sd->set_storage_type(PST_ADDITIVE);
#endif
			}

		//	c) update the defect with this correction ...
			lf.A->apply_sub(*lf.sd, *lf.st);

		//	d) ... and add the correction to the overall correction
		//	   (the final step's contribution is added after the defect has
		//	   been handed to the restriction communication, see below)
			if(nu < m_numPreSmooth-1)
				(*lf.sc) += (*lf.st);
		}
	}
	UG_CATCH_THROW("GMG: Pre-Smoothing on level "<<lev<<" failed.");
	lf.n_pre_calls++;
	GMG_PROFILE_END();

	log_debug_data(lev, lf.n_restr_calls, "AfterPreSmooth_BeforeCom");
	mg_stats_defect(*lf.sd, lev, mg_stats_type::AFTER_PRE_SMOOTH);
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - presmooth on level "<<lev<<"\n");

//	PARALLEL CASE: spD is the vector the restriction reads from; it is
//	switched to the ghost vector lf.t when vertical interfaces exist
	SmartPtr<GF> spD = lf.sd;
#ifdef UG_PARALLEL
	ComPol_VecAddSetZero<vector_type> cpVecAdd(lf.t.get());
	if( !lf.t->layouts()->vertical_slave().empty() ||
		!lf.t->layouts()->vertical_master().empty())
	{
	//	v-slaves indicate, that part of the parent are stored on a different proc.
	//	Thus the values of the defect must be transfered to the v-masters and will
	//	have their parents on the proc of the master and must be restricted on
	//	that process. Thus we have to transfer the defect (currently stored
	//	additive in the noghost-vectors) to the v-masters. And have to make sure
	//	that d is additive after this operation. We do that by ensuring that it
	//	is additive-unique regarding v-masters and v-slaves (v-slaves will be set to 0).
	//	We use a temporary vector including ghost, such that the no-ghost defect
	//	remains valid and can be used when the cycle comes back to this level.

		GMG_PROFILE_BEGIN(GMG_Restrict_CopyNoghostToGhost);
		SetLayoutValues(&(*lf.t), lf.t->layouts()->vertical_master(), 0);
		copy_noghost_to_ghost(lf.t, lf.sd, lf.vMapPatchToGlobal);
		GMG_PROFILE_END();

		GMG_PROFILE_BEGIN(GMG_Restrict_CollectAndSend);
		m_Com.send_data(lf.t->layouts()->vertical_slave(), cpVecAdd);
		m_Com.receive_data(lf.t->layouts()->vertical_master(), cpVecAdd);
		m_Com.communicate_and_resume();
		GMG_PROFILE_END();
	//	without overlap we must wait for the communication immediately;
	//	with overlap the wait happens after the local work below
		if(!m_bCommCompOverlap){
			GMG_PROFILE_BEGIN(GMG_Restrict_RecieveAndExtract_NoOverlap);
			m_Com.wait();
			GMG_PROFILE_END();
		}

		spD = lf.t;
	}
#endif

//	local work that can overlap with the pending communication above
	GMG_PROFILE_BEGIN(GMG_Restrict_OverlapedComputation);
//	reset corr on coarse level
	lc.sc->set(0.0);

//	update last correction
	if(m_numPreSmooth > 0)
		(*lf.sc) += (*lf.st);
	GMG_PROFILE_END();

#ifdef UG_PARALLEL
	if(m_bCommCompOverlap){
		GMG_PROFILE_BEGIN(GMG_Restrict_RecieveAndExtract_WithOverlap);
		m_Com.wait();
		GMG_PROFILE_END();
	}
#endif

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - restriction on level "<<lev<<"\n");
	log_debug_data(lev, lf.n_restr_calls, "BeforeRestrict");

//	RESTRICTION: transfer the (possibly ghosted) defect to the coarse level
	GridLevel gw_gl; enter_debug_writer_section(gw_gl, "Restriction", lev, lf.n_restr_calls);
	GMG_PROFILE_BEGIN(GMG_Restrict_Transfer);
	try{
		lf.Restriction->do_restrict(*lc.sd, *spD);
	}
	UG_CATCH_THROW("GMG: Restriction from lev "<<lev<<" to "<<lev-1<<" failed.");
	GMG_PROFILE_END();

//	apply post processes (e.g. boundary/constraint adjustments) on lc.sd
	GMG_PROFILE_BEGIN(GMG_Restrict_PostProcess);
	for(size_t i = 0; i < m_vspRestrictionPostProcess.size(); ++i)
		m_vspRestrictionPostProcess[i]->post_process(lc.sd);
	GMG_PROFILE_END();
	leave_debug_writer_section(gw_gl);
	lf.n_restr_calls++;

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - restriction on level "<<lev<<"\n");
}
1817 :
///	prolongates the coarse correction to level 'lev' and applies post-smoothing
/**
 * Adds the coarse-level correction lc.sc to the fine-level correction lf.sc
 * (via the transfer operator) and then performs m_numPostSmooth smoothing
 * steps. In the adaptive case the coarse correction is also written to the
 * surface vector m_pC and the rim coupling is applied. In parallel runs the
 * prolongated correction is broadcast from vertical masters to slaves.
 *
 * \param lev	fine grid level (must be > m_baseLev; lev-1 is the coarse level)
 */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
prolongation_and_postsmooth(int lev)
{
	GMG_PROFILE_FUNC();
//	lf: data of the fine level (lev), lc: data of the coarse level (lev-1)
	LevData& lf = *m_vLevData[lev];
	LevData& lc = *m_vLevData[lev-1];

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - prolongation on level "<<lev<<"\n");
	log_debug_data(lev, lf.n_prolong_calls, "BeforeProlong");

//	ADAPTIVE CASE:
	if(lev > m_LocalFullRefLevel)
	{
	//	write computed correction to surface
		GMG_PROFILE_BEGIN(GMG_AddCorrectionToSurface);
		try{
			const std::vector<SurfLevelMap>& vMap = lc.vSurfLevelMap;
			for(size_t i = 0; i < vMap.size(); ++i){
				(*m_pC)[vMap[i].surfIndex] += (*lc.sc)[vMap[i].levIndex];
			}
		}
		UG_CATCH_THROW("GMG::lmgc: Cannot add to surface.");
		GMG_PROFILE_END();

	//	in the adaptive case there is a small part of the coarse coupling that
	//	has not been used to update the defect. In order to ensure, that the
	//	defect on this level still corresponds to the updated defect, we need
	//	to add it here.
		GMG_PROFILE_BEGIN(GMG_UpdateRimDefect);
		lc.RimCpl_Fine_Coarse.matmul_minus(*lf.sd, *lc.sc);
		GMG_PROFILE_END();
	}
	log_debug_data(lev, lf.n_prolong_calls, "AfterCoarseGridDefect");

//	PROLONGATE: in parallel the target is the ghost vector lf.t (so the
//	result can be broadcast to v-slaves afterwards), otherwise lf.st
	GridLevel gw_gl; enter_debug_writer_section(gw_gl, "Prolongation", lev, lf.n_prolong_calls);
	SmartPtr<GF> spT = lf.st;
#ifdef UG_PARALLEL
	if( !lf.t->layouts()->vertical_slave().empty() ||
		!lf.t->layouts()->vertical_master().empty())
	{
		spT = lf.t;
	}
#endif
	GMG_PROFILE_BEGIN(GMG_Prolongate_Transfer);
	try{
		lf.Prolongation->prolongate(*spT, *lc.sc);
	}
	UG_CATCH_THROW("GMG: Prolongation from lev "<<lev-1<<" to "<<lev<<" failed.");
	GMG_PROFILE_END();

//	PARALLEL CASE:
#ifdef UG_PARALLEL
	if( !lf.t->layouts()->vertical_slave().empty() ||
		!lf.t->layouts()->vertical_master().empty())
	{
		UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - copy_to_vertical_slaves\n");

	//	Receive values of correction for vertical slaves:
	//	If there are vertical slaves/masters on the coarser level, we now copy
	//	the correction values from the v-master DoFs to the v-slave DoFs.
		GMG_PROFILE_BEGIN(GMG_Prolongate_SendAndRecieve);
		ComPol_VecCopy<vector_type> cpVecCopy(lf.t.get());
		m_Com.receive_data(lf.t->layouts()->vertical_slave(), cpVecCopy);
		m_Com.send_data(lf.t->layouts()->vertical_master(), cpVecCopy);
		m_Com.communicate();
		GMG_PROFILE_END();

	//	extract the broadcast correction from the ghost vector into lf.st
		GMG_PROFILE_BEGIN(GMG_Prolongate_GhostToNoghost);
		copy_ghost_to_noghost(lf.st, lf.t, lf.vMapPatchToGlobal);
		GMG_PROFILE_END();

		UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - copy_to_vertical_slaves\n");
	}
#endif

//	apply post processes (e.g. boundary/constraint adjustments) on lf.st
	GMG_PROFILE_BEGIN(GMG_Prolongate_PostProcess);
	for(size_t i = 0; i < m_vspProlongationPostProcess.size(); ++i)
		m_vspProlongationPostProcess[i]->post_process(lf.st);
	GMG_PROFILE_END();

	leave_debug_writer_section(gw_gl);

//	add coarse grid correction: c := c + t
	GMG_PROFILE_BEGIN(GMG_AddCoarseGridCorrection);
	(*lf.sc) += (*lf.st);
	GMG_PROFILE_END();

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - prolongation on level "<<lev<<"\n");
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - postsmooth on level "<<lev<<"\n");
//	log_debug_data(lev, lf.n_prolong_calls, "BeforePostSmooth");

//	POST-SMOOTH: note that, unlike pre-smoothing, the defect update happens
//	at the START of each iteration (the first update accounts for the
//	prolongated correction that was just added)
	GMG_PROFILE_BEGIN(GMG_PostSmooth);
	try{
	//	smooth several times
		for(int nu = 0; nu < m_numPostSmooth; ++nu)
		{
		//	update defect
			lf.A->apply_sub(*lf.sd, *lf.st);

			if(nu == 0){
				log_debug_data(lev, lf.n_prolong_calls, "BeforePostSmooth");
				mg_stats_defect(*lf.sd, lev, mg_stats_type::BEFORE_POST_SMOOTH);
			}

		//	a) Compute t = B*d with some iterator B
			GridLevel gw_gl; enter_debug_writer_section(gw_gl, "PostSmoother", lev, lf.n_post_calls, nu);
			if(!lf.PostSmoother->apply(*lf.st, *lf.sd))
				UG_THROW("GMG: Smoothing step "<<nu+1<<" on level "<<lev<<" failed.");
			leave_debug_writer_section(gw_gl);

		//	b) handle patch rim (same treatment as in pre-smoothing)
			if(!m_bSmoothOnSurfaceRim){
				const std::vector<size_t>& vShadowing = lf.vShadowing;
				for(size_t i = 0; i < vShadowing.size(); ++i)
					(*lf.st)[ vShadowing[i] ] = 0.0;
			} else {
				if(lev > m_LocalFullRefLevel)
					lc.RimCpl_Coarse_Fine.matmul_minus(*lc.sd, *lf.st);
			}

		//	d) ... and add the correction to the overall correction
			(*lf.sc) += (*lf.st);
		}
	}
	UG_CATCH_THROW("GMG: Post-Smoothing on level "<<lev<<" failed. ")
	GMG_PROFILE_END();
	lf.n_post_calls++;

//	update the defect if required. In full-ref case, the defect is not needed
//	anymore, since it will be restricted anyway. For adaptive case, however,
//	we must keep track of the defect on the surface.
//	We also need it if we want to write stats or debug data
	if(lev >= m_LocalFullRefLevel || m_mgstats.valid() || m_spDebugWriter.valid()){
		GMG_PROFILE_BEGIN(GMG_UpdateDefectAfterPostSmooth);
		lf.A->apply_sub(*lf.sd, *lf.st);
		GMG_PROFILE_END();
	}

	log_debug_data(lev, lf.n_prolong_calls, "AfterPostSmooth");
	mg_stats_defect(*lf.sd, lev, mg_stats_type::AFTER_POST_SMOOTH);
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - postsmooth on level "<<lev<<"\n");
	lf.n_prolong_calls++;
}
1965 :
1966 : // performs the base solving
///	solves the coarse-grid (base) problem on level 'lev'
/**
 * Two modes are supported:
 *  a) distributed: the base problem is solved in parallel by m_spBaseSolver
 *     directly on the level vectors;
 *  b) gathered: the defect is first collected onto one process (the
 *     gathering master) via vertical interfaces, solved there serially, and
 *     the correction is broadcast back to all participating processes.
 *
 * \param lev	grid level of the base problem (normally m_baseLev)
 */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
base_solve(int lev)
{
	GMG_PROFILE_FUNC();
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - base_solve on level "<<lev<<"\n");

	try{
	LevData& ld = *m_vLevData[lev];

	log_debug_data(lev, ld.n_base_calls, "BeforeBaseSolver");

//	SOLVE BASE PROBLEM
//	Here we distinguish two possibilities:
//	a) The coarse grid problem is solved in parallel, using a parallel solver
//	b) First all vectors are gathered to one process, solved on this one
//	   process and then again distributed

//	CASE a): We solve the problem in parallel (or normally for sequential code)
	if(!m_bGatheredBaseUsed)
	{
		UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: entering distributed basesolver branch.\n");

		GMG_PROFILE_BEGIN(GMG_BaseSolver_Apply);
	//	NOTE(review): the debug section is labelled "GatheredBaseSolver" here
	//	but "BaseSolver" in the gathered branch below -- the labels look
	//	swapped; affects debug output naming only. TODO confirm intent.
		GridLevel gw_gl; enter_debug_writer_section(gw_gl, "GatheredBaseSolver", lev, ld.n_base_calls);
		try{
			if(!m_spBaseSolver->apply(*ld.sc, *ld.sd))
				UG_THROW("GMG::lmgc: Base solver on base level "<<lev<<" failed.");
		}
		UG_CATCH_THROW("GMG: BaseSolver::apply failed. (case: a).")
		leave_debug_writer_section(gw_gl);
		GMG_PROFILE_END();

		UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: exiting distributed basesolver branch.\n");
	}

//	CASE b): We gather the processes, solve on one proc and distribute again
	else
	{
		UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: entering gathered basesolver branch.\n");

	//	gather the defect: sum v-slave contributions into the v-masters on
	//	the gathering process (defect is stored additively)
#ifdef UG_PARALLEL
		if( !ld.t->layouts()->vertical_slave().empty() ||
			!ld.t->layouts()->vertical_master().empty())
		{
			GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Defect_CopyNoghostToGhost);
			SetLayoutValues(&(*ld.t), ld.t->layouts()->vertical_master(), 0);
			copy_noghost_to_ghost(ld.t, ld.sd, ld.vMapPatchToGlobal);
			GMG_PROFILE_END();

			GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Defect_SendAndRecieve);
			ComPol_VecAddSetZero<vector_type> cpVecAdd(ld.t.get());
			m_Com.send_data(ld.t->layouts()->vertical_slave(), cpVecAdd);
			m_Com.receive_data(ld.t->layouts()->vertical_master(), cpVecAdd);
			m_Com.communicate();
			GMG_PROFILE_END();
		}
#endif

	//	only solve on gathering master
		if(gathered_base_master())
		{
			UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: Start serial base solver.\n");
			GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Apply);

		//	Reset correction
			spGatheredBaseCorr->set(0.0);

		//	compute coarse correction
			GridLevel gw_gl; enter_debug_writer_section(gw_gl, "BaseSolver", lev, ld.n_base_calls);
			try{
				if(!m_spBaseSolver->apply(*spGatheredBaseCorr, *ld.t))
					UG_THROW("GMG::lmgc: Base solver on base level "<<lev<<" failed.");
			}
			UG_CATCH_THROW("GMG: BaseSolver::apply failed. (case: b).")
			leave_debug_writer_section(gw_gl);

			GMG_PROFILE_END();
			UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG gathered base solver done.\n");
		}

	//	broadcast the correction: v-masters (on the gathering master) send
	//	the computed correction back to the v-slaves
#ifdef UG_PARALLEL
		SmartPtr<GF> spC = ld.sc;
		if(gathered_base_master()) spC = spGatheredBaseCorr;

		GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Correction_SendAndRecieve);
		ComPol_VecCopy<vector_type> cpVecCopy(spC.get());
		m_Com.send_data(spC->layouts()->vertical_master(), cpVecCopy);
		m_Com.receive_data(spC->layouts()->vertical_slave(), cpVecCopy);
		m_Com.communicate();
		GMG_PROFILE_END();
		if(gathered_base_master()){
			GMG_PROFILE_BEGIN(GMG_GatheredBaseSolver_Correction_CopyGhostToNoghost);
			spGatheredBaseCorr->set_storage_type(PST_CONSISTENT);
			copy_ghost_to_noghost(ld.sc, spGatheredBaseCorr, ld.vMapPatchToGlobal);
			GMG_PROFILE_END();
		} else {
			ld.sc->set_storage_type(PST_CONSISTENT);
		}
#endif
		UG_DLOG(LIB_DISC_MULTIGRID, 3, " GMG: exiting gathered basesolver branch.\n");
	}

//	update the defect if required. In full-ref case, the defect is not needed
//	anymore, since it will be restricted anyway. For adaptive case, however,
//	we must keep track of the defect on the surface
	if(lev >= m_LocalFullRefLevel){
		GMG_PROFILE_BEGIN(GMG_UpdateDefectAfterBaseSolver);
		ld.A->apply_sub(*ld.sd, *ld.sc);
		GMG_PROFILE_END();
	}

	log_debug_data(lev, ld.n_base_calls, "AfterBaseSolver");
	ld.n_base_calls++;
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - base_solve on level "<<lev<<"\n");
	}
	UG_CATCH_THROW("GMG: Base Solver failed.");
}
2087 :
2088 : // performs a multi grid cycle on the level
///	recursively performs one multigrid cycle starting at level 'lev'
/**
 * Pre-smooths and restricts on 'lev', recurses to 'lev-1' (or solves the
 * base problem when lev-1 == m_baseLev), then prolongates and post-smooths.
 *
 * \param lev		current grid level (must be >= m_baseLev)
 * \param cycleType	_V_, _W_, _F_, or an integer gamma > 2 giving the number
 *					of recursive coarse-level visits
 */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
lmgc(int lev, int cycleType)
{
	GMG_PROFILE_FUNC();
	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-start - lmgc on level "<<lev<<"\n");

//	check if already base level
//	NOTE(review): this branch is only reachable on the initial call when
//	m_baseLev == m_topLev (the recursion below calls base_solve(lev-1)
//	directly), in which case m_topLev == lev. Passing 'lev' would be
//	equivalent but clearer -- TODO confirm.
	if(lev == m_baseLev) {
		base_solve(m_topLev);
		return;
	} else if(lev < m_baseLev){
		UG_THROW("GMG::lmgc: call lmgc only for lev > baseLev.");
	}

//	presmooth and restrict
	try{
		presmooth_and_restriction(lev);
	}
	UG_CATCH_THROW("GMG::lmgc: presmooth-restriction failed on level "<<lev);

	try{
	//	on base level, invert only once
		if(lev-1 == m_baseLev) {
			base_solve(lev-1);
		}
	//	F-cycle: one F-cycle on lev-1, followed by a V-cycle
		else if(cycleType == _F_){
			lmgc(lev-1, _F_);
			lmgc(lev-1, _V_);
		}
	//	V- or W- cycle, or gamma > 2 (cycleType gives the repeat count)
		else {
			for(int i = 0; i < cycleType; ++i)
				lmgc(lev-1, cycleType);
		}
	}
	UG_CATCH_THROW("GMG::lmgc: Linear multi-grid cycle on level "<<lev-1<<
	               " failed. (BaseLev="<<m_baseLev<<", TopLev="<<m_topLev<<").");

//	prolongate, add coarse-grid correction and postsmooth
	try{
		prolongation_and_postsmooth(lev);
	}
	UG_CATCH_THROW("GMG::lmgc: prolongation-postsmooth failed on level "<<lev);

	UG_DLOG(LIB_DISC_MULTIGRID, 3, "gmg-stop - lmgc on level "<<lev<<"\n");
}
2137 :
2138 : ////////////////////////////////////////////////////////////////////////////////
2139 : // Debug Methods
2140 : ////////////////////////////////////////////////////////////////////////////////
2141 :
2142 : template <typename TDomain, typename TAlgebra>
2143 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
2144 : write_debug(ConstSmartPtr<GF> spGF, std::string name, int cycleNo)
2145 : {
2146 0 : write_debug(*spGF, name, cycleNo);
2147 0 : }
2148 :
2149 : template <typename TDomain, typename TAlgebra>
2150 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
2151 : write_debug(const GF& rGF, std::string name, int cycleNo)
2152 : {
2153 : PROFILE_FUNC_GROUP("debug");
2154 :
2155 0 : if(m_spDebugWriter.invalid()) return;
2156 :
2157 : // build name
2158 0 : GridLevel gl = rGF.grid_level();
2159 0 : std::stringstream ss;
2160 0 : ss << "GMG_" << name << GridLevelAppendix(gl);
2161 0 : ss << "_i" << std::setfill('0') << std::setw(3) << m_dbgIterCnt;
2162 0 : if (cycleNo >= 0) ss << "_cycle" << std::setfill('0') << std::setw(3) << cycleNo;
2163 0 : ss << ".vec";
2164 :
2165 : // write
2166 0 : GridLevel currGL = m_spDebugWriter->grid_level();
2167 : m_spDebugWriter->set_grid_level(gl);
2168 0 : m_spDebugWriter->write_vector(rGF, ss.str().c_str());
2169 : m_spDebugWriter->set_grid_level(currGL);
2170 0 : }
2171 :
2172 : template <typename TDomain, typename TAlgebra>
2173 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
2174 : write_debug(const matrix_type& mat, std::string name, const GF& rTo, const GF& rFrom)
2175 : {
2176 0 : GridLevel glFrom = rFrom.grid_level();
2177 0 : GridLevel glTo = rTo.grid_level();
2178 0 : write_debug(mat, name, glTo, glFrom);
2179 0 : }
2180 :
2181 : template <typename TDomain, typename TAlgebra>
2182 0 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
2183 : write_debug(const matrix_type& mat, std::string name, const GridLevel& glTo, const GridLevel& glFrom)
2184 : {
2185 : PROFILE_FUNC_GROUP("debug");
2186 :
2187 0 : if(m_spDebugWriter.invalid()) return;
2188 :
2189 : // build name
2190 0 : std::stringstream ss;
2191 0 : ss << "GMG_" << name << GridLevelAppendix(glTo);
2192 0 : if(glFrom != glTo) ss << GridLevelAppendix(glFrom);
2193 0 : ss << ".mat";
2194 :
2195 : // write
2196 0 : GridLevel currGL = m_spDebugWriter->grid_level();
2197 : m_spDebugWriter->set_grid_levels(glFrom, glTo);
2198 0 : m_spDebugWriter->write_matrix(mat, ss.str().c_str());
2199 : m_spDebugWriter->set_grid_level(currGL);
2200 0 : }
2201 :
2202 : template <typename TDomain, typename TAlgebra>
2203 0 : inline void AssembledMultiGridCycle<TDomain, TAlgebra>::
2204 : enter_debug_writer_section(GridLevel& dw_orig_gl, const char * sec_name, int lev, int cycleNo, int callNo)
2205 : {
2206 : PROFILE_FUNC_GROUP("debug");
2207 :
2208 0 : if(m_spDebugWriter.invalid()) return;
2209 :
2210 0 : dw_orig_gl = m_spDebugWriter->grid_level();
2211 0 : m_spDebugWriter->set_grid_level(m_vLevData[lev]->sd->grid_level());
2212 :
2213 0 : std::stringstream ss;
2214 0 : ss << "GMG_" << sec_name;
2215 0 : ss << "_i" << std::setfill('0') << std::setw(3) << m_dbgIterCnt;
2216 0 : if (cycleNo >= 0) ss << "_cycle" << std::setfill('0') << std::setw(3) << cycleNo;
2217 0 : ss << "_l" << lev;
2218 0 : if (callNo >= 0) ss << "_call" << std::setfill('0') << std::setw(3) << callNo;
2219 0 : m_spDebugWriter->enter_section(ss.str().c_str());
2220 0 : }
2221 :
2222 : template <typename TDomain, typename TAlgebra>
2223 0 : inline void AssembledMultiGridCycle<TDomain, TAlgebra>::
2224 : leave_debug_writer_section(GridLevel& dw_orig_gl)
2225 : {
2226 0 : if(m_spDebugWriter.invalid()) return;
2227 : m_spDebugWriter->leave_section();
2228 : m_spDebugWriter->set_grid_level(dw_orig_gl);
2229 : }
2230 :
///	writes the level defect/correction as debug output and optionally logs norms
/**
 * When a debug writer is attached, writes the level defect (prefix "Def_")
 * and correction (prefix "Cor_") of level 'lvl'. The norm logging below is
 * disabled by the two hard-coded flags and only compiled in for developers
 * who flip them.
 *
 * \param lvl		grid level whose vectors are dumped
 * \param cycleNo	cycle counter used in the debug file name
 * \param name		descriptive tag appended to the "Def_"/"Cor_" prefixes
 */
template <typename TDomain, typename TAlgebra>
void AssembledMultiGridCycle<TDomain, TAlgebra>::
log_debug_data(int lvl, int cycleNo, std::string name)
{
	if(m_spDebugWriter.valid()){
		std::string defName("Def_"); defName.append(name);
		std::string curName("Cor_"); curName.append(name);
		write_debug(m_vLevData[lvl]->sd, defName, cycleNo);
		write_debug(m_vLevData[lvl]->sc, curName, cycleNo);
	}

//	developer switches: enable to additionally log vector norms
	const bool bEnableSerialNorm = false;
	const bool bEnableParallelNorm = false;

	if(!bEnableSerialNorm && !bEnableParallelNorm) return;

//	indent the log prefix by the distance from the finest level
	std::string prefix;
	if(lvl < (int)m_vLevData.size())
		prefix.assign(2 + 2 * (m_vLevData.size() - lvl), ' ');
	prefix.append(name).append(" on lev ").append(ToString(lvl)).append(": ");

	LevData& ld = *m_vLevData[lvl];
	if(bEnableSerialNorm){
		UG_LOG(prefix << "local sd norm: " << sqrt(VecProd(*ld.sd, *ld.sd)) << std::endl);
		UG_LOG(prefix << "local sc norm: " << sqrt(VecProd(*ld.sc, *ld.sc)) << std::endl);
	}
	if(bEnableParallelNorm){
#ifdef UG_PARALLEL
	//	norm() may change the parallel storage type, so the previous storage
	//	type is restored after each norm computation
	uint oldStorageMask = ld.sd->get_storage_mask();
	number norm = ld.sd->norm();
	UG_LOG(prefix << "sd norm: " << norm << "\n");
	if(oldStorageMask & PST_ADDITIVE)
		ld.sd->change_storage_type(PST_ADDITIVE);
	else if(oldStorageMask & PST_CONSISTENT)
		ld.sd->change_storage_type(PST_CONSISTENT);

	oldStorageMask = ld.sc->get_storage_mask();
	norm = ld.sc->norm();
	UG_LOG(prefix << "sc norm: " << norm << "\n");
	if(oldStorageMask & PST_ADDITIVE)
		ld.sc->change_storage_type(PST_ADDITIVE);
	else if(oldStorageMask & PST_CONSISTENT)
		ld.sc->change_storage_type(PST_CONSISTENT);
/*
	oldStorageMask = ld.st->get_storage_mask();
	norm = ld.st->norm();
	UG_LOG(prefix << "st norm: " << norm << "\n");
	if(oldStorageMask & PST_ADDITIVE)
		ld.st->change_storage_type(PST_ADDITIVE);
	else if(oldStorageMask & PST_CONSISTENT)
		ld.st->change_storage_type(PST_CONSISTENT);

	oldStorageMask = ld.t->get_storage_mask();
	norm = ld.t->norm();
	UG_LOG(prefix << " t norm: " << norm << "\n");
	if(oldStorageMask & PST_ADDITIVE)
		ld.t->change_storage_type(PST_ADDITIVE);
	else if(oldStorageMask & PST_CONSISTENT)
		ld.t->change_storage_type(PST_CONSISTENT);
*/
#endif
	}
}
2294 :
2295 : template <typename TDomain, typename TAlgebra>
2296 : void AssembledMultiGridCycle<TDomain, TAlgebra>::
2297 : mg_stats_defect(GF& gf, int lvl, typename mg_stats_type::Stage stage)
2298 : {
2299 0 : if(m_mgstats.valid())
2300 0 : m_mgstats->set_defect(gf, lvl, stage);
2301 : }
2302 :
2303 : template <typename TDomain, typename TAlgebra>
2304 : std::string
2305 0 : AssembledMultiGridCycle<TDomain, TAlgebra>::
2306 : config_string() const
2307 : {
2308 0 : std::stringstream ss;
2309 0 : ss << "GeometricMultigrid (";
2310 0 : if(m_cycleType == _V_) ss << "V-Cycle";
2311 0 : else if(m_cycleType == _W_) ss << "W-Cycle";
2312 0 : else if(m_cycleType == _F_) ss << "F-Cycle";
2313 0 : else ss << " " << m_cycleType << "-Cycle";
2314 0 : ss << ")\n";
2315 :
2316 0 : if(m_spPreSmootherPrototype==m_spPostSmootherPrototype)
2317 0 : ss << " Smoother (" << m_numPreSmooth << "x pre, " << m_numPostSmooth << "x post): "
2318 0 : << ConfigShift(m_spPreSmootherPrototype->config_string());
2319 : else
2320 : {
2321 0 : ss << " Presmoother (" << m_numPreSmooth << "x): " << ConfigShift(m_spPreSmootherPrototype->config_string());
2322 0 : ss << " Postsmoother ( " << m_numPostSmooth << "x): " << ConfigShift(m_spPostSmootherPrototype->config_string());
2323 : }
2324 0 : ss << "\n";
2325 0 : ss << " Basesolver ( Baselevel = " << m_baseLev << ", gathered base = " << (m_bGatheredBaseIfAmbiguous ? "true" : "false") << "): ";
2326 0 : ss << ConfigShift(m_spBaseSolver->config_string());
2327 0 : return ss.str();
2328 :
2329 0 : }
2330 :
2331 : } // namespace ug
2332 :
2333 :
2334 : #endif /* __H__UG__LIB_DISC__MULTI_GRID_SOLVER__MG_SOLVER_IMPL__ */
|