/*
 * Copyright (c) 2012-2015: G-CSC, Goethe University Frankfurt
 * Author: Andreas Vogel
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__STD_TRANSFER_IMPL__
#define __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__STD_TRANSFER_IMPL__

#include "std_transfer.h"
#include "lib_disc/reference_element/reference_mapping_provider.h"
#include "lib_disc/local_finite_element/local_finite_element_provider.h"
#include "lib_disc/function_spaces/grid_function_util.h"
#include "lib_grid/algorithms/debug_util.h"  // ElementDebugInfo

namespace ug{


template <typename TDomain, typename TAlgebra>
void StdTransfer<TDomain, TAlgebra>::
assemble_prolongation_p1(matrix_type& P,
                         const DoFDistribution& fineDD,
                         const DoFDistribution& coarseDD)
{
	PROFILE_FUNC_GROUP("gmg");
	// allow only Lagrange P1 functions
	for(size_t fct = 0; fct < fineDD.num_fct(); ++fct)
		if(fineDD.lfeid(fct).type() != LFEID::LAGRANGE ||
			fineDD.lfeid(fct).order() != 1)
			UG_THROW("AssembleStdProlongationForP1Lagrange: "
					"Interpolation only implemented for Lagrange P1 functions.");

	// resize matrix
	P.resize_and_clear(fineDD.num_indices(), coarseDD.num_indices());

	// iterators
	const MultiGrid& mg = *coarseDD.multi_grid();
	typedef DoFDistribution::traits<Vertex>::const_iterator const_iterator;
	const_iterator iter, iterBegin, iterEnd;

	// loop subsets on fine level
	std::vector<size_t> vParentIndex, vChildIndex;
	std::vector<DoFIndex> vParentDoF, vChildDoF;
	for(int si = 0; si < fineDD.num_subsets(); ++si)
	{
		iterBegin = fineDD.template begin<Vertex>(si);
		iterEnd = fineDD.template end<Vertex>(si);

		// loop vertices of the fine-level subset
		for(iter = iterBegin; iter != iterEnd; ++iter)
		{
			// get element
			Vertex* child = *iter;

			// get father
			GridObject* parent = mg.get_parent(child);

			// Check if the child is contained in the coarseDD. This is always
			// false for a GridLevel::LEVEL, but may happen for GridLevel::SURFACE
			// on an adaptive grid-part used by both dds. In that case we can
			// simply set the identity.
			if(coarseDD.is_contained(child)){
				// get indices
				coarseDD.inner_algebra_indices(child, vParentIndex);
				fineDD.inner_algebra_indices(child, vChildIndex);
				UG_ASSERT(vParentIndex.size() == vChildIndex.size(), "Size mismatch");

				// set identity
				for(size_t i = 0; i < vParentIndex.size(); ++i)
					P(vChildIndex[i], vParentIndex[i]) = 1.0;

				// this child is fully handled
				continue;
			}
			else{
				// check if the parent exists (this is always the case, except
				// when 'child' is a v-slave)
				if(!parent) continue;

				if(!coarseDD.is_contained(parent)){
					UG_THROW("StdTransfer: Neither the parent element\n"
							<< ElementDebugInfo(mg, parent) <<
							"nor the child element\n"
							<< ElementDebugInfo(mg, child) <<
							" is contained in the coarse-dd. This should not happen.")
				}
			}

			// type of father
			const ReferenceObjectID roid = parent->reference_object_id();

			// loop all components
			for(size_t fct = 0; fct < fineDD.num_fct(); fct++)
			{
				// check that fct is defined on the subset
				if(!fineDD.is_def_in_subset(fct, si)) continue;

				// get global indices
				fineDD.inner_dof_indices(child, fct, vChildDoF);

				// detect type of father
				switch(roid)
				{
					case ROID_VERTEX:
					{
						Vertex* vrt = dynamic_cast<Vertex*>(parent);
						coarseDD.inner_dof_indices(vrt, fct, vParentDoF);
						DoFRef(P, vChildDoF[0], vParentDoF[0]) = 1.0;
					}
					break;
					case ROID_EDGE:
					{
						Edge* edge = dynamic_cast<Edge*>(parent);
						for(int i = 0; i < 2; ++i)
						{
							coarseDD.inner_dof_indices(edge->vertex(i), fct, vParentDoF);
							DoFRef(P, vChildDoF[0], vParentDoF[0]) = 0.5;
						}
					}
					break;
					case ROID_QUADRILATERAL:
					{
						Face* face = dynamic_cast<Face*>(parent);
						for(int i = 0; i < 4; ++i)
						{
							coarseDD.inner_dof_indices(face->vertex(i), fct, vParentDoF);
							DoFRef(P, vChildDoF[0], vParentDoF[0]) = 0.25;
						}
					}
					break;
					case ROID_HEXAHEDRON:
					{
						Volume* hexahedron = dynamic_cast<Volume*>(parent);
						for(int i = 0; i < 8; ++i)
						{
							coarseDD.inner_dof_indices(hexahedron->vertex(i), fct, vParentDoF);
							DoFRef(P, vChildDoF[0], vParentDoF[0]) = 0.125;
						}
					}
					break;
					default: UG_THROW("AssembleStdProlongationForP1Lagrange: Element father"
									" is of unsupported type "<< roid << " for "
									<< ElementDebugInfo(mg, child) << ".");
				}
			}
		}
	}
}
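/* Worked example (illustrative, derived from the weights above): for a P1
 * function, a new midpoint vertex of a refined edge receives the average of
 * the two coarse endpoint values,
 *
 *     u_fine(midpoint) = 0.5 * u_coarse(v0) + 0.5 * u_coarse(v1),
 *
 * analogously 0.25 for each corner at the center of a refined quadrilateral
 * and 0.125 at the center of a refined hexahedron. Vertices that already
 * exist on the coarse level are copied with weight 1.0.
 */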
/*
template <typename TDomain>
void ProjectGlobalPositionToElem(std::vector<MathVector<TDomain::dim> >& vGlobPos,
                                 GridObject* parent, const TDomain& domain)
{
	const int parentDim = parent->base_object_id();

	// vertex and full dim parent must match
	if(parentDim == 0 || parentDim == TDomain::dim)
		return;

	// get the vertices
	std::vector<MathVector<TDomain::dim> > vCornerCoord;
	switch(parentDim)
	{
		case EDGE:
		{
			CollectCornerCoordinates(vCornerCoord, *static_cast<Edge*>(parent), domain, true);
			MathVector<TDomain::dim> dir;
			VecSubtract(dir, vCornerCoord[1], vCornerCoord[0]);
			for(size_t p = 0; p < vGlobPos.size(); ++p){
				ProjectPointToRay(vGlobPos[p], vGlobPos[p], vCornerCoord[0], dir);
			}
		}
		break;
		case FACE:
		{
			CollectCornerCoordinates(vCornerCoord, *static_cast<Face*>(parent), domain, true);
			MathVector<TDomain::dim> normal;
			MathVector<TDomain::dim> a, b;
			VecSubtract(a, vCornerCoord[1], vCornerCoord[0]);
			VecSubtract(b, vCornerCoord[2], vCornerCoord[0]);
			VecCross(normal, a, b);

			for(size_t p = 0; p < vGlobPos.size(); ++p){
				ProjectPointToPlane(vGlobPos[p], vGlobPos[p], vCornerCoord[0], normal);
			}
		}
		break;
		default: UG_THROW("Base Object type not found.");
	}
}
*/


template <typename TDomain, typename TAlgebra>
template <typename TChild>
void StdTransfer<TDomain, TAlgebra>::
assemble_prolongation(matrix_type& P,
                      const DoFDistribution& fineDD,
                      const DoFDistribution& coarseDD,
                      ConstSmartPtr<TDomain> spDomain)
{
	PROFILE_FUNC_GROUP("gmg");

	// iterators
	MultiGrid& mg = *const_cast<MultiGrid*>(coarseDD.multi_grid().get());
	typedef typename DoFDistribution::traits<TChild>::const_iterator const_iterator;
	const_iterator iter, iterBegin, iterEnd;

	// loop subsets on fine level
	std::vector<DoFIndex> vParentDoF, vChildDoF;
	std::vector<size_t> vParentIndex, vChildIndex;
	for(int si = 0; si < fineDD.num_subsets(); ++si)
	{
		iterBegin = fineDD.template begin<TChild>(si);
		iterEnd = fineDD.template end<TChild>(si);

		// check which components to consider on this subset
		std::vector<LFEID> vLFEID;
		std::vector<size_t> vFct;
		for(size_t fct = 0; fct < fineDD.num_fct(); ++fct){
			if(fineDD.max_fct_dofs(fct, TChild::dim, si) == 0) continue;
			vFct.push_back(fct);
			vLFEID.push_back(fineDD.lfeid(fct));
		}
		if(vFct.empty()) continue;

		// loop elements on fine level for the subset
		for(iter = iterBegin; iter != iterEnd; ++iter)
		{
			// get child
			TChild* child = *iter;

			// get parent
			GridObject* parent = mg.get_parent(child);

			// Check if the child is contained in the coarseDD. This is always
			// false for a GridLevel::LEVEL, but may happen for GridLevel::SURFACE
			// on an adaptive grid-part used by both dds. In that case we can
			// simply set the identity.
			if(coarseDD.is_contained(child)){
				// get indices
				coarseDD.inner_algebra_indices(child, vParentIndex);
				fineDD.inner_algebra_indices(child, vChildIndex);
				UG_ASSERT(vParentIndex.size() == vChildIndex.size(), "Size mismatch");

				// set identity
				for(size_t i = 0; i < vParentIndex.size(); ++i)
					P(vChildIndex[i], vParentIndex[i]) = 1.0;

				// this child is fully handled
				continue;
			}
			else{
				// check if the parent exists (this is always the case, except
				// when 'child' is a v-slave)
				if(!parent) continue;

				if(!coarseDD.is_contained(parent)){
					UG_THROW("StdTransfer: Neither a parent element nor the "
							"child element is contained in the coarse-dd. "
							"This should not happen.")
				}
			}

			// loop all components
			for(size_t f = 0; f < vFct.size(); f++)
			{
				// get component and local finite element id
				const size_t fct = vFct[f];
				const LFEID& lfeID = vLFEID[f];

				// get global indices
				fineDD.inner_dof_indices(child, fct, vChildDoF);

				// switch space type
				switch(lfeID.type())
				{
					case LFEID::PIECEWISE_CONSTANT:
					{
						coarseDD.dof_indices(parent, fct, vParentDoF);
						UG_ASSERT(vChildDoF.size() == 1, "Must be one.");
						UG_ASSERT(vParentDoF.size() == 1, "Must be one.");

						DoFRef(P, vChildDoF[0], vParentDoF[0]) = 1.0;
					}
					break;

					case LFEID::CROUZEIX_RAVIART:
					{
						// get dimension of parent
						const int parentDim = parent->base_object_id();
						std::vector<GridObject*> vParent;

						// check whether to interpolate from neighboring elements
						if(parentDim == lfeID.dim()){
							// case: side is inner to the parent --> parent suffices
							vParent.push_back(parent);
						} else if(parentDim == lfeID.dim() - 1){
							// case: parent is a side --> get neighboring elements
							typedef typename TChild::sideof TElem;
							std::vector<TElem*> vElem;
							coarseDD.collect_associated(vElem, parent);
							for(size_t p = 0; p < vElem.size(); ++p)
								vParent.push_back(vElem[p]);

						} else {
							UG_THROW("StdTransfer: For CR, the parent must be a full-dim "
									"elem or a side (dim-1). But it has dim: "<<parentDim);
						}


						// global positions of fine dofs
						std::vector<MathVector<TDomain::dim> > vDoFPos;
						InnerDoFPosition(vDoFPos, child, *spDomain, lfeID);

						// loop contributions from parents
						for(size_t i = 0; i < vParent.size(); ++i)
						{
							// get coarse indices
							coarseDD.dof_indices(vParent[i], fct, vParentDoF);

							// get shapes at global positions
							std::vector<std::vector<number> > vvShape;
							ShapesAtGlobalPosition(vvShape, vDoFPos, vParent[i], *spDomain, lfeID);

							// add prolongation
							for(size_t ip = 0; ip < vvShape.size(); ++ip)
								for(size_t sh = 0; sh < vvShape[ip].size(); ++sh)
									DoFRef(P, vChildDoF[ip], vParentDoF[sh]) +=
											(1./vParent.size()) * vvShape[ip][sh];
						}
					}
					break;

					case LFEID::LAGRANGE:
					{
						// get coarse indices
						coarseDD.dof_indices(parent, fct, vParentDoF);

						// global positions of child dofs
						std::vector<MathVector<TDomain::dim> > vDoFPos;
						InnerDoFPosition(vDoFPos, child, *spDomain, lfeID);

						// project
						// ProjectGlobalPositionToElem(vDoFPos, parent, *spDomain);

						// get shapes at global positions
						std::vector<std::vector<number> > vvShape;
						ShapesAtGlobalPosition(vvShape, vDoFPos, parent, *spDomain, lfeID);

						// set prolongation
						for(size_t ip = 0; ip < vvShape.size(); ++ip)
							for(size_t sh = 0; sh < vvShape[ip].size(); ++sh)
								DoFRef(P, vChildDoF[ip], vParentDoF[sh]) = vvShape[ip][sh];
					}
					break;

					default:
						UG_THROW("StdTransfer: Local-Finite-Element: "<<lfeID<<
								" is not supported by this Transfer.")

				} // end LFEID-switch
			} // end fct-cmps
		} // end fine-elements
	} // end subset
}
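/* Interpolation rule used above: every matrix entry is a coarse shape function
 * evaluated at a fine DoF position,
 *
 *     P(i,j) = phi_j(x_i),
 *
 * where x_i is the global position of the i-th child DoF and phi_j runs over
 * the shapes of the parent element. For Crouzeix-Raviart, when the parent is a
 * side shared by k full-dimensional coarse neighbors, the k contributions are
 * averaged with weight 1/k (the 1./vParent.size() factor above).
 */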

template <typename TDomain, typename TAlgebra>
void StdTransfer<TDomain, TAlgebra>::
assemble_prolongation(matrix_type& P,
                      const DoFDistribution& fineDD,
                      const DoFDistribution& coarseDD,
                      ConstSmartPtr<TDomain> spDomain)
{
	// resize matrix
	P.resize_and_clear(fineDD.num_indices(), coarseDD.num_indices());

	// loop all base types carrying indices on fine elems
	if(fineDD.max_dofs(VERTEX)) assemble_prolongation<Vertex>(P, fineDD, coarseDD, spDomain);
	if(fineDD.max_dofs(EDGE))   assemble_prolongation<Edge>(P, fineDD, coarseDD, spDomain);
	if(fineDD.max_dofs(FACE))   assemble_prolongation<Face>(P, fineDD, coarseDD, spDomain);
	if(fineDD.max_dofs(VOLUME)) assemble_prolongation<Volume>(P, fineDD, coarseDD, spDomain);
}
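/* The dispatch over Vertex/Edge/Face/Volume children is needed because,
 * depending on the local finite element space, fine DoFs may live on any of
 * these base object types (e.g. P1 Lagrange on vertices only, Crouzeix-Raviart
 * on sides, piecewise-constant on full-dimensional elements). */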


template <typename TDomain, typename TAlgebra>
template <typename TChild>
void StdTransfer<TDomain, TAlgebra>::
assemble_restriction(matrix_type& R,
                     const DoFDistribution& coarseDD,
                     const DoFDistribution& fineDD,
                     ConstSmartPtr<TDomain> spDomain)
{
	PROFILE_FUNC_GROUP("gmg");

	// iterators
	MultiGrid& mg = *const_cast<MultiGrid*>(coarseDD.multi_grid().get());
	typedef typename DoFDistribution::traits<TChild>::const_iterator const_iterator;
	const_iterator iter, iterBegin, iterEnd;

	// loop subsets on fine level
	std::vector<DoFIndex> vParentDoF, vChildDoF;
	std::vector<size_t> vParentIndex, vChildIndex;
	for(int si = 0; si < fineDD.num_subsets(); ++si)
	{
		iterBegin = fineDD.template begin<TChild>(si);
		iterEnd = fineDD.template end<TChild>(si);

		// check which components to consider on this subset
		std::vector<LFEID> vLFEID;
		std::vector<size_t> vFct;
		for(size_t fct = 0; fct < fineDD.num_fct(); ++fct){
			if(fineDD.max_fct_dofs(fct, TChild::dim, si) == 0) continue;
			vFct.push_back(fct);
			vLFEID.push_back(fineDD.lfeid(fct));
		}
		if(vFct.empty()) continue;

		// loop elements on fine level for the subset
		for(iter = iterBegin; iter != iterEnd; ++iter)
		{
			// get child
			TChild* child = *iter;

			// get parent
			GridObject* parent = mg.get_parent(child);

			// Check if the child is contained in the coarseDD. This is always
			// false for a GridLevel::LEVEL, but may happen for GridLevel::SURFACE
			// on an adaptive grid-part used by both dds. In that case we can
			// simply set the identity.
			if(coarseDD.is_contained(child)){
				// get indices
				coarseDD.inner_algebra_indices(child, vParentIndex);
				fineDD.inner_algebra_indices(child, vChildIndex);
				UG_ASSERT(vParentIndex.size() == vChildIndex.size(), "Size mismatch");

				// set identity
				for(size_t i = 0; i < vParentIndex.size(); ++i)
					R(vParentIndex[i], vChildIndex[i]) = 1.0;

				// this child is fully handled
				continue;
			}
			else{
				// check if the parent exists (this is always the case, except
				// when 'child' is a v-slave)
				if(!parent) continue;

				if(!coarseDD.is_contained(parent)){
					UG_THROW("StdTransfer: Neither a parent element nor the "
							"child element is contained in the coarse-dd. "
							"This should not happen.")
				}
			}

			// loop all components
			for(size_t f = 0; f < vFct.size(); f++)
			{
				// get component and local finite element id
				const size_t fct = vFct[f];
				const LFEID& lfeID = vLFEID[f];

				// get global indices
				fineDD.inner_dof_indices(child, fct, vChildDoF);

				// switch space type
				switch(lfeID.type())
				{
					case LFEID::PIECEWISE_CONSTANT:
					{
						coarseDD.dof_indices(parent, fct, vParentDoF);
						UG_ASSERT(vChildDoF.size() == 1, "Must be one.");
						UG_ASSERT(vParentDoF.size() == 1, "Must be one.");

						DoFRef(R, vParentDoF[0], vChildDoF[0]) = 1.0;
					}
					break;

					case LFEID::CROUZEIX_RAVIART:
					{
						// get dimension of parent
						const int parentDim = parent->base_object_id();
						std::vector<GridObject*> vParent;

						// check whether to interpolate from neighboring elements
						if(parentDim == lfeID.dim()){
							// case: side is inner to the parent --> parent suffices
							vParent.push_back(parent);
						} else if(parentDim == lfeID.dim() - 1){
							// case: parent is a side --> get neighboring elements
							typedef typename TChild::sideof TElem;
							std::vector<TElem*> vElem;
							coarseDD.collect_associated(vElem, parent);
							for(size_t p = 0; p < vElem.size(); ++p){
								// NOTE: In the adaptive case this is not the
								// transpose of the prolongation, since we only
								// restrict to covered parts.
								if(mg.num_children<TElem>(vElem[p]) > 0)
									vParent.push_back(vElem[p]);
							}

						} else {
							UG_THROW("StdTransfer: For CR, the parent must be a full-dim "
									"elem or a side (dim-1). But it has dim: "<<parentDim);
						}


						// global positions of fine dofs
						std::vector<MathVector<TDomain::dim> > vDoFPos;
						InnerDoFPosition(vDoFPos, child, *spDomain, lfeID);

						// loop contributions from parents
						for(size_t i = 0; i < vParent.size(); ++i)
						{
							// get coarse indices
							coarseDD.dof_indices(vParent[i], fct, vParentDoF);

							// get shapes at global positions
							std::vector<std::vector<number> > vvShape;
							ShapesAtGlobalPosition(vvShape, vDoFPos, vParent[i], *spDomain, lfeID);

							// add restriction
							for(size_t ip = 0; ip < vvShape.size(); ++ip)
								for(size_t sh = 0; sh < vvShape[ip].size(); ++sh)
									DoFRef(R, vParentDoF[sh], vChildDoF[ip]) +=
											(1./vParent.size()) * vvShape[ip][sh];
						}
					}
					break;

					case LFEID::LAGRANGE:
					{
						// get coarse indices
						coarseDD.dof_indices(parent, fct, vParentDoF);

						// global positions of child dofs
						std::vector<MathVector<TDomain::dim> > vDoFPos;
						InnerDoFPosition(vDoFPos, child, *spDomain, lfeID);

						// get shapes at global positions
						std::vector<std::vector<number> > vvShape;
						ShapesAtGlobalPosition(vvShape, vDoFPos, parent, *spDomain, lfeID);

						// set restriction
						for(size_t ip = 0; ip < vvShape.size(); ++ip)
							for(size_t sh = 0; sh < vvShape[ip].size(); ++sh)
								DoFRef(R, vParentDoF[sh], vChildDoF[ip]) = vvShape[ip][sh];
					}
					break;

					default:
						UG_THROW("StdTransfer: Local-Finite-Element: "<<lfeID<<
								" is not supported by this Transfer.")

				} // end LFEID-switch
			} // end fct-cmps
		} // end fine-elements
	} // end subset
}
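/* Note on symmetry: on uniformly refined grids the R assembled here coincides
 * with the transpose of P (entries R(j,i) = phi_j(x_i)). On adaptive grids the
 * Crouzeix-Raviart branch above skips coarse neighbors without children, so R
 * may differ from P^T; set_use_transposed() (see restriction() below) selects
 * the exact transpose instead.
 */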

template <typename TDomain, typename TAlgebra>
void StdTransfer<TDomain, TAlgebra>::
assemble_restriction(matrix_type& R,
                     const DoFDistribution& coarseDD,
                     const DoFDistribution& fineDD,
                     ConstSmartPtr<TDomain> spDomain)
{
	// resize matrix
	R.resize_and_clear(coarseDD.num_indices(), fineDD.num_indices());

	// loop all base types carrying indices on fine elems
	if(fineDD.max_dofs(VERTEX)) assemble_restriction<Vertex>(R, coarseDD, fineDD, spDomain);
	if(fineDD.max_dofs(EDGE))   assemble_restriction<Edge>(R, coarseDD, fineDD, spDomain);
	if(fineDD.max_dofs(FACE))   assemble_restriction<Face>(R, coarseDD, fineDD, spDomain);
	if(fineDD.max_dofs(VOLUME)) assemble_restriction<Volume>(R, coarseDD, fineDD, spDomain);
}


template <typename TDomain, typename TAlgebra>
SmartPtr<typename TAlgebra::matrix_type>
StdTransfer<TDomain, TAlgebra>::
prolongation(const GridLevel& fineGL, const GridLevel& coarseGL,
             ConstSmartPtr<ApproximationSpace<TDomain> > spApproxSpace)
{
	if(fineGL.level() - coarseGL.level() != 1)
		UG_THROW("StdTransfer: Can only project between successive levels, "
				"but fine = "<<fineGL<<", coarse = "<<coarseGL);

	if(fineGL.type() != coarseGL.type())
		UG_THROW("StdTransfer: Can only project between dof distributions of "
				"the same type, but fine = "<<fineGL<<", coarse = "<<coarseGL);

	// remove old revisions
	remove_outdated(m_mProlongation, spApproxSpace->revision());

	// key of this prolongation
	TransferKey key(coarseGL, fineGL, spApproxSpace->revision());

	// check if it must be created
	if(m_mProlongation.find(key) == m_mProlongation.end())
	{
		SmartPtr<matrix_type> P =
				m_mProlongation[key] = SmartPtr<matrix_type>(new matrix_type);

		ConstSmartPtr<DoFDistribution> spCoarseDD = spApproxSpace->dof_distribution(coarseGL);
		ConstSmartPtr<DoFDistribution> spFineDD = spApproxSpace->dof_distribution(fineGL);

		// the P1-Lagrange shortcut applies only if every function is P1 Lagrange
		bool P1LagrangeOnly = false;
		if(m_p1LagrangeOptimizationEnabled){
			P1LagrangeOnly = true;
			for(size_t fct = 0; fct < spApproxSpace->num_fct(); ++fct)
				if(spApproxSpace->lfeid(fct).type() != LFEID::LAGRANGE ||
					spApproxSpace->lfeid(fct).order() != 1)
					P1LagrangeOnly = false;
		}

		if(P1LagrangeOnly){
			assemble_prolongation_p1(*P, *spFineDD, *spCoarseDD);
		} else{
			assemble_prolongation(*P, *spFineDD, *spCoarseDD, spApproxSpace->domain());
		}

		// let constraints adjust the assembled matrix (one pass per constraint type)
		for (int type = 1; type < CT_ALL; type = type << 1)
		{
			for (size_t i = 0; i < m_vConstraint.size(); ++i)
			{
				if (m_vConstraint[i]->type() & type)
					m_vConstraint[i]->adjust_prolongation(*P, spFineDD, spCoarseDD, type);
			}
		}

#ifdef UG_PARALLEL
		P->set_storage_type(PST_CONSISTENT);
#endif

		write_debug(*P, "P", fineGL, coarseGL);
	}

	return m_mProlongation[key];
}
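/* Usage sketch (illustrative; 'spTransfer' and 'spApproxSpace' are assumed to
 * exist in the calling code): operators are cached per (coarseGL, fineGL,
 * revision) key, so repeated calls are cheap until the grid changes.
 *
 *     GridLevel coarseGL(lev,   GridLevel::LEVEL);
 *     GridLevel fineGL  (lev+1, GridLevel::LEVEL);
 *     SmartPtr<typename TAlgebra::matrix_type> spP =
 *         spTransfer->prolongation(fineGL, coarseGL, spApproxSpace);
 */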

template <typename TDomain, typename TAlgebra>
SmartPtr<typename TAlgebra::matrix_type>
StdTransfer<TDomain, TAlgebra>::
restriction(const GridLevel& coarseGL, const GridLevel& fineGL,
            ConstSmartPtr<ApproximationSpace<TDomain> > spApproxSpace)
{
	if(fineGL.level() - coarseGL.level() != 1)
		UG_THROW("StdTransfer: Can only project between successive levels, "
				"but fine = "<<fineGL<<", coarse = "<<coarseGL);

	if(fineGL.type() != coarseGL.type())
		UG_THROW("StdTransfer: Can only project between dof distributions of "
				"the same type, but fine = "<<fineGL<<", coarse = "<<coarseGL);

	// remove old revisions
	remove_outdated(m_mRestriction, spApproxSpace->revision());

	// key of this restriction
	TransferKey key(coarseGL, fineGL, spApproxSpace->revision());

	// check if it must be created
	if(m_mRestriction.find(key) == m_mRestriction.end())
	{
		SmartPtr<matrix_type> R =
				m_mRestriction[key] = SmartPtr<matrix_type>(new matrix_type);

		ConstSmartPtr<DoFDistribution> spCoarseDD = spApproxSpace->dof_distribution(coarseGL);
		ConstSmartPtr<DoFDistribution> spFineDD = spApproxSpace->dof_distribution(fineGL);

		if(m_bUseTransposed)
			R->set_as_transpose_of(*prolongation(fineGL, coarseGL, spApproxSpace));
		else
			assemble_restriction(*R, *spCoarseDD, *spFineDD, spApproxSpace->domain());


#ifdef UG_PARALLEL
		R->set_storage_type(PST_CONSISTENT);
#endif

		// let constraints adjust the assembled matrix (one pass per constraint type)
		for (int type = 1; type < CT_ALL; type = type << 1)
		{
			for (size_t i = 0; i < m_vConstraint.size(); ++i)
			{
				if (m_vConstraint[i]->type() & type)
					m_vConstraint[i]->adjust_restriction(*R, spCoarseDD, spFineDD, type);
			}
		}

		write_debug(*R, "R", coarseGL, fineGL);
	}

	return m_mRestriction[key];
}

template <typename TDomain, typename TAlgebra>
void StdTransfer<TDomain, TAlgebra>::
prolongate(GF& uFine, const GF& uCoarse)
{
	PROFILE_FUNC_GROUP("gmg");

	if(!bCached)
		UG_THROW("StdTransfer: currently only the cached variant is implemented.");

	const GridLevel& coarseGL = uCoarse.grid_level();
	const GridLevel& fineGL = uFine.grid_level();
	ConstSmartPtr<ApproximationSpace<TDomain> > spApproxSpace = uFine.approx_space();
	if(uCoarse.approx_space() != spApproxSpace)
		UG_THROW("StdTransfer: cannot prolongate between grid functions from "
				"different approximation spaces.");

	try{
		//prolongation(fineGL, coarseGL, spApproxSpace)->apply(uFine, uCoarse);
#ifdef UG_PARALLEL
		MatMultDirect(uFine, m_dampProl, *prolongation(fineGL, coarseGL, spApproxSpace), uCoarse);
#else
		prolongation(fineGL, coarseGL, spApproxSpace)->axpy(uFine, 0.0, uFine, m_dampProl, uCoarse);
#endif

		// adjust using constraints
		for (int type = 1; type < CT_ALL; type = type << 1)
		{
			for (size_t i = 0; i < m_vConstraint.size(); ++i)
			{
				if (m_vConstraint[i]->type() & type)
					m_vConstraint[i]->adjust_prolongation(uFine, fineGL, uCoarse, coarseGL, type);
			}
		}

	}
	UG_CATCH_THROW("StdTransfer:prolongation: Failed for fine = "<<fineGL<<
				" and coarse = "<<coarseGL);

	// if only Crouzeix-Raviart or piecewise-constant functions are present,
	// duplicated values at process interfaces must be averaged and made consistent
#ifdef UG_PARALLEL
	bool bCROnly = true;
	for(size_t fct = 0; fct < spApproxSpace->num_fct(); ++fct)
		if(spApproxSpace->lfeid(fct).type() != LFEID::CROUZEIX_RAVIART &&
			spApproxSpace->lfeid(fct).type() != LFEID::PIECEWISE_CONSTANT)
			bCROnly = false;

	if(bCROnly){
		ScaleLayoutValues(&uFine, uFine.layouts()->master(), 0.5);
		ScaleLayoutValues(&uFine, uFine.layouts()->slave(), 0.5);
		AdditiveToConsistent(&uFine, uFine.layouts()->master(), uFine.layouts()->slave(),
							&uFine.layouts()->comm());
	}
#endif
}

template <typename TDomain, typename TAlgebra>
void StdTransfer<TDomain, TAlgebra>::
do_restrict(GF& uCoarse, const GF& uFine)
{
	PROFILE_FUNC_GROUP("gmg");

	if(!bCached)
		UG_THROW("StdTransfer: currently only the cached variant is implemented.");

	const GridLevel& coarseGL = uCoarse.grid_level();
	const GridLevel& fineGL = uFine.grid_level();
	ConstSmartPtr<ApproximationSpace<TDomain> > spApproxSpace = uFine.approx_space();
	if(uCoarse.approx_space() != spApproxSpace)
		UG_THROW("StdTransfer: cannot restrict between grid functions from "
				"different approximation spaces.");

	try{

		restriction(coarseGL, fineGL, spApproxSpace)->
				apply_ignore_zero_rows(uCoarse, m_dampRes, uFine);

		// adjust using constraints
		for (int type = 1; type < CT_ALL; type = type << 1)
		{
			for (size_t i = 0; i < m_vConstraint.size(); ++i)
			{
				if (m_vConstraint[i]->type() & type)
					m_vConstraint[i]->adjust_restriction(uCoarse, coarseGL, uFine, fineGL, type);
			}
		}

	} UG_CATCH_THROW("StdTransfer:do_restrict: Failed for fine = "<<fineGL<<
				" and coarse = "<<coarseGL);
}
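/* Usage sketch (illustrative; the grid functions and 'spTransfer' are assumed
 * to exist in the calling code): within a multigrid cycle, the defect is
 * restricted and a correction is prolongated between successive levels.
 *
 *     spTransfer->do_restrict(dCoarse, dFine);   // defect: fine -> coarse
 *     // ... solve / smooth on the coarse level ...
 *     spTransfer->prolongate(cFine, cCoarse);    // correction: coarse -> fine
 */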

template <typename TDomain, typename TAlgebra>
SmartPtr<ITransferOperator<TDomain, TAlgebra> >
StdTransfer<TDomain, TAlgebra>::clone()
{
	SmartPtr<StdTransfer> op(new StdTransfer);
	for(size_t i = 0; i < m_vConstraint.size(); ++i)
		op->add_constraint(m_vConstraint[i]);
	op->set_restriction_damping(m_dampRes);
	op->set_prolongation_damping(m_dampProl);
	op->set_debug(m_spDebugWriter);
	op->enable_p1_lagrange_optimization(p1_lagrange_optimization_enabled());
	op->set_use_transposed(m_bUseTransposed);
	return op;
}

template <typename TDomain, typename TAlgebra>
void StdTransfer<TDomain, TAlgebra>::
write_debug(const matrix_type& mat, std::string name,
            const GridLevel& glTo, const GridLevel& glFrom)
{
	PROFILE_FUNC_GROUP("debug");
	// if no debug writer is set, we are done
	if(m_spDebugWriter.invalid()) return;

	// cast debug writer
	SmartPtr<GridFunctionDebugWriter<TDomain, TAlgebra> > dbgWriter =
			m_spDebugWriter.template cast_dynamic<GridFunctionDebugWriter<TDomain, TAlgebra> >();

	// check success
	if(dbgWriter.invalid()) return;

	// append the grid levels to the name
	name.append("_").append(ToString(glTo.level()));
	name.append("_").append(ToString(glFrom.level()));
	name.append(".mat");

	// write (temporarily switching the writer's grid levels)
	GridLevel gridLev = dbgWriter->grid_level();
	dbgWriter->set_grid_levels(glFrom, glTo);
	dbgWriter->write_matrix(mat, name.c_str());
	dbgWriter->set_grid_level(gridLev);
}

} // end namespace ug

#endif /* __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__STD_TRANSFER_IMPL__ */