Line data Source code
1 : /*
2 : * Copyright (c) 2014-2015: G-CSC, Goethe University Frankfurt
3 : * Authors: Dmitry Logashenko, Markus Breit
4 : *
5 : * This file is part of UG4.
6 : *
7 : * UG4 is free software: you can redistribute it and/or modify it under the
8 : * terms of the GNU Lesser General Public License version 3 (as published by the
9 : * Free Software Foundation) with the following additional attribution
10 : * requirements (according to LGPL/GPL v3 §7):
11 : *
12 : * (1) The following notice must be displayed in the Appropriate Legal Notices
13 : * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
14 : *
15 : * (2) The following notice must be displayed at a prominent place in the
16 : * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
17 : *
18 : * (3) The following bibliography is recommended for citation and must be
19 : * preserved in all covered files:
20 : * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
21 : * parallel geometric multigrid solver on hierarchically distributed grids.
22 : * Computing and visualization in science 16, 4 (2013), 151-164"
23 : * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
24 : * flexible software system for simulating pde based models on high performance
25 : * computers. Computing and visualization in science 16, 4 (2013), 165-179"
26 : *
27 : * This program is distributed in the hope that it will be useful,
28 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 : * GNU Lesser General Public License for more details.
31 : */
32 :
33 : namespace ug{
34 :
35 : // ******** class SideFluxErrEstData ********
36 :
37 : /// Allocates data structures for the error estimator
38 : template <typename TDomain>
39 0 : void SideFluxErrEstData<TDomain>::alloc_err_est_data
40 : (
41 : ConstSmartPtr<SurfaceView> spSV,
42 : const GridLevel& gl
43 : )
44 : {
45 : // Get and check the grid level:
46 0 : if (gl.type () != GridLevel::SURFACE)
47 0 : UG_THROW("SideFluxErrEstData::alloc_err_est_data:"
48 : " The error estimator can work only with grid functions of the SURFACE type.");
49 :
50 : // Copy the parameters to the object:
51 0 : m_errEstGL = gl;
52 0 : m_spSV = spSV;
53 :
54 : // Prepare the attachment for the jumps of the fluxes over the sides:
55 : typedef typename domain_traits<dim>::side_type side_type;
56 0 : MultiGrid * pMG = (MultiGrid *) (spSV->subset_handler()->multi_grid());
57 0 : pMG->template attach_to_dv<side_type,ANumber>(m_aFluxJumpOverSide, 0);
58 : m_aaFluxJump.access(*pMG, m_aFluxJumpOverSide);
59 0 : };
60 :
61 : /// Called after the computation of the error estimator data in all the elements
62 : template <typename TDomain>
63 0 : void SideFluxErrEstData<TDomain>::summarize_err_est_data (SmartPtr<TDomain> spDomain)
64 : {
65 : typedef typename domain_traits<dim>::side_type side_type;
66 :
67 0 : const MultiGrid * pMG = m_spSV->subset_handler()->multi_grid();
68 :
69 : // Loop the rim sides and add the jumps
70 : typedef typename SurfaceView::traits<side_type>::const_iterator t_iterator;
71 0 : t_iterator end_rim_side_iter = m_spSV->template end<side_type> (m_errEstGL, SurfaceView::SHADOW_RIM);
72 0 : for (t_iterator rim_side_iter = m_spSV->template begin<side_type> (m_errEstGL, SurfaceView::SHADOW_RIM);
73 0 : rim_side_iter != end_rim_side_iter; ++rim_side_iter)
74 : {
75 : // Get the sides on both the levels
76 : side_type * c_rim_side = *rim_side_iter;
77 0 : if (pMG->template num_children<side_type>(c_rim_side) != 1) // we consider _NONCOPY only, no hanging nodes
78 0 : UG_THROW ("SideFluxErrEstData::summarize_error_estimator:"
79 : " The error estimator does not accept hanging nodes");
80 : side_type * f_rim_side = pMG->template get_child<side_type> (c_rim_side, 0);
81 :
82 : // Compute the total jump and save it for both the sides:
83 : number & c_rim_flux = m_aaFluxJump[c_rim_side];
84 : number & f_rim_flux = m_aaFluxJump[f_rim_side];
85 0 : number flux_jump = f_rim_flux + c_rim_flux;
86 0 : c_rim_flux = f_rim_flux = flux_jump;
87 : }
88 0 : };
89 :
90 : /// Releases data structures of the error estimator
91 : template <typename TDomain>
92 0 : void SideFluxErrEstData<TDomain>::release_err_est_data ()
93 : {
94 : // Release the attachment
95 : typedef typename domain_traits<dim>::side_type side_type;
96 0 : MultiGrid * pMG = (MultiGrid *) (m_spSV->subset_handler()->multi_grid());
97 : m_aaFluxJump.invalidate ();
98 0 : pMG->template detach_from<side_type> (m_aFluxJumpOverSide);
99 : //m_spSV = ConstSmartPtr<SurfaceView> (NULL); // this raises a rte
100 0 : };
101 :
102 :
103 :
104 :
105 : // ******** class SideAndElemErrEstData ********
106 :
107 0 : inline void check_subset_strings(std::vector<std::string> s)
108 : {
109 : // remove white space
110 0 : for (size_t i = 0; i < s.size(); i++)
111 0 : RemoveWhitespaceFromString(s[i]);
112 :
113 : // if no subset passed, clear subsets
114 0 : if (s.size() == 1 && s[0].empty()) s.clear();
115 :
116 : // if subsets passed with separator, but not all tokens filled, throw error
117 0 : for (size_t i = 0; i < s.size(); i++)
118 : {
119 0 : if (s.empty())
120 : {
121 0 : UG_THROW("Error while setting subsets in SideAndElemErrEstData: Passed subset string lacks a "
122 : "subset specification at position " << i << "(of " << s.size()-1 << ")");
123 : }
124 : }
125 :
126 0 : if (s.size() == 0)
127 : {
128 : UG_LOG("Warning: SideAndElemErrEstData is constructed without definition of subsets. This is likely not to work.\n"
129 : "Please specify a subset of the same dimension as your domain that the error estimator is supposed to work on.\n");
130 : }
131 0 : }
132 :
133 :
134 : template <typename TDomain>
135 0 : SideAndElemErrEstData<TDomain>::SideAndElemErrEstData
136 : (
137 : size_t _sideOrder,
138 : size_t _elemOrder,
139 : const char* subsets
140 : ) :
141 : IErrEstData<TDomain>(),
142 0 : sideOrder(_sideOrder), elemOrder(_elemOrder),
143 : m_aSide(attachment_type("errEstSide")), m_aElem(attachment_type("errEstElem")),
144 : m_aaSide(MultiGrid::AttachmentAccessor<side_type, attachment_type >()),
145 : m_aaElem(MultiGrid::AttachmentAccessor<elem_type, attachment_type >()),
146 : m_spSV(SPNULL), m_errEstGL(GridLevel()),
147 0 : m_type(H1_ERROR_TYPE)
148 : {
149 0 : m_vSs = TokenizeString(subsets);
150 0 : check_subset_strings(m_vSs);
151 0 : init_quadrature();
152 0 : }
153 :
154 :
155 : template <typename TDomain>
156 0 : SideAndElemErrEstData<TDomain>::SideAndElemErrEstData
157 : (
158 : size_t _sideOrder,
159 : size_t _elemOrder,
160 : std::vector<std::string> subsets
161 : ) :
162 : IErrEstData<TDomain>(),
163 0 : sideOrder(_sideOrder), elemOrder(_elemOrder),
164 : m_aSide(attachment_type("errEstSide")), m_aElem(attachment_type("errEstElem")),
165 : m_aaSide(MultiGrid::AttachmentAccessor<side_type, attachment_type >()),
166 : m_aaElem(MultiGrid::AttachmentAccessor<elem_type, attachment_type >()),
167 : m_spSV(SPNULL), m_errEstGL(GridLevel()),
168 0 : m_type(H1_ERROR_TYPE)
169 : {
170 0 : m_vSs = subsets;
171 0 : check_subset_strings(m_vSs);
172 0 : init_quadrature();
173 0 : }
174 :
175 : template <typename TDomain>
176 0 : void SideAndElemErrEstData<TDomain>::init_quadrature()
177 : {
178 : // get quadrature rules for sides and elems
179 0 : boost::mpl::for_each<typename domain_traits<dim>::ManifoldElemList>(GetQuadRules<dim-1>(&quadRuleSide[0], sideOrder));
180 0 : boost::mpl::for_each<typename domain_traits<dim>::DimElemList>(GetQuadRules<dim>(&quadRuleElem[0], elemOrder));
181 :
182 : // fill in values for local side IPs (must be transformed from side local coords to elem local coords)
183 : // and fill IP indexing structure along the way
184 0 : for (ReferenceObjectID roid = ROID_VERTEX; roid != NUM_REFERENCE_OBJECTS; roid++)
185 : {
186 : // get reference element for roid
187 : const ReferenceElement& re = ReferenceElementProvider::get(roid);
188 : int ref_dim = re.dimension();
189 0 : if (ref_dim != dim) continue;
190 :
191 : // make a DimReferenceElement from roid (we need access to corner coords)
192 : const DimReferenceElement<dim>& ref_elem = ReferenceElementProvider::get<dim>(roid);
193 :
194 : // clear IP coords
195 : m_SideIPcoords[roid].clear();
196 :
197 : // loop sides of ref elem
198 0 : for (size_t side = 0; side < ref_elem.num(ref_dim-1); side++)
199 : {
200 : // get side roid
201 : ReferenceObjectID side_roid = ref_elem.roid(ref_dim-1, side);
202 :
203 : // get number of IPs for this roid from quad rules
204 0 : if (!quadRuleSide[side_roid])
205 0 : UG_THROW("Requesting side IPs for roid " << roid << ", but no quadrature rule has been created for it.");
206 : size_t nIPs = quadRuleSide[side_roid]->size();
207 :
208 : // save start index for this side's IPs
209 0 : if (side == 0) m_sideIPsStartIndex[roid][side] = 0;
210 0 : else m_sideIPsStartIndex[roid][side] = m_sideIPsStartIndex[roid][side-1] + nIPs;
211 :
212 : // get the side-local IPs
213 : const MathVector<dim-1>* sideloc_IPs = quadRuleSide[side_roid]->points();
214 :
215 : // fill vector of side corners (in element-dimensional coords)
216 : size_t nCo = ref_elem.num(dim-1, side, 0);
217 0 : std::vector<MathVector<dim> > side_corners(nCo);
218 0 : for (size_t co = 0; co < ref_elem.num(dim-1, side, 0); co++)
219 : {
220 0 : size_t co_id = ref_elem.id(dim-1, side, 0, co);
221 : side_corners[co] = ref_elem.corner(co_id);
222 : }
223 :
224 : // get reference mapping
225 : DimReferenceMapping<dim-1,dim>& ref_map = ReferenceMappingProvider::get<dim-1,dim>(side_roid, &side_corners[0]);
226 :
227 : // map IPs
228 0 : for (size_t ip = 0; ip < nIPs; ip++)
229 : {
230 0 : m_SideIPcoords[roid].push_back(MathVector<TDomain::dim>());
231 0 : ref_map.local_to_global(m_SideIPcoords[roid].back(), sideloc_IPs[ip]);
232 : }
233 : }
234 : }
235 0 : }
236 :
237 :
238 : /// get the data reference for a given side and ip
239 : template <typename TDomain>
240 0 : number& SideAndElemErrEstData<TDomain>::operator() (side_type* pSide, size_t ip)
241 : {
242 : try
243 : {
244 : return m_aaSide[pSide].at(ip);
245 : }
246 0 : catch (const std::out_of_range& oor)
247 : {
248 0 : UG_THROW("Requested attachment for side integration point " << ip <<
249 : ", which does not appear to be allocated.");
250 :
251 : }
252 0 : UG_CATCH_THROW("Could not access error estimator side attachment for IP " << ip << ".");
253 :
254 : // silence no return warning; this code is never reached
255 : UG_THROW("Reached unreachable! This should be impossible. Check out why it is not!");
256 : return m_aaSide[pSide].at(ip);
257 : }
258 :
259 : template <typename TDomain>
260 0 : number& SideAndElemErrEstData<TDomain>::operator() (elem_type* pElem, size_t ip)
261 : {
262 : try
263 : {
264 : return m_aaElem[pElem].at(ip);
265 : }
266 0 : catch (const std::out_of_range& oor)
267 : {
268 0 : UG_THROW("Requested attachment for elem integration point " << ip <<
269 : ", which does not appear to be allocated.");
270 :
271 : }
272 0 : UG_CATCH_THROW("Could not access error estimator elem attachment for IP " << ip << ".");
273 :
274 : // silence no return warning; this code is never reached
275 : UG_THROW("Reached unreachable! This should be impossible. Check out why it is not!");
276 : return m_aaElem[pElem].at(ip);
277 : }
278 :
279 :
280 : template <typename TDomain>
281 : template <int refDim>
282 0 : const MathVector<refDim>* SideAndElemErrEstData<TDomain>::side_local_ips(const ReferenceObjectID roid)
283 : {
284 : // the usual case: return all side IPs of an element (belonging to all sides)
285 : if (TDomain::dim == refDim)
286 : {
287 : // check that IP series exists
288 0 : if (m_SideIPcoords[roid].size() == 0)
289 0 : UG_THROW("No side IP series available for roid " << roid << ".");
290 :
291 : // cast is necessary, since TDomain::dim might be != refDim,
292 : // but in that case, this return is not reached
293 : return reinterpret_cast<const MathVector<refDim>*>(&m_SideIPcoords[roid][0]);
294 : }
295 : // special case (assembling over manifold, needed for inner_boundary): only IPs of one side
296 : else if (TDomain::dim == refDim+1)
297 : {
298 : // check that quad rule exists
299 0 : if (!quadRuleSide[roid])
300 0 : UG_THROW("Requesting side IPs for roid " << roid << ", but no quadrature rule has been created for it.");
301 :
302 : return reinterpret_cast<const MathVector<refDim>*>(quadRuleSide[roid]->points());
303 : }
304 :
305 0 : UG_THROW("Local IPs requested with the wrong dimension refDim." << std::endl
306 : << "Either call with refDim == TDomain::dim and a TDomain::dim-dimensional roid "
307 : "for the local side IPs of all of its sides" << std::endl
308 : << "or with refDim == TDomain::dim-1 and a (TDomain::dim-1)-dimensional roid"
309 : "for the local side IPs for a side of this roid.");
310 :
311 : return NULL;
312 : }
313 :
314 : template <typename TDomain>
315 : template <int refDim>
316 0 : const MathVector<refDim>* SideAndElemErrEstData<TDomain>::elem_local_ips(const ReferenceObjectID roid)
317 : {
318 : // return NULL if dim is not fitting (not meaningful for the purpose of error estimation)
319 : if (TDomain::dim != refDim) return NULL;
320 :
321 : // check that quad rule exists
322 0 : UG_COND_THROW(!quadRuleElem[roid], "Requesting side IPs for roid " << roid << ", but no quadrature rule has been created for it.");
323 : // check that IP series exists
324 0 : UG_COND_THROW(quadRuleElem[roid]->size() == 0, "No elem IP series available for roid " << roid << ".");
325 :
326 : // cast is necessary, since TDomain::dim might be != refDim,
327 : // but in that case, this return is not reached
328 : return reinterpret_cast<const MathVector<refDim>*>(quadRuleElem[roid]->points());
329 : }
330 :
331 : template <typename TDomain>
332 0 : MathVector<TDomain::dim>* SideAndElemErrEstData<TDomain>::all_side_global_ips
333 : (
334 : GridObject* elem,
335 : const MathVector<dim> vCornerCoords[]
336 : )
337 : {
338 : // get reference object ID
339 0 : ReferenceObjectID roid = elem->reference_object_id();
340 :
341 : // get reference mapping
342 : DimReferenceMapping<dim,dim>& ref_map =
343 : ReferenceMappingProvider::get<dim,dim>(roid, &vCornerCoords[0]);
344 :
345 : // map IPs
346 : try
347 : {
348 0 : m_sideGlobalIPcoords.resize(num_all_side_ips(roid));
349 0 : ref_map.local_to_global(&m_sideGlobalIPcoords[0], &m_SideIPcoords[roid][0], m_SideIPcoords[roid].size());
350 : }
351 0 : catch (std::exception& e)
352 : {
353 0 : UG_THROW("Encountered exception while trying to fill array of global IPs: "
354 : << std::endl << "'" << e.what() << "'");
355 : }
356 :
357 0 : return &m_sideGlobalIPcoords[0];
358 : }
359 :
360 : template <typename TDomain>
361 0 : MathVector<TDomain::dim>* SideAndElemErrEstData<TDomain>::side_global_ips
362 : (
363 : GridObject* elem,
364 : const MathVector<dim> vCornerCoords[]
365 : )
366 : {
367 : // get reference object ID
368 0 : ReferenceObjectID roid = elem->reference_object_id();
369 :
370 : // get reference mapping
371 : DimReferenceMapping<dim-1,dim>& ref_map =
372 : ReferenceMappingProvider::get<dim-1,dim>(roid, &vCornerCoords[0]);
373 :
374 : // map IPs
375 : try
376 : {
377 0 : m_singleSideGlobalIPcoords.resize(quadRuleSide[roid]->size());
378 0 : ref_map.local_to_global(&m_singleSideGlobalIPcoords[0], quadRuleSide[roid]->points(), quadRuleSide[roid]->size());
379 : }
380 0 : catch (std::exception& e)
381 : {
382 0 : UG_THROW("Encountered exception while trying to fill array of global IPs: "
383 : << std::endl << "'" << e.what() << "'");
384 : }
385 :
386 0 : return &m_singleSideGlobalIPcoords[0];
387 : }
388 :
389 : template <typename TDomain>
390 0 : MathVector<TDomain::dim>* SideAndElemErrEstData<TDomain>::elem_global_ips
391 : (
392 : GridObject* elem,
393 : const MathVector<dim> vCornerCoords[]
394 : )
395 : {
396 : // get reference object ID
397 0 : ReferenceObjectID roid = elem->reference_object_id();
398 :
399 : // get reference mapping
400 : DimReferenceMapping<dim,dim>& ref_map =
401 : ReferenceMappingProvider::get<dim,dim>(roid, &vCornerCoords[0]);
402 :
403 : // map IPs
404 : try
405 : {
406 0 : m_elemGlobalIPcoords.resize(num_elem_ips(roid));
407 0 : ref_map.local_to_global(&m_elemGlobalIPcoords[0], quadRuleElem[roid]->points(), quadRuleElem[roid]->size());
408 : }
409 0 : catch (std::exception& e)
410 : {
411 0 : UG_THROW("Encountered exception while trying to fill array of global IPs: "
412 : << std::endl << "'" << e.what() << "'");
413 : }
414 :
415 0 : return &m_elemGlobalIPcoords[0];
416 : }
417 :
418 : template <typename TDomain>
419 : size_t SideAndElemErrEstData<TDomain>::num_side_ips(const side_type* pSide)
420 : {
421 : return m_aaSide[pSide].size();
422 : }
423 :
424 : template <typename TDomain>
425 0 : size_t SideAndElemErrEstData<TDomain>::num_side_ips(const ReferenceObjectID roid)
426 : {
427 : // check that quad rule exists
428 0 : UG_COND_THROW(!quadRuleSide[roid],
429 : "Requesting number of side IPs for roid " << roid << ", but no quadrature rule has been created for it.");
430 :
431 0 : return quadRuleSide[roid]->size();
432 : }
433 :
434 : template <typename TDomain>
435 : size_t SideAndElemErrEstData<TDomain>::first_side_ips(const ReferenceObjectID roid, const size_t side)
436 : {
437 0 : return m_sideIPsStartIndex[roid][side];
438 : }
439 :
440 :
441 : template <typename TDomain>
442 : size_t SideAndElemErrEstData<TDomain>::num_all_side_ips(const ReferenceObjectID roid)
443 : {
444 : return m_SideIPcoords[roid].size();
445 : }
446 :
447 : template <typename TDomain>
448 0 : size_t SideAndElemErrEstData<TDomain>::num_elem_ips(const ReferenceObjectID roid)
449 : {
450 : // check that quad rule exists
451 0 : UG_COND_THROW(!quadRuleElem[roid], "Requesting elem IPs for roid " << roid << ", but no quadrature rule has been created for it.");
452 :
453 0 : return quadRuleElem[roid]->size();
454 : }
455 :
456 : template <typename TDomain>
457 : size_t SideAndElemErrEstData<TDomain>::side_ip_index
458 : ( const ReferenceObjectID roid,
459 : const size_t side,
460 : const size_t ip
461 : )
462 : {
463 : // TODO: check validity of side index
464 : return m_sideIPsStartIndex[roid][side];
465 : }
466 :
467 :
468 : /// Allocates data structures for the error estimator
469 : template <typename TDomain>
470 0 : void SideAndElemErrEstData<TDomain>::alloc_err_est_data
471 : (
472 : ConstSmartPtr<SurfaceView> spSV,
473 : const GridLevel& gl
474 : )
475 : {
476 : // get and check the grid level
477 0 : UG_COND_THROW(gl.type () != GridLevel::SURFACE, "SideFluxErrEstData::alloc_err_est_data:"
478 : " The error estimator can work only with grid functions of the SURFACE type.");
479 :
480 : // get the subset handler
481 : ConstSmartPtr<MGSubsetHandler> ssh = spSV->subset_handler();
482 :
483 : // copy the parameters to the object
484 0 : m_errEstGL = gl;
485 0 : m_spSV = spSV;
486 :
487 : // prepare the attachments and their accessors
488 : MultiGrid* pMG = (MultiGrid*) (ssh->multi_grid());
489 :
490 : // sides
491 0 : pMG->template attach_to_dv<side_type, attachment_type >(m_aSide, std::vector<number>(0));
492 : m_aaSide.access(*pMG, m_aSide);
493 :
494 : // elems
495 0 : pMG->template attach_to_dv<elem_type, attachment_type >(m_aElem, std::vector<number>(0));
496 : m_aaElem.access(*pMG, m_aElem);
497 :
498 : // construct subset group from subset info
499 0 : m_ssg.set_subset_handler(ssh);
500 0 : if (m_vSs.size() == 0) m_ssg.add_all();
501 0 : else m_ssg.add(m_vSs);
502 :
503 : // find out whether we work on an elem subset or a side subset
504 : int ssDimMax = 0;
505 : int ssDimMin = 4;
506 0 : for (size_t si = 0; si < m_ssg.size(); si++)
507 : {
508 0 : const int dim_si = DimensionOfSubset(*ssh, m_ssg[si]);
509 0 : if (dim_si > ssDimMax) ssDimMax = dim_si;
510 0 : if (dim_si < ssDimMin) ssDimMin = dim_si;
511 : }
512 :
513 0 : UG_COND_THROW((ssDimMax != ssDimMin || ssDimMax != TDomain::dim),
514 : "Subsets passed to an instance of SideAndElemErrEstData have varying or inadmissable dimension.\n"
515 : "(NOTE: Only sets of subsets of the same dimension as the domain are allowed.)");
516 :
517 :
518 : // iterate over elems and associated sides and resize the IP value vectors
519 0 : for (size_t si = 0; si < m_ssg.size(); si++)
520 : {
521 : typedef typename SurfaceView::traits<elem_type>::const_iterator elem_iterator_type;
522 0 : elem_iterator_type elem_iter_end = m_spSV->template end<elem_type>(m_ssg[si], m_errEstGL, SurfaceView::ALL);
523 0 : for (elem_iterator_type elem_iter = m_spSV->template begin<elem_type>(m_ssg[si], m_errEstGL, SurfaceView::ALL);
524 0 : elem_iter != elem_iter_end; ++elem_iter)
525 : {
526 : elem_type* elem = *elem_iter;
527 :
528 : // get roid of elem
529 0 : ReferenceObjectID roid = elem->reference_object_id();
530 :
531 : // get number of IPs from quadrature rule for the roid and specified side quadrature order
532 0 : size_t size = quadRuleElem[roid]->size();
533 :
534 : // resize attachment accordingly
535 0 : m_aaElem[elem].resize(size, 0.0);
536 :
537 : // loop associated sides
538 : typename MultiGrid::traits<side_type>::secure_container side_list;
539 : pMG->associated_elements(side_list, elem);
540 :
541 0 : for (size_t side = 0; side < side_list.size(); side++)
542 : {
543 : side_type* pSide = side_list[side];
544 :
545 : // get roid of side
546 0 : ReferenceObjectID roid = pSide->reference_object_id();
547 :
548 : // get number of IPs from quadrature rule for the roid and specified side quadrature order
549 0 : size_t size = quadRuleSide[roid]->size();
550 :
551 : // resize attachment accordingly
552 0 : if (m_aaSide[pSide].size() != size)
553 0 : m_aaSide[pSide].resize(size, 0.0);
554 : }
555 : }
556 : }
557 :
558 : // equalize sizes at horizontal interface
559 : // done by summing up, which, in a first step, will resize the smaller of the two vectors
560 : // to the same size as the bigger one (cf. std_number_vector_attachment_reduce_traits)
561 : // the actual summing does not hurt, since it performs 0 = 0 + 0;
562 : #ifdef UG_PARALLEL
563 : typedef typename GridLayoutMap::Types<side_type>::Layout layout_type;
564 : DistributedGridManager& dgm = *pMG->distributed_grid_manager();
565 : GridLayoutMap& glm = dgm.grid_layout_map();
566 : pcl::InterfaceCommunicator<layout_type> icom;
567 :
568 : // sum all copies at the h-master attachment
569 : ComPol_AttachmentReduce<layout_type, attachment_type> compolSumSideErr(*pMG, m_aSide, PCL_RO_SUM);
570 : icom.exchange_data(glm, INT_H_SLAVE, INT_H_MASTER, compolSumSideErr);
571 : icom.communicate();
572 : icom.exchange_data(glm, INT_H_MASTER, INT_H_SLAVE, compolSumSideErr);
573 : icom.communicate();
574 :
575 : icom.exchange_data(glm, INT_V_SLAVE, INT_V_MASTER, compolSumSideErr);
576 : icom.communicate();
577 : #endif
578 :
579 : // for the case where subset border goes through SHADOW_RIM:
580 : // resize SHADOW_RIM children if parent is resized and vice-versa
581 : typedef typename Grid::traits<side_type>::const_iterator side_iter_type;
582 : side_iter_type end_side = pMG->template end<side_type>();
583 0 : for (side_iter_type side_iter = pMG->template begin<side_type>(); side_iter != end_side; ++side_iter)
584 : {
585 : // get the sides on both the levels (coarse and fine)
586 : side_type* c_rim_side = *side_iter;
587 :
588 : // if side is not SHADOW_RIM: do nothing
589 0 : if (!m_spSV->surface_state(c_rim_side).partially_contains(SurfaceView::SHADOW_RIM)) continue;
590 :
591 : // loop rim side children
592 : bool resize_parent = false;
593 : const size_t num_children = pMG->template num_children<side_type>(c_rim_side);
594 0 : for (size_t ch = 0; ch < num_children; ch++)
595 : {
596 : side_type* child_side = pMG->template get_child<side_type>(c_rim_side, ch);
597 :
598 : // get roid of side
599 0 : ReferenceObjectID child_roid = child_side->reference_object_id();
600 :
601 : // get number of IPs from quadrature rule for the roid and specified side quadrature order
602 0 : size_t child_size = quadRuleSide[child_roid]->size();
603 :
604 : // resize attachment accordingly
605 0 : if (m_aaSide[child_side].size() != child_size && num_side_ips(c_rim_side) > 0)
606 0 : m_aaSide[child_side].resize(child_size, 0.0);
607 0 : else if (m_aaSide[child_side].size() == child_size && num_side_ips(c_rim_side) == 0)
608 : resize_parent = true;
609 : }
610 :
611 0 : if (resize_parent)
612 : {
613 : // get roid of side
614 0 : ReferenceObjectID roid = c_rim_side->reference_object_id();
615 :
616 : // get number of IPs from quadrature rule for the roid and specified side quadrature order
617 0 : size_t size = quadRuleSide[roid]->size();
618 :
619 : // resize attachment accordingly
620 0 : m_aaSide[c_rim_side].resize(size, 0.0);
621 : }
622 : }
623 :
624 : #ifdef UG_PARALLEL
625 : icom.exchange_data(glm, INT_V_MASTER, INT_V_SLAVE, compolSumSideErr);
626 : icom.communicate();
627 : #endif
628 0 : };
629 :
630 : /// Called after the computation of the error estimator data in all the elements
631 : /** Because of the multigrid hierarchy, the sides at the rim of a multigrid level
632 : * only have one of the two flux terms (cis or trans). In order to calculate a
633 : * jump, we need to add up the resp. attachments on parent and child for that side.
634 : */
635 : template <typename TDomain>
636 0 : void SideAndElemErrEstData<TDomain>::summarize_err_est_data(SmartPtr<TDomain> spDomain)
637 : {
638 0 : MultiGrid* pMG = spDomain->subset_handler()->multi_grid();
639 :
640 : // STEP 1: Ensure consistency over horizontal interfaces.
641 : // Add the IP values of horizontal interfaces. Care has to be taken in the case where a side
642 : // is SHADOW_RIM, then it only has values from one side of the interface. This will be checked
643 : // by calling size() of the IP value vector.
644 : #ifdef UG_PARALLEL
645 : typedef typename GridLayoutMap::Types<side_type>::Layout layout_type;
646 : DistributedGridManager& dgm = *pMG->distributed_grid_manager();
647 : GridLayoutMap& glm = dgm.grid_layout_map();
648 : pcl::InterfaceCommunicator<layout_type> icom;
649 :
650 : // sum all copies at the h-master attachment
651 : ComPol_AttachmentReduce<layout_type, attachment_type> compolSumSideErr(*pMG, m_aSide, PCL_RO_SUM);
652 : icom.exchange_data(glm, INT_H_SLAVE, INT_H_MASTER, compolSumSideErr);
653 : icom.communicate();
654 :
655 : // copy the sum from the master to all of its slave-copies
656 : ComPol_CopyAttachment<layout_type, attachment_type> compolCopySideErr(*pMG, m_aSide);
657 : icom.exchange_data(glm, INT_H_MASTER, INT_H_SLAVE, compolCopySideErr);
658 : icom.communicate();
659 :
660 : // STEP 2: Ensure correct values in vertical master rim side children.
661 : // since we're copying from vmasters to vslaves later on, we have to make
662 : // sure, that also all v-masters contain the correct values.
663 : // todo: communication to vmasters may not be necessary here...
664 : // it is performed to make sure that all surface-rim-children
665 : // contain their true value.
666 : icom.exchange_data(glm, INT_V_SLAVE, INT_V_MASTER, compolCopySideErr);
667 : icom.communicate();
668 : #endif
669 :
670 : // STEP 3: Calculate values on rim sides.
671 : // loop the rim sides and add the jumps
672 : // TODO: not sure whether we cannot simply loop with
673 : // m_spSV->template end<side_type> (m_errEstGL, SurfaceView::SHADOW_RIM)
674 :
675 : //typedef typename SurfaceView::traits<side_type>::const_iterator t_iterator;
676 : //t_iterator end_rim_side_iter = m_spSV->template end<side_type> (m_errEstGL, SurfaceView::SHADOW_RIM);
677 : //for (t_iterator rim_side_iter = m_spSV->template begin<side_type> (m_errEstGL, SurfaceView::SHADOW_RIM);
678 : // rim_side_iter != end_rim_side_iter; ++rim_side_iter)
679 : typedef typename Grid::traits<side_type>::const_iterator side_iter_type;
680 : side_iter_type end_side = pMG->template end<side_type>();
681 0 : for (side_iter_type side_iter = pMG->template begin<side_type>(); side_iter != end_side; ++side_iter)
682 : {
683 : // get the sides on both the levels (coarse and fine)
684 : side_type* c_rim_side = *side_iter;
685 :
686 : // if side is not SHADOW_RIM: do nothing
687 0 : if (!m_spSV->surface_state(c_rim_side).partially_contains(SurfaceView::SHADOW_RIM)) continue;
688 :
689 : // if no ips registered on this side: do nothing
690 0 : if (num_side_ips(c_rim_side) == 0) continue;
691 :
692 : // distinguish hanging nodes and clozure elements by number of children
693 : const size_t num_children = pMG->template num_children<side_type>(c_rim_side);
694 :
695 : // closure elements
696 0 : if (num_children == 1)
697 : {
698 : side_type* f_rim_side = pMG->template get_child<side_type>(c_rim_side, 0);
699 :
700 : // add up total jump and save it for both sides
701 0 : for (size_t i = 0; i < m_aaSide[c_rim_side].size(); i++)
702 : {
703 : number& c_rim_flux = m_aaSide[c_rim_side][i];
704 : number& f_rim_flux = m_aaSide[f_rim_side][i];
705 0 : number flux_jump = f_rim_flux + c_rim_flux;
706 0 : c_rim_flux = f_rim_flux = flux_jump;
707 : }
708 : }
709 :
710 : // hanging node
711 : else
712 : {
713 : // TODO: This is a somehow dirty hack (but will converge with h->0),
714 : // a correct interpolation with all given points would be preferable.
715 : // And even if it is not: Check whether it could be beneficial to use structures like k-d tree,
716 : // since this is the most naive implementation possible. I guess, the number of IPs
717 : // is likely to be very low in most of the applications.
718 :
719 : // The IPs are not in the same locations on both sides:
720 : // Interpolate values for both sides, then add up and distribute.
721 : // Interpolation is piecewise constant (use value of nearest neighbour).
722 :
723 : // map coarse side local IPs to global
724 : std::vector<MathVector<dim> > c_coCo;
725 0 : CollectCornerCoordinates(c_coCo, c_rim_side, spDomain->position_accessor(), false);
726 0 : MathVector<dim>* c_gloIPs = side_global_ips(c_rim_side, &c_coCo[0]);
727 :
728 : // interpolate fine IPs on coarse side
729 0 : for (size_t ch = 0; ch < num_children; ch++)
730 : {
731 : // get fine side
732 : side_type* f_rim_side = pMG->template get_child<side_type>(c_rim_side, ch);
733 :
734 : // map fine side local IPs to global
735 : std::vector<MathVector<dim> > f_coCo;
736 0 : CollectCornerCoordinates(f_coCo, f_rim_side, spDomain->position_accessor(), false);
737 0 : MathVector<dim>* f_gloIPs = side_global_ips(f_rim_side, &f_coCo[0]);
738 :
739 : // for each fine side IP, find nearest coarse side IP
740 0 : for (size_t fip = 0; fip < m_aaSide[f_rim_side].size(); fip++)
741 : {
742 0 : if (m_aaSide[c_rim_side].size() < 1)
743 0 : {UG_THROW("No IP defined for coarse side.");}
744 :
745 : size_t nearest = 0;
746 : MathVector<dim> diff;
747 0 : VecSubtract(diff, f_gloIPs[fip], c_gloIPs[0]);
748 : number dist = VecLengthSq(diff);
749 :
750 : // loop coarse IPs
751 0 : for (size_t cip = 1; cip < m_aaSide[c_rim_side].size(); cip++)
752 : {
753 0 : VecSubtract(diff, f_gloIPs[fip], c_gloIPs[cip]);
754 0 : if (VecLengthSq(diff) < dist)
755 : {
756 : dist = VecLengthSq(diff);
757 : nearest = cip;
758 : }
759 : }
760 0 : m_aaSide[f_rim_side][fip] += m_aaSide[c_rim_side][nearest];
761 : }
762 : }
763 :
764 : // interpolate coarse IPs on fine side:
765 0 : for (size_t cip = 0; cip < m_aaSide[c_rim_side].size(); cip++)
766 : {
767 : MathVector<dim> diff;
768 : number dist = std::numeric_limits<number>::infinity();
769 : number val = 0.0;
770 :
771 : // we have to loop all child sides
772 0 : for (size_t ch = 0; ch < pMG->template num_children<side_type>(c_rim_side); ch++)
773 : {
774 : // get fine side
775 : side_type* f_rim_side = pMG->template get_child<side_type>(c_rim_side, ch);
776 :
777 : // map fine side local IPs to global
778 : std::vector<MathVector<dim> > f_coCo;
779 0 : CollectCornerCoordinates(f_coCo, f_rim_side, spDomain->position_accessor(), false);
780 0 : MathVector<dim>* f_gloIPs = side_global_ips(f_rim_side, &f_coCo[0]);
781 :
782 : // loop coarse IPs
783 0 : for (size_t fip = 1; fip < m_aaSide[f_rim_side].size(); fip++)
784 : {
785 0 : VecSubtract(diff, f_gloIPs[fip], c_gloIPs[cip]);
786 0 : if (VecLengthSq(diff) < dist)
787 : {
788 : dist = VecLengthSq(diff);
789 0 : val = m_aaSide[f_rim_side][fip];
790 : }
791 : }
792 : }
793 :
794 : // the fine grid already contains the correct values
795 0 : m_aaSide[c_rim_side][cip] = val;
796 : }
797 0 : }
798 : }
799 :
800 : // STEP 4: Ensure consistency in slave rim sides.
801 : // Copy from v-masters to v-slaves, since there may be constrained sides which locally
802 : // do not have a constraining element. Note that constrained V-Masters always have a local
803 : // constraining element and thus contain the correct value.
804 : #ifdef UG_PARALLEL
805 : compolCopySideErr.extract_on_constrained_elems_only(true);
806 : icom.exchange_data(glm, INT_V_MASTER, INT_V_SLAVE, compolCopySideErr);
807 : icom.communicate();
808 : #endif
809 0 : };
810 :
811 :
812 : template <typename TDomain>
813 0 : number SideAndElemErrEstData<TDomain>::get_elem_error_indicator(GridObject* pElem, const MathVector<dim> vCornerCoords[])
814 : {
815 :
816 : // the indicator
817 : number etaSq = 0.0;
818 :
819 : // elem terms
820 : // info about reference element type
821 0 : ReferenceObjectID roid = pElem->reference_object_id();
822 : const DimReferenceElement<dim>& refElem = ReferenceElementProvider::get<dim>(roid);
823 :
824 : // only take into account elem contributions of the subsets defined in the constructor
825 0 : int ssi = surface_view()->subset_handler()->get_subset_index(pElem);
826 0 : if (!m_ssg.contains(ssi)) return 0.0;
827 :
828 : // check number of integration points
829 0 : size_t nIPs = quadRuleElem[roid]->size();
830 0 : std::vector<number>& integrand = m_aaElem[dynamic_cast<elem_type*>(pElem)];
831 0 : if (nIPs != integrand.size())
832 0 : UG_THROW("Element attachment vector does not have the required size for integration!");
833 :
834 : // get reference element mapping
835 0 : DimReferenceMapping<dim,dim>& mapping = ReferenceMappingProvider::get<dim,dim>(roid);
836 0 : mapping.update(&vCornerCoords[0]);
837 :
838 : // compute det of jacobian at each IP
839 0 : std::vector<number> det(nIPs);
840 0 : mapping.sqrt_gram_det(&det[0], quadRuleElem[roid]->points(), nIPs);
841 :
842 : // integrate
843 : number sum = 0.0;
844 0 : for (size_t ip = 0; ip < nIPs; ip++)
845 0 : sum += quadRuleElem[roid]->weight(ip) * std::pow(integrand[ip], 2.0) * det[ip];
846 :
847 : // scale by diam^2(elem) (= h_T^2)
848 : // c* vol(elem) >= diam^3(elem) >= vol(elem)
849 : // therefore, up to a constant, error estimator can calculate diam(elem) as (vol(elem))^(1/3)
850 0 : number diamSq = std::pow(ElementSize<dim>(roid, &vCornerCoords[0]), 2.0/dim);
851 :
852 : // add to error indicator
853 0 : etaSq += diamSq * sum;
854 :
855 : // side terms
856 : // get the sides of the element
857 0 : MultiGrid* pErrEstGrid = (MultiGrid*) (surface_view()->subset_handler()->multi_grid());
858 : typename MultiGrid::traits<side_type>::secure_container side_list;
859 0 : pErrEstGrid->associated_elements(side_list, pElem);
860 :
861 : // loop sides
862 0 : for (size_t side = 0; side < side_list.size(); side++)
863 : {
864 : side_type* pSide = side_list[side];
865 :
866 : // info about reference side type
867 0 : ReferenceObjectID side_roid = pSide->reference_object_id();
868 :
869 : // check number of integration points
870 0 : size_t nsIPs = quadRuleSide[side_roid]->size();
871 0 : UG_COND_THROW(nsIPs != m_aaSide[pSide].size(),
872 : "Side attachment vector does not have the required size for integration!");
873 :
874 : // get side corners
875 0 : std::vector<MathVector<dim> > vSideCornerCoords(0);
876 : size_t nsCo = refElem.num(dim-1, side, 0);
877 0 : for (size_t co = 0; co < nsCo; co++)
878 0 : vSideCornerCoords.push_back(vCornerCoords[refElem.id(dim-1, side, 0, co)]);
879 :
880 : // get reference element mapping
881 0 : DimReferenceMapping<dim-1,dim>& mapping = ReferenceMappingProvider::get<dim-1,dim>(side_roid);
882 0 : mapping.update(&vSideCornerCoords[0]);
883 :
884 : // compute det of jacobian at each IP
885 0 : det.resize(nsIPs);
886 0 : mapping.sqrt_gram_det(&det[0], quadRuleSide[side_roid]->points(), nsIPs);
887 :
888 : // integrate
889 : number sum = 0.0;
890 0 : for (size_t ip = 0; ip < nsIPs; ip++)
891 0 : sum += quadRuleSide[side_roid]->weight(ip) * std::pow(m_aaSide[pSide][ip], 2.0) * det[ip];
892 :
893 : // scale by diam(side) (= $h_E$)
894 : // c* vol(side) >= diam^2(side) >= vol(side)
895 : // therefore, up to a constant, error estimator can calculate diam as sqrt(vol(side))
896 : number diamE;
897 : if (dim==1) { diamE = 1.0; }
898 0 : else if (dim==2) { diamE = ElementSize<dim>(side_roid, &vSideCornerCoords[0]); }
899 0 : else if (dim==3) { diamE = std::sqrt(ElementSize<dim>(side_roid, &vSideCornerCoords[0])); }
900 : else { UG_THROW("Unknown dimension: "<<dim <<"."); }
901 :
902 : // add to error indicator
903 0 : etaSq += diamE * sum;
904 : }
905 0 : etaSq = (m_type == SideAndElemErrEstData<TDomain>::L2_ERROR_TYPE) ? etaSq*diamSq : etaSq;
906 : return etaSq;
907 0 : }
908 :
909 :
910 : /// Releases data structures of the error estimator
911 : template <typename TDomain>
912 0 : void SideAndElemErrEstData<TDomain>::release_err_est_data ()
913 : {
914 : // release the attachments
915 0 : MultiGrid * pMG = (MultiGrid *) (m_spSV->subset_handler()->multi_grid());
916 :
917 : m_aaSide.invalidate();
918 0 : pMG->template detach_from<side_type>(m_aSide);
919 :
920 : m_aaElem.invalidate();
921 0 : pMG->template detach_from<elem_type>(m_aElem);
922 : //m_spSV = ConstSmartPtr<SurfaceView> (NULL); // this raises a rte
923 0 : };
924 :
925 :
926 :
927 : // ******** class MultipleErrEstData ********
928 :
929 : template <typename TDomain, typename TErrEstData>
930 0 : void MultipleErrEstData<TDomain,TErrEstData>::
931 : alloc_err_est_data(ConstSmartPtr<SurfaceView> spSV, const GridLevel& gl)
932 : {
933 : // only called if consider_me()
934 0 : for (size_t eed = 0; eed < num(); eed++)
935 0 : m_vEed[eed]->alloc_err_est_data(spSV, gl);
936 0 : }
937 :
938 : template <typename TDomain, typename TErrEstData>
939 0 : void MultipleErrEstData<TDomain,TErrEstData>::
940 : summarize_err_est_data(SmartPtr<TDomain> spDomain)
941 : {
942 : // only called if consider_me()
943 0 : for (size_t eed = 0; eed < num(); eed++)
944 0 : m_vEed[eed]->summarize_err_est_data(spDomain);
945 0 : }
946 :
947 : template <typename TDomain, typename TErrEstData>
948 0 : number MultipleErrEstData<TDomain,TErrEstData>::
949 : get_elem_error_indicator(GridObject* elem, const MathVector<dim> vCornerCoords[])
950 : {
951 : // only called if consider_me()
952 : number sum = 0.0;
953 0 : for (size_t eed = 0; eed < num(); eed++)
954 0 : sum += m_vEed[eed]->get_elem_error_indicator(elem, vCornerCoords);
955 :
956 0 : return sum;
957 : }
958 :
959 : template <typename TDomain, typename TErrEstData>
960 0 : void MultipleErrEstData<TDomain,TErrEstData>::
961 : release_err_est_data()
962 : {
963 : // only called if consider_me()
964 0 : for (size_t eed = 0; eed < num(); eed++)
965 0 : m_vEed[eed]->release_err_est_data();
966 0 : }
967 :
968 :
969 :
970 : // ******** class MultipleSideAndElemErrEstData ********
971 :
972 : template <typename TDomain>
973 0 : void MultipleSideAndElemErrEstData<TDomain>::
974 : add(SmartPtr<SideAndElemErrEstData<TDomain> > spEed, const char* fct)
975 : {
976 : check_equal_order();
977 0 : this->MultipleErrEstData<TDomain, SideAndElemErrEstData<TDomain> >::add(spEed, fct);
978 0 : }
979 :
980 : template <typename TDomain>
981 : void MultipleSideAndElemErrEstData<TDomain>::check_equal_order()
982 : {
983 0 : check_equal_side_order();
984 0 : check_equal_elem_order();
985 : }
986 :
987 : template <typename TDomain>
988 0 : void MultipleSideAndElemErrEstData<TDomain>::check_equal_side_order()
989 : {
990 0 : m_bEqSideOrder = false;
991 :
992 0 : if (this->m_vEed.size() == 0)
993 : {
994 0 : m_bEqSideOrder = true;
995 0 : return;
996 : }
997 :
998 0 : size_t side_order = this->m_vEed[0]->side_order();
999 :
1000 0 : for (size_t ee = 1; ee < this->m_vEed.size(); ee++)
1001 0 : if (this->m_vEed[ee]->side_order() != side_order) return;
1002 :
1003 0 : m_bEqSideOrder = true;
1004 : }
1005 :
1006 : template <typename TDomain>
1007 0 : void MultipleSideAndElemErrEstData<TDomain>::check_equal_elem_order()
1008 : {
1009 0 : m_bEqElemOrder = false;
1010 :
1011 0 : if (this->m_vEed.size() == 0)
1012 : {
1013 0 : m_bEqElemOrder = true;
1014 0 : return;
1015 : }
1016 :
1017 0 : size_t elem_order = this->m_vEed[0]->elem_order();
1018 :
1019 0 : for (size_t ee = 1; ee < this->m_vEed.size(); ee++)
1020 0 : if (this->m_vEed[ee]->elem_order() != elem_order) return;
1021 :
1022 0 : m_bEqElemOrder = true;
1023 : }
1024 :
1025 :
1026 : } // end of namespace ug
1027 :
1028 : /* End of File */
|