Line data Source code
1 : /*
2 : * Copyright (c) 2011-2015: G-CSC, Goethe University Frankfurt
3 : * Author: Andreas Vogel
4 : *
5 : * This file is part of UG4.
6 : *
7 : * UG4 is free software: you can redistribute it and/or modify it under the
8 : * terms of the GNU Lesser General Public License version 3 (as published by the
9 : * Free Software Foundation) with the following additional attribution
10 : * requirements (according to LGPL/GPL v3 §7):
11 : *
12 : * (1) The following notice must be displayed in the Appropriate Legal Notices
13 : * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
14 : *
15 : * (2) The following notice must be displayed at a prominent place in the
16 : * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
17 : *
18 : * (3) The following bibliography is recommended for citation and must be
19 : * preserved in all covered files:
20 : * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
21 : * parallel geometric multigrid solver on hierarchically distributed grids.
22 : * Computing and visualization in science 16, 4 (2013), 151-164"
23 : * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
24 : * flexible software system for simulating pde based models on high performance
25 : * computers. Computing and visualization in science 16, 4 (2013), 165-179"
26 : *
27 : * This program is distributed in the hope that it will be useful,
28 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 : * GNU Lesser General Public License for more details.
31 : */
32 :
33 : #ifndef __H__UG__LIB_DISC__FUNCTION_SPACE__GRID_FUNCTION_IMPL__
34 : #define __H__UG__LIB_DISC__FUNCTION_SPACE__GRID_FUNCTION_IMPL__
35 :
36 : #include "grid_function.h"
37 :
38 : #include "lib_algebra/algebra_type.h"
39 : #include "adaption_surface_grid_function.h"
40 : #include "lib_grid/algorithms/serialization.h"
41 : #include "common/profiler/profiler.h"
42 :
43 : #ifdef UG_PARALLEL
44 : #include "pcl/pcl.h"
45 : #include "lib_algebra/parallelization/parallelization.h"
46 : #include "lib_grid/parallelization/util/compol_copy_attachment.h"
47 : #endif
48 :
49 : namespace ug{
50 :
51 : ////////////////////////////////////////////////////////////////////////////////
52 : // GridFunction : init
53 : ////////////////////////////////////////////////////////////////////////////////
54 :
55 : template <typename TDomain, typename TAlgebra>
56 0 : GridFunction<TDomain, TAlgebra>::
57 : GridFunction(SmartPtr<ApproximationSpace<TDomain> > spApproxSpace,
58 0 : SmartPtr<DoFDistribution> spDoFDistr, bool bManage)
59 : {
60 0 : init(spApproxSpace, spDoFDistr, bManage);
61 0 : };
62 :
63 : template <typename TDomain, typename TAlgebra>
64 0 : GridFunction<TDomain, TAlgebra>::
65 0 : GridFunction(SmartPtr<ApproximationSpace<TDomain> > spApproxSpace, bool bManage)
66 : {
67 0 : init(spApproxSpace, spApproxSpace->dof_distribution(GridLevel(GridLevel::TOP, GridLevel::SURFACE, false)), bManage);
68 0 : };
69 :
70 : template <typename TDomain, typename TAlgebra>
71 0 : GridFunction<TDomain, TAlgebra>::
72 0 : GridFunction(SmartPtr<ApproximationSpace<TDomain> > spApproxSpace, int level, bool bManage)
73 : {
74 0 : init(spApproxSpace, spApproxSpace->dof_distribution(GridLevel(level, GridLevel::SURFACE, false)), bManage);
75 0 : };
76 :
77 : template <typename TDomain, typename TAlgebra>
78 0 : GridFunction<TDomain, TAlgebra>::
79 0 : GridFunction(SmartPtr<approximation_space_type> spApproxSpace, const GridLevel& gl, bool bManage)
80 : {
81 0 : init(spApproxSpace, spApproxSpace->dof_distribution(gl), bManage);
82 0 : };
83 :
84 : template <typename TDomain, typename TAlgebra>
85 : void
86 0 : GridFunction<TDomain, TAlgebra>::
87 : init(SmartPtr<ApproximationSpace<TDomain> > spApproxSpace,
88 : SmartPtr<DoFDistribution> spDoFDistr, bool bManage)
89 : {
90 0 : m_spApproxSpace = spApproxSpace;
91 0 : m_spDD = spDoFDistr;
92 0 : m_bManaged = bManage;
93 0 : m_bRedistribute = true;
94 0 : this->set_dof_distribution_info(m_spApproxSpace->dof_distribution_info());
95 0 : m_spAdaptGridFct = SPNULL;
96 :
97 : // check correct passings
98 0 : if(m_spDD.invalid()) UG_THROW("GridFunction: DoF Distribution is null.");
99 0 : if(m_spApproxSpace.invalid()) UG_THROW("GridFunction: ApproxSpace is null.");
100 :
101 : // check correct choice of compile-time algebra
102 0 : check_algebra();
103 :
104 : // resize the vector to correct size
105 0 : resize_values(num_indices());
106 :
107 0 : if(bManage) {
108 : // registered as managed by dof distribution
109 0 : m_spDD->manage_grid_function(*this);
110 :
111 : // register to observe grid
112 0 : register_at_adaption_msg_hub();
113 : }
114 :
115 :
116 : #ifdef UG_PARALLEL
117 : // set layouts
118 : this->set_layouts(m_spDD->layouts());
119 :
120 : // set storage type
121 : this->set_storage_type(PST_UNDEFINED);
122 : #endif
123 0 : };
124 :
125 : template <typename TDomain, typename TAlgebra>
126 : void
127 0 : GridFunction<TDomain, TAlgebra>::check_algebra()
128 : {
129 : // get blocksize of algebra
130 : const int blockSize = algebra_type::blockSize;
131 :
132 : // a) If blocksize fixed and > 1, we need grouping.
133 0 : if(blockSize > 1 && !this->m_spDD->grouped())
134 : {
135 0 : UG_THROW("Fixed block algebra needs grouped dofs.");
136 : }
137 : // b) If blocksize flexible, we group
138 : else if (blockSize == AlgebraType::VariableBlockSize
139 : && !this->m_spDD->grouped())
140 : {
141 : UG_THROW("Variable block algebra needs grouped dofs.");
142 : }
143 : // c) If blocksize == 1, we do not group. This will allow us to handle
144 : // this case for any problem.
145 0 : else if (blockSize == 1 && this->m_spDD->grouped())
146 : {
147 0 : UG_THROW("block 1x1 algebra needs non-grouped dofs.");
148 : }
149 0 : }
150 :
151 : template <typename TDomain, typename TAlgebra>
152 : size_t
153 0 : GridFunction<TDomain, TAlgebra>::
154 : num_dofs(int fct, int si) const
155 : {
156 0 : DoFCount dc = m_spDD->dof_count();
157 0 : dc.sum_values_over_procs();
158 :
159 0 : return dc.num(fct, si, DoFCount::UNIQUE_SS, DoFCount::UNIQUE_ES);
160 : }
161 :
162 : ////////////////////////////////////////////////////////////////////////////////
163 : // GridFunction : cloning
164 : ////////////////////////////////////////////////////////////////////////////////
165 :
166 : template <typename TDomain, typename TAlgebra>
167 : void
168 0 : GridFunction<TDomain, TAlgebra>::
169 : clone_pattern(const this_type& v)
170 : {
171 : // init normally
172 0 : init(v.m_spApproxSpace, v.m_spDD, v.m_bManaged);
173 : enable_redistribution(v.redistribution_enabled());
174 :
175 : #ifdef UG_PARALLEL
176 : // copy storage type
177 : this->set_storage_type(v.get_storage_mask());
178 : this->set_layouts(v.layouts());
179 : #endif
180 0 : };
181 :
182 :
183 : template <typename TDomain, typename TAlgebra>
184 0 : void GridFunction<TDomain, TAlgebra>::assign(const vector_type& v)
185 : {
186 : // check size
187 0 : if(v.size() != vector_type::size())
188 0 : UG_THROW("GridFunction: Assigned vector has incorrect size.");
189 :
190 : // assign vector
191 0 : *(dynamic_cast<vector_type*>(this)) = v;
192 0 : }
193 :
194 : template <typename TDomain, typename TAlgebra>
195 : void GridFunction<TDomain, TAlgebra>::assign(const this_type& v)
196 : {
197 : // clone pattern
198 0 : clone_pattern(v);
199 :
200 : // copy values
201 0 : *(dynamic_cast<vector_type*>(this)) = *dynamic_cast<const vector_type*>(&v);
202 0 : }
203 :
204 : template <typename TDomain, typename TAlgebra>
205 : GridFunction<TDomain, TAlgebra>*
206 0 : GridFunction<TDomain, TAlgebra>::virtual_clone_without_values() const
207 : {
208 : GridFunction<TDomain, TAlgebra>* p =
209 0 : new GridFunction<TDomain, TAlgebra>(m_spApproxSpace, m_spDD, m_bManaged);
210 : p->enable_redistribution(redistribution_enabled());
211 0 : if(p->size() != this->size())
212 0 : p->resize(this->size());
213 : #ifdef UG_PARALLEL
214 : p->set_layouts(this->layouts());
215 : #endif
216 :
217 0 : return p;
218 : }
219 :
220 : ////////////////////////////////////////////////////////////////////////////////
221 : // GridFunction : dof distribution callbacks
222 : ////////////////////////////////////////////////////////////////////////////////
223 :
///	resizes the underlying algebra vector, filling new entries with a default
/**	@param s             new number of entries
 *	@param defaultValue  value written into every newly created entry
 */
template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
resize_values(size_t s, number defaultValue)
{
//	remember the previous length, so that only fresh entries get initialized
	const size_t prevSize = vector_type::size();

//	adjust the underlying algebra vector
	vector_type::resize_sloppy(s);

//	initialize all newly added entries with the given default
	for(size_t i = prevSize; i < s; ++i)
		(*this)[i] = defaultValue;
}
239 :
///	permutes the values of this vector: new[vIndNew[i]] = old[i]
/**	@param vIndNew  permutation map; must have the same cardinality as this
 *	                vector and every entry must lie in [0, size())
 *	@throws UGError if the cardinality or an index is out of range
 */
template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
permute_values(const std::vector<size_t>& vIndNew)
{
//	check sizes
	if(vIndNew.size() != this->size())
		UG_THROW("GridFunction::permute_values: For a permutation the"
		         " index set must have same cardinality as vector.");

// \todo: avoid tmp vector, only copy values into new vector and use that one
//	create tmp vector
	vector_type vecTmp; vecTmp.resize(this->size());
#ifdef UG_PARALLEL
//	copy storage type
	vecTmp.set_storage_type(this->get_storage_mask());
	vecTmp.set_layouts(this->layouts());
#endif

//	loop indices and copy values
	for(size_t i = 0; i < vIndNew.size(); ++i){
	//	guard against out-of-range targets: without this check an invalid
	//	permutation map would write past the end of vecTmp (undefined behavior)
		if(vIndNew[i] >= vecTmp.size())
			UG_THROW("GridFunction::permute_values: Permutation index "
			         << vIndNew[i] << " out of range [0," << vecTmp.size() << ").");
		vecTmp[vIndNew[i]] = this->operator[](i);
	}

//	copy tmp vector into this vector
	this->assign(vecTmp);
}
266 :
///	copies values within this vector: this[second] = this[first] for each pair
/**	@param vIndexMap  list of (source, target) index pairs
 *	@param bDisjunct  if true, sources and targets are guaranteed not to
 *	                  overlap and values are copied directly; otherwise all
 *	                  source values are buffered first so that a target of one
 *	                  pair being the source of another still reads old values
 */
template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
copy_values(const std::vector<std::pair<size_t, size_t> >& vIndexMap, bool bDisjunct)
{
//	nothing to do for an empty map. This also guards the access to
//	vIndexMap[vIndexMap.size()-1] below, which was undefined behavior
//	for an empty map before.
	if(vIndexMap.empty()) return;

//	disjunct case: copy directly
	if(bDisjunct){
		for(size_t i = 0; i < vIndexMap.size(); ++i)
			this->operator[](vIndexMap[i].second)
				= this->operator[](vIndexMap[i].first);
	}
	else {
	//	potentially overlapping: buffer all source values first
		typedef typename vector_type::value_type value_type;
		std::vector<value_type> values;
	//	size the buffer to cover the last source index (+1: indices are
	//	0-based, so index k needs a buffer of size k+1)
		values.resize(vIndexMap[vIndexMap.size()-1].first + 1);
		for(size_t i = 0; i < vIndexMap.size(); ++i){
			const size_t index = vIndexMap[i].first;
		//	the map need not be sorted; grow on demand for earlier pairs
		//	with larger source indices
			if(index >= values.size()) values.resize(index + 1);
			values[index] = this->operator[](index);
		}
	//	now write the buffered values to their targets
		for(size_t i = 0; i < vIndexMap.size(); ++i)
			this->operator[](vIndexMap[i].second)
				= values[vIndexMap[i].first];
	}
}
291 :
292 : ////////////////////////////////////////////////////////////////////////////////
293 : // GridFunction : grid adaption
294 : ////////////////////////////////////////////////////////////////////////////////
295 :
296 : template <typename TDomain, typename TAlgebra>
297 : void
298 0 : GridFunction<TDomain, TAlgebra>::
299 : register_at_adaption_msg_hub()
300 : {
301 : // register function for grid adaption
302 0 : SPMessageHub msgHub = domain()->grid()->message_hub();
303 0 : m_spGridAdaptionCallbackID =
304 : msgHub->register_class_callback(this,
305 : &this_type::grid_changed_callback);
306 :
307 0 : m_spGridDistributionCallbackID =
308 : msgHub->register_class_callback(this,
309 : &this_type::grid_distribution_callback);
310 0 : }
311 :
///	called by the grid's message hub while the grid is being adapted
/**	Carries the values of this function through a refinement/coarsening cycle:
 *	- adaption begins:  values are copied from the algebra vector into grid
 *	                    attachments (AdaptionSurfaceGridFunction)
 *	- coarsening step:  attachment values are restricted to coarser elements
 *	- refinement step:  attachment values are prolongated to finer elements
 *	- adaption ends:    the vector is resized to the new number of dofs and
 *	                    the attachment values are copied back
 */
template <typename TDomain, typename TAlgebra>
void
GridFunction<TDomain, TAlgebra>::
grid_changed_callback(const GridMessage_Adaption& msg)
{
//	before adaption begins: copy values into grid attachments
	if(msg.adaption_begins()){
	//	prepare the helper function that stores values as grid attachments
		m_spAdaptGridFct = SmartPtr<AdaptionSurfaceGridFunction<TDomain> >(
							new AdaptionSurfaceGridFunction<TDomain>(this->domain()));
		m_spAdaptGridFct->copy_from_surface(*this);
	}

//	before coarsening: restrict values
	if(msg.coarsening() && msg.step_begins()){
#ifdef UG_PARALLEL
	//	since ghosts may exist in a parallel environment and since those ghosts
	//	may be removed during coarsening, we have to make sure, that the correct
	//	values are stored in those ghosts before restriction is performed.
	//	For each element type that can carry dofs, the attachment values are
	//	pushed from v-slaves to v-masters. Note that exchange_data() only
	//	schedules the exchange; communicate() performs it.
		Grid& grid = *domain()->grid();
		typedef typename AdaptionSurfaceGridFunction<TDomain>::AValues AValues;
		if(m_spDDI->max_dofs(VERTEX)){
			ComPol_CopyAttachment<VertexLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
			pcl::InterfaceCommunicator<VertexLayout> com;
			com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
							  INT_V_SLAVE, INT_V_MASTER, compol);
			com.communicate();
		}
		if(m_spDDI->max_dofs(EDGE)){
			ComPol_CopyAttachment<EdgeLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
			pcl::InterfaceCommunicator<EdgeLayout> com;
			com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
							  INT_V_SLAVE, INT_V_MASTER, compol);
			com.communicate();
		}
		if(m_spDDI->max_dofs(FACE)){
			ComPol_CopyAttachment<FaceLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
			pcl::InterfaceCommunicator<FaceLayout> com;
			com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
							  INT_V_SLAVE, INT_V_MASTER, compol);
			com.communicate();
		}
		if(m_spDDI->max_dofs(VOLUME)){
			ComPol_CopyAttachment<VolumeLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
			pcl::InterfaceCommunicator<VolumeLayout> com;
			com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
							  INT_V_SLAVE, INT_V_MASTER, compol);
			com.communicate();
		}
#endif
		m_spAdaptGridFct->do_restrict(msg);
	}

//	after refinement: prolongate values
	if(msg.refinement() && msg.step_ends()){
		m_spAdaptGridFct->prolongate(msg);
	}

//	at end of adaption: copy values back into algebra vector
	if(msg.adaption_ends())
	{
	//	all grid functions must resize to the current number of dofs
		resize_values(num_indices());

#ifdef UG_PARALLEL
	//	set layouts (they may have changed with the grid)
		this->set_layouts(m_spDD->layouts());
#endif

	//	transfer the attachment values back and release the helper
		m_spAdaptGridFct->copy_to_surface(*this);
		m_spAdaptGridFct = SPNULL;
	}
}
386 :
387 : template <typename TDomain, typename TAlgebra>
388 : void
389 0 : GridFunction<TDomain, TAlgebra>::
390 : grid_distribution_callback(const GridMessage_Distribution& msg)
391 : {
392 : PROFILE_FUNC();
393 :
394 : #ifdef UG_PARALLEL
395 : GridDataSerializationHandler& sh = msg.serialization_handler();
396 :
397 : switch(msg.msg()){
398 : case GMDT_DISTRIBUTION_STARTS:{
399 : if(redistribution_enabled()){
400 : m_preDistStorageType = this->get_storage_mask();
401 : if(!(this->has_storage_type(PST_CONSISTENT) || this->has_storage_type(PST_UNDEFINED))){
402 : this->change_storage_type(PST_CONSISTENT);
403 : }
404 :
405 : m_spAdaptGridFct = SmartPtr<AdaptionSurfaceGridFunction<TDomain> >(
406 : new AdaptionSurfaceGridFunction<TDomain>(this->domain(), false));
407 : m_spAdaptGridFct->copy_from_surface(*this);
408 : Grid& grid = *domain()->grid();
409 :
410 : typedef typename AdaptionSurfaceGridFunction<TDomain>::AValues AValues;
411 :
412 : if(m_spDDI->max_dofs(VERTEX)){
413 : ComPol_CopyAttachment<VertexLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
414 : pcl::InterfaceCommunicator<VertexLayout> com;
415 : com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
416 : INT_V_SLAVE, INT_V_MASTER, compol);
417 : com.communicate();
418 : sh.add(GeomObjAttachmentSerializer<Vertex, AValues>::
419 : create(grid, m_spAdaptGridFct->value_attachment()));
420 : }
421 : if(m_spDDI->max_dofs(EDGE)){
422 : ComPol_CopyAttachment<EdgeLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
423 : pcl::InterfaceCommunicator<EdgeLayout> com;
424 : com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
425 : INT_V_SLAVE, INT_V_MASTER, compol);
426 : com.communicate();
427 : sh.add(GeomObjAttachmentSerializer<Edge, AValues>::
428 : create(grid, m_spAdaptGridFct->value_attachment()));
429 : }
430 : if(m_spDDI->max_dofs(FACE)){
431 : ComPol_CopyAttachment<FaceLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
432 : pcl::InterfaceCommunicator<FaceLayout> com;
433 : com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
434 : INT_V_SLAVE, INT_V_MASTER, compol);
435 : com.communicate();
436 : sh.add(GeomObjAttachmentSerializer<Face, AValues>::
437 : create(grid, m_spAdaptGridFct->value_attachment()));
438 : }
439 : if(m_spDDI->max_dofs(VOLUME)){
440 : ComPol_CopyAttachment<VolumeLayout, AValues> compol(grid, m_spAdaptGridFct->value_attachment());
441 : pcl::InterfaceCommunicator<VolumeLayout> com;
442 : com.exchange_data(grid.distributed_grid_manager()->grid_layout_map(),
443 : INT_V_SLAVE, INT_V_MASTER, compol);
444 : sh.add(GeomObjAttachmentSerializer<Volume, AValues>::
445 : create(grid, m_spAdaptGridFct->value_attachment()));
446 : }
447 : }
448 : }break;
449 :
450 : case GMDT_DISTRIBUTION_STOPS:
451 : {
452 : PROFILE_BEGIN(grid_func_distribution_stops)
453 : // all grid functions must resize to the current number of dofs
454 : resize_values(num_indices());
455 :
456 : // set layouts
457 : this->set_layouts(m_spDD->layouts());
458 :
459 : if(redistribution_enabled()){
460 : m_spAdaptGridFct->copy_to_surface(*this);
461 : m_spAdaptGridFct = SPNULL;
462 :
463 : if(m_preDistStorageType != this->get_storage_mask()){
464 : if((m_preDistStorageType & PST_ADDITIVE) == PST_ADDITIVE)
465 : this->change_storage_type(PST_ADDITIVE);
466 : else if((m_preDistStorageType & PST_UNIQUE) == PST_UNIQUE)
467 : this->change_storage_type(PST_UNIQUE);
468 : else{
469 : UG_THROW("Can't reestablish storage type!");
470 : }
471 : }
472 : }
473 :
474 : PROFILE_END();
475 : }break;
476 :
477 : default:
478 : break;
479 : }
480 : #endif
481 0 : }
482 :
483 : } // end namespace ug
484 :
485 : #endif /* __H__UG__LIB_DISC__FUNCTION_SPACE__GRID_FUNCTION_IMPL__ */
|