/*
 * Copyright (c) 2017-now: G-CSC, Goethe University Frankfurt
 * Authors: Arne Nägel
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__UZAWA__
#define __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__UZAWA__

#include <algorithm>  // std::find
#include <iomanip>    // std::setfill, std::setw
#include <iostream>   // std::cout
#include <sstream>    // std::stringstream
#include <string>
#include <vector>

#include "lib_algebra/operator/algebra_debug_writer.h"
#include "lib_algebra/adapter/slicing.h"

#include "common/util/string_util.h"  // string tokenizer
#include "bridge/util_overloaded.h"

namespace ug{


typedef std::vector<bool> binary_grouping_vector;

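/// Collects the DoF indices of the selected (Schur) components on every
/// grouping object of type TGroupObj (e.g. each vertex) of the grid function c.
/// Note: the complementary components in vRemainCmp are currently not used
/// here; the parameter is kept for symmetry with the calling code.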
template<typename TGroupObj, typename TGridFunction>
void ExtractByObject(std::vector<DoFIndex>& vIndex,
                     const TGridFunction& c,
                     const std::vector<size_t>& vFullRowCmp,
                     const std::vector<size_t>& vRemainCmp)
{
	// loop all grouping objects
	typedef typename TGridFunction::template traits<TGroupObj>::const_iterator GroupObjIter;
	for(GroupObjIter iter = c.template begin<TGroupObj>();
	    iter != c.template end<TGroupObj>(); ++iter)
	{
		// get grouping obj
		TGroupObj* groupObj = *iter;

		// get all dof indices on obj associated to cmps
		for(size_t f = 0; f < vFullRowCmp.size(); ++f)
			c.inner_dof_indices(groupObj, vFullRowCmp[f], vIndex, false);
	}
}

template <typename TGridFunction>
class UzawaSlicing : public SlicingData<binary_grouping_vector, 2> {
public:

	typedef SlicingData<binary_grouping_vector, 2> base_type;

	/// Constructs the slicing; the actual index split is computed later in init().
	UzawaSlicing(const std::vector<std::string>& vSchurCmps)
	{}

	void init(const TGridFunction &u, const std::vector<std::string>& vSchurCmps);

protected:

};



template <typename TGridFunction>
void UzawaSlicing<TGridFunction>::
init(const TGridFunction &u, const std::vector<std::string>& vSchurCmps)
{
	UG_DLOG(SchurDebug, 3, "UzawaSlicing::init" << std::endl);

	ConstSmartPtr<DoFDistributionInfo> ddinfo =
			u.approx_space()->dof_distribution_info();

	// fill this vector
	std::vector<DoFIndex> vIndex;

	// ids of selected functions
	std::vector<size_t> vFullRowCmp;

	// complementing functions
	std::vector<size_t> vRemainCmp;

	// tokenize passed functions
	UG_ASSERT(!vSchurCmps.empty(), "UzawaSlicing::init: No components set.");

	// get ids of selected functions
	for(size_t i = 0; i < vSchurCmps.size(); ++i)
		vFullRowCmp.push_back(ddinfo->fct_id_by_name(vSchurCmps[i].c_str()));

	// obtain remaining (non-Schur) functions
	for(size_t f = 0; f < ddinfo->num_fct(); ++f)
		if(std::find(vFullRowCmp.begin(), vFullRowCmp.end(), f) == vFullRowCmp.end())
			vRemainCmp.push_back(f);

	// extract indices for each type of grouping object
	for(int d = VERTEX; d <= VOLUME; ++d)
	{
		// only extract if carrying dofs
		int iCarryDoFs = 0;
		for(size_t f = 0; f < vFullRowCmp.size(); ++f)
			iCarryDoFs += ddinfo->max_fct_dofs(vFullRowCmp[f], d);
		if(iCarryDoFs == 0) continue;

		// extract
		switch(d){
			case VERTEX: ExtractByObject<Vertex, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp); break;
			case EDGE:   ExtractByObject<Edge,   TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp); break;
			case FACE:   ExtractByObject<Face,   TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp); break;
			case VOLUME: ExtractByObject<Volume, TGridFunction>(vIndex, u, vFullRowCmp, vRemainCmp); break;
			default: UG_THROW("UzawaSlicing::init: invalid dimension " << d);
		}

		UG_DLOG(SchurDebug, 4, "Found " << vIndex.size() << " indices (out of " << u.size() << ") for Schur block after dimension " << d << std::endl);
	}

	// todo: replace by InputIterator
	base_type::slice_desc_type_vector mapping(u.size(), false);

	for (size_t i = 0; i < vIndex.size(); ++i)
	{
		UG_ASSERT(vIndex[i][1] == 0, "Assuming block size 1 (e.g. CPUAlgebra)...");
		mapping[vIndex[i][0]] = true;
	}

	UG_DLOG(SchurDebug, 3, "UzawaSlicing::init::set_types" << std::endl);
	base_type::set_types(mapping, true);
}
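
/* Usage sketch (illustrative only; the grid function 'u', its vector_type and
 * the component name "p" are placeholders, not prescribed by this class):
 *
 *   std::vector<std::string> schurCmps(1, "p");
 *   UzawaSlicing<TGridFunction> slicing(schurCmps);
 *   slicing.init(u, schurCmps);
 *
 *   // clone only the "p" (Schur) part of an algebra vector:
 *   SmartPtr<vector_type> pPart = slicing.slice_clone<vector_type>(u, true);
 */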



/*! Base class for an Uzawa iteration */
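/*! Reading aid (notation matching the blocks extracted in extract_sub_matrices):
 * for a saddle-point system
 * \f[
 *   \begin{pmatrix} A_{11} & B_{12} \\ B_{21} & C_{22} \end{pmatrix}
 *   \begin{pmatrix} u \\ p \end{pmatrix} =
 *   \begin{pmatrix} f \\ g \end{pmatrix},
 * \f]
 * one (inexact) Uzawa step applies (i) a forward iteration for
 * \f$ \tilde A_{11} u = f \f$, (ii) an iteration for an approximate Schur
 * complement, \f$ \tilde S_{22}\, p = g - B_{21} u \f$, and (iii) optionally
 * a backward iteration for \f$ \tilde A_{11} u = f - B_{12} p \f$.
 */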

template <typename TDomain, typename TAlgebra>
class UzawaBase : public IPreconditioner<TAlgebra>
{

	static const bool UZAWA_CMP_SCHUR = true;
	static const bool UZAWA_CMP_DEFAULT = false;

public:
	/// World dimension
	static const int dim = TDomain::dim;
	typedef GridFunction<TDomain, TAlgebra> TGridFunction;
	typedef typename TGridFunction::element_type TElement;
	typedef typename TGridFunction::side_type TSide;


	/// Algebra types
	/// \{
	typedef TAlgebra algebra_type;
	typedef typename TAlgebra::vector_type vector_type;
	typedef typename TAlgebra::matrix_type matrix_type;
	typedef MatrixOperator<matrix_type, vector_type> matrix_operator_type;
	/// \}

protected:
	typedef IPreconditioner<TAlgebra> base_type;


public:
	/// default constructor
	UzawaBase(const std::vector<std::string>& vSchurCmp)
	: m_bInit(false), m_vSchurCmp(vSchurCmp), m_slicing(vSchurCmp), m_dSchurUpdateWeight(0.0), m_dbgIterCnt(0)
	{
		init_block_operators();
		for (size_t i = 0; i < vSchurCmp.size(); ++i)
		{ std::cout << "Comp = " << vSchurCmp[i] << std::endl; }
	}

	/// constructor taking a string of component names (tokenized internally)
	UzawaBase(const char *sSchurCmp)
	: m_bInit(false), m_vSchurCmp(TokenizeTrimString(sSchurCmp)), m_slicing(m_vSchurCmp), m_dSchurUpdateWeight(0.0), m_dbgIterCnt(0)
	{
		init_block_operators();
		std::cout << "Comp = " << sSchurCmp << std::endl;
	}


	virtual ~UzawaBase()
	{}

	/// Overriding base type
	virtual bool init(SmartPtr<ILinearOperator<vector_type> > J, const vector_type& u)
	{
		UG_DLOG(SchurDebug, 4, "UzawaBase::init(J,u)" << std::endl);

		SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp =
				J.template cast_dynamic<MatrixOperator<matrix_type, vector_type> >();

		const TGridFunction* pVecU = dynamic_cast<const TGridFunction*>(&u);

		UG_ASSERT(pOp.valid(), "Need a matrix based operator here!");
		UG_ASSERT(pVecU != NULL, "Need a GridFunction here!");
		base_type::init(J, u);  // calls preprocess

		// extract sub-matrices afterwards
		init_in_first_step(*pOp, *pVecU);
		m_bInit = true;

		return true;
	}

	SmartPtr<ILinearIterator<typename TAlgebra::vector_type> > clone();

protected:
	// Interface for IPreconditioner

	/// name
	virtual const char* name() const {return "IUzawaBase";}

	/// initializes the preconditioner
	virtual bool preprocess(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp);

	/// computes a new correction c = B*d
	virtual bool step(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp, vector_type& c, const vector_type& d);

	/// cleans the operator
	virtual bool postprocess();

public:

	/// returns whether parallel solving is supported
	virtual bool supports_parallel() const
	{
		UG_ASSERT(m_spForwardInverse.valid() && m_spSchurComplementInverse.valid(), "Need valid iter");
		return (m_spForwardInverse->supports_parallel()
				&& m_spSchurComplementInverse->supports_parallel());
	}

	/// set approximate inverse for the forward sweep
	void set_forward_iter(SmartPtr<ILinearIterator<vector_type> > iter)
	{ m_spForwardInverse = iter; }

	/// set approximate inverse for the Schur complement
	void set_schur_iter(SmartPtr<ILinearIterator<vector_type> > iter)
	{ m_spSchurComplementInverse = iter; }

	/// set approximate inverse for the backward sweep
	void set_backward_iter(SmartPtr<ILinearIterator<vector_type> > iter)
	{ m_spBackwardInverse = iter; }

	/// set assembly for the (weighted) update of the Schur operator
	void set_schur_operator_update(SmartPtr<AssembledLinearOperator<TAlgebra> > spSchurUpdateOp, double theta = 0.0)
	{ m_spSchurUpdateOp = spSchurUpdateOp; m_dSchurUpdateWeight = theta; }

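	/* Configuration sketch (illustrative only; the GaussSeidel smoother and the
	 * component name "p" are assumptions, not prescribed by this class):
	 *
	 *   SmartPtr<UzawaBase<TDomain, TAlgebra> > uzawa =
	 *       make_sp(new UzawaBase<TDomain, TAlgebra>("p"));
	 *   uzawa->set_forward_iter(make_sp(new GaussSeidel<TAlgebra>()));
	 *   uzawa->set_schur_iter(make_sp(new GaussSeidel<TAlgebra>()));
	 *   uzawa->set_backward_iter(make_sp(new GaussSeidel<TAlgebra>()));
	 *   // optionally add theta * (mass matrix) to the C22 block:
	 *   // uzawa->set_schur_operator_update(spSchurUpdateOp, theta);
	 */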
	/// allocate block matrix operators
	void init_block_operators()
	{
		m_auxMat[AUX_A11] = make_sp(new matrix_operator_type());
		m_auxMat[B12]     = make_sp(new matrix_operator_type());
		m_auxMat[B21]     = make_sp(new matrix_operator_type());
		m_auxMat[AUX_C22] = make_sp(new matrix_operator_type());

		m_auxMat[AUX_M22] = make_sp(new matrix_operator_type());
	}

	/// extract block matrix operators (called once)
	void extract_sub_matrices(const matrix_type& K, const TGridFunction& c);

	/// Update the C22 block: C22 += theta * M22, where M22 is assembled via
	/// the discretization attached to m_spSchurUpdateOp (e.g. a pressure mass matrix).
	void extract_schur_update(const matrix_type& K, const TGridFunction& c)
	{
		if (m_spSchurUpdateOp.invalid()) return;

		const GridLevel clevel = c.grid_level();
		my_write_debug(K, "init_KFull_ForSchurUpdate", clevel, clevel);

		// assemble auxiliary matrix
		SmartPtr<IAssemble<TAlgebra> > spAss = m_spSchurUpdateOp->discretization();
		matrix_type tmpM;

		//spAss->ass_tuner()->set_force_regular_grid(true);
		//spAss->assemble_jacobian(tmpM, c, clevel);
		spAss->assemble_mass_matrix(tmpM, c, clevel);
		//spAss->ass_tuner()->set_force_regular_grid(false);

		my_write_debug(tmpM, "init_MFull_ForSchurUpdate", clevel, clevel);
		UG_DLOG(SchurDebug, 5, "extract_schur_update on level " << clevel << ", " << tmpM);

		/* Matrices C22 and M22 are both additive; thus, we may add them directly.
		 * (Note that preconditioners requiring a consistent diagonal must be called afterwards!) */
		m_slicing.get_matrix(tmpM, UZAWA_CMP_SCHUR, UZAWA_CMP_SCHUR,
							 *(m_auxMat[AUX_M22].template cast_static<matrix_type>()) );

		if (m_spSliceDebugWriter[UZAWA_CMP_SCHUR].valid()) {
			m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->write_matrix(*m_auxMat[AUX_M22],
					"UZAWA_init_M22_ForSchurUpdate.mat");
		}

		UG_DLOG(SchurDebug, 4, "AUX_C22:"); CheckRowIterators(*m_auxMat[AUX_C22]);
		UG_DLOG(SchurDebug, 4, "AUX_M22:"); CheckRowIterators(*m_auxMat[AUX_M22]);

		// add matrix (rows with Dirichlet entries are left untouched)
		MatAddNonDirichlet<matrix_type>(*m_auxMat[AUX_C22], 1.0, *m_auxMat[AUX_C22], m_dSchurUpdateWeight, *m_auxMat[AUX_M22]);

		if (m_spSliceDebugWriter[UZAWA_CMP_SCHUR].valid()) {
			m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->write_matrix(*m_auxMat[AUX_C22],
					"UZAWA_init_C22_AfterSchurUpdate.mat");
		}
	}

	/// initialize the block iterations; forward and backward sweeps share the A11 block
	void init_block_iterations()
	{
		if (m_spForwardInverse.valid())         { m_spForwardInverse->init(m_auxMat[AUX_A11]); }
		if (m_spSchurComplementInverse.valid()) { m_spSchurComplementInverse->init(m_auxMat[AUX_C22]); }
		if (m_spBackwardInverse.valid())        { m_spBackwardInverse->init(m_auxMat[AUX_A11]); }
	}

	/*
	void power_iteration(ConstSmartPtr<vector_type> usol, ConstSmartPtr<vector_type> dsol)
	{
		SmartPtr<vector_type> uschur(m_slicing.template slice_clone<vector_type>(*usol, UZAWA_CMP_SCHUR) );
		SmartPtr<vector_type> dschur(m_slicing.template slice_clone<vector_type>(*dsol, UZAWA_CMP_SCHUR) );
		SmartPtr<vector_type> ueigen(m_slicing.template slice_clone<vector_type>(*usol, UZAWA_CMP_DEFAULT) );
		SmartPtr<vector_type> deigen(m_slicing.template slice_clone<vector_type>(*usol, UZAWA_CMP_DEFAULT) );

		// initialize with random guess
		ueigen->set_random(-0.5, 0.5);

		// dschur = B12*ueigen
		m_spForwardInverse->apply(*uschur, *dschur);

		// deigen = B21 * uschur
	}
	*/

	void postprocess_block_iterations()
	{}

protected:
	void init_in_first_step(const matrix_type &pMat, const TGridFunction &pC)
	{
		UG_DLOG(SchurDebug, 4, "step-init: Size=" << m_vSchurCmp.size() << std::endl);
		m_slicing.init(pC, m_vSchurCmp);

		if (debug_writer().valid())
		{
			if (m_spGridFunctionDebugWriter.valid())
			{
				UG_LOG("Valid grid function writer for " << m_spGridFunctionDebugWriter->grid_level()
					   << " on level " << pC.grid_level() << std::endl);

				GridLevel gLevel = m_spGridFunctionDebugWriter->grid_level();
				m_spGridFunctionDebugWriter->set_grid_level(pC.grid_level());
				m_spGridFunctionDebugWriter->update_positions();
				m_spGridFunctionDebugWriter->set_grid_level(gLevel);
			}

			switch(debug_writer()->get_dim())
			{
				case 1: reset_slice_debug_writers<1>(); break;
				case 2: reset_slice_debug_writers<2>(); break;
				case 3: reset_slice_debug_writers<3>(); break;
				default: UG_LOG("Invalid dimension for debug writer!" << std::endl);
			}
		}

		extract_sub_matrices(pMat, pC);
		extract_schur_update(pMat, pC);

		// initialize preconditioners
		init_block_iterations();
	}
protected:

	/// flag indicating whether the operator must still be initialized
	bool m_bInit;

	/// vector of strings identifying the components used for the Schur complement
	std::vector<std::string> m_vSchurCmp;

	/// object for slicing routines
	UzawaSlicing<TGridFunction> m_slicing;

	/// iteration for the forward system
	SmartPtr<ILinearIterator<vector_type> > m_spForwardInverse;

	/// iteration for the (approximate) Schur complement system
	SmartPtr<ILinearIterator<vector_type> > m_spSchurComplementInverse;

	/// iteration for the backward system
	SmartPtr<ILinearIterator<vector_type> > m_spBackwardInverse;

	/// assembly for (additive) Schur complement update
	SmartPtr<AssembledLinearOperator<TAlgebra> > m_spSchurUpdateOp;

	/// scaling factor for (additive) Schur complement update
	double m_dSchurUpdateWeight;

	/// iteration counter for debug output
	int m_dbgIterCnt;

	enum BLOCK{AUX_A11, B12, B21, AUX_C22, AUX_M22, AUX_ARRAY_SIZE};
	SmartPtr<matrix_operator_type> m_auxMat[AUX_ARRAY_SIZE];  ///< auxiliary matrices (not cloned!)


	void my_write_debug(const matrix_type& mat, std::string name, const TGridFunction& rTo, const TGridFunction& rFrom)
	{
		my_write_debug(mat, name, rTo.grid_level(), rFrom.grid_level());
	}

	void my_write_debug(const matrix_type& mat, std::string name, const GridLevel& glTo, const GridLevel& glFrom)
	{
		PROFILE_FUNC_GROUP("debug");

		if(m_spGridFunctionDebugWriter.invalid()) return;

		// build name
		std::stringstream ss;
		ss << "UZAWA_" << name << GridLevelAppendix(glTo);
		if(glFrom != glTo) ss << GridLevelAppendix(glFrom);
		ss << ".mat";

		// write
		GridLevel currGL = m_spGridFunctionDebugWriter->grid_level();
		m_spGridFunctionDebugWriter->set_grid_levels(glFrom, glTo);
		m_spGridFunctionDebugWriter->write_matrix(mat, ss.str().c_str());
		m_spGridFunctionDebugWriter->set_grid_level(currGL);
	}

	virtual void my_write_debug(const TGridFunction& rGF, std::string name)
	{
		PROFILE_FUNC_GROUP("debug");

		if(m_spGridFunctionDebugWriter.invalid()) return;

		// build name; advance the debug counter with each written vector
		GridLevel gl = rGF.grid_level();
		std::stringstream ss;
		ss << "UZAWA_" << name << GridLevelAppendix(gl);
		ss << "_i" << std::setfill('0') << std::setw(3) << m_dbgIterCnt++ << ".vec";

		// write
		GridLevel currGL = m_spGridFunctionDebugWriter->grid_level();
		m_spGridFunctionDebugWriter->set_grid_level(gl);
		m_spGridFunctionDebugWriter->write_vector(rGF, ss.str().c_str());
		m_spGridFunctionDebugWriter->set_grid_level(currGL);
	}

protected:
	using base_type::debug_writer;

	void set_debug(SmartPtr<IDebugWriter<algebra_type> > spDebugWriter);

	void create_slice_debug_writers();
	template <int d> void reset_slice_debug_writers();

	SmartPtr<GridFunctionDebugWriter<TDomain, TAlgebra> > m_spGridFunctionDebugWriter;
	SmartPtr<IDebugWriter<algebra_type> > m_spSliceDebugWriter[2];

#ifdef UG_PARALLEL
	/// consistent copy of the original matrix (slave rows set to Dirichlet rows)
	matrix_type m_A;
#endif
};


template <typename TDomain, typename TAlgebra>
bool UzawaBase<TDomain, TAlgebra>::
preprocess(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp)
{
#ifdef UG_PARALLEL
	if(pcl::NumProcs() > 1)
	{
		// copy original matrix
		MakeConsistent(*pOp, m_A);
		// set zero on slaves
		std::vector<IndexLayout::Element> vIndex;
		CollectUniqueElements(vIndex, m_A.layouts()->slave());
		SetDirichletRow(m_A, vIndex);
	}
#endif

	// more preprocessing is based on grid functions;
	// thus, it must be performed later...
	//m_bInit = false;

	return true;
}
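
/* Note on the parallel branch above (an interpretation, not authoritative):
 * m_A is a consistent copy of the original operator in which slave rows are
 * replaced by Dirichlet (identity) rows; together with the unique defect
 * built in step(), the extracted sub-blocks then act on uniquely owned
 * entries only. */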


//! Single step of an (inexact) Uzawa iteration
template <typename TDomain, typename TAlgebra>
bool UzawaBase<TDomain, TAlgebra>::
step(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp, vector_type& c, const vector_type& d)
{
	// Check that a grid function was passed.
	GridFunction<TDomain, TAlgebra>* pC = dynamic_cast<GridFunction<TDomain, TAlgebra>*>(&c);
	if(pC == NULL) UG_THROW("UzawaBase: expects correction to be a GridFunction.");

	const vector_type* pD = &d;
	const matrix_type* pMat = pOp.get();

#ifdef UG_PARALLEL
	SmartPtr<vector_type> spDtmp;
	if(pcl::NumProcs() > 1)
	{
		// Make defect unique.
		spDtmp = d.clone();
		spDtmp->change_storage_type(PST_UNIQUE);
		pD = spDtmp.get();
		pMat = &m_A;
	}
#endif

	// Initialize on first call.
	if(!m_bInit)
	{
		init_in_first_step(*pMat, *pC);
		m_bInit = true;
	}

	// Obtain defects.
	SmartPtr<vector_type> ff(m_slicing.template slice_clone<vector_type>(*pD, UZAWA_CMP_DEFAULT) );
	SmartPtr<vector_type> gg(m_slicing.template slice_clone<vector_type>(*pD, UZAWA_CMP_SCHUR) );

	// Clear corrections.
	SmartPtr<vector_type> cRegular(m_slicing.template slice_clone_without_values<vector_type>(*pC, UZAWA_CMP_DEFAULT));
	SmartPtr<vector_type> cSchur(m_slicing.template slice_clone_without_values<vector_type>(*pC, UZAWA_CMP_SCHUR));

	pC->set(0.0);
	cRegular->set(0.0);
	cSchur->set(0.0);

	// Step 1: Solve problem \tilde A11 \tilde u = f.
	my_write_debug(*pC, "Correction0");
	my_write_debug(*(const GridFunction<TDomain, TAlgebra>*) pD, "Defect0");  // pD refers to a grid function (or a clone thereof)
	if (m_spForwardInverse.valid())
	{
		// Solve problem; afterwards compute g := (g - B21 \tilde u).
		// WARNING: Should use exact A11.
		// m_spForwardInverse->apply_update_defect(*cRegular, *ff);
		m_spForwardInverse->apply(*cRegular, *ff);

		m_slicing.template set_vector_slice<vector_type>(*cRegular, *pC, UZAWA_CMP_DEFAULT);
		my_write_debug(*pC, "Correction1");

		// Update g := (g - B21 \tilde u^k).
		m_auxMat[B21]->apply_sub(*gg, *cRegular);
	}

	// Step 2: Solve problem (\tilde S22) pDelta = (g - B21 \tilde u).
	if (m_spSchurComplementInverse.valid()) {

		//m_auxMat[AUX_C22]->apply_sub(*gg, *cRegular);
		m_spSchurComplementInverse->apply(*cSchur, *gg);
		m_slicing.template set_vector_slice<vector_type>(*cSchur, *pC, UZAWA_CMP_SCHUR);
		my_write_debug(*pC, "Correction2");

		// Update defect f := (f - B12 pDelta).
		m_auxMat[B12]->apply_sub(*ff, *cSchur);
	}

	// Step 3: Solve problem \tilde A11 uDelta^{k+1} = (f - \tilde A11 u^k) - B12 p^k.
	if (m_spBackwardInverse.valid()) {

		// ff has been updated above
		m_spBackwardInverse->apply(*cRegular, *ff);
		// m_slicing.template add_vector_slice<vector_type>(*cRegular, *pC, UZAWA_CMP_DEFAULT);
		m_slicing.template set_vector_slice<vector_type>(*cRegular, *pC, UZAWA_CMP_DEFAULT);
		my_write_debug(*pC, "Correction3");
	}

#ifdef UG_PARALLEL
	pC->set_storage_type(PST_UNIQUE);
#endif

	return true;
}
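
/* Summary of one step, acting on the slices f = d|_default and g = d|_schur:
 *   1. c_u := \tilde A11^{-1} f,   then  g := g - B21 c_u
 *   2. c_p := \tilde S22^{-1} g,   then  f := f - B12 c_p
 *   3. c_u := \tilde A11^{-1} f    (backward sweep, replaces the default slice)
 * Each stage is skipped if the corresponding approximate inverse is not set. */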

template <typename TDomain, typename TAlgebra>
void UzawaBase<TDomain, TAlgebra>::
extract_sub_matrices(const matrix_type& K, const TGridFunction& c)
{
	// copy sub-matrices
	m_slicing.get_matrix(K, UZAWA_CMP_DEFAULT, UZAWA_CMP_DEFAULT, *(m_auxMat[AUX_A11].template cast_static<matrix_type>()) );
	m_slicing.get_matrix(K, UZAWA_CMP_DEFAULT, UZAWA_CMP_SCHUR,   *(m_auxMat[B12].template cast_static<matrix_type>()) );
	m_slicing.get_matrix(K, UZAWA_CMP_SCHUR,   UZAWA_CMP_DEFAULT, *(m_auxMat[B21].template cast_static<matrix_type>()) );
	m_slicing.get_matrix(K, UZAWA_CMP_SCHUR,   UZAWA_CMP_SCHUR,   *(m_auxMat[AUX_C22].template cast_static<matrix_type>()) );

	UG_DLOG(SchurDebug, 4, "A11 =" << *m_auxMat[AUX_A11] << ", ");
	UG_DLOG(SchurDebug, 4, "B12 =" << *m_auxMat[B12] << ", ");
	UG_DLOG(SchurDebug, 4, "B21 =" << *m_auxMat[B21] << ", ");
	UG_DLOG(SchurDebug, 4, "C22 =" << *m_auxMat[AUX_C22] << std::endl);

#ifdef UG_PARALLEL
	// copy storage mask
	uint mask_K = K.get_storage_mask();
	m_auxMat[AUX_A11]->set_storage_type(mask_K);
	m_auxMat[B12]->set_storage_type(mask_K);
	m_auxMat[B21]->set_storage_type(mask_K);
	m_auxMat[AUX_C22]->set_storage_type(mask_K);

	// extract and set layouts
	SmartPtr<AlgebraLayouts> defaultLayouts = m_slicing.create_slice_layouts(K.layouts(), UZAWA_CMP_DEFAULT);
	SmartPtr<AlgebraLayouts> schurLayouts   = m_slicing.create_slice_layouts(K.layouts(), UZAWA_CMP_SCHUR);

	m_auxMat[AUX_A11]->set_layouts(defaultLayouts);
	m_auxMat[AUX_C22]->set_layouts(schurLayouts);
#endif

	// write matrices for debug, if applicable
	if (m_spSliceDebugWriter[UZAWA_CMP_DEFAULT].valid()) {
		m_spSliceDebugWriter[UZAWA_CMP_DEFAULT]->write_matrix(*m_auxMat[AUX_A11], "UZAWA_init_A11_AfterExtract.mat");
	}
	//write_debug(*m_auxMat[B12], "Uzawa_init_B12_AfterExtract");
	//write_debug(*m_auxMat[B21], "Uzawa_init_B21_AfterExtract");
	if (m_spSliceDebugWriter[UZAWA_CMP_SCHUR].valid()) {
		m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->write_matrix(*m_auxMat[AUX_C22], "UZAWA_init_C22_AfterExtract.mat");
	}
}


template <typename TDomain, typename TAlgebra>
bool UzawaBase<TDomain, TAlgebra>::
postprocess()
{
	postprocess_block_iterations();
	m_bInit = false;

	return true;
}



template <typename TDomain, typename TAlgebra>
SmartPtr<ILinearIterator<typename TAlgebra::vector_type> >
UzawaBase<TDomain, TAlgebra>::clone()
{
	SmartPtr<UzawaBase<TDomain, TAlgebra> >
		newInst(new UzawaBase<TDomain, TAlgebra>(m_vSchurCmp));

	newInst->set_debug(debug_writer());

	// clone approximate inverses (no down-cast needed: clone() already returns an ILinearIterator)
	newInst->m_spForwardInverse         = (m_spForwardInverse.valid())         ? m_spForwardInverse->clone()         : SPNULL;
	newInst->m_spSchurComplementInverse = (m_spSchurComplementInverse.valid()) ? m_spSchurComplementInverse->clone() : SPNULL;
	newInst->m_spBackwardInverse        = (m_spBackwardInverse.valid())        ? m_spBackwardInverse->clone()        : SPNULL;

	// todo: need to clone here?
	newInst->m_spSchurUpdateOp = m_spSchurUpdateOp;
	newInst->m_dSchurUpdateWeight = m_dSchurUpdateWeight;

	return newInst;
}



template <typename TDomain, typename TAlgebra>
void UzawaBase<TDomain, TAlgebra>::
create_slice_debug_writers()
{
	std::string basePath = debug_writer()->get_base_dir();

	m_spSliceDebugWriter[UZAWA_CMP_DEFAULT] = make_sp(new AlgebraDebugWriter<algebra_type>);
	m_spSliceDebugWriter[UZAWA_CMP_DEFAULT]->set_base_dir(basePath.c_str());

	m_spSliceDebugWriter[UZAWA_CMP_SCHUR] = make_sp(new AlgebraDebugWriter<algebra_type>);
	m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->set_base_dir(basePath.c_str());
}

template <typename TDomain, typename TAlgebra>
template <int dim>
void UzawaBase<TDomain, TAlgebra>::
reset_slice_debug_writers()
{
	UG_LOG("reset_slice_debug_writers" << std::endl);

	if (m_spSliceDebugWriter[UZAWA_CMP_DEFAULT].invalid() ||
		m_spSliceDebugWriter[UZAWA_CMP_SCHUR].invalid()) return;

	const std::vector<MathVector<dim> > &positions =
			(m_spGridFunctionDebugWriter.valid()) ?
			m_spGridFunctionDebugWriter->template get_positions<dim>() :
			debug_writer()->template get_positions<dim>();
	std::vector<MathVector<dim> > cmpPositions[2];

	m_slicing.get_vector_slice(positions, UZAWA_CMP_DEFAULT, cmpPositions[UZAWA_CMP_DEFAULT]);
	m_spSliceDebugWriter[UZAWA_CMP_DEFAULT]->template set_positions<dim>(cmpPositions[UZAWA_CMP_DEFAULT]);

	m_slicing.get_vector_slice(positions, UZAWA_CMP_SCHUR, cmpPositions[UZAWA_CMP_SCHUR]);
	m_spSliceDebugWriter[UZAWA_CMP_SCHUR]->template set_positions<dim>(cmpPositions[UZAWA_CMP_SCHUR]);
}

template <typename TDomain, typename TAlgebra>
void UzawaBase<TDomain, TAlgebra>::set_debug(SmartPtr<IDebugWriter<algebra_type> > spDebugWriter)
{
	base_type::set_debug(spDebugWriter);
	m_spGridFunctionDebugWriter = debug_writer().template cast_dynamic<GridFunctionDebugWriter<TDomain, TAlgebra> >();

	if (m_spGridFunctionDebugWriter.invalid()) return;

	create_slice_debug_writers();

	debug_writer()->update_positions();
	switch(debug_writer()->get_dim())
	{
		case 1: reset_slice_debug_writers<1>(); break;
		case 2: reset_slice_debug_writers<2>(); break;
		case 3: reset_slice_debug_writers<3>(); break;
		default: UG_LOG("Invalid dimension for debug writer!" << std::endl);
	}
}


} // namespace ug
#endif // __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__UZAWA__