/*
 * Copyright (c) 2014-2015: G-CSC, Goethe University Frankfurt
 * Author: Martin Rupp
 *
 * This file is part of UG4.
 *
 * UG4 is free software: you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License version 3 (as published by the
 * Free Software Foundation) with the following additional attribution
 * requirements (according to LGPL/GPL v3 §7):
 *
 * (1) The following notice must be displayed in the Appropriate Legal Notices
 * of covered and combined works: "Based on UG4 (www.ug4.org/license)".
 *
 * (2) The following notice must be displayed at a prominent place in the
 * terminal output of covered works: "Based on UG4 (www.ug4.org/license)".
 *
 * (3) The following bibliography is recommended for citation and must be
 * preserved in all covered files:
 * "Reiter, S., Vogel, A., Heppner, I., Rupp, M., and Wittum, G. A massively
 * parallel geometric multigrid solver on hierarchically distributed grids.
 * Computing and visualization in science 16, 4 (2013), 151-164"
 * "Vogel, A., Reiter, S., Rupp, M., Nägel, A., and Wittum, G. UG4 -- a novel
 * flexible software system for simulating pde based models on high performance
 * computers. Computing and visualization in science 16, 4 (2013), 165-179"
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#ifndef __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__EXTERNAL_SOLVER_
#define __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__EXTERNAL_SOLVER_

#include "common/common.h"
#include "lib_algebra/operator/interface/matrix_operator_inverse.h"


#ifdef UG_PARALLEL
#include "lib_algebra/parallelization/parallelization.h"
#endif
#include "common/progress.h"
#include "common/util/ostream_util.h"

#include "lib_algebra/operator/linear_solver/linear_solver.h"
#include "lib_algebra/cpu_algebra_types.h"


namespace ug{

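/// Interface for external solver backends.
/// Implementations receive an already converted scalar CPUAlgebra matrix in init()
/// and solve for a correction in apply(); see IExternalSolver below for the
/// block-matrix wrapper that performs this conversion.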
class IExternalSolverImplementation
{
public:
    virtual bool init(const CPUAlgebra::matrix_type &A) = 0;
    virtual bool apply(CPUAlgebra::vector_type &c, const CPUAlgebra::vector_type &d) = 0;
    virtual const char* name() const = 0;
    virtual ~IExternalSolverImplementation() {}
};

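/// Base class for solvers that wrap an external, scalar, double-valued backend.
///
/// The class converts the block matrix of TAlgebra into a scalar CPUAlgebra matrix
/// (mat_preprocess) and flattens vectors accordingly, so that derived classes only
/// have to implement double_name(), double_init() and double_apply(). A minimal
/// sketch of a derived solver follows; the calls my_backend_factorize and
/// my_backend_solve are placeholders for the external library and not part of UG4:
///
/// \code
/// template <typename TAlgebra>
/// class MyExternalSolver : public IExternalSolver<TAlgebra>
/// {
/// public:
///     virtual const char* double_name() const { return "MyExternalSolver"; }
///
///     // receives the scalar matrix built by mat_preprocess
///     virtual void double_init(const CPUAlgebra::matrix_type &mat)
///     {
///         my_backend_factorize(mat);   // placeholder for the external setup call
///     }
///
///     // solves the flattened scalar system for the correction c given the defect d
///     virtual bool double_apply(CPUAlgebra::vector_type &c, const CPUAlgebra::vector_type &d)
///     {
///         return my_backend_solve(c, d);   // placeholder for the external solve call
///     }
/// };
/// \endcode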
template <typename TAlgebra>
class IExternalSolver
    : public IMatrixOperatorInverse<typename TAlgebra::matrix_type,
                                    typename TAlgebra::vector_type>,
      public VectorDebugWritingObject<typename TAlgebra::vector_type>
{
public:

    virtual const char *double_name() const = 0;

    virtual const char* name() const
    {
        return double_name();
    }

    // Algebra type
    typedef TAlgebra algebra_type;

    // Vector type
    typedef typename TAlgebra::vector_type vector_type;

    // Matrix type
    typedef typename TAlgebra::matrix_type matrix_type;

    using IMatrixOperatorInverse<matrix_type, vector_type>::init;

public:
    // Constructor
    IExternalSolver()
        : m_bUseConsistentInterfaces(false), m_bDisablePreprocessing(false)
    {
        m_size = 0;
        m_blockSize = 0;
    }

    // Clone

    SmartPtr<ILinearIterator<vector_type> > clone()
    {
        UG_THROW("IExternalSolver: clone() not implemented.");
        return SPNULL;
    }

    /// returns whether parallel solving is supported
    virtual bool supports_parallel() const {return false;}

    /// disable preprocessing (if the underlying matrix has not changed)
    void set_disable_preprocessing(bool bDisable) {m_bDisablePreprocessing = bDisable;}

    void enable_consistent_interfaces(bool enable) { m_bUseConsistentInterfaces = enable; }

    virtual void double_init(const CPUAlgebra::matrix_type &mat) = 0;

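    /// Converts the block matrix A into a scalar CPUAlgebra matrix and passes it to double_init().
    /// In parallel runs the matrix is first made locally solvable: either the interfaces are made
    /// consistent (enable_consistent_interfaces(true)), or slave rows are added to the master rows
    /// and the slave rows are turned into Dirichlet rows.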
    void mat_preprocess(const matrix_type &A)
    {
        // do nothing if preprocessing is disabled
        if (m_bDisablePreprocessing)
            return;

        if(A.num_rows() == 0 || A.num_cols() == 0) { m_size = 0; return; }
        STATIC_ASSERT(matrix_type::rows_sorted, Matrix_has_to_have_sorted_rows);

        CPUAlgebra::matrix_type mat;

#ifdef UG_PARALLEL
        // add slave rows to master rows (on all indices where this is possible)
        matrix_type A_tmp; A_tmp = A;
        if(m_bUseConsistentInterfaces){
            MatMakeConsistentOverlap0(A_tmp);
        } else {
            MatAddSlaveRowsToMasterRowOverlap0(A_tmp);

            // set zero on slaves
            std::vector<IndexLayout::Element> vIndex;
            CollectUniqueElements(vIndex, A.layouts()->slave());
            SetDirichletRow(A_tmp, vIndex);
        }
        // Even after this setting of Dirichlet rows, it is possible that there are
        // zero rows on a proc because of the distribution:
        // For example, if one has a horizontal grid interface between two SHADOW_RIM_COPY
        // vertices, but the shadowing element for the hSlave side is a vMaster (without being in
        // any horizontal interface). Then, as the horizontal interface (on the shadowed level)
        // is not part of the algebraic layouts, the hSlave is not converted into a Dirichlet row
        // by the previous commands.
        // As a first aid, we simply convert any zero row on the current proc into a
        // Dirichlet row.
        // TODO: The corresponding rhs vector entry could be non-zero!
        // It is definitely not set to zero by change_storage_type(PST_UNIQUE), as the index is not
        // contained in the vector layouts either. Still, the defect assembling process might contain
        // a vertex loop and assemble something that is not solution-dependent! What do we do then?
        size_t nInd = A_tmp.num_rows();
        for (size_t i = 0; i < nInd; ++i)
        {
            if (!A_tmp.num_connections(i))
                A_tmp(i,i) = 1.0;
        }

        mat.set_storage_type(PST_ADDITIVE);
        mat.set_layouts(CreateLocalAlgebraLayouts());
#else
        const matrix_type& A_tmp = A;
#endif

        m_size = GetDoubleSparseFromBlockSparse(mat, A_tmp);
        m_blockSize = mat.num_rows()/A_tmp.num_rows();

        if(m_size != 0)
            double_init(mat);
    }

    SmartPtr<MatrixOperator<matrix_type, vector_type> > m_spOperator;
    virtual bool init(SmartPtr<MatrixOperator<matrix_type, vector_type> > Op)
    {
        // remember operator
        m_spOperator = Op;

        // convert the matrix and initialize the external solver
        mat_preprocess(m_spOperator->get_matrix());
        // we're done
        return true;
    }


protected:
    // Preprocess routine
    virtual bool preprocess(SmartPtr<MatrixOperator<matrix_type, vector_type> > pOp)
    {
        matrix_type &A = *pOp;
        mat_preprocess(A);

        return true;
    }

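    /// get_vector/set_vector copy between the (possibly blocked) vector_type of TAlgebra
    /// and the flat scalar CPUAlgebra vectors used by the external backend: each block entry
    /// of size GetSize(v[i]) occupies consecutive scalar entries.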
    void get_vector(CPUAlgebra::vector_type &v_scalar, const vector_type& v)
    {
        for(size_t i=0, k=0; i<v.size(); i++)
        {
            for(size_t j=0; j<GetSize(v[i]); j++)
                v_scalar[k++] = BlockRef(v[i], j);
        }
    }

    void set_vector(CPUAlgebra::vector_type &v_scalar, vector_type& v)
    {
        for(size_t i=0, k=0; i<v.size(); i++)
        {
            for(size_t j=0; j<GetSize(v[i]); j++)
                BlockRef(v[i], j) = v_scalar[k++];
        }
    }

    virtual bool double_apply(CPUAlgebra::vector_type &c, const CPUAlgebra::vector_type &d) = 0;

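    /// apply() flattens the defect d into a scalar vector, calls double_apply() of the backend
    /// and copies the result back into the block correction c. In parallel runs the defect is
    /// first made unique (or consistent, if enable_consistent_interfaces(true) was set) and the
    /// resulting correction is marked consistent.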
    // Stepping routine
    virtual bool apply(vector_type& c, const vector_type& d)
    {
        if (m_size == 0)
        {
#ifdef UG_PARALLEL
            c.set_storage_type(PST_CONSISTENT);
#endif
            return true;
        }
        m_c.resize(m_size);
        m_d.resize(m_size);

#ifdef UG_PARALLEL
        m_d.set_storage_type(PST_ADDITIVE);
        m_c.set_storage_type(PST_CONSISTENT);

        // make defect unique
        SmartPtr<vector_type> spDtmp = d.clone();
        if(m_bUseConsistentInterfaces){
            spDtmp->change_storage_type(PST_CONSISTENT);
        } else {
            spDtmp->change_storage_type(PST_UNIQUE);
        }

        get_vector(m_d, *spDtmp);
#else
        get_vector(m_d, d);
#endif

        m_c.set(0.0);

        double_apply(m_c, m_d);

        set_vector(m_c, c);

#ifdef UG_PARALLEL
        // correction must always be consistent (but is unique by construction,
        // or regarded as such in the case of consistent interfaces)
        c.set_storage_type(PST_UNIQUE);
        c.change_storage_type(PST_CONSISTENT);
#endif
        return true;
    }

    virtual bool apply_return_defect(vector_type& u, vector_type& f)
    {
        // solve u
        if(!apply(u, f)) return false;

        // update defect
        if(!m_spOperator->matmul_minus(f, u))
        {
            UG_LOG("ERROR in 'IExternalSolver::apply_return_defect': "
                   "Cannot apply matmul_minus.\n");
            return false;
        }

        // we're done
        return true;
    }

public:
    using VectorDebugWritingObject<typename TAlgebra::vector_type>::vector_debug_writer;

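    /// The following helpers retrieve node positions from the attached debug writer,
    /// e.g. for external backends that need point coordinates. copy_pos() expands the
    /// positions block-wise so that they match the flattened scalar indexing.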
    int get_dim()
    {
        if(vector_debug_writer().valid() == false) return -1;
        vector_debug_writer()->update_positions();
        return vector_debug_writer()->get_dim();
    }

    template<int Tdim>
    bool get_positions(std::vector<MathVector<Tdim> > &coord)
    {
        UG_COND_THROW(vector_debug_writer().valid() == false, "no debug writer set.");
        int dim = get_dim();
        UG_COND_THROW(dim != Tdim, "wrong dimension");

        return copy_pos<Tdim, Tdim>(coord, get_positions<Tdim>());
    }

    bool get_positions3(std::vector<MathVector<3> > &coord)
    {
        UG_COND_THROW(vector_debug_writer().valid() == false, "no debug writer set.");
        int dim = get_dim();
        switch(dim)
        {
            case 1:
                return copy_pos(coord, get_positions<1>());
            case 2:
                return copy_pos(coord, get_positions<2>());
            case 3:
                return copy_pos(coord, get_positions<3>());
            default:
                return false;
        }
    }

    template<int dim>
    const std::vector<MathVector<dim> > &get_positions()
    {
        if(vector_debug_writer().valid())
        {
            vector_debug_writer()->update_positions();
            return vector_debug_writer()->template get_positions<dim>();
        }
        else UG_THROW("no debug_writer!");
    }

    template<int dim1, int dim2>
    bool copy_pos(std::vector<MathVector<dim1> > &dest, const std::vector<MathVector<dim2> > &src)
    {
        UG_COND_THROW(m_size == 0 || m_blockSize == 0, "not initialized");
        UG_COND_THROW(dim1 < dim2, "loss of data");

        dest.resize(m_size);
        for(size_t i=0; i<src.size(); i++)
        {
            for(size_t k=0; k<m_blockSize; k++)
            {
                dest[i*m_blockSize + k] = 0;
                for(size_t j=0; j<dim2; j++)
                    dest[i*m_blockSize + k][j] = src[i][j];
            }
        }
        return true;
    }


protected:
    // Postprocess routine
    virtual bool postprocess() {return true;}

    bool m_bUseConsistentInterfaces;
    CPUAlgebra::vector_type m_c, m_d;
    size_t m_size;
    size_t m_blockSize;
    bool m_bDisablePreprocessing;
};

} // namespace ug

#endif /* __H__UG__LIB_DISC__OPERATOR__LINEAR_OPERATOR__EXTERNAL_SOLVER_ */