Chi-Tech
dfem_diffusion_solver.cc
#include "chi_runtime.h"
#include "chi_log.h"
#include "utils/chi_timer.h"

#define DefaultBCDirichlet BoundaryCondition{BCType::DIRICHLET,{0,0,0}}

//============================================= constructor
dfem_diffusion::Solver::Solver(const std::string& in_solver_name):
  chi_physics::Solver(in_solver_name, { {"max_iters", int64_t(500) },
                                        {"residual_tolerance", 1.0e-2}})
{}

//============================================= destructor
dfem_diffusion::Solver::~Solver()
{
  VecDestroy(&x_);
  VecDestroy(&b_);
  MatDestroy(&A_);
}

//============================================= Initialize
void dfem_diffusion::Solver::Initialize()
{
  const std::string fname = "dfem_diffusion::Solver::Initialize";
  Chi::log.Log() << "\n"
                 << Chi::program_timer.GetTimeString() << " "
                 << TextName() << ": Initializing DFEM Diffusion solver ";

  //============================================= Get grid
  grid_ptr_ = chi_mesh::GetCurrentHandler().GetGrid();
  if (grid_ptr_ == nullptr)
    throw std::logic_error(std::string(__PRETTY_FUNCTION__) +
                           " No grid defined.");
  const auto& grid = *grid_ptr_;

  Chi::log.Log() << "Global num cells: " << grid.GetGlobalNumberOfCells();

  //============================================= BIDs
  auto globl_unique_bndry_ids = grid.GetDomainUniqueBoundaryIDs();

  const auto& grid_boundary_id_map = grid_ptr_->GetBoundaryIDMap();
  for (uint64_t bndry_id : globl_unique_bndry_ids)
  {
    if (grid_boundary_id_map.count(bndry_id) == 0)
      throw std::logic_error(fname + ": Boundary id " +
                             std::to_string(bndry_id) + " does not have a name-assignment.");

    const auto& bndry_name = grid_boundary_id_map.at(bndry_id);
    if (boundary_preferences_.find(bndry_name) != boundary_preferences_.end())
    {
      BoundaryInfo bndry_info = boundary_preferences_.at(bndry_name);
      auto& bndry_vals = bndry_info.second;
      switch (bndry_info.first)
      {
        case BoundaryType::Reflecting:
        {
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Reflecting, {0., 0., 0.}}));
          Chi::log.Log() << "Boundary " << bndry_name << " set to reflecting.";
          break;
        }
        case BoundaryType::Dirichlet:
        {
          if (bndry_vals.empty()) bndry_vals.resize(1, 0.0);
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Dirichlet, {bndry_vals[0], 0., 0.}}));
          Chi::log.Log() << "Boundary " << bndry_name << " set to dirichlet.";
          break;
        }
        case BoundaryType::Robin:
        {
          if (bndry_vals.size() != 3)
            throw std::logic_error(std::string(__PRETTY_FUNCTION__) +
                                   " Robin needs 3 values in bndry vals.");
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Robin, {bndry_vals[0],
                                                     bndry_vals[1],
                                                     bndry_vals[2]}}));
          Chi::log.Log() << "Boundary " << bndry_name << " set to robin: "
                         << bndry_vals[0] << ","
                         << bndry_vals[1] << "," << bndry_vals[2];
          break;
        }
        case BoundaryType::Vacuum:
        {
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Robin, {0.25, 0.5, 0.}}));
          Chi::log.Log() << "Boundary " << bndry_name << " set to vacuum.";
          break;
        }
        case BoundaryType::Neumann:
        {
          if (bndry_vals.size() != 3)
            throw std::logic_error(std::string(__PRETTY_FUNCTION__) +
                                   " Neumann needs 3 values in bndry vals.");
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Robin, {0., bndry_vals[0],
                                                     bndry_vals[1]}}));
          Chi::log.Log() << "Boundary " << bndry_name << " set to neumann: "
                         << bndry_vals[0];
          break;
        }
      }//switch boundary type
    }
    else
    {
      boundaries_.insert(std::make_pair(
        bndry_id, Boundary{BoundaryType::Dirichlet, {0., 0., 0.}}));
      Chi::log.Log0Verbose1()
        << "No boundary preference found for boundary index " << bndry_name
        << ". Dirichlet boundary added with zero boundary value.";
    }
  }//for bndry

  //============================================= Make SDM
  sdm_ptr_ = chi_math::spatial_discretization::PieceWiseLinearDiscontinuous::New(grid);
  const auto& sdm = *sdm_ptr_;

  const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;

  num_local_dofs_ = sdm.GetNumLocalDOFs(OneDofPerNode);
  num_globl_dofs_ = sdm.GetNumGlobalDOFs(OneDofPerNode);

  Chi::log.Log() << "Num local DOFs: " << num_local_dofs_;
  Chi::log.Log() << "Num globl DOFs: " << num_globl_dofs_;
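  // For a discontinuous FE discretization every cell owns its own copy of its
  // nodes, so these DOF counts are sums of nodes-per-cell rather than counts
  // of unique mesh vertices.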

  //============================================= Initializes Mats and Vecs
  const auto n = static_cast<int64_t>(num_local_dofs_);
  const auto N = static_cast<int64_t>(num_globl_dofs_);

  A_ = chi_math::PETScUtils::CreateSquareMatrix(n, N);
  x_ = chi_math::PETScUtils::CreateVector(n, N);
  b_ = chi_math::PETScUtils::CreateVector(n, N);

  std::vector<int64_t> nodal_nnz_in_diag;
  std::vector<int64_t> nodal_nnz_off_diag;
  sdm.BuildSparsityPattern(nodal_nnz_in_diag, nodal_nnz_off_diag, OneDofPerNode);

  chi_math::PETScUtils::InitMatrixSparsity(A_,
                                           nodal_nnz_in_diag,
                                           nodal_nnz_off_diag);
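  // nodal_nnz_in_diag/off_diag hold, per local row, the number of nonzeros in
  // the diagonal and off-diagonal blocks of the parallel matrix; they are used
  // to preallocate A_ before assembly.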

  if (field_functions_.empty())
  {
    std::string solver_name;
    if (not TextName().empty()) solver_name = TextName() + "-";

    std::string text_name = solver_name + "phi";

    using namespace chi_math;
    auto initial_field_function =
      std::make_shared<chi_physics::FieldFunctionGridBased>(
        text_name,                     //Text name
        sdm_ptr_,                      //Spatial Discretization
        Unknown(UnknownType::SCALAR)); //Unknown/Variable

    field_functions_.push_back(initial_field_function);
    Chi::field_function_stack.push_back(initial_field_function);
  }//if not ff set

}//end initialize

//========================================================== Execute
void dfem_diffusion::Solver::Execute()
{
  Chi::log.Log() << "\nExecuting DFEM IP Diffusion solver";

  const auto& grid = *grid_ptr_;
  const auto& sdm = *sdm_ptr_;

  lua_State* L = Chi::console.GetConsoleState();
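  // Material properties and the source are supplied as Lua functions named
  // "D_coef", "Sigma_a" and "Q_ext"; CallLua_iXYZFunction evaluates them with
  // the cell material id and the quadrature-point coordinates.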

  //============================================= Assemble the system
  VecSet(b_, 0.0); // zero the RHS before assembly

  Chi::log.Log() << "Assembling system: ";
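  // The system assembled below is the symmetric interior penalty (SIP)
  // discretization of -div(D grad phi) + sigma_a phi = q:
  //  - volumetric terms: (D grad b_i . grad b_j) + (sigma_a b_i b_j)
  //  - interior faces:   penalty term kappa [[b_i]][[b_j]] plus the
  //                      consistency and symmetry (gradient) terms
  //  - boundary faces:   Robin/vacuum and Dirichlet contributions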

  for (const auto& cell : grid.local_cells)
  {
    const auto& cell_mapping = sdm.GetCellMapping(cell);
    const size_t num_nodes = cell_mapping.NumNodes();
    const auto cc_nodes = cell_mapping.GetNodeLocations();
    const auto qp_data = cell_mapping.MakeVolumetricQuadraturePointData();

    const auto imat = cell.material_id_;
    MatDbl Acell(num_nodes, VecDbl(num_nodes, 0.0));
    VecDbl cell_rhs(num_nodes, 0.0);

    //==================================== Assemble volumetric terms
    for (size_t i=0; i<num_nodes; ++i)
    {
      const int64_t imap = sdm.MapDOF(cell, i);

      for (size_t j=0; j<num_nodes; ++j)
      {
        const int64_t jmap = sdm.MapDOF(cell, j);
        double entry_aij = 0.0;
        for (size_t qp : qp_data.QuadraturePointIndices())
        {
          entry_aij +=
            (
              CallLua_iXYZFunction(L, "D_coef", imat, qp_data.QPointXYZ(qp)) *
              qp_data.ShapeGrad(i, qp).Dot(qp_data.ShapeGrad(j, qp))
              +
              CallLua_iXYZFunction(L, "Sigma_a", imat, qp_data.QPointXYZ(qp)) *
              qp_data.ShapeValue(i, qp) * qp_data.ShapeValue(j, qp)
            )
            *
            qp_data.JxW(qp);
        }//for qp
        MatSetValue(A_, imap, jmap, entry_aij, ADD_VALUES);
      }//for j
      double entry_rhs_i = 0.0;
      for (size_t qp : qp_data.QuadraturePointIndices())
        entry_rhs_i += CallLua_iXYZFunction(L, "Q_ext", imat, qp_data.QPointXYZ(qp))
                       * qp_data.ShapeValue(i, qp) * qp_data.JxW(qp);
      VecSetValue(b_, imap, entry_rhs_i, ADD_VALUES);
    }//for i
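
    // Face terms couple the cell to its face neighbors: interior faces get the
    // SIP penalty and gradient (consistency/symmetry) terms, boundary faces get
    // the Robin or Dirichlet contributions.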
    //==================================== Assemble face terms
    const size_t num_faces = cell.faces_.size();
    for (size_t f = 0; f < num_faces; ++f) {
      const auto& face = cell.faces_[f];
      const auto& n_f = face.normal_;
      const size_t num_face_nodes = cell_mapping.NumFaceNodes(f);
      const auto fqp_data = cell_mapping.MakeSurfaceQuadraturePointData(f);

      const double hm = HPerpendicular(cell, f);
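      // hm (and hp_neigh for the neighbor) are characteristic cell widths
      // perpendicular to this face; they provide the 1/h scaling of the
      // interior-penalty coefficient.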

      typedef chi_mesh::MeshContinuum Grid;

      // interior face
      if (face.has_neighbor_)
      {
        const auto& adj_cell = grid.cells[face.neighbor_id_];
        const auto& adj_cell_mapping = sdm.GetCellMapping(adj_cell);
        const auto ac_nodes = adj_cell_mapping.GetNodeLocations();
        const size_t acf = Grid::MapCellFace(cell, adj_cell, f);
        const double hp_neigh = HPerpendicular(adj_cell, acf);

        const auto imat_neigh = adj_cell.material_id_;

        //========================= Compute Ckappa IP
        double Ckappa = 1.0;
        if (cell.Type() == chi_mesh::CellType::SLAB)
          Ckappa = 2.0;
        if (cell.Type() == chi_mesh::CellType::POLYGON)
          Ckappa = 2.0;
        if (cell.Type() == chi_mesh::CellType::POLYHEDRON)
          Ckappa = 4.0;
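        // Larger penalty constants are used for higher-dimensional cell types
        // so that the penalty term dominates the consistency terms and the SIP
        // bilinear form stays coercive.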

        //========================= Assembly penalty terms
        for (size_t fi = 0; fi < num_face_nodes; ++fi) {
          const int i = cell_mapping.MapFaceNode(f, fi);
          const int64_t imap = sdm.MapDOF(cell, i);

          for (size_t fj = 0; fj < num_face_nodes; ++fj) {
            const int jm = cell_mapping.MapFaceNode(f, fj); //j-minus
            const int64_t jmmap = sdm.MapDOF(cell, jm);

            const int jp = MapFaceNodeDisc(cell, adj_cell, cc_nodes, ac_nodes,
                                           f, acf, fj); //j-plus
            const int64_t jpmap = sdm.MapDOF(adj_cell, jp);

            double aij = 0.0;
            for (size_t qp : fqp_data.QuadraturePointIndices())
              aij += Ckappa *
                     (CallLua_iXYZFunction(L, "D_coef", imat, fqp_data.QPointXYZ(qp)) / hm +
                      CallLua_iXYZFunction(L, "D_coef", imat_neigh, fqp_data.QPointXYZ(qp)) / hp_neigh) / 2.
                     *
                     fqp_data.ShapeValue(i, qp) * fqp_data.ShapeValue(jm, qp) *
                     fqp_data.JxW(qp);

            MatSetValue(A_, imap, jmmap, aij, ADD_VALUES);
            MatSetValue(A_, imap, jpmap, -aij, ADD_VALUES);
          }//for fj
        }//for fi
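
        // The +aij / -aij pair realizes the jump [[phi]] = phi^- - phi^+ across
        // the face: the minus-side DOF enters with +aij and the matching
        // plus-side (neighbor) DOF with -aij.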

        //========================= Assemble gradient terms
        // For the following comments we use the notation:
        // Dk = 0.5* n dot nabla bk

        // {{D d_n b_i}}[[Phi]]
        // 0.5*D* n dot (b_j^+ - b_j^-)*nabla b_i^-

        // loop over nodes of the current cell (gradient of b_i)
        for (int i = 0; i < num_nodes; ++i) {
          const int64_t imap = sdm.MapDOF(cell, i);

          // loop over face nodes (b_j on the face)
          for (int fj = 0; fj < num_face_nodes; ++fj) {
            const int jm = cell_mapping.MapFaceNode(f, fj); //j-minus
            const int64_t jmmap = sdm.MapDOF(cell, jm);
            const int jp = MapFaceNodeDisc(cell, adj_cell, cc_nodes, ac_nodes,
                                           f, acf, fj); //j-plus
            const int64_t jpmap = sdm.MapDOF(adj_cell, jp);

            chi_mesh::Vector3 vec_aij;
            for (size_t qp : fqp_data.QuadraturePointIndices())
              vec_aij +=
                CallLua_iXYZFunction(L, "D_coef", imat, fqp_data.QPointXYZ(qp)) *
                fqp_data.ShapeValue(jm, qp) * fqp_data.ShapeGrad(i, qp) *
                fqp_data.JxW(qp);
            const double aij = -0.5 * n_f.Dot(vec_aij);

            MatSetValue(A_, imap, jmmap, aij, ADD_VALUES);
            MatSetValue(A_, imap, jpmap, -aij, ADD_VALUES);
          }//for fj
        }//for i

        // {{D d_n Phi}}[[b_i]]
        // 0.5*D* n dot (b_i^+ - b_i^-)*nabla b_j^-
        for (int fi = 0; fi < num_face_nodes; ++fi) {
          const int im = cell_mapping.MapFaceNode(f, fi); //i-minus
          const int64_t immap = sdm.MapDOF(cell, im);

          const int ip = MapFaceNodeDisc(cell, adj_cell, cc_nodes, ac_nodes,
                                         f, acf, fi); //i-plus
          const int64_t ipmap = sdm.MapDOF(adj_cell, ip);

          for (int j = 0; j < num_nodes; ++j) {
            const int64_t jmap = sdm.MapDOF(cell, j);

            chi_mesh::Vector3 vec_aij;
            for (size_t qp : fqp_data.QuadraturePointIndices())
              vec_aij +=
                CallLua_iXYZFunction(L, "D_coef", imat, fqp_data.QPointXYZ(qp)) *
                fqp_data.ShapeValue(im, qp) * fqp_data.ShapeGrad(j, qp) *
                fqp_data.JxW(qp);
            const double aij = -0.5 * n_f.Dot(vec_aij);

            MatSetValue(A_, immap, jmap, aij, ADD_VALUES);
            MatSetValue(A_, ipmap, jmap, -aij, ADD_VALUES);
          }//for j
        }//for fi

      }//internal face
      else
      { // boundary face
        const auto& bndry = boundaries_[face.neighbor_id_];
        // Robin boundary
        if (bndry.type_ == BoundaryType::Robin)
        {
          const auto qp_face_data =
            cell_mapping.MakeSurfaceQuadraturePointData(f);

          const auto& aval = bndry.values_[0];
          const auto& bval = bndry.values_[1];
          const auto& fval = bndry.values_[2];

          Chi::log.Log0Verbose1() << "Boundary set as Robin with a,b,f = ("
                                  << aval << ","
                                  << bval << ","
                                  << fval << ") ";
          // A true Robin BC has a != 0; with a = 0 it reduces to a Neumann BC.
          // b may not be zero (b = 0 would make this a Dirichlet BC).
          if (std::fabs(bval) < 1e-8)
            throw std::logic_error("if b=0, this is a Dirichlet BC, not a Robin BC");
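          // For a*phi + b*D*dphi/dn = f on the boundary, the weak form adds
          // (a/b) <phi, v> to the matrix and (f/b) <v> to the RHS, which is
          // what the two blocks below assemble.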

          for (size_t fi = 0; fi < num_face_nodes; fi++) {
            const uint i = cell_mapping.MapFaceNode(f, fi);
            const int64_t ir = sdm.MapDOF(cell, i);

            if (std::fabs(aval) >= 1.0e-12) {
              for (size_t fj = 0; fj < num_face_nodes; fj++) {
                const uint j = cell_mapping.MapFaceNode(f, fj);
                const int64_t jr = sdm.MapDOF(cell, j);

                double aij = 0.0;
                for (size_t qp : fqp_data.QuadraturePointIndices())
                  aij += fqp_data.ShapeValue(i, qp) * fqp_data.ShapeValue(j, qp) *
                         fqp_data.JxW(qp);
                aij *= (aval / bval);

                MatSetValue(A_, ir, jr, aij, ADD_VALUES);
              }//for fj
            }//if a nonzero

            if (std::fabs(fval) >= 1.0e-12) {
              double rhs_val = 0.0;
              for (size_t qp : fqp_data.QuadraturePointIndices())
                rhs_val += fqp_data.ShapeValue(i, qp) * fqp_data.JxW(qp);
              rhs_val *= (fval / bval);

              VecSetValue(b_, ir, rhs_val, ADD_VALUES);
            }//if f nonzero
          }//for fi
        }//Robin BC
        else if (bndry.type_ == BoundaryType::Dirichlet) {
          const double bc_value = bndry.values_[0];
          //========================= Compute kappa
          double Ckappa = 2.0;
          if (cell.Type() == chi_mesh::CellType::SLAB)
            Ckappa = 4.0; // fmax(4.0*Dg/hm,0.25);
          if (cell.Type() == chi_mesh::CellType::POLYGON)
            Ckappa = 4.0;
          if (cell.Type() == chi_mesh::CellType::POLYHEDRON)
            Ckappa = 8.0;
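          // Boundary penalty constants are twice their interior-face
          // counterparts since only the current cell contributes across a
          // boundary face.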

          //========================= Assembly penalty terms
          for (size_t fi = 0; fi < num_face_nodes; ++fi) {
            const uint i = cell_mapping.MapFaceNode(f, fi);
            const int64_t imap = sdm.MapDOF(cell, i);

            for (size_t fj = 0; fj < num_face_nodes; ++fj) {
              const uint jm = cell_mapping.MapFaceNode(f, fj);
              const int64_t jmmap = sdm.MapDOF(cell, jm);

              double aij = 0.0;
              for (size_t qp : fqp_data.QuadraturePointIndices())
                aij += Ckappa *
                       CallLua_iXYZFunction(L, "D_coef", imat, fqp_data.QPointXYZ(qp)) / hm *
                       fqp_data.ShapeValue(i, qp) * fqp_data.ShapeValue(jm, qp) *
                       fqp_data.JxW(qp);
              double aij_bc_value = aij * bc_value;

              MatSetValue(A_, imap, jmmap, aij, ADD_VALUES);
              VecSetValue(b_, imap, aij_bc_value, ADD_VALUES);
            }//for fj
          }//for fi

          //========================= Assemble gradient terms
          // For the following comments we use the notation:
          // Dk = 0.5* n dot nabla bk

          // 0.5*D* n dot (b_j^+ - b_j^-)*nabla b_i^-
          for (size_t i = 0; i < num_nodes; i++) {
            const int64_t imap = sdm.MapDOF(cell, i);

            for (size_t j = 0; j < num_nodes; j++) {
              const int64_t jmap = sdm.MapDOF(cell, j);

              chi_mesh::Vector3 vec_aij;
              for (size_t qp : fqp_data.QuadraturePointIndices())
                vec_aij +=
                  (fqp_data.ShapeValue(j, qp) * fqp_data.ShapeGrad(i, qp) +
                   fqp_data.ShapeValue(i, qp) * fqp_data.ShapeGrad(j, qp)) *
                  fqp_data.JxW(qp) *
                  CallLua_iXYZFunction(L, "D_coef", imat, fqp_data.QPointXYZ(qp));

              const double aij = -n_f.Dot(vec_aij);
              double aij_bc_value = aij * bc_value;

              MatSetValue(A_, imap, jmap, aij, ADD_VALUES);
              VecSetValue(b_, imap, aij_bc_value, ADD_VALUES);
            }//for j
          }//for i
        }//Dirichlet BC
        else {
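          // Reflecting boundaries (zero net current) and any other unhandled
          // boundary type require no face contributions; this is equivalent to
          // a natural zero-Neumann condition.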
        }//else BC
      }//boundary face
    }//for face f
  }//for cell

  Chi::log.Log() << "Global assembly";

  MatAssemblyBegin(A_, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A_, MAT_FINAL_ASSEMBLY);
  VecAssemblyBegin(b_);
  VecAssemblyEnd(b_);

//  MatView(A, PETSC_VIEWER_STDERR_WORLD);
//
//  PetscViewer viewer;
//  PetscViewerASCIIOpen(PETSC_COMM_WORLD,"A.m",&viewer);
//  PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_MATLAB);
//  MatView(A,viewer);
//  PetscViewerPopFormat(viewer);
//  PetscViewerDestroy(&viewer);

  Chi::log.Log() << "Done global assembly";

  //============================================= Create Krylov Solver
  Chi::log.Log() << "Solving: ";
  auto petsc_solver =
    chi_math::PETScUtils::CreateCommonKrylovSolverSetup(
      A_,               //Matrix
      TextName(),       //Solver name
      KSPCG,            //Solver type
      PCGAMG,           //Preconditioner type
      basic_options_("residual_tolerance").FloatValue(), //Relative residual tolerance
      basic_options_("max_iters").IntegerValue()         //Max iterations
    );
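
  // The SIP discretization yields a symmetric positive-definite matrix, so
  // conjugate gradients (KSPCG) with an algebraic multigrid preconditioner
  // (PCGAMG) is an appropriate solver choice.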

  //============================================= Solve
  KSPSolve(petsc_solver.ksp, b_, x_);

  Chi::log.Log() << "Done solving";

  const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;
  sdm.LocalizePETScVector(x_, field_, OneDofPerNode);

  field_functions_.front()->UpdateFieldVector(field_);
}

Referenced symbols (definitions linked in the generated documentation):
- std::vector<VecDbl> MatDbl (chi_math.h:13)
- std::vector<double> VecDbl (chi_math.h:12)
- static std::vector<chi_physics::FieldFunctionPtr> field_function_stack (chi_runtime.h:92)
- static chi::Timer program_timer (chi_runtime.h:79)
- static chi::ChiLog& log (chi_runtime.h:81)
- static chi::Console& console (chi_runtime.h:80)
- LogStream Log(LOG_LVL level=LOG_0) (chi_log.cc:35)
- LogStream Log0Verbose1() (chi_log.h:234)
- lua_State*& GetConsoleState() (chi_console.h:130)
- std::string GetTimeString() const (chi_timer.cc:38)
- static std::shared_ptr<PieceWiseLinearDiscontinuous> New(const chi_mesh::MeshContinuum& grid, QuadratureOrder q_order=QuadratureOrder::SECOND, CoordinateSystemType cs_type=CoordinateSystemType::CARTESIAN)
- GlobalCellHandler cells
- chi_mesh::MeshContinuumPtr& GetGrid() const
- std::pair<BoundaryType, std::vector<double>> BoundaryInfo
- Solver(const std::string& in_solver_name)
- void InitMatrixSparsity(Mat& A, const std::vector<int64_t>& nodal_nnz_in_diag, const std::vector<int64_t>& nodal_nnz_off_diag)
- PETScSolverSetup CreateCommonKrylovSolverSetup(Mat ref_matrix, const std::string& in_solver_name="KSPSolver", const std::string& in_solver_type=KSPGMRES, const std::string& in_preconditioner_type=PCNONE, double in_relative_residual_tolerance=1.0e-6, int64_t in_maximum_iterations=100)
- Mat CreateSquareMatrix(int64_t local_size, int64_t global_size)
- Vec CreateVector(int64_t local_size, int64_t global_size)
- MeshHandler& GetCurrentHandler()
- #define uint
- Vector3 Dot(const chi_mesh::TensorRank2Dim3& that) const