// fv_diffusion_solver.cc
#include "fv_diffusion_solver.h"

#include "chi_runtime.h"
#include "chi_log.h"
#include "utils/chi_timer.h"

#include "mesh/MeshHandler/chi_meshhandler.h"
#include "mesh/MeshContinuum/chi_meshcontinuum.h"

#include "fv_diffusion_bndry.h"

#include "physics/FieldFunction/fieldfunction_gridbased.h"

#include "math/SpatialDiscretization/FiniteVolume/FiniteVolume.h"

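// Solves the steady one-group diffusion problem
//   -div( D grad(phi) ) + sigma_a phi = q_ext
// with a cell-centered finite-volume discretization. D, sigma_a and q_ext
// are sampled per cell from user-supplied Lua functions (see Execute()).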
//============================================= constructor
fv_diffusion::Solver::Solver(const std::string& in_solver_name):
  chi_physics::Solver(in_solver_name, { {"max_iters", int64_t(500)   },
                                        {"residual_tolerance", 1.0e-2}})
{}

//============================================= destructor
fv_diffusion::Solver::~Solver()
{
  VecDestroy(&x_);
  VecDestroy(&b_);
  MatDestroy(&A_);
}

//============================================= Initialize
void fv_diffusion::Solver::Initialize()
{
  const std::string fname = "fv_diffusion::Solver::Initialize";
  Chi::log.Log() << "\n"
                 << Chi::program_timer.GetTimeString() << " "
                 << TextName() << ": Initializing FV diffusion solver ";

  //============================================= Get grid
  grid_ptr_ = chi_mesh::GetCurrentHandler().GetGrid();
  if (grid_ptr_ == nullptr)
    throw std::logic_error(std::string(__PRETTY_FUNCTION__) +
                           " No grid defined.");
  const auto& grid = *grid_ptr_;

  Chi::log.Log() << "Global num cells: " << grid.GetGlobalNumberOfCells();

  //============================================= BIDs
  auto globl_unique_bndry_ids = grid.GetDomainUniqueBoundaryIDs();

  const auto& grid_boundary_id_map = grid_ptr_->GetBoundaryIDMap();
  for (uint64_t bndry_id : globl_unique_bndry_ids)
  {
    if (grid_boundary_id_map.count(bndry_id) == 0)
      throw std::logic_error(fname + ": Boundary id " +
                             std::to_string(bndry_id) +
                             " does not have a name-assignment.");

    const auto& bndry_name = grid_boundary_id_map.at(bndry_id);
    if (boundary_preferences_.find(bndry_name) != boundary_preferences_.end())
    {
      BoundaryInfo bndry_info = boundary_preferences_.at(bndry_name);
      auto& bndry_vals = bndry_info.second;
      switch (bndry_info.first)
      {
        case BoundaryType::Reflecting:
        {
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Reflecting, {0., 0., 0.}}));
          Chi::log.Log() << "Boundary " << bndry_name << " set to reflecting.";
          break;
        }
        case BoundaryType::Dirichlet:
        {
          if (bndry_vals.empty()) bndry_vals.resize(1, 0.0);
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Dirichlet, {bndry_vals[0], 0., 0.}}));
          Chi::log.Log() << "Boundary " << bndry_name << " set to dirichlet.";
          break;
        }
        case BoundaryType::Robin:
        {
          if (bndry_vals.size() != 3)
            throw std::logic_error(std::string(__PRETTY_FUNCTION__) +
                                   " Robin needs 3 values in bndry vals.");
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Robin, {bndry_vals[0],
                                                     bndry_vals[1],
                                                     bndry_vals[2]}}));
          Chi::log.Log() << "Boundary " << bndry_name
                         << " set to robin: " << bndry_vals[0] << ","
                         << bndry_vals[1] << "," << bndry_vals[2];
          break;
        }
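        // Vacuum is imposed as a Marshak-type Robin condition,
        //   phi/4 + (D/2) dphi/dn = 0,
        // i.e. Robin coefficients a=0.25, b=0.5, f=0.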
        case BoundaryType::Vacuum:
        {
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Robin, {0.25, 0.5, 0.}}));
          Chi::log.Log() << "Boundary " << bndry_name << " set to vacuum.";
          break;
        }
        case BoundaryType::Neumann:
        {
          if (bndry_vals.size() != 3)
            throw std::logic_error(std::string(__PRETTY_FUNCTION__) +
                                   " Neumann needs 3 values in bndry vals.");
          boundaries_.insert(std::make_pair(
            bndry_id, Boundary{BoundaryType::Robin, {0., bndry_vals[0],
                                                     bndry_vals[1]}}));
          Chi::log.Log() << "Boundary " << bndry_name
                         << " set to neumann: " << bndry_vals[0];
          break;
        }
      }//switch boundary type
    }
    else
    {
      boundaries_.insert(std::make_pair(
        bndry_id, Boundary{BoundaryType::Dirichlet, {0., 0., 0.}}));
      Chi::log.Log0Verbose1()
        << "No boundary preference found for boundary " << bndry_name
        << ". Dirichlet boundary added with zero boundary value.";
    }
  }//for bndry

  //============================================= Make SDM
  sdm_ptr_ = chi_math::spatial_discretization::FiniteVolume::New(*grid_ptr_);
  const auto& sdm = *sdm_ptr_;

  const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;
  num_local_dofs_ = sdm.GetNumLocalDOFs(OneDofPerNode);
  num_globl_dofs_ = sdm.GetNumGlobalDOFs(OneDofPerNode);

  Chi::log.Log() << "Num local DOFs: " << num_local_dofs_;
  Chi::log.Log() << "Num globl DOFs: " << num_globl_dofs_;

  //============================================= Initializes Mats and Vecs
  const auto n = static_cast<int64_t>(num_local_dofs_);
  const auto N = static_cast<int64_t>(num_globl_dofs_);

  A_ = chi_math::PETScUtils::CreateSquareMatrix(n, N);
  x_ = chi_math::PETScUtils::CreateVector(n, N);
  b_ = chi_math::PETScUtils::CreateVector(n, N);

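  // Preallocate matrix nonzeros from the FV sparsity pattern: for each cell
  // row, the diagonal entry plus one entry per face neighbor, split into
  // on-process and off-process counts.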
  std::vector<int64_t> nodal_nnz_in_diag;
  std::vector<int64_t> nodal_nnz_off_diag;
  sdm.BuildSparsityPattern(nodal_nnz_in_diag, nodal_nnz_off_diag, OneDofPerNode);

  chi_math::PETScUtils::InitMatrixSparsity(A_,
                                           nodal_nnz_in_diag,
                                           nodal_nnz_off_diag);

  if (field_functions_.empty())
  {
    std::string solver_name;
    if (not TextName().empty()) solver_name = TextName() + "-";

    std::string text_name = solver_name + "phi";

    using namespace chi_math;
    auto initial_field_function =
      std::make_shared<chi_physics::FieldFunctionGridBased>(
        text_name,                     //Text name
        sdm_ptr_,                      //Spatial Discretization
        Unknown(UnknownType::SCALAR)); //Unknown/Variable

    field_functions_.push_back(initial_field_function);
    Chi::field_function_stack.push_back(initial_field_function);
  }//if not ff set

}//end initialize

//========================================================== Execute
void fv_diffusion::Solver::Execute()
{
  Chi::log.Log() << "\nExecuting FV diffusion solver";

  const auto& grid = *grid_ptr_;
  const auto& sdm  = *sdm_ptr_;

  lua_State* L = Chi::console.GetConsoleState();

  //============================================= Assemble the system
  // P ~ Present cell
  // N ~ Neighbor cell
  Chi::log.Log() << "Assembling system: ";
  for (const auto& cell_P : grid.local_cells)
  {
    const auto& cell_mapping = sdm.GetCellMapping(cell_P);
    const double volume_P = cell_mapping.CellVolume(); //Volume of present cell
    const auto& x_cc_P = cell_P.centroid_;

    const auto imat = cell_P.material_id_;

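    // Material properties are sampled at the cell centroid via the Lua
    // functions Sigma_a, Q_ext and D_coef defined in the user's input file
    // (each is called with the material id and the centroid coordinates).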
    const double sigma_a = CallLua_iXYZFunction(L, "Sigma_a", imat, x_cc_P);
    const double q_ext   = CallLua_iXYZFunction(L, "Q_ext"  , imat, x_cc_P);
    const double D_P     = CallLua_iXYZFunction(L, "D_coef" , imat, x_cc_P);

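    // Volumetric terms: removal sigma_a*phi_P*V_P on the diagonal and the
    // source q_ext*V_P on the right-hand side.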
    const int64_t imap = sdm.MapDOF(cell_P, 0);
    MatSetValue(A_, imap, imap, sigma_a * volume_P, ADD_VALUES);
    VecSetValue(b_, imap, q_ext * volume_P, ADD_VALUES);

    for (size_t f=0; f < cell_P.faces_.size(); ++f)
    {
      const auto& face  = cell_P.faces_[f];
      const auto& x_fc  = face.centroid_;
      const auto  x_PF  = x_fc - x_cc_P;
      const auto  A_f   = cell_mapping.FaceArea(f);
      const auto  A_f_n = A_f * face.normal_;

      if (face.has_neighbor_)
      {
        const auto& cell_N = grid.cells[face.neighbor_id_];
        const int   jmat   = cell_N.material_id_;
        const auto& x_cc_N = cell_N.centroid_;
        const auto  x_PN   = x_cc_N - x_cc_P;

        const double D_N = CallLua_iXYZFunction(L, "D_coef", jmat, x_cc_N);

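        // Face diffusion coefficient: distance-weighted harmonic mean of
        // D_P and D_N, with w = |x_PF|/|x_PN| the fraction of the P-to-N
        // path inside cell P. This keeps the normal flux continuous across
        // a material discontinuity.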
        const double w   = x_PF.Norm()/x_PN.Norm();
        const double D_f = 1.0/(w/D_P + (1.0-w)/D_N);

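        // Two-point flux approximation across the face:
        //   F_f = -D_f A_f (phi_N - phi_P) (n . x_PN)/|x_PN|^2,
        // contributing entry_ii to the diagonal and -entry_ii to the
        // off-diagonal of row imap.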
        const double entry_ii =  A_f_n.Dot(D_f * x_PN/x_PN.NormSquare());
        const double entry_ij = -entry_ii;

        const int64_t jmap = sdm.MapDOF(cell_N, 0);
        MatSetValue(A_, imap, imap, entry_ii, ADD_VALUES);
        MatSetValue(A_, imap, jmap, entry_ij, ADD_VALUES);
      }//internal face
      else
      {
        const auto& bndry = boundaries_[face.neighbor_id_];

        if (bndry.type_ == BoundaryType::Robin)
        {
          const auto& aval = bndry.values_[0];
          const auto& bval = bndry.values_[1];
          const auto& fval = bndry.values_[2];

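          // Robin condition a*phi + b*D dphi/dn = f: the boundary-face flux
          // contributes (A_f a/b) phi_P to the diagonal and A_f f/b to the
          // right-hand side; b = 0 would degenerate to a Dirichlet condition.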
          if (std::fabs(bval) < 1e-8)
            throw std::logic_error("if b=0, this is a Dirichlet BC, not a Robin BC");

          if (std::fabs(aval) > 1.0e-8)
            MatSetValue(A_, imap, imap, A_f * aval / bval, ADD_VALUES);
          if (std::fabs(fval) > 1.0e-8)
            VecSetValue(b_, imap, A_f * fval / bval, ADD_VALUES);
        }//if Robin

        if (bndry.type_ == BoundaryType::Dirichlet)
        {
          const auto& boundary_value = bndry.values_[0];

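          // Ghost-cell treatment: place a virtual neighbor centroid mirrored
          // across the face (at x_cc_P + 2*x_PF) holding the boundary value,
          // then apply the same two-point flux as for an interior face, with
          // the known boundary value moved to the right-hand side.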
          const auto x_cc_N = x_cc_P + 2.0*x_PF;
          const auto x_PN   = x_cc_N - x_cc_P;

          const double D_f = D_P;
          const double entry_ii = A_f_n.Dot(D_f * x_PN/x_PN.NormSquare());

          MatSetValue(A_, imap, imap, entry_ii, ADD_VALUES);
          VecSetValue(b_, imap, entry_ii * boundary_value, ADD_VALUES);
        }//if Dirichlet
      }//bndry face
    }//for f
  }//for cell

  Chi::log.Log() << "Global assembly";

  MatAssemblyBegin(A_, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A_, MAT_FINAL_ASSEMBLY);
  VecAssemblyBegin(b_);
  VecAssemblyEnd(b_);

  Chi::log.Log() << "Done global assembly";

  //============================================= Create Krylov Solver
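  // The assembled operator is symmetric positive definite, so conjugate
  // gradients (KSPCG) with an algebraic-multigrid preconditioner (PCGAMG)
  // is a natural choice.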
  Chi::log.Log() << "Solving: ";
  auto petsc_solver =
    chi_math::PETScUtils::CreateCommonKrylovSolverSetup(
      A_,                 //Matrix
      TextName(),         //Solver name
      KSPCG,              //Solver type
      PCGAMG,             //Preconditioner type
      basic_options_("residual_tolerance").FloatValue(), //Relative residual tolerance
      basic_options_("max_iters").IntegerValue()         //Max iterations
    );

  //============================================= Solve
  KSPSolve(petsc_solver.ksp, b_, x_);

  UpdateFieldFunctions();

  Chi::log.Log() << "Done solving";

}//end execute
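
// For reference, the property callbacks used above are plain Lua functions
// in the user's input file. A minimal, illustrative sketch only -- the names
// are fixed by this solver, while the (imat, x, y, z) signature is assumed
// from CallLua_iXYZFunction:
//
//   function D_coef(imat, x, y, z)  return 1.0 end
//   function Q_ext(imat, x, y, z)   return 1.0 end
//   function Sigma_a(imat, x, y, z) return 0.0 end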