1/** \page CodeTut1 Coding Tutorial 1 - Poisson's problem with Finite Volume
2
3## Table of contents
4- \ref CodeTut1Sec1
5 - \ref CodeTut1Sec1_1
6 - \ref CodeTut1Sec1_2
7- \ref CodeTut1Sec2
8- \ref CodeTut1Sec3
9- \ref CodeTut1Sec4
10- \ref CodeTut1Sec5
11- \ref CodeTut1Sec6
12- \ref CodeTut1Sec7
13- \ref CodeTut1Sec8
14- \ref CodeTut1Sec9
15
16\section CodeTut1Sec1 1 Poisson's equation
17<a href="https://en.wikipedia.org/wiki/Poisson%27s_equation">Poisson's equation</a> states the following, for
18\f$ \phi, q \in \mathcal{R} \f$,
19\f{eqnarray*}{
20-\boldsymbol{\nabla} \cdot \bigr(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) = q(\mathbf{x}), \quad \quad \quad \mathbf{x}\in\mathcal{D} \\
21\phi(\mathbf{x}) = 0, \quad \quad \quad \mathbf{x}\in \partial \mathcal{D}
22\f}
23where \f$ \boldsymbol{\nabla} \cdot \bigr( \bigr) \f$ denotes the divergence operator
24and \f$ \boldsymbol{\nabla} \f$ denotes the gradient operator, i.e.
25\f$ \boldsymbol{\nabla} \phi \f$ denotes the gradient of \f$ \phi \f$. The
26boundary conditions here state that \f$ \phi=0 \f$ on the boundary.
27
28
29
30
31\subsection CodeTut1Sec1_1 1.1 Our specific problem
32For our specific problem we choose \f$ q(\mathbf{x})=1 \f$ and take \f$ \mathcal{D} \f$ to be a Cartesian domain,
33either 1D, 2D or 3D, spanning \f$ [-1,+1] \f$ in each dimension. We can generate the mesh for this
34problem using an input file
35\code
36--############################################### Setup mesh
37chiMeshHandlerCreate()
38
39mesh={}
40N=20
41L=2
42xmin = -L/2
43dx = L/N
44for i=1,(N+1) do
45 k=i-1
46 mesh[i] = xmin + k*dx
47end
48
49--chiMeshCreateUnpartitioned3DOrthoMesh(mesh,mesh,mesh)
50chiMeshCreateUnpartitioned2DOrthoMesh(mesh,mesh)
51--chiMeshCreateUnpartitioned1DOrthoMesh(mesh)
52chiVolumeMesherExecute();
53
54--############################################### Set Material IDs
55chiVolumeMesherSetMatIDToAll(0)
56\endcode
57This code can be used to generate any of the following meshes,
58\image html CodingTutorials/OrthoMesh_1D_2D_3D.png "[From left to right] 1D, 2D, 3D orthogonal mesh" width=1200px
59
60
61
62
63\subsection CodeTut1Sec1_2 1.2 The Finite Volume spatial discretization
64To apply the Finite Volume spatial discretization we integrate Poisson's equation
65over the volume of the cell
66\f{eqnarray*}{
67-\int_V \boldsymbol{\nabla} \cdot \bigr(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) dV
68=
69\int_V q(\mathbf{x}) dV
70\f}
71Next we apply the approximation that both \f$ \phi \f$ and \f$ q \f$ are constant
72over a cell. Therefore, for cell \f$ P \f$, we have the source term
73\f{eqnarray*}{
74\int_V q(\mathbf{x}) dV = q_P V_P.
75\f}
76For the left-hand side we first apply the Gauss divergence theorem,
77\f{eqnarray*}{
78\int_V \boldsymbol{\nabla} \cdot \bigr(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) dV =
79\int_S \mathbf{n} \cdot \bigr(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) dA
80\f}
81and for a discrete cell this surface integral can be discretized as the sum over all
82the faces
83\f{eqnarray*}{
84\int_S \mathbf{n} \cdot \bigr(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) dA =
85\sum_f A_f \mathbf{n}_f \cdot \bigr(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr)_f.
86\f}
87We now still have to resolve \f$ \bigr(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr)_f \f$.
88To comprehend the approximation we are about to make, consider the figure below.
89We observe a cell \f$ P \f$, the present cell, and its neighbor, cell \f$ N \f$, at face \f$ f \f$.
90The cell centroids are \f$ \mathbf{x}_P \f$ and \f$ \mathbf{x}_N \f$ respectively.
91
92\image html CodingTutorials/NeighborXPN.png width=250px
93
94We next form \f$ \mathbf{x}_{PN} \f$, as in the diagram, after which we approximate
95the gradient of \f$ \phi \f$ at face \f$ f \f$ as
96\f{eqnarray*}{
97\bigr(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr)_f =
98\biggr(
99\frac{\phi_N - \phi_P}{||\mathbf{x}_{PN}||}
100\biggr)
101\ \frac{\mathbf{x}_{PN}}{||\mathbf{x}_{PN}||}.
102\f}
103Note that this is essentially the scalar directional derivative
104 \f$ \frac{d\phi}{ds} = \frac{\phi_N - \phi_P}{||\mathbf{x}_{PN}||}\f$, to which we then assign a direction
105by normalizing \f$ \mathbf{x}_{PN} \f$ as \f$ \frac{\mathbf{x}_{PN}}{||\mathbf{x}_{PN}||} \f$.
106
107Substituting this approximation into the surface sum (and retaining the minus sign of the original equation), our discretized equation for cell \f$ P \f$ becomes
108
109\f{align*}{
110\sum_f
111c_f
112\phi_P
113-
114\sum_f
115c_f
116\phi_N
117&=
118q_P V_P
119\\
120c_f &= \biggr[
121\mathbf{A}_f \boldsymbol{\cdot}
122\frac{\mathbf{x}_{PN}}{||\mathbf{x}_{PN}||^2}
123\biggr]_f
124\f}
125where \f$ \mathbf{A}_f \f$ is the area-vector of the face, i.e. \f$ A_f \mathbf{n}_f \f$, and
126 \f$ \phi_N=0 \f$ when face \f$ f \f$ is on a boundary (enforcing the
127 Dirichlet boundary condition).
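As a quick worked example of these coefficients (a sketch, assuming the uniform
orthogonal meshes of section \ref CodeTut1Sec1_1, for which the face normal is
parallel to \f$ \mathbf{x}_{PN} \f$), the coefficient reduces to
\f[
c_f = \frac{A_f}{||\mathbf{x}_{PN}||}.
\f]
With \f$ h = L/N = 2/20 = 0.1 \f$, an interior face in 2D (taking unit depth) has
\f$ A_f = 0.1 \f$ and \f$ ||\mathbf{x}_{PN}|| = 0.1 \f$, giving \f$ c_f = 1 \f$,
while in 3D \f$ A_f = h^2 \f$ so that \f$ c_f = h = 0.1 \f$.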
128
129
130
131
132
133\section CodeTut1Sec2 2 Setting up the problem
134In order to implement the code, we must first follow the steps in \ref DevManUsingLib.
135Before proceeding, make sure you are fully acquainted with those steps.
136
137For this tutorial we deviate from the library tutorial by first
138changing the cmake target and project name. Therefore, we pick a project folder
139and create the `CMakeLists.txt` file, but change the following lines.
140\code
141set(TARGET code_tut1)
142project(CodeTut1 CXX)
143\endcode
144
145So now the entire `CMakeLists.txt` should look like this
146\code
147cmake_minimum_required(VERSION 3.12)
148
149set(TARGET code_tut1)
150project(CodeTut1 CXX)
151
152#------------------------------------------------ DEPENDENCIES
153if (NOT DEFINED CHI_TECH_DIR)
154 if (NOT (DEFINED ENV{CHI_TECH_DIR}))
155 message(FATAL_ERROR "***** CHI_TECH_DIR is not set *****")
156 else()
157 set(CHI_TECH_DIR "$ENV{CHI_TECH_DIR}")
158 endif()
159endif()
160message(STATUS "CHI_TECH_DIR set to ${CHI_TECH_DIR}")
161
162include("${CHI_TECH_DIR}/resources/Macros/Downstream.cmake")
163
164set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_SOURCE_DIR}/lib")
165set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${PROJECT_SOURCE_DIR}/lib")
166set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${PROJECT_SOURCE_DIR}/bin")
167
168file (GLOB_RECURSE SOURCES "*.cc")
169add_executable(${TARGET} "${SOURCES}")
170target_link_libraries(${TARGET} ${CHI_LIBS})
171
172file(WRITE ${PROJECT_SOURCE_DIR}/Makefile "subsystem:\n" "\t$(MAKE) -C chi_build \n\n"
173 "clean:\n\t$(MAKE) -C chi_build clean\n")
174\endcode
175
176Next we create the source file which we will call `code_tut1.cc`, and for now
177we will keep its contents the same as the library tutorial. Therefore `code_tut1.cc`
178should look like this
179\code
180#include "chi_runtime.h"
181#include "chi_log.h"
182
183int main(int argc, char* argv[])
184{
185 chi::Initialize(argc,argv);
186 chi::RunBatch(argc, argv);
187
188 chi::log.Log() << "Hello World!";
189
190 //We will add code here
191
192 chi::Finalize();
193 return 0;
194}
195\endcode
196
197To compile this program we first have to set the `CHI_TECH_DIR` environment
198variable. This might be different for everyone but should generally look like this
199\code
200export CHI_TECH_DIR=/Users/drjanisawesome/codes/ChiTech/chi-tech/
201\endcode
202Note: the directory we specify here must contain `bin/` (in other words,
203it should not be `bin/` itself).
204
205Next we create a directory for running `cmake`. Inside your project folder, create
206a folder called `chi_build` and `cd` into it.
207\code
208mkdir chi_build
209cd chi_build
210\endcode
211
212Then run `cmake` pointing to the folder that contains the `CMakeLists.txt` file,
213which in this case is the parent folder of the one we are in.
214\code
215cmake ../
216\endcode
217
218After this you should be able to make the project.
219\code
220make
221\endcode
222after which you should see the following
223\verbatim
224[ 50%] Building CXX object CMakeFiles/code_tut1.dir/code_tut1.cc.o
225[100%] Linking CXX executable ../bin/code_tut1
226[100%] Built target code_tut1
227\endverbatim
228
229As a test you can try to execute the project with no arguments which should look
230like this
231\verbatim
232../bin/code_tut1
233[0] 2022-11-14 13:02:43 Running ChiTech in batch-mode with 1 processes.
234[0] ChiTech version 1.0.2
235[0] ChiTech number of arguments supplied: 0
236[0]
237[0] Usage: exe inputfile [options values]
238[0]
239[0] -v Level of verbosity. Default 0. Can be either 0, 1 or 2.
240[0] a=b Executes argument as a lua string. i.e. x=2 or y=[["string"]]
241[0] -allow_petsc_error_handler Allow petsc error handler.
242[0]
243[0]
244[0]
245[0] Final program time 00:00:00
246[0] 2022-11-14 13:02:43 ChiTech finished execution of
247[0] Hello World!
248\endverbatim
249
250\section CodeTut1Sec3 3 Getting the grid
251First we remove the lines
252\code
253chi::log.Log() << "Hello World!";
254
255//We will add code here
256\endcode
257
258Next we include the headers for access to the `chi_mesh::MeshHandler` and the
259grid, aka `chi_mesh::MeshContinuum`
260\code
261#include "mesh/MeshHandler/chi_meshhandler.h"
262#include "mesh/MeshContinuum/chi_meshcontinuum.h"
263\endcode
264
265Next we add the following lines of code:
266\code
267chi::log.Log() << "Coding Tutorial 1";
268
269//============================================= Get grid
270auto grid_ptr = chi_mesh::GetCurrentHandler().GetGrid();
271const auto& grid = *grid_ptr;
272
273chi::log.Log() << "Global num cells: " << grid.GetGlobalNumberOfCells();
274\endcode
275This is the default way of obtaining a grid loaded into the ChiTech state,
276normally via an input file.
277
278When you compile and run this code it ought to fail by throwing an exception
279that looks like this
280\verbatim
281terminate called after throwing an instance of 'std::logic_error'
282 what(): chi_mesh::GetCurrentHandler: No handlers on stack
283Abort trap: 6
284\endverbatim
285This means no mesh-handler was created and therefore no grid is
286defined. To fix this we first create a `mesh.lua` input file within the `chi_build`
287directory.
288\code
289>mesh.lua
290\endcode
291Use an editor to add the following lines to `mesh.lua`
292\code
293--############################################### Setup mesh
294chiMeshHandlerCreate()
295
296mesh={}
297N=20
298L=2
299xmin = -L/2
300dx = L/N
301for i=1,(N+1) do
302 k=i-1
303 mesh[i] = xmin + k*dx
304end
305
306--chiMeshCreateUnpartitioned3DOrthoMesh(mesh,mesh,mesh)
307chiMeshCreateUnpartitioned2DOrthoMesh(mesh,mesh)
308--chiMeshCreateUnpartitioned1DOrthoMesh(mesh)
309chiVolumeMesherExecute();
310
311--############################################### Set Material IDs
312chiVolumeMesherSetMatIDToAll(0)
313\endcode
314This is known as a ChiTech lua-console input file; it contains plain lua code.
315
316To run the program with this input file we simply add it to the command line
317\code
318../bin/code_tut1 mesh.lua
319\endcode
320
321If everything went well then you should see the following
322\verbatim
323[0] Parsing argument 1 mesh.lua
324[0] 2022-11-14 13:14:52 Running ChiTech in batch-mode with 1 processes.
325[0] ChiTech version 1.0.2
326[0] ChiTech number of arguments supplied: 1
327[0] Computing cell-centroids.
328[0] Done computing cell-centroids.
329[0] Checking cell-center-to-face orientations
330[0] Done checking cell-center-to-face orientations
331[0] 00:00:00 Establishing cell connectivity.
332[0] 00:00:00 Vertex cell subscriptions complete.
333[0] 00:00:00 Surpassing cell 40 of 400 (10%)
334[0] 00:00:00 Surpassing cell 80 of 400 (20%)
335\\ STUFF
336\\ STUFF
337\\ STUFF
338[0]
339[0] Final program time 00:00:00
340[0] 2022-11-14 13:14:52 ChiTech finished execution of mesh.lua
341[0] Coding Tutorial 1
342[0] Global num cells: 400
343\endverbatim
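With the grid now loaded, we can inspect it directly. A minimal sketch (it uses
the `local_cells` range that the assembly loop of section \ref CodeTut1Sec6 also
relies on) that counts the cells owned by this process:
\code
//============================================= Inspect the local grid (optional)
size_t num_local_cells = 0;
for (const auto& cell : grid.local_cells)
{
  (void)cell; //only counting here; the cell itself is used in later sections
  ++num_local_cells;
}

chi::log.Log() << "Local cells on this process: " << num_local_cells;
\endcode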
344
345\section CodeTut1Sec4 4 Initializing the Finite Volume Spatial Discretization
346All spatial discretizations in ChiTech are based on a grid. Here we want
347`chi_math::SpatialDiscretization_FV`, which is the Finite Volume spatial discretization.
348It places only a single node at the centroid of each cell.
349
350To access this discretization we must first include the header for
351`chi_math::SpatialDiscretization_FV`,
352\code
353#include "math/SpatialDiscretization/FiniteVolume/fv.h"
354\endcode
355
356Next we add the following lines of code
357\code
358//============================================= Make SDM
359typedef std::shared_ptr<chi_math::SpatialDiscretization> SDMPtr;
360SDMPtr sdm_ptr = chi_math::SpatialDiscretization_FV::New(grid_ptr);
361const auto& sdm = *sdm_ptr;
362
363const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;
364
365const size_t num_local_dofs = sdm.GetNumLocalDOFs(OneDofPerNode);
366const size_t num_globl_dofs = sdm.GetNumGlobalDOFs(OneDofPerNode);
367
368chi::log.Log() << "Num local DOFs: " << num_local_dofs;
369chi::log.Log() << "Num globl DOFs: " << num_globl_dofs;
370\endcode
371
372What we did here is to first abbreviate the shared-pointer type
373`std::shared_ptr<chi_math::SpatialDiscretization>` to just `SDMPtr`, which shortens
374the next line, where we actually instantiate the spatial discretization.
375Notice the use of `chi_math::SpatialDiscretization_FV::New`. All of the ChiTech
376spatial discretizations can only be created as shared pointers; the need for this
377will only become obvious when you write your own solvers.
378
379The creation of a spatial discretization involves a number of steps. The first thing
380the spatial discretization (SD) does is create cell-mappings. These contain,
381for each cell, the information needed to build a discretization, e.g., the number
382of nodes on the cell, its volume, its face areas, etc. The second thing the SD does is
383order the nodes, in parallel, in such a way that each node can easily be mapped either
384globally or locally.
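As a small preview of how these cell-mappings are used (the same calls appear in
the assembly loop of section \ref CodeTut1Sec6), one could, as a sketch, report
the volume and first-face area of the first local cell:
\code
//============================================= Peek at a cell-mapping (optional)
for (const auto& cell : grid.local_cells)
{
  const auto& cell_mapping = sdm.GetCellMapping(cell);
  chi::log.Log() << "First local cell: volume=" << cell_mapping.CellVolume()
                 << ", first-face area=" << cell_mapping.FaceArea(0);
  break; //only report the first local cell
}
\endcode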
385
386The basic SD operates on the notion of `nodes`. A node is a place where a specific
387component of an `unknown` is located. Since it knows the underlying node structure
388it can provide index mappings for different unknown-structures. For example, if we
389stack velocity-x, velocity-y and velocity-z onto a node the SD only needs to know
390the structure of the unknowns, traditionally called Degrees-of-Freedom or DOFs, in
391order to provide index-mapping. To keep things simple, all SDs have a unitary
392`chi_math::UnknownManager` as a member called `UNITARY_UNKNOWN_MANAGER`. This
393unknown manager allows us to map indices assuming only one DOF per node. We obtained
394a reference to this `chi_math::UnknownManager` object with the line
395\code
396const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;
397\endcode
398
399With this in hand we can now query the SD for the number of local and
400global DOFs. The local DOFs are those stored on the local processor whereas
401the global DOFs encompass all the DOFs across all processors. Since we ran the
402code above in serial, when we run it again, with this new code
403added, we see the following new output:
404\verbatim
405[0] Num local DOFs: 400
406[0] Num globl DOFs: 400
407\endverbatim
408Since the code runs in serial mode the numbers of local and global DOFs are reported
409to be the same.
410
411To run this code in parallel, for execution with 3 processors, we change the launch
412command to
413\code
414mpiexec -np 3 ../bin/code_tut1 mesh.lua
415\endcode
416which now produces a different output:
417\verbatim
418[0] Num local DOFs: 131
419[0] Num globl DOFs: 400
420\endverbatim
421Notice the global number of DOFs remains the same. Only the number of local nodes
422has changed.
423
424\section CodeTut1Sec5 5 Creating and Initializing PETSc matrices and vectors
425ChiTech has several `macro`-type functions for handling PETSc objects. All of these
426are accessed via the `chi_math::PETScUtils` namespace for which we need to include
427the header
428\code
429#include "math/PETScUtils/petsc_utils.h"
430\endcode
431
432Next we add the following code:
433\code
434//============================================= Initializes Mats and Vecs
435const auto n = static_cast<int64_t>(num_local_dofs);
436const auto N = static_cast<int64_t>(num_globl_dofs);
437Mat A;
438Vec x,b;
439
440A = chi_math::PETScUtils::CreateSquareMatrix(n,N);
441x = chi_math::PETScUtils::CreateVector(n,N);
442b = chi_math::PETScUtils::CreateVector(n,N);
443
444std::vector<int64_t> nodal_nnz_in_diag;
445std::vector<int64_t> nodal_nnz_off_diag;
446sdm.BuildSparsityPattern(nodal_nnz_in_diag,nodal_nnz_off_diag,OneDofPerNode);
447
448chi_math::PETScUtils::InitMatrixSparsity(A,
449 nodal_nnz_in_diag,
450 nodal_nnz_off_diag);
451\endcode
452Notice that we made `int64_t` equivalents of `num_local_dofs` and its global
453counterpart. This is because PETSc operates on `int64_t`. The actual creation of
454`A`, `x`, and `b` is self-explanatory, noting that each needs a local size as well
455as a global size.
456
457The next two pieces of code after the creation of the PETSc entities allow us to
458accurately and efficiently set the matrix sparsity pattern, in turn allowing PETSc to
459assemble the sparse matrix efficiently. The sparsity pattern is defined per row
460and is split into entries locally stored ("in" the diagonal block) and entries
461stored elsewhere ("off" the diagonal block). We can get this information from our
462ChiTech SDs by using the `BuildSparsityPattern` routine. This routine populates
463`nodal_nnz_in_diag` and `nodal_nnz_off_diag`, according to an unknown-structure
464(which in our case is `OneDofPerNode`).
465
466Finally we use our sparsity information to set the PETSc matrix's sparsity
467pattern using `chi_math::PETScUtils::InitMatrixSparsity`.
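As a rough check of what `BuildSparsityPattern` should produce for the serial 2D
case above (a back-of-the-envelope estimate, not something the code prints): each
of the \f$ 20\times 20 = 400 \f$ rows gets one diagonal entry plus one off-diagonal
entry per interior face of its cell. With \f$ 2\times 20\times 19 = 760 \f$ interior
faces, each shared by two cells, the total number of nonzeros is
\f[
400 + 2\times 760 = 1920,
\f]
all of them counted as "in" the diagonal block since a single processor owns the
entire mesh.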
468
469
470
471
472
473
474\section CodeTut1Sec6 6 Assembling the matrix and right-hand-side
475The code to assemble the matrix is as follows.
476\code
477//============================================= Assemble the system
478chi::log.Log() << "Assembling system: ";
479for (const auto& cell : grid.local_cells)
480{
481 const auto& cell_mapping = sdm.GetCellMapping(cell);
482 const int64_t imap = sdm.MapDOF(cell,0);
483
484 const auto& xp = cell.centroid;
485 const double V = cell_mapping.CellVolume();
486
487 size_t f=0;
488 for (const auto& face : cell.faces)
489 {
490 const auto Af = face.normal * cell_mapping.FaceArea(f);
491
492 if (face.has_neighbor)
493 {
494 const auto& adj_cell = grid.cells[face.neighbor_id];
495 const int64_t jnmap = sdm.MapDOF(adj_cell,0);
496
497 const auto& xn = adj_cell.centroid;
498
499 const auto xpn = xn - xp;
500
501 const auto cf = Af.Dot(xpn) / xpn.NormSquare();
502
503 MatSetValue(A, imap, imap , cf, ADD_VALUES);
504 MatSetValue(A, imap, jnmap, -cf, ADD_VALUES);
505 }
506 else
507 {
508 const auto& xn = xp + 2.0*(face.centroid - xp);
509 const auto xpn = xn - xp;
510
511 const auto cf = Af.Dot(xpn) / xpn.NormSquare();
512
513 MatSetValue(A, imap, imap , cf, ADD_VALUES);
514 }
515 ++f;
516 }//for face
517
518 VecSetValue(b, imap, 1.0*V, ADD_VALUES);
519}//for cell i
520
521chi::log.Log() << "Global assembly";
522
523MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
524MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
525VecAssemblyBegin(b);
526VecAssemblyEnd(b);
527
528chi::log.Log() << "Done global assembly";
529\endcode
530The first item to note is the loop over local cells. ChiTech can only loop over local
531cells; for more information on this see \ref devman_meshes_sec_1.
532
533Once we have a reference to a local cell we can obtain the SD's mapping of that cell
534using
535\code
536const auto& cell_mapping = sdm.GetCellMapping(cell);
537\endcode
538We can also immediately map the cell's \f$ \phi_P \f$ global location in the
539parallel matrix/vector by making a call to
540`chi_math::SpatialDiscretization::MapDOF`.
541\code
542const int64_t imap = sdm.MapDOF(cell,0);
543\endcode
544This function takes two arguments: a cell reference, and the node number which,
545for Finite Volume, will always be zero.
546
547Finally, before looping over the faces, we grab the cell centroid and volume.
548
549The loop over faces is split into two cases, one for internal faces and one for
550boundary faces. Both cases need the face area-vector, so we construct that first
551\code
552for (const auto& face : cell.faces)
553{
554 const auto Af = face.normal * cell_mapping.FaceArea(f);
555 //etc.
556\endcode
557
558Whether a face is internal, i.e., has a neighboring cell,
559is indicated by the member `has_neighbor`, and the neighbor's global-id
560is stored in the member `neighbor_id`. If the face does not have a neighbor
561then `neighbor_id` doubles as the boundary-id, if boundary-ids are used. All cells
562that share at least a vertex with local cells are classified as `Ghost`-cells
563in ChiTech. Since a neighbor can be either a local- or a ghost-cell, we get
564a reference to the neighbor cell using its global-id, for which we use
565`grid.cells` instead of `grid.local_cells`, which only accepts local-ids.
566\code
567const auto& adj_cell = grid.cells[face.neighbor_id];
568\endcode
569Once we have a reference to the adjacent cell we can now map the address of its
570unknown \f$ \phi_N \f$ as
571\code
572const int64_t jnmap = sdm.MapDOF(adj_cell,0);
573\endcode
574We then compute
575\f{align*}{
576c_f = \biggr[
577\mathbf{A}_f \boldsymbol{\cdot}
578\frac{\mathbf{x}_{PN}}{||\mathbf{x}_{PN}||^2}
579\biggr]_f
580\f}
581with the code
582\code
583const auto& xn = adj_cell.centroid;
584
585const auto xpn = xn - xp;
586
587const auto cf = Af.Dot(xpn) / xpn.NormSquare();
588\endcode
589After that we set the matrix entries associated with \f$ \phi_P \f$ and
590\f$ \phi_N \f$ using the PETSc commands
591\code
592MatSetValue(A, imap, imap , cf, ADD_VALUES);
593MatSetValue(A, imap, jnmap, -cf, ADD_VALUES);
594\endcode
595
596
597
598
599For boundary faces we essentially do the same; however, we do not have an adjacent
600cell or \f$ \mathbf{x}_N \f$. Therefore we extrapolate a virtual boundary cell-centroid
601from the face-centroid, \f$ \mathbf{x}_f \f$, as
602\f[
603\mathbf{x}_N = \mathbf{x}_P + 2 ( \mathbf{x}_f - \mathbf{x}_P )
604\f]
605and then
606\code
607const auto& xn = xp + 2.0*(face.centroid - xp);
608const auto xpn = xn - xp;
609
610const auto cf = Af.Dot(xpn) / xpn.NormSquare();
611\endcode
612
613Finally, since there is no neighbor cell entry, we only have a diagonal entry
614\code
615MatSetValue(A, imap, imap , cf, ADD_VALUES);
616\endcode
617
618After the face loop we set the right-hand side using
619\code
620VecSetValue(b, imap, 1.0*V, ADD_VALUES);
621\endcode
622where the `1.0` represents \f$ q(\mathbf{x})=1 \f$.
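If one later wants a spatially varying source instead of \f$ q=1 \f$, the cell
centroid is already available inside the loop. A hedged sketch (the `q_function`
lambda below is not part of ChiTech; it stands in for any user-supplied
\f$ q(\mathbf{x}) \f$):
\code
//Hypothetical user-supplied source, evaluated at the cell centroid.
//Returning 1.0 reproduces the constant source used in this tutorial;
//return a value based on xyz here for a varying source.
auto q_function = [](const auto& xyz) { return 1.0; };

//...then, inside the cell loop, in place of the line above:
VecSetValue(b, imap, q_function(cell.centroid) * V, ADD_VALUES);
\endcode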
623
624We finish parallel assembly with the following calls
625\code
626MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
627MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
628VecAssemblyBegin(b);
629VecAssemblyEnd(b);
630\endcode
631
632
633
634
635\section CodeTut1Sec7 7 Solving the system with a Krylov Subspace solver
636Most of the following code is self-explanatory.
637\code
638//============================================= Create Krylov Solver
639chi::log.Log() << "Solving: ";
640auto petsc_solver =
641 chi_math::PETScUtils::CreateCommonKrylovSolverSetup(
642 A, //Matrix
643 "FVDiffSolver", //Solver name
644 KSPCG, //Solver type
645 PCGAMG, //Preconditioner type
646 1.0e-6, //Relative residual tolerance
647 1000); //Max iterations
648
649//============================================= Solve
650KSPSolve(petsc_solver.ksp,b,x);
651
652chi::log.Log() << "Done solving";
653\endcode
654The solver type here was specified as `KSPCG`, which represents the conjugate-gradient
655method. The preconditioner `PCGAMG` represents Geometric and/or Algebraic MultiGrid.
656The combination of this solver and preconditioner is known to be one of the most efficient
657ways to solve this type of system.
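If one wants to confirm that the solver actually converged, PETSc can be queried
after the `KSPSolve` call; an optional sketch using standard PETSc routines:
\code
//============================================= Optional: inspect convergence
KSPConvergedReason reason;
KSPGetConvergedReason(petsc_solver.ksp, &reason);

PetscInt iteration_count = 0;
KSPGetIterationNumber(petsc_solver.ksp, &iteration_count);

chi::log.Log() << "KSP converged reason: " << reason
               << ", iterations taken: " << iteration_count;
\endcode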
658
659After this solve-step we want an STL representation of the PETSc vector so
660that we can later visualize the solution. We can get a local copy of the
661PETSc vector using the code
662\code
663//============================================= Extract PETSc vector
664std::vector<double> field(num_local_dofs, 0.0);
665sdm.LocalizePETScVector(x,field,OneDofPerNode);
666
667//============================================= Clean up
668KSPDestroy(&petsc_solver.ksp);
669
670VecDestroy(&x);
671VecDestroy(&b);
672MatDestroy(&A);
673
674chi::log.Log() << "Done cleanup";
675\endcode
676Here the SD took care of appropriately copying from the PETSc vector.
677
678After this is completed we have no need for any of the PETSc entities and therefore
679we destroy them.
680
681
682
683
684
685
686\section CodeTut1Sec8 8 Visualizing the solution
687ChiTech uses the notion of `FieldFunctions` and the visualization toolkit, `VTK`,
688to visualize solutions. In order to gain access to `chi_physics::FieldFunction` we
689need to include the header
690\code
691#include "physics/FieldFunction/fieldfunction2.h"
692\endcode
693
694Next we create the field function using the code
695\code
696//============================================= Create Field Function
697auto ff = std::make_shared<chi_physics::FieldFunction>(
698 "Phi",
699 sdm_ptr,
700 chi_math::Unknown(chi_math::UnknownType::SCALAR)
701);
702\endcode
703Note here that a field function simply needs a name, an SD, and an unknown structure.
704
705After this is created we can update the field function with a field vector
706\code
707ff->UpdateFieldVector(field);
708\endcode
709
710And finally we can export the solution to VTK with
711\code
712ff->ExportToVTK("CodeTut1_FV");
713\endcode
714which takes, as an argument, the base-name of the VTK output files. VTK will
715utilize the parallel nature of the simulation to generate a set of files
716\verbatim
717CodeTut1_FV.pvtu
718CodeTut1_FV_0.vtu
719CodeTut1_FV_1.vtu
720CodeTut1_FV_2.vtu
721\endverbatim
722The structure here is that each processor writes a `.vtu` file. The processor-id
723is appended to the basename before the `.vtu` extension is added. These files contain
724the actual data. An additional proxy-file, used to link all the data together, is written
725as the base-name with `.pvtu` appended to it. This `.pvtu` file can be opened in popular
726visualization tools such as `Visit` and `Paraview` to visualize the solution.
727
728For this tutorial we used Paraview.
729
730\image html CodingTutorials/Solution_1D_2D_3D.png "[From left to right] 1D, 2D, 3D solutions with a coarse mesh" width=1200px
731
732And on a finer mesh, with the 3D case being 1 million cells:
733\image html CodingTutorials/Solution_1D_2D_3D_fine.png "[From left to right] 1D, 2D, 3D solutions with a fine mesh" width=1200px
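As a quick sanity check on these profiles: in 1D the problem reduces to
\f$ -\frac{d^2\phi}{dx^2} = 1 \f$ on \f$ [-1,1] \f$ with \f$ \phi(\pm 1) = 0 \f$,
whose exact solution is
\f[
\phi(x) = \frac{1-x^2}{2},
\f]
peaking at \f$ \phi(0) = 0.5 \f$ in the middle of the domain. The Finite Volume
solution should approach this profile as the 1D mesh is refined.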
734
735
736
737
738\section CodeTut1Sec9 9 The complete program
739
740\code
741#include "chi_runtime.h"
742#include "chi_log.h"
743
744#include "mesh/MeshHandler/chi_meshhandler.h"
745#include "mesh/MeshContinuum/chi_meshcontinuum.h"
746
747#include "math/SpatialDiscretization/FiniteVolume/fv.h"
748#include "math/PETScUtils/petsc_utils.h"
749
750#include "physics/FieldFunction/fieldfunction2.h"
751
752int main(int argc, char* argv[])
753{
754 chi::Initialize(argc,argv);
755 chi::RunBatch(argc, argv);
756
757 chi::log.Log() << "Coding Tutorial 1";
758
759 //============================================= Get grid
760 auto grid_ptr = chi_mesh::GetCurrentHandler().GetGrid();
761 const auto& grid = *grid_ptr;
762
763 chi::log.Log() << "Global num cells: " << grid.GetGlobalNumberOfCells();
764
765 //============================================= Make SDM
766 typedef std::shared_ptr<chi_math::SpatialDiscretization> SDMPtr;
767 SDMPtr sdm_ptr = chi_math::SpatialDiscretization_FV::New(grid_ptr);
768 const auto& sdm = *sdm_ptr;
769
770 const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;
771
772 const size_t num_local_dofs = sdm.GetNumLocalDOFs(OneDofPerNode);
773 const size_t num_globl_dofs = sdm.GetNumGlobalDOFs(OneDofPerNode);
774
775 chi::log.Log() << "Num local DOFs: " << num_local_dofs;
776 chi::log.Log() << "Num globl DOFs: " << num_globl_dofs;
777
778 //============================================= Initializes Mats and Vecs
779 const auto n = static_cast<int64_t>(num_local_dofs);
780 const auto N = static_cast<int64_t>(num_globl_dofs);
781 Mat A;
782 Vec x,b;
783
784 A = chi_math::PETScUtils::CreateSquareMatrix(n,N);
785 x = chi_math::PETScUtils::CreateVector(n,N);
786 b = chi_math::PETScUtils::CreateVector(n,N);
787
788 std::vector<int64_t> nodal_nnz_in_diag;
789 std::vector<int64_t> nodal_nnz_off_diag;
790 sdm.BuildSparsityPattern(nodal_nnz_in_diag,nodal_nnz_off_diag,OneDofPerNode);
791
792 chi_math::PETScUtils::InitMatrixSparsity(A,
793 nodal_nnz_in_diag,
794 nodal_nnz_off_diag);
795
796 //============================================= Assemble the system
797 chi::log.Log() << "Assembling system: ";
798 for (const auto& cell : grid.local_cells)
799 {
800 const auto& cell_mapping = sdm.GetCellMapping(cell);
801 const int64_t imap = sdm.MapDOF(cell,0);
802
803 const auto& xp = cell.centroid;
804 const double V = cell_mapping.CellVolume();
805
806 size_t f=0;
807 for (const auto& face : cell.faces)
808 {
809 const auto Af = face.normal * cell_mapping.FaceArea(f);
810
811 if (face.has_neighbor)
812 {
813 const auto& adj_cell = grid.cells[face.neighbor_id];
814 const int64_t jnmap = sdm.MapDOF(adj_cell,0);
815
816 const auto& xn = adj_cell.centroid;
817
818 const auto xpn = xn - xp;
819
820 const auto cf = Af.Dot(xpn) / xpn.NormSquare();
821
822 MatSetValue(A, imap, imap , cf, ADD_VALUES);
823 MatSetValue(A, imap, jnmap, -cf, ADD_VALUES);
824 }
825 else
826 {
827 const auto& xn = xp + 2.0*(face.centroid - xp);
828 const auto xpn = xn - xp;
829
830 const auto cf = Af.Dot(xpn) / xpn.NormSquare();
831
832 MatSetValue(A, imap, imap , cf, ADD_VALUES);
833 }
834 ++f;
835 }//for face
836
837 VecSetValue(b, imap, 1.0*V, ADD_VALUES);
838 }//for cell i
839
840 chi::log.Log() << "Global assembly";
841
842 MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
843 MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
844 VecAssemblyBegin(b);
845 VecAssemblyEnd(b);
846
847 chi::log.Log() << "Done global assembly";
848
849 //============================================= Create Krylov Solver
850 chi::log.Log() << "Solving: ";
851 auto petsc_solver =
852 chi_math::PETScUtils::CreateCommonKrylovSolverSetup(
853 A, //Matrix
854 "FVDiffSolver", //Solver name
855 KSPCG, //Solver type
856 PCGAMG, //Preconditioner type
857 1.0e-6, //Relative residual tolerance
858 1000); //Max iterations
859
860 //============================================= Solve
861 KSPSolve(petsc_solver.ksp,b,x);
862
863 chi::log.Log() << "Done solving";
864
865 //============================================= Extract PETSc vector
866 std::vector<double> field(num_local_dofs, 0.0);
867 sdm.LocalizePETScVector(x,field,OneDofPerNode);
868
869 //============================================= Clean up
870 KSPDestroy(&petsc_solver.ksp);
871
872 VecDestroy(&x);
873 VecDestroy(&b);
874 MatDestroy(&A);
875
876 chi::log.Log() << "Done cleanup";
877
878 //============================================= Create Field Function
879 auto ff = std::make_shared<chi_physics::FieldFunction>(
880 "Phi",
881 sdm_ptr,
882 chi_math::Unknown(chi_math::UnknownType::SCALAR)
883 );
884
885 ff->UpdateFieldVector(field);
886
887 ff->ExportToVTK("CodeTut1_FV");
888
889 chi::Finalize();
890 return 0;
891}
892\endcode
893*/