/** \page CodeTut1 Coding Tutorial 1 - Poisson's problem with Finite Volume

## Table of contents
- \ref CodeTut1Sec1
  - \ref CodeTut1Sec1_1
  - \ref CodeTut1Sec1_2
- \ref CodeTut1Sec2
- \ref CodeTut1Sec3
- \ref CodeTut1Sec4
- \ref CodeTut1Sec5
- \ref CodeTut1Sec6
- \ref CodeTut1Sec7
- \ref CodeTut1Sec8
- \ref CodeTut1Sec9

\section CodeTut1Sec1 1 Poisson's equation
<a href="https://en.wikipedia.org/wiki/Poisson%27s_equation">Poisson's equation</a> states the following, for
\f$ \phi, q \in \mathcal{R} \f$,
\f{eqnarray*}{
-\boldsymbol{\nabla} \cdot \bigl(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) = q(\mathbf{x}), \quad \quad \quad \mathbf{x}\in\mathcal{D} \\
\phi(\mathbf{x}) = 0, \quad \quad \quad \mathbf{x}\in \partial \mathcal{D}
\f}
where \f$ \boldsymbol{\nabla} \cdot \bigl( \bigr) \f$ denotes the divergence operator
and \f$ \boldsymbol{\nabla} \f$ denotes the gradient operator, i.e.
\f$ \boldsymbol{\nabla} \phi \f$ denotes the gradient of \f$ \phi \f$. The
boundary condition here states that \f$ \phi=0 \f$ on the boundary.

\subsection CodeTut1Sec1_1 1.1 Our specific problem
For our specific problem we will choose \f$ q(\mathbf{x})=1 \f$ and \f$ \mathcal{D} \f$ a Cartesian domain,
either 1D, 2D or 3D, spanning \f$ [-1,+1] \f$ in each dimension. We can generate the mesh for this
problem using an input file
\code
--############################################### Setup mesh
chiMeshHandlerCreate()

mesh={}
N=20
L=2
xmin = -L/2
dx = L/N
for i=1,(N+1) do
    k=i-1
    mesh[i] = xmin + k*dx
end

--chiMeshCreateUnpartitioned3DOrthoMesh(mesh,mesh,mesh)
chiMeshCreateUnpartitioned2DOrthoMesh(mesh,mesh)
--chiMeshCreateUnpartitioned1DOrthoMesh(mesh)
chiVolumeMesherExecute();

--############################################### Set Material IDs
chiVolumeMesherSetMatIDToAll(0)
\endcode
This code can be used to generate any of the following meshes,
\image html CodingTutorials/OrthoMesh_1D_2D_3D.png "[From left to right] 1D, 2D, 3D orthogonal mesh" width=1200px

\subsection CodeTut1Sec1_2 1.2 The Finite Volume spatial discretization
To apply the Finite Volume spatial discretization we integrate Poisson's equation
over the volume of the cell
\f{eqnarray*}{
-\int_V \boldsymbol{\nabla} \cdot \bigl(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) dV
=
\int_V q(\mathbf{x}) dV
\f}
Next we apply the approximation that both \f$ \phi \f$ and \f$ q \f$ are constant
over a cell. Therefore, for cell \f$ P \f$, we have the source term
\f{eqnarray*}{
\int_V q(\mathbf{x}) dV = q_P V_P.
\f}
For the left-hand side we first apply the Gauss divergence theorem,
\f{eqnarray*}{
\int_V \boldsymbol{\nabla} \cdot \bigl(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) dV =
\int_S \mathbf{n} \cdot \bigl(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) dA
\f}
and for a discrete cell this surface integral can be discretized as the sum over all
the faces
\f{eqnarray*}{
\int_S \mathbf{n} \cdot \bigl(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr) dA =
\sum_f A_f \mathbf{n}_f \cdot \bigl(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr)_f.
\f}
We now still have to resolve \f$ \bigl(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr)_f \f$.
To comprehend the approximation we are about to make, consider the figure below.
We observe a cell \f$ P \f$, the present cell, and its neighbor, cell \f$ N \f$, at face \f$ f \f$.
The cell centroids are \f$ \mathbf{x}_P \f$ and \f$ \mathbf{x}_N \f$ respectively.

\image html CodingTutorials/NeighborXPN.png width=250px

We next form \f$ \mathbf{x}_{PN} \f$, as in the diagram, after which we approximate
the gradient of \f$ \phi \f$ at face \f$ f \f$ as
\f{eqnarray*}{
\bigl(\boldsymbol{\nabla} \phi(\mathbf{x})\bigr)_f =
\biggl(
\frac{\phi_N - \phi_P}{||\mathbf{x}_{PN}||}
\biggr)
\ \frac{\mathbf{x}_{PN}}{||\mathbf{x}_{PN}||}.
\f}
Note that this is essentially a scalar component
\f$ \frac{d\phi}{ds} = \frac{\phi_N - \phi_P}{||\mathbf{x}_{PN}||}\f$ to which we then give a direction
by normalizing \f$ \mathbf{x}_{PN} \f$ as \f$ \frac{\mathbf{x}_{PN}}{||\mathbf{x}_{PN}||} \f$.

Our discretized equations now become

\f{align*}{
\sum_f c_f \phi_P - \sum_f c_f \phi_N &= q_P V_P \\
c_f &= \biggl[
\mathbf{A}_f \boldsymbol{\cdot}
\frac{\mathbf{x}_{PN}}{||\mathbf{x}_{PN}||^2}
\biggr]_f
\f}
where \f$ \mathbf{A}_f \f$ is the area-vector of the face, i.e. \f$ A_f \mathbf{n}_f \f$, and
\f$ \phi_N=0 \f$ when face \f$ f \f$ is on a boundary (enforcing the
Dirichlet boundary condition).

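As a quick hand check of these coefficients (not part of the tutorial program), consider the uniform 2D mesh generated by the input file above, which has cell width \f$ h = L/N = 0.1 \f$. For every face of an interior cell, \f$ \mathbf{A}_f \f$ is parallel to \f$ \mathbf{x}_{PN} \f$ with \f$ ||\mathbf{A}_f|| = h \f$ (per unit depth) and \f$ ||\mathbf{x}_{PN}|| = h \f$, so
\f[
c_f = \frac{\mathbf{A}_f \boldsymbol{\cdot} \mathbf{x}_{PN}}{||\mathbf{x}_{PN}||^2}
    = \frac{h\,h}{h^2} = 1,
\quad \quad \quad
q_P V_P = (1)(h^2) = 0.01.
\f]
Each interior row of the linear system therefore has a diagonal value of 4 (one contribution per face), off-diagonal values of \f$ -1 \f$, and a right-hand side of \f$ 0.01 \f$. Boundary faces produce the same \f$ c_f \f$ because the virtual neighbor centroid introduced in \ref CodeTut1Sec6 also lies a distance \f$ h \f$ from \f$ \mathbf{x}_P \f$; only the off-diagonal entry is dropped, since \f$ \phi_N=0 \f$ there.
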
\section CodeTut1Sec2 2 Setting up the problem
In order to implement the code, we must first follow the steps in \ref DevManUsingLib.
Before proceeding, make sure you are fully acquainted with those steps.

For this tutorial we will deviate from the library tutorial by first
changing the CMake target and project name. Therefore, we pick a project folder
and create the `CMakeLists.txt` file, but we change the following lines.
\code
set(TARGET code_tut1)
project(CodeTut1 CXX)
\endcode

So now the entire `CMakeLists.txt` should look like this
\code
cmake_minimum_required(VERSION 3.12)

set(TARGET code_tut1)
project(CodeTut1 CXX)

#------------------------------------------------ DEPENDENCIES
if (NOT DEFINED CHI_TECH_DIR)
    if (NOT (DEFINED ENV{CHI_TECH_DIR}))
        message(FATAL_ERROR "***** CHI_TECH_DIR is not set *****")
    else()
        set(CHI_TECH_DIR "$ENV{CHI_TECH_DIR}")
    endif()
endif()
message(STATUS "CHI_TECH_DIR set to ${CHI_TECH_DIR}")

include("${CHI_TECH_DIR}/resources/Macros/Downstream.cmake")

set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_SOURCE_DIR}/lib")
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${PROJECT_SOURCE_DIR}/lib")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${PROJECT_SOURCE_DIR}/bin")

file (GLOB_RECURSE SOURCES "*.cc")
add_executable(${TARGET} "${SOURCES}")
target_link_libraries(${TARGET} ${CHI_LIBS})

file(WRITE ${PROJECT_SOURCE_DIR}/Makefile "subsystem:\n" "\t$(MAKE) -C chi_build \n\n"
        "clean:\n\t$(MAKE) -C chi_build clean\n")
\endcode

Next we create the source file which we will call `code_tut1.cc`, and for now
we will keep its contents the same as the library tutorial. Therefore `code_tut1.cc`
should look like this
\code
#include "chi_runtime.h"
#include "chi_log.h"

int main(int argc, char* argv[])
{
    chi::Initialize(argc,argv);
    chi::RunBatch(argc, argv);

    chi::log.Log() << "Hello World!";

    //We will add code here

    chi::Finalize();
    return 0;
}
\endcode

To compile this program we first have to set the `CHI_TECH_DIR` environment
variable. This might be different for everyone but should generally look like this
\code
export CHI_TECH_DIR=/Users/drjanisawesome/codes/ChiTech/chi-tech/
\endcode
Note: the directory we want to specify here must contain `bin/` (in other words,
it shouldn't be `bin/` itself).

Next we create a directory for running `cmake`. Inside your project folder, create
a folder called `chi_build` and `cd` into it.
\code
mkdir chi_build
cd chi_build
\endcode

Then run `cmake` pointing to the folder that contains the `CMakeLists.txt` file,
which in this case is the parent folder of the one we are in.
\code
cmake ../
\endcode

After this you should be able to make the project.
\code
make
\endcode
after which you should see the following
\verbatim
[ 50%] Building CXX object CMakeFiles/code_tut1.dir/code_tut1.cc.o
[100%] Linking CXX executable ../bin/code_tut1
[100%] Built target code_tut1
\endverbatim

As a test you can try to execute the project with no arguments, which should look
like this
\verbatim
../bin/code_tut1
[0] 2022-11-14 13:02:43 Running ChiTech in batch-mode with 1 processes.
[0] ChiTech version 1.0.2
[0] ChiTech number of arguments supplied: 0
[0]
[0] Usage: exe inputfile [options values]
[0]
[0] -v Level of verbosity. Default 0. Can be either 0, 1 or 2.
[0] a=b Executes argument as a lua string. i.e. x=2 or y=[["string"]]
[0] -allow_petsc_error_handler Allow petsc error handler.
[0]
[0]
[0]
[0] Final program time 00:00:00
[0] 2022-11-14 13:02:43 ChiTech finished execution of
[0] Hello World!
\endverbatim

\section CodeTut1Sec3 3 Getting the grid
First we remove the lines
\code
chi::log.Log() << "Hello World!";

//We will add code here
\endcode

Next we include the headers for access to the `chi_mesh::MeshHandler` and the
grid, aka `chi_mesh::MeshContinuum`
\code
#include "mesh/MeshHandler/chi_meshhandler.h"
#include "mesh/MeshContinuum/chi_meshcontinuum.h"
\endcode

Next we add the following lines of code:
\code
chi::log.Log() << "Coding Tutorial 1";

//============================================= Get grid
auto grid_ptr = chi_mesh::GetCurrentHandler().GetGrid();
const auto& grid = *grid_ptr;

chi::log.Log() << "Global num cells: " << grid.GetGlobalNumberOfCells();
\endcode
This is the default way of obtaining a grid loaded into the ChiTech state at any
moment, normally via an input file.

When you compile and run this code it ought to fail by throwing an exception
that looks like this
\verbatim
terminate called after throwing an instance of 'std::logic_error'
what(): chi_mesh::GetCurrentHandler: No handlers on stack
Abort trap: 6
\endverbatim
This means the code was not loaded with a mesh-handler and therefore no grid is
defined. To fix this we first create a `mesh.lua` input file within the `chi_build`
directory.
\code
>mesh.lua
\endcode
Use an editor to add the following lines to `mesh.lua`
\code
--############################################### Setup mesh
chiMeshHandlerCreate()

mesh={}
N=20
L=2
xmin = -L/2
dx = L/N
for i=1,(N+1) do
    k=i-1
    mesh[i] = xmin + k*dx
end

--chiMeshCreateUnpartitioned3DOrthoMesh(mesh,mesh,mesh)
chiMeshCreateUnpartitioned2DOrthoMesh(mesh,mesh)
--chiMeshCreateUnpartitioned1DOrthoMesh(mesh)
chiVolumeMesherExecute();

--############################################### Set Material IDs
chiVolumeMesherSetMatIDToAll(0)
\endcode
This is known as a ChiTech lua-console input file. It contains Lua code.

To run the program with this input file we simply add it to the command line
\code
../bin/code_tut1 mesh.lua
\endcode

If everything went well then you should see the following
\verbatim
[0] Parsing argument 1 mesh.lua
[0] 2022-11-14 13:14:52 Running ChiTech in batch-mode with 1 processes.
[0] ChiTech version 1.0.2
[0] ChiTech number of arguments supplied: 1
[0] Computing cell-centroids.
[0] Done computing cell-centroids.
[0] Checking cell-center-to-face orientations
[0] Done checking cell-center-to-face orientations
[0] 00:00:00 Establishing cell connectivity.
[0] 00:00:00 Vertex cell subscriptions complete.
[0] 00:00:00 Surpassing cell 40 of 400 (10%)
[0] 00:00:00 Surpassing cell 80 of 400 (20%)
\\ STUFF
\\ STUFF
\\ STUFF
[0]
[0] Final program time 00:00:00
[0] 2022-11-14 13:14:52 ChiTech finished execution of mesh.lua
[0] Coding Tutorial 1
[0] Global num cells: 400
\endverbatim

\section CodeTut1Sec4 4 Initializing the Finite Volume Spatial Discretization
All spatial discretizations in ChiTech are based on a grid. Right now we want
`chi_math::SpatialDiscretization_FV`, which is the Finite Volume spatial discretization.
It will place only a single node at the centroid of each cell.

To access this discretization we must first include the header for
`chi_math::SpatialDiscretization_FV`,
\code
#include "math/SpatialDiscretization/FiniteVolume/fv.h"
\endcode

Next we add the following lines of code
\code
//============================================= Make SDM
typedef std::shared_ptr<chi_math::SpatialDiscretization> SDMPtr;
SDMPtr sdm_ptr = chi_math::SpatialDiscretization_FV::New(grid_ptr);
const auto& sdm = *sdm_ptr;

const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;

const size_t num_local_dofs = sdm.GetNumLocalDOFs(OneDofPerNode);
const size_t num_globl_dofs = sdm.GetNumGlobalDOFs(OneDofPerNode);

chi::log.Log() << "Num local DOFs: " << num_local_dofs;
chi::log.Log() << "Num globl DOFs: " << num_globl_dofs;
\endcode

What we did here is to first define the shared-pointer type
`std::shared_ptr<chi_math::SpatialDiscretization>` to be just `SDMPtr`, which greatly reduces
the effort on the next line, where we actually instantiate the spatial discretization.
Notice the use of `chi_math::SpatialDiscretization_FV::New`. All of the ChiTech
spatial discretizations can only be created as shared pointers; the need for this
will only become obvious when you write your own solvers.

The creation of a spatial discretization involves a lot of steps. The first thing
the spatial discretization (SD) will do is to create cell-mappings. These contain
all the necessary information for each cell to help build codes, e.g., the number
of nodes on a cell, its volume, face areas, etc. The second thing the SD does is
to order the nodes in parallel in such a way that we can easily map the nodes either
globally or locally.

The basic SD operates on the notion of `nodes`. A node is a place where a specific
component of an `unknown` is located. Since it knows the underlying node structure
it can provide index mappings for different unknown-structures. For example, if we
stack velocity-x, velocity-y and velocity-z onto a node the SD only needs to know
the structure of the unknowns, traditionally called Degrees-of-Freedom or DOFs, in
order to provide index-mapping. To keep things simple, all SDs have a unitary
`chi_math::UnknownManager` as a member called `UNITARY_UNKNOWN_MANAGER`. This
unknown manager allows us to map indices assuming only one DOF per node. We obtained
a reference to this `chi_math::UnknownManager` object with the line
\code
const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;
\endcode

With this in hand we can now query the SD to provide us the number of local and
global DOFs. The local DOFs are those that are stored on the local processor whereas
the global DOFs encompass all the DOFs across all processors. The way we ran the
code above is only in serial. Therefore, when we do this again with this new code
added, we will see the following new output:
\verbatim
[0] Num local DOFs: 400
[0] Num globl DOFs: 400
\endverbatim
Since the code runs in serial mode the numbers of local and global DOFs are reported
to be the same.

To run this code in parallel we change our invocation, for execution with 3 processors,
to
\code
mpiexec -np 3 ../bin/code_tut1 mesh.lua
\endcode
which now produces a different output:
\verbatim
[0] Num local DOFs: 131
[0] Num globl DOFs: 400
\endverbatim
Notice the global number of DOFs remains the same. Only the number of local nodes
has changed.

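If you want to see the node-to-matrix-row mapping for yourself, the short sketch
below can be dropped in after the code above. It is illustrative only and not part
of the final program; it uses no calls beyond those already introduced in this tutorial.
\code
//============================================= Inspect the DOF mapping (sketch)
//Prints the global matrix-row index assigned to the single finite-volume
//node of the first few local cells.
size_t local_cell_count = 0;
for (const auto& cell : grid.local_cells)
{
  const int64_t dof_map = sdm.MapDOF(cell, 0); //node 0 = the cell-centroid node
  if (local_cell_count < 5)                    //only show the first few
    chi::log.Log() << "Local cell " << local_cell_count
                   << " maps to global DOF " << dof_map;
  ++local_cell_count;
}
\endcode
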
\section CodeTut1Sec5 5 Creating and Initializing PETSc matrices and vectors
ChiTech has several `macro`-type functions for handling PETSc objects. All of these
are accessed via the `chi_math::PETScUtils` namespace for which we need to include
the header
\code
#include "math/PETScUtils/petsc_utils.h"
\endcode

Next we add the following code:
\code
//============================================= Initializes Mats and Vecs
const auto n = static_cast<int64_t>(num_local_dofs);
const auto N = static_cast<int64_t>(num_globl_dofs);
Mat A;
Vec x,b;

A = chi_math::PETScUtils::CreateSquareMatrix(n,N);
x = chi_math::PETScUtils::CreateVector(n,N);
b = chi_math::PETScUtils::CreateVector(n,N);

std::vector<int64_t> nodal_nnz_in_diag;
std::vector<int64_t> nodal_nnz_off_diag;
sdm.BuildSparsityPattern(nodal_nnz_in_diag,nodal_nnz_off_diag,OneDofPerNode);

chi_math::PETScUtils::InitMatrixSparsity(A,
                                         nodal_nnz_in_diag,
                                         nodal_nnz_off_diag);
\endcode
Notice that we made `int64_t` equivalents of `num_local_dofs` and its global
counterpart. This is because PETSc operates on `int64_t`. The actual creation of
`A`, `x`, and `b` is self-explanatory, noting that each needs a local size as well
as a global size.

The next two pieces of code after the creation of the PETSc entities allow us to
accurately and efficiently set the matrix sparsity pattern, in turn allowing PETSc to
assemble the sparse matrix efficiently. The sparsity pattern is defined per row
and is split into entries locally stored ("in" the diagonal block) and entries
stored elsewhere ("off" the diagonal block). We can get this information from our
ChiTech SDs by using the `BuildSparsityPattern` routine. This routine populates
`nodal_nnz_in_diag` and `nodal_nnz_off_diag`, according to an unknown-structure
(which in our case is `OneDofPerNode`).

Finally we use our sparsity information to set the PETSc matrix's sparsity
pattern using `chi_math::PETScUtils::InitMatrixSparsity`.

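For context, the sketch below shows roughly what this kind of per-row preallocation
looks like in raw PETSc for a parallel AIJ matrix. It is only an illustration of the
idea, not ChiTech's actual implementation, and it assumes the matrix has the MPIAIJ type.
\code
//Sketch only: per-row preallocation with raw PETSc calls. The counts are
//copied because PETSc expects PetscInt values.
std::vector<PetscInt> d_nnz(nodal_nnz_in_diag.begin(), nodal_nnz_in_diag.end());
std::vector<PetscInt> o_nnz(nodal_nnz_off_diag.begin(), nodal_nnz_off_diag.end());

MatMPIAIJSetPreallocation(A, 0, d_nnz.data(), 0, o_nnz.data());
MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
\endcode
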
\section CodeTut1Sec6 6 Assembling the matrix and right-hand-side
The code to assemble the matrix is as follows.
\code
//============================================= Assemble the system
chi::log.Log() << "Assembling system: ";
for (const auto& cell : grid.local_cells)
{
  const auto& cell_mapping = sdm.GetCellMapping(cell);
  const int64_t imap = sdm.MapDOF(cell,0);

  const auto& xp = cell.centroid;
  const double V = cell_mapping.CellVolume();

  size_t f=0;
  for (const auto& face : cell.faces)
  {
    const auto Af = face.normal * cell_mapping.FaceArea(f);

    if (face.has_neighbor)
    {
      const auto& adj_cell = grid.cells[face.neighbor_id];
      const int64_t jnmap = sdm.MapDOF(adj_cell,0);

      const auto& xn = adj_cell.centroid;

      const auto xpn = xn - xp;

      const auto cf = Af.Dot(xpn) / xpn.NormSquare();

      MatSetValue(A, imap, imap , cf, ADD_VALUES);
      MatSetValue(A, imap, jnmap, -cf, ADD_VALUES);
    }
    else
    {
      const auto& xn = xp + 2.0*(face.centroid - xp);
      const auto xpn = xn - xp;

      const auto cf = Af.Dot(xpn) / xpn.NormSquare();

      MatSetValue(A, imap, imap , cf, ADD_VALUES);
    }
    ++f;
  }//for face

  VecSetValue(b, imap, 1.0*V, ADD_VALUES);
}//for cell i

chi::log.Log() << "Global assembly";

MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
VecAssemblyBegin(b);
VecAssemblyEnd(b);

chi::log.Log() << "Done global assembly";
\endcode
The first item to note is the loop over local cells. ChiTech can only loop over local
cells; for more information on this see \ref devman_meshes_sec_1.

Once we have a reference to a local cell we can obtain the SD's mapping of that cell
using
\code
const auto& cell_mapping = sdm.GetCellMapping(cell);
\endcode
We can also immediately map the cell's \f$ \phi_P \f$ global location in the
parallel matrix/vector by making a call to
`chi_math::SpatialDiscretization::MapDOF`.
\code
const int64_t imap = sdm.MapDOF(cell,0);
\endcode
This function takes two arguments: a cell reference and the node number, which,
for Finite Volume, will always be zero.

Finally, before looping over the faces, we grab the cell centroid and volume.

The loop over faces is split into two cases, one for internal faces and one for
boundary faces. Both cases need the face area-vector, so we construct that first
\code
for (const auto& face : cell.faces)
{
  const auto Af = face.normal * cell_mapping.FaceArea(f);
  //etc.
\endcode

Whether a face is internal, i.e., has a neighboring cell,
is indicated by the member `has_neighbor`. Additionally the neighbor's global-id
will be stored in the member `neighbor_id`. If the face does not have a neighbor
then `neighbor_id` doubles as the boundary-id if boundary-ids are used. All cells
that at minimum share a vertex with local cells are classified as `Ghost`-cells
in ChiTech. Since a neighbor can be either a local or ghost cell, we opt here
to get a reference to the neighbor cell using its global-id, for which we use
`grid.cells`, instead of `grid.local_cells` which only uses local-ids.
\code
const auto& adj_cell = grid.cells[face.neighbor_id];
\endcode
Once we have a reference to the adjacent cell we can map the address of its
unknown \f$ \phi_N \f$ as
\code
const int64_t jnmap = sdm.MapDOF(adj_cell,0);
\endcode
We then compute
\f{align*}{
c_f = \biggl[
\mathbf{A}_f \boldsymbol{\cdot}
\frac{\mathbf{x}_{PN}}{||\mathbf{x}_{PN}||^2}
\biggr]_f
\f}
with the code
\code
const auto& xn = adj_cell.centroid;

const auto xpn = xn - xp;

const auto cf = Af.Dot(xpn) / xpn.NormSquare();
\endcode
after which we set the matrix entries associated with \f$ \phi_P \f$ and
\f$ \phi_N \f$ using the PETSc commands
\code
MatSetValue(A, imap, imap , cf, ADD_VALUES);
MatSetValue(A, imap, jnmap, -cf, ADD_VALUES);
\endcode

For boundary faces we essentially do the same; however, we do not have an adjacent
cell or \f$ \mathbf{x}_N \f$. Therefore we extrapolate a virtual boundary cell-centroid
using the face-centroid, \f$ \mathbf{x}_f \f$, as
\f[
\mathbf{x}_N = \mathbf{x}_P + 2 ( \mathbf{x}_f - \mathbf{x}_P )
\f]
and then
\code
const auto& xn = xp + 2.0*(face.centroid - xp);
const auto xpn = xn - xp;

const auto cf = Af.Dot(xpn) / xpn.NormSquare();
\endcode

Finally, since there is no neighbor-cell entry, we only have a diagonal entry
\code
MatSetValue(A, imap, imap , cf, ADD_VALUES);
\endcode

After the face loops we then set the right-hand side using
\code
VecSetValue(b, imap, 1.0*V, ADD_VALUES);
\endcode
where the `1.0` represents \f$ q(\mathbf{x})=1 \f$.

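If the source were not constant, this same line generalizes naturally: evaluate the
source at the cell centroid and scale by the cell volume. The snippet below is a
hypothetical illustration only; `q_function` is not part of this tutorial.
\code
//Hypothetical spatially varying source, evaluated at the cell centroid xp.
//(A generic lambda is used so the centroid type does not need to be named.)
auto q_function = [](const auto& xyz) { return 1.0 + 0.1*xyz.x; };

VecSetValue(b, imap, q_function(xp)*V, ADD_VALUES);
\endcode
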
We finish parallel assembly with the following calls
\code
MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
VecAssemblyBegin(b);
VecAssemblyEnd(b);
\endcode

\section CodeTut1Sec7 7 Solving the system with a Krylov Subspace solver
Most of the following code is self-explanatory.
\code
//============================================= Create Krylov Solver
chi::log.Log() << "Solving: ";
auto petsc_solver =
  chi_math::PETScUtils::CreateCommonKrylovSolverSetup(
    A,               //Matrix
    "FVDiffSolver",  //Solver name
    KSPCG,           //Solver type
    PCGAMG,          //Preconditioner type
    1.0e-6,          //Relative residual tolerance
    1000);           //Max iterations

//============================================= Solve
KSPSolve(petsc_solver.ksp,b,x);

chi::log.Log() << "Done solving";
\endcode
The solver type here was specified as `KSPCG`, which represents the conjugate-gradient
method. The preconditioner `PCGAMG` represents Geometric and/or Algebraic MultiGrid.
The combination of this solver and preconditioner is known to be one of the most efficient
ways to solve these types of systems.

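If you want to confirm that the solve actually converged, PETSc can be queried
directly after `KSPSolve`. This is an optional sketch, not part of the tutorial program.
\code
//Optional sketch: query the Krylov solver after KSPSolve. A negative
//converged-reason value indicates divergence or an iteration-limit stop.
PetscInt iteration_count;
KSPConvergedReason reason;
KSPGetIterationNumber(petsc_solver.ksp, &iteration_count);
KSPGetConvergedReason(petsc_solver.ksp, &reason);

chi::log.Log() << "KSP iterations: " << iteration_count
               << " converged reason: " << reason;
\endcode
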
After this solve-step we want to get an STL representation of the PETSc vector so
that we can think about visualizing the solution. We can get a local copy of the
PETSc vector using the code
\code
//============================================= Extract PETSc vector
std::vector<double> field(num_local_dofs, 0.0);
sdm.LocalizePETScVector(x,field,OneDofPerNode);

//============================================= Clean up
KSPDestroy(&petsc_solver.ksp);

VecDestroy(&x);
VecDestroy(&b);
MatDestroy(&A);

chi::log.Log() << "Done cleanup";
\endcode
Here the SD took care of appropriately copying from the PETSc vector.
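For the one-DOF-per-node case used here, that copy is conceptually just a read of the
locally owned PETSc array, roughly as in the sketch below (an illustration only,
assuming a real-valued PETSc build; it is not the actual ChiTech routine).
\code
//Sketch only: copying the locally owned values out of the PETSc vector by hand.
const PetscScalar* x_raw;
VecGetArrayRead(x, &x_raw);
std::vector<double> field_copy(x_raw, x_raw + num_local_dofs);
VecRestoreArrayRead(x, &x_raw);
\endcode
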
After this is completed we have no need for any of the PETSc entities and therefore
we destroy them.

\section CodeTut1Sec8 8 Visualizing the solution
ChiTech uses the notion of `FieldFunctions` and the visualization toolkit, `VTK`,
to visualize solutions. In order to gain access to `chi_physics::FieldFunction` we
need to include the header
\code
#include "physics/FieldFunction/fieldfunction2.h"
\endcode

Next we create the field function using the code
\code
//============================================= Create Field Function
auto ff = std::make_shared<chi_physics::FieldFunction>(
  "Phi",
  sdm_ptr,
  chi_math::Unknown(chi_math::UnknownType::SCALAR)
);
\endcode
Note here that a field function simply needs a name, a SD, and an unknown structure.

After this is created we can update the field function with a field vector
\code
ff->UpdateFieldVector(field);
\endcode

And finally we can export the solution to VTK with
\code
ff->ExportToVTK("CodeTut1_FV");
\endcode
which takes, as an argument, the base-name of the VTK output files. VTK will
utilize the parallel nature of the simulation to generate a set of files
\verbatim
CodeTut1_FV.pvtu
CodeTut1_FV_0.vtu
CodeTut1_FV_1.vtu
CodeTut1_FV_2.vtu
\endverbatim
The structure here is that each processor writes a `.vtu` file. The processor-id
is appended to the base-name before the `.vtu` extension is added. These files contain
the actual data. An additional proxy-file, used to link all the data together, is written
as the base-name with `.pvtu` appended to it. This `.pvtu` file can be opened in popular
visualization tools such as `VisIt` and `Paraview` to visualize the solution.

For this tutorial we used Paraview.

\image html CodingTutorials/Solution_1D_2D_3D.png "[From left to right] 1D, 2D, 3D solutions with a coarse mesh" width=1200px

And on a finer mesh, with the 3D case being 1 million cells:
\image html CodingTutorials/Solution_1D_2D_3D_fine.png "[From left to right] 1D, 2D, 3D solutions with a fine mesh" width=1200px

\section CodeTut1Sec9 9 The complete program

\code
#include "chi_runtime.h"
#include "chi_log.h"

#include "mesh/MeshHandler/chi_meshhandler.h"
#include "mesh/MeshContinuum/chi_meshcontinuum.h"

#include "math/SpatialDiscretization/FiniteVolume/fv.h"
#include "math/PETScUtils/petsc_utils.h"

#include "physics/FieldFunction/fieldfunction2.h"

int main(int argc, char* argv[])
{
  chi::Initialize(argc,argv);
  chi::RunBatch(argc, argv);

  chi::log.Log() << "Coding Tutorial 1";

  //============================================= Get grid
  auto grid_ptr = chi_mesh::GetCurrentHandler().GetGrid();
  const auto& grid = *grid_ptr;

  chi::log.Log() << "Global num cells: " << grid.GetGlobalNumberOfCells();

  //============================================= Make SDM
  typedef std::shared_ptr<chi_math::SpatialDiscretization> SDMPtr;
  SDMPtr sdm_ptr = chi_math::SpatialDiscretization_FV::New(grid_ptr);
  const auto& sdm = *sdm_ptr;

  const auto& OneDofPerNode = sdm.UNITARY_UNKNOWN_MANAGER;

  const size_t num_local_dofs = sdm.GetNumLocalDOFs(OneDofPerNode);
  const size_t num_globl_dofs = sdm.GetNumGlobalDOFs(OneDofPerNode);

  chi::log.Log() << "Num local DOFs: " << num_local_dofs;
  chi::log.Log() << "Num globl DOFs: " << num_globl_dofs;

  //============================================= Initializes Mats and Vecs
  const auto n = static_cast<int64_t>(num_local_dofs);
  const auto N = static_cast<int64_t>(num_globl_dofs);
  Mat A;
  Vec x,b;

  A = chi_math::PETScUtils::CreateSquareMatrix(n,N);
  x = chi_math::PETScUtils::CreateVector(n,N);
  b = chi_math::PETScUtils::CreateVector(n,N);

  std::vector<int64_t> nodal_nnz_in_diag;
  std::vector<int64_t> nodal_nnz_off_diag;
  sdm.BuildSparsityPattern(nodal_nnz_in_diag,nodal_nnz_off_diag,OneDofPerNode);

  chi_math::PETScUtils::InitMatrixSparsity(A,
                                           nodal_nnz_in_diag,
                                           nodal_nnz_off_diag);

  //============================================= Assemble the system
  chi::log.Log() << "Assembling system: ";
  for (const auto& cell : grid.local_cells)
  {
    const auto& cell_mapping = sdm.GetCellMapping(cell);
    const int64_t imap = sdm.MapDOF(cell,0);

    const auto& xp = cell.centroid;
    const double V = cell_mapping.CellVolume();

    size_t f=0;
    for (const auto& face : cell.faces)
    {
      const auto Af = face.normal * cell_mapping.FaceArea(f);

      if (face.has_neighbor)
      {
        const auto& adj_cell = grid.cells[face.neighbor_id];
        const int64_t jnmap = sdm.MapDOF(adj_cell,0);

        const auto& xn = adj_cell.centroid;

        const auto xpn = xn - xp;

        const auto cf = Af.Dot(xpn) / xpn.NormSquare();

        MatSetValue(A, imap, imap , cf, ADD_VALUES);
        MatSetValue(A, imap, jnmap, -cf, ADD_VALUES);
      }
      else
      {
        const auto& xn = xp + 2.0*(face.centroid - xp);
        const auto xpn = xn - xp;

        const auto cf = Af.Dot(xpn) / xpn.NormSquare();

        MatSetValue(A, imap, imap , cf, ADD_VALUES);
      }
      ++f;
    }//for face

    VecSetValue(b, imap, 1.0*V, ADD_VALUES);
  }//for cell i

  chi::log.Log() << "Global assembly";

  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  VecAssemblyBegin(b);
  VecAssemblyEnd(b);

  chi::log.Log() << "Done global assembly";

  //============================================= Create Krylov Solver
  chi::log.Log() << "Solving: ";
  auto petsc_solver =
    chi_math::PETScUtils::CreateCommonKrylovSolverSetup(
      A,               //Matrix
      "FVDiffSolver",  //Solver name
      KSPCG,           //Solver type
      PCGAMG,          //Preconditioner type
      1.0e-6,          //Relative residual tolerance
      1000);           //Max iterations

  //============================================= Solve
  KSPSolve(petsc_solver.ksp,b,x);

  chi::log.Log() << "Done solving";

  //============================================= Extract PETSc vector
  std::vector<double> field(num_local_dofs, 0.0);
  sdm.LocalizePETScVector(x,field,OneDofPerNode);

  //============================================= Clean up
  KSPDestroy(&petsc_solver.ksp);

  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&A);

  chi::log.Log() << "Done cleanup";

  //============================================= Create Field Function
  auto ff = std::make_shared<chi_physics::FieldFunction>(
    "Phi",
    sdm_ptr,
    chi_math::Unknown(chi_math::UnknownType::SCALAR)
  );

  ff->UpdateFieldVector(field);

  ff->ExportToVTK("CodeTut1_FV");

  chi::Finalize();
  return 0;
}
\endcode
*/