Chi-Tech
lagC_02_ordering.cc
#include "LagrangeContinuous.h" // class declaration (header name assumed)

#include "mesh/MeshContinuum/chi_meshcontinuum.h" // chi_mesh::MeshContinuum (path assumed)

#include "chi_runtime.h"
#include "chi_log.h"
#include "chi_mpi.h"

#include "utils/chi_timer.h"
#include "mpi/chi_mpi_utils_map_all2all.h" // chi_mpi_utils::MapAllToAll (path assumed)

#include <algorithm>

namespace chi_math::spatial_discretization
{

// ###################################################################
/**Reorders the nodes for parallel computation in a Continuous
 * Finite Element calculation.*/
void LagrangeContinuous::OrderNodes() // (class/method name assumed)
{
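  // Overall approach:
  //  1) collect every node id touched by a local cell,
  //  2) record which partitions subscribe to each of those nodes,
  //  3) let the lowest-numbered subscribing partition own each node,
  //  4) gather owned-node counts so each location gets a contiguous
  //     block of global node ids, and
  //  5) query the owning locations for the global ids of non-local nodes.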
  const std::string fname = __FUNCTION__;
  //============================================= Build set of local scope nodes
  // ls_node_id = local scope node id
  std::set<uint64_t> ls_node_ids_set;
  for (const auto& cell : ref_grid_.local_cells)
    for (uint64_t node_id : cell.vertex_ids_)
      ls_node_ids_set.insert(node_id);
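  // ls_node_ids_set now holds every vertex id touched by a locally owned
  // cell, whether or not this partition will end up owning that node.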

  //============================================ Build node partition
  //                                             subscriptions
  // psub = partition subscription
  // Multiple partitions can subscribe to a given
  // node. We build this list here.
  // We start by adding the current location id
  // as the first subscription
  typedef std::set<uint64_t> PSUBS;
  std::map<uint64_t, PSUBS> ls_node_ids_psubs;
  for (const uint64_t node_id : ls_node_ids_set)
    ls_node_ids_psubs[node_id] = {static_cast<uint64_t>(Chi::mpi.location_id)};

  // Now we add the partitions associated with the
  // ghost cells.
  const auto ghost_cell_ids = ref_grid_.cells.GetGhostGlobalIDs();
  for (const uint64_t ghost_id : ghost_cell_ids)
  {
    const auto& ghost_cell = ref_grid_.cells[ghost_id];
    for (const uint64_t vid : ghost_cell.vertex_ids_)
      ls_node_ids_psubs[vid].insert(ghost_cell.partition_id_);
  } // for ghost_id
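  // Each local-scope node now carries this location's id plus the partition
  // id of every ghost cell that shares it.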

  //============================================= Build lists of local- and
  //                                              non-local nodes
  // The lowest partition-# owns a node.
  std::vector<uint64_t> local_node_ids;
  std::map<uint64_t, std::vector<uint64_t>> nonlocal_node_ids_map;
  for (const uint64_t node_id : ls_node_ids_set)
  {
    uint64_t smallest_partition_id = Chi::mpi.location_id;
    for (const uint64_t pid : ls_node_ids_psubs[node_id]) // pid = partition id
      smallest_partition_id = std::min(smallest_partition_id, pid);

    if (smallest_partition_id == Chi::mpi.location_id)
      local_node_ids.push_back(node_id);
    else
      nonlocal_node_ids_map[smallest_partition_id].push_back(node_id);
  }
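  // Example: a node subscribed to by partitions {1, 3} is owned by
  // partition 1; on location 3 it is appended to nonlocal_node_ids_map[1]
  // so that location 1 can be queried for its global id below.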

  //============================================= Communicate node counts
  const uint64_t local_num_nodes = local_node_ids.size();
  locJ_block_size_.assign(Chi::mpi.process_count, 0);
  MPI_Allgather(&local_num_nodes,        // sendbuf
                1,
                MPI_UINT64_T,            // sendcount, sendtype
                locJ_block_size_.data(), // recvbuf
                1,
                MPI_UINT64_T,            // recvcount, recvtype
                Chi::mpi.comm);          // comm
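  // After the all-gather, locJ_block_size_[j] holds the number of nodes
  // owned by location j, on every location.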

  //============================================= Build block addresses
  locJ_block_address_.assign(Chi::mpi.process_count, 0);
  uint64_t global_num_nodes = 0;
  for (int j = 0; j < Chi::mpi.process_count; ++j)
  {
    locJ_block_address_[j] = global_num_nodes;
    global_num_nodes += locJ_block_size_[j];
  }
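  // locJ_block_address_ is the exclusive prefix sum of locJ_block_size_,
  // e.g. sizes {3, 5, 2} give addresses {0, 3, 8} and global_num_nodes = 10,
  // so each location owns a contiguous block of global node ids.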

  local_block_address_ = locJ_block_address_[Chi::mpi.location_id]; // (inferred) start of this location's owned block

  local_base_block_size_ = local_num_nodes;
  globl_base_block_size_ = global_num_nodes;

  //============================================= Build node mapping for local
  //                                              nodes
  node_mapping_.clear();
  for (uint64_t i = 0; i < local_num_nodes; ++i)
    node_mapping_[local_node_ids[i]] =
      static_cast<int64_t>(local_block_address_ + i);
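  // Locally owned nodes thus receive the consecutive global ids
  // local_block_address_, local_block_address_ + 1, ... in the order they
  // appear in local_node_ids.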

  //============================================= Communicate nodes in need
  //                                              of mapping
  std::map<uint64_t, std::vector<uint64_t>> query_node_ids =
    chi_mpi_utils::MapAllToAll(nonlocal_node_ids_map, MPI_UINT64_T);
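  // MapAllToAll routes each per-destination list to its target location:
  // the keys of query_node_ids are the locations that queried this location,
  // and the values are the node ids they need mapped.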

  //============================================= Map the query nodes
  std::map<uint64_t, std::vector<int64_t>> mapped_node_ids;
  for (const auto& key_value : query_node_ids)
  {
    const uint64_t& pid = key_value.first;
    const auto& node_list = key_value.second;

    for (const uint64_t node_id : node_list)
      if (node_mapping_.count(node_id) == 0)
        throw std::logic_error("Error mapping query node.");
      else
      {
        const int64_t mapping = node_mapping_.at(node_id);
        mapped_node_ids[pid].push_back(mapping);
      }
  } // for query location and nodes
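  // Replies are appended in the same order as the incoming node_list, so the
  // requesting location can match mappings to node ids index-for-index.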

  //============================================= Communicate back the mappings
  std::map<uint64_t, std::vector<int64_t>> nonlocal_node_ids_map_mapped =
    chi_mpi_utils::MapAllToAll(mapped_node_ids, MPI_INT64_T);
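  // The roles are now reversed: nonlocal_node_ids_map_mapped is keyed by
  // owning location and lists the global ids for the nodes queried above,
  // in the order they were requested.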

  //============================================= Processing the mapping for
  //                                              non-local nodes
  ghost_node_mapping_.clear();
  try
  {
    for (const auto& pid_node_ids : nonlocal_node_ids_map)
    {
      const uint64_t& pid = pid_node_ids.first;
      const auto& node_list = pid_node_ids.second;
      const auto& mappings = nonlocal_node_ids_map_mapped.at(pid);

      if (mappings.size() != node_list.size())
        throw std::logic_error("mappings.size() != node_list.size()");

      const size_t num_nodes = node_list.size();
      for (size_t i = 0; i < num_nodes; ++i)
      {
        node_mapping_[node_list[i]] = mappings[i];
        ghost_node_mapping_[node_list[i]] = mappings[i];
      }
    } // for pid and non-local id
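    // Both the full node mapping and the ghost-only mapping now contain the
    // owner-assigned global ids for this location's non-local nodes.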
  }
  catch (const std::out_of_range& oor)
  {
    throw std::out_of_range(fname + ": Processing non-local mapping failed.");
  }
  catch (const std::logic_error& lerr)
  {
    throw std::logic_error(fname + ": Processing non-local mapping failed. " +
                           lerr.what());
  }
}

} // namespace chi_math::spatial_discretization