Chi-Tech
vector_ghost_communicator.cc

#include "vector_ghost_communicator.h" // own header; exact path not shown in this listing

#include "mpi/chi_mpi_utils.h"

#include "chi_log_exceptions.h" // assumed header providing ChiInvalidArgumentIf/ChiLogicalErrorIf

#include <map>
#include <string>
#include <algorithm>

#define scint64_t static_cast<int64_t>

namespace chi_math
{

// ######################################################################
VectorGhostCommunicator::VectorGhostCommunicator(
  const uint64_t local_size,
  const uint64_t global_size,
  const std::vector<int64_t>& ghost_ids,
  const MPI_Comm communicator)
  : local_size_(local_size),
    global_size_(global_size),
    ghost_ids_(ghost_ids),
    comm_(communicator),
    location_id_(chi_mpi_utils::GetLocationID(communicator)),
    process_count_(chi_mpi_utils::GetProcessCount(communicator)),
    extents_(chi_mpi_utils::BuildLocationExtents(local_size, communicator)),
    cached_parallel_data_(MakeCachedParallelData())
{
}
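
// Note: extents_ (from chi_mpi_utils::BuildLocationExtents) is used throughout
// this file as a partition of the global index space into process-contiguous
// blocks, i.e. process p owns the global ids in [extents_[p], extents_[p + 1])
// (see the ownership check in MakeCachedParallelData and FindOwnerPID below).
// As a purely illustrative example, three processes with local sizes 4, 5 and
// 3 would give extents_ = {0, 4, 9, 12}, so global id 7 belongs to process 1.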

VectorGhostCommunicator::CachedParallelData
VectorGhostCommunicator::MakeCachedParallelData()
{
  // Construct a mapping between processes and the ghost indices
  // that belong to them. This information yields what this process
  // needs to receive from other processes.
  std::map<int, std::vector<int64_t>> recv_map;
  for (int64_t ghost_id : ghost_ids_)
    recv_map[FindOwnerPID(ghost_id)].push_back(ghost_id);

  // This process will receive data in a process-contiguous manner,
  // so a mapping is needed from each ghost id to its position in
  // the received data.
  std::map<int64_t, size_t> ghost_to_recv_map;
  size_t count = 0;
  for (const auto& [pid, gids] : recv_map)
    for (const int64_t gid : gids)
      ghost_to_recv_map[gid] = count++;

  std::vector<int> sendcounts;
  std::vector<int> senddispls;
  std::vector<int> recvcounts;
  std::vector<int> recvdispls;

  // Now, the structure of the data to be received is developed. This
  // involves determining the amount of data received from each process
  // and the starting position of that process's data in the receive
  // buffer.
  int total_recvcounts = 0;
  recvcounts.assign(process_count_, 0);
  recvdispls.assign(process_count_, 0);
  for (const auto& [pid, gids] : recv_map)
  {
    recvcounts[pid] = static_cast<int>(gids.size());
    recvdispls[pid] = total_recvcounts;
    total_recvcounts += static_cast<int>(gids.size());
  }

  // For communication, each process must also know what it is
  // sending to other processes. If each process sends every other
  // process the global ids it needs to receive, then each process
  // knows what the other processes need from it. The Chi-Tech MPI
  // utility MapAllToAll accomplishes this task, returning a mapping
  // of processes to the global ids that this process needs to send.
  std::map<int, std::vector<int64_t>> send_map =
    chi_mpi_utils::MapAllToAll(recv_map, MPI_INT64_T, comm_);

  // With this information, the amount of data that needs to be sent
  // can be determined.
  size_t send_size = 0;
  for (const auto& [pid, gids] : send_map)
    send_size += gids.size();

  // Next, the local ids on this process that need to be
  // communicated to other processes are determined and stored.
  std::vector<int64_t> local_ids_to_send;
  local_ids_to_send.reserve(send_size);
  for (const auto& [pid, gids] : send_map)
    for (const int64_t gid : gids)
    {
      ChiLogicalErrorIf(
        gid < extents_[location_id_] or gid >= extents_[location_id_ + 1],
        std::string(__FUNCTION__) + ": " +
          "Problem determining communication pattern. Process " +
          std::to_string(pid) + " determined that process " +
          std::to_string(location_id_) + " needs to communicate global id " +
          std::to_string(gid) + " to it, but this id is not locally owned.");

      local_ids_to_send.push_back(gid - scint64_t(extents_[location_id_]));
    }

  // Finally, the communication pattern for the data being sent
  // can be constructed similarly to that for the received data.
  int total_sendcounts = 0;
  sendcounts.assign(process_count_, 0);
  senddispls.assign(process_count_, 0);
  for (const auto& [pid, gids] : send_map)
  {
    sendcounts[pid] = static_cast<int>(gids.size());
    senddispls[pid] = total_sendcounts;
    total_sendcounts += static_cast<int>(gids.size());
  }

  return CachedParallelData{std::move(sendcounts),
                            std::move(senddispls),
                            std::move(recvcounts),
                            std::move(recvdispls),
                            std::move(local_ids_to_send),
                            std::move(ghost_to_recv_map)};
}
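
// Illustrative example of the cached data (hypothetical numbers): with two
// processes, where process 0 owns global ids [0, 4) and process 1 owns [4, 8),
// and where process 0 has ghost_ids_ = {5, 7} while process 1 has
// ghost_ids_ = {2}, process 0 ends up with
//   recv_map               = {1: [5, 7]}  ->  recvcounts = {0, 2}, recvdispls = {0, 0}
//   ghost_to_recv_map      = {5: 0, 7: 1}
//   send_map (MapAllToAll) = {1: [2]}     ->  sendcounts = {0, 1}, senddispls = {0, 0}
//   local_ids_to_send      = {2}
// which is exactly the layout MPI_Alltoallv consumes in CommunicateGhostEntries.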

VectorGhostCommunicator::VectorGhostCommunicator(
  const VectorGhostCommunicator& other)
  : local_size_(other.local_size_),
    global_size_(other.global_size_),
    ghost_ids_(other.ghost_ids_),
    comm_(other.comm_),
    location_id_(other.location_id_),
    process_count_(other.process_count_),
    extents_(other.extents_),
    cached_parallel_data_(other.cached_parallel_data_)
{
}

VectorGhostCommunicator::VectorGhostCommunicator(
  VectorGhostCommunicator&& other) noexcept
  : local_size_(other.local_size_),
    global_size_(other.global_size_),
    ghost_ids_(other.ghost_ids_),
    comm_(other.comm_),
    location_id_(other.location_id_),
    process_count_(other.process_count_),
    extents_(other.extents_),
    cached_parallel_data_(other.cached_parallel_data_)
{
}

// ######################################################################
int64_t VectorGhostCommunicator::MapGhostToLocal(const int64_t ghost_id) const
{
  ChiInvalidArgumentIf(
    cached_parallel_data_.ghost_to_recv_map_.count(ghost_id) == 0,
    "The given ghost id does not belong to this communicator.");

  // Get the position of the given ghost id within the ghost id vector
  const auto k = std::find(ghost_ids_.begin(), ghost_ids_.end(), ghost_id) -
                 ghost_ids_.begin();

  // The local index is the local size plus the position in the ghost id vector
  return scint64_t(local_size_) + k;
}

// ######################################################################
void VectorGhostCommunicator::CommunicateGhostEntries(
  std::vector<double>& ghosted_vector) const
{
  ChiInvalidArgumentIf(ghosted_vector.size() != local_size_ + ghost_ids_.size(),
                       std::string(__FUNCTION__) +
                         ": Vector size mismatch. "
                         "input size = " +
                         std::to_string(ghosted_vector.size()) +
                         " requirement " +
                         std::to_string(local_size_ + ghost_ids_.size()));

  // Serialize the data that needs to be sent
  const size_t send_size = cached_parallel_data_.local_ids_to_send_.size();
  std::vector<double> send_data;
  send_data.reserve(send_size);
  for (const int64_t local_id : cached_parallel_data_.local_ids_to_send_)
    send_data.push_back(ghosted_vector[local_id]);

  // Create serialized storage for the data to be received
  const size_t recv_size = ghost_ids_.size();
  std::vector<double> recv_data(recv_size, 0.0);

  // Communicate the ghost data using the cached counts and displacements
  MPI_Alltoallv(send_data.data(),
                cached_parallel_data_.sendcounts_.data(),
                cached_parallel_data_.senddispls_.data(),
                MPI_DOUBLE,
                recv_data.data(),
                cached_parallel_data_.recvcounts_.data(),
                cached_parallel_data_.recvdispls_.data(),
                MPI_DOUBLE,
                comm_);

  // Lastly, populate the local vector with the ghost data. All ghost data is
  // appended to the back of the local vector. Using the mapping from each
  // ghost id to its position in the received data, together with the
  // ordering of the ghost ids, this is accomplished as follows.
  for (size_t k = 0; k < recv_size; ++k)
    ghosted_vector[local_size_ + k] =
      recv_data[cached_parallel_data_.ghost_to_recv_map_.at(ghost_ids_[k])];
}

// ######################################################################
std::vector<double> VectorGhostCommunicator::MakeGhostedVector() const
{
  const auto ghosted_size = local_size_ + ghost_ids_.size();
  return std::vector<double>(ghosted_size, 0.0);
}

// ######################################################################
std::vector<double> VectorGhostCommunicator::MakeGhostedVector(
  const std::vector<double>& local_vector) const
{
  ChiInvalidArgumentIf(local_vector.size() != local_size_,
                       std::string(__FUNCTION__) +
                         ": Incompatible unghosted vector. " +
                         "unghosted_vector.size() != local_size_");

  // Append zero-initialized ghost entries to the back of the unghosted vector
  std::vector<double> vec = local_vector;
  for (size_t i = 0; i < ghost_ids_.size(); ++i)
    vec.emplace_back(0.0);
  return vec;
}

// ######################################################################
int VectorGhostCommunicator::FindOwnerPID(const int64_t global_id) const
{
  ChiInvalidArgumentIf(global_id < 0 or global_id >= global_size_,
                       std::string(__FUNCTION__) + ": Invalid global id. " +
                         "Global ids must be in [0, global_size_). " +
                         std::to_string(global_id) + " vs [0," +
                         std::to_string(global_size_) + ")");

  for (int p = 0; p < process_count_; ++p)
    if (global_id >= extents_[p] and global_id < extents_[p + 1]) return p;
  return -1;
}

} // namespace chi_math
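
Usage sketch: the snippet below is a minimal, illustrative driver for the
communicator defined above, assuming only the public interface shown in this
listing (constructor, MakeGhostedVector, CommunicateGhostEntries and
MapGhostToLocal) and that MPI has already been initialized. The partition
size, the ghost-id choice, the include path and the function name Demo are
placeholders, not part of Chi-Tech.

#include "vector_ghost_communicator.h" // path assumed, as in the listing above
#include <mpi.h>
#include <cstdint>
#include <vector>

void Demo(const MPI_Comm comm)
{
  int rank = 0, size = 1;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  // Hypothetical partition: each process owns 100 contiguous global ids.
  const uint64_t n_local = 100;
  const uint64_t n_global = n_local * static_cast<uint64_t>(size);

  // Placeholder ghost ids: the first entry owned by the next process, if any.
  std::vector<int64_t> ghost_ids;
  if (rank + 1 < size)
    ghost_ids.push_back(static_cast<int64_t>(n_local) * (rank + 1));

  chi_math::VectorGhostCommunicator ghost_comm(n_local, n_global, ghost_ids, comm);

  // Locally owned entries first, zero-initialized ghost slots appended.
  std::vector<double> x = ghost_comm.MakeGhostedVector();
  for (uint64_t i = 0; i < n_local; ++i)
    x[i] = static_cast<double>(rank);

  // Pull the owning processes' values into the ghost slots at the back of x.
  ghost_comm.CommunicateGhostEntries(x);

  // Read a ghost value through its local index.
  if (not ghost_ids.empty())
  {
    const int64_t k = ghost_comm.MapGhostToLocal(ghost_ids.front());
    const double ghost_value = x[k]; // equals rank + 1 in this toy setup
    (void)ghost_value;
  }
}

When a purely local vector already exists, MakeGhostedVector(local_vector) can
be used instead to copy it and append zero-initialized ghost slots before
calling CommunicateGhostEntries.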