Skip to content

Commit 7106958

Browse files
GuyStenp and Paul Romano authored
Simplify IFP message passing (#3719)
Co-authored-by: Paul Romano <[email protected]>
1 parent 65e19c1 commit 7106958

File tree

5 files changed

+90
-104
lines changed

5 files changed

+90
-104
lines changed

include/openmc/ifp.h

Lines changed: 51 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,8 @@
66
#include "openmc/particle_data.h"
77
#include "openmc/settings.h"
88

9+
#include <algorithm> // for copy
10+
911
namespace openmc {
1012

1113
//! Check the value of the IFP parameter for beta effective or both.
@@ -113,14 +115,25 @@ void broadcast_ifp_n_generation(int& n_generation,
113115
//! \param[in] n_generation Number of generations
114116
//! \param[in] neighbor Index of the neighboring processor
115117
//! \param[in] requests MPI requests
116-
//! \param[in] delayed_groups List of delayed group numbers lists
117-
//! \param[out] send_delayed_groups Delayed group numbers buffer
118-
//! \param[in] lifetimes List of lifetimes lists
119-
//! \param[out] send_lifetimes Lifetimes buffer
118+
//! \param[in] data List of data lists
119+
//! \param[out] send_data data buffer
120+
template<typename T>
120121
void send_ifp_info(int64_t idx, int64_t n, int n_generation, int neighbor,
121-
vector<MPI_Request>& requests, const vector<vector<int>>& delayed_groups,
122-
vector<int>& send_delayed_groups, const vector<vector<double>>& lifetimes,
123-
vector<double>& send_lifetimes);
122+
vector<MPI_Request>& requests, const vector<vector<T>>& data,
123+
vector<T>& send_data)
124+
{
125+
// Copy data in buffer
126+
for (int i = idx; i < idx + n; i++) {
127+
std::copy(
128+
data[i].begin(), data[i].end(), send_data.begin() + i * n_generation);
129+
}
130+
131+
// Send data
132+
requests.emplace_back();
133+
MPI_Datatype datatype = mpi::MPITypeMap<T>::mpi_type;
134+
MPI_Isend(&send_data[n_generation * idx], n_generation * static_cast<int>(n),
135+
datatype, neighbor, mpi::rank, mpi::intracomm, &requests.back());
136+
}
124137

125138
//! Receive IFP data using MPI.
126139
//!
@@ -129,12 +142,22 @@ void send_ifp_info(int64_t idx, int64_t n, int n_generation, int neighbor,
129142
//! \param[in] n_generation Number of generations
130143
//! \param[in] neighbor Index of the neighboring processor
131144
//! \param[in] requests MPI requests
132-
//! \param[in] delayed_groups List of delayed group numbers
133-
//! \param[in] lifetimes List of lifetimes
145+
//! \param[in] data data buffer
134146
//! \param[out] deserialization Information to deserialize the received data
147+
template<typename T>
135148
void receive_ifp_data(int64_t idx, int64_t n, int n_generation, int neighbor,
136-
vector<MPI_Request>& requests, vector<int>& delayed_groups,
137-
vector<double>& lifetimes, vector<DeserializationInfo>& deserialization);
149+
vector<MPI_Request>& requests, vector<T>& data,
150+
vector<DeserializationInfo>& deserialization)
151+
{
152+
requests.emplace_back();
153+
MPI_Datatype datatype = mpi::MPITypeMap<T>::mpi_type;
154+
MPI_Irecv(&data[n_generation * idx], n_generation * static_cast<int>(n),
155+
datatype, neighbor, neighbor, mpi::intracomm, &requests.back());
156+
157+
// Deserialization info to reconstruct data later
158+
DeserializationInfo info = {idx, n};
159+
deserialization.push_back(info);
160+
}
138161

139162
//! Copy partial IFP data from local lists to source banks.
140163
//!
@@ -151,12 +174,24 @@ void copy_partial_ifp_data_to_source_banks(int64_t idx, int n, int64_t i_bank,
151174
//! the IFP source banks.
152175
//!
153176
//! \param[in] n_generation Number of generations
177+
//! \param[in] data data to deserialize
178+
//! \param[in] bank bank to store data
154179
//! \param[out] deserialization Information to deserialize the received data
155-
//! \param[in] delayed_groups List of delayed group numbers
156-
//! \param[in] lifetimes List of lifetimes
157-
void deserialize_ifp_info(int n_generation,
158-
const vector<DeserializationInfo>& deserialization,
159-
const vector<int>& delayed_groups, const vector<double>& lifetimes);
180+
template<typename T>
181+
void deserialize_ifp_info(int n_generation, const vector<T>& data,
182+
vector<vector<T>>& bank, const vector<DeserializationInfo>& deserialization)
183+
{
184+
for (auto info : deserialization) {
185+
int64_t index_local = info.index_local;
186+
int64_t n = info.n;
187+
188+
for (int i = index_local; i < index_local + n; i++) {
189+
vector<T> data_received(
190+
data.begin() + n_generation * i, data.begin() + n_generation * (i + 1));
191+
bank[i] = data_received;
192+
}
193+
}
194+
}
160195

161196
#endif
162197

include/openmc/message_passing.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,19 @@ extern MPI_Datatype collision_track_site;
2222
extern MPI_Comm intracomm;
2323
#endif
2424

25+
//==============================================================================
// MPITypeMap maps a C++ type T to the corresponding MPI datatype handle.
// The template has a single static data member so it can be specialized for
// each supported type; the specializations are definitions and therefore
// appear in the .cpp file.
//==============================================================================
#ifdef OPENMC_MPI
template<typename T>
struct MPITypeMap {
  static const MPI_Datatype mpi_type;
};
#endif
37+
2538
// Calculates global indices of the bank particles
2639
// across all ranks using a parallel scan. This is used to write
2740
// the surface source file in parallel runs. It will probably

src/eigenvalue.cpp

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -248,9 +248,12 @@ void synchronize_bank()
248248

249249
if (settings::ifp_on) {
250250
// Send IFP data
251-
send_ifp_info(index_local, n, ifp_n_generation, neighbor, requests,
252-
temp_delayed_groups, send_delayed_groups, temp_lifetimes,
253-
send_lifetimes);
251+
if (is_beta_effective_or_both())
252+
send_ifp_info(index_local, n, ifp_n_generation, neighbor, requests,
253+
temp_delayed_groups, send_delayed_groups);
254+
if (is_generation_time_or_both())
255+
send_ifp_info(index_local, n, ifp_n_generation, neighbor, requests,
256+
temp_lifetimes, send_lifetimes);
254257
}
255258
}
256259

@@ -316,8 +319,12 @@ void synchronize_bank()
316319

317320
if (settings::ifp_on) {
318321
// Receive IFP data
319-
receive_ifp_data(index_local, n, ifp_n_generation, neighbor, requests,
320-
recv_delayed_groups, recv_lifetimes, deserialization_info);
322+
if (is_beta_effective_or_both())
323+
receive_ifp_data(index_local, n, ifp_n_generation, neighbor, requests,
324+
recv_delayed_groups, deserialization_info);
325+
if (is_generation_time_or_both())
326+
receive_ifp_data(index_local, n, ifp_n_generation, neighbor, requests,
327+
recv_lifetimes, deserialization_info);
321328
}
322329

323330
} else {
@@ -348,8 +355,12 @@ void synchronize_bank()
348355
MPI_Waitall(n_request, requests.data(), MPI_STATUSES_IGNORE);
349356

350357
if (settings::ifp_on) {
351-
deserialize_ifp_info(ifp_n_generation, deserialization_info,
352-
recv_delayed_groups, recv_lifetimes);
358+
if (is_beta_effective_or_both())
359+
deserialize_ifp_info(ifp_n_generation, recv_delayed_groups,
360+
simulation::ifp_source_delayed_group_bank, deserialization_info);
361+
if (is_generation_time_or_both())
362+
deserialize_ifp_info(ifp_n_generation, recv_lifetimes,
363+
simulation::ifp_source_lifetime_bank, deserialization_info);
353364
}
354365

355366
#else

src/ifp.cpp

Lines changed: 0 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,6 @@ void copy_ifp_data_from_fission_banks(
6363
}
6464

6565
#ifdef OPENMC_MPI
66-
6766
void broadcast_ifp_n_generation(int& n_generation,
6867
const vector<vector<int>>& delayed_groups,
6968
const vector<vector<double>>& lifetimes)
@@ -78,61 +77,6 @@ void broadcast_ifp_n_generation(int& n_generation,
7877
MPI_Bcast(&n_generation, 1, MPI_INT, 0, mpi::intracomm);
7978
}
8079

81-
void send_ifp_info(int64_t idx, int64_t n, int n_generation, int neighbor,
82-
vector<MPI_Request>& requests, const vector<vector<int>>& delayed_groups,
83-
vector<int>& send_delayed_groups, const vector<vector<double>>& lifetimes,
84-
vector<double>& send_lifetimes)
85-
{
86-
// Copy data in send buffers
87-
for (int i = idx; i < idx + n; i++) {
88-
if (is_beta_effective_or_both()) {
89-
std::copy(delayed_groups[i].begin(), delayed_groups[i].end(),
90-
send_delayed_groups.begin() + i * n_generation);
91-
}
92-
if (is_generation_time_or_both()) {
93-
std::copy(lifetimes[i].begin(), lifetimes[i].end(),
94-
send_lifetimes.begin() + i * n_generation);
95-
}
96-
}
97-
// Send delayed groups
98-
if (is_beta_effective_or_both()) {
99-
requests.emplace_back();
100-
MPI_Isend(&send_delayed_groups[n_generation * idx],
101-
n_generation * static_cast<int>(n), MPI_INT, neighbor, mpi::rank,
102-
mpi::intracomm, &requests.back());
103-
}
104-
// Send lifetimes
105-
if (is_generation_time_or_both()) {
106-
requests.emplace_back();
107-
MPI_Isend(&send_lifetimes[n_generation * idx],
108-
n_generation * static_cast<int>(n), MPI_DOUBLE, neighbor, mpi::rank,
109-
mpi::intracomm, &requests.back());
110-
}
111-
}
112-
113-
void receive_ifp_data(int64_t idx, int64_t n, int n_generation, int neighbor,
114-
vector<MPI_Request>& requests, vector<int>& delayed_groups,
115-
vector<double>& lifetimes, vector<DeserializationInfo>& deserialization)
116-
{
117-
// Receive delayed groups
118-
if (is_beta_effective_or_both()) {
119-
requests.emplace_back();
120-
MPI_Irecv(&delayed_groups[n_generation * idx],
121-
n_generation * static_cast<int>(n), MPI_INT, neighbor, neighbor,
122-
mpi::intracomm, &requests.back());
123-
}
124-
// Receive lifetimes
125-
if (is_generation_time_or_both()) {
126-
requests.emplace_back();
127-
MPI_Irecv(&lifetimes[n_generation * idx],
128-
n_generation * static_cast<int>(n), MPI_DOUBLE, neighbor, neighbor,
129-
mpi::intracomm, &requests.back());
130-
}
131-
// Deserialization info to reconstruct data later
132-
DeserializationInfo info = {idx, n};
133-
deserialization.push_back(info);
134-
}
135-
13680
void copy_partial_ifp_data_to_source_banks(int64_t idx, int n, int64_t i_bank,
13781
const vector<vector<int>>& delayed_groups,
13882
const vector<vector<double>>& lifetimes)
@@ -146,31 +90,6 @@ void copy_partial_ifp_data_to_source_banks(int64_t idx, int n, int64_t i_bank,
14690
&simulation::ifp_source_lifetime_bank[i_bank]);
14791
}
14892
}
149-
150-
void deserialize_ifp_info(int n_generation,
151-
const vector<DeserializationInfo>& deserialization,
152-
const vector<int>& delayed_groups, const vector<double>& lifetimes)
153-
{
154-
for (auto info : deserialization) {
155-
int64_t index_local = info.index_local;
156-
int64_t n = info.n;
157-
158-
for (int i = index_local; i < index_local + n; i++) {
159-
if (is_beta_effective_or_both()) {
160-
vector<int> delayed_groups_received(
161-
delayed_groups.begin() + n_generation * i,
162-
delayed_groups.begin() + n_generation * (i + 1));
163-
simulation::ifp_source_delayed_group_bank[i] = delayed_groups_received;
164-
}
165-
if (is_generation_time_or_both()) {
166-
vector<double> lifetimes_received(lifetimes.begin() + n_generation * i,
167-
lifetimes.begin() + n_generation * (i + 1));
168-
simulation::ifp_source_lifetime_bank[i] = lifetimes_received;
169-
}
170-
}
171-
}
172-
}
173-
17493
#endif
17594

17695
void copy_complete_ifp_data_to_source_banks(

src/message_passing.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,14 @@ vector<int64_t> calculate_parallel_index_vector(int64_t size)
3939
return result;
4040
}
4141

42+
#ifdef OPENMC_MPI
// Definitions of the MPITypeMap specializations declared in
// message_passing.h, one per type supported by the IFP message passing
template<>
const MPI_Datatype MPITypeMap<int>::mpi_type = MPI_INT;
template<>
const MPI_Datatype MPITypeMap<double>::mpi_type = MPI_DOUBLE;
#endif
49+
4250
} // namespace mpi
4351

4452
} // namespace openmc

0 commit comments

Comments
 (0)