mpi_stdpar.cpp
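This example combines MPI point-to-point communication with C++ standard parallel algorithms (stdpar): rank 0 fills a vector on the GPU with a parallel kernel and sends it to rank 1, which receives it and prints the first few values.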
#include <algorithm>
#include <cstdio>
#include <execution>
#include <iostream>
#include <ranges>
#include <sstream>
#include <string>
#include <vector>
#include <mpi.h>

// Initialize x[i] = 1.1 * i with a parallel algorithm; under stdpar offload
// this loop can run on the GPU.
template <typename T>
void init(T &x)
{
    auto kernel = [=, x = x.data()](size_t i) {
        x[i] = 1.1 * i;
    };
    using std::begin;
    using std::size;
    auto indices = std::views::iota(0);
    std::for_each_n(std::execution::par_unseq, begin(indices), size(x), kernel);
}

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);

    const size_t n = argc > 1 ? (size_t)std::stoll(argv[1]) : 1024;

    int size, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    printf("Hello from MPI rank %d/%d\n", rank, size);

    // Data
    std::vector<double> x(n);
    // Fill with zeros on GPU
    std::fill(std::execution::par_unseq, begin(x), end(x), 0.0);

    if (rank == 0) {
        // Initialize data on rank 0
        init(x);
        // Send from rank 0
        MPI_Send(x.data(), (int)n, MPI_DOUBLE, 1, 123, MPI_COMM_WORLD);
        printf("Rank %d sent\n", rank);
    } else if (rank == 1) {
        // Receive on rank 1
        MPI_Recv(x.data(), (int)n, MPI_DOUBLE, 0, 123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("Rank %d received\n", rank);
    }

    // Print; this migrates memory to CPU
    std::stringstream ss;
    ss << "Rank " << rank << " has";
    for (size_t i = 0; i < std::min<size_t>(8, n); ++i) ss << " " << x[i];
    if (n > 8) ss << " ...";
    ss << "\n";
    std::cout << ss.str();

    MPI_Finalize();
}
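
Note: the exchange above assumes the program is launched with at least two ranks. A minimal guard (not part of the original example) could be placed right after the rank/size query, for instance:

    // Sketch only: abort early when fewer than two ranks are available,
    // since the send/receive above assumes ranks 0 and 1 both exist.
    if (size < 2) {
        if (rank == 0) fprintf(stderr, "This example needs at least 2 MPI ranks\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }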