void init(int *argc, char ***argv) { MPI_Init(argc, argv); }
int rank(MPI_Comm comm = MPI_COMM_WORLD) { int r; MPI_Comm_rank(comm, &r); return r; }
int size(MPI_Comm comm = MPI_COMM_WORLD) { int s; MPI_Comm_size(comm, &s); return s; }

real dot(const Vector &x, const Vector &y, MPI_Comm comm = MPI_COMM_WORLD) {
    real local = dot(x, y, default_backend), global;  // per-rank partial dot (backend overload assumed)
    MPI_Allreduce(&local, &global, 1, MPI_DOUBLE, MPI_SUM, comm);
    return global;
}

real norm(const Vector &x, MPI_Comm comm = MPI_COMM_WORLD) {
    real local_sq = dot(x, x, default_backend), global_sq;  // per-rank sum of squares (assumed)
    MPI_Allreduce(&local_sq, &global_sq, 1, MPI_DOUBLE, MPI_SUM, comm);
    return std::sqrt(global_sq);  // sqrt of the globally summed squares
}

void allreduce_sum(real *data, idx n, MPI_Comm comm = MPI_COMM_WORLD) {
    MPI_Allreduce(MPI_IN_PLACE, data, static_cast<int>(n), MPI_DOUBLE, MPI_SUM, comm);
}

void broadcast(real *data, idx n, int root = 0, MPI_Comm comm = MPI_COMM_WORLD) {
    MPI_Bcast(data, static_cast<int>(n), MPI_DOUBLE, root, comm);
}
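A minimal driver sketch under the signatures above; the enclosing namespace mpi, the Vector constructor, and element access are assumptions for illustration, not part of the documented API:

    #include <cstdio>
    int main(int argc, char **argv) {
        mpi::init(&argc, &argv);                    // must run before any other MPI call
        int r = mpi::rank(), p = mpi::size();
        Vector x(1000), y(1000);                    // each rank owns its own partial block (assumed ctor)
        for (idx i = 0; i < 1000; ++i) { x[i] = 1.0; y[i] = 2.0; }
        real d  = mpi::dot(x, y, MPI_COMM_WORLD);   // explicit comm disambiguates from the Backend overload
        real nx = mpi::norm(x);                     // sqrt(1000 * p) for this data
        if (r == 0) std::printf("ranks=%d dot=%g norm=%g\n", p, d, nx);  // d = 2000 * p
        mpi::finalize();                            // call once at program end
    }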
MPI distributed operations.
void broadcast(real *data, idx n, int root=0, MPI_Comm comm=MPI_COMM_WORLD)
Broadcast from root.
void allreduce_sum(real *data, idx n, MPI_Comm comm=MPI_COMM_WORLD)
Allreduce sum.
real norm(const Vector &x, MPI_Comm comm=MPI_COMM_WORLD)
Distributed norm.
void init(int *argc, char ***argv)
Initialize MPI (call once).
void finalize()
Finalize MPI.
int size(MPI_Comm comm=MPI_COMM_WORLD)
Get communicator size.
int rank(MPI_Comm comm=MPI_COMM_WORLD)
Get communicator rank.
real dot(const Vector &x, const Vector &y, MPI_Comm comm=MPI_COMM_WORLD)
Distributed dot product (each rank holds a partial vector).
real dot(const Vector &x, const Vector &y, Backend b=default_backend)
Dot product (per-rank, on the selected backend).
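The pointer-based entries operate on raw buffers in place. A short sketch, again assuming a namespace mpi and that real is double (consistent with the MPI_DOUBLE calls above):

    #include <cstdio>
    int main(int argc, char **argv) {
        mpi::init(&argc, &argv);
        real buf[4] = {0, 0, 0, 0};
        if (mpi::rank() == 0)
            for (int i = 0; i < 4; ++i) buf[i] = i + 1;  // data starts on root only
        mpi::broadcast(buf, 4);        // default root=0: every rank now holds {1,2,3,4}
        mpi::allreduce_sum(buf, 4);    // in-place elementwise sum: buf[i] becomes size()*(i+1)
        if (mpi::rank() == 0) std::printf("buf[0]=%g\n", buf[0]);  // prints size()*1
        mpi::finalize();
    }

Note that the two dot overloads differ only in their defaulted last parameter; if both are visible in the same scope, a plain dot(x, y) call is ambiguous, so passing the communicator or backend explicitly is the safe pattern.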