31 #ifndef __TASMANIAN_ADDONS_MPICONSTRUCTGRID_HPP
32 #define __TASMANIAN_ADDONS_MPICONSTRUCTGRID_HPP
57 #ifdef Tasmanian_ENABLE_MPI
/*!
 * \brief Signature of a model callback used by the MPI construction procedures.
 *
 * The callback reads a batch of sample inputs \b x (packed by sample, the batch
 * size is x.size() / num_dimensions) and overwrites \b y with the corresponding
 * model outputs.  When an initial guess is used, \b y arrives pre-filled.
 */
using ModelSignatureMPI = std::function<void(std::vector<double> const &x, std::vector<double> &y)>;
79 template<
bool use_initial_guess>
81 int num_dimensions,
int num_outputs,
82 size_t max_num_points,
size_t max_samples_per_job,
83 size_t max_num_ranks,
int tagx,
int tagy,
int root, MPI_Comm comm,
86 std::string
const &checkpoint_filename){
88 MPI_Comm_size(comm, &num_ranks);
90 max_num_ranks = std::max(std::min(max_num_ranks,
size_t(num_ranks)),
size_t(1));
93 std::vector<int> thread_to_rank(num_ranks);
94 std::iota(thread_to_rank.begin(), thread_to_rank.begin() + root, 0);
95 std::iota(thread_to_rank.begin() + root, thread_to_rank.end(), root + 1);
96 thread_to_rank.back() = root;
98 size_t max_message =
sizeof(double) *
Utils::size_mult(num_dimensions, max_samples_per_job) +
sizeof(size_t);
100 max_message +=
sizeof(double) *
Utils::size_mult(num_outputs, max_samples_per_job) +
sizeof(size_t);
104 constructCommon<TasGrid::mode_parallel, use_initial_guess>(
105 [&](std::vector<double>
const &x, std::vector<double> &y,
size_t thread_id)->
void{
106 int rankid = thread_to_rank[thread_id];
110 std::stringstream ss;
111 IO::writeNumbers<mode_binary, IO::pad_none>(ss, x.size());
112 IO::writeVector<mode_binary, IO::pad_none>(x, ss);
114 IO::writeNumbers<mode_binary, IO::pad_none>(ss, y.size());
115 IO::writeVector<mode_binary, IO::pad_none>(y, ss);
117 MPI_Send(ss.str().c_str(), (
int) (ss.str().size()), MPI_BYTE, rankid, tagx, comm);
121 MPI_Irecv(y.data(), (
int) y.size(), MPI_DOUBLE, rankid, tagy, comm, &request);
122 MPI_Wait(&request, &status);
124 }, max_num_points, max_num_ranks, max_samples_per_job, grid, candidates, checkpoint_filename);
126 std::stringstream stop_message;
127 IO::writeNumbers<mode_binary, IO::pad_none>(stop_message,
size_t(0));
128 if (use_initial_guess ==
with_initial_guess) IO::writeNumbers<mode_binary, IO::pad_none>(stop_message,
size_t(0));
129 for(
size_t id=0;
id<max_num_ranks;
id++){
130 if (thread_to_rank[
id] != root){
131 MPI_Send(stop_message.str().c_str(), (
int) (stop_message.str().size()), MPI_BYTE, thread_to_rank[
id], tagx, comm);
135 size_t thread_id = 0;
136 while(thread_to_rank[thread_id] != me) thread_id++;
138 if (thread_id < max_num_ranks){
139 std::vector<char> message_buffer(max_message);
140 std::vector<double> x, y;
143 MPI_Recv(message_buffer.data(), (
int) message_buffer.size(), MPI_BYTE, root, tagx, comm, &status);
147 std::istream is(&data_buffer);
149 x = IO::readVector<IO::mode_binary_type, double>(is, IO::readNumber<IO::mode_binary_type, size_t>(is));
152 y = IO::readVector<IO::mode_binary_type, double>(is, IO::readNumber<IO::mode_binary_type, size_t>(is));
159 MPI_Send(y.data(), (
int) (y.size()), MPI_DOUBLE, root, tagy, comm);
335 template<
bool use_initial_guess = no_initial_guess>
337 int num_dimensions,
int num_outputs,
338 size_t max_num_points,
size_t max_samples_per_job,
339 size_t max_num_ranks,
int tagx,
int tagy,
int root, MPI_Comm comm,
342 std::vector<int>
const &level_limits = std::vector<int>(),
343 std::vector<double>
const &scale_correction = std::vector<double>(),
344 std::string
const &checkpoint_filename = std::string()){
346 mpiConstructCommon<use_initial_guess>(model, num_dimensions, num_outputs, max_num_points, max_samples_per_job,
347 max_num_ranks, tagx, tagy, root, comm, grid,
351 checkpoint_filename);
362 template<
bool use_initial_guess = no_initial_guess>
364 int num_dimensions,
int num_outputs,
365 size_t max_num_points,
size_t max_samples_per_job,
366 size_t max_num_ranks,
int tagx,
int tagy,
int root, MPI_Comm comm,
368 TypeDepth type, std::vector<int>
const &anisotropic_weights = std::vector<int>(),
369 std::vector<int>
const &level_limits = std::vector<int>(),
370 std::string
const &checkpoint_filename = std::string()){
372 mpiConstructCommon<use_initial_guess>(model, num_dimensions, num_outputs, max_num_points, max_samples_per_job,
373 max_num_ranks, tagx, tagy, root, comm, grid,
377 checkpoint_filename);
389 template<
bool use_initial_guess = no_initial_guess>
391 int num_dimensions,
int num_outputs,
392 size_t max_num_points,
size_t max_samples_per_job,
393 size_t max_num_ranks,
int tagx,
int tagy,
int root, MPI_Comm comm,
395 TypeDepth type,
int output, std::vector<int>
const &level_limits = std::vector<int>(),
396 std::string
const &checkpoint_filename = std::string()){
398 mpiConstructCommon<use_initial_guess>(model, num_dimensions, num_outputs, max_num_points, max_samples_per_job,
399 max_num_ranks, tagx, tagy, root, comm, grid,
403 checkpoint_filename);
418 template<
bool use_initial_guess = no_initial_guess>
420 int num_dimensions,
int num_outputs,
size_t max_samples_per_job,
421 size_t max_num_ranks,
int tagx,
int tagy,
int root, MPI_Comm comm){
424 mpiConstructCommon<use_initial_guess>(model, num_dimensions, num_outputs, 0, max_samples_per_job,
425 max_num_ranks, tagx, tagy, root, comm, grid,
427 return std::vector<double>();
The master-class that represents an instance of a Tasmanian sparse grid.
Definition: TasmanianSparseGrid.hpp:293
std::vector< double > getCandidateConstructionPoints(TypeDepth type, std::vector< int > const &anisotropic_weights=std::vector< int >(), std::vector< int > const &level_limits=std::vector< int >())
Generate a sorted list of points weighted by descending importance.
Converts a vector to a basic stream-buffer.
Definition: tsgMPIScatterGrid.hpp:72
TypeDepth
Used by Global Sequence and Fourier grids, indicates the selection criteria.
Definition: tsgEnumerates.hpp:203
TypeRefinement
Refinement strategy for local polynomial and wavelet grids.
Definition: tsgEnumerates.hpp:425
constexpr bool with_initial_guess
Allows for expressive calls to TasGrid::constructSurrogate().
Definition: tsgConstructSurrogate.hpp:84
void mpiConstructWorker(ModelSignatureMPI model, int num_dimensions, int num_outputs, size_t max_samples_per_job, size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm)
Executes the worker (i.e., non-root) portion of the MPI sampling.
Definition: tsgMPIConstructGrid.hpp:419
std::function< void(std::vector< double > const &x, std::vector< double > &y)> ModelSignatureMPI
Signature of a model function to be used in the MPI construction procedures.
Definition: tsgMPIConstructGrid.hpp:69
void mpiConstructSurrogate(ModelSignatureMPI model, int num_dimensions, int num_outputs, size_t max_num_points, size_t max_samples_per_job, size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm, TasmanianSparseGrid &grid, double tolerance, TypeRefinement criteria, int output=-1, std::vector< int > const &level_limits=std::vector< int >(), std::vector< double > const &scale_correction=std::vector< double >(), std::string const &checkpoint_filename=std::string())
Construct a sparse grid surrogate to the model defined by the lambda, MPI version.
Definition: tsgMPIConstructGrid.hpp:336
void mpiConstructCommon(ModelSignatureMPI model, int num_dimensions, int num_outputs, size_t max_num_points, size_t max_samples_per_job, size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm, TasmanianSparseGrid &grid, std::function< std::vector< double >(TasmanianSparseGrid &)> candidates, std::string const &checkpoint_filename)
MPI construction algorithm using generic candidates procedure.
Definition: tsgMPIConstructGrid.hpp:80
int getMPIRank(MPI_Comm comm)
Utility to return the rank within the given comm.
Definition: tsgMPIScatterGrid.hpp:87
size_t size_mult(IntA a, IntB b)
Converts two integer-like variables to size_t and returns the product.
Definition: tsgUtils.hpp:82
Encapsulates the Tasmanian Sparse Grid module.
Definition: TasmanianSparseGrid.hpp:68
Automated parallel construction procedure.
DREAM objects send/receive through MPI.