Doxygen 1.9.8
Toolkit for Adaptive Stochastic Modeling and Non-Intrusive ApproximatioN: Tasmanian v8.2
 
Loading...
Searching...
No Matches
tsgMPIConstructGrid.hpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2017, Miroslav Stoyanov
3 *
4 * This file is part of
5 * Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
6 *
7 * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
12 * and the following disclaimer in the documentation and/or other materials provided with the distribution.
13 *
14 * 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
15 * or promote products derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
18 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
20 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
21 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 * UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
25 * THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
26 * COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
27 * THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
28 * IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
29 */
30
31#ifndef __TASMANIAN_ADDONS_MPICONSTRUCTGRID_HPP
32#define __TASMANIAN_ADDONS_MPICONSTRUCTGRID_HPP
33
47
57#ifdef Tasmanian_ENABLE_MPI
58
59namespace TasGrid{
60
// Callback signature for the user-provided model in the MPI construction
// procedures: reads the batch of sample points x and writes the corresponding
// model outputs into y (resized by the caller to num_outputs per sample).
69using ModelSignatureMPI = std::function<void(std::vector<double> const &x, std::vector<double> &y)>;
70
// Common MPI construction driver shared by the mpiConstructSurrogate()
// overloads and mpiConstructWorker().  The root rank runs the adaptive
// sampling loop (constructCommon) and dispatches batches of points to the
// other ranks over point-to-point MPI; every other rank sits in a
// receive/model/send loop until it receives an empty batch (the stop signal).
// NOTE(review): this listing was extracted from generated documentation and
// the hyperlinked lines were dropped — the declaration line naming the
// function and the IO::writeNumbers / stream-read calls that pack x (and
// optionally y) into the message.  The surviving code is annotated as-is.
79template<bool use_initial_guess>
81 int num_dimensions, int num_outputs,
82 size_t max_num_points, size_t max_samples_per_job,
83 size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm,
85 std::function<std::vector<double>(TasmanianSparseGrid &)> candidates,
86 std::string const &checkpoint_filename){
87 int num_ranks;
88 MPI_Comm_size(comm, &num_ranks);
89 // force max_num_ranks between 1 and num_ranks
90 max_num_ranks = std::max(std::min(max_num_ranks, size_t(num_ranks)), size_t(1));
91 int const me = getMPIRank(comm); // my rank within the comm
92
93 std::vector<int> thread_to_rank(num_ranks); // map the thread id to the rank, root is used last
94 std::iota(thread_to_rank.begin(), thread_to_rank.begin() + root, 0);
95 std::iota(thread_to_rank.begin() + root, thread_to_rank.end(), root + 1);
96 thread_to_rank.back() = root;
97
// Worst-case message size: x values (num_dimensions doubles per sample, up to
// max_samples_per_job samples) plus a size_t header; doubled with another
// size_t header when an initial guess for y travels along with x.
98 size_t max_message = sizeof(double) * Utils::size_mult(num_dimensions, max_samples_per_job) + sizeof(size_t);
99 if (use_initial_guess == with_initial_guess)
100 max_message += sizeof(double) * Utils::size_mult(num_outputs, max_samples_per_job) + sizeof(size_t);
101
102 if (me == root){ // main sampler
// Both branches hit this barrier before any send/receive, synchronizing
// the root with the workers before the sampling loop starts.
103 MPI_Barrier(comm);
104 constructCommon<TasGrid::mode_parallel, use_initial_guess>(
// Per-thread model lambda: thread_id selects which rank does the work.
105 [&](std::vector<double> const &x, std::vector<double> &y, size_t thread_id)->void{
106 int rankid = thread_to_rank[thread_id];
107 if (rankid == root){
108 model(x, y); // thread belongs to this rank, do work locally
109 }else{
110 std::stringstream ss; // pack x and y into a message
// NOTE(review): the IO::writeNumbers calls that serialize x (and y
// below) into ss were dropped from this listing by the extraction.
113 if (use_initial_guess == with_initial_guess){ // pack y only if used
116 }
117 MPI_Send(ss.str().c_str(), (int) (ss.str().size()), MPI_BYTE, rankid, tagx, comm); // send x
118 MPI_Status status;
119 MPI_Request request;
120 y.resize(Utils::size_mult(num_outputs, x.size() / num_dimensions));
121 MPI_Irecv(y.data(), (int) y.size(), MPI_DOUBLE, rankid, tagy, comm, &request); // receive y
122 MPI_Wait(&request, &status); // wait for the result from the model
123 }
124 }, max_num_points, max_num_ranks, max_samples_per_job, grid, candidates, checkpoint_filename);
125
// Sampling is done: tell every participating non-root rank to exit its
// loop by sending a message encoding zero samples (workers test x.empty()).
126 std::stringstream stop_message;
127 IO::writeNumbers<mode_binary, IO::pad_none>(stop_message, size_t(0));
128 if (use_initial_guess == with_initial_guess) IO::writeNumbers<mode_binary, IO::pad_none>(stop_message, size_t(0)); // pass 0 for y
129 for(size_t id=0; id<max_num_ranks; id++){
130 if (thread_to_rank[id] != root){
131 MPI_Send(stop_message.str().c_str(), (int) (stop_message.str().size()), MPI_BYTE, thread_to_rank[id], tagx, comm); // send x
132 }
133 }
134 }else{
135 size_t thread_id = 0; // find the thread id that should communicate with my rank
136 while(thread_to_rank[thread_id] != me) thread_id++;
137 MPI_Barrier(comm);
138 if (thread_id < max_num_ranks){ // if such thread would exist do work, otherwise do nothing
139 std::vector<char> message_buffer(max_message);
140 std::vector<double> x, y;
141 do{
142 MPI_Status status;
143 MPI_Recv(message_buffer.data(), (int) message_buffer.size(), MPI_BYTE, root, tagx, comm, &status); // receive x
144
145 // unpack the message
146 VectorToStreamBuffer data_buffer(message_buffer); // do not modify buff after this point
147 std::istream is(&data_buffer);
148
// NOTE(review): the stream-read call that deserializes x from `is` was
// dropped from this listing (Doxygen line 149).
150
151 if (use_initial_guess == with_initial_guess){ // unpack y, if necessary
153 }else{
154 y.resize(Utils::size_mult(num_outputs, x.size() / num_dimensions));
155 }
156
// An empty x is the stop signal sent by the root above; otherwise run
// the model and ship the outputs back on the tagy channel.
157 if (!x.empty()){
158 model(x, y); // call the model
159 MPI_Send(y.data(), (int) (y.size()), MPI_DOUBLE, root, tagy, comm); // send y
160 }
161 }while(!x.empty());
162 }
163 }
164}
165
// mpiConstructSurrogate() overload driven by a surplus tolerance and a
// TypeRefinement criteria (per the enum's documentation, the refinement
// strategy used by local polynomial and wavelet grids).  Thin wrapper: all
// MPI work is delegated to mpiConstructCommon(); only the candidate-selection
// lambda differs between the overloads.
// NOTE(review): the declaration line naming the function (Doxygen line 336)
// and the grid parameter line (340) were dropped from this extracted listing.
335template<bool use_initial_guess = no_initial_guess>
337 int num_dimensions, int num_outputs,
338 size_t max_num_points, size_t max_samples_per_job,
339 size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm,
341 double tolerance, TypeRefinement criteria, int output = -1,
342 std::vector<int> const &level_limits = std::vector<int>(),
343 std::vector<double> const &scale_correction = std::vector<double>(),
344 std::string const &checkpoint_filename = std::string()){
345
346 mpiConstructCommon<use_initial_guess>(model, num_dimensions, num_outputs, max_num_points, max_samples_per_job,
347 max_num_ranks, tagx, tagy, root, comm, grid,
// Candidate points ranked by the surplus-refinement criteria.
348 [&](TasmanianSparseGrid &g)->std::vector<double>{
349 return g.getCandidateConstructionPoints(tolerance, criteria, output, level_limits, scale_correction);
350 },
351 checkpoint_filename);
352}
353
// mpiConstructSurrogate() overload driven by a TypeDepth selection and
// optional anisotropic weights (per the enum's documentation, the selection
// criteria used by Global, Sequence and Fourier grids).  Thin wrapper over
// mpiConstructCommon() — only the candidate-selection lambda differs.
// NOTE(review): the declaration line naming the function (Doxygen line 363)
// and the grid parameter line (367) were dropped from this extracted listing.
362template<bool use_initial_guess = no_initial_guess>
364 int num_dimensions, int num_outputs,
365 size_t max_num_points, size_t max_samples_per_job,
366 size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm,
368 TypeDepth type, std::vector<int> const &anisotropic_weights = std::vector<int>(),
369 std::vector<int> const &level_limits = std::vector<int>(),
370 std::string const &checkpoint_filename = std::string()){
371
372 mpiConstructCommon<use_initial_guess>(model, num_dimensions, num_outputs, max_num_points, max_samples_per_job,
373 max_num_ranks, tagx, tagy, root, comm, grid,
// Candidate points ranked by the anisotropic depth criteria.
374 [&](TasmanianSparseGrid &g)->std::vector<double>{
375 return g.getCandidateConstructionPoints(type, anisotropic_weights, level_limits);
376 },
377 checkpoint_filename);
378}
379
// mpiConstructSurrogate() overload driven by a TypeDepth selection and a
// single output index used to infer the anisotropy.  Thin wrapper over
// mpiConstructCommon() — only the candidate-selection lambda differs.
// NOTE(review): the declaration line naming the function (Doxygen line 390)
// and the grid parameter line (394) were dropped from this extracted listing.
389template<bool use_initial_guess = no_initial_guess>
391 int num_dimensions, int num_outputs,
392 size_t max_num_points, size_t max_samples_per_job,
393 size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm,
395 TypeDepth type, int output, std::vector<int> const &level_limits = std::vector<int>(),
396 std::string const &checkpoint_filename = std::string()){
397
398 mpiConstructCommon<use_initial_guess>(model, num_dimensions, num_outputs, max_num_points, max_samples_per_job,
399 max_num_ranks, tagx, tagy, root, comm, grid,
// Candidate points ranked using the given depth type and output column.
400 [&](TasmanianSparseGrid &g)->std::vector<double>{
401 return g.getCandidateConstructionPoints(type, output, level_limits);
402 },
403 checkpoint_filename);
404}
405
// Executes the worker (i.e., non-root) portion of the MPI sampling: forwards
// to mpiConstructCommon() with a throw-away empty grid, zero max_num_points,
// an empty candidates lambda and no checkpoint file — on non-root ranks the
// common driver only runs the receive/model/send loop, so those arguments are
// never meaningfully used.  Template parameter use_initial_guess must match
// the one used by the root's mpiConstructSurrogate() call so that both sides
// agree on the message layout.
// NOTE(review): the declaration line naming mpiConstructWorker (Doxygen
// line 419) was dropped from this extracted listing.
418template<bool use_initial_guess = no_initial_guess>
420 int num_dimensions, int num_outputs, size_t max_samples_per_job,
421 size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm){
422
423 TasmanianSparseGrid grid; // workers use an empty grid
424 mpiConstructCommon<use_initial_guess>(model, num_dimensions, num_outputs, 0, max_samples_per_job,
425 max_num_ranks, tagx, tagy, root, comm, grid,
426 [&](TasmanianSparseGrid&)->std::vector<double>{
427 return std::vector<double>();
428 }, std::string());
429}
430
431}
432
433#endif // Tasmanian_ENABLE_MPI
434
435#endif
The master-class that represents an instance of a Tasmanian sparse grid.
Definition TasmanianSparseGrid.hpp:293
std::vector< double > getCandidateConstructionPoints(TypeDepth type, std::vector< int > const &anisotropic_weights=std::vector< int >(), std::vector< int > const &level_limits=std::vector< int >())
Generate a sorted list of points weighted by descending importance.
Converts a vector to a basic stream-buffer.
Definition tsgMPIScatterGrid.hpp:72
TypeDepth
Used by Global Sequence and Fourier grids, indicates the selection criteria.
Definition tsgEnumerates.hpp:203
TypeRefinement
Refinement strategy for local polynomial and wavelet grids.
Definition tsgEnumerates.hpp:425
constexpr bool with_initial_guess
Allows for expressive calls to TasGrid::constructSurrogate().
Definition tsgConstructSurrogate.hpp:84
void mpiConstructWorker(ModelSignatureMPI model, int num_dimensions, int num_outputs, size_t max_samples_per_job, size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm)
Executes the worker (i.e., non-root) portion of the MPI sampling.
Definition tsgMPIConstructGrid.hpp:419
std::function< void(std::vector< double > const &x, std::vector< double > &y)> ModelSignatureMPI
Signature of a model function to be used in the MPI construction procedures.
Definition tsgMPIConstructGrid.hpp:69
void mpiConstructSurrogate(ModelSignatureMPI model, int num_dimensions, int num_outputs, size_t max_num_points, size_t max_samples_per_job, size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm, TasmanianSparseGrid &grid, double tolerance, TypeRefinement criteria, int output=-1, std::vector< int > const &level_limits=std::vector< int >(), std::vector< double > const &scale_correction=std::vector< double >(), std::string const &checkpoint_filename=std::string())
Construct a sparse grid surrogate to the model defined by the lambda, MPI version.
Definition tsgMPIConstructGrid.hpp:336
void mpiConstructCommon(ModelSignatureMPI model, int num_dimensions, int num_outputs, size_t max_num_points, size_t max_samples_per_job, size_t max_num_ranks, int tagx, int tagy, int root, MPI_Comm comm, TasmanianSparseGrid &grid, std::function< std::vector< double >(TasmanianSparseGrid &)> candidates, std::string const &checkpoint_filename)
MPI construction algorithm using generic candidates procedure.
Definition tsgMPIConstructGrid.hpp:80
int getMPIRank(MPI_Comm comm)
Utility to return the rank within the given comm.
Definition tsgMPIScatterGrid.hpp:87
void writeNumbers(std::ostream &os, Vals... vals)
Write a bunch of numbers with the same type.
Definition tsgIOHelpers.hpp:383
size_t size_mult(IntA a, IntB b)
Converts two integer-like variables to size_t and returns their product.
Definition tsgUtils.hpp:82
Encapsulates the Tasmanian Sparse Grid module.
Definition TasmanianSparseGrid.hpp:68
Automated parallel construction procedure.
DREAM objects send/receive through MPI.