5#include "dg/topology/grid.h"
7#include "dg/backend/mpi_vector.h"
8#include "dg/topology/mpi_grid.h"
45 char const*
what()
const throw(){
46 return nc_strerror(error_);}
126template<
class host_vector>
128 const host_vector& data,
bool parallel =
false)
131 size_t start[2] = {0,0}, count[2];
132 count[0] = grid.ny()*grid.Ny();
133 count[1] = grid.nx()*grid.Nx();
134 err = nc_put_vara_double( ncid, varid, start, count, data.data());
164template<
class host_vector>
166 const dg::aTopology2d& grid,
const host_vector& data,
bool parallel =
false)
169 size_t start[3] = {slice,0,0}, count[3];
171 count[1] = grid.ny()*grid.Ny();
172 count[2] = grid.nx()*grid.Nx();
173 err = nc_put_vara_double( ncid, varid, start, count, data.data());
177template<
class host_vector>
179 const host_vector& data,
bool parallel =
false)
182 size_t start[3] = {0,0,0}, count[3];
183 count[0] = grid.nz()*grid.Nz();
184 count[1] = grid.ny()*grid.Ny();
185 count[2] = grid.nx()*grid.Nx();
186 err = nc_put_vara_double( ncid, varid, start, count, data.data());
190template<
class host_vector>
192 const dg::aTopology3d& grid,
const host_vector& data,
bool parallel =
false)
195 size_t start[4] = {slice, 0,0,0}, count[4];
197 count[1] = grid.nz()*grid.Nz();
198 count[2] = grid.ny()*grid.Ny();
199 count[3] = grid.nx()*grid.Nx();
200 err = nc_put_vara_double( ncid, varid, start, count, data.data());
205template<
class host_vector>
207 const dg::MPI_Vector<host_vector>& data,
bool parallel =
false)
210 size_t start[3] = {0,0}, count[2];
211 count[0] = grid.ny()*grid.local().Ny();
212 count[1] = grid.nx()*grid.local().Nx();
214 MPI_Comm comm = grid.communicator();
215 MPI_Comm_rank( comm, &rank);
216 MPI_Comm_size( comm, &size);
220 size_t local_size = grid.local().size();
221 std::vector<int> coords( size*2);
222 for(
int rrank=0; rrank<size; rrank++)
223 MPI_Cart_coords( comm, rrank, 2, &coords[2*rrank]);
226 host_vector receive( data.data());
227 for(
int rrank=0; rrank<size; rrank++)
230 MPI_Recv( receive.data(), local_size, MPI_DOUBLE,
231 rrank, rrank, comm, &status);
232 start[0] = coords[2*rrank+1]*count[0],
233 start[1] = coords[2*rrank+0]*count[1],
234 err = nc_put_vara_double( ncid, varid, start, count,
239 MPI_Send( data.data().data(), local_size, MPI_DOUBLE,
246 MPI_Cart_coords( comm, rank, 2, coords);
247 start[0] = coords[1]*count[0],
248 start[1] = coords[0]*count[1],
249 err = nc_put_vara_double( ncid, varid, start, count,
255template<
class host_vector>
257 const dg::aMPITopology2d& grid,
const dg::MPI_Vector<host_vector>& data,
258 bool parallel =
false)
261 size_t start[3] = {slice, 0,0}, count[3];
263 count[1] = grid.ny()*grid.local().Ny();
264 count[2] = grid.nx()*grid.local().Nx();
266 MPI_Comm comm = grid.communicator();
267 MPI_Comm_rank( comm, &rank);
268 MPI_Comm_size( comm, &size);
272 MPI_Cart_coords( comm, rank, 2, coords);
273 start[1] = coords[1]*count[1],
274 start[2] = coords[0]*count[2],
275 err = nc_put_vara_double( ncid, varid, start, count,
281 size_t local_size = grid.local().size();
282 std::vector<int> coords( size*2);
283 for(
int rrank=0; rrank<size; rrank++)
284 MPI_Cart_coords( comm, rrank, 2, &coords[2*rrank]);
287 host_vector receive( data.data());
288 for(
int rrank=0; rrank<size; rrank++)
291 MPI_Recv( receive.data(), local_size, MPI_DOUBLE,
292 rrank, rrank, comm, &status);
293 start[1] = coords[2*rrank+1]*count[1],
294 start[2] = coords[2*rrank+0]*count[2],
295 err = nc_put_vara_double( ncid, varid, start, count,
300 MPI_Send( data.data().data(), local_size, MPI_DOUBLE,
307template<
class host_vector>
309 const dg::aMPITopology3d& grid,
const dg::MPI_Vector<host_vector>& data,
310 bool parallel =
false)
313 size_t start[3] = {0,0,0}, count[3];
314 count[0] = grid.nz()*grid.local().Nz();
315 count[1] = grid.ny()*grid.local().Ny();
316 count[2] = grid.nx()*grid.local().Nx();
318 MPI_Comm comm = grid.communicator();
319 MPI_Comm_rank( comm, &rank);
320 MPI_Comm_size( comm, &size);
324 size_t local_size = grid.local().size();
325 std::vector<int> coords( size*3);
326 for(
int rrank=0; rrank<size; rrank++)
327 MPI_Cart_coords( comm, rrank, 3, &coords[3*rrank]);
330 host_vector receive( data.data());
331 for(
int rrank=0; rrank<size; rrank++)
334 MPI_Recv( receive.data(), local_size, MPI_DOUBLE,
335 rrank, rrank, comm, &status);
336 start[0] = coords[3*rrank+2]*count[0],
337 start[1] = coords[3*rrank+1]*count[1],
338 start[2] = coords[3*rrank+0]*count[2];
339 err = nc_put_vara_double( ncid, varid, start, count,
344 MPI_Send( data.data().data(), local_size, MPI_DOUBLE,
351 MPI_Cart_coords( comm, rank, 3, coords);
352 start[0] = coords[2]*count[0],
353 start[1] = coords[1]*count[1],
354 start[2] = coords[0]*count[2];
355 err = nc_put_vara_double( ncid, varid, start, count,
361template<
class host_vector>
363 const dg::aMPITopology3d& grid,
const dg::MPI_Vector<host_vector>& data,
364 bool parallel =
false)
367 size_t start[4] = {slice, 0,0,0}, count[4];
369 count[1] = grid.nz()*grid.local().Nz();
370 count[2] = grid.ny()*grid.local().Ny();
371 count[3] = grid.nx()*grid.local().Nx();
373 MPI_Comm comm = grid.communicator();
374 MPI_Comm_rank( comm, &rank);
375 MPI_Comm_size( comm, &size);
379 MPI_Cart_coords( comm, rank, 3, coords);
380 start[1] = coords[2]*count[1],
381 start[2] = coords[1]*count[2],
382 start[3] = coords[0]*count[3];
383 err = nc_put_vara_double( ncid, varid, start, count,
389 size_t local_size = grid.local().size();
390 std::vector<int> coords( size*3);
391 for(
int rrank=0; rrank<size; rrank++)
392 MPI_Cart_coords( comm, rrank, 3, &coords[3*rrank]);
395 host_vector receive( data.data());
396 for(
int rrank=0; rrank<size; rrank++)
399 MPI_Recv( receive.data(), local_size, MPI_DOUBLE,
400 rrank, rrank, comm, &status);
401 start[1] = coords[3*rrank+2]*count[1],
402 start[2] = coords[3*rrank+1]*count[2],
403 start[3] = coords[3*rrank+0]*count[3];
404 err = nc_put_vara_double( ncid, varid, start, count,
409 MPI_Send( data.data().data(), local_size, MPI_DOUBLE,
error
Switch between how to handle errors in Json utility functions.
Definition: json_utilities.h:30
void put_vara_double(int ncid, int varid, unsigned slice, const dg::aTopology2d &grid, const host_vector &data, bool parallel=false)
Convenience wrapper around nc_put_vara_double()
Definition: easy_output.h:165
void put_var_double(int ncid, int varid, const dg::aTopology2d &grid, const host_vector &data, bool parallel=false)
Convenience wrapper around nc_put_vara_double()
Definition: easy_output.h:127
Empty utility class that handles return values of netcdf functions and throws NC_Error(status) if( s...
Definition: easy_output.h:73
NC_Error_Handle operator=(int err)
Construct from error code.
Definition: easy_output.h:82
NC_Error_Handle operator()(int err)
Construct from error code.
Definition: easy_output.h:95
Class thrown by the NC_Error_Handle.
Definition: easy_output.h:32
char const * what() const
What string.
Definition: easy_output.h:45
NC_Error(int error)
Construct from error code.
Definition: easy_output.h:39