5#include "dg/topology/grid.h"
7#include "dg/backend/mpi_vector.h"
8#include "dg/topology/mpi_grid.h"
9#include "dg/backend/mpi_init.h"
106inline int put_var_T(
int ncid,
int varid,
const T* data)
108 assert(
false &&
"Type not supported!\n" );
112inline int put_var_T<float>(
int ncid,
int varid,
const float* data){
113 return nc_put_var_float( ncid, varid, data);
116inline int put_var_T<double>(
int ncid,
int varid,
const double* data){
117 return nc_put_var_double( ncid, varid, data);
120inline int put_var_T<int>(
int ncid,
int varid,
const int* data){
121 return nc_put_var_int( ncid, varid, data);
124inline int put_var_T<unsigned>(
int ncid,
int varid,
const unsigned* data){
125 return nc_put_var_uint( ncid, varid, data);
128inline int put_vara_T(
int ncid,
int varid,
const size_t* startp,
const size_t* countp,
const T* data)
130 assert(
false &&
"Type not supported!\n" );
134inline int put_vara_T<float>(
int ncid,
int varid,
const size_t* startp,
const size_t* countp,
const float* data){
135 return nc_put_vara_float( ncid, varid, startp, countp, data);
138inline int put_vara_T<double>(
int ncid,
int varid,
const size_t* startp,
const size_t* countp,
const double* data){
139 return nc_put_vara_double( ncid, varid, startp, countp, data);
142inline int put_vara_T<int>(
int ncid,
int varid,
const size_t* startp,
const size_t* countp,
const int* data){
143 return nc_put_vara_int( ncid, varid, startp, countp, data);
146inline int put_vara_T<unsigned>(
int ncid,
int varid,
const size_t* startp,
const size_t* countp,
const unsigned* data){
147 return nc_put_vara_uint( ncid, varid, startp, countp, data);
150template<
class host_vector>
151void put_vara_detail(
int ncid,
int varid,
152 const MPINcHyperslab& slab,
153 const host_vector& data,
154 thrust::host_vector<dg::get_value_type<host_vector>>& receive,
155 MPI_Comm global_comm = MPI_COMM_WORLD
158 MPI_Comm comm = slab.communicator();
161 int local_root_rank = dg::file::detail::mpi_comm_global2local_rank(comm, 0,
163 if (local_root_rank == MPI_UNDEFINED)
165 unsigned ndim = slab.ndim();
167 MPI_Comm_rank( comm, &rank);
168 MPI_Comm_size( comm, &size);
171 std::vector<size_t> r_start( rank == local_root_rank ? size * ndim : 0);
172 std::vector<size_t> r_count( rank == local_root_rank ? size * ndim : 0);
173 MPI_Gather( slab.startp(), ndim, dg::getMPIDataType<size_t>(),
174 &r_start[0], ndim, dg::getMPIDataType<size_t>(),
175 local_root_rank, comm);
176 MPI_Gather( slab.countp(), ndim, dg::getMPIDataType<size_t>(),
177 &r_count[0], ndim, dg::getMPIDataType<size_t>(),
178 local_root_rank, comm);
180 MPI_Datatype mpitype = dg::getMPIDataType<get_value_type<host_vector>>();
182 if( rank == local_root_rank )
186 int e = nc_inq_varndims( ncid, varid, &ndims);
189 if( (
unsigned)ndims != slab.ndim())
192 std::vector<size_t> sizes( size, 1);
193 for(
int r = 0 ; r < size; r++)
194 for(
unsigned u=0; u<ndim; u++)
195 sizes[r]*= r_count[r*ndim + u];
198 unsigned max_size = *std::max_element( sizes.begin(), sizes.end());
199 receive.resize( max_size);
200 for(
int r=0; r<size; r++)
205 MPI_Recv( receive.data(), (
int)sizes[r], mpitype,
206 r, r, comm, &status);
207 int e = detail::put_vara_T( ncid, varid, &r_start[r*ndim],
208 &r_count[r*ndim], receive.data());
214 int e = detail::put_vara_T( ncid, varid, slab.startp(),
215 slab.countp(), thrust::raw_pointer_cast(data.data()));
223 for(
unsigned u=0; u<ndim; u++)
224 num*= slab.count()[u];
225 MPI_Send( thrust::raw_pointer_cast(data.data()), num, mpitype,
226 local_root_rank, rank, comm);
228 MPI_Bcast( &err, 1, dg::getMPIDataType<int>(), local_root_rank, comm);
230 throw NC_Error( err);
235template<
class host_vector,
class MPITopology>
236void put_vara_detail(
int ncid,
int varid,
unsigned slice,
237 const MPITopology& grid,
const MPI_Vector<host_vector>& data,
238 bool vara,
bool parallel =
false)
240 MPINcHyperslab slab( grid);
242 slab = MPINcHyperslab( slice, grid);
245 file::NC_Error_Handle err;
246 err = detail::put_vara_T( ncid, varid,
247 slab.startp(), slab.countp(), data.data().data());
251 thrust::host_vector<dg::get_value_type<host_vector>> receive;
252 put_vara_detail( ncid, varid, slab, data.data(), receive);
283template<
class host_vector,
class Topology>
284void put_var(
int ncid,
int varid,
const Topology& grid,
285 const host_vector& data,
bool parallel =
false)
288 err = detail::put_var_T( ncid, varid, data.data());
315template<
class host_vector,
class Topology>
316void put_vara(
int ncid,
int varid,
unsigned slice,
const Topology& grid,
317 const host_vector& data,
bool parallel =
false)
321 err = detail::put_vara_T( ncid, varid, slab.
startp(), slab.
countp(),
340template<
class T,
class real_type>
341void put_var(
int ncid,
int varid,
const RealGrid0d<real_type>& grid,
342 T data,
bool parallel =
false)
345 err = detail::put_var_T( ncid, varid, &data);
363template<
class T,
class real_type>
364void put_vara(
int ncid,
int varid,
unsigned slice,
const RealGrid0d<real_type>& grid,
365 T data,
bool parallel =
false)
369 size_t start = slice;
370 err = detail::put_vara_T( ncid, varid, &start, &count, &data);
379template<
class host_vector,
class MPITopology>
380void put_var(
int ncid,
int varid,
const MPITopology& grid,
381 const dg::MPI_Vector<host_vector>& data,
bool parallel =
false)
383 detail::put_vara_detail( ncid, varid, 0, grid, data,
false, parallel);
386template<
class host_vector,
class MPITopology>
387void put_vara(
int ncid,
int varid,
unsigned slice,
388 const MPITopology& grid,
const dg::MPI_Vector<host_vector>& data,
389 bool parallel =
false)
391 detail::put_vara_detail( ncid, varid, slice, grid, data,
true, parallel);
395template<
class T,
class real_type>
396void put_var(
int ncid,
int varid,
const RealMPIGrid0d<real_type>& grid,
397 T data,
bool parallel =
false)
400 MPI_Comm_rank( MPI_COMM_WORLD, &rank);
402 put_var( ncid, varid, dg::RealGrid0d<real_type>(), data, parallel);
404template<
class T,
class real_type>
405void put_vara(
int ncid,
int varid,
unsigned slice,
const RealMPIGrid0d<real_type>& grid,
406 T data,
bool parallel =
false)
409 MPI_Comm_rank( MPI_COMM_WORLD, &rank);
411 put_vara( ncid, varid, slice, dg::RealGrid0d<real_type>(), data, parallel);
/*! @brief DEPRECATED backwards-compatible alias for \c put_var.
 *
 * The name predates the type-generic \c put_var; forwards unchanged.
 */
template<class host_vector, class Topology>
void put_var_double(int ncid, int varid, const Topology& grid,
    const host_vector& data, bool parallel = false)
{
    put_var( ncid, varid, grid, data, parallel);
}
/*! @brief DEPRECATED backwards-compatible alias for \c put_vara.
 *
 * The name predates the type-generic \c put_vara; forwards unchanged.
 */
template<class host_vector, class Topology>
void put_vara_double(int ncid, int varid, unsigned slice,
    const Topology& grid, const host_vector& data, bool parallel = false)
{
    put_vara( ncid, varid, slice, grid, data, parallel);
}
void put_vara(int ncid, int varid, unsigned slice, const Topology &grid, const host_vector &data, bool parallel=false)
DEPRECATED Write an array to NetCDF file.
Definition easy_output.h:316
void put_var(int ncid, int varid, const Topology &grid, const host_vector &data, bool parallel=false)
DEPRECATED Write an array to NetCDF file.
Definition easy_output.h:284
Definition easy_atts.h:15
DEPRECATED Empty utility class that handles return values of netcdf functions and throws NC_Error(st...
Definition nc_error.h:70
A NetCDF Hyperslab for SerialNcFile.
Definition nc_hyperslab.h:27
const size_t * countp() const
Definition nc_hyperslab.h:134
const size_t * startp() const
Definition nc_hyperslab.h:132