Extension: Json and NetCDF utilities
#include "dg/file/file.h"
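The header provides put_var and put_vara, which write a host array evaluated on a dg grid into an already defined NetCDF variable. A minimal serial sketch of the intended use follows; the file name, variable names, grid parameters and the test function are illustrative only, and the grid accessors n(), Nx(), Ny() are assumed from the dg grid interface.

#include <cmath>
#include "dg/algorithm.h"
#include "dg/file/file.h"

int main()
{
    // 2d grid with 3 polynomial coefficients and 20 x 20 cells (illustrative sizes)
    dg::Grid2d grid( 0., 1., 0., 1., 3, 20, 20);
    dg::HVec field = dg::evaluate(
        []( double x, double y){ return sin(x)*sin(y);}, grid);

    int ncid, dim_ids[2], varid;
    dg::file::NC_Error_Handle err;
    err = nc_create( "test.nc", NC_NETCDF4|NC_CLOBBER, &ncid);
    // NetCDF is row-major: y is the slow, x the fast dimension
    err = nc_def_dim( ncid, "y", grid.n()*grid.Ny(), &dim_ids[0]);
    err = nc_def_dim( ncid, "x", grid.n()*grid.Nx(), &dim_ids[1]);
    err = nc_def_var( ncid, "field", NC_DOUBLE, 2, dim_ids, &varid);
    err = nc_enddef( ncid);
    dg::file::put_var( ncid, varid, grid, field); // write the whole field
    err = nc_close( ncid);
    return 0;
}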
easy_output.h
#pragma once

#include <exception>
#include <netcdf.h>
#include "dg/topology/grid.h"
#ifdef MPI_VERSION
#include "dg/backend/mpi_vector.h"
#include "dg/topology/mpi_grid.h"
#include "dg/backend/mpi_init.h"
#endif //MPI_VERSION
#include "nc_error.h"
#include "nc_hyperslab.h"

namespace dg
{
namespace file
{
namespace detail
{

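// Map the C++ value type to the matching nc_put_var_* call; the
// unspecialized template catches unsupported types.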
template<class T>
inline int put_var_T( int ncid, int varid, const T* data)
{
    assert( false && "Type not supported!\n" );
    return NC_EBADTYPE;
}
template<>
inline int put_var_T<float>( int ncid, int varid, const float* data){
    return nc_put_var_float( ncid, varid, data);
}
template<>
inline int put_var_T<double>( int ncid, int varid, const double* data){
    return nc_put_var_double( ncid, varid, data);
}
template<>
inline int put_var_T<int>( int ncid, int varid, const int* data){
    return nc_put_var_int( ncid, varid, data);
}
template<>
inline int put_var_T<unsigned>( int ncid, int varid, const unsigned* data){
    return nc_put_var_uint( ncid, varid, data);
}
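// Same type dispatch for hyperslab writes via nc_put_vara_*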
template<class T>
inline int put_vara_T( int ncid, int varid, const size_t* startp, const size_t* countp, const T* data)
{
    assert( false && "Type not supported!\n" );
    return NC_EBADTYPE;
}
template<>
inline int put_vara_T<float>( int ncid, int varid, const size_t* startp, const size_t* countp, const float* data){
    return nc_put_vara_float( ncid, varid, startp, countp, data);
}
template<>
inline int put_vara_T<double>( int ncid, int varid, const size_t* startp, const size_t* countp, const double* data){
    return nc_put_vara_double( ncid, varid, startp, countp, data);
}
template<>
inline int put_vara_T<int>( int ncid, int varid, const size_t* startp, const size_t* countp, const int* data){
    return nc_put_vara_int( ncid, varid, startp, countp, data);
}
template<>
inline int put_vara_T<unsigned>( int ncid, int varid, const size_t* startp, const size_t* countp, const unsigned* data){
    return nc_put_vara_uint( ncid, varid, startp, countp, data);
}
#ifdef MPI_VERSION
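// Funnel algorithm: every rank of the hyperslab's communicator sends its
// local start/count vectors and its data to the group root, which issues
// all NetCDF calls; the resulting error code is broadcast back to the group.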
template<class host_vector>
void put_vara_detail(int ncid, int varid,
    const MPINcHyperslab& slab,
    const host_vector& data,
    thrust::host_vector<dg::get_value_type<host_vector>>& receive,
    MPI_Comm global_comm = MPI_COMM_WORLD
    )
{
    MPI_Comm comm = slab.communicator();
    // we need to identify the global root rank within the groups and mark the
    // entire group
    int local_root_rank = dg::file::detail::mpi_comm_global2local_rank(comm, 0,
        global_comm);
    if (local_root_rank == MPI_UNDEFINED)
        return;
    unsigned ndim = slab.ndim(); // same on all processes
    int rank, size;
    MPI_Comm_rank( comm, &rank);
    MPI_Comm_size( comm, &size);

    // Send start and count vectors to root
    std::vector<size_t> r_start( rank == local_root_rank ? size * ndim : 0);
    std::vector<size_t> r_count( rank == local_root_rank ? size * ndim : 0);
    MPI_Gather( slab.startp(), ndim, dg::getMPIDataType<size_t>(),
        &r_start[0], ndim, dg::getMPIDataType<size_t>(),
        local_root_rank, comm);
    MPI_Gather( slab.countp(), ndim, dg::getMPIDataType<size_t>(),
        &r_count[0], ndim, dg::getMPIDataType<size_t>(),
        local_root_rank, comm);

    MPI_Datatype mpitype = dg::getMPIDataType<get_value_type<host_vector>>();
    int err = NC_NOERR; // store error flag
    if( rank == local_root_rank )
    {
        // Sanity check
        int ndims;
        int e = nc_inq_varndims( ncid, varid, &ndims);
        if( e)
            err = e;
        if( (unsigned)ndims != slab.ndim())
            err = 1001; // Our own error code

        std::vector<size_t> sizes( size, 1);
        for( int r = 0 ; r < size; r++)
            for( unsigned u=0; u<ndim; u++)
                sizes[r]*= r_count[r*ndim + u];

        // host_vector could be a View
        unsigned max_size = *std::max_element( sizes.begin(), sizes.end());
        receive.resize( max_size);
        for( int r=0; r<size; r++)
        {
            if(r!=rank)
            {
                MPI_Status status;
                MPI_Recv( receive.data(), (int)sizes[r], mpitype,
                    r, r, comm, &status);
                int e = detail::put_vara_T( ncid, varid, &r_start[r*ndim],
                    &r_count[r*ndim], receive.data()); // write received
                if( e) err = e;
            }
            else // write own data
            {
                int e = detail::put_vara_T( ncid, varid, slab.startp(),
                    slab.countp(), thrust::raw_pointer_cast(data.data()));
                if( e) err = e;
            }
        }
    }
    else
    {
        size_t num = 1;
        for( unsigned u=0; u<ndim; u++)
            num*= slab.count()[u];
        MPI_Send( thrust::raw_pointer_cast(data.data()), num, mpitype,
            local_root_rank, rank, comm);
    }
    MPI_Bcast( &err, 1, dg::getMPIDataType<int>(), local_root_rank, comm);
    if( err)
        throw NC_Error( err);
    return;
}

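// Overload that constructs the hyperslab from a (possibly sliced) MPI grid;
// with parallel==true every rank writes its own chunk (typically a file
// opened for parallel NetCDF access), otherwise the data is funneled through
// the group root with the routine above.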
// all comms must be same size
template<class host_vector, class MPITopology>
void put_vara_detail(int ncid, int varid, unsigned slice,
    const MPITopology& grid, const MPI_Vector<host_vector>& data,
    bool vara, bool parallel = false)
{
    MPINcHyperslab slab( grid);
    if( vara)
        slab = MPINcHyperslab( slice, grid);
    if( parallel)
    {
        file::NC_Error_Handle err;
        err = detail::put_vara_T( ncid, varid,
            slab.startp(), slab.countp(), data.data().data());
    }
    else
    {
        thrust::host_vector<dg::get_value_type<host_vector>> receive;
        put_vara_detail( ncid, varid, slab, data.data(), receive);
    }
}
#endif // MPI_VERSION
} // namespace detail

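// Serial versions (marked DEPRECATED in the documentation): put_var writes
// the whole variable, put_vara writes the hyperslab belonging to time slice
// 'slice' on the given grid; the parallel flag is ignored here.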
template<class host_vector, class Topology>
void put_var( int ncid, int varid, const Topology& grid,
    const host_vector& data, bool parallel = false)
{
    file::NC_Error_Handle err;
    err = detail::put_var_T( ncid, varid, data.data());
}

template<class host_vector, class Topology>
void put_vara( int ncid, int varid, unsigned slice, const Topology& grid,
    const host_vector& data, bool parallel = false)
{
    file::NC_Error_Handle err;
    NcHyperslab slab( slice, grid);
    err = detail::put_vara_T( ncid, varid, slab.startp(), slab.countp(),
        data.data());
}
// scalar data

template<class T, class real_type>
void put_var( int ncid, int varid, const RealGrid0d<real_type>& grid,
    T data, bool parallel = false)
{
    file::NC_Error_Handle err;
    err = detail::put_var_T( ncid, varid, &data);
}
template<class T, class real_type>
void put_vara( int ncid, int varid, unsigned slice, const RealGrid0d<real_type>& grid,
    T data, bool parallel = false)
{
    file::NC_Error_Handle err;
    size_t count = 1;
    size_t start = slice; // convert to size_t
    err = detail::put_vara_T( ncid, varid, &start, &count, &data);
}

#ifdef MPI_VERSION

// array data
template<class host_vector, class MPITopology>
void put_var(int ncid, int varid, const MPITopology& grid,
    const dg::MPI_Vector<host_vector>& data, bool parallel = false)
{
    detail::put_vara_detail( ncid, varid, 0, grid, data, false, parallel);
}

template<class host_vector, class MPITopology>
void put_vara(int ncid, int varid, unsigned slice,
    const MPITopology& grid, const dg::MPI_Vector<host_vector>& data,
    bool parallel = false)
{
    detail::put_vara_detail( ncid, varid, slice, grid, data, true, parallel);
}
// scalar data

template<class T, class real_type>
void put_var( int ncid, int varid, const RealMPIGrid0d<real_type>& grid,
    T data, bool parallel = false)
{
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank);
    if( rank == 0)
        put_var( ncid, varid, dg::RealGrid0d<real_type>(), data, parallel);
}
template<class T, class real_type>
void put_vara( int ncid, int varid, unsigned slice, const RealMPIGrid0d<real_type>& grid,
    T data, bool parallel = false)
{
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank);
    if( rank == 0)
        put_vara( ncid, varid, slice, dg::RealGrid0d<real_type>(), data, parallel);
}

#endif //MPI_VERSION

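// Legacy names kept for backwards compatibility; they simply forward to
// put_var and put_vara.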
template<class host_vector, class Topology>
void put_var_double(int ncid, int varid, const Topology& grid,
    const host_vector& data, bool parallel = false)
{
    put_var( ncid, varid, grid, data, parallel);
}

template<class host_vector, class Topology>
void put_vara_double(int ncid, int varid, unsigned slice, const Topology& grid,
    const host_vector& data, bool parallel = false)
{
    put_vara( ncid, varid, slice, grid, data, parallel);
}

}//namespace file
}//namespace dg