Extension: Json and NetCDF utilities
#include "dg/file/file.h"
easy_output.h
#pragma once

#include <cassert>   // assert() in the type dispatch helpers below
#include <algorithm> // std::max_element
#include <vector>
#include <exception>
#include <netcdf.h>
#include "dg/topology/grid.h"
#ifdef MPI_VERSION
#include "dg/backend/mpi_vector.h"
#include "dg/topology/mpi_grid.h"
#include "dg/backend/mpi_init.h"
#endif //MPI_VERSION
#include "nc_error.h"
#include "nc_hyperslab.h"

namespace dg
{
namespace file
{
namespace detail
{

// Dispatch helpers: forward to the correctly typed netCDF C calls.
// The unspecialized templates catch value types that netCDF cannot write.
template<class T>
inline int put_var_T( int /*ncid*/, int /*varid*/, const T* /*data*/)
{
    assert( false && "Type not supported!\n" );
    return NC_EBADTYPE;
}
template<>
inline int put_var_T<float>( int ncid, int varid, const float* data){
    return nc_put_var_float( ncid, varid, data);
}
template<>
inline int put_var_T<double>( int ncid, int varid, const double* data){
    return nc_put_var_double( ncid, varid, data);
}
template<>
inline int put_var_T<int>( int ncid, int varid, const int* data){
    return nc_put_var_int( ncid, varid, data);
}
template<>
inline int put_var_T<unsigned>( int ncid, int varid, const unsigned* data){
    return nc_put_var_uint( ncid, varid, data);
}
template<class T>
inline int put_vara_T( int /*ncid*/, int /*varid*/, const size_t* /*startp*/, const size_t* /*countp*/, const T* /*data*/)
{
    assert( false && "Type not supported!\n" );
    return NC_EBADTYPE;
}
template<>
inline int put_vara_T<float>( int ncid, int varid, const size_t* startp, const size_t* countp, const float* data){
    return nc_put_vara_float( ncid, varid, startp, countp, data);
}
template<>
inline int put_vara_T<double>( int ncid, int varid, const size_t* startp, const size_t* countp, const double* data){
    return nc_put_vara_double( ncid, varid, startp, countp, data);
}
template<>
inline int put_vara_T<int>( int ncid, int varid, const size_t* startp, const size_t* countp, const int* data){
    return nc_put_vara_int( ncid, varid, startp, countp, data);
}
template<>
inline int put_vara_T<unsigned>( int ncid, int varid, const size_t* startp, const size_t* countp, const unsigned* data){
    return nc_put_vara_uint( ncid, varid, startp, countp, data);
}
#ifdef MPI_VERSION
// Funnel all chunks through the root rank of the slab's communicator,
// which writes them to the file one after the other
template<class host_vector>
void put_vara_detail(int ncid, int varid,
    const MPINcHyperslab& slab,
    const host_vector& data,
    thrust::host_vector<dg::get_value_type<host_vector>>& receive,
    MPI_Comm global_comm = MPI_COMM_WORLD
    )
{
    MPI_Comm comm = slab.communicator();
    // we need to identify the global root rank within the groups and mark the
    // entire group
    int local_root_rank = dg::file::detail::mpi_comm_global2local_rank(comm, 0,
        global_comm);
    if (local_root_rank == MPI_UNDEFINED)
        return;
    unsigned ndim = slab.ndim(); // same on all processes
    int rank, size;
    MPI_Comm_rank( comm, &rank);
    MPI_Comm_size( comm, &size);

    // Send start and count vectors to root
    std::vector<size_t> r_start( rank == local_root_rank ? size * ndim : 0);
    std::vector<size_t> r_count( rank == local_root_rank ? size * ndim : 0);
    MPI_Gather( slab.startp(), ndim, dg::getMPIDataType<size_t>(),
        &r_start[0], ndim, dg::getMPIDataType<size_t>(),
        local_root_rank, comm);
    MPI_Gather( slab.countp(), ndim, dg::getMPIDataType<size_t>(),
        &r_count[0], ndim, dg::getMPIDataType<size_t>(),
        local_root_rank, comm);

    MPI_Datatype mpitype = dg::getMPIDataType<get_value_type<host_vector>>();
    int err = NC_NOERR; // store error flag
    if( rank == local_root_rank )
    {
        // Sanity check
        int ndims;
        int e = nc_inq_varndims( ncid, varid, &ndims);
        if( e)
            err = e;
        if( (unsigned)ndims != slab.ndim())
            err = 1001; // Our own error code

        std::vector<size_t> sizes( size, 1);
        for( int r = 0 ; r < size; r++)
            for( unsigned u=0; u<ndim; u++)
                sizes[r]*= r_count[r*ndim + u];

        // host_vector could be a View
        unsigned max_size = *std::max_element( sizes.begin(), sizes.end());
        receive.resize( max_size);
        for( int r=0; r<size; r++)
        {
            if(r!=rank)
            {
                MPI_Status status;
                MPI_Recv( receive.data(), (int)sizes[r], mpitype,
                    r, r, comm, &status);
                int e = detail::put_vara_T( ncid, varid, &r_start[r*ndim],
                    &r_count[r*ndim], receive.data()); // write received
                if( e) err = e;
            }
            else // write own data
            {
                int e = detail::put_vara_T( ncid, varid, slab.startp(),
                    slab.countp(), thrust::raw_pointer_cast(data.data()));
                if( e) err = e;
            }
        }
    }
    else
    {
        size_t num = 1;
        for( unsigned u=0; u<ndim; u++)
            num*= slab.count()[u];
        MPI_Send( thrust::raw_pointer_cast(data.data()), num, mpitype,
            local_root_rank, rank, comm);
    }
    MPI_Bcast( &err, 1, dg::getMPIDataType<int>(), local_root_rank, comm);
    if( err)
        throw NC_Error( err);
    return;
}

// all comms must be same size
template<class host_vector, class MPITopology>
void put_vara_detail(int ncid, int varid, unsigned slice,
    const MPITopology& grid, const MPI_Vector<host_vector>& data,
    bool vara, bool parallel = false)
{
    MPINcHyperslab slab( grid);
    if( vara)
        slab = MPINcHyperslab( slice, grid);
    if( parallel)
    {
        file::NC_Error_Handle err;
        err = detail::put_vara_T( ncid, varid,
            slab.startp(), slab.countp(), data.data().data());
    }
    else
    {
        thrust::host_vector<dg::get_value_type<host_vector>> receive;
        put_vara_detail( ncid, varid, slab, data.data(), receive);
    }
}
#endif // MPI_VERSION
} // namespace detail
//

///@brief DEPRECATED Write an array to NetCDF file
template<class host_vector, class Topology>
void put_var( int ncid, int varid, const Topology& /*grid*/,
    const host_vector& data, bool /*parallel*/ = false)
{
    file::NC_Error_Handle err;
    err = detail::put_var_T( ncid, varid, data.data());
}

///@brief DEPRECATED Write an array to NetCDF file
template<class host_vector, class Topology>
void put_vara( int ncid, int varid, unsigned slice, const Topology& grid,
    const host_vector& data, bool /*parallel*/ = false)
{
    file::NC_Error_Handle err;
    NcHyperslab slab( slice, grid);
    err = detail::put_vara_T( ncid, varid, slab.startp(), slab.countp(),
        data.data());
}
// scalar data

template<class T, class real_type>
void put_var( int ncid, int varid, const RealGrid0d<real_type>& /*grid*/,
    T data, bool /*parallel*/ = false)
{
    file::NC_Error_Handle err;
    err = detail::put_var_T( ncid, varid, &data);
}
template<class T, class real_type>
void put_vara( int ncid, int varid, unsigned slice, const RealGrid0d<real_type>& /*grid*/,
    T data, bool /*parallel*/ = false)
{
    file::NC_Error_Handle err;
    size_t count = 1;
    size_t start = slice; // convert to size_t
    err = detail::put_vara_T( ncid, varid, &start, &count, &data);
}

#ifdef MPI_VERSION

// array data
template<class host_vector, class MPITopology>
void put_var(int ncid, int varid, const MPITopology& grid,
    const dg::MPI_Vector<host_vector>& data, bool parallel = false)
{
    detail::put_vara_detail( ncid, varid, 0, grid, data, false, parallel);
}

template<class host_vector, class MPITopology>
void put_vara(int ncid, int varid, unsigned slice,
    const MPITopology& grid, const dg::MPI_Vector<host_vector>& data,
    bool parallel = false)
{
    detail::put_vara_detail( ncid, varid, slice, grid, data, true, parallel);
}
// scalar data

template<class T, class real_type>
void put_var( int ncid, int varid, const RealMPIGrid0d<real_type>& grid,
    T data, bool parallel = false)
{
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank);
    if( rank == 0)
        put_var( ncid, varid, dg::RealGrid0d<real_type>(), data, parallel);
}
template<class T, class real_type>
void put_vara( int ncid, int varid, unsigned slice, const RealMPIGrid0d<real_type>& /*grid*/,
    T data, bool parallel = false)
{
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank);
    if( rank == 0)
        put_vara( ncid, varid, slice, dg::RealGrid0d<real_type>(), data, parallel);
}

#endif //MPI_VERSION

// Convenience wrappers that simply forward to put_var / put_vara
template<class host_vector, class Topology>
void put_var_double(int ncid, int varid, const Topology& grid,
    const host_vector& data, bool parallel = false)
{
    put_var( ncid, varid, grid, data, parallel);
}

template<class host_vector, class Topology>
void put_vara_double(int ncid, int varid, unsigned slice, const Topology& grid,
    const host_vector& data, bool parallel = false)
{
    put_vara( ncid, varid, slice, grid, data, parallel);
}

}//namespace file
}//namespace dg
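
A minimal serial usage sketch (not part of the header): it creates a NetCDF file with an unlimited time dimension plus the two grid dimensions and writes one time slice with dg::file::put_vara. The file, dimension and variable names below are illustrative only. With an MPI grid and a dg::MPI_Vector the same call funnels the data through rank 0 of the grid communicator, unless parallel=true requests that every rank writes its own chunk.

#include "dg/algorithm.h"
#include "dg/file/file.h"

int main()
{
    // 2d grid with n=3 polynomial coefficients and 20x20 cells -> 60x60 points
    dg::Grid2d grid( 0., 1., 0., 1., 3, 20, 20);
    dg::HVec field = dg::evaluate( dg::CONSTANT( 42.), grid);

    dg::file::NC_Error_Handle err; // throws dg::file::NC_Error on failure
    int ncid, dim_ids[3], varID;
    err = nc_create( "output.nc", NC_NETCDF4|NC_CLOBBER, &ncid);
    err = nc_def_dim( ncid, "time", NC_UNLIMITED, &dim_ids[0]);
    err = nc_def_dim( ncid, "y", 60, &dim_ids[1]); // n*Ny points
    err = nc_def_dim( ncid, "x", 60, &dim_ids[2]); // n*Nx points
    err = nc_def_var( ncid, "field", NC_DOUBLE, 3, dim_ids, &varID);
    err = nc_enddef( ncid);

    // write time slice 0 of the (time, y, x) variable
    dg::file::put_vara( ncid, varID, 0, grid, field);
    err = nc_close( ncid);
    return 0;
}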