Extension: Json and NetCDF utilities
#include "dg/file/file.h" (includes both Json and NetCDF utilities)
easy_output.h
1#pragma once
2
3#include <exception>
4#include <netcdf.h>
5#include "dg/topology/grid.h"
6#ifdef MPI_VERSION
7#include "dg/backend/mpi_vector.h"
8#include "dg/topology/mpi_grid.h"
9#endif //MPI_VERSION
10
16namespace dg
17{
18namespace file
19{
31struct NC_Error : public std::exception
32{
33
39 NC_Error( int error): error_( error) {}
45 char const* what() const throw(){
46 return nc_strerror(error_);}
47 private:
48 int error_;
49};
50
72struct NC_Error_Handle
73{
82 NC_Error_Handle operator=( int err)
83 {
84 NC_Error_Handle h;
85 return h(err);
86 }
95 NC_Error_Handle operator()( int err)
96 {
97 if( err)
98 throw NC_Error( err);
99 return *this;
100 }
101};
102
126template<class host_vector>
127void put_var_double(int ncid, int varid, const dg::aTopology2d& grid,
128 const host_vector& data, bool parallel = false)
129{
130 file::NC_Error_Handle err;
131 size_t start[2] = {0,0}, count[2];
132 count[0] = grid.ny()*grid.Ny();
133 count[1] = grid.nx()*grid.Nx();
134 err = nc_put_vara_double( ncid, varid, start, count, data.data());
135}
136
164template<class host_vector>
165void put_vara_double(int ncid, int varid, unsigned slice,
166 const dg::aTopology2d& grid, const host_vector& data, bool parallel = false)
167{
168 file::NC_Error_Handle err;
169 size_t start[3] = {slice,0,0}, count[3];
170 count[0] = 1;
171 count[1] = grid.ny()*grid.Ny();
172 count[2] = grid.nx()*grid.Nx();
173 err = nc_put_vara_double( ncid, varid, start, count, data.data());
174}
175
177template<class host_vector>
178void put_var_double(int ncid, int varid, const dg::aTopology3d& grid,
179 const host_vector& data, bool parallel = false)
180{
181 file::NC_Error_Handle err;
182 size_t start[3] = {0,0,0}, count[3];
183 count[0] = grid.nz()*grid.Nz();
184 count[1] = grid.ny()*grid.Ny();
185 count[2] = grid.nx()*grid.Nx();
186 err = nc_put_vara_double( ncid, varid, start, count, data.data());
187}
188
190template<class host_vector>
191void put_vara_double(int ncid, int varid, unsigned slice,
192 const dg::aTopology3d& grid, const host_vector& data, bool parallel = false)
193{
194 file::NC_Error_Handle err;
195 size_t start[4] = {slice, 0,0,0}, count[4];
196 count[0] = 1;
197 count[1] = grid.nz()*grid.Nz();
198 count[2] = grid.ny()*grid.Ny();
199 count[3] = grid.nx()*grid.Nx();
200 err = nc_put_vara_double( ncid, varid, start, count, data.data());
201}
202
203#ifdef MPI_VERSION
205template<class host_vector>
206void put_var_double(int ncid, int varid, const dg::aMPITopology2d& grid,
207 const dg::MPI_Vector<host_vector>& data, bool parallel = false)
208{
209 file::NC_Error_Handle err;
210 size_t start[2] = {0,0}, count[2];
211 count[0] = grid.ny()*grid.local().Ny();
212 count[1] = grid.nx()*grid.local().Nx();
213 int rank, size;
214 MPI_Comm comm = grid.communicator();
215 MPI_Comm_rank( comm, &rank);
216 MPI_Comm_size( comm, &size);
217 if( !parallel)
218 {
219 MPI_Status status;
220 size_t local_size = grid.local().size();
221 std::vector<int> coords( size*2);
222 for( int rrank=0; rrank<size; rrank++)
223 MPI_Cart_coords( comm, rrank, 2, &coords[2*rrank]);
224 if(rank==0)
225 {
226 host_vector receive( data.data());
227 for( int rrank=0; rrank<size; rrank++)
228 {
229 if(rrank!=0)
230 MPI_Recv( receive.data(), local_size, MPI_DOUBLE,
231 rrank, rrank, comm, &status);
232 start[0] = coords[2*rrank+1]*count[0],
233 start[1] = coords[2*rrank+0]*count[1],
234 err = nc_put_vara_double( ncid, varid, start, count,
235 receive.data());
236 }
237 }
238 else
239 MPI_Send( data.data().data(), local_size, MPI_DOUBLE,
240 0, rank, comm);
241 MPI_Barrier( comm);
242 }
243 else
244 {
245 int coords[2];
246 MPI_Cart_coords( comm, rank, 2, coords);
247 start[0] = coords[1]*count[0],
248 start[1] = coords[0]*count[1],
249 err = nc_put_vara_double( ncid, varid, start, count,
250 data.data().data());
251 }
252}
253
255template<class host_vector>
256void put_vara_double(int ncid, int varid, unsigned slice,
257 const dg::aMPITopology2d& grid, const dg::MPI_Vector<host_vector>& data,
258 bool parallel = false)
259{
260 file::NC_Error_Handle err;
261 size_t start[3] = {slice, 0,0}, count[3];
262 count[0] = 1;
263 count[1] = grid.ny()*grid.local().Ny();
264 count[2] = grid.nx()*grid.local().Nx();
265 int rank, size;
266 MPI_Comm comm = grid.communicator();
267 MPI_Comm_rank( comm, &rank);
268 MPI_Comm_size( comm, &size);
269 if( parallel)
270 {
271 int coords[2];
272 MPI_Cart_coords( comm, rank, 2, coords);
273 start[1] = coords[1]*count[1],
274 start[2] = coords[0]*count[2],
275 err = nc_put_vara_double( ncid, varid, start, count,
276 data.data().data());
277 }
278 else
279 {
280 MPI_Status status;
281 size_t local_size = grid.local().size();
282 std::vector<int> coords( size*2);
283 for( int rrank=0; rrank<size; rrank++)
284 MPI_Cart_coords( comm, rrank, 2, &coords[2*rrank]);
285 if(rank==0)
286 {
287 host_vector receive( data.data());
288 for( int rrank=0; rrank<size; rrank++)
289 {
290 if(rrank!=0)
291 MPI_Recv( receive.data(), local_size, MPI_DOUBLE,
292 rrank, rrank, comm, &status);
293 start[1] = coords[2*rrank+1]*count[1],
294 start[2] = coords[2*rrank+0]*count[2],
295 err = nc_put_vara_double( ncid, varid, start, count,
296 receive.data());
297 }
298 }
299 else
300 MPI_Send( data.data().data(), local_size, MPI_DOUBLE,
301 0, rank, comm);
302 MPI_Barrier( comm);
303 }
304}
305
307template<class host_vector>
308void put_var_double(int ncid, int varid,
309 const dg::aMPITopology3d& grid, const dg::MPI_Vector<host_vector>& data,
310 bool parallel = false)
311{
312 file::NC_Error_Handle err;
313 size_t start[3] = {0,0,0}, count[3];
314 count[0] = grid.nz()*grid.local().Nz();
315 count[1] = grid.ny()*grid.local().Ny();
316 count[2] = grid.nx()*grid.local().Nx();
317 int rank, size;
318 MPI_Comm comm = grid.communicator();
319 MPI_Comm_rank( comm, &rank);
320 MPI_Comm_size( comm, &size);
321 if( !parallel)
322 {
323 MPI_Status status;
324 size_t local_size = grid.local().size();
325 std::vector<int> coords( size*3);
326 for( int rrank=0; rrank<size; rrank++)
327 MPI_Cart_coords( comm, rrank, 3, &coords[3*rrank]);
328 if(rank==0)
329 {
330 host_vector receive( data.data());
331 for( int rrank=0; rrank<size; rrank++)
332 {
333 if(rrank!=0)
334 MPI_Recv( receive.data(), local_size, MPI_DOUBLE,
335 rrank, rrank, comm, &status);
336 start[0] = coords[3*rrank+2]*count[0],
337 start[1] = coords[3*rrank+1]*count[1],
338 start[2] = coords[3*rrank+0]*count[2];
339 err = nc_put_vara_double( ncid, varid, start, count,
340 receive.data());
341 }
342 }
343 else
344 MPI_Send( data.data().data(), local_size, MPI_DOUBLE,
345 0, rank, comm);
346 MPI_Barrier( comm);
347 }
348 else
349 {
350 int coords[3];
351 MPI_Cart_coords( comm, rank, 3, coords);
352 start[0] = coords[2]*count[0],
353 start[1] = coords[1]*count[1],
354 start[2] = coords[0]*count[2];
355 err = nc_put_vara_double( ncid, varid, start, count,
356 data.data().data());
357 }
358}
359
361template<class host_vector>
362void put_vara_double(int ncid, int varid, unsigned slice,
363 const dg::aMPITopology3d& grid, const dg::MPI_Vector<host_vector>& data,
364 bool parallel = false)
365{
366 file::NC_Error_Handle err;
367 size_t start[4] = {slice, 0,0,0}, count[4];
368 count[0] = 1;
369 count[1] = grid.nz()*grid.local().Nz();
370 count[2] = grid.ny()*grid.local().Ny();
371 count[3] = grid.nx()*grid.local().Nx();
372 int rank, size;
373 MPI_Comm comm = grid.communicator();
374 MPI_Comm_rank( comm, &rank);
375 MPI_Comm_size( comm, &size);
376 if( parallel)
377 {
378 int coords[3];
379 MPI_Cart_coords( comm, rank, 3, coords);
380 start[1] = coords[2]*count[1],
381 start[2] = coords[1]*count[2],
382 start[3] = coords[0]*count[3];
383 err = nc_put_vara_double( ncid, varid, start, count,
384 data.data().data());
385 }
386 else
387 {
388 MPI_Status status;
389 size_t local_size = grid.local().size();
390 std::vector<int> coords( size*3);
391 for( int rrank=0; rrank<size; rrank++)
392 MPI_Cart_coords( comm, rrank, 3, &coords[3*rrank]);
393 if(rank==0)
394 {
395 host_vector receive( data.data());
396 for( int rrank=0; rrank<size; rrank++)
397 {
398 if(rrank!=0)
399 MPI_Recv( receive.data(), local_size, MPI_DOUBLE,
400 rrank, rrank, comm, &status);
401 start[1] = coords[3*rrank+2]*count[1],
402 start[2] = coords[3*rrank+1]*count[2],
403 start[3] = coords[3*rrank+0]*count[3];
404 err = nc_put_vara_double( ncid, varid, start, count,
405 receive.data());
406 }
407 }
408 else
409 MPI_Send( data.data().data(), local_size, MPI_DOUBLE,
410 0, rank, comm);
411 MPI_Barrier( comm);
412 }
413}
414#endif //MPI_VERSION
415
417}//namespace file
418}//namespace dg
error
Switch between how to handle errors in the Json utility functions.
Definition: json_utilities.h:30
void put_vara_double(int ncid, int varid, unsigned slice, const dg::aTopology2d &grid, const host_vector &data, bool parallel=false)
Convenience wrapper around nc_put_vara_double()
Definition: easy_output.h:165
void put_var_double(int ncid, int varid, const dg::aTopology2d &grid, const host_vector &data, bool parallel=false)
Convenience wrapper around nc_put_vara_double()
Definition: easy_output.h:127
Empty utility class that handles return values of netcdf functions and throws NC_Error(status) if( s...
Definition: easy_output.h:73
NC_Error_Handle operator=(int err)
Assign an error code; throws NC_Error if it is non-zero.
Definition: easy_output.h:82
NC_Error_Handle operator()(int err)
Check an error code; throws NC_Error if it is non-zero.
Definition: easy_output.h:95
Class thrown by the NC_Error_Handle.
Definition: easy_output.h:32
char const * what() const
The netcdf error string for the stored error code.
Definition: easy_output.h:45
NC_Error(int error)
Construct from error code.
Definition: easy_output.h:39