Extension: Json and NetCDF utilities
#include "dg/file/file.h"
Legacy NetCDF-C utility

Classes

struct  dg::file::NC_Error_Handle
 DEPRECATED Empty utility class that handles return values of NetCDF functions and throws NC_Error(status) if( status != NC_NOERR) More...
 

Functions

template<class T >
bool dg::file::check_real_time (int ncid, const char *name, int *dimID, int *tvarID)
 DEPRECATED Check if an unlimited dimension exists as if define_real_time was called.
 
template<class T >
int dg::file::define_real_time (int ncid, const char *name, int *dimID, int *tvarID, bool full_check=false)
 DEPRECATED Define an unlimited time dimension and coordinate variable.
 
int dg::file::define_time (int ncid, const char *name, int *dimID, int *tvarID)
 DEPRECATED An alias for define_real_time<double>
 
int dg::file::define_limited_time (int ncid, const char *name, int size, int *dimID, int *tvarID)
 DEPRECATED Define a limited time dimension and coordinate variable.
 
template<class T >
bool dg::file::check_dimension (int ncid, int *dimID, const dg::RealGrid1d< T > &g, std::string name_dim="x", std::string axis="X")
 DEPRECATED Check if a dimension exists as if define_dimension was called.
 
template<class T >
int dg::file::define_dimension (int ncid, int *dimID, const dg::RealGrid1d< T > &g, std::string name_dim="x", std::string axis="X", bool full_check=false)
 DEPRECATED Define a 1d dimension and associated coordinate variable.
 
template<class Topology , std::enable_if_t< dg::is_vector_v< typename Topology::host_vector, dg::SharedVectorTag >, bool > = true>
int dg::file::define_dimensions (int ncid, int *dimsIDs, const Topology &g, std::vector< std::string > name_dims={}, bool full_check=false)
 DEPRECATED Define dimensions and associated coordinate variables.
 
template<class Topology , std::enable_if_t< dg::is_vector_v< typename Topology::host_vector, dg::SharedVectorTag >, bool > = true>
int dg::file::define_dimensions (int ncid, int *dimsIDs, int *tvarID, const Topology &g, std::vector< std::string > name_dims={}, bool full_check=false)
 DEPRECATED Define an unlimited time and grid dimensions together with their coordinate variables.
 
template<class Topology , std::enable_if_t< dg::is_vector_v< typename Topology::host_vector, dg::SharedVectorTag >, bool > = true>
bool dg::file::check_dimensions (int ncid, int *dimsIDs, const Topology &g, std::vector< std::string > name_dims={})
 DEPRECATED Check if dimensions exist as if define_dimensions was called.
 
template<class Topology , std::enable_if_t< dg::is_vector_v< typename Topology::host_vector, dg::SharedVectorTag >, bool > = true>
bool dg::file::check_dimensions (int ncid, int *dimsIDs, int *tvarID, const Topology &g, std::vector< std::string > name_dims={})
 DEPRECATED Check if dimensions exist as if define_dimensions was called.
 
template<class T >
int dg::file::define_limtime_xy (int ncid, int *dimsIDs, int size, int *tvarID, const dg::aRealTopology2d< T > &g, std::vector< std::string > name_dims={"time", "y", "x"})
 DEPRECATED Define a limited time and 2 dimensions and associated coordinate variables.
 
template<class MPITopology , std::enable_if_t< dg::is_vector_v< typename MPITopology::host_vector, dg::MPIVectorTag >, bool > = true>
int dg::file::define_dimensions (int ncid, int *dimsIDs, const MPITopology &g, std::vector< std::string > name_dims={}, bool full_check=false)
 DEPRECATED All processes may call this, but only the master process has to, and only the master process will execute. Convenience function that just calls the corresponding serial version with the global grid.
 
template<class MPITopology , std::enable_if_t< dg::is_vector_v< typename MPITopology::host_vector, dg::MPIVectorTag >, bool > = true>
int dg::file::define_dimensions (int ncid, int *dimsIDs, int *tvarID, const MPITopology &g, std::vector< std::string > name_dims={}, bool full_check=false)
 DEPRECATED All processes may call this, but only the master process has to, and only the master process will execute. Convenience function that just calls the corresponding serial version with the global grid.
 
template<class MPITopology , std::enable_if_t< dg::is_vector_v< typename MPITopology::host_vector, dg::MPIVectorTag >, bool > = true>
bool dg::file::check_dimensions (int ncid, int *dimsIDs, const MPITopology &g, std::vector< std::string > name_dims={})
 DEPRECATED All processes may call this and all will execute!! Convenience function that just calls the corresponding serial version with the global grid.
 
template<class MPITopology , std::enable_if_t< dg::is_vector_v< typename MPITopology::host_vector, dg::MPIVectorTag >, bool > = true>
bool dg::file::check_dimensions (int ncid, int *dimsIDs, int *tvarID, const MPITopology &g, std::vector< std::string > name_dims={})
 DEPRECATED All processes may call this and all will execute!! Convenience function that just calls the corresponding serial version with the global grid.
 
template<class host_vector , class Topology >
void dg::file::get_var (int ncid, int varid, const Topology &grid, host_vector &data, bool parallel=true)
 DEPRECATED Convenience wrapper around nc_get_var.
 
template<class host_vector , class Topology >
void dg::file::get_vara (int ncid, int varid, unsigned slice, const Topology &grid, host_vector &data, bool parallel=true)
 DEPRECATED Convenience wrapper around nc_get_vara()
 
template<class T , class real_type >
void dg::file::get_var (int ncid, int varid, const RealGrid0d< real_type > &grid, T &data, bool parallel=true)
 DEPRECATED Read a scalar from the netcdf file.
 
template<class T , class real_type >
void dg::file::get_vara (int ncid, int varid, unsigned slice, const RealGrid0d< real_type > &grid, T &data, bool parallel=true)
 DEPRECATED Read a scalar from the netcdf file.
 
template<class host_vector , class Topology >
void dg::file::put_var (int ncid, int varid, const Topology &grid, const host_vector &data, bool parallel=false)
 DEPRECATED Write an array to NetCDF file.
 
template<class host_vector , class Topology >
void dg::file::put_vara (int ncid, int varid, unsigned slice, const Topology &grid, const host_vector &data, bool parallel=false)
 DEPRECATED Write an array to NetCDF file.
 
template<class T , class real_type >
void dg::file::put_var (int ncid, int varid, const RealGrid0d< real_type > &grid, T data, bool parallel=false)
 DEPRECATED Write a scalar to the NetCDF file.
 
template<class T , class real_type >
void dg::file::put_vara (int ncid, int varid, unsigned slice, const RealGrid0d< real_type > &grid, T data, bool parallel=false)
 DEPRECATED Write a scalar to the NetCDF file.
 

Detailed Description

Function Documentation

◆ check_dimension()

template<class T >
bool dg::file::check_dimension ( int ncid,
int * dimID,
const dg::RealGrid1d< T > & g,
std::string name_dim = "x",
std::string axis = "X" )

DEPRECATED Check if a dimension exists as if define_dimension was called.

This function returns false if the dimension with the given name does not exist.

This function throws std::runtime_error if

  • The length of the dimension does not match the grid size
  • The dimension variable has wrong type or dimensions
  • The dimension variable entries do not match the grid abscissas

This function throws a dg::file::NC_Error if

  • The dimension exists but the variable does not
  • The dimension variable has no entries
Note
This function does not write anything to the file, it only reads
Parameters
ncid: NetCDF file or group ID
dimID: dimension ID (output)
g: The 1d DG grid from which data points for the coordinate variable are generated using g.abscissas()
name_dim: Name of dimension and coordinate variable (input)
axis: The axis attribute (input), ("X", "Y" or "Z")
Template Parameters
T: determines the datatype of the dimension variables
Returns
False if the dimension with the given name does not exist, True if no errors are thrown
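
For illustration, a minimal sketch that checks an existing file against a 1d grid; the file name "grid.nc", the grid parameters and the dg/algorithm.h include are assumptions of the example, not part of this interface:

#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::RealGrid1d
#include "dg/file/file.h"

int main()
{
    // The grid the file is expected to match: polynomial order 3, 20 cells on [0,1]
    dg::RealGrid1d<double> g1d( 0., 1., 3, 20);
    int ncid, dimID;
    nc_open( "grid.nc", NC_NOWRITE, &ncid); // hypothetical file name
    // false if no dimension named "x" exists; throws if it exists but its
    // length or coordinate values do not match g1d.abscissas()
    bool exists = dg::file::check_dimension( ncid, &dimID, g1d, "x", "X");
    nc_close( ncid);
    return exists ? 0 : 1;
}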

◆ check_dimensions() [1/4]

template<class MPITopology , std::enable_if_t< dg::is_vector_v< typename MPITopology::host_vector, dg::MPIVectorTag >, bool > = true>
bool dg::file::check_dimensions ( int ncid,
int * dimsIDs,
const MPITopology & g,
std::vector< std::string > name_dims = {} )
inline

DEPRECATED All processes may call this and all will execute!! Convenience function that just calls the corresponding serial version with the global grid.

◆ check_dimensions() [2/4]

template<class Topology , std::enable_if_t< dg::is_vector_v< typename Topology::host_vector, dg::SharedVectorTag >, bool > = true>
bool dg::file::check_dimensions ( int ncid,
int * dimsIDs,
const Topology & g,
std::vector< std::string > name_dims = {} )

DEPRECATED Check if dimensions exist as if define_dimensions was called.

This function checks if the given file contains dimensions and their associated dimension variables in the same way that the corresponding define_dimensions creates them. If anything is amiss, an error will be thrown.

Note
In order to do this the function will actually read in the coordinate variable and compare to the given grid abscissas
Parameters
ncid: NetCDF file or group ID
dimsIDs: (write-only) dimension IDs, must be of size g.ndim()
g: The dG grid from which data points for the coordinate variables are generated using g.abscissas() in each dimension
name_dims: Names for the dimension and coordinate variables (must have size g.ndim()) in numpy/Python ordering, e.g. in 3d we have {"z", "y", "x"}; if name_dims.empty() then the default names {"z", "y", "x"} are used
Template Parameters
Topology: typename Topology::value_type determines the datatype of the dimension variables
Note
For a 0d grid, the default dimension name is "i", axis "I" and the dimension will be of size 1
Returns
False if any dimension with the given name does not exist, True if no errors are thrown
See also
check_dimension
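
A corresponding sketch for a 2d grid; the grid parameters, the file name and the dg::Grid2d typedef are assumptions of the example:

#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::Grid2d
#include "dg/file/file.h"

int main()
{
    dg::Grid2d g2d( 0., 1., 0., 1., 3, 20, 20);
    int ncid, dim_ids[2]; // size g2d.ndim() == 2
    nc_open( "fields.nc", NC_NOWRITE, &ncid); // hypothetical file name
    // Looks for dimensions "y" and "x" (default names) and compares their
    // coordinate variables to g2d.abscissas() in each dimension
    bool exists = dg::file::check_dimensions( ncid, dim_ids, g2d);
    nc_close( ncid);
    return exists ? 0 : 1;
}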

◆ check_dimensions() [3/4]

template<class MPITopology , std::enable_if_t< dg::is_vector_v< typename MPITopology::host_vector, dg::MPIVectorTag >, bool > = true>
bool dg::file::check_dimensions ( int ncid,
int * dimsIDs,
int * tvarID,
const MPITopology & g,
std::vector< std::string > name_dims = {} )
inline

DEPRECATED All processes may call this and all will execute!! Convenience function that just calls the corresponding serial version with the global grid.

◆ check_dimensions() [4/4]

template<class Topology , std::enable_if_t< dg::is_vector_v< typename Topology::host_vector, dg::SharedVectorTag >, bool > = true>
bool dg::file::check_dimensions ( int ncid,
int * dimsIDs,
int * tvarID,
const Topology & g,
std::vector< std::string > name_dims = {} )

DEPRECATED Check if dimensions exist as if define_dimensions was called.

Semantically equivalent to the following:

if ( !check_real_time<typename Topology::value_type>( ncid, name_dims[0].data(), &dimsIDs[0], tvarID))
return false;
return check_dimensions( ncid, &dimsIDs[1], g, {name_dims.begin()+1, name_dims.end()});

This function checks if the given file contains dimensions and their associated dimension variables in the same way that the corresponding define_dimensions creates them. If anything is amiss, an error will be thrown.

Note
In order to do this the function will actually read in the coordinate variable and compare to the given grid abscissas
Parameters
ncid: NetCDF file or group ID
dimsIDs: (write-only) dimension IDs, must be of size g.ndim()+1
tvarID: (write-only) time coordinate variable ID (unlimited)
g: The dG grid from which data points for the coordinate variables are generated using g.abscissas()
name_dims: Names for the dimension and coordinate variables (must have size g.ndim()+1) in numpy/Python ordering, e.g. in 3d we have {"time", "z", "y", "x"}; if name_dims.empty() then the default names {"time", "z", "y", "x"} are used
Template Parameters
Topology: typename Topology::value_type determines the datatype of the dimension variables
Returns
False if any dimension with the given name does not exist, True if no errors are thrown
See also
check_dimension check_real_time

◆ check_real_time()

template<class T >
bool dg::file::check_real_time ( int ncid,
const char * name,
int * dimID,
int * tvarID )

DEPRECATED Check if an unlimited dimension exists as if define_real_time was called.

This function returns false if the dimension with the given name does not exist.

This function throws std::runtime_error if

  • The dimension exists but is not unlimited
  • The dimension variable has wrong type or dimensions

This function throws a dg::file::NC_Error if

  • The dimension exists but the variable does not
Note
This function does not write anything to the file, it only reads
Parameters
ncid: NetCDF file or group ID
name: Name of unlimited dimension and associated variable
dimID: (write-only) time-dimension ID
tvarID: (write-only) time-variable ID (for a time variable of type T)
Template Parameters
T: determines the type of the dimension variable
Returns
False if the dimension with the given name does not exist, True if no errors are thrown
Attention
Dimensions in a parent group are visible in its child groups, but variables are not, so each group should have its own time dimension. See https://docs.unidata.ucar.edu/netcdf-c/current/group__groups.html
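
A minimal sketch; the file name is a placeholder and nc_open is the plain NetCDF-C call:

#include <netcdf.h>
#include "dg/file/file.h"

int main()
{
    int ncid, dimID, tvarID;
    nc_open( "output.nc", NC_NOWRITE, &ncid); // hypothetical file name
    // false if no dimension named "time" exists; throws if it exists but is
    // not unlimited or its coordinate variable is malformed
    bool exists = dg::file::check_real_time<double>( ncid, "time", &dimID, &tvarID);
    nc_close( ncid);
    return exists ? 0 : 1;
}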

◆ define_dimension()

template<class T >
int dg::file::define_dimension ( int ncid,
int * dimID,
const dg::RealGrid1d< T > & g,
std::string name_dim = "x",
std::string axis = "X",
bool full_check = false )
inline

DEPRECATED Define a 1d dimension and associated coordinate variable.

Note
By NetCDF conventions a variable with the same name as a dimension is called a coordinate variable.
Parameters
ncid: NetCDF file or group ID
dimID: dimension ID (output)
g: The 1d DG grid from which data points for the coordinate variable are generated using g.abscissas()
name_dim: Name of dimension and coordinate variable (input)
axis: The axis attribute (input), ("X", "Y" or "Z")
Template Parameters
T: determines the datatype of the dimension variables
Parameters
full_check: If true, will call check_dimension before definition.
Returns
NetCDF error code if any
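
For illustration, a minimal sketch that creates a file with a single spatial dimension; the file name and grid parameters are assumptions of the example:

#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::RealGrid1d
#include "dg/file/file.h"

int main()
{
    dg::RealGrid1d<double> g1d( 0., 1., 3, 20);
    int ncid, dimID;
    nc_create( "grid.nc", NC_NETCDF4 | NC_CLOBBER, &ncid); // hypothetical file name
    // Defines a dimension "x" of length g1d.size() and a coordinate variable
    // "x" filled with g1d.abscissas(), with the axis attribute set to "X"
    int retval = dg::file::define_dimension( ncid, &dimID, g1d, "x", "X");
    nc_close( ncid);
    return retval;
}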

◆ define_dimensions() [1/4]

template<class MPITopology , std::enable_if_t< dg::is_vector_v< typename MPITopology::host_vector, dg::MPIVectorTag >, bool > = true>
int dg::file::define_dimensions ( int ncid,
int * dimsIDs,
const MPITopology & g,
std::vector< std::string > name_dims = {},
bool full_check = false )
inline

DEPRECATED All processes may call this, but only the master process has to, and only the master process will execute. Convenience function that just calls the corresponding serial version with the global grid.

◆ define_dimensions() [2/4]

template<class Topology , std::enable_if_t< dg::is_vector_v< typename Topology::host_vector, dg::SharedVectorTag >, bool > = true>
int dg::file::define_dimensions ( int ncid,
int * dimsIDs,
const Topology & g,
std::vector< std::string > name_dims = {},
bool full_check = false )

DEPRECATED Define dimensions and associated coordinate variables.

Note
By NetCDF conventions a variable with the same name as a dimension is called a coordinate variable.
Parameters
ncid: NetCDF file or group ID
dimsIDs: (write-only) dimension IDs, must be of size g.ndim()
g: The dG grid from which data points for the coordinate variables are generated using g.abscissas() in each dimension
name_dims: Names for the dimension and coordinate variables (must have size g.ndim()) in numpy/Python ordering, e.g. in 3d we have {"z", "y", "x"}; if name_dims.empty() then the default names {"z", "y", "x"} are used
Template Parameters
Topology: typename Topology::value_type determines the datatype of the dimension variables
Note
For a 0d grid, the function does nothing
Parameters
full_check: If true, will call check_dimensions before definition. In this case dimensions may already exist in the file and will not trigger a throw (it is also possible for some dimensions to exist while others do not)
Returns
If anything goes wrong, returns the NetCDF error code, else NC_NOERR
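
A sketch that defines the dimensions of a 2d grid and a variable on them; grid parameters, the file name and the variable name "ne" are assumptions of the example:

#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::Grid2d
#include "dg/file/file.h"

int main()
{
    dg::Grid2d g2d( 0., 1., 0., 1., 3, 20, 20);
    int ncid, dim_ids[2], varID; // dim_ids must have size g2d.ndim() == 2
    nc_create( "fields.nc", NC_NETCDF4 | NC_CLOBBER, &ncid); // hypothetical file name
    // Defines dimensions "y", "x" (default names) and their coordinate variables
    dg::file::define_dimensions( ncid, dim_ids, g2d);
    // A time-independent variable defined on those dimensions
    nc_def_var( ncid, "ne", NC_DOUBLE, 2, dim_ids, &varID); // "ne" is a placeholder name
    nc_close( ncid);
}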

◆ define_dimensions() [3/4]

template<class MPITopology , std::enable_if_t< dg::is_vector_v< typename MPITopology::host_vector, dg::MPIVectorTag >, bool > = true>
int dg::file::define_dimensions ( int ncid,
int * dimsIDs,
int * tvarID,
const MPITopology & g,
std::vector< std::string > name_dims = {},
bool full_check = false )
inline

DEPRECATED All processes may call this, but only the master process has to, and only the master process will execute. Convenience function that just calls the corresponding serial version with the global grid.

◆ define_dimensions() [4/4]

template<class Topology , std::enable_if_t< dg::is_vector_v< typename Topology::host_vector, dg::SharedVectorTag >, bool > = true>
int dg::file::define_dimensions ( int ncid,
int * dimsIDs,
int * tvarID,
const Topology & g,
std::vector< std::string > name_dims = {},
bool full_check = false )

DEPRECATED Define an unlimited time and grid dimensions together with their coordinate variables.

Note
By NetCDF conventions a variable with the same name as a dimension is called a coordinate variable.

Semantically equivalent to the following:

retval = define_real_time<typename Topology::value_type>( ncid, name_dims[0].data(), &dimsIDs[0], tvarID);
if(retval)
return retval;
return define_dimensions( ncid, &dimsIDs[1], g, {name_dims.begin()+1, name_dims.end()});
Parameters
ncid: NetCDF file or group ID
dimsIDs: (write-only) dimension IDs, must be of size g.ndim()+1
tvarID: (write-only) time coordinate variable ID (unlimited)
g: The dG grid from which data points for the coordinate variables are generated using g.abscissas()
name_dims: Names for the dimension and coordinate variables (must have size g.ndim()+1) in numpy/Python ordering, e.g. in 3d we have {"time", "z", "y", "x"}; if name_dims.empty() then the default names {"time", "z", "y", "x"} are used
Template Parameters
Topology: typename Topology::value_type determines the datatype of the dimension variables
Note
For 0d grids only the "time" dimension is defined, no spatial dimension
Parameters
full_check: If true, will call check_dimensions before definition. In this case dimensions may already exist in the file and will not trigger a throw (it is also possible for some dimensions to exist while others do not)
Returns
If anything goes wrong, returns the NetCDF error code, else NC_NOERR

◆ define_limited_time()

int dg::file::define_limited_time ( int ncid,
const char * name,
int size,
int * dimID,
int * tvarID )
inline

DEPRECATED Define a limited time dimension and coordinate variable.

Note
By NetCDF conventions a variable with the same name as a dimension is called a coordinate variable. The CF conventions dictate that the units attribute must be defined for a time variable: we give it the value "time since start". Furthermore, we define the "axis" : "T" attribute to mark the time dimension.
Parameters
ncid: NetCDF file or group ID
name: Name of the time variable (usually "time")
size: The number of timesteps
dimID: time-dimension ID
tvarID: time-variable ID (for a time variable of type NC_DOUBLE)
Returns
NetCDF error code if any
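
A minimal sketch; the file name and the number of timesteps are assumptions of the example:

#include <netcdf.h>
#include "dg/file/file.h"

int main()
{
    int ncid, dimID, tvarID;
    nc_create( "timeseries.nc", NC_NETCDF4 | NC_CLOBBER, &ncid); // hypothetical file name
    // A fixed-size time dimension with 100 entries and an NC_DOUBLE coordinate
    // variable "time" that carries the units and axis attributes
    int retval = dg::file::define_limited_time( ncid, "time", 100, &dimID, &tvarID);
    nc_close( ncid);
    return retval;
}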

◆ define_limtime_xy()

template<class T >
int dg::file::define_limtime_xy ( int ncid,
int * dimsIDs,
int size,
int * tvarID,
const dg::aRealTopology2d< T > & g,
std::vector< std::string > name_dims = {"time", "y", "x"} )
inline

DEPRECATED Define a limited time and 2 dimensions and associated coordinate variables.

Note
By NetCDF conventions a variable with the same name as a dimension is called a coordinate variable.

Semantically equivalent to the following:

define_limited_time( ncid, name_dims[0], size, &dimsIDs[0], tvarID);
define_dimensions( ncid, &dimsIDs[1], g, {name_dims[1], name_dims[2]});

Dimensions have attributes of (time, Y, X)

Parameters
ncid: NetCDF file or group ID
dimsIDs: (write-only) 3d array of dimension IDs (time, Y, X)
size: The size of the time variable
tvarID: (write-only) The ID of the time variable (limited)
g: The 2d DG grid from which data points for the coordinate variables are generated using g.abscissas()
name_dims: Names for the dimension variables (time, Y, X)
Template Parameters
T: determines the datatype of the dimension variables
Returns
If anything goes wrong it returns the NetCDF error code, else NC_NOERR
Note
File stays in define mode

◆ define_real_time()

template<class T >
int dg::file::define_real_time ( int ncid,
const char * name,
int * dimID,
int * tvarID,
bool full_check = false )
inline

DEPRECATED Define an unlimited time dimension and coordinate variable.

Note
By NetCDF conventions a variable with the same name as a dimension is called a coordinate variable. The CF conventions dictate that the "units" attribute must be defined for a time variable: we give it the value "time since start". Furthermore, we define the "axis" : "T" attribute to mark the time dimension.
Parameters
ncid: NetCDF file or group ID
name: Name of unlimited dimension and associated variable
dimID: (write-only) time-dimension ID
tvarID: (write-only) time-variable ID (for a time variable of type T)
Template Parameters
T: determines the type of the dimension variable
Parameters
full_check: If true, will call check_real_time before definition.
Returns
NetCDF error code if any
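
A minimal sketch; the file name is a placeholder:

#include <netcdf.h>
#include "dg/file/file.h"

int main()
{
    int ncid, dimID, tvarID;
    nc_create( "output.nc", NC_NETCDF4 | NC_CLOBBER, &ncid); // hypothetical file name
    // An unlimited dimension "time" and a double precision coordinate variable
    int retval = dg::file::define_real_time<double>( ncid, "time", &dimID, &tvarID);
    nc_close( ncid);
    return retval;
}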

◆ define_time()

int dg::file::define_time ( int ncid,
const char * name,
int * dimID,
int * tvarID )
inline

DEPRECATED An alias for define_real_time<double>


◆ get_var() [1/2]

template<class T , class real_type >
void dg::file::get_var ( int ncid,
int varid,
const RealGrid0d< real_type > & grid,
T & data,
bool parallel = true )

DEPRECATED Read a scalar from the netcdf file.

Note
This function throws a dg::file::NC_Error if an error occurs
Template Parameters
T: Determines the data type to read
real_type: ignored
Parameters
ncid: NetCDF file or group ID
varid: Variable ID
grid: a tag to signify scalar output (and help the compiler choose this function over the array input function). Can be of type dg::RealMPIGrid0d<real_type>
data: The (single) datum read from file.
parallel: This parameter is ignored in both serial and MPI versions. In an MPI program all processes call this function and all processes read.
Note
In contrast to writing, reading a NetCDF-4 file can always be done in parallel (see https://docs.h5py.org/en/stable/mpi.html), so in MPI all processes can open a file, get variable ids and subsequently read, even if only serial NetCDF is used. The default for parallel is always true, in which case all processes must have previously opened the file and inquired e.g. the varid

◆ get_var() [2/2]

template<class host_vector , class Topology >
void dg::file::get_var ( int ncid,
int varid,
const Topology & grid,
host_vector & data,
bool parallel = true )

DEPRECATED Convenience wrapper around nc_get_var.

The purpose of this function is mainly to simplify input in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-independent variable, i.e. reads a single variable in one go and is actually equivalent to nc_get_var. The dimensionality is given by the grid.

Note
This function throws a dg::file::NC_Error if an error occurs
Template Parameters
Topology: One of the dG defined grids (e.g. dg::RealGrid2d). Determines if the shared memory or MPI version is called
Template Parameters
host_vector: For a shared Topology: a type with a data() member that returns a pointer to the first element in CPU (host) address space, meaning it cannot be a GPU vector. For an MPI Topology: must be MPI_Vector. host_vector::value_type must match the data type of the variable in the file.
Parameters
ncid: NetCDF file or group ID
varid: Variable ID
grid: The grid from which to construct start and count variables to forward to nc_get_vara
data: contains the read data on return (must be of size grid.size())
Parameters
parallel: This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process reads/writes to the file independently in parallel (true) or each process funnels its data through the master rank (false), which involves communication but may be faster than the former method.
Note
In an MPI environment
  • all processes must call this function,
  • processes that do not belong to the same communicator as the master process return immediately
In an MPI program it may happen that the data to read/write is partitioned among a process group smaller than MPI_COMM_WORLD, e.g. when reading/writing a 2d slice of a 3d vector. In this example case grid.communicator() is only 2d not 3d. Remember that only the group containing the master process reads/writes its data to the file, while all other processes immediately return. There are two ways to reliably read/write the data in such a case:
  • Manually assemble the data on the master process and construct an MPI grid with a Cartesian communicator containing only one process (using e.g. MPI_Comm_split on MPI_COMM_WORLD followed by MPI_Cart_create)
  • Manually assemble the data on the MPI group that contains the master process (cf MPI_Cart_sub)
Note
The "master" thread is assumed to be the process with rank==0 in MPI_COMM_WORLD. The MPI_COMM_WORLD rank of a process is usually the same in a Cartesian communicator of the same size but is not guaranteed. So always check MPI_COMM_WORLD ranks for file write operations.
Note
In contrast to writing, reading a NetCDF-4 file can always be done in parallel (see https://docs.h5py.org/en/stable/mpi.html), so in MPI all processes can open a file, get variable ids and subsequently read, even if only serial NetCDF is used. The default for parallel is always true, in which case all processes must have previously opened the file and inquired e.g. the varid
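
A shared-memory sketch; the grid parameters, the file name, the variable name "ne" and the dg::HVec typedef are assumptions of the example:

#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::Grid2d and dg::HVec
#include "dg/file/file.h"

int main()
{
    dg::Grid2d g2d( 0., 1., 0., 1., 3, 20, 20);
    dg::HVec data( g2d.size()); // host vector, assumed typedef for thrust::host_vector<double>
    int ncid, varID;
    nc_open( "fields.nc", NC_NOWRITE, &ncid); // hypothetical file name
    nc_inq_varid( ncid, "ne", &varID);        // "ne" is a placeholder name
    // Reads the entire time-independent variable into data
    dg::file::get_var( ncid, varID, g2d, data);
    nc_close( ncid);
}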

◆ get_vara() [1/2]

template<class T , class real_type >
void dg::file::get_vara ( int ncid,
int varid,
unsigned slice,
const RealGrid0d< real_type > & grid,
T & data,
bool parallel = true )

DEPRECATED Read a scalar from the netcdf file.

Note
This function throws a dg::file::NC_Error if an error occurs
Template Parameters
T: Determines the data type to read
real_type: ignored
Parameters
ncid: NetCDF file or group ID
varid: Variable ID
slice: The number of the time-slice to read (first element of the startp array in nc_get_vara)
grid: a tag to signify scalar output (and help the compiler choose this function over the array input function). Can be of type dg::RealMPIGrid0d<real_type>
data: The (single) datum to read.
parallel: This parameter is ignored in both serial and MPI versions. In an MPI program all processes call this function and all processes read.
Note
The "master" thread is assumed to be the process with rank==0 in MPI_COMM_WORLD. The MPI_COMM_WORLD rank of a process is usually the same in a Cartesian communicator of the same size but is not guaranteed. So always check MPI_COMM_WORLD ranks for file write operations.
Note
In contrast to writing, reading a NetCDF-4 file can always be done in parallel (see https://docs.h5py.org/en/stable/mpi.html), so in MPI all processes can open a file, get variable ids and subsequently read, even if only serial NetCDF is used. The default for parallel is always true, in which case all processes must have previously opened the file and inquired e.g. the varid

◆ get_vara() [2/2]

template<class host_vector , class Topology >
void dg::file::get_vara ( int ncid,
int varid,
unsigned slice,
const Topology & grid,
host_vector & data,
bool parallel = true )

DEPRECATED Convenience wrapper around nc_get_vara()

The purpose of this function is mainly to simplify input in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-dependent variable, i.e. reads a single time-slice from the file. The dimensionality is given by the grid.

Note
This function throws a dg::file::NC_Error if an error occurs
Template Parameters
Topology: One of the dG defined grids (e.g. dg::RealGrid2d). Determines if the shared memory or MPI version is called
Template Parameters
host_vector: For a shared Topology: a type with a data() member that returns a pointer to the first element in CPU (host) address space, meaning it cannot be a GPU vector. For an MPI Topology: must be MPI_Vector. host_vector::value_type must match the data type of the variable in the file.
Parameters
ncid: NetCDF file or group ID
varid: Variable ID
slice: The number of the time-slice to read (first element of the startp array in nc_get_vara)
grid: The grid from which to construct start and count variables to forward to nc_get_vara
data: contains the read data on return (must be of size grid.size())
Parameters
parallel: This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process reads/writes to the file independently in parallel (true) or each process funnels its data through the master rank (false), which involves communication but may be faster than the former method.
Note
In an MPI environment
  • all processes must call this function,
  • processes that do not belong to the same communicator as the master process return immediately
In an MPI program it may happen that the data to read/write is partitioned among a process group smaller than MPI_COMM_WORLD, e.g. when reading/writing a 2d slice of a 3d vector. In this example case grid.communicator() is only 2d not 3d. Remember that only the group containing the master process reads/writes its data to the file, while all other processes immediately return. There are two ways to reliably read/write the data in such a case:
  • Manually assemble the data on the master process and construct an MPI grid with a Cartesian communicator containing only one process (using e.g. MPI_Comm_split on MPI_COMM_WORLD followed by MPI_Cart_create)
  • Manually assemble the data on the MPI group that contains the master process (cf MPI_Cart_sub)
Note
The "master" thread is assumed to be the process with rank==0 in MPI_COMM_WORLD. The MPI_COMM_WORLD rank of a process is usually the same in a Cartesian communicator of the same size but is not guaranteed. So always check MPI_COMM_WORLD ranks for file write operations.
Note
In contrast to writing, reading a NetCDF-4 file can always be done in parallel (see https://docs.h5py.org/en/stable/mpi.html), so in MPI all processes can open a file, get variable ids and subsequently read, even if only serial NetCDF is used. The default for parallel is always true, in which case all processes must have previously opened the file and inquired e.g. the varid
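
A shared-memory sketch that reads a single time-slice; names and grid parameters are assumptions of the example:

#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::Grid2d and dg::HVec
#include "dg/file/file.h"

int main()
{
    dg::Grid2d g2d( 0., 1., 0., 1., 3, 20, 20);
    dg::HVec data( g2d.size());
    int ncid, varID;
    nc_open( "output.nc", NC_NOWRITE, &ncid); // hypothetical file name
    nc_inq_varid( ncid, "ne", &varID);        // "ne" is a placeholder name
    // Read only the time-slice with index 10 of the time-dependent variable
    dg::file::get_vara( ncid, varID, 10, g2d, data);
    nc_close( ncid);
}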

◆ put_var() [1/2]

template<class T , class real_type >
void dg::file::put_var ( int ncid,
int varid,
const RealGrid0d< real_type > & grid,
T data,
bool parallel = false )

DEPRECATED Write a scalar to the NetCDF file.

Note
This function throws a dg::file::NC_Error if an error occurs
Template Parameters
T: Determines the data type to write
real_type: ignored
Parameters
ncid: NetCDF file or group ID
varid: Variable ID (Note that in NetCDF variables without dimensions are scalars)
grid: a tag to signify scalar output (and help the compiler choose this function over the array output function). Can be of type dg::RealMPIGrid<real_type>
data: The (single) datum to write.
parallel: This parameter is ignored in both serial and MPI versions. In an MPI program all processes can call this function but only the master thread writes.
Note
The "master" thread is assumed to be the process with rank==0 in MPI_COMM_WORLD. The MPI_COMM_WORLD rank of a process is usually the same in a Cartesian communicator of the same size but is not guaranteed. So always check MPI_COMM_WORLD ranks for file write operations.

◆ put_var() [2/2]

template<class host_vector , class Topology >
void dg::file::put_var ( int ncid,
int varid,
const Topology & grid,
const host_vector & data,
bool parallel = false )

DEPRECATED Write an array to NetCDF file.

Convenience wrapper around nc_put_var

The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-independent variable, i.e. writes a single variable in one go and is actually equivalent to nc_put_var. The dimensionality is given by the grid.

Note
This function throws a dg::file::NC_Error if an error occurs
Template Parameters
Topology: One of the dG defined grids (e.g. dg::RealGrid2d). Determines if the shared memory or MPI version is called
Template Parameters
host_vector: For a shared Topology: a type with a data() member that returns a pointer to the first element in CPU (host) address space, meaning it cannot be a GPU vector. For an MPI Topology: must be MPI_Vector. host_vector::value_type must match the data type of the variable in the file.
Parameters
ncid: NetCDF file or group ID
varid: Variable ID
grid: The grid from which to construct start and count variables to forward to nc_put_vara
data: data to be written to the NetCDF file
Parameters
parallel: This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process reads/writes to the file independently in parallel (true) or each process funnels its data through the master rank (false), which involves communication but may be faster than the former method.
Note
In an MPI environment
  • all processes must call this function,
  • processes that do not belong to the same communicator as the master process return immediately
In an MPI program it may happen that the data to read/write is partitioned among a process group smaller than MPI_COMM_WORLD, e.g. when reading/writing a 2d slice of a 3d vector. In this example case grid.communicator() is only 2d not 3d. Remember that only the group containing the master process reads/writes its data to the file, while all other processes immediately return. There are two ways to reliably read/write the data in such a case:
  • Manually assemble the data on the master process and construct an MPI grid with a Cartesian communicator containing only one process (using e.g. MPI_Comm_split on MPI_COMM_WORLD followed by MPI_Cart_create)
  • Manually assemble the data on the MPI group that contains the master process (cf MPI_Cart_sub)
Note
The "master" thread is assumed to be the process with rank==0 in MPI_COMM_WORLD. The MPI_COMM_WORLD rank of a process is usually the same in a Cartesian communicator of the same size but is not guaranteed. So always check MPI_COMM_WORLD ranks for file write operations.
Attention
With the serial NetCDF library only a single "master" process can write to a NetCDF file (creation, defining dimension ids, variable ids, writing, etc.). Thus, in an MPI program
  • parallel should be false
  • the program links to serial NetCDF and hdf5
  • only the master thread needs to know the ncid, variable or dimension names, the slice to write etc.
There is a parallel NetCDF library where all processes can have write access in parallel. In this case
  • parallel should be true
  • the program links to parallel NetCDF and hdf5
  • the file must be opened with the NC_MPIIO flag from the NetCDF_par.h header and the variable must be marked with NC_COLLECTIVE access
  • all threads need to know the ncid, variable and dimension names, the slice to write etc.
Note that serious performance penalties have been observed on some platforms when writing NetCDF files in parallel.
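
A shared-memory sketch that writes one time-independent field; the grid parameters, file and variable names, dg::evaluate and dg::HVec are assumptions of the example:

#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::Grid2d, dg::evaluate, dg::HVec
#include "dg/file/file.h"

int main()
{
    dg::Grid2d g2d( 0., 1., 0., 1., 3, 20, 20);
    dg::HVec data = dg::evaluate( []( double x, double y){ return x*y;}, g2d); // made-up data
    int ncid, dim_ids[2], varID;
    nc_create( "fields.nc", NC_NETCDF4 | NC_CLOBBER, &ncid); // hypothetical file name
    dg::file::define_dimensions( ncid, dim_ids, g2d);
    nc_def_var( ncid, "ne", NC_DOUBLE, 2, dim_ids, &varID); // "ne" is a placeholder name
    // NetCDF-4 files switch between define and data mode automatically
    dg::file::put_var( ncid, varID, g2d, data);
    nc_close( ncid);
}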

◆ put_vara() [1/2]

template<class T , class real_type >
void dg::file::put_vara ( int ncid,
int varid,
unsigned slice,
const RealGrid0d< real_type > & grid,
T data,
bool parallel = false )

DEPRECATED Write a scalar to the NetCDF file.

Note
This function throws a dg::file::NC_Error if an error occurs
Template Parameters
T: Determines data type to write
real_type: ignored
Parameters
ncid: NetCDF file or group ID
varid: Variable ID (Note that in NetCDF variables without dimensions are scalars)
slice: The number of the time-slice to write (first element of the startp array in nc_put_vara)
Note
In NetCDF all variables that share an unlimited dimension are considered to have the same size in that dimension. In fact, the size of the unlimited dimension is the maximum of the sizes of all the variables sharing that unlimited dimension. All variables are artificially filled up with fill values to match that maximum size. It is entirely possible to skip writing data for variables for some times. It is also possible to write data to unlimited variables at slice>=size (in which case all variables sharing the unlimited dimension will increase in size) but it is not possible to read data at slice>=size. It is the user's responsibility to manage the slice value across variables.
Parameters
grid: a tag to signify scalar output (and help the compiler choose this function over the array output function). Can be of type dg::RealMPIGrid<real_type>
data: The (single) datum to write.
parallel: This parameter is ignored in both serial and MPI versions. In an MPI program all processes can call this function but only the master thread writes.
Note
The "master" thread is assumed to be the process with rank==0 in MPI_COMM_WORLD. The MPI_COMM_WORLD rank of a process is usually the same in a Cartesian communicator of the same size but is not guaranteed. So always check MPI_COMM_WORLD ranks for file write operations.
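
A sketch of a scalar time series (e.g. an energy diagnostic); it assumes dg::RealGrid0d<double> can be default constructed as the tag, and all names and values are made up:

#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::RealGrid0d
#include "dg/file/file.h"

int main()
{
    int ncid, dimID, tvarID, varID;
    nc_create( "diagnostics.nc", NC_NETCDF4 | NC_CLOBBER, &ncid); // hypothetical file name
    dg::file::define_real_time<double>( ncid, "time", &dimID, &tvarID);
    nc_def_var( ncid, "energy", NC_DOUBLE, 1, &dimID, &varID); // placeholder name
    dg::RealGrid0d<double> g0d; // assumed default-constructible 0d tag grid
    for( unsigned i = 0; i < 10; i++)
    {
        double time = 0.1*i, energy = 1.0/(1.0 + time); // made-up data
        dg::file::put_vara( ncid, tvarID, i, g0d, time);   // write the time coordinate
        dg::file::put_vara( ncid, varID,  i, g0d, energy); // write the scalar time series
    }
    nc_close( ncid);
}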

◆ put_vara() [2/2]

template<class host_vector , class Topology >
void dg::file::put_vara ( int ncid,
int varid,
unsigned slice,
const Topology & grid,
const host_vector & data,
bool parallel = false )

DEPRECATED Write an array to NetCDF file.

Convenience wrapper around nc_put_vara

The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-dependent variable, i.e. writes a single time-slice into the file. The dimensionality is given by the grid.

Note
This function throws a dg::file::NC_Error if an error occurs
Template Parameters
Topology: One of the dG defined grids (e.g. dg::RealGrid2d). Determines if the shared memory or MPI version is called
Template Parameters
host_vector: For a shared Topology: a type with a data() member that returns a pointer to the first element in CPU (host) address space, meaning it cannot be a GPU vector. For an MPI Topology: must be MPI_Vector. host_vector::value_type must match the data type of the variable in the file.
Parameters
ncid: NetCDF file or group ID
varid: Variable ID
slice: The number of the time-slice to write (first element of the startp array in nc_put_vara)
Note
In NetCDF all variables that share an unlimited dimension are considered to have the same size in that dimension. In fact, the size of the unlimited dimension is the maximum of the sizes of all the variables sharing that unlimited dimension. All variables are artificially filled up with fill values to match that maximum size. It is entirely possible to skip writing data for variables for some times. It is also possible to write data to unlimited variables at slice>=size (in which case all variables sharing the unlimited dimension will increase in size) but it is not possible to read data at slice>=size. It is the user's responsibility to manage the slice value across variables.
Parameters
grid: The grid from which to construct start and count variables to forward to nc_put_vara
data: data to be written to the NetCDF file
Parameters
parallel: This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process reads/writes to the file independently in parallel (true) or each process funnels its data through the master rank (false), which involves communication but may be faster than the former method.
Note
In an MPI environment
  • all processes must call this function,
  • processes that do not belong to the same communicator as the master process return immediately
In an MPI program it may happen that the data to read/write is partitioned among a process group smaller than MPI_COMM_WORLD, e.g. when reading/writing a 2d slice of a 3d vector. In this example case grid.communicator() is only 2d not 3d. Remember that only the group containing the master process reads/writes its data to the file, while all other processes immediately return. There are two ways to reliably read/write the data in such a case:
  • Manually assemble the data on the master process and construct an MPI grid with a Cartesian communicator containing only one process (using e.g. MPI_Comm_split on MPI_COMM_WORLD followed by MPI_Cart_create)
  • Manually assemble the data on the MPI group that contains the master process (cf MPI_Cart_sub)
Note
The "master" thread is assumed to be the process with rank==0 in MPI_COMM_WORLD. The MPI_COMM_WORLD rank of a process is usually the same in a Cartesian communicator of the same size but is not guaranteed. So always check MPI_COMM_WORLD ranks for file write operations.
Attention
With the serial NetCDF library only a single "master" process can write to a NetCDF file (creation, defining dimension ids, variable ids, writing, etc.). Thus, in an MPI program
  • parallel should be false
  • the program links to serial NetCDF and hdf5
  • only the master thread needs to know the ncid, variable or dimension names, the slice to write etc.
There is a parallel NetCDF library where all processes can have write access in parallel. In this case
  • parallel should be true
  • the program links to parallel NetCDF and hdf5
  • the file must be opened with the NC_MPIIO flag from the NetCDF_par.h header and the variable must be marked with NC_COLLECTIVE access
  • all threads need to know the ncid, variable and dimension names, the slice to write etc.
Note that serious performance penalties have been observed on some platforms when writing NetCDF files in parallel.
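
A shared-memory sketch of a typical time loop that writes one 2d slice per step; grid parameters, file and variable names and the written data are assumptions of the example:

#include <cmath>
#include <netcdf.h>
#include "dg/algorithm.h" // assumed to provide dg::Grid2d, dg::evaluate, dg::HVec
#include "dg/file/file.h"

int main()
{
    dg::Grid2d g2d( 0., 1., 0., 1., 3, 20, 20);
    int ncid, dim_ids[3], tvarID, varID; // unlimited time + 2 spatial dimensions
    nc_create( "output.nc", NC_NETCDF4 | NC_CLOBBER, &ncid); // hypothetical file name
    dg::file::define_dimensions( ncid, dim_ids, &tvarID, g2d); // "time", "y", "x"
    nc_def_var( ncid, "ne", NC_DOUBLE, 3, dim_ids, &varID); // "ne" is a placeholder name
    for( unsigned i = 0; i < 5; i++)
    {
        double time = 0.1*i;
        dg::HVec data = dg::evaluate( [time]( double x, double y){
            return std::sin( x)*std::cos( y)*time;}, g2d); // made-up data
        // Write the field at time-slice i (the time coordinate itself could be
        // written with the scalar put_vara overload)
        dg::file::put_vara( ncid, varID, i, g2d, data);
    }
    nc_close( ncid);
}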