Extension: Json and NetCDF utilities
#include "dg/file/file.h" (includes both Json and NetCDF utilities)
|
Classes | |
struct | dg::file::NC_Error |
Class thrown by the NC_Error_Handle. More... | |
struct | dg::file::NC_Error_Handle |
Empty utility class that handles return values of netcdf functions and throws NC_Error(status) if( status != NC_NOERR) More... | |
Functions | |
template<class host_vector > | |
void | dg::file::put_var_double (int ncid, int varid, const dg::aTopology2d &grid, const host_vector &data, bool parallel=false) |
Convenience wrapper around nc_put_vara_double() More... | |
template<class host_vector > | |
void | dg::file::put_vara_double (int ncid, int varid, unsigned slice, const dg::aTopology2d &grid, const host_vector &data, bool parallel=false) |
Convenience wrapper around nc_put_vara_double() More... | |
template<class host_vector > | |
void | dg::file::put_var_double (int ncid, int varid, const dg::aTopology3d &grid, const host_vector &data, bool parallel=false) |
Convenience wrapper around nc_put_vara_double() More... | |
template<class host_vector > | |
void | dg::file::put_vara_double (int ncid, int varid, unsigned slice, const dg::aTopology3d &grid, const host_vector &data, bool parallel=false) |
Convenience wrapper around nc_put_vara_double() More... | |
template<class host_vector > | |
void | dg::file::put_var_double (int ncid, int varid, const dg::aMPITopology2d &grid, const dg::MPI_Vector< host_vector > &data, bool parallel=false) |
Convenience wrapper around nc_put_vara_double() More... | |
template<class host_vector > | |
void | dg::file::put_vara_double (int ncid, int varid, unsigned slice, const dg::aMPITopology2d &grid, const dg::MPI_Vector< host_vector > &data, bool parallel=false) |
Convenience wrapper around nc_put_vara_double() More... | |
template<class host_vector > | |
void | dg::file::put_var_double (int ncid, int varid, const dg::aMPITopology3d &grid, const dg::MPI_Vector< host_vector > &data, bool parallel=false) |
Convenience wrapper around nc_put_vara_double() More... | |
template<class host_vector > | |
void | dg::file::put_vara_double (int ncid, int varid, unsigned slice, const dg::aMPITopology3d &grid, const dg::MPI_Vector< host_vector > &data, bool parallel=false) |
Convenience wrapper around nc_put_vara_double() More... | |
template<class T > | |
int | dg::file::define_real_time (int ncid, const char *name, int *dimID, int *tvarID) |
Define an unlimited time dimension and coordinate variable. More... | |
static int | dg::file::define_time (int ncid, const char *name, int *dimID, int *tvarID) |
Define an unlimited time dimension and coordinate variable. More... | |
static int | dg::file::define_limited_time (int ncid, const char *name, int size, int *dimID, int *tvarID) |
Define a limited time dimension and coordinate variable. More... | |
template<class T > | |
int | dg::file::define_dimension (int ncid, int *dimID, const dg::RealGrid1d< T > &g, std::string name_dim="x", std::string axis="X") |
Define a 1d dimension and associated coordinate variable. More... | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, int *tvarID, const dg::RealGrid1d< T > &g, std::array< std::string, 2 > name_dims={"time","x"}) |
Define an unlimited time and a dimension together with their coordinate variables. More... | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, const dg::aRealTopology2d< T > &g, std::array< std::string, 2 > name_dims={"y", "x"}) |
Define 2 dimensions and associated coordinate variables. More... | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, int *tvarID, const dg::aRealTopology2d< T > &g, std::array< std::string, 3 > name_dims={"time", "y", "x"}) |
Define an unlimited time and 2 dimensions and associated coordinate variables. More... | |
template<class T > | |
int | dg::file::define_limtime_xy (int ncid, int *dimsIDs, int size, int *tvarID, const dg::aRealTopology2d< T > &g, std::array< std::string, 3 > name_dims={"time", "y", "x"}) |
Define a limited time and 2 dimensions and associated coordinate variables. More... | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, const dg::aRealTopology3d< T > &g, std::array< std::string, 3 > name_dims={"z", "y", "x"}) |
Define 3 dimensions and associated coordinate variables. More... | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, int *tvarID, const dg::aRealTopology3d< T > &g, std::array< std::string, 4 > name_dims={"time", "z", "y", "x"}) |
Define an unlimited time and 3 dimensions together with their coordinate variables. More... | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, const dg::aRealMPITopology2d< T > &g, std::array< std::string, 2 > name_dims={"y", "x"}) |
Only master process should call this!! Convenience function that just calls the corresponding serial version with the global grid. | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, int *tvarID, const dg::aRealMPITopology2d< T > &g, std::array< std::string, 3 > name_dims={"time", "y", "x"}) |
Only master process should call this!! Convenience function that just calls the corresponding serial version with the global grid. | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, const dg::aRealMPITopology3d< T > &g, std::array< std::string, 3 > name_dims={"z", "y", "x"}) |
Only master process should call this!! Convenience function that just calls the corresponding serial version with the global grid. | |
template<class T > | |
int | dg::file::define_dimensions (int ncid, int *dimsIDs, int *tvarID, const dg::aRealMPITopology3d< T > &g, std::array< std::string, 4 > name_dims={"time", "z", "y", "x"}) |
Only master process should call this!! Convenience function that just calls the corresponding serial version with the global grid. | |
#include "dg/file/nc_utilities.h" (link -lnetcdf -lhdf5[_serial] -lhdf5[_serial]_hl)
|
inline |
Define a 1d dimension and associated coordinate variable.
ncid | file ID |
dimID | dimension ID (output) |
g | The 1d DG grid from which data points for coordinate variable are generated using dg::create::abscissas(g) |
name_dim | Name of dimension and coordinate variable (input) |
axis | The axis attribute (input), ("X", "Y" or "Z") |
T | determines the datatype of the dimension variables |
|
inline |
Define 2 dimensions and associated coordinate variables.
Dimensions have attributes of (Y, X)
ncid | file ID |
dimsIDs | (write - only) 2D array of dimension IDs (Y,X) |
g | The 2d DG grid from which data points for coordinate variable are generated using dg::create::abscissas(g) in each dimension |
name_dims | Names for the dimension variables |
T | determines the datatype of the dimension variables |
|
inline |
Define 3 dimensions and associated coordinate variables.
Dimensions have attributes ( Z, Y, X)
ncid | file ID |
dimsIDs | (write - only) 3D array of dimension IDs (Z,Y,X) |
g | The 3d DG grid from which data points for coordinate variable are generated using dg::create::abscissas(g) in each dimension |
name_dims | Names for the dimension variables ( Z, Y, X) |
T | determines the datatype of the dimension variables |
|
inline |
Define an unlimited time and 2 dimensions and associated coordinate variables.
Semantically equivalent to the following:
Dimensions have attributes of (time, Y, X)
ncid | file ID |
dimsIDs | (write - only) 3D array of dimension IDs (time, Y,X) |
tvarID | (write - only) The ID of the time variable ( unlimited) |
g | The 2d DG grid from which data points for coordinate variable are generated using dg::create::abscissas(g) in each dimension |
name_dims | Names for the dimension variables ( time, Y, X) |
T | determines the datatype of the dimension variables |
|
inline |
Define an unlimited time and 3 dimensions together with their coordinate variables.
Semantically equivalent to the following:
Dimensions have attributes ( time, Z, Y, X)
ncid | file ID |
dimsIDs | (write - only) 4D array of dimension IDs (time, Z,Y,X) |
tvarID | (write - only) The ID of the time variable ( unlimited) |
g | The 3d DG grid from which data points for coordinate variable are generated using dg::create::abscissas(g) in each dimension |
name_dims | Names for the dimension variables ( time, Z, Y, X) |
T | determines the datatype of the dimension variables |
|
inline |
Define an unlimited time and a dimension together with their coordinate variables.
Semantically equivalent to the following:
Dimensions have attribute of (time, X)
ncid | file ID |
dimsIDs | dimension IDs (time, X) |
tvarID | time coordinate variable ID (unlimited) |
g | The 1d DG grid from which data points for coordinate variable are generated using dg::create::abscissas(g) |
name_dims | Names for the dimension and coordinate variables |
T | determines the datatype of the dimension variables |
|
inlinestatic |
Define a limited time dimension and coordinate variable.
ncid | file ID |
name | Name of the time variable (usually "time") |
size | The number of timesteps |
dimID | time-dimension ID |
tvarID | time-variable ID (for a time variable of type NC_DOUBLE ) |
|
inline |
Define a limited time and 2 dimensions and associated coordinate variables.
Semantically equivalent to the following:
Dimensions have attributes of (time, Y, X)
ncid | file ID |
dimsIDs | (write - only) 3D array of dimension IDs (time, Y,X) |
size | The size of the time variable |
tvarID | (write - only) The ID of the time variable (limited) |
g | The 2d DG grid from which data points for coordinate variable are generated using dg::create::abscissas(g) |
name_dims | Names for the dimension variables (time, Y, X) |
T | determines the datatype of the dimension variables |
|
inline |
Define an unlimited time dimension and coordinate variable.
ncid | file ID |
name | Name of time variable (variable names are not standardized) |
dimID | time-dimension ID |
tvarID | time-variable ID (for a time variable of type NC_DOUBLE ) |
|
inlinestatic |
Define an unlimited time dimension and coordinate variable.
ncid | file ID |
name | Name of time variable (variable names are not standardized) |
dimID | time-dimension ID |
tvarID | time-variable ID (for a time variable of type NC_DOUBLE ) |
void dg::file::put_var_double | ( | int | ncid, |
int | varid, | ||
const dg::aMPITopology2d & | grid, | ||
const dg::MPI_Vector< host_vector > & | data, | ||
bool | parallel = false |
||
) |
Convenience wrapper around nc_put_vara_double()
The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-independent variable, i.e. writes a single variable in one go and is actually equivalent to nc_put_var_double
. The dimensionality is given by the grid.
dg::file::NC_Error
if an error occurs host_vector | Type with data() member that returns pointer to first element in CPU (host) address space, meaning it cannot be a GPU vector |
ncid | Forwarded to nc_put_vara_double |
varid | Forwarded to nc_put_vara_double |
grid | The grid from which to construct start and count variables to forward to nc_put_vara_double |
data | data is forwarded to nc_put_vara_double |
parallel | This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process writes to the file independently in parallel (true ) or each process funnels its data through the master rank (false ), which involves communication but may be faster than the former method. |
parallel==true
a parallel netcdf and hdf5 must be linked, the file opened with the NC_MPIIO
flag from the netcdf_par.h
header and the variable be marked with NC_COLLECTIVE
access while if parallel==false
we need serial netcdf and hdf5 and only the master thread needs to open and access the file. Note that serious performance penalties have been observed on some platforms for parallel netcdf. void dg::file::put_var_double | ( | int | ncid, |
int | varid, | ||
const dg::aMPITopology3d & | grid, | ||
const dg::MPI_Vector< host_vector > & | data, | ||
bool | parallel = false |
||
) |
Convenience wrapper around nc_put_vara_double()
The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-independent variable, i.e. writes a single variable in one go and is actually equivalent to nc_put_var_double
. The dimensionality is given by the grid.
dg::file::NC_Error
if an error occurs host_vector | Type with data() member that returns pointer to first element in CPU (host) address space, meaning it cannot be a GPU vector |
ncid | Forwarded to nc_put_vara_double |
varid | Forwarded to nc_put_vara_double |
grid | The grid from which to construct start and count variables to forward to nc_put_vara_double |
data | data is forwarded to nc_put_vara_double |
parallel | This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process writes to the file independently in parallel (true ) or each process funnels its data through the master rank (false ), which involves communication but may be faster than the former method. |
parallel==true
a parallel netcdf and hdf5 must be linked, the file opened with the NC_MPIIO
flag from the netcdf_par.h
header and the variable be marked with NC_COLLECTIVE
access while if parallel==false
we need serial netcdf and hdf5 and only the master thread needs to open and access the file. Note that serious performance penalties have been observed on some platforms for parallel netcdf. void dg::file::put_var_double | ( | int | ncid, |
int | varid, | ||
const dg::aTopology2d & | grid, | ||
const host_vector & | data, | ||
bool | parallel = false |
||
) |
Convenience wrapper around nc_put_vara_double()
The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-independent variable, i.e. writes a single variable in one go and is actually equivalent to nc_put_var_double
. The dimensionality is given by the grid.
dg::file::NC_Error
if an error occurs host_vector | Type with data() member that returns pointer to first element in CPU (host) address space, meaning it cannot be a GPU vector |
ncid | Forwarded to nc_put_vara_double |
varid | Forwarded to nc_put_vara_double |
grid | The grid from which to construct start and count variables to forward to nc_put_vara_double |
data | data is forwarded to nc_put_vara_double |
parallel | This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process writes to the file independently in parallel (true ) or each process funnels its data through the master rank (false ), which involves communication but may be faster than the former method. |
parallel==true
a parallel netcdf and hdf5 must be linked, the file opened with the NC_MPIIO
flag from the netcdf_par.h
header and the variable be marked with NC_COLLECTIVE
access while if parallel==false
we need serial netcdf and hdf5 and only the master thread needs to open and access the file. Note that serious performance penalties have been observed on some platforms for parallel netcdf. void dg::file::put_var_double | ( | int | ncid, |
int | varid, | ||
const dg::aTopology3d & | grid, | ||
const host_vector & | data, | ||
bool | parallel = false |
||
) |
Convenience wrapper around nc_put_vara_double()
The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-independent variable, i.e. writes a single variable in one go and is actually equivalent to nc_put_var_double
. The dimensionality is given by the grid.
dg::file::NC_Error
if an error occurs host_vector | Type with data() member that returns pointer to first element in CPU (host) address space, meaning it cannot be a GPU vector |
ncid | Forwarded to nc_put_vara_double |
varid | Forwarded to nc_put_vara_double |
grid | The grid from which to construct start and count variables to forward to nc_put_vara_double |
data | data is forwarded to nc_put_vara_double |
parallel | This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process writes to the file independently in parallel (true ) or each process funnels its data through the master rank (false ), which involves communication but may be faster than the former method. |
parallel==true
a parallel netcdf and hdf5 must be linked, the file opened with the NC_MPIIO
flag from the netcdf_par.h
header and the variable be marked with NC_COLLECTIVE
access while if parallel==false
we need serial netcdf and hdf5 and only the master thread needs to open and access the file. Note that serious performance penalties have been observed on some platforms for parallel netcdf. void dg::file::put_vara_double | ( | int | ncid, |
int | varid, | ||
unsigned | slice, | ||
const dg::aMPITopology2d & | grid, | ||
const dg::MPI_Vector< host_vector > & | data, | ||
bool | parallel = false |
||
) |
Convenience wrapper around nc_put_vara_double()
The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-dependent variable, i.e. writes a single time-slice into the file. The dimensionality is given by the grid.
dg::file::NC_Error
if an error occurs host_vector | Type with data() member that returns pointer to first element in CPU (host) address space, meaning it cannot be a GPU vector |
ncid | Forwarded to nc_put_vara_double |
varid | Forwarded to nc_put_vara_double |
slice | The number of the time-slice to write (first element of the startp array in nc_put_vara_double ) |
grid | The grid from which to construct start and count variables to forward to nc_put_vara_double |
data | data is forwarded to nc_put_vara_double , |
parallel | This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process writes to the file independently in parallel (true ) or each process funnels its data through the master rank (false ), which involves communication but may be faster than the former method. |
parallel==true
a parallel netcdf and hdf5 must be linked, the file opened with the NC_MPIIO
flag from the netcdf_par.h
header and the variable be marked with NC_COLLECTIVE
access while if parallel==false
we need serial netcdf and hdf5 and only the master thread needs to open and access the file. Note that serious performance penalties have been observed on some platforms for parallel netcdf. void dg::file::put_vara_double | ( | int | ncid, |
int | varid, | ||
unsigned | slice, | ||
const dg::aMPITopology3d & | grid, | ||
const dg::MPI_Vector< host_vector > & | data, | ||
bool | parallel = false |
||
) |
Convenience wrapper around nc_put_vara_double()
The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-dependent variable, i.e. writes a single time-slice into the file. The dimensionality is given by the grid.
dg::file::NC_Error
if an error occurs host_vector | Type with data() member that returns pointer to first element in CPU (host) address space, meaning it cannot be a GPU vector |
ncid | Forwarded to nc_put_vara_double |
varid | Forwarded to nc_put_vara_double |
slice | The number of the time-slice to write (first element of the startp array in nc_put_vara_double ) |
grid | The grid from which to construct start and count variables to forward to nc_put_vara_double |
data | data is forwarded to nc_put_vara_double , |
parallel | This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process writes to the file independently in parallel (true ) or each process funnels its data through the master rank (false ), which involves communication but may be faster than the former method. |
parallel==true
a parallel netcdf and hdf5 must be linked, the file opened with the NC_MPIIO
flag from the netcdf_par.h
header and the variable be marked with NC_COLLECTIVE
access while if parallel==false
we need serial netcdf and hdf5 and only the master thread needs to open and access the file. Note that serious performance penalties have been observed on some platforms for parallel netcdf. void dg::file::put_vara_double | ( | int | ncid, |
int | varid, | ||
unsigned | slice, | ||
const dg::aTopology2d & | grid, | ||
const host_vector & | data, | ||
bool | parallel = false |
||
) |
Convenience wrapper around nc_put_vara_double()
The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-dependent variable, i.e. writes a single time-slice into the file. The dimensionality is given by the grid.
dg::file::NC_Error
if an error occurs host_vector | Type with data() member that returns pointer to first element in CPU (host) address space, meaning it cannot be a GPU vector |
ncid | Forwarded to nc_put_vara_double |
varid | Forwarded to nc_put_vara_double |
slice | The number of the time-slice to write (first element of the startp array in nc_put_vara_double ) |
grid | The grid from which to construct start and count variables to forward to nc_put_vara_double |
data | data is forwarded to nc_put_vara_double , |
parallel | This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process writes to the file independently in parallel (true ) or each process funnels its data through the master rank (false ), which involves communication but may be faster than the former method. |
parallel==true
a parallel netcdf and hdf5 must be linked, the file opened with the NC_MPIIO
flag from the netcdf_par.h
header and the variable be marked with NC_COLLECTIVE
access while if parallel==false
we need serial netcdf and hdf5 and only the master thread needs to open and access the file. Note that serious performance penalties have been observed on some platforms for parallel netcdf. void dg::file::put_vara_double | ( | int | ncid, |
int | varid, | ||
unsigned | slice, | ||
const dg::aTopology3d & | grid, | ||
const host_vector & | data, | ||
bool | parallel = false |
||
) |
Convenience wrapper around nc_put_vara_double()
The purpose of this function is mainly to simplify output in an MPI environment and to provide the same interface also in a shared memory system for uniform programming. This version is for a time-dependent variable, i.e. writes a single time-slice into the file. The dimensionality is given by the grid.
dg::file::NC_Error
if an error occurs host_vector | Type with data() member that returns pointer to first element in CPU (host) address space, meaning it cannot be a GPU vector |
ncid | Forwarded to nc_put_vara_double |
varid | Forwarded to nc_put_vara_double |
slice | The number of the time-slice to write (first element of the startp array in nc_put_vara_double ) |
grid | The grid from which to construct start and count variables to forward to nc_put_vara_double |
data | data is forwarded to nc_put_vara_double , |
parallel | This parameter is ignored in the serial version. In the MPI version this parameter indicates whether each process writes to the file independently in parallel (true ) or each process funnels its data through the master rank (false ), which involves communication but may be faster than the former method. |
parallel==true
a parallel netcdf and hdf5 must be linked, the file opened with the NC_MPIIO
flag from the netcdf_par.h
header and the variable be marked with NC_COLLECTIVE
access while if parallel==false
we need serial netcdf and hdf5 and only the master thread needs to open and access the file. Note that serious performance penalties have been observed on some platforms for parallel netcdf.