#include "dg/algorithm.h"
template<class real_type>
struct RealCurvilinearProductMPIGrid3d;
template<class real_type>
struct RealCurvilinearMPIGrid2d : public dg::aRealMPIGeometry2d<real_type>
{
    RealCurvilinearMPIGrid2d() = default;
    // Construct a 2D grid (delegates to the Topology1d constructor)
    RealCurvilinearMPIGrid2d( const aRealGenerator2d<real_type>& generator,
        unsigned n, unsigned Nx, unsigned Ny, dg::bc bcx, dg::bc bcy, MPI_Comm comm):
        RealCurvilinearMPIGrid2d( generator, {n,Nx,bcx}, {n,Ny,bcy}, comm){}
    // Construct the computational space as the product of two 1d grids
    RealCurvilinearMPIGrid2d( const aRealGenerator2d<real_type>& generator,
        Topology1d tx, Topology1d ty, MPI_Comm comm):
        dg::aRealMPIGeometry2d<real_type>( /*... domain boundaries ...*/
            {tx.n,ty.n}, {tx.N,ty.N}, { tx.b, ty.b},
            dg::mpi_cart_split_as<2>(comm)),
        m_handle(generator)
    {
        // generate the global 2d grid, then reduce it to the local pieces
        RealCurvilinearGrid2d<real_type> g( generator, tx, ty);
        divide_and_conquer(g);
    }
    // ... (conversion constructor, generator(), clone(), global_geometry();
    //      see the member documentation below)
  private:
    virtual void do_set( std::array<unsigned,2> new_n, std::array<unsigned,2> new_N) override final
    {
        // ... (only re-generate if the resolution actually changed)
        RealCurvilinearGrid2d<real_type> g( *m_handle,
            {new_n[0], new_N[0]}, {new_n[1], new_N[1]});
        divide_and_conquer(g);
    }
    virtual void do_set( std::array<dg::bc,2> new_bc) override final
    {
        // ...
    }
    virtual void do_set_pq( std::array<real_type,2> new_x0, std::array<real_type,2> new_x1) override final
    {
        // ...
    }
    // compute geometric quantities on the global grid, then distribute
    // the results to the processes
    void divide_and_conquer( const RealCurvilinearGrid2d<real_type>& g_)
    {
        dg::SparseTensor<thrust::host_vector<real_type>> jacobian = g_.jacobian();
        dg::SparseTensor<thrust::host_vector<real_type>> metric   = g_.metric();
        std::vector<thrust::host_vector<real_type>> map = g_.map();
        for( unsigned i=0; i<3; i++)
            for( unsigned j=0; j<3; j++)
            {
                m_jac.idx(i,j)    = jacobian.idx(i,j);
                m_metric.idx(i,j) = metric.idx(i,j);
            }
        // scatter the globally computed values to the local processes
        m_jac.values().resize( jacobian.values().size());
        for( unsigned i=0; i<jacobian.values().size(); i++)
            m_jac.values()[i] = global2local( jacobian.values()[i], *this);
        m_metric.values().resize( metric.values().size());
        for( unsigned i=0; i<metric.values().size(); i++)
            m_metric.values()[i] = global2local( metric.values()[i], *this);
        m_map.resize( map.size());
        for( unsigned i=0; i<map.size(); i++)
            m_map[i] = global2local( map[i], *this);
    }
    virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_jacobian( ) const override final{
        return m_jac;}
    virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_metric( ) const override final{
        return m_metric;}
    virtual std::vector<MPI_Vector<thrust::host_vector<real_type>>> do_compute_map()const override final{
        return m_map;}
    dg::SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> m_jac, m_metric;
    std::vector<MPI_Vector<thrust::host_vector<real_type>>> m_map;
    ClonePtr<dg::geo::aRealGenerator2d<real_type>> m_handle;
};
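A minimal usage sketch (not part of the header): construct the distributed grid from a generator on a 2d Cartesian communicator. MyGenerator is a placeholder for any concrete implementation of dg::geo::aRealGenerator2d<double>; everything else is plain MPI plus the constructor documented below. The include path follows the usual FELTOR convention.

#include <mpi.h>
#include "dg/algorithm.h"
#include "dg/geometries/geometries.h"

int main( int argc, char* argv[])
{
    MPI_Init( &argc, &argv);
    int size;
    MPI_Comm_size( MPI_COMM_WORLD, &size);
    // a 2d Cartesian communicator, periodic in the second direction
    int dims[2] = {0,0}, periods[2] = {0,1};
    MPI_Dims_create( size, 2, dims);
    MPI_Comm comm2d;
    MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 1, &comm2d);

    MyGenerator generator( /*...*/ ); // hypothetical concrete generator
    // 3 polynomial coefficients, 40x40 cells (global numbers)
    dg::geo::CurvilinearMPIGrid2d g2d( generator, 3, 40, 40,
        dg::DIR, dg::PER, comm2d);
    // map and metric are already distributed among the processes
    auto map    = g2d.map();
    auto metric = g2d.metric();

    MPI_Finalize();
    return 0;
}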
template<class real_type>
struct RealCurvilinearProductMPIGrid3d : public dg::aRealProductMPIGeometry3d<real_type>
{
    // the two-dimensional grid
    typedef dg::geo::RealCurvilinearMPIGrid2d<real_type> perpendicular_grid;
    RealCurvilinearProductMPIGrid3d() = default;
    // Construct a 3D grid; the third dimension is [0,2pi) with one polynomial
    // coefficient and the last component of the split communicator
    RealCurvilinearProductMPIGrid3d( const aRealGenerator2d<real_type>& generator,
        unsigned n, unsigned Nx, unsigned Ny, unsigned Nz, bc bcx, bc bcy, bc bcz, MPI_Comm comm):
        RealCurvilinearProductMPIGrid3d( generator, {n,Nx,bcx}, {n,Ny,bcy},
            RealMPIGrid1d<real_type>{ 0., 2.*M_PI, 1, Nz, bcz,
                dg::mpi_cart_split_as<3>(comm)[2]}, comm){}
    // Construct the computational space as the product of three 1d grids
    RealCurvilinearProductMPIGrid3d( const aRealGenerator2d<real_type>& generator,
        Topology1d tx, Topology1d ty, RealMPIGrid1d<real_type> gz, MPI_Comm comm):
        dg::aRealProductMPIGeometry3d<real_type>( /*...*/
            {tx.N, ty.N, gz.N()}, { tx.b, ty.b, gz.bcx()},
            /*...*/),
        m_handle(generator)
    {
        // ... (build the perpendicular 2d grid and store its jacobian and map)
        constructParallel( this->nz(), this->local().Nz());
    }
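    // Note: the generator is two-dimensional, so the jacobian and the first
    // two map coordinates are independent of z. constructParallel() below
    // exploits this: the quantities are computed on one z-plane and that
    // plane is copied to all nz*localNz planes of the local 3d grid.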
  private:
    virtual void do_set( std::array<unsigned,3> new_n, std::array<unsigned,3> new_N) override final
    {
        auto old_n = this->get_n(), old_N = this->get_N();
        // ...
        if( !( new_n[0] == old_n[0] && new_N[0] == old_N[0] &&
               new_n[1] == old_n[1] && new_N[1] == old_N[1] ) )
        {
            RealCurvilinearMPIGrid2d<real_type> g( *m_handle,
                { new_n[0], new_N[0], this->bcx()},
                { new_n[1], new_N[1], this->bcy()},
                this->get_perp_comm());
            constructPerp( g);
        }
        constructParallel( this->nz(), this->local().Nz());
    }
    virtual void do_set( std::array<dg::bc,3> new_bc) override final
    {
        // ...
    }
    virtual void do_set_pq( std::array<real_type,3> new_x0, std::array<real_type,3> new_x1) override final
    {
        // ...
    }
    // construct the perpendicular quantities on the 2d plane
    void constructPerp( RealCurvilinearMPIGrid2d<real_type>& g2d)
    {
        m_jac = g2d.jacobian();
        // ... (take over the perpendicular map coordinates)
    }
    // replicate the perpendicular quantities along the z direction
    void constructParallel( unsigned nz, unsigned localNz)
    {
        // ... (m_map[2] is the z-coordinate, evaluated with dg::cooZ3d)
        unsigned size   = this->local().size();
        unsigned size2d = this->nx()*this->ny()*this->local().Nx()*this->local().Ny();
        // resize the 2d quantities to their 3d size
        MPI_Comm comm = this->communicator();
        for( unsigned r=0; r<6; r++)
        {
            m_jac.values()[r].data().resize( size);
            m_jac.values()[r].set_communicator( comm);
        }
        m_map[0].data().resize( size);
        m_map[0].set_communicator( comm);
        m_map[1].data().resize( size);
        m_map[1].set_communicator( comm);
        // lift to the 3d grid: copy the first z-plane to all following planes
        for( unsigned k=1; k<nz*localNz; k++)
            for( unsigned i=0; i<size2d; i++)
            {
                for( unsigned r=0; r<6; r++)
                    m_jac.values()[r].data()[k*size2d+i] =
                        m_jac.values()[r].data()[(k-1)*size2d+i];
                m_map[0].data()[k*size2d+i] = m_map[0].data()[(k-1)*size2d+i];
                m_map[1].data()[k*size2d+i] = m_map[1].data()[(k-1)*size2d+i];
            }
    }
    virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_jacobian( ) const override final{
        return m_jac;}
    virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_metric( ) const override final{
        return detail::square( m_jac, m_map[0], m_handle->isOrthogonal());}
    virtual std::vector<MPI_Vector<thrust::host_vector<real_type>>> do_compute_map()const override final{
        return m_map;}
    dg::SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> m_jac;
    std::vector<MPI_Vector<thrust::host_vector<real_type>>> m_map;
    ClonePtr<dg::geo::aRealGenerator2d<real_type>> m_handle;
};
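Continuing the sketch from the 2d example above (same generator and MPI setup): the 3d product grid takes a 3d Cartesian communicator, while the generator still only describes the perpendicular plane.

int dims3[3] = {0,0,0}, periods3[3] = {0,1,1};
MPI_Dims_create( size, 3, dims3);
MPI_Comm comm3d;
MPI_Cart_create( MPI_COMM_WORLD, 3, dims3, periods3, 1, &comm3d);

// 8 cells in the (periodic) toroidal direction
dg::geo::CurvilinearProductMPIGrid3d g3d( generator, 3, 40, 40, 8,
    dg::DIR, dg::PER, dg::PER, comm3d);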
// explicit conversion of the 3d product grid to the perpendicular grid
template<class real_type>
RealCurvilinearMPIGrid2d<real_type>::RealCurvilinearMPIGrid2d(
    const RealCurvilinearProductMPIGrid3d<real_type>& g):
    dg::aRealMPIGeometry2d<real_type>( std::array{g.gx(), g.gy()} ),
    m_handle(g.generator())
{
    // ... (take over jacobian, metric and map from the 3d grid, then
    //      shrink them to the perpendicular plane)
    unsigned s = this->local().size();
    MPI_Comm comm = g.get_perp_comm();
    for( unsigned i=0; i<m_jac.values().size(); i++)
    {
        m_jac.values()[i].data().resize(s);
        m_jac.values()[i].set_communicator( comm);
    }
    for( unsigned i=0; i<m_metric.values().size(); i++)
    {
        m_metric.values()[i].data().resize(s);
        m_metric.values()[i].set_communicator( comm);
    }
    // ...
    for( unsigned i=0; i<m_map.size(); i++)
    {
        m_map[i].data().resize(s);
        m_map[i].set_communicator( comm);
    }
}
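This conversion constructor is what makes the perpendicular_grid typedef usable; a one-line sketch, given the g3d from the earlier example:

// extract the perpendicular 2d grid (explicit conversion)
dg::geo::CurvilinearMPIGrid2d perp( g3d);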
Referenced symbols:
    DG_DEVICE double cooZ3d(double x, double y, double z)
    void copy(const ContainerTypeIn& source, ContainerTypeOut& target)
    auto evaluate(Functor&& f, const Topology& g)
    dg::geo::CurvilinearProductMPIGrid3d = dg::geo::RealCurvilinearProductMPIGrid3d<double>
        Definition mpi_curvilinear.h:263
    dg::geo::CurvilinearMPIGrid2d = dg::geo::RealCurvilinearMPIGrid2d<double>
        Definition mpi_curvilinear.h:262
    std::array<MPI_Comm, Nd> mpi_cart_split_as(MPI_Comm comm)
    MPI_Vector<thrust::host_vector<real_type>> global2local(const thrust::host_vector<real_type>& global, const MPITopology& g)
    SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> jacobian() const
    SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> metric() const
    std::vector<MPI_Vector<thrust::host_vector<real_type>>> map() const
    RealMPIGrid<real_type, 1> gz() const
    unsigned n(unsigned u=0) const
    std::enable_if_t<Md==1, MPI_Comm> comm() const
    const RealGrid<real_type, Nd>& global() const
    virtual void do_set(std::array<unsigned, Nd> new_n, std::array<unsigned, Nd> new_N) = 0
    MPI_Comm communicator() const
    std::enable_if_t<(Md>=2), MPI_Comm> get_perp_comm() const
    std::array<unsigned, Nd> get_n() const
    std::array<unsigned, Nd> get_N() const
    const RealGrid<real_type, Nd>& local() const
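Of these, global2local is the workhorse of divide_and_conquer above: each process extracts the part of a globally evaluated field that belongs to its local sub-domain. A minimal sketch, where some_function is a placeholder for any callable double(double,double):

// evaluate on the full, global grid (every process does this redundantly) ...
thrust::host_vector<double> global_v =
    dg::evaluate( some_function, g2d.global());
// ... then keep only the locally owned piece, wrapped as an MPI_Vector
dg::MPI_Vector<thrust::host_vector<double>> local_v =
    dg::global2local( global_v, g2d);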
Class documentation

dg::geo::RealCurvilinearMPIGrid2d
    A two-dimensional MPI grid based on curvilinear coordinates.
    Definition mpi_curvilinear.h:26
    Members:
    RealCurvilinearMPIGrid2d()=default
    RealCurvilinearMPIGrid2d(const aRealGenerator2d<real_type>& generator, unsigned n, unsigned Nx, unsigned Ny, dg::bc bcx, dg::bc bcy, MPI_Comm comm)
        Construct a 2D grid. Definition mpi_curvilinear.h:31
    RealCurvilinearMPIGrid2d(const aRealGenerator2d<real_type>& generator, Topology1d tx, Topology1d ty, MPI_Comm comm)
        Construct the computational space as the product of two 1d grids. Definition mpi_curvilinear.h:37
    RealCurvilinearMPIGrid2d(const RealCurvilinearProductMPIGrid3d<real_type>& g)
        Explicit conversion of a 3d product grid to the perpendicular grid.
    const aRealGenerator2d<real_type>& generator() const
        Read access to the generator. Definition mpi_curvilinear.h:51
    virtual RealCurvilinearMPIGrid2d* clone() const override final
        Definition mpi_curvilinear.h:52
    virtual RealCurvilinearGrid2d<real_type>* global_geometry() const override final
        Definition mpi_curvilinear.h:53

dg::geo::RealCurvilinearProductMPIGrid3d
    A 2x1 curvilinear product space MPI grid.
    Definition mpi_curvilinear.h:120
    Members:
    dg::geo::RealCurvilinearMPIGrid2d<real_type> perpendicular_grid
        The two-dimensional grid. Definition mpi_curvilinear.h:121
    RealCurvilinearProductMPIGrid3d()=default
    RealCurvilinearProductMPIGrid3d(const aRealGenerator2d<real_type>& generator, unsigned n, unsigned Nx, unsigned Ny, unsigned Nz, bc bcx, bc bcy, bc bcz, MPI_Comm comm)
        Construct a 3D grid. Definition mpi_curvilinear.h:126
    RealCurvilinearProductMPIGrid3d(const aRealGenerator2d<real_type>& generator, Topology1d tx, Topology1d ty, RealMPIGrid1d<real_type> gz, MPI_Comm comm)
        Construct the computational space as the product of three 1d grids. Definition mpi_curvilinear.h:133
    const aRealGenerator2d<real_type>& generator() const
        Read access to the generator. Definition mpi_curvilinear.h:148
    virtual RealCurvilinearProductMPIGrid3d* clone() const override final
        Definition mpi_curvilinear.h:149
    virtual RealCurvilinearProductGrid3d<real_type>* global_geometry() const
        Definition mpi_curvilinear.h:150

Related types:
    RealCurvilinearGrid2d: a two-dimensional grid based on curvilinear coordinates. Definition curvilinear.h:113
    RealCurvilinearProductGrid3d: a 2x1 curvilinear product space grid. Definition curvilinear.h:173
    Topology1d: helper class for construction. Definition curvilinear.h:65
    aRealGenerator2d: the abstract generator base class. Definition generator.h:20
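As a final sketch of how the distributed geometry is typically consumed: the metric of the 2d grid yields the volume element, which together with the quadrature weights gives the area of the domain. dg::tensor::volume is assumed here to compute the volume element from the metric tensor, and dg::blas1::dot is an MPI-collective reduction.

auto metric = g2d.metric();                 // locally stored SparseTensor
auto vol    = dg::tensor::volume( metric);  // volume element sqrt(det g_ij)
auto w2d    = dg::create::weights( g2d);    // Gauss-Legendre quadrature weights
double area = dg::blas1::dot( vol, w2d);    // same result on every process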