Extension: Geometries
#include "dg/geometries/geometries.h"
mpi_curvilinear.h
Go to the documentation of this file.
1#pragma once
2
3#include <mpi.h>
4
5#include "dg/algorithm.h"
6#include "curvilinear.h"
7#include "generator.h"
8
9namespace dg
10{
11namespace geo
12{
13
15template<class real_type>
16struct RealCurvilinearProductMPIGrid3d;
18//
21
24template<class real_type>
// A two-dimensional MPI grid based on curvilinear coordinates.
// NOTE(review): the struct declaration line (original line 25) is missing from
// this extraction; per the member index below this type derives from
// dg::aRealMPIGeometry2d<real_type> — confirm against the repository header.
26{
// Construct a 2D grid with n polynomial coefficients and Nx x Ny cells on the
// computational domain [0,width] x [0,height] of the generator.
// NOTE(review): the delegation body (original line 31) is missing; per the
// member index this forwards to the Topology1d overload whose init list
// follows below.
30 RealCurvilinearMPIGrid2d( const aRealGenerator2d<real_type>& generator, unsigned n, unsigned Nx, unsigned Ny, dg::bc bcx, dg::bc bcy, MPI_Comm comm):
32
// Second constructor (from two 1d topologies tx, ty).  NOTE(review): its
// declaration line (original lines 33-36) is missing; the init list below
// builds the base MPI geometry on [0,width] x [0,height] with the given
// communicator and stores a clone of the generator in m_handle.
37 dg::aRealMPIGeometry2d<real_type>( {0, generator.width(), tx.n, tx.N, tx.b}, {0., generator.height(), ty.n, ty.N, ty.b}, comm), m_handle(generator)
38 {
39 //generate global 2d grid and then reduce to local
40 RealCurvilinearGrid2d<real_type> g(generator, tx, ty);
41 divide_and_conquer(g);
42 }
45
// Read access to the stored generator.
47 const aRealGenerator2d<real_type>& generator() const{return *m_handle;}
// Polymorphic copy (covariant return).
48 virtual RealCurvilinearMPIGrid2d* clone()const override final{return new RealCurvilinearMPIGrid2d(*this);}
// Reconstruct the corresponding global (shared-memory) geometry from the
// global grid parameters.  NOTE(review): the line between 49 and 51 (original
// line 50, presumably `return new RealCurvilinearGrid2d<real_type>(`) is
// missing from this extraction.
49 virtual RealCurvilinearGrid2d<real_type>* global_geometry()const override final{
51 *m_handle,
52 {global().nx(), global().Nx(), global().bcx()},
53 {global().ny(), global().Ny(), global().bcy()});
54 }
55 //These are necessary to help compiler find inherited names
57 private:
// Re-generate the grid when the resolution changes: build a new global grid
// and redistribute it.
58 virtual void do_set( unsigned nx, unsigned Nx, unsigned ny, unsigned Ny) override final
59 {
61 RealCurvilinearGrid2d<real_type> g( *m_handle, {nx, Nx}, {ny, Ny});
62 divide_and_conquer(g);//distribute to processes
63 }
// Reduce a globally generated grid to this process' local portion: copy the
// sparse-tensor index layout, then convert every global host vector to a
// local MPI vector via global2local.  NOTE(review): the lines initializing
// `jacobian` and `metric` (original lines 66-67, presumably
// `SparseTensor<...> jacobian = g_.jacobian(), metric = g_.metric();`) are
// missing from this extraction — verify against the repository header.
64 void divide_and_conquer(const RealCurvilinearGrid2d<real_type>& g_)
65 {
68 std::vector<thrust::host_vector<real_type> > map = g_.map();
69 for( unsigned i=0; i<3; i++)
70 for( unsigned j=0; j<3; j++)
71 {
72 m_metric.idx(i,j) = metric.idx(i,j);
73 m_jac.idx(i,j) = jacobian.idx(i,j);
74 }
75 // Here we set the communicator implicitly
76 m_jac.values().resize( jacobian.values().size());
77 for( unsigned i=0; i<jacobian.values().size(); i++)
78 m_jac.values()[i] = global2local( jacobian.values()[i], *this);
79 m_metric.values().resize( metric.values().size());
80 for( unsigned i=0; i<metric.values().size(); i++)
81 m_metric.values()[i] = global2local( metric.values()[i], *this);
82 m_map.resize(map.size());
83 for( unsigned i=0; i<map.size(); i++)
84 m_map[i] = global2local( map[i], *this);
85 }
86
// Virtual hooks of the base geometry: return the precomputed local tensors.
87 virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_jacobian( ) const override final{
88 return m_jac;
89 }
90 virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_metric( ) const override final{
91 return m_metric;
92 }
93 virtual std::vector<MPI_Vector<thrust::host_vector<real_type>>> do_compute_map()const override final{return m_map;}
95 std::vector<MPI_Vector<thrust::host_vector<real_type>>> m_map;
// NOTE(review): declarations of m_jac, m_metric and m_handle (original lines
// 94/96) are missing from this extraction but are used throughout the class.
97};
98
104template<class real_type>
// A 2x1 curvilinear product space MPI grid: a curvilinear 2d grid lifted
// along a regular third (z) dimension.
// NOTE(review): the struct declaration line (original line 105) and the
// `perpendicular_grid` typedef (per the index,
// `using perpendicular_grid = dg::geo::RealCurvilinearMPIGrid2d<real_type>`,
// original line 107) are missing from this extraction.
106{
// Construct a 3D grid with n coefficients and Nx x Ny x Nz cells; z spans
// [0, 2*pi] with one polynomial coefficient.
111 RealCurvilinearProductMPIGrid3d( const aRealGenerator2d<real_type>& generator, unsigned n, unsigned Nx, unsigned Ny, unsigned Nz, bc bcx, bc bcy, bc bcz, MPI_Comm comm):
112 RealCurvilinearProductMPIGrid3d( generator, {n,Nx,bcx}, {n,Ny,bcy}, {0.,2.*M_PI,1,Nz,bcz}, comm){}
113
114
// Product constructor (tx, ty topologies and a 1d z-grid gz).  NOTE(review):
// its declaration line (original lines 115-118) is missing; the init list
// below builds the base product geometry and stores the generator.
119 dg::aRealProductMPIGeometry3d<real_type>( {0, generator.width(), tx.n, tx.N, tx.b}, {0., generator.height(), ty.n, ty.N, ty.b}, gz, comm),
120 m_handle( generator)
121 {
122 m_map.resize(3);
123 RealCurvilinearMPIGrid2d<real_type> g(generator,tx,ty,this->get_perp_comm());
124 constructPerp( g);
125 constructParallel(this->nz(), this->local().Nz());
126 }
127
128
// Read access to the stored generator.
130 const aRealGenerator2d<real_type>& generator() const{return *m_handle;}
// Polymorphic copy (covariant return).
131 virtual RealCurvilinearProductMPIGrid3d* clone()const override final{return new RealCurvilinearProductMPIGrid3d(*this);}
// Reconstruct the corresponding global (shared-memory) 3d geometry.
// NOTE(review): the signature and `return new ...(` lines (original lines
// 132-133) are missing; per the index this is
// `virtual RealCurvilinearProductGrid3d<real_type>* global_geometry() const`.
134 *m_handle,
135 {global().nx(), global().Nx(), global().bcx()},
136 {global().ny(), global().Ny(), global().bcy()},
137 {global().x0(), global().x1(), global().nz(), global().Nz(), global().bcz()});
138 }
139 //These are necessary to help compiler find inherited names
141 private:
// Produce the perpendicular (2d) grid via the explicit conversion
// constructor of RealCurvilinearMPIGrid2d.
142 virtual perpendicular_grid* do_perp_grid() const override final{ return new perpendicular_grid(*this);}
// Re-generate on resolution change; the perpendicular plane is only rebuilt
// if the perpendicular resolution actually changed.
143 virtual void do_set( unsigned nx, unsigned Nx, unsigned ny, unsigned Ny, unsigned nz, unsigned Nz) override final
144 {
146 if( !( nx == this->nx() && Nx == global().Nx() && ny == this->ny() && Ny == global().Ny() ) )
147 {
148 RealCurvilinearMPIGrid2d<real_type> g( *m_handle,{nx,Nx,this->bcx()},{ny,Ny, this->bcy()}, this->get_perp_comm());
149 constructPerp( g);
150 }
151 constructParallel(this->nz(), this->local().Nz());
152 }
// Take the jacobian and map from the freshly generated 2d grid.
153 void constructPerp( RealCurvilinearMPIGrid2d<real_type>& g2d)
154 {
155 m_jac=g2d.jacobian();
156 m_map=g2d.map();
157 }
// Lift the 2d quantities to 3d: resize to the local 3d size, reset the MPI
// communicators to the 3d communicator, and replicate plane 0 into all
// nz*localNz planes.
158 void constructParallel( unsigned nz, unsigned localNz )
159 {
160 m_map.resize(3);
161 m_map[2]=dg::evaluate(dg::cooZ3d, *this);
162 unsigned size = this->local().size();
163 unsigned size2d = this->nx()*this->ny()*this->local().Nx()*this->local().Ny();
164 //resize for 3d values
165 MPI_Comm comm = this->communicator(), comm_mod, comm_mod_reduce;
166 exblas::mpi_reduce_communicator( comm, &comm_mod, &comm_mod_reduce);
167 for( unsigned r=0; r<6;r++)
168 {
169 m_jac.values()[r].data().resize(size);
170 m_jac.values()[r].set_communicator( comm, comm_mod, comm_mod_reduce);
171 }
172 m_map[0].data().resize(size);
173 m_map[0].set_communicator( comm, comm_mod, comm_mod_reduce);
174 m_map[1].data().resize(size);
175 m_map[1].set_communicator( comm, comm_mod, comm_mod_reduce);
176 //lift to 3D grid
177 for( unsigned k=1; k<nz*localNz; k++)
178 for( unsigned i=0; i<size2d; i++)
179 {
180 for(unsigned r=0; r<6; r++)
181 m_jac.values()[r].data()[k*size2d+i] = m_jac.values()[r].data()[(k-1)*size2d+i];
182 m_map[0].data()[k*size2d+i] = m_map[0].data()[(k-1)*size2d+i];
183 m_map[1].data()[k*size2d+i] = m_map[1].data()[(k-1)*size2d+i];
184 }
185 }
// Virtual hooks of the base geometry: jacobian is precomputed; the metric is
// derived from the jacobian on demand.
186 virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_jacobian( ) const override final{
187 return m_jac;
188 }
189 virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_metric( ) const override final{
190 return detail::square( m_jac, m_map[0], m_handle->isOrthogonal());
191 }
192 virtual std::vector<MPI_Vector<thrust::host_vector<real_type>>> do_compute_map()const override final{return m_map;}
194 std::vector<MPI_Vector<thrust::host_vector<real_type>>> m_map;
// NOTE(review): the declaration of m_jac (original line 193) is missing from
// this extraction but is used throughout the class.
195 ClonePtr<dg::geo::aRealGenerator2d<real_type>> m_handle;
196};
198template<class real_type>
199RealCurvilinearMPIGrid2d<real_type>::RealCurvilinearMPIGrid2d( const RealCurvilinearProductMPIGrid3d<real_type>& g):
200 dg::aRealMPIGeometry2d<real_type>( g.global().gx(), g.global().gy(), g.get_perp_comm() ),
201 m_handle(g.generator())
202{
203 m_map=g.map();
204 m_jac=g.jacobian();
205 m_metric=g.metric();
206 //now resize to 2d
207 m_map.pop_back();
208 unsigned s = this->local().size();
209 MPI_Comm comm = g.get_perp_comm(), comm_mod, comm_mod_reduce;
210 exblas::mpi_reduce_communicator( comm, &comm_mod, &comm_mod_reduce);
211 for( unsigned i=0; i<m_jac.values().size(); i++)
212 {
213 m_jac.values()[i].data().resize(s);
214 m_jac.values()[i].set_communicator( comm, comm_mod, comm_mod_reduce);
215 }
216 for( unsigned i=0; i<m_metric.values().size(); i++)
217 {
218 m_metric.values()[i].data().resize(s);
219 m_metric.values()[i].set_communicator( comm, comm_mod, comm_mod_reduce);
220 }
221 // we rely on the fact that the 3d grid uses square to compute its metric
222 // so the (2,2) entry is value 3 that we need to set to 1 (for the
223 // create::volume function to work properly)
224 dg::blas1::copy( 1., m_metric.values()[3]);
225 for( unsigned i=0; i<m_map.size(); i++)
226 {
227 m_map[i].data().resize(s);
228 m_map[i].set_communicator( comm, comm_mod, comm_mod_reduce);
229 }
230}
232//
235namespace x{
238}//namespace x
239
241}//namespace geo
242}//namespace dg
243
#define M_PI
static DG_DEVICE double cooZ3d(double x, double y, double z)
void copy(const ContainerTypeIn &source, ContainerTypeOut &target)
thrust::host_vector< real_type > evaluate(UnaryOp f, const RealGrid1d< real_type > &g)
dg::geo::RealCurvilinearMPIGrid2d< double > CurvilinearMPIGrid2d
Definition: mpi_curvilinear.h:233
dg::geo::RealCurvilinearProductMPIGrid3d< double > CurvilinearProductMPIGrid3d
Definition: mpi_curvilinear.h:234
MPI_Vector< thrust::host_vector< real_type > > global2local(const thrust::host_vector< real_type > &global, const aRealMPITopology3d< real_type > &g)
SparseTensor< MPI_Vector< thrust::host_vector< real_type > > > jacobian() const
SparseTensor< MPI_Vector< thrust::host_vector< real_type > > > metric() const
std::vector< MPI_Vector< thrust::host_vector< real_type > > > map() const
unsigned n() const
unsigned nx() const
unsigned ny() const
unsigned size() const
unsigned Nx() const
unsigned Ny() const
virtual void do_set(unsigned new_nx, unsigned new_Nx, unsigned new_ny, unsigned new_Ny)=0
const RealGrid2d< real_type > & global() const
MPI_Comm get_perp_comm() const
unsigned nz() const
unsigned Nz() const
unsigned Ny() const
MPI_Comm communicator() const
const RealGrid3d< real_type > & global() const
unsigned n() const
virtual void do_set(unsigned new_nx, unsigned new_Nx, unsigned new_ny, unsigned new_Ny, unsigned new_nz, unsigned new_Nz)=0
unsigned ny() const
const RealGrid3d< real_type > & local() const
unsigned Nx() const
unsigned nx() const
unsigned size() const
A two-dimensional grid based on curvilinear coordinates.
Definition: curvilinear.h:113
A two-dimensional MPI grid based on curvilinear coordinates.
Definition: mpi_curvilinear.h:26
RealCurvilinearMPIGrid2d(const RealCurvilinearProductMPIGrid3d< real_type > &g)
explicit conversion of 3d product grid to the perpendicular grid
RealCurvilinearMPIGrid2d(const aRealGenerator2d< real_type > &generator, Topology1d tx, Topology1d ty, MPI_Comm comm)
Construct the computational space as the product of two 1d grids.
Definition: mpi_curvilinear.h:36
virtual RealCurvilinearMPIGrid2d * clone() const override final
Definition: mpi_curvilinear.h:48
virtual RealCurvilinearGrid2d< real_type > * global_geometry() const override final
Definition: mpi_curvilinear.h:49
RealCurvilinearMPIGrid2d(const aRealGenerator2d< real_type > &generator, unsigned n, unsigned Nx, unsigned Ny, dg::bc bcx, dg::bc bcy, MPI_Comm comm)
Construct a 2D grid.
Definition: mpi_curvilinear.h:30
const aRealGenerator2d< real_type > & generator() const
read access to the generator
Definition: mpi_curvilinear.h:47
A 2x1 curvilinear product space grid.
Definition: curvilinear.h:160
A 2x1 curvilinear product space MPI grid.
Definition: mpi_curvilinear.h:106
RealCurvilinearProductMPIGrid3d(const aRealGenerator2d< real_type > &generator, Topology1d tx, Topology1d ty, RealGrid1d< real_type > gz, MPI_Comm comm)
Construct the computational space as the product of three 1d grids.
Definition: mpi_curvilinear.h:118
const aRealGenerator2d< real_type > & generator() const
read access to the generator
Definition: mpi_curvilinear.h:130
virtual RealCurvilinearProductGrid3d< real_type > * global_geometry() const
Definition: mpi_curvilinear.h:132
dg::geo::RealCurvilinearMPIGrid2d< real_type > perpendicular_grid
Definition: mpi_curvilinear.h:107
virtual RealCurvilinearProductMPIGrid3d * clone() const override final
Definition: mpi_curvilinear.h:131
RealCurvilinearProductMPIGrid3d(const aRealGenerator2d< real_type > &generator, unsigned n, unsigned Nx, unsigned Ny, unsigned Nz, bc bcx, bc bcy, bc bcz, MPI_Comm comm)
Construct a 3D grid.
Definition: mpi_curvilinear.h:111
Helper class for construction.
Definition: curvilinear.h:65
unsigned n
Definition: curvilinear.h:66
bc b
Definition: curvilinear.h:68
unsigned N
Definition: curvilinear.h:67
The abstract generator base class.
Definition: generator.h:20