Extension: Geometries
#include "dg/geometries/geometries.h"
mpi_curvilinear.h
Go to the documentation of this file.
1#pragma once
2
3#include <mpi.h>
4
5#include "dg/algorithm.h"
6#include "curvilinear.h"
7#include "generator.h"
8
9namespace dg
10{
11namespace geo
12{
13
15template<class real_type>
16struct RealCurvilinearProductMPIGrid3d;
18//
21
24template<class real_type>
 25struct RealCurvilinearMPIGrid2d : public dg::aRealMPIGeometry2d<real_type>
 26{
33
 37 RealCurvilinearMPIGrid2d( const aRealGenerator2d<real_type>& generator, Topology1d tx, Topology1d ty, MPI_Comm comm):
 38 dg::aRealMPIGeometry2d<real_type>( {0.,0.},
 39 {generator.width(), generator.height()},
 40 {tx.n,ty.n},{tx.N,ty.N}, { tx.b, ty.b},
 41 dg::mpi_cart_split_as<2>(comm)), m_handle(generator)
 42 {
43 //generate global 2d grid and then reduce to local
44 RealCurvilinearGrid2d<real_type> g(generator, tx, ty);
45 divide_and_conquer(g);
46 }
 48 explicit RealCurvilinearMPIGrid2d( const RealCurvilinearProductMPIGrid3d<real_type>& g);
 49
51 const aRealGenerator2d<real_type>& generator() const{return *m_handle;}
52 virtual RealCurvilinearMPIGrid2d* clone()const override final{return new RealCurvilinearMPIGrid2d(*this);}
53 virtual RealCurvilinearGrid2d<real_type>* global_geometry()const override final{
 54 return new RealCurvilinearGrid2d<real_type>(
 55 *m_handle,
56 {global().nx(), global().Nx(), global().bcx()},
57 {global().ny(), global().Ny(), global().bcy()});
58 }
59 //These are necessary to help compiler find inherited names
60 using dg::aRealMPIGeometry2d<real_type>::global;
61 private:
62 virtual void do_set(std::array<unsigned,2> new_n, std::array<unsigned,2> new_N) override final
63 {
 65 RealCurvilinearGrid2d<real_type> g( *m_handle,
 66 {new_n[0], new_N[0]}, {new_n[1], new_N[1]});
67 divide_and_conquer(g);//distribute to processes
68 }
69 virtual void do_set(std::array<dg::bc,2> new_bc) override final
70 {
71 // TODO Do we change MPI periodic topology when we change bcs
73 }
74 virtual void do_set_pq(std::array<real_type,2> new_x0, std::array<real_type,2> new_x1) override final
75 {
76 throw dg::Error(dg::Message(_ping_)<<"This grid cannot change boundaries\n");
77 }
78 void divide_and_conquer(const RealCurvilinearGrid2d<real_type>& g_)
79 {
 80 SparseTensor<thrust::host_vector<real_type> > jacobian = g_.jacobian();
 81 SparseTensor<thrust::host_vector<real_type> > metric = g_.metric();
 82 std::vector<thrust::host_vector<real_type> > map = g_.map();
83 for( unsigned i=0; i<3; i++)
84 for( unsigned j=0; j<3; j++)
85 {
86 m_metric.idx(i,j) = metric.idx(i,j);
87 m_jac.idx(i,j) = jacobian.idx(i,j);
88 }
89 // Here we set the communicator implicitly
90 m_jac.values().resize( jacobian.values().size());
91 for( unsigned i=0; i<jacobian.values().size(); i++)
92 m_jac.values()[i] = global2local( jacobian.values()[i], *this);
93 m_metric.values().resize( metric.values().size());
94 for( unsigned i=0; i<metric.values().size(); i++)
95 m_metric.values()[i] = global2local( metric.values()[i], *this);
96 m_map.resize(map.size());
97 for( unsigned i=0; i<map.size(); i++)
98 m_map[i] = global2local( map[i], *this);
99 }
100
101 virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_jacobian( ) const override final{
102 return m_jac;
103 }
104 virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_metric( ) const override final{
105 return m_metric;
106 }
107 virtual std::vector<MPI_Vector<thrust::host_vector<real_type>>> do_compute_map()const override final{return m_map;}
 108 SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> m_jac, m_metric;
 109 std::vector<MPI_Vector<thrust::host_vector<real_type>>> m_map;
 110 ClonePtr<dg::geo::aRealGenerator2d<real_type>> m_handle;
111};
112
118template<class real_type>
 119struct RealCurvilinearProductMPIGrid3d : public dg::aRealProductMPIGeometry3d<real_type>
 120{
 121 using perpendicular_grid = dg::geo::RealCurvilinearMPIGrid2d<real_type>;
128
129
 133 RealCurvilinearProductMPIGrid3d( const aRealGenerator2d<real_type>& generator, Topology1d tx, Topology1d ty, RealMPIGrid1d<real_type> gz, MPI_Comm comm):
 134 dg::aRealProductMPIGeometry3d<real_type>(
 135 {0.,0., gz.x0()},{ generator.width(),
 136 generator.height(),gz.x1()}, {tx.n,ty.n, gz.n()},
 137 {tx.N, ty.N, gz.N()},{ tx.b, ty.b, gz.bcx()},
 138 dg::mpi_cart_split_as<3>(comm)), m_handle(generator)
 139 {
140 m_map.resize(3);
141 RealCurvilinearMPIGrid2d<real_type> g(generator,tx,ty,this->get_perp_comm());
142 constructPerp( g);
143 constructParallel(this->nz(), this->local().Nz());
144 }
145
146
148 const aRealGenerator2d<real_type>& generator() const{return *m_handle;}
149 virtual RealCurvilinearProductMPIGrid3d* clone()const override final{return new RealCurvilinearProductMPIGrid3d(*this);}
 150 virtual RealCurvilinearProductGrid3d<real_type>* global_geometry()const{
 151 return new RealCurvilinearProductGrid3d<real_type>(
 152 *m_handle,
153 {global().nx(), global().Nx(), global().bcx()},
154 {global().ny(), global().Ny(), global().bcy()},
155 {global().x0(), global().x1(), global().nz(), global().Nz(), global().bcz()});
156 }
157 //These are necessary to help compiler find inherited names
158 using dg::aRealMPIGeometry3d<real_type>::global;
159 private:
160 virtual perpendicular_grid* do_perp_grid() const override final{ return new perpendicular_grid(*this);}
161 virtual void do_set( std::array<unsigned,3> new_n, std::array<unsigned,3> new_N) override final
162 {
163 auto old_n = this->get_n(), old_N = this->get_N();
165 if( !( new_n[0] == old_n[0] && new_N[0] == old_N[0] &&
166 new_n[1] == old_n[1] && new_N[1] == old_N[1] ) )
167 {
168 RealCurvilinearMPIGrid2d<real_type> g( *m_handle,
169 { new_n[0], new_N[0], this->bcx()},
170 { new_n[1], new_N[1], this->bcy()},
171 this->get_perp_comm());
172 constructPerp( g);
173 }
174 constructParallel(this->nz(), this->local().Nz());
175 }
176 virtual void do_set(std::array<dg::bc,3> new_bc) override final
177 {
179 }
180 virtual void do_set_pq(std::array<real_type,3> new_x0, std::array<real_type,3> new_x1) override final
181 {
182 throw dg::Error(dg::Message(_ping_)<<"This grid cannot change boundaries\n");
183 }
184 void constructPerp( RealCurvilinearMPIGrid2d<real_type>& g2d)
185 {
186 m_jac=g2d.jacobian();
187 m_map=g2d.map();
188 }
189 void constructParallel( unsigned nz, unsigned localNz )
190 {
191 m_map.resize(3);
192 m_map[2]=dg::evaluate(dg::cooZ3d, *this);
193 unsigned size = this->local().size();
194 unsigned size2d = this->nx()*this->ny()*this->local().Nx()*this->local().Ny();
195 //resize for 3d values
196 MPI_Comm comm = this->communicator();
197 for( unsigned r=0; r<6;r++)
198 {
199 m_jac.values()[r].data().resize(size);
200 m_jac.values()[r].set_communicator( comm);
201 }
202 m_map[0].data().resize(size);
203 m_map[0].set_communicator( comm);
204 m_map[1].data().resize(size);
205 m_map[1].set_communicator( comm);
206 //lift to 3D grid
207 for( unsigned k=1; k<nz*localNz; k++)
208 for( unsigned i=0; i<size2d; i++)
209 {
210 for(unsigned r=0; r<6; r++)
211 m_jac.values()[r].data()[k*size2d+i] = m_jac.values()[r].data()[(k-1)*size2d+i];
212 m_map[0].data()[k*size2d+i] = m_map[0].data()[(k-1)*size2d+i];
213 m_map[1].data()[k*size2d+i] = m_map[1].data()[(k-1)*size2d+i];
214 }
215 }
216 virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_jacobian( ) const override final{
217 return m_jac;
218 }
219 virtual SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> do_compute_metric( ) const override final{
220 return detail::square( m_jac, m_map[0], m_handle->isOrthogonal());
221 }
222 virtual std::vector<MPI_Vector<thrust::host_vector<real_type>>> do_compute_map()const override final{return m_map;}
 223 SparseTensor<MPI_Vector<thrust::host_vector<real_type>>> m_jac;
 224 std::vector<MPI_Vector<thrust::host_vector<real_type>>> m_map;
225 ClonePtr<dg::geo::aRealGenerator2d<real_type>> m_handle;
226};
228template<class real_type>
229RealCurvilinearMPIGrid2d<real_type>::RealCurvilinearMPIGrid2d( const RealCurvilinearProductMPIGrid3d<real_type>& g):
230 dg::aRealMPIGeometry2d<real_type>( std::array{g.gx(), g.gy()} ),
231 m_handle(g.generator())
232{
233 m_map=g.map();
234 m_jac=g.jacobian();
235 m_metric=g.metric();
236 //now resize to 2d
237 m_map.pop_back();
238 unsigned s = this->local().size();
239 MPI_Comm comm = g.get_perp_comm();
240 for( unsigned i=0; i<m_jac.values().size(); i++)
241 {
242 m_jac.values()[i].data().resize(s);
243 m_jac.values()[i].set_communicator( comm);
244 }
245 for( unsigned i=0; i<m_metric.values().size(); i++)
246 {
247 m_metric.values()[i].data().resize(s);
248 m_metric.values()[i].set_communicator( comm);
249 }
250 // we rely on the fact that the 3d grid uses square to compute its metric
251 // so the (2,2) entry is value 3 that we need to set to 1 (for the
252 // create::volume function to work properly)
253 dg::blas1::copy( 1., m_metric.values()[3]);
254 for( unsigned i=0; i<m_map.size(); i++)
255 {
256 m_map[i].data().resize(s);
257 m_map[i].set_communicator( comm);
258 }
259}
261//
 262using CurvilinearMPIGrid2d = RealCurvilinearMPIGrid2d<double>;
 263using CurvilinearProductMPIGrid3d = RealCurvilinearProductMPIGrid3d<double>;
 264namespace x{
267}//namespace x
268
270}//namespace geo
271}//namespace dg
272
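As the comment in the conversion constructor above (listing lines 250-253) indicates, the metric entries of the two-dimensional grid are arranged so that dg::create::volume works on it. A minimal sketch of how the resulting volume form is typically used, assuming g2d is an MPI curvilinear grid as constructed near the top of this page and dg::one is just a placeholder integrand:

    // Integrate a function over the curvilinear MPI grid (illustrative sketch).
    dg::MHVec vol = dg::create::volume( g2d);    // quadrature weights times the volume element
    dg::MHVec f   = dg::evaluate( dg::one, g2d); // evaluate any functor on the grid
    double integral = dg::blas1::dot( vol, f);   // MPI-global scalar product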
dg::geo::RealCurvilinearProductMPIGrid3d< double > CurvilinearProductMPIGrid3d
Definition mpi_curvilinear.h:263
dg::geo::RealCurvilinearMPIGrid2d< double > CurvilinearMPIGrid2d
Definition mpi_curvilinear.h:262
dg::geo::RealCurvilinearGrid2d
A two-dimensional grid based on curvilinear coordinates.
Definition curvilinear.h:113
dg::geo::RealCurvilinearMPIGrid2d
A two-dimensional MPI grid based on curvilinear coordinates.
Definition mpi_curvilinear.h:26
RealCurvilinearMPIGrid2d(const RealCurvilinearProductMPIGrid3d< real_type > &g)
explicit conversion of 3d product grid to the perpendicular grid
RealCurvilinearMPIGrid2d(const aRealGenerator2d< real_type > &generator, Topology1d tx, Topology1d ty, MPI_Comm comm)
Construct the computational space as the product of two 1d grids.
Definition mpi_curvilinear.h:37
virtual RealCurvilinearMPIGrid2d * clone() const override final
Definition mpi_curvilinear.h:52
virtual RealCurvilinearGrid2d< real_type > * global_geometry() const override final
Definition mpi_curvilinear.h:53
RealCurvilinearMPIGrid2d(const aRealGenerator2d< real_type > &generator, unsigned n, unsigned Nx, unsigned Ny, dg::bc bcx, dg::bc bcy, MPI_Comm comm)
Construct a 2D grid.
Definition mpi_curvilinear.h:31
const aRealGenerator2d< real_type > & generator() const
read access to the generator
Definition mpi_curvilinear.h:51
dg::geo::RealCurvilinearProductGrid3d
A 2x1 curvilinear product space grid.
Definition curvilinear.h:173
dg::geo::RealCurvilinearProductMPIGrid3d
A 2x1 curvilinear product space MPI grid.
Definition mpi_curvilinear.h:120
dg::geo::RealCurvilinearMPIGrid2d< real_type > perpendicular_grid
the two-dimensional grid
Definition mpi_curvilinear.h:121
const aRealGenerator2d< real_type > & generator() const
read access to the generator
Definition mpi_curvilinear.h:148
RealCurvilinearProductMPIGrid3d(const aRealGenerator2d< real_type > &generator, Topology1d tx, Topology1d ty, RealMPIGrid1d< real_type > gz, MPI_Comm comm)
Construct the computational space as the product of three 1d grids.
Definition mpi_curvilinear.h:133
virtual RealCurvilinearProductGrid3d< real_type > * global_geometry() const
Definition mpi_curvilinear.h:150
virtual RealCurvilinearProductMPIGrid3d * clone() const override final
Definition mpi_curvilinear.h:149
RealCurvilinearProductMPIGrid3d(const aRealGenerator2d< real_type > &generator, unsigned n, unsigned Nx, unsigned Ny, unsigned Nz, bc bcx, bc bcy, bc bcz, MPI_Comm comm)
Construct a 3D grid.
Definition mpi_curvilinear.h:126
dg::geo::Topology1d
Helper class for construction.
Definition curvilinear.h:65
dg::geo::aRealGenerator2d
The abstract generator base class.
Definition generator.h:20
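To illustrate the relation between the product grid and its perpendicular grid documented above, a minimal sketch; again, the generator instance, the Cartesian communicator comm and all numerical parameters are assumptions of the example:

    // Build a 3d product grid and recover its perpendicular (2d) grid explicitly.
    unsigned n = 3, Nx = 32, Ny = 32, Nz = 16;
    dg::geo::CurvilinearProductMPIGrid3d g3d( generator, n, Nx, Ny, Nz,
            dg::DIR, dg::PER, dg::PER, comm);   // comm: 3d Cartesian communicator
    dg::geo::CurvilinearMPIGrid2d g2d( g3d);    // explicit conversion to the perpendicular grid

The perpendicular grid obtained this way has the same type that the product grid exposes as its perpendicular_grid typedef (listing line 121).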