Discontinuous Galerkin Library
#include "dg/algorithm.h"
mpi_grid.h
1#pragma once
2
3#include <cmath>
6#include "dg/enums.h"
7#include "grid.h"
8
13// /*
14// * The rationale behind this is that \c shape is used to create / construct
15// * MPI distributed vectors and for this we need the local shape
16// * Since a MPI communicator is carried by \c MPI_Vector this allows to write
17// * device independent code
18// * @param u axis number
19// */
20// TODO I think it does not matter which boundary condition the communicator has!?
21namespace dg
22{
23
38template<class real_type, size_t Nd>
39struct RealMPIGrid;
41
89template<class real_type, size_t Nd>
90struct aRealMPITopology
91{
92 // ///////////////// TYPE TRAITS ////////////////////////////
93
95 using value_type = real_type;
97 using host_vector = MPI_Vector<thrust::host_vector<real_type>>;
102 constexpr static unsigned ndim() { return Nd;}
103
104 // ///////////////// TOPOLOGY CONCEPT ////////////////////////////
105
107 unsigned shape(unsigned u=0) const
108 {
109 return m_g.shape(u);
110 }
120 host_vector abscissas(unsigned u=0) const
121 {
122 // We want to be binary exact
123 // Therefore we can't just call local abscissas
124 int dims[Nd], periods[Nd], coords[Nd];
125 MPI_Cart_get( m_comm, Nd, dims, periods, coords);
126 real_type global_x0 = m_g.p(u);
127 real_type global_hx = m_g.h(u);
128 thrust::host_vector<real_type> abs(m_l.shape(u));
129 auto aa = dg::DLT<real_type>::abscissas(m_l.n(u));
130 auto idx = increment( partition( m_g.N(u), dims[u]));
131 for( unsigned i=0; i<m_l.N(u); i++)
132 {
133 for( unsigned j=0; j<m_l.n(u); j++)
134 {
135 unsigned coord = idx[coords[u]] + i;
136 real_type xmiddle = DG_FMA( global_hx, (real_type)(coord), global_x0);
137 real_type h2 = global_hx/2.;
138 real_type absj = 1.+aa[j];
139 abs[i*m_l.n(u)+j] = DG_FMA( h2, absj, xmiddle);
140 }
141 }
142 return host_vector{abs, m_comms[u]};
143 }
153 host_vector weights(unsigned u=0) const
154 {
155 if( u >= Nd)
156 throw Error( Message(_ping_)<<"u>=Nd not allowed! You typed: "<<u<<" while Nd is "<<Nd);
157 thrust::host_vector<real_type> v( m_l.shape(u));
158 auto ww = dg::DLT<real_type>::weights(m_l.n(u));
159 real_type hu = m_g.h(u); // We need global h here to be binary exact
160 for( unsigned i=0; i<m_l.N(u); i++)
161 for( unsigned j=0; j<m_l.n(u); j++)
162 v[i*m_l.n(u) + j] = hu/2.*ww[j];
163 return host_vector{v, m_comms[u]};
164 }
165 // ///////////////// GETTERS ////////////////////////////
167 std::array<unsigned,Nd> get_shape() const{
168 return m_g.get_shape();
169 }
171 std::array<host_vector,Nd> get_abscissas() const{
172 std::array<host_vector,Nd> abs;
173 for( unsigned u=0; u<Nd; u++)
174 abs[u] = abscissas(u);
175 return abs;
176 }
178 std::array<host_vector,Nd> get_weights() const{
179 std::array<host_vector,Nd> w;
180 for( unsigned u=0; u<Nd; u++)
181 w[u] = weights(u);
182 return w;
183 }
185 std::array<real_type,Nd> get_p() const{
186 return m_g.get_p();
187 }
189 std::array<real_type,Nd> get_q() const{
190 return m_g.get_q();
191 }
193 std::array<real_type,Nd> get_l() const{
194 return m_g.get_l();
195 }
197 std::array<real_type,Nd> get_h() const{
198 return m_g.get_h();
199 }
201 std::array<unsigned, Nd> get_N() const
202 {
203 return m_g.get_N();
204 }
206 std::array<unsigned, Nd> get_n() const
207 {
208 return m_g.get_n();
209 }
211 std::array<dg::bc, Nd> get_bc() const
212 {
213 return m_g.get_bc();
214 }
217 std::array<MPI_Comm, Nd> get_comms() const { return m_comms;}
218
220 real_type p( unsigned u=0) const { return m_g.p(u);}
222 real_type q( unsigned u=0) const { return m_g.q(u);}
224 real_type h( unsigned u=0) const { return m_g.h(u);}
226 real_type l( unsigned u=0) const { return m_g.l(u);}
228 unsigned n( unsigned u=0) const { return m_g.n(u);}
230 unsigned N( unsigned u=0) const { return m_g.N(u);}
232 dg::bc bc( unsigned u=0) const { return m_g.bc(u);}
233
239 MPI_Comm comm(unsigned u) const { return m_comms.at(u);}
241 template<size_t Md = Nd>
242 std::enable_if_t<Md==1,MPI_Comm> comm() const { return m_comms.at(0);}
248 MPI_Comm communicator() const{return m_comm;}
249
254 template<size_t Md = Nd>
255 std::enable_if_t<(Md >= 2), MPI_Comm> get_perp_comm() const
256 {
257 return mpi_cart_kron( {m_comms[0], m_comms[1]});
258 }
259
260
262 RealMPIGrid<real_type,1> grid(unsigned u ) const{
263 if( u < Nd)
264 return RealMPIGrid<real_type,1>{ m_g.p(u), m_g.q(u), m_g.n(u), m_g.N(u), m_g.bc(u), m_comms[u]};
265 else
266 throw Error( Message(_ping_)<<"u>=Nd not allowed! You typed: "<<u<<" while Nd is "<<Nd);
267 }
269 RealMPIGrid<real_type,1> axis(unsigned u ) const{ return grid(u);} // because we can't decide how to name it ...
274 const RealGrid<real_type,Nd>& local() const {return m_l;}
279 const RealGrid<real_type, Nd>& global() const {return m_g;}
280
282 template<size_t Md = Nd>
283 real_type x0() const {return m_g.x0();}
285 template<size_t Md = Nd>
286 real_type x1() const {return m_g.x1();}
288 template<size_t Md = Nd>
289 real_type y0() const {return m_g.y0();}
291 template<size_t Md = Nd>
292 real_type y1() const {return m_g.y1();}
294 template<size_t Md = Nd>
295 real_type z0() const {return m_g.z0();}
297 template<size_t Md = Nd>
298 real_type z1() const {return m_g.z1();}
299
301 template<size_t Md = Nd>
302 real_type lx() const {return m_g.lx();}
304 template<size_t Md = Nd>
305 real_type ly() const {return m_g.ly();}
307 template<size_t Md = Nd>
308 real_type lz() const {return m_g.lz();}
309
311 template<size_t Md = Nd>
312 real_type hx() const {return m_g.hx();}
314 template<size_t Md = Nd>
315 real_type hy() const {return m_g.hy();}
317 template<size_t Md = Nd>
318 real_type hz() const {return m_g.hz();}
319
321 template<size_t Md = Nd>
322 unsigned nx() const {return m_g.nx();}
324 template<size_t Md = Nd>
325 unsigned ny() const {return m_g.ny();}
327 template<size_t Md = Nd>
328 unsigned nz() const {return m_g.nz();}
329
331 template<size_t Md = Nd>
332 unsigned Nx() const {return m_g.Nx();}
334 template<size_t Md = Nd>
335 unsigned Ny() const {return m_g.Ny();}
337 template<size_t Md = Nd>
338 unsigned Nz() const {return m_g.Nz();}
339
341 template<size_t Md = Nd>
342 dg::bc bcx() const {return m_g.bcx();}
344 template<size_t Md = Nd>
345 dg::bc bcy() const {return m_g.bcy();}
347 template<size_t Md = Nd>
348 dg::bc bcz() const {return m_g.bcz();}
349
351 template<size_t Md = Nd>
352 RealMPIGrid<real_type,1> gx() const{
353 static_assert( Nd > 0);
354 return grid(0);
355 }
357 template<size_t Md = Nd>
358 RealMPIGrid<real_type,1> gy() const{
359 static_assert( Nd > 1);
360 return grid(1);
361 }
363 template<size_t Md = Nd>
364 RealMPIGrid<real_type,1> gz() const{
365 static_assert( Nd > 2);
366 return grid(2);
367 }
368
369 // //////////////////SETTERS/////////////////////////////
371 template<size_t Md = Nd>
372 std::enable_if_t< (Md>=2),void> multiplyCellNumbers( real_type fx, real_type fy)
373 {
374 auto Ns = m_g.get_N();
375 Ns[0] = round(fx*(real_type)m_g.N(0));
376 Ns[1] = round(fy*(real_type)m_g.N(1));
377 if( fx != 1 or fy != 1)
378 set( m_g.get_n(), Ns);
379 }
381 template<size_t Md = Nd>
382 std::enable_if_t<(Md == 1), void> set( unsigned new_n, unsigned new_Nx)
383 {
384 set(std::array{new_n}, std::array{new_Nx});
385 }
387 template<size_t Md = Nd>
388 std::enable_if_t<(Md == 2), void> set( unsigned new_n, unsigned new_Nx,
389 unsigned new_Ny)
390 {
391 set(std::array{new_n,new_n}, std::array{new_Nx,new_Ny});
392 }
394 template<size_t Md = Nd>
395 std::enable_if_t<(Md == 3), void> set( unsigned new_n, unsigned new_Nx,
396 unsigned new_Ny, unsigned new_Nz)
397 {
398 set(std::array{new_n,new_n,1u}, std::array{new_Nx,new_Ny,new_Nz});
399 }
401 void set( unsigned new_n, std::array<unsigned,Nd> new_N)
402 {
403 std::array<unsigned , Nd> tmp;
404 for( unsigned u=0; u<Nd; u++)
405 tmp[u] = new_n;
406 set( tmp, new_N);
407 }
409 void set_axis( unsigned coord, unsigned new_n , unsigned new_N)
410 {
411 auto n = m_g.get_n(), N = m_g.get_N();
412 n[coord] = new_n;
413 N[coord] = new_N;
414 set( n, N);
415 }
417 void set( std::array<unsigned,Nd> new_n, std::array<unsigned,Nd> new_N)
418 {
419 if( new_n==m_g.get_n() && new_N == m_g.get_N())
420 return;
421 do_set(new_n, new_N);
422 }
424 void set_pq( std::array<real_type,Nd> new_p, std::array<real_type,Nd> new_q)
425 {
426 do_set_pq( new_p, new_q);
427 }
429 void set_bcs( std::array<dg::bc,Nd> new_bcs)
430 {
431 do_set( new_bcs);
432 }
433
434 // //////////////////UTILITY/////////////////////////////
439 unsigned size() const { return m_g.size();}
444 unsigned local_size() const { return m_l.size();}
445 // used in conversion policy in interpolation
446
452 void display( std::ostream& os = std::cout) const
453 {
454 os << "GLOBAL GRID \n";
455 m_g.display();
456 os << "LOCAL GRID \n";
457 m_l.display();
458 }
459
461 template<size_t Md = Nd>
462 std::enable_if_t<(Md == 1), bool> contains( real_type x) const
463 {
464 return m_g.contains( x);
465 }
466
468 template<class Vector>
469 bool contains( const Vector& x)const { return m_g.contains(x);}
470
481 bool local2globalIdx( int localIdx, int rank, int& globalIdx)const
482 {
483 // TODO shouldn't this test for m_l.size() ? How is this used?
484 // ATTENTION This function cannot depend on who is calling it
485 // so it cannot depend on m_l
486 if( localIdx < 0 || localIdx >= (int)m_g.size()) return false;
487
488 int dims[Nd], periods[Nd], coords[Nd]; // we need the dims
489 if( MPI_Cart_get( m_comm, Nd, dims, periods, coords) != MPI_SUCCESS)
490 return false;
491 // and the coords associated to rank
492 if( MPI_Cart_coords( m_comm, rank, Nd, coords) != MPI_SUCCESS)
493 return false;
494 int gIdx[Nd];
495 int current = localIdx;
496 for( unsigned u=0; u<Nd; u++)
497 {
498 auto idx = increment(partition( m_g.N(u), dims[u]));
499 unsigned shapeu = (idx[coords[u]+1] - idx[coords[u]])*m_g.n(u);
500 int lIdx = current %shapeu; // 1d idx
501 current = current / shapeu;
502 gIdx[u] = lIdx + idx[coords[u]]*m_g.n(u);
503 }
504 globalIdx = gIdx[int(Nd)-1]; // prevent overflow if Nd == 0
505 for( int u=int(Nd)-2; u>=0; u--)
506 globalIdx = globalIdx*m_g.shape(u) + gIdx[u];
507 return true;
508 }
509
520 bool global2localIdx( int globalIdx, int& localIdx, int& rank)const
521 {
522 // an exercise in flattening and unflattening indices
523 if( globalIdx < 0 || globalIdx >= (int)m_g.size()) return false;
524
525 int dims[Nd], periods[Nd], coords[Nd];
526 if( MPI_Cart_get( m_comm, Nd, dims, periods, coords) != MPI_SUCCESS)
527 return false;
528
529 int lIdx[Nd] = {0}, local_shape[Nd] = {0};
530 int current = globalIdx;
531 // ATTENTION This function cannot depend on who is calling it
532 // so it cannot depend on m_l or current coords
533 for( unsigned u=0; u<Nd; u++)
534 {
535 int gIdx = current%(m_g.shape(u)); // 1d idx
536 current = current / (m_g.shape(u));
537 auto idx = increment(partition( m_g.N(u), dims[u]));
538 // Find coord
539 for( unsigned c=0; c<idx.size()-1; c++)
540 if( unsigned(gIdx)< idx[c+1]*m_g.n(u))
541 {
542 coords[u] = c;
543 lIdx[u] = gIdx - idx[c]*m_g.n(u);
544 local_shape[u] = (idx[c+1]-idx[c])*m_g.n(u);
545 break;
546 }
547 }
548 localIdx = lIdx[int(Nd)-1];
549 for( int u=int(Nd)-2; u>=0; u--)
550 localIdx = localIdx*local_shape[u] + lIdx[u];
551
552 if( MPI_Cart_rank( m_comm, coords, &rank) == MPI_SUCCESS )
553 return true;
554 else
555 return false;
556 }
557
567 std::array<unsigned, Nd> start() const
568 {
569 int dims[Nd], periods[Nd], coords[Nd];
570 MPI_Cart_get( m_comm, Nd, dims, periods, coords);
571 std::array<unsigned, Nd> start;
572 for( unsigned u=0;u<Nd; u++)
573 {
574 auto idx = increment(partition( m_g.N(u), dims[u]));
575 start[Nd-1-u] = idx[coords[u]]*m_g.n(u);
576 }
577 return start;
578 }
587 std::array<unsigned, Nd> count() const { return m_l.count(); }
588 protected:
590 ~aRealMPITopology() = default;
591
593 aRealMPITopology() = default;
594
607 aRealMPITopology( std::array<real_type,Nd> p, std::array<real_type,Nd> q,
608 std::array<unsigned,Nd> n, std::array<unsigned,Nd> N,
609 std::array<dg::bc,Nd> bcs, std::array<MPI_Comm, Nd> comms) :
610 m_g(p,q,n,N,bcs), m_comms(comms)
611 {
612 // assert dimensionality of Cartesian communicators
613 int ndims;
614 for ( unsigned u=0; u<Nd;u++)
615 {
616 MPI_Cartdim_get( m_comms[u], &ndims);
617 assert( (unsigned)ndims == 1);
618 }
619 m_comm = dg::mpi_cart_kron( m_comms);
620 MPI_Cartdim_get( m_comm, &ndims);
621 assert( (unsigned)ndims == Nd);
622 // The idea is that every rank gets the same number of cells and the
623 // rest is distributed to the lowest ranks
624 int dims[Nd], periods[Nd], coords[Nd];
625 MPI_Cart_get( m_comm, Nd, dims, periods, coords);
626 for( unsigned u=0;u<Nd; u++)
627 {
628 auto idx = increment(partition( m_g.N(u), dims[u]));
629 N[u] = idx[coords[u]+1]-idx[coords[u]] ;
630
631 p[u] = m_g.p(u) + m_g.h(u)*idx[coords[u]];
632 q[u] = m_g.p(u) + m_g.h(u)*idx[coords[u] +1];
633 // The local right boundary should be the same as the global right boundary
634 if( coords[u] == dims[u]-1)
635 q[u] = m_g.q(u);
636 }
637 m_l = { p, q, m_g.get_n(), N, m_g.get_bc()};
638 }
640 aRealMPITopology( const std::array< RealMPIGrid<real_type, 1>, Nd>& axes)
641 {
642 std::array<RealGrid<real_type,1>,Nd> globals, locals;
643 for( unsigned u=0; u<Nd; u++)
644 {
645 globals[u] = axes[u].global();
646 locals[u] = axes[u].local();
647 m_comms[u] = axes[u].communicator();
648 }
649 m_g = RealGrid<real_type,Nd>( globals);
650 m_l = RealGrid<real_type,Nd>( locals);
651 m_comm = dg::mpi_cart_kron( {m_comms.begin(), m_comms.end()});
652 }
653
655 aRealMPITopology(const aRealMPITopology& src) = default;
659 virtual void do_set(std::array<unsigned,Nd> new_n, std::array<unsigned,Nd> new_N) =0;
661 virtual void do_set_pq( std::array<real_type, Nd> new_p, std::array<real_type,Nd> new_q) =0;
663 virtual void do_set( std::array<dg::bc, Nd> new_bcs) =0;
664
665 // MW: The shared version of this constructor causes nvcc-12.4 to segfault when constructing a Geometry
666 // Funnily the MPI version works (but let's kill it for now)
667 //template< size_t M0, size_t ...Ms>
668 //aRealMPITopology( const aRealMPITopology<real_type,M0>& g0,
669 // const aRealMPITopology<real_type,Ms> & ...gs)
670 //{
671 // auto grid = aRealMPITopology<real_type, Nd - M0>( gs ...);
672 // *this = aRealMPITopology<real_type, Nd>( g0, grid);
673 //}
674 //template< size_t M0, size_t M1>
675 //aRealMPITopology( const aRealMPITopology<real_type,M0>& g0,
676 // const aRealMPITopology<real_type,M1>& g1) : m_g( g0.global(),g1.global()),
677 // m_l( g0.local(), g1.local())
678 //{
679 // static_assert( (M0 + M1) == Nd);
680 // for( unsigned u=0; u<M0; u++)
681 // {
682 // m_comms[u] = g0.comm(u);
683 // }
684 // for( unsigned u=0; u<M1; u++)
685 // {
686 // m_comms[M0+u] = g1.comm(u);
687 // }
688 // m_comm = dg::mpi_cart_kron( {m_comms.begin(), m_comms.end()});
689
690 //}
691
692 //We do not want that because we cannot distinguish if g is meant to be the local or the global grid...
693 //aRealMPITopology( const RealGrid<real_type,Nd> & g, MPI_Comm comm);
694 private:
695 void check_periods( std::array<dg::bc, Nd> bc) const
696 {
697 int rank, dims[Nd], periods[Nd], coords[Nd];
698 MPI_Cart_get( m_comm, Nd, dims, periods, coords);
699 MPI_Comm_rank( m_comm, &rank);
700 if( rank == 0)
701 {
702 for( unsigned u=0; u<Nd; u++)
703 {
704 if( bc[u] == dg::PER) assert( periods[u] == true);
705 else assert( periods[u] == false);
706 }
707 }
708 }
709 std::vector<unsigned> partition( unsigned N, unsigned r) const
710 {
711 // Divide N points as equally as possible among r participants
712 std::vector<unsigned> points(r, N/r );
713 for( unsigned u=0; u<N%r; u++)
714 points[u]++;
715 return points;
716 }
717
718 std::vector<unsigned> increment( const std::vector<unsigned>& partition) const
719 {
720 // replace with std::inclusive_scan ?
721 // return global starting idx and end index
722 // start = inc[coord], end = inc[coord+1]
723 std::vector<unsigned> inc( partition.size()+1, 0);
724 for( unsigned u=0; u<inc.size(); u++)
725 for( unsigned k=0; k<u; k++)
726 inc[u] += partition[k];
727 return inc;
728 }
729
730 RealGrid<real_type, Nd> m_g, m_l; //global grid, local grid
731 std::array<MPI_Comm, Nd> m_comms; // 1d comms
732 MPI_Comm m_comm; //just an integer...(No, more like an address)
733};
735
736// pure virtual implementations must be defined outside the class
737template<class real_type,size_t Nd>
738void aRealMPITopology<real_type,Nd>::do_set( std::array<unsigned,Nd> new_n, std::array<unsigned,Nd> new_N)
739{
740 m_g.set(new_n, new_N);
741 int dims[Nd], periods[Nd], coords[Nd];
742 MPI_Cart_get( m_comm, Nd, dims, periods, coords);
743 std::array<unsigned, Nd> N;
744 for( unsigned u=0;u<Nd; u++)
745 {
746 auto idx = increment(partition( m_g.N(u), dims[u]));
747 N[u] = idx[coords[u]+1]-idx[coords[u]] ;
748 }
749 m_l.set( new_n, N);
750}
751template<class real_type,size_t Nd>
752void aRealMPITopology<real_type,Nd>::do_set_pq( std::array<real_type, Nd> x0, std::array<real_type,Nd> x1)
753{
754 m_g.set_pq( x0, x1);
755 int dims[Nd], periods[Nd], coords[Nd];
756 MPI_Cart_get( m_comm, Nd, dims, periods, coords);
757 std::array<real_type,Nd> p, q;
758 for( unsigned u=0;u<Nd; u++)
759 {
760 auto idx = increment(partition( m_g.N(u), dims[u]));
761 p[u] = m_g.p(u) + m_g.h(u)*idx[coords[u]];
762 q[u] = m_g.p(u) + m_g.h(u)*idx[coords[u] +1];
763 }
764 m_l.set_pq( p, q);
765}
766template<class real_type,size_t Nd>
767void aRealMPITopology<real_type,Nd>::do_set( std::array<dg::bc, Nd> bcs)
768{
769 check_periods( bcs);
770 m_g.set_bcs( bcs);
771 m_l.set_bcs( bcs);
772}
773
775
781template<class real_type, size_t Nd>
782struct RealMPIGrid : public aRealMPITopology<real_type,Nd>
783{
785 RealMPIGrid() = default;
786 template<size_t Md = Nd>
787 RealMPIGrid( real_type x0, real_type x1, unsigned n, unsigned N, MPI_Comm
788 comm): aRealMPITopology<real_type,1>( {x0}, {x1},
789 {n}, {N}, {dg::PER}, {comm})
790 { }
793 template<size_t Md = Nd>
794 RealMPIGrid( real_type x0, real_type x1, unsigned n, unsigned Nx, dg::bc bcx, MPI_Comm
795 comm): aRealMPITopology<real_type,1>( {x0}, {x1},
796 {n}, {Nx}, {bcx}, {comm})
797 {}
800 template<size_t Md = Nd>
801 RealMPIGrid( real_type x0, real_type x1, real_type y0, real_type y1,
802 unsigned n, unsigned Nx, unsigned Ny, MPI_Comm comm):
803 aRealMPITopology<real_type,2>(
804 {x0,y0},{x1,y1},{n,n},{Nx,Ny},{dg::PER,dg::PER},
805 dg::mpi_cart_split_as<2>(comm))
806 { }
807
811 template<size_t Md = Nd>
812 RealMPIGrid( real_type x0, real_type x1, real_type y0, real_type y1,
813 unsigned n, unsigned Nx, unsigned Ny, dg::bc bcx, dg::bc bcy, MPI_Comm
814 comm):
815 aRealMPITopology<real_type,2>(
816 {x0,y0},{x1,y1},{n,n},{Nx,Ny},{bcx,bcy},
817 dg::mpi_cart_split_as<2>(comm))
818 { }
821 template<size_t Md = Nd>
822 RealMPIGrid( real_type x0, real_type x1, real_type y0, real_type y1,
823 real_type z0, real_type z1, unsigned n, unsigned Nx, unsigned Ny,
824 unsigned Nz, MPI_Comm comm):
825 aRealMPITopology<real_type,3>(
826 {x0,y0,z0},{x1,y1,z1},{n,n,1},{Nx,Ny,Nz},{dg::PER,dg::PER,dg::PER},
827 dg::mpi_cart_split_as<3>(comm))
828 { }
829
833 template<size_t Md = Nd>
834 RealMPIGrid( real_type x0, real_type x1, real_type y0, real_type y1,
835 real_type z0, real_type z1, unsigned n, unsigned Nx, unsigned Ny,
836 unsigned Nz, dg::bc bcx, dg::bc bcy, dg::bc bcz, MPI_Comm comm):
837 aRealMPITopology<real_type,3>(
838 {x0,y0,z0},{x1,y1,z1},{n,n,1},{Nx,Ny,Nz},{bcx,bcy,bcz},
839 dg::mpi_cart_split_as<3>(comm))
840 { }
841
843 RealMPIGrid( const std::array<RealMPIGrid<real_type,1>,Nd>& axes) :
844 aRealMPITopology<real_type,Nd>( axes){}
845
852 template<class ...Grid1ds>
853 RealMPIGrid( const RealMPIGrid<real_type,1>& g0, const Grid1ds& ...gs) :
854 aRealMPITopology<real_type,Nd>( std::array<RealMPIGrid<real_type,1>,Nd>{g0, gs...}){}
855
857 RealMPIGrid( std::array<real_type,Nd> p, std::array<real_type,Nd> q,
858 std::array<unsigned,Nd> n, std::array<unsigned,Nd> N,
859 std::array<dg::bc,Nd> bcs, std::array<MPI_Comm,Nd> comms) :
860 aRealMPITopology<real_type,Nd>( p,q,n,N,bcs,comms)
861 {}
862
865 RealMPIGrid( const aRealMPITopology<real_type,Nd>& src):
866 aRealMPITopology<real_type,Nd>(src){ }
867 private:
868 virtual void do_set( std::array<unsigned,Nd> new_n, std::array<unsigned,Nd> new_N) override final{
869 aRealMPITopology<real_type,Nd>::do_set(new_n, new_N);
870 }
871 virtual void do_set_pq( std::array<real_type,Nd> new_x0, std::array<real_type,Nd> new_x1) override final{
872 aRealMPITopology<real_type,Nd>::do_set_pq(new_x0, new_x1);
873 }
874 virtual void do_set( std::array<dg::bc,Nd> new_bcs) override final{
875 aRealMPITopology<real_type,Nd>::do_set(new_bcs);
876 }
877};
878
885template<size_t Nd>
889template<class T>
891template<class T>
893template<class T>
895template<class T>
897template<class T>
899template<class T>
901namespace x{
906template<size_t Nd>
910template<class T>
912template<class T>
914template<class T>
916template<class T>
918template<class T>
920template<class T>
922}//namespace x
924
925}//namespace dg
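
For orientation, a minimal usage sketch of this header; it assumes MPI_Init has been called and that comm2d names a 2d Cartesian communicator created by the caller (e.g. with MPI_Cart_create), and the resolution numbers are arbitrary example values:

// sketch only: a 2d MPI grid with 3 polynomial coefficients and 40x40 cells, periodic boundaries
dg::MPIGrid2d g2d( 0., 2.*M_PI, 0., 2.*M_PI, 3, 40, 40, comm2d);
unsigned points_here = g2d.local_size();   // size of this process' share, == g2d.local().size()
auto wx = g2d.weights(0);                  // local Gauss-Legendre weights of the x axis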
dg::Error
Class intended for use in throw statements.
Definition exceptions.h:83
dg::Message
Small class holding a stringstream.
Definition exceptions.h:29
enums
#define _ping_
Definition exceptions.h:12
base topology classes
bc
Switch between boundary conditions.
Definition enums.h:15
@ PER
periodic boundaries
Definition enums.h:16
@ x
x direction
dg::RealMPIGrid< double, 1 > MPIGrid1d
Definition mpi_grid.h:882
dg::aRealMPITopology< double, 2 > aMPITopology2d
Definition mpi_grid.h:887
dg::aRealMPITopology< double, 3 > aMPITopology3d
Definition mpi_grid.h:888
dg::RealMPIGrid< double, 3 > MPIGrid3d
Definition mpi_grid.h:884
dg::RealMPIGrid< double, 0 > MPIGrid0d
Definition mpi_grid.h:881
dg::RealMPIGrid< double, 2 > MPIGrid2d
Definition mpi_grid.h:883
std::array< MPI_Comm, Nd > mpi_cart_split_as(MPI_Comm comm)
Same as mpi_cart_split but with a different return type.
Definition mpi_kron.h:272
MPI_Comm mpi_cart_kron(std::vector< MPI_Comm > comms)
Form a Kronecker product among Cartesian communicators.
Definition mpi_kron.h:178
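
A hedged sketch of how the two helpers above fit together; comm2d is an assumed 2d Cartesian communicator, and the explicit template argument of mpi_cart_split_as is an assumption:

std::array<MPI_Comm,2> comms1d = dg::mpi_cart_split_as<2>(comm2d); // one 1d communicator per axis
MPI_Comm comm_xy = dg::mpi_cart_kron( {comms1d[0], comms1d[1]});   // recombine into a 2d communicator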
This is the namespace for all functions and classes defined and used by the discontinuous Galerkin library.
static std::vector< real_type > abscissas(unsigned n)
Return Gauss-Legendre nodes on the interval [-1,1].
Definition dlt.h:27
static std::vector< real_type > weights(unsigned n)
Return Gauss-Legendre weights.
Definition dlt.h:83
A simple wrapper around a container object and an MPI_Comm.
Definition mpi_vector.h:37
The simplest implementation of aRealTopology.
Definition grid.h:710
The simplest implementation of aRealMPITopology.
Definition mpi_grid.h:783
RealMPIGrid()=default
Construct an empty grid; this leaves the access functions undefined.
RealMPIGrid(std::array< real_type, Nd > p, std::array< real_type, Nd > q, std::array< unsigned, Nd > n, std::array< unsigned, Nd > N, std::array< dg::bc, Nd > bcs, std::array< MPI_Comm, Nd > comms)
Construct a topology directly from points and dimensions.
Definition mpi_grid.h:857
RealMPIGrid(real_type x0, real_type x1, real_type y0, real_type y1, real_type z0, real_type z1, unsigned n, unsigned Nx, unsigned Ny, unsigned Nz, MPI_Comm comm)
Construct with equal polynomial coefficients.
Definition mpi_grid.h:822
RealMPIGrid(const std::array< RealMPIGrid< real_type, 1 >, Nd > &axes)
Construct a topology as the product of 1d axes grids.
Definition mpi_grid.h:843
RealMPIGrid(real_type x0, real_type x1, unsigned n, unsigned Nx, dg::bc bcx, MPI_Comm comm)
1D grid
Definition mpi_grid.h:794
RealMPIGrid(const aRealMPITopology< real_type, Nd > &src)
Definition mpi_grid.h:865
RealMPIGrid(const RealMPIGrid< real_type, 1 > &g0, const Grid1ds &...gs)
Construct from given 1d grids. Equivalent to RealMPIGrid( std::array{g0,gs...}).
Definition mpi_grid.h:853
RealMPIGrid(real_type x0, real_type x1, real_type y0, real_type y1, real_type z0, real_type z1, unsigned n, unsigned Nx, unsigned Ny, unsigned Nz, dg::bc bcx, dg::bc bcy, dg::bc bcz, MPI_Comm comm)
Construct with equal polynomial coefficients.
Definition mpi_grid.h:834
RealMPIGrid(real_type x0, real_type x1, real_type y0, real_type y1, unsigned n, unsigned Nx, unsigned Ny, MPI_Comm comm)
Construct with equal polynomial coefficients.
Definition mpi_grid.h:801
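
A construction sketch for this overload; comm2d is an assumed 2d Cartesian communicator and both boundary conditions default to dg::PER:

dg::MPIGrid2d g2d( 0., 1., 0., 1., 3, 20, 20, comm2d);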
RealMPIGrid(real_type x0, real_type x1, real_type y0, real_type y1, unsigned n, unsigned Nx, unsigned Ny, dg::bc bcx, dg::bc bcy, MPI_Comm comm)
Construct with equal polynomial coefficients.
Definition mpi_grid.h:812
RealMPIGrid(real_type x0, real_type x1, unsigned n, unsigned N, MPI_Comm comm)
Definition mpi_grid.h:787
An abstract base class for MPI distributed Nd-dimensional dG grids.
Definition mpi_grid.h:91
std::array< real_type, Nd > get_h() const
Get grid constant for all axes.
Definition mpi_grid.h:197
std::array< real_type, Nd > get_p() const
Get left boundary point for all axes.
Definition mpi_grid.h:185
std::enable_if_t<(Md==3), void > set(unsigned new_n, unsigned new_Nx, unsigned new_Ny, unsigned new_Nz)
Set n and N in a 3-dimensional grid.
Definition mpi_grid.h:395
std::array< host_vector, Nd > get_abscissas() const
Construct abscissas for all axes.
Definition mpi_grid.h:171
virtual void do_set(std::array< dg::bc, Nd > new_bcs)=0
Reset the boundary conditions of the grid.
std::enable_if_t<(Md >=2), void > multiplyCellNumbers(real_type fx, real_type fy)
Multiply the number of cells in the first two dimensions with a given factor.
Definition mpi_grid.h:372
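
Sketch, assuming grid is a 2d or 3d grid object constructed as above:

grid.multiplyCellNumbers( 2., 2.); // double Nx and Ny; n and the boundaries are unchanged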
std::array< unsigned, Nd > count() const
Count vector in C-order for dg::file::MPINcHyperslab.
Definition mpi_grid.h:587
real_type l(unsigned u=0) const
Get grid length for axis u.
Definition mpi_grid.h:226
real_type hz() const
Equivalent to h(2)
Definition mpi_grid.h:318
RealMPIGrid< real_type, 1 > gz() const
Equivalent to grid(2)
Definition mpi_grid.h:364
std::enable_if_t<(Md==1), bool > contains(real_type x) const
Check if the grid contains a point.
Definition mpi_grid.h:462
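
Sketch, assuming g1d is a 1d grid on the global interval [0,1]:

bool inside = g1d.contains( 0.5); // true: 0.5 lies in the global (not the local) interval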
void set_axis(unsigned coord, unsigned new_n, unsigned new_N)
Set n and N for axis coord.
Definition mpi_grid.h:409
MPI_Comm comm(unsigned u) const
Get 1d Cartesian communicator for axis u.
Definition mpi_grid.h:239
std::array< real_type, Nd > get_l() const
Get grid length for all axes.
Definition mpi_grid.h:193
void set(std::array< unsigned, Nd > new_n, std::array< unsigned, Nd > new_N)
Set the number of polynomials and cells.
Definition mpi_grid.h:417
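
Sketch of a resolution change on an assumed 2d grid:

grid.set( {4,4}, {2*grid.N(0), 2*grid.N(1)}); // 4 polynomial coefficients, twice the cells per direction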
unsigned Ny() const
Equivalent to N(1)
Definition mpi_grid.h:335
dg::bc bcx() const
Equivalent to bc(0)
Definition mpi_grid.h:342
dg::bc bcy() const
Equivalent to bc(1)
Definition mpi_grid.h:345
std::array< unsigned, Nd > get_shape() const
The total number of points for each axis.
Definition mpi_grid.h:167
unsigned shape(unsigned u=0) const
The total number of points of an axis.
Definition mpi_grid.h:107
real_type z1() const
Equivalent to q(2)
Definition mpi_grid.h:298
aRealMPITopology()=default
default constructor
std::array< host_vector, Nd > get_weights() const
Construct weights for all axes.
Definition mpi_grid.h:178
~aRealMPITopology()=default
disallow deletion through base class pointer
dg::bc bc(unsigned u=0) const
Get boundary condition for axis u.
Definition mpi_grid.h:232
void set(unsigned new_n, std::array< unsigned, Nd > new_N)
Same as set( {new_n, new_n,...}, new_N);
Definition mpi_grid.h:401
aRealMPITopology(const std::array< RealMPIGrid< real_type, 1 >, Nd > &axes)
Construct a topology as the product of 1d axes grids.
Definition mpi_grid.h:640
unsigned n(unsigned u=0) const
Get number of polynomial coefficients for axis u.
Definition mpi_grid.h:228
RealMPIGrid< real_type, 1 > axis(unsigned u) const
An alias for "grid".
Definition mpi_grid.h:269
aRealMPITopology(std::array< real_type, Nd > p, std::array< real_type, Nd > q, std::array< unsigned, Nd > n, std::array< unsigned, Nd > N, std::array< dg::bc, Nd > bcs, std::array< MPI_Comm, Nd > comms)
Construct a topology directly from points and dimensions.
Definition mpi_grid.h:607
std::array< unsigned, Nd > start() const
The global start coordinates, in C-order, of the hyperslab that the local grid represents (used by dg::file::MPINcHyperslab).
Definition mpi_grid.h:567
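
Sketch of how start() and count() (documented above) describe the hyperslab this process owns, e.g. for parallel file output; grid is an assumed 2d grid and the file API itself is not part of this header:

std::array<unsigned,2> start = grid.start(); // C-order: slowest dimension first
std::array<unsigned,2> count = grid.count(); // local number of points per dimension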
dg::bc bcz() const
Equivalent to bc(2)
Definition mpi_grid.h:348
unsigned size() const
The total global number of points.
Definition mpi_grid.h:439
std::enable_if_t< Md==1, MPI_Comm > comm() const
Equivalent to comm(0)
Definition mpi_grid.h:242
unsigned local_size() const
The total local number of points.
Definition mpi_grid.h:444
unsigned Nx() const
Equivalent to N(0)
Definition mpi_grid.h:332
bool contains(const Vector &x) const
Check if the grid contains a point.
Definition mpi_grid.h:469
real_type y0() const
Equivalent to p(1)
Definition mpi_grid.h:289
const RealGrid< real_type, Nd > & global() const
The global grid as a shared memory grid.
Definition mpi_grid.h:279
host_vector weights(unsigned u=0) const
Get the weights of the u axis.
Definition mpi_grid.h:153
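
Sketch: the weights of an axis are the Gauss-Legendre weights scaled by h(u)/2, so summing the local entries gives the local extent of that axis; grid is an assumed 2d grid and access to the wrapped container via data() is an assumption:

auto wx = grid.weights(0);
double local_len = 0.;
for( unsigned i=0; i<wx.data().size(); i++)
    local_len += wx.data()[i]; // adds up to the length of the locally owned x interval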
unsigned N(unsigned u=0) const
Get number of cells for axis u.
Definition mpi_grid.h:230
real_type ly() const
Equivalent to l(1)
Definition mpi_grid.h:305
virtual void do_set(std::array< unsigned, Nd > new_n, std::array< unsigned, Nd > new_N)=0
Set the number of polynomials and cells.
std::array< real_type, Nd > get_q() const
Get right boundary point for all axes.
Definition mpi_grid.h:189
static constexpr unsigned ndim()
Dimensionality == Nd.
Definition mpi_grid.h:102
real_type value_type
value type of abscissas and weights
Definition mpi_grid.h:95
real_type y1() const
Equivalent to q(1)
Definition mpi_grid.h:292
virtual void do_set_pq(std::array< real_type, Nd > new_p, std::array< real_type, Nd > new_q)=0
Reset the boundaries of the grid.
std::array< MPI_Comm, Nd > get_comms() const
Get 1d Cartesian communicator for all axes.
Definition mpi_grid.h:217
MPI_Comm communicator() const
Return the Nd-dimensional MPI Cartesian communicator used in this grid.
Definition mpi_grid.h:248
real_type lx() const
Equivalent to l(0)
Definition mpi_grid.h:302
std::enable_if_t<(Md==1), void > set(unsigned new_n, unsigned new_Nx)
Set n and N in a 1-dimensional grid.
Definition mpi_grid.h:382
std::enable_if_t<(Md >=2), MPI_Comm > get_perp_comm() const
MPI Cartesian communicator in the first two dimensions (x and y)
Definition mpi_grid.h:255
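
Sketch, assuming grid3d is a 3d grid:

MPI_Comm comm_xy = grid3d.get_perp_comm(); // Cartesian communicator of the x-y planes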
host_vector abscissas(unsigned u=0) const
Get the grid abscissas of the u axis.
Definition mpi_grid.h:120
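
Sketch, assuming grid is a 2d grid: the abscissas are the locally owned Gauss-Legendre nodes, computed from the global grid so that they are binary exact with respect to the shared-memory version:

auto x_abs = grid.abscissas(0); // wrapped together with the 1d communicator of the x axis
auto y_abs = grid.abscissas(1);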
real_type hy() const
Equivalent to h(1)
Definition mpi_grid.h:315
RealMPIGrid< real_type, 1 > gy() const
Equivalent to grid(1)
Definition mpi_grid.h:358
real_type h(unsigned u=0) const
Get grid constant for axis u.
Definition mpi_grid.h:224
real_type z0() const
Equivalent to p(2)
Definition mpi_grid.h:295
std::array< dg::bc, Nd > get_bc() const
Get boundary condition for all axes.
Definition mpi_grid.h:211
RealMPIGrid< real_type, 1 > gx() const
Equivalent to grid(0)
Definition mpi_grid.h:352
std::array< unsigned, Nd > get_n() const
Get number of polynomial coefficients for all axes.
Definition mpi_grid.h:206
void display(std::ostream &os=std::cout) const
Display global and local grid parameters.
Definition mpi_grid.h:452
real_type x0() const
Equivalent to p(0)
Definition mpi_grid.h:283
real_type lz() const
Equivalent to l(2)
Definition mpi_grid.h:308
void set_pq(std::array< real_type, Nd > new_p, std::array< real_type, Nd > new_q)
Reset the boundaries of the grid.
Definition mpi_grid.h:424
aRealMPITopology(const aRealMPITopology &src)=default
real_type hx() const
Equivalent to h(0)
Definition mpi_grid.h:312
aRealMPITopology & operator=(const aRealMPITopology &src)=default
std::array< unsigned, Nd > get_N() const
Get number of cells for all axes.
Definition mpi_grid.h:201
unsigned nx() const
Equivalent to n(0)
Definition mpi_grid.h:322
unsigned Nz() const
Equivalent to N(2)
Definition mpi_grid.h:338
bool global2localIdx(int globalIdx, int &localIdx, int &rank) const
Convert the global index of a vector to a local index and the rank of the containing process.
Definition mpi_grid.h:520
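
Sketch of the index conversion and its inverse; grid is any of the grids above and the global index 42 is an arbitrary example:

int localIdx = 0, rank = 0, globalIdx = 0;
if( grid.global2localIdx( 42, localIdx, rank))        // which process owns global index 42?
    grid.local2globalIdx( localIdx, rank, globalIdx); // maps back, globalIdx == 42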
const RealGrid< real_type, Nd > & local() const
The local grid as a shared memory grid.
Definition mpi_grid.h:274
RealMPIGrid< real_type, 1 > grid(unsigned u) const
Get axis u as a 1d grid.
Definition mpi_grid.h:262
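
Sketch, assuming g2d is a 2d grid of value type double:

dg::RealMPIGrid<double,1> ax = g2d.grid(0); // x axis: global boundaries, global Nx, 1d communicator
auto ay = g2d.axis(1);                      // alias for g2d.grid(1)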
void set_bcs(std::array< dg::bc, Nd > new_bcs)
Reset the boundary conditions of the grid.
Definition mpi_grid.h:429
std::enable_if_t<(Md==2), void > set(unsigned new_n, unsigned new_Nx, unsigned new_Ny)
Set n and N in a 2-dimensional grid.
Definition mpi_grid.h:388
real_type p(unsigned u=0) const
Get left boundary point for axis u.
Definition mpi_grid.h:220
real_type q(unsigned u=0) const
Get right boundary point for axis u.
Definition mpi_grid.h:222
real_type x1() const
Equivalent to q(0)
Definition mpi_grid.h:286
unsigned ny() const
Equivalent to n(1)
Definition mpi_grid.h:325
bool local2globalIdx(int localIdx, int rank, int &globalIdx) const
Convert the index of a local vector and the rank of the containing process to a global index.
Definition mpi_grid.h:481
unsigned nz() const
Equivalent to n(2)
Definition mpi_grid.h:328