Discontinuous Galerkin Library
#include "dg/algorithm.h"
mpi_gather_kron.h
Go to the documentation of this file.
1#pragma once
2
3#include <cassert>
4#include <thrust/host_vector.h>
5
6#include "exceptions.h"
7#include "config.h"
8#include "mpi_datatype.h"
9#include "tensor_traits.h"
10#include "memory.h"
11#include "mpi_gather.h"
12
13namespace dg
14{
16namespace detail
17{
18// Gather into internal buffer and provide pointers from Contiguous Gather without self-messages
19// i.e. this class is in charge of handling pointers
20template<template< typename> typename Vector>
21struct MPIContiguousKroneckerGather
22{
23 MPIContiguousKroneckerGather( MPI_Comm comm = MPI_COMM_NULL) : m_mpi_gather(comm){ }
24 MPIContiguousKroneckerGather(
25 const std::map<int, thrust::host_vector<int>>& recvIdx, // 1d block index in units of chunk size
26 unsigned chunk_size,
27 MPI_Comm comm_1d)
28 : m_recvIdx(recvIdx), m_chunk_size(chunk_size)
29 {
30 int ndims;
31 MPI_Cartdim_get( comm_1d, &ndims);
32 assert( ndims == 1);
33 int rank;
34 MPI_Comm_rank( comm_1d, &rank);
35 static_assert( dg::is_vector_v<Vector<double>, SharedVectorTag>,
36 "Only Shared vectors allowed");
37 auto recvChunks = detail::MPIContiguousGather::make_chunks(
38 recvIdx, chunk_size);
39 m_mpi_gather = dg::detail::MPIContiguousGather( recvChunks, comm_1d);
40 m_buffer_size = m_mpi_gather.buffer_size(false);
41 }
42 template< template<class> class OtherVector>
43 friend class MPIContiguousKroneckerGather; // enable copy
44
45 template< template<typename > typename OtherVector>
46 MPIContiguousKroneckerGather( const MPIContiguousKroneckerGather<OtherVector>& src)
47 : m_mpi_gather( src.m_mpi_gather),
48 m_recvIdx(src.m_recvIdx),
49 m_chunk_size(src.m_chunk_size),
50 m_buffer_size(src.m_buffer_size)
51 {
52 }
53
54 MPI_Comm communicator() const{return m_mpi_gather.communicator();}
55 // Number of pointers in receive buffer is number of indices in m_recvIdx
56 unsigned buffer_size() const { return detail::flatten_map( m_recvIdx).size(); }
57 unsigned chunk_size() const { return m_chunk_size; }
58 bool isCommunicating() const{ return m_mpi_gather.isCommunicating(); }
59
60 template<class ContainerType>
61 void global_gather_init( const ContainerType& gatherFrom) const
62 {
63 using value_type = dg::get_value_type<ContainerType>;
64 m_buffer.template set<value_type>( m_buffer_size);
65 auto& buffer = m_buffer.template get<value_type>();
66 m_mpi_gather.global_gather_init( gatherFrom, buffer, false);
67 }
68 template<class ContainerType>
69 void global_gather_wait( const ContainerType& gatherFrom,
70 Vector<const dg::get_value_type<ContainerType>*>& buffer_ptrs) const
71 {
72 using value_type = dg::get_value_type<ContainerType>;
73 auto& buffer = m_buffer.template get<value_type>();
74 m_mpi_gather.global_gather_wait( buffer);
75
76 int rank = 0;
77 MPI_Comm_rank( communicator(), &rank);
78 unsigned start = 0, buffer_start = 0;
79 for( auto& idx : m_recvIdx)
80 for( unsigned u=0; u<idx.second.size(); u++)
81 {
82 if( rank != idx.first)
83 {
84 buffer_ptrs[start] = thrust::raw_pointer_cast(
85 buffer.data()) + buffer_start*m_chunk_size;
86 buffer_start ++;
87 }
88 else
89 buffer_ptrs[start] = thrust::raw_pointer_cast(
90 gatherFrom.data()) + idx.second[u]*m_chunk_size;
91 start++;
92 }
93
94 }
95 private:
96 dg::detail::MPIContiguousGather m_mpi_gather;
97 std::map<int,thrust::host_vector<int>> m_recvIdx;
98 unsigned m_chunk_size = 0;
99 unsigned m_buffer_size = 0;
100 mutable detail::AnyVector<Vector> m_buffer;
101};
102}//namespace detail
104
106
142template<template< typename> typename Vector>
143struct MPIKroneckerGather
144{
146 MPIKroneckerGather( MPI_Comm comm = MPI_COMM_NULL) : m_mpi_gather(comm){ }
164 MPIKroneckerGather( unsigned left_size,
165 const std::map<int, thrust::host_vector<int>>& recvIdx,
166 unsigned n,
167 unsigned num_cols,
168 unsigned right_size,
169 MPI_Comm comm_1d)
170 {
171 m_contiguous = (left_size==1);
172 // If not contiguous data we need to make it contiguous (gather into store)
173 if( not m_contiguous)
174 {
175 auto sendIdx = mpi_permute( recvIdx, comm_1d);
176 // In that case only gather unique messages into send store
177 detail::Unique<int> uni = detail::find_unique_order_preserving(
178 detail::flatten_map( sendIdx));
179 auto uni_sendIdx = detail::make_map_t<thrust::host_vector<int>>(
180 detail::combine_gather( uni.gather1, uni.gather2),
181 detail::get_size_map( sendIdx)); // now sendIdx goes into unique messages
182 // bootstrap communication pattern back to recvIdx
183 // (so that recvIdx has index into same store)
184 auto uni_recvIdx = dg::mpi_permute ( uni_sendIdx, comm_1d);
185 m_mpi_gather = detail::MPIContiguousKroneckerGather<Vector>(
186 uni_recvIdx, n*left_size*right_size, comm_1d);
187 // everything becomes a z - derivative ...
188 // we gather all unique indices contiguously into send buffer
189 // uni.unique == unique local indices into gatherFrom
190 thrust::host_vector<int> g2( uni.unique.size()*n*left_size*
191 right_size);
192 for( unsigned l=0; l<uni.unique.size(); l++)
193 for( unsigned j=0; j<n; j++)
194 for( unsigned i=0; i<left_size; i++)
195 for( unsigned k=0; k<right_size; k++)
196 g2[((l*n+j)*left_size+i)*right_size + k] =
197 ((i*num_cols + uni.unique[l])*n + j)*right_size + k;
198 m_g2 = g2;
199 }
200 else
201 m_mpi_gather = detail::MPIContiguousKroneckerGather<Vector>(
202 recvIdx, n*left_size*right_size, comm_1d);
203 }
204
206 template< template<class> class OtherVector>
207 friend class MPIKroneckerGather;
208
210 template< template<typename > typename OtherVector>
211 MPIKroneckerGather( const MPIKroneckerGather<OtherVector>& src)
212 : m_contiguous( src.m_contiguous),
213 m_g2( src.m_g2),
214 m_mpi_gather( src.m_mpi_gather)
215 {
216 }
217
219 MPI_Comm communicator() const{return m_mpi_gather.communicator();}
222 unsigned buffer_size() const { return m_mpi_gather.buffer_size(); }
224 unsigned chunk_size() const { return m_mpi_gather.chunk_size(); }
226 bool isCommunicating() const{ return m_mpi_gather.isCommunicating(); }
227
241 template<class ContainerType>
242 void global_gather_init( const ContainerType& gatherFrom) const
243 {
244 using value_type = dg::get_value_type<ContainerType>;
245 if( not m_contiguous)
246 {
247 m_store.template set<value_type>( m_g2.size());
248 auto& store = m_store.template get<value_type>();
249 thrust::gather( m_g2.begin(), m_g2.end(), gatherFrom.begin(),
250 store.begin());
251 m_mpi_gather.global_gather_init( store);
252 }
253 else
254 m_mpi_gather.global_gather_init( gatherFrom);
255 }
268 template<class ContainerType>
269 void global_gather_wait( const ContainerType& gatherFrom,
270 Vector<const dg::get_value_type<ContainerType>*>& buffer_ptrs) const
271 {
272 if( not m_contiguous)
273 {
274 using value_type = dg::get_value_type<ContainerType>;
275 auto& store = m_store.template get<value_type>();
276 m_mpi_gather.global_gather_wait( store, buffer_ptrs);
277 }
278 else
279 m_mpi_gather.global_gather_wait( gatherFrom, buffer_ptrs);
280
281 }
282 private:
283 bool m_contiguous=false;
284 Vector<int> m_g2;
285 dg::detail::MPIContiguousKroneckerGather<Vector> m_mpi_gather;
286 mutable detail::AnyVector<Vector> m_store;
287};
288} // namespace dg
Error classes of the dg library.
constexpr bool is_vector_v
Utility typedef.
Definition predicate.h:75
typename TensorTraits< std::decay_t< Vector > >::value_type get_value_type
Definition tensor_traits.h:45
std::map< int, MessageType > mpi_permute(const std::map< int, MessageType > &messages, MPI_Comm comm)
Exchange messages between processes in a communicator.
Definition mpi_permutation.h:91
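As suggested by its use in the constructor above, mpi_permute transposes a communication pattern: if recvIdx maps a peer rank to the block indices this process wants from it, the returned map lists what each peer wants from this process. A minimal sketch, with assumed rank and index values (not taken from this file):
// Sketch only: assumes a valid MPI communicator 'comm' with at least two ranks
std::map<int, thrust::host_vector<int>> recvIdx;
recvIdx[1] = thrust::host_vector<int>( 2);
recvIdx[1][0] = 0; recvIdx[1][1] = 3;           // this rank requests blocks 0 and 3 from rank 1
auto sendIdx = dg::mpi_permute( recvIdx, comm); // sendIdx[r]: blocks rank r requests from this rank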
This is the namespace for all functions and classes defined and used by the discontinuous Galerkin library.
Communicator for asynchronous communication of MPISparseBlockMat.
Definition mpi_gather_kron.h:144
void global_gather_init(const ContainerType &gatherFrom) const
Globally (across processes) asynchronously gather data into a buffer.
Definition mpi_gather_kron.h:242
bool isCommunicating() const
True if the gather/scatter operation involves actual MPI communication.
Definition mpi_gather_kron.h:226
MPI_Comm communicator() const
The internal MPI communicator used.
Definition mpi_gather_kron.h:219
void global_gather_wait(const ContainerType &gatherFrom, Vector< const dg::get_value_type< ContainerType > * > &buffer_ptrs) const
Wait for asynchronous communication to finish and gather received data into buffer and return pointers to it.
Definition mpi_gather_kron.h:269
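The intended asynchronous call sequence, sketched here with assumed names (gather, gatherFrom and buffer_ptrs are placeholders; see the construction sketch further below):
// Sketch only
thrust::host_vector<const double*> buffer_ptrs( gather.buffer_size());
gather.global_gather_init( gatherFrom);              // post non-blocking MPI communication
// ... overlap independent local work here ...
gather.global_gather_wait( gatherFrom, buffer_ptrs); // each pointer addresses chunk_size() contiguous elements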
MPIKroneckerGather(unsigned left_size, const std::map< int, thrust::host_vector< int > > &recvIdx, unsigned n, unsigned num_cols, unsigned right_size, MPI_Comm comm_1d)
Construct from communication pattern.
Definition mpi_gather_kron.h:164
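A construction sketch under assumed parameters; the sizes and the neighbour pattern below are illustrative only, and comm_1d is a 1d Cartesian communicator:
// Sketch only: each rank requests block 0 of its right neighbour
int rank, size;
MPI_Comm_rank( comm_1d, &rank);
MPI_Comm_size( comm_1d, &size);
unsigned n = 3, num_cols = 4, left_size = 1, right_size = 8;
std::map<int, thrust::host_vector<int>> recvIdx;
recvIdx[(rank+1)%size] = thrust::host_vector<int>( 1, 0);
dg::MPIKroneckerGather<thrust::host_vector> gather(
    left_size, recvIdx, n, num_cols, right_size, comm_1d);
thrust::host_vector<double> gatherFrom( n*num_cols*left_size*right_size, (double)rank);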
MPIKroneckerGather(const MPIKroneckerGather< OtherVector > &src)
Construct from other execution policy.
Definition mpi_gather_kron.h:211
unsigned chunk_size() const
n*left_size*right_size
Definition mpi_gather_kron.h:224
MPIKroneckerGather(MPI_Comm comm=MPI_COMM_NULL)
no communication
Definition mpi_gather_kron.h:146
unsigned buffer_size() const
Number of pointers in receive buffer equals number of indices in recvIdx.
Definition mpi_gather_kron.h:222