Discontinuous Galerkin Library
#include "dg/algorithm.h"
mpi_permutation.h
#pragma once
#include <map>
#include <thrust/host_vector.h>
#include "index.h"
#include "mpi_datatype.h"
#include "tensor_traits.h"
#include "tensor_traits_std.h"

namespace dg{
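/*! @brief Check if any process in \c comm sends a non-empty message to another process
 *
 * A minimal usage sketch, assuming MPI is initialized and
 * \c thrust::host_vector<int> is used as the message type:
 * @code
 * int rank, size;
 * MPI_Comm_rank( MPI_COMM_WORLD, &rank);
 * MPI_Comm_size( MPI_COMM_WORLD, &size);
 * // every rank prepares one integer for its right neighbour
 * std::map<int, thrust::host_vector<int>> messages;
 * messages[(rank+1)%size] = thrust::host_vector<int>( 1, rank);
 * // true on every rank whenever size > 1
 * bool needs_comm = dg::is_communicating( messages, MPI_COMM_WORLD);
 * @endcode
 */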
template<class MessageType>
bool is_communicating(
    const std::map<int,MessageType>& messages,
    MPI_Comm comm)
{
    int comm_size;
    MPI_Comm_size( comm, &comm_size);
    thrust::host_vector<int> sendTo( comm_size, 0 );
    thrust::host_vector<int> global( comm_size*comm_size);
    auto flat_vals = detail::flatten_values( messages);
    for( auto& send : flat_vals)
        sendTo[send.first] = send.second.size();
    // everyone knows how many messages everyone is sending
    MPI_Allgather( sendTo.data(), comm_size, MPI_INT,
                   global.data(), comm_size, MPI_INT,
                   comm);
    bool isCommunicating = false;
    for( int i=0; i<comm_size; i++)
        for( int k=0; k<comm_size; k++)
            if( k != i and global[i*comm_size+k] != 0)
                isCommunicating = true;
    return isCommunicating;
}
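/*! @brief Exchange messages between processes in a communicator
 *
 * Process \c q finds in the returned map under key \c r the message that
 * process \c r stored under key \c q.
 * A minimal usage sketch, assuming MPI is initialized and
 * \c thrust::host_vector<int> is used as the message type:
 * @code
 * int rank, size;
 * MPI_Comm_rank( MPI_COMM_WORLD, &rank);
 * MPI_Comm_size( MPI_COMM_WORLD, &size);
 * // every rank sends its own rank number to every rank
 * std::map<int, thrust::host_vector<int>> messages;
 * for( int r=0; r<size; r++)
 *     messages[r] = thrust::host_vector<int>( 1, rank);
 * auto received = dg::mpi_permute( messages, MPI_COMM_WORLD);
 * // received[r][0] == r for every rank r
 * @endcode
 */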
template<class MessageType>
std::map<int,MessageType> mpi_permute(
    const std::map<int,MessageType>& messages,
    MPI_Comm comm)
{
    int rank, comm_size;
    MPI_Comm_rank( comm, &rank);
    MPI_Comm_size( comm, &comm_size);
    thrust::host_vector<int> sendTo( comm_size, 0 ), recvFrom( comm_size, 0);
    thrust::host_vector<int> global( comm_size*comm_size);
    auto flat_vals = detail::flatten_values( messages);
    for( auto& send : flat_vals)
    {
        sendTo[send.first] = send.second.size();
    }
    // everyone knows how many messages everyone is sending
    MPI_Allgather( sendTo.data(), comm_size, MPI_INT,
                   global.data(), comm_size, MPI_INT,
                   comm);
    for( int i=0; i<comm_size; i++)
        recvFrom[i] = global[i*comm_size+rank];
    // Now we can use Alltoallv to send
    thrust::host_vector<int> accS( comm_size), accR( comm_size);
    thrust::exclusive_scan( sendTo.begin(), sendTo.end(), accS.begin());
    thrust::exclusive_scan( recvFrom.begin(), recvFrom.end(), accR.begin());

    auto send = detail::flatten_map( flat_vals);
    using value_type = dg::get_value_type<decltype(send)>;
    thrust::host_vector<value_type> recv(
        thrust::reduce( recvFrom.begin(), recvFrom.end()));
    void * send_ptr = thrust::raw_pointer_cast( send.data());
    const int * sendTo_ptr = thrust::raw_pointer_cast( sendTo.data());
    const int * accS_ptr = thrust::raw_pointer_cast( accS.data());
    void * recv_ptr = thrust::raw_pointer_cast( recv.data());
    const int * recvFrom_ptr = thrust::raw_pointer_cast( recvFrom.data());
    const int * accR_ptr = thrust::raw_pointer_cast( accR.data());
    MPI_Datatype type = dg::getMPIDataType<value_type>();
    MPI_Alltoallv( send_ptr, sendTo_ptr, accS_ptr, type,
                   recv_ptr, recvFrom_ptr, accR_ptr, type,
                   comm);
    return detail::make_map_t<MessageType>( recv, detail::make_size_map( recvFrom) );
}
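/*! @brief Un-optimized distributed gather operation
 *
 * Element \c i of \c result is the element of \c gatherFrom addressed by
 * \c gather_map[i] = {rank, local index}.
 * A minimal usage sketch, assuming MPI is initialized:
 * @code
 * int rank, size;
 * MPI_Comm_rank( MPI_COMM_WORLD, &rank);
 * MPI_Comm_size( MPI_COMM_WORLD, &size);
 * // every rank stores 10 local values equal to its rank ...
 * thrust::host_vector<double> local( 10, (double)rank);
 * // ... and fetches the first value of its right neighbour
 * thrust::host_vector<std::array<int,2>> gather_map( 1);
 * gather_map[0] = { (rank+1)%size, 0};
 * thrust::host_vector<double> result;
 * dg::mpi_gather( gather_map, local, result, MPI_COMM_WORLD);
 * // result[0] == (double)((rank+1)%size)
 * @endcode
 */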
template<class ContainerType>
void mpi_gather( const thrust::host_vector<std::array<int,2>>& gather_map,
    const ContainerType& gatherFrom, ContainerType& result, MPI_Comm comm)
{
    thrust::host_vector<int> bufferIdx;
    // convert global (rank, index) requests into per-rank lists of local indices
    auto gather_m = gIdx2unique_idx( gather_map, bufferIdx);
    // each rank learns which of its local indices are requested
    auto send_map = mpi_permute( gather_m, comm);
    auto gather = detail::flatten_map( send_map);
    auto size_map = detail::get_size_map( send_map);
    // gather the locally owned values that other ranks requested ...
    ContainerType res( gather.size());
    thrust::gather( gather.begin(), gather.end(), gatherFrom.begin(), res.begin());
    // ... and send them back to the requesting ranks
    std::map<int,ContainerType> result_map = detail::make_map_t<ContainerType>( res, size_map);
    std::map<int,ContainerType> recv = mpi_permute( result_map, comm);
    ContainerType flat = detail::flatten_map( recv);
    // restore the original ordering of the requests
    result.resize( bufferIdx.size());
    thrust::gather( bufferIdx.begin(), bufferIdx.end(), flat.begin(), result.begin());
}
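/*! @brief Un-optimized distributed scatter operation
 *
 * Element \c i of \c toScatter is written into \c result at the position
 * addressed by \c scatter_map[i] = {rank, local index}; the scatter map
 * must be injective.
 * A minimal usage sketch, assuming MPI is initialized:
 * @code
 * int rank, size;
 * MPI_Comm_rank( MPI_COMM_WORLD, &rank);
 * MPI_Comm_size( MPI_COMM_WORLD, &size);
 * // every rank sends its rank number to local position 'rank' on its
 * // right neighbour; globally every target position is hit exactly once
 * thrust::host_vector<double> to_scatter( 1, (double)rank);
 * thrust::host_vector<std::array<int,2>> scatter_map( 1);
 * scatter_map[0] = { (rank+1)%size, rank};
 * thrust::host_vector<double> result( size, 0.);
 * dg::mpi_scatter( scatter_map, to_scatter, result, MPI_COMM_WORLD);
 * // result[(rank+size-1)%size] == (double)((rank+size-1)%size)
 * @endcode
 */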
template<class ContainerType>
void mpi_scatter( const thrust::host_vector<std::array<int,2>>& scatter_map,
    const ContainerType& toScatter, ContainerType& result,
    MPI_Comm comm,
    bool resize_result = false
    )
{
    thrust::host_vector<int> bufferIdx;
    // the scatter map must be injective
    auto scatter_m = gIdx2unique_idx( scatter_map, bufferIdx);
    // each rank learns which of its local indices will be written to
    auto recv_map = mpi_permute( scatter_m, comm);
    auto scatter = detail::flatten_map( recv_map);

    auto size_map = detail::get_size_map( scatter_m);

    // reorder the values to match the order of the index messages
    ContainerType to_send( toScatter);
    thrust::scatter( toScatter.begin(), toScatter.end(), bufferIdx.begin(), to_send.begin());

    // send the values to their target ranks ...
    auto send = detail::make_map_t<ContainerType>( to_send, size_map);
    auto result_map = mpi_permute( send, comm);
    auto res = detail::flatten_map( result_map);
    if( resize_result)
        result.resize( res.size());
    // ... and write them to the requested local positions
    thrust::scatter( res.begin(), res.end(), scatter.begin(), result.begin());
}
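/*! @brief Invert a globally bijective index map
 *
 * A minimal usage sketch, assuming MPI is initialized and every rank maps
 * its \c N local elements one-to-one onto the elements of its right neighbour:
 * @code
 * int rank, size;
 * MPI_Comm_rank( MPI_COMM_WORLD, &rank);
 * MPI_Comm_size( MPI_COMM_WORLD, &size);
 * unsigned N = 4;
 * thrust::host_vector<std::array<int,2>> p( N);
 * for( unsigned i=0; i<N; i++)
 *     p[i] = { (rank+1)%size, (int)i};
 * auto p_inv = dg::mpi_invert_permutation( p, MPI_COMM_WORLD);
 * // p_inv[i] == { (rank+size-1)%size, (int)i} on every rank
 * @endcode
 */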
template<class Integer>
thrust::host_vector<std::array<Integer,2>>
    mpi_invert_permutation( const thrust::host_vector<std::array<Integer,2>>& p,
    MPI_Comm comm)
{
    thrust::host_vector<Integer> seq( p.size());
    thrust::host_vector<std::array<Integer,2>> seq_arr( p.size());
    thrust::sequence( seq.begin(), seq.end());
    int rank;
    MPI_Comm_rank( comm, &rank);
    // package the identity sequence as global (rank, index) pairs
    for( unsigned u=0; u<seq.size(); u++)
        seq_arr[u] = {(Integer)rank, seq[u]};
    thrust::host_vector<std::array<Integer,2>> sort_map;
    // scattering the identity along p yields the inverse permutation
    mpi_scatter( p, seq_arr, sort_map, comm, true);
    return sort_map;
}
} // namespace dg