Discontinuous Galerkin Library
#include "dg/algorithm.h"
mpi_projection.h
1#pragma once
2
3#include "dg/backend/typedefs.h"
4#include "dg/backend/index.h"
5#include "dg/backend/mpi_matrix.h"
7#include "mpi_grid.h"
8#include "projection.h"
9
14namespace dg
15{
16
49template<class ConversionPolicy, class real_type>
50dg::MIHMatrix_t<real_type> make_mpi_matrix(
51 const dg::IHMatrix_t<real_type>& global_cols, const ConversionPolicy& col_policy)
52{
53 // Our approach here is:
54 // 1. Convert to coo format
55 // 2. Mark rows in global_cols that communicate with other ranks;
56 // we grab the entire row (not only its remote entries) so that the order of summation, and thus the result, is reproducible
57 // 3. Convert inner rows to local indices and move outer rows to separate vectors (invalidating them in inner)
58 // 4. Use global columns vector to construct MPIGather
59 // 5. Use buffer index to construct local CooMat
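 // (Illustration: with two ranks, a row whose columns map to both rank 0 and rank 1
 //  is marked as communicating, and all of its entries, including the ones local to
 //  the calling rank, are moved to the outer matrix below.)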
60 int rank;
61 MPI_Comm_rank( col_policy.communicator(), &rank);
62
63 const dg::IHMatrix_t<real_type>& A = global_cols;
64 std::vector<int> global_row( A.num_rows(), 0);
65 // 1st pass: mark rows that contain columns owned by other ranks
66 for(int i = 0; i < (int)A.num_rows(); i++)
67 for (int jj = A.row_offsets()[i]; jj < A.row_offsets()[i+1]; jj++)
68 {
69 int lIdx=0, pid = 0;
70 assert( col_policy.global2localIdx( A.column_indices()[jj], lIdx, pid));
71 if( pid != rank)
72 global_row[i] = 1;
73 }
74
75 // 2nd pass: distribute entries
76 thrust::host_vector<std::array<int,2>> outer_col;
77 thrust::host_vector<int> inner_row, inner_col, outer_row;
78 thrust::host_vector<real_type> inner_val, outer_val;
79 thrust::host_vector<int> row_scatter;
80 inner_row.push_back(0);
81 outer_row.push_back(0);
82 for(int i = 0; i < (int)A.num_rows(); i++)
83 {
84 for (int jj = A.row_offsets()[i]; jj < A.row_offsets()[i+1]; jj++)
85 {
86 int lIdx=0, pid = 0;
87 col_policy.global2localIdx( A.column_indices()[jj], lIdx, pid);
88 if( global_row[i] == 1)
89 {
90 outer_col.push_back( {pid,lIdx});
91 outer_val.push_back( A.values()[jj]);
92 }
93 else
94 {
95 inner_col.push_back( lIdx);
96 inner_val.push_back( A.values()[jj]);
97 }
98 }
99 if( global_row[i] == 1)
100 {
101 row_scatter.push_back(i);
102 int old_end = outer_row.back();
103 outer_row.push_back( old_end + A.row_offsets()[i+1] - A.row_offsets()[i]);
104 inner_row.push_back( inner_row[i]);
105 }
106 else
107 {
108 inner_row.push_back( inner_row[i] + A.row_offsets()[i+1] - A.row_offsets()[i]);
109 }
110 }
111 // 3. Now make MPI Gather object
112 dg::IHMatrix_t<real_type> inner( A.num_rows(),
113 col_policy.local_size(), inner_row, inner_col, inner_val);
114
115 thrust::host_vector<int> lColIdx;
116 auto gather_map = dg::gIdx2unique_idx( outer_col, lColIdx);
117 dg::MPIGather<thrust::host_vector> mpi_gather( gather_map,
118 col_policy.communicator());
119
120 dg::IHMatrix_t<real_type> outer( outer_row.size()-1,
121 mpi_gather.buffer_size(), outer_row, lColIdx, outer_val);
122
123 return { inner, outer, mpi_gather, row_scatter};
124}
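 // A minimal usage sketch (the grid g, communicator comm and point vectors xs, ys are hypothetical):
 //   dg::MPIGrid2d g( 0., 1., 0., 1., 3, 20, 20, comm);
 //   auto global = dg::create::interpolation( xs, ys, g.global()); // local rows, global columns
 //   auto W = make_mpi_matrix( global, g); // the grid acts as the ConversionPolicy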
125
126
164template<class ConversionPolicy, class real_type>
165dg::IHMatrix_t<real_type> convertGlobal2LocalRows( const
166 dg::IHMatrix_t<real_type>& global, const ConversionPolicy& row_policy)
167{
168 // 0. Convert to coo matrix
169 thrust::host_vector<int> global_row_indices = dg::detail::csr2coo( global.row_offsets());
170
171 // 1. For all rows determine pid to which it belongs
172 auto gIdx = dg::gIdx2gIdx( global_row_indices, row_policy);
173 std::map<int, thrust::host_vector<int>> rows, cols;
174 std::map<int, thrust::host_vector<real_type>> vals;
175 for( unsigned u=0; u<gIdx.size(); u++)
176 {
177 rows[gIdx[u][0]].push_back( gIdx[u][1]);
178 cols[gIdx[u][0]].push_back( global.column_indices()[u]);
179 vals[gIdx[u][0]].push_back( global.values()[u]);
180 }
181 // 2. Now send those rows to where they belong
182 auto row_buf = dg::mpi_permute( rows, row_policy.communicator());
183 auto col_buf = dg::mpi_permute( cols, row_policy.communicator());
184 auto val_buf = dg::mpi_permute( vals, row_policy.communicator());
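 // (Illustration: a coo entry whose row lives on rank p is appended to rows[p], cols[p],
 //  vals[p]; after the mpi_permute calls every rank holds exactly the entries of its own
 //  rows, and dg::detail::flatten_map below concatenates them into the coo arrays for B.)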
185
186 dg::IHMatrix_t<real_type> B;
187 // indices come in any order (so we need to sort)
188 B.setFromCoo( row_policy.local_size(), global.num_cols(),
189 dg::detail::flatten_map( row_buf),
190 dg::detail::flatten_map(col_buf),
191 dg::detail::flatten_map(val_buf), true);
192 return B;
193 // 4. Reduce on identical rows/cols
194 // .... Maybe later
195}
196
229template<class ConversionPolicy, class real_type>
230void convertLocal2GlobalCols( dg::IHMatrix_t<real_type>& local, const ConversionPolicy& policy)
231{
232 // 1. For all columns determine pid to which it belongs
233 int rank=0;
234 MPI_Comm_rank( policy.communicator(), &rank);
235 thrust::host_vector<int> local_cols = local.column_indices();
236
237 for(unsigned i=0; i<local.column_indices().size(); i++)
238 assert( policy.local2globalIdx(local_cols[i], rank, local_cols[i]) );
239 local.set( local.num_rows(), policy.size(), local.row_offsets(), local_cols, local.values());
240}
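 // A minimal sketch of how the two conversion routines combine (this mirrors
 // dg::create::projection further below; g_new and g_old are hypothetical MPI grids):
 //   auto mat = dg::create::projection( g_new.global(), g_old.local()); // global rows, local columns
 //   convertLocal2GlobalCols( mat, g_old);                 // make the columns global
 //   auto mat_loc = convertGlobal2LocalRows( mat, g_new);  // make the rows local, send entries to their owners
 //   auto mpi_mat = make_mpi_matrix( mat_loc, g_old);      // assemble the distributed matrix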
241
242namespace create
243{
244
246//
249template<class MPITopology, typename = std::enable_if_t<dg::is_vector_v<
250 typename MPITopology::host_vector, MPIVectorTag>>>
251dg::MIHMatrix_t<typename MPITopology::value_type> interpolation( const MPITopology&
252 g_new, const MPITopology& g_old, std::string method = "dg")
253{
254 dg::IHMatrix_t<typename MPITopology::value_type> mat = dg::create::interpolation(
255 g_new.local(), g_old.global(), method);
256 return make_mpi_matrix( mat, g_old);
257}
258// This is now dg::create::prolongation
260//template<class real_type>
261//dg::MIHMatrix_t<real_type> interpolation( const aRealMPITopology3d<real_type>&
262// g_new, const aRealMPITopology2d<real_type>& g_old,std::string method = "dg")
263//{
264// dg::IHMatrix_t<real_type> mat = dg::create::interpolation(
265// g_new.local(), g_old.global(), method);
266// return make_mpi_matrix( mat, g_old);
267//}
268
270template<class MPITopology, typename = std::enable_if_t<dg::is_vector_v<
271 typename MPITopology::host_vector, MPIVectorTag>>>
272dg::MIHMatrix_t<typename MPITopology::value_type> projection( const MPITopology&
273 g_new, const MPITopology& g_old, std::string method = "dg")
274{
275 dg::IHMatrix_t<typename MPITopology::value_type> mat = dg::create::projection(
276 g_new.global(), g_old.local(), method);
277 convertLocal2GlobalCols( mat, g_old);
278 auto mat_loc = convertGlobal2LocalRows( mat, g_new);
279 return make_mpi_matrix( mat_loc, g_old);
280}
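 // (Note: the shared-memory projection above is generated with *global* rows and *local*
 //  columns, the transpose of the interpolation case; hence the two conversion steps before
 //  make_mpi_matrix, which expects local rows and global columns.)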
281
283template<class RecursiveHostVector, class real_type, size_t Nd>
284dg::MIHMatrix_t<real_type> interpolation(
285 const RecursiveHostVector& x,
286 const aRealMPITopology<real_type, Nd>& g,
287 std::array<dg::bc, Nd> bcx,
288 std::string method = "dg")
289{
290 dg::IHMatrix_t<real_type> mat = dg::create::interpolation( x, g.global(),
291 bcx, method);
292 return make_mpi_matrix( mat, g);
293}
294
307template<class host_vector, class real_type>
308dg::MIHMatrix_t<real_type> interpolation(
309 const host_vector& x,
310 const RealMPIGrid1d<real_type>& g,
311 dg::bc bcx = dg::NEU,
312 std::string method = "dg")
313{
314 dg::IHMatrix_t<real_type> mat = dg::create::interpolation( x, g.global(),
315 bcx, method);
316 return make_mpi_matrix( mat, g);
317}
330template<class host_vector, class real_type>
331dg::MIHMatrix_t<real_type> interpolation(
332 const host_vector& x,
333 const host_vector& y,
334 const RealMPIGrid2d<real_type>& g,
335 dg::bc bcx = dg::NEU, dg::bc bcy = dg::NEU,
336 std::string method = "dg")
337{
338 dg::IHMatrix_t<real_type> mat = dg::create::interpolation( x, y, g.global(),
339 bcx, bcy, method);
340 return make_mpi_matrix( mat, g);
341}
342
355template<class host_vector, class real_type>
356dg::MIHMatrix_t<real_type> interpolation(
357 const host_vector& x,
358 const host_vector& y,
359 const host_vector& z,
360 const RealMPIGrid3d<real_type>& g,
361 dg::bc bcx = dg::NEU, dg::bc bcy = dg::NEU, dg::bc bcz = dg::PER,
362 std::string method = "dg")
363{
364 dg::IHMatrix_t<real_type> mat = dg::create::interpolation( x, y, z,
365 g.global(), bcx, bcy, bcz, method);
366 return make_mpi_matrix( mat, g);
367}
368
369
370
372
373}//namespace create
374}//namespace dg
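For orientation, the following is a minimal, self-contained sketch of how the grid-to-grid functions of this file are typically used. It is not part of the header; the grid sizes, the Cartesian communicator setup and the test function fct are illustrative only (run with a process count whose 2d decomposition divides the cell numbers, e.g. 4).

    #include <cmath>
    #include <mpi.h>
    #include "dg/algorithm.h"

    // hypothetical analytic test function
    static double fct( double x, double y){ return sin(x)*sin(y); }

    int main( int argc, char* argv[])
    {
        MPI_Init( &argc, &argv);
        // build a 2d Cartesian communicator from all processes
        int size, dims[2] = {0,0}, periods[2] = {1,1};
        MPI_Comm_size( MPI_COMM_WORLD, &size);
        MPI_Dims_create( size, 2, dims);
        MPI_Comm comm;
        MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, true, &comm);

        // a fine (old) and a coarse (new) grid on [0,2pi]^2
        dg::MPIGrid2d g_old( 0., 2.*M_PI, 0., 2.*M_PI, 3, 40, 40, comm);
        dg::MPIGrid2d g_new( 0., 2.*M_PI, 0., 2.*M_PI, 3, 20, 20, comm);

        // distributed interpolation and projection matrices between the two grids
        auto interp = dg::create::interpolation( g_new, g_old);
        auto proj   = dg::create::projection( g_new, g_old);

        // apply them with the usual blas2 routines
        dg::MHVec v_old = dg::evaluate( fct, g_old);
        dg::MHVec v_new = dg::evaluate( fct, g_new);
        dg::blas2::symv( interp, v_old, v_new); // interpolate v_old onto g_new
        dg::blas2::symv( proj,   v_old, v_new); // project v_old onto g_new

        MPI_Finalize();
        return 0;
    }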
bc
Switch between boundary conditions.
Definition enums.h:15
@ z
z direction
@ PER
periodic boundaries
Definition enums.h:16
@ NEU
Neumann on both boundaries.
Definition enums.h:20
@ y
y direction
@ x
x direction
constexpr bool is_vector_v
Utility typedef.
Definition predicate.h:75
dg::MIHMatrix_t< typename MPITopology::value_type > projection(const MPITopology &g_new, const MPITopology &g_old, std::string method="dg")
Create a projection between two grids.
Definition mpi_projection.h:272
dg::SparseMatrix< int, real_type, thrust::host_vector > interpolation(const RecursiveHostVector &x, const aRealTopology< real_type, Nd > &g, std::array< dg::bc, Nd > bcx, std::string method="dg")
Create interpolation matrix of a list of points in given grid.
Definition interpolation.h:433
void mpi_gather(const thrust::host_vector< std::array< int, 2 > > &gather_map, const ContainerType &gatherFrom, ContainerType &result, MPI_Comm comm)
Un-optimized distributed gather operation.
Definition mpi_permutation.h:149
std::map< int, MessageType > mpi_permute(const std::map< int, MessageType > &messages, MPI_Comm comm)
Exchange messages between processes in a communicator.
Definition mpi_permutation.h:91
void convertLocal2GlobalCols(dg::IHMatrix_t< real_type > &local, const ConversionPolicy &policy)
Convert a matrix with local column indices to a matrix with global column indices.
Definition mpi_projection.h:230
dg::IHMatrix_t< real_type > convertGlobal2LocalRows(const dg::IHMatrix_t< real_type > &global, const ConversionPolicy &row_policy)
Convert a (column-distributed) matrix with global row and column indices to a row distributed matrix.
Definition mpi_projection.h:165
dg::MIHMatrix_t< real_type > make_mpi_matrix(const dg::IHMatrix_t< real_type > &global_cols, const ConversionPolicy &col_policy)
Convert a (row-distributed) matrix with local row and global column indices to a row distributed MPI matrix.
Definition mpi_projection.h:50
MPI Grid objects.
MPI matrix classes.
I csr2coo(const I &csr)
Definition sparsematrix.h:44
This is the namespace for all functions and classes defined and used by the discontinuous Galerkin library.
Creation of projection matrices.
Distributed memory matrix class, asynchronous communication.
Definition mpi_matrix.h:395
Optimized MPI Gather operation.
Definition mpi_gather.h:455
A distributed vector contains a data container and a MPI communicator.
Definition vector_categories.h:52
The simplest implementation of aRealMPITopology3d.
Definition mpi_grid.h:783
A CSR formatted sparse matrix.
Definition sparsematrix.h:96
size_t num_cols() const
Number of columns in matrix.
Definition sparsematrix.h:276
void set(size_t num_rows, size_t num_cols, const Vector< Index > &row_offsets, const Vector< Index > column_indices, const Vector< Value > &values, bool sort=false)
Set csr values directly.
Definition sparsematrix.h:231
const Vector< Index > & row_offsets() const
Read row_offsets vector.
Definition sparsematrix.h:287
void setFromCoo(size_t num_rows, size_t num_cols, const Vector< Index > &row_indices, const Vector< Index > &column_indices, const Vector< Value > &values, bool sort=false)
Set csr values from coo formatted sparse matrix.
Definition sparsematrix.h:171
const Vector< Index > & column_indices() const
Read column indices vector.
Definition sparsematrix.h:291
const Vector< Value > & values() const
Read values vector.
Definition sparsematrix.h:295
size_t num_rows() const
Number of rows in matrix.
Definition sparsematrix.h:274
An abstract base class for MPI distributed Nd-dimensional dG grids.
Definition mpi_grid.h:91
const RealGrid< real_type, Nd > & global() const
The global grid as a shared memory grid.
Definition mpi_grid.h:279
Useful typedefs of commonly used types.
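The ConversionPolicy template parameter that appears throughout mpi_projection.h is not expanded on this page. Judging only from the calls made in the code above, a type used as policy must provide roughly the following interface (a sketch under that assumption; the struct name is made up, and MPI grids such as dg::MPIGrid2d model the concept):

    // Sketch of the interface a ConversionPolicy has to offer, as inferred from
    // the calls in mpi_projection.h; the name AConversionPolicy is hypothetical.
    struct AConversionPolicy
    {
        // Map a global index to a (local index, owning rank) pair;
        // return false if gIdx is out of range (used inside an assert above).
        bool global2localIdx( int gIdx, int& lIdx, int& pid) const;
        // Inverse map: the local index lIdx on rank pid to its global index.
        bool local2globalIdx( int lIdx, int pid, int& gIdx) const;
        unsigned local_size() const;   // number of indices owned by the calling rank
        unsigned size() const;         // total number of indices over all ranks
        MPI_Comm communicator() const; // communicator over which the indices are distributed
    };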