49template<
class ConversionPolicy,
class real_type>
61 MPI_Comm_rank( col_policy.communicator(), &rank);
64 std::vector<int> global_row( A.
num_rows(), 0);
66 for(
int i = 0; i < (int)A.
num_rows(); i++)
70 assert( col_policy.global2localIdx( A.
column_indices()[jj], lIdx, pid));
76 thrust::host_vector<std::array<int,2>> outer_col;
77 thrust::host_vector<int> inner_row, inner_col, outer_row;
78 thrust::host_vector<real_type> inner_val, outer_val;
79 thrust::host_vector<int> row_scatter;
80 inner_row.push_back(0);
81 outer_row.push_back(0);
82 for(
int i = 0; i < (int)A.
num_rows(); i++)
88 if( global_row[i] == 1)
90 outer_col.push_back( {pid,lIdx});
91 outer_val.push_back( A.
values()[jj]);
95 inner_col.push_back( lIdx);
96 inner_val.push_back( A.
values()[jj]);
99 if( global_row[i] == 1)
101 row_scatter.push_back(i);
102 int old_end = outer_row.back();
104 inner_row.push_back( inner_row[i]);
113 col_policy.local_size(), inner_row, inner_col, inner_val);
115 thrust::host_vector<int> lColIdx;
116 auto gather_map = dg::gIdx2unique_idx( outer_col, lColIdx);
118 col_policy.communicator());
121 mpi_gather.buffer_size(), outer_row, lColIdx, outer_val);
123 return { inner, outer,
mpi_gather, row_scatter};
164template<
class ConversionPolicy,
class real_type>
172 auto gIdx = dg::gIdx2gIdx( global_row_indices, row_policy);
173 std::map<int, thrust::host_vector<int>> rows, cols;
174 std::map<int, thrust::host_vector<real_type>> vals;
175 for(
unsigned u=0; u<gIdx.size(); u++)
177 rows[gIdx[u][0]].push_back( gIdx[u][1]);
179 vals[gIdx[u][0]].push_back( global.
values()[u]);
189 dg::detail::flatten_map( row_buf),
190 dg::detail::flatten_map(col_buf),
191 dg::detail::flatten_map(val_buf),
true);
229template<
class ConversionPolicy,
class real_type>
234 MPI_Comm_rank( policy.communicator(), &rank);
238 assert( policy.local2globalIdx(local_cols[i], rank, local_cols[i]) );
249template<
class MPITopology,
typename = std::enable_if_t<
dg::is_vector_v<
250 typename MPITopology::host_vector, MPIVectorTag>>>
252 g_new,
const MPITopology& g_old, std::string method =
"dg")
255 g_new.local(), g_old.global(), method);
270template<
class MPITopology,
typename = std::enable_if_t<
dg::is_vector_v<
273 g_new,
const MPITopology& g_old, std::string method =
"dg")
276 g_new.global(), g_old.local(), method);
283template<
class RecursiveHostVector,
class real_type,
size_t Nd>
285 const RecursiveHostVector& x,
287 std::array<dg::bc, Nd> bcx,
288 std::string method =
"dg")
307template<
class host_vector,
class real_type>
309 const host_vector& x,
312 std::string method =
"dg")
330template<
class host_vector,
class real_type>
332 const host_vector& x,
333 const host_vector&
y,
336 std::string method =
"dg")
355template<
class host_vector,
class real_type>
357 const host_vector& x,
358 const host_vector&
y,
359 const host_vector&
z,
362 std::string method =
"dg")
365 g.
global(), bcx, bcy, bcz, method);
bc
Switch between boundary conditions.
Definition enums.h:15
@ PER
periodic boundaries
Definition enums.h:16
@ NEU
Neumann on both boundaries.
Definition enums.h:20
constexpr bool is_vector_v
Utility typedef.
Definition predicate.h:75
dg::MIHMatrix_t< typename MPITopology::value_type > projection(const MPITopology &g_new, const MPITopology &g_old, std::string method="dg")
Create a projection between two grids.
Definition mpi_projection.h:272
dg::SparseMatrix< int, real_type, thrust::host_vector > interpolation(const RecursiveHostVector &x, const aRealTopology< real_type, Nd > &g, std::array< dg::bc, Nd > bcx, std::string method="dg")
Create interpolation matrix of a list of points in given grid.
Definition interpolation.h:433
void mpi_gather(const thrust::host_vector< std::array< int, 2 > > &gather_map, const ContainerType &gatherFrom, ContainerType &result, MPI_Comm comm)
Un-optimized distributed gather operation.
Definition mpi_permutation.h:149
std::map< int, MessageType > mpi_permute(const std::map< int, MessageType > &messages, MPI_Comm comm)
Exchange messages between processes in a communicator.
Definition mpi_permutation.h:91
void convertLocal2GlobalCols(dg::IHMatrix_t< real_type > &local, const ConversionPolicy &policy)
Convert a matrix with local column indices to a matrix with global column indices.
Definition mpi_projection.h:230
dg::IHMatrix_t< real_type > convertGlobal2LocalRows(const dg::IHMatrix_t< real_type > &global, const ConversionPolicy &row_policy)
Convert a (column-distributed) matrix with global row and column indices to a row distributed matrix.
Definition mpi_projection.h:165
dg::MIHMatrix_t< real_type > make_mpi_matrix(const dg::IHMatrix_t< real_type > &global_cols, const ConversionPolicy &col_policy)
Convert a (row-distributed) matrix with local row and global column indices to a row distributed MPI matrix.
Definition mpi_projection.h:50
I csr2coo(const I &csr)
Definition sparsematrix.h:44
This is the namespace for all functions and classes defined and used by the discontinuous Galerkin library.
Creation of projection matrices.
Distributed memory matrix class, asynchronous communication.
Definition mpi_matrix.h:395
Optimized MPI Gather operation.
Definition mpi_gather.h:455
A distributed vector contains a data container and a MPI communicator.
Definition vector_categories.h:52
The simplest implementation of aRealMPITopology3d.
Definition mpi_grid.h:783
A CSR formatted sparse matrix.
Definition sparsematrix.h:96
size_t num_cols() const
Number of columns in matrix.
Definition sparsematrix.h:276
void set(size_t num_rows, size_t num_cols, const Vector< Index > &row_offsets, const Vector< Index > column_indices, const Vector< Value > &values, bool sort=false)
Set csr values directly.
Definition sparsematrix.h:231
const Vector< Index > & row_offsets() const
Read row_offsets vector.
Definition sparsematrix.h:287
void setFromCoo(size_t num_rows, size_t num_cols, const Vector< Index > &row_indices, const Vector< Index > &column_indices, const Vector< Value > &values, bool sort=false)
Set csr values from coo formatted sparse matrix.
Definition sparsematrix.h:171
const Vector< Index > & column_indices() const
Read column indices vector.
Definition sparsematrix.h:291
const Vector< Value > & values() const
Read values vector.
Definition sparsematrix.h:295
size_t num_rows() const
Number of rows in matrix.
Definition sparsematrix.h:274
An abstract base class for MPI distributed Nd-dimensional dG grids.
Definition mpi_grid.h:91
const RealGrid< real_type, Nd > & global() const
The global grid as a shared memory grid.
Definition mpi_grid.h:279
Useful typedefs of commonly used types.