Distributed memory matrix class, asynchronous communication. More...

Public Member Functions

MPIDistMat ()=default
    No memory allocation.

MPIDistMat (const LocalMatrixInner &m)
    Only local computations.

MPIDistMat (const LocalMatrixInner &inside, const LocalMatrixOuter &outside, const MPIGather< Vector > &mpi_gather, const Vector< int > &scatter)
    Constructor.

MPIDistMat (const LocalMatrixInner &m, MPI_Comm comm)
    Allreduce dist type.

template<template< class > class OtherVector, class OtherMatrixInner, class OtherMatrixOuter>
MPIDistMat (const MPIDistMat< OtherVector, OtherMatrixInner, OtherMatrixOuter > &src)
    Copy constructor.

const LocalMatrixInner & inner_matrix () const
    Read access to the inner matrix.

const LocalMatrixOuter & outer_matrix () const
    Read access to the outer matrix.

const MPIGather< Vector > & mpi_gather () const
    Read access to the communication object.

MPI_Comm communicator () const
    Read access to the MPI Communicator.

const Vector< int > & scatter () const

template<class ContainerType1, class ContainerType2>
void symv (const ContainerType1 &x, ContainerType2 &y) const
    Matrix Vector product.

template<class Functor, class ContainerType1, class ContainerType2>
void stencil (const Functor f, const ContainerType1 &x, ContainerType2 &y) const
    Stencil computations.

Friends

template<template< class > class V, class LI, class LO>
class MPIDistMat
Detailed Description

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
struct dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >

Distributed memory matrix class, asynchronous communication.
See Separation of communication and computation.

Template Parameters
    LocalMatrixInner    The class of the matrix for local computations of the inner points. symv( m, x, y) needs to be callable on the container class of the MPI_Vector.
    LocalMatrixOuter    The class of the matrix for local computations of the outer points. symv( 1, m, x, 1, y) needs to be callable on the container class of the MPI_Vector.
    Vector              The storage class for internal buffers; it must match the execution policy of the containers in the symv functions.

Note
    This class overlaps communication with computation of the inner matrix.
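A minimal usage sketch of how such a distributed matrix is typically applied through dg::blas2::symv. The grid and factory names (dg::MPIGrid2d, dg::create::dx, dg::evaluate, the header dg/algorithm.h) are assumptions based on common dg library usage and are not taken from this page:

    #include <cmath>
    #include <mpi.h>
    #include "dg/algorithm.h"   // assumed umbrella header of the dg library

    static double function( double x, double y){ return std::sin(x)*std::sin(y);}

    int main( int argc, char* argv[])
    {
        MPI_Init( &argc, &argv);
        // build a 2d periodic Cartesian communicator with plain MPI
        int size, dims[2] = {0,0}, periods[2] = {1,1};
        MPI_Comm_size( MPI_COMM_WORLD, &size);
        MPI_Dims_create( size, 2, dims);
        MPI_Comm comm;
        MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 1, &comm);

        // assumed dg factories: a distributed grid, a derivative matrix and a field
        dg::MPIGrid2d grid( 0., 2.*M_PI, 0., 2.*M_PI, 3, 100, 100, comm);
        auto dx = dg::create::dx( grid);     // distributed matrix (MPIDistMat or equivalent)
        auto x  = dg::evaluate( function, grid);
        auto y  = x;

        // apply the matrix; communication of the outer points is overlapped
        // with the computation of the inner matrix
        dg::blas2::symv( dx, x, y);

        MPI_Finalize();
        return 0;
    }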
◆ MPIDistMat() [1/5]

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::MPIDistMat ( )    [default]

No memory allocation.
◆ MPIDistMat() [2/5]

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::MPIDistMat ( const LocalMatrixInner & m )    [inline]

Only local computations.
No communication is performed; only the given local inner matrix is applied.
◆ MPIDistMat() [3/5]

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::MPIDistMat ( const LocalMatrixInner & inside, const LocalMatrixOuter & outside, const MPIGather< Vector > & mpi_gather, const Vector< int > & scatter )    [inline]

Constructor.

Parameters
    inside      The local matrix for the inner elements.
    outside     A local matrix for the elements from other processes.
    mpi_gather  The communication object; it needs to gather values across processes. The global_gather_init(), global_gather_wait(), buffer_size() and isCommunicating() member functions are called. If !isCommunicating() the gather functions are not called and only the inner matrix is applied.
    scatter     Scattering of the rows from the row buffer back to the result vector.

Note
    The scatter map is needed because blas2::symv( m_o, buffer, y) currently initializes all elements in y with zero and would therefore overwrite the result of blas2::symv( m_i, x, y). We therefore allocate a small computational buffer, compute blas2::symv( m_o, buffer, result_buffer), and then scatter the values of result_buffer into y.
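A schematic illustration of this note, using the names m_i, m_o, buffer, result_buffer and scatter from above. It is a sketch of the intent, not the actual member implementation, and it assumes generic host containers and that the outer rows are added to the inner result:

    // Sketch only: contrasts the overwriting variant the note warns about
    // with the buffered-plus-scatter variant that is actually used.
    template<class MatrixI, class MatrixO, class IndexVec, class Container>
    void apply_with_scatter( const MatrixI& m_i, const MatrixO& m_o,
        const IndexVec& scatter, const Container& x, const Container& buffer,
        Container& result_buffer, Container& y)
    {
        // dg::blas2::symv( m_o, buffer, y) after the inner symv would reset y
        // to zero first and destroy the inner result, hence:
        dg::blas2::symv( m_i, x, y);                  // inner result fills all of y
        dg::blas2::symv( m_o, buffer, result_buffer); // outer result into a small buffer
        // scatter the buffered rows back into y (here assumed additive)
        for( unsigned i=0; i<result_buffer.size(); i++)
            y[ scatter[i]] += result_buffer[i];
    }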
◆ MPIDistMat() [4/5]

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::MPIDistMat ( const LocalMatrixInner & m, MPI_Comm comm )    [inline]

Allreduce dist type.
In this mode the mpi matrix locally applies the inner matrix and calls MPI_Allreduce on the result (the outer matrix remains empty).
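In terms of plain MPI calls this mode corresponds roughly to the following sketch; dg::HVec (a host vector of doubles) and dg::blas2::symv are assumed from the dg library:

    #include <mpi.h>
    #include "dg/algorithm.h"   // assumed dg header

    // Schematic of the "allreduce dist type": apply the inner matrix locally,
    // then sum the result over all processes in the communicator.
    template<class LocalMatrix>
    void allreduce_symv( const LocalMatrix& m, MPI_Comm comm,
                         const dg::HVec& x, dg::HVec& y)
    {
        dg::blas2::symv( m, x, y);   // local application of the inner matrix
        // the outer matrix remains empty; contributions are summed instead
        MPI_Allreduce( MPI_IN_PLACE, &y[0], (int)y.size(),
                       MPI_DOUBLE, MPI_SUM, comm);
    }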
◆ MPIDistMat() [5/5]

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
template<template< class > class OtherVector, class OtherMatrixInner, class OtherMatrixOuter>
dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::MPIDistMat ( const MPIDistMat< OtherVector, OtherMatrixInner, OtherMatrixOuter > & src )    [inline]

Copy constructor.
The idea is that a device matrix can be constructed by copying a host matrix.

Template Parameters
    OtherVector        Vector must be copy-constructible from OtherVector.
    OtherMatrixInner   LocalMatrixInner must be copy-constructible from OtherMatrixInner.
    OtherMatrixOuter   LocalMatrixOuter must be copy-constructible from OtherMatrixOuter.

Parameters
    src    The matrix to copy.
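For instance, in the following sketch the local matrix types LocalHostInner/LocalHostOuter and their device counterparts are hypothetical placeholders, and thrust::host_vector/thrust::device_vector are assumed as the Vector template arguments:

    // Hypothetical aliases; any types satisfying the copy-constructibility
    // requirements above will do.
    using HostMat   = dg::MPIDistMat< thrust::host_vector,   LocalHostInner,   LocalHostOuter>;
    using DeviceMat = dg::MPIDistMat< thrust::device_vector, LocalDeviceInner, LocalDeviceOuter>;

    HostMat   h_mat( inside, outside, gather, scatter); // assembled with host containers
    DeviceMat d_mat( h_mat);                            // copy constructor transfers to the device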
◆ communicator()

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
MPI_Comm dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::communicator ( ) const    [inline]

Read access to the MPI Communicator.
◆ inner_matrix()

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
const LocalMatrixInner & dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::inner_matrix ( ) const    [inline]

Read access to the inner matrix.
◆ mpi_gather()

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
const MPIGather< Vector > & dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::mpi_gather ( ) const    [inline]

Read access to the communication object.
◆ outer_matrix()

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
const LocalMatrixOuter & dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::outer_matrix ( ) const    [inline]

Read access to the outer matrix.
◆ scatter()

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
const Vector< int > & dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::scatter ( ) const    [inline]

Read access to the scatter map (the scattering of rows from the row buffer back to the result vector, see the constructor).
◆ stencil()

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
template<class Functor, class ContainerType1, class ContainerType2>
void dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::stencil ( const Functor f, const ContainerType1 & x, ContainerType2 & y ) const    [inline]

Stencil computations.
◆ symv()

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
template<class ContainerType1, class ContainerType2>
void dg::MPIDistMat< Vector, LocalMatrixInner, LocalMatrixOuter >::symv ( const ContainerType1 & x, ContainerType2 & y ) const    [inline]

Matrix Vector product.
First the inner elements are computed with a call to symv, then the global_gather_init function of the communication object is called. Finally the outer elements are added with a call to symv for the outer matrix.

Template Parameters
    ContainerType    container class of the vector elements

Parameters
    x    input vector
    y    output vector (the result is written here)
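Written out schematically with the accessors documented on this page. The exact MPIGather call signatures and the names buffer/result_buffer are assumptions, so those steps are left as comments; this is a sketch of the pattern, not the actual implementation:

    template<class MPIMatrix, class Container>
    void sketch_symv( const MPIMatrix& m, const Container& x, Container& y)
    {
        // 1. inner matrix; on asynchronous (e.g. GPU) backends this launch can
        //    return early, so it overlaps with the communication below
        dg::blas2::symv( m.inner_matrix(), x, y);
        // 2. gather the halo values from other processes into a buffer:
        //    m.mpi_gather().global_gather_init( ...); ... global_gather_wait( ...);
        // 3. outer matrix on the gathered buffer, then scatter the resulting
        //    rows back into y via the index map m.scatter():
        //    dg::blas2::symv( m.outer_matrix(), buffer, result_buffer);
        //    for( unsigned i=0; i<result_buffer.size(); i++)
        //        y[ m.scatter()[i]] += result_buffer[i];
    }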
◆ MPIDistMat [friend]

template<template< class > class Vector, class LocalMatrixInner, class LocalMatrixOuter = LocalMatrixInner>
template<template< class > class V, class LI, class LO>
friend class MPIDistMat
The documentation for this struct was generated from the following file: