15void sparsify( cusp::array1d<int, cusp::host_memory>& row_offsets,
16 cusp::array1d<int, cusp::host_memory>& column_indices,
17 cusp::array1d<T, cusp::host_memory>& values,
19 const thrust::host_vector<T>& zw,
20 const std::vector<int>& iz_zw,
21 unsigned nnzmax, T threshold)
28 std::vector<std::pair<double, int>> pairs;
29 std::vector<std::pair<int, double>> accept;
31 accept.push_back( {i, zw[i]});
32 for(
auto idx : iz_zw)
34 pairs.push_back( { zw[idx], idx});
38 std::sort( pairs.begin(), pairs.end(), std::greater<>());
42 for(
int k=0; k<(int)nnzmax-1; k++)
44 if( k < (
int)pairs.size() && fabs(pairs[k].first) > threshold)
47 accept.push_back({pairs[k].second, pairs[k].first});
51 std::sort( accept.begin(), accept.end());
58 row_offsets.push_back(row_offsets[i]);
59 for(
auto pair : accept)
62 column_indices.push_back( pair.first);
63 values.push_back( pair.second);
94 const cusp::csr_matrix<int, T, cusp::host_memory>& a,
95 cusp::csr_matrix<int, T, cusp::host_memory>& s,
96 thrust::host_vector<T>& d,
97 const thrust::host_vector<T>&
weights,
101 unsigned n = a.num_rows;
107 for(
int j = a.row_offsets[0]; j<a.row_offsets[1]; j++)
109 if( a.column_indices[j] == 0)
112 if( fabs( d[0] ) < threshold)
115 cusp::array1d<int, cusp::host_memory> row_offsets, column_indices;
116 cusp::array1d<T, cusp::host_memory> values;
118 row_offsets.push_back(0);
119 row_offsets.push_back(1);
120 column_indices.push_back( 0);
121 values.push_back( 1.0);
124 for(
int i = 1; i<(int)n; i++)
126 thrust::host_vector<T> zw( n, 0.);
127 std::vector<int> iz_zw;
133 for(
int k = a.row_offsets[i]; k<a.row_offsets[i+1]; k++)
135 if( a.column_indices[k] < i )
136 s.push_back( a.column_indices[k]);
144 auto it = std::min_element( s.begin(), s.end());
149 for(
int k = a.row_offsets[j]; k<a.row_offsets[j+1]; k++)
152 d[i] +=
weights[j]*a.values[k]*zw[ a.column_indices[k]];
157 if( fabs( alpha) > threshold)
159 for(
int k = row_offsets[j]; k<row_offsets[j+1]; k++)
161 int zkk = column_indices[k];
163 zw[ zkk] -= alpha * values[k];
165 if (std::find(iz_zw.begin(), iz_zw.end(), zkk) == iz_zw.end()) {
166 iz_zw.push_back( zkk);
170 for(
int l = a.row_offsets[zkk]; l < a.row_offsets[zkk+1]; l++)
173 if ( (std::find(s.begin(), s.end(), l) == s.end()) && (j<l) && (l < i) ) {
182 for(
int k = a.row_offsets[i]; k<a.row_offsets[i+1]; k++)
183 d[i] += a.values[k]*zw[ a.column_indices[k]]*
weights[i];
184 if( fabs(d[i]) < threshold)
191 detail::sparsify( row_offsets, column_indices, values, i, zw, iz_zw, nnzmax, threshold);
193 s.resize( n, n, values.size());
194 s.column_indices = column_indices;
195 s.row_offsets = row_offsets;
MPI_Vector< thrust::host_vector< real_type > > weights(const aRealMPITopology2d< real_type > &g)
Nodal weight coefficients.
Definition: mpi_weights.h:22
void sainv_precond(const cusp::csr_matrix< int, T, cusp::host_memory > &a, cusp::csr_matrix< int, T, cusp::host_memory > &s, thrust::host_vector< T > &d, const thrust::host_vector< T > &weights, unsigned nnzmax, T threshold)
Left-looking sparse inverse preconditioner for self-adjoint positive definite matrices.
Definition: precond.h:93
This is the namespace for all functions and classes defined and used by the discontinuous Galerkin li...