Commit 3a2c0797 authored by Praetorius, Simon

add PETSc find module and configuration to cmake

changed petsc cmake configuration

added computation of global DOF ids for pre-bases

Global basis ids for dune 2.6 and dune 2.7

corrected the unbind function

wrong unbind function call corrected

Added partition type query to GlobalIdSet

added documentation and extended the test

add general implementation and specialization for TaylorHoodBasis

corrected treepath of velocity component in taylorhoodbasis

add PETSc find module and configuration to cmake

add DofMapping for the association of local and global indices

changed petsc cmake configuration

added DOFMapping and test, to be used with petsc or pmtl backend

extension of istl preconditioner and amg precon

added block jacobi preconditioner with sequential sub precon

update ISTLCommTest for new Communication Creators

removed some smoothers due to bugs in dune-istl

added krylov AMG preconditioner

restructuring of istl solvers and preconditioners

Corrected errors in uniqueBorderPartitionTest and ProblemStat

changed default solver and removed solver category argument from precon creator

restructured solver and precon creators in istl. Added precompiled explicit template instantiations.

cholmod for dune-2.7 only

removed explicit instantiation of AMG precons due to dune-istl bugs

Cleanup of AMG and more documentation, removed some precompiled preconditioners and solvers

redesign of linear algebra backend for inclusion of PETSc

petsc example added for study of the implementation

Matrix pattern communicates non-owning rows

petsc example extended for debugging

output of vectors does not use std::vector directly anymore

dirichletBC redesigned

restructuring of vectors and matrices, added BiLinearForm and LinearForm

cleanup of VectorBackend design. Needs to be applied to all backends!

adopt istl backend to changes in VectorBase and MatrixBase

Update CMakeLists and implemented twist of globalBasisIds

Communication in MatrixNnzStructure and DofMapping corrected.

not HAVE_MPI case implemented

do not use lv.maxSize() for mat and vec sizes

meshcreator with overlap and periodicity for YaspGrid

Twist indexing corrected and new UniqueBorderPartition strategy implemented. In MeshCreator the MacroGridFactory removed, since it does not work in parallel

PETScCommTest added

MatrixNNZStructure extended to include remote dnnz numbers

made all backends conforming to the new structure

MeshCreator now reads AlbertaGrid only on rank 0

make vector copyable

export BACKEND cmake flag to amdis-config file

cmake-pkg config file added

pkg config file updated and PETSc direct solver added

update MTL and PETSc backend after complex test

make petsc constraints work on DirichletNodes that are not on boundary intersections

moved AMDIS_INFO_LEVEL to Output

updated the backup-restore test and corrected some small errors

make friend use struct instead of class for Constraints

change solver parameter in generated initfile of amdisproject and add PETSc to README

update cholmod to recent dune version
parent 56c073fc
1 merge request: !78 PETSc backend
Showing 1801 additions and 3 deletions
......@@ -38,13 +38,14 @@ Additionally the following optional libraries can be used:
- [MTL4](https://gitlab.math.tu-dresden.de/spraetor/mtl4) (use this fork to get up-to-date changes)
- [Eigen3](http://eigen.tuxfamily.org) >= 3.3
- [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html)
- [PETSc](https://www.mcs.anl.gov/petsc)
- libalberta >= 3.0 (For Alberta-Grids)
And a compiler that supports the C++14 standard, e.g. g++ >= 5.0 and clang >= 3.6, and cmake >= 3.1.
By default, the `dune-istl` linear-algebra backend is used. To choose one of `ISTL`, `MTL`, or `EIGEN`, you can specify the cmake parameter `-DBACKEND=[ISTL,MTL,EIGEN]`.
By default, the `dune-istl` linear-algebra backend is used. To choose one of `ISTL`, `MTL`, `PETSC`, or `EIGEN`, you can specify the cmake parameter `-DBACKEND=[ISTL,MTL,PETSC,EIGEN]`.
If your MTL4 installation is not found by default, you have to specify the path,
If, for example, your MTL4 installation is not found by default, you have to specify the path,
where the file `MTLConfig.cmake` is found, here called `MTL_ROOT`. Then simply use
`dunecontrol` to configure and `cmake` to build:
......
......@@ -78,6 +78,9 @@ if ((PETSC_STATIC_FOUND OR PETSC_FOUND) AND NOT TARGET PETSc::PETSc)
set_property(TARGET PETSc::PETSc PROPERTY
INTERFACE_COMPILE_OPTIONS "${${_prefix}_CFLAGS_OTHER}")
endif ()
# workaround for PETSc macros redefining MPI functions
set_property(TARGET PETSc::PETSc PROPERTY
INTERFACE_COMPILE_DEFINITIONS "PETSC_HAVE_BROKEN_RECURSIVE_MACRO=1")
endif ()
unset(_prefix)
......
......@@ -17,6 +17,14 @@
#include <amdis/linearalgebra/eigen/MatrixBackend.hpp>
#include <amdis/linearalgebra/eigen/VectorBackend.hpp>
#elif HAVE_PETSC
#include <amdis/linearalgebra/petsc/Constraints.hpp>
#include <amdis/linearalgebra/petsc/SolverCreator.hpp>
#include <amdis/linearalgebra/petsc/Traits.hpp>
#include <amdis/linearalgebra/petsc/MatrixBackend.hpp>
#include <amdis/linearalgebra/petsc/VectorBackend.hpp>
#else // ISTL
#include <amdis/linearalgebra/istl/Constraints.hpp>
......
......@@ -20,6 +20,8 @@ if (BACKEND STREQUAL "MTL")
add_subdirectory("mtl")
elseif (BACKEND STREQUAL "EIGEN")
add_subdirectory("eigen")
elseif (BACKEND STREQUAL "PETSC")
add_subdirectory("petsc")
else ()
add_subdirectory("istl")
endif ()
install(FILES
Communication.hpp
Constraints.hpp
Interface.hpp
MatrixBackend.hpp
MatrixNnzStructure.hpp
MatrixNnzStructure.inc.hpp
PetscRunner.hpp
SolverCreator.hpp
Traits.hpp
VectorBackend.hpp
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/amdis/linearalgebra/petsc)
#pragma once
#include <petsc.h>
#include <dune/common/parallel/indexset.hh>
#include <dune/common/parallel/localindex.hh>
#include <dune/common/parallel/plocalindex.hh>
#include <dune/common/parallel/remoteindices.hh>
#include <dune/common/std/optional.hh>
#include <amdis/linearalgebra/Communication.hpp>
#include <amdis/linearalgebra/DOFMapping.hpp>
#include <amdis/linearalgebra/ParallelIndexSet.hpp>
namespace AMDiS
{
template <class Basis>
class DistributedCommunication
{
public:
using GlobalIdType = typename GlobalBasisIdSet<Basis>::IdType;
#if HAVE_MPI
using LocalIndexType = Dune::ParallelLocalIndex<DefaultAttributeSet::Type>;
using IndexSet = Dune::ParallelIndexSet<GlobalIdType, LocalIndexType, 512>;
using RemoteIndices = Dune::RemoteIndices<IndexSet>;
#else
struct IndexSet
{
constexpr std::size_t size() const { return size_; }
std::size_t size_ = 0;
};
struct RemoteIndices {};
#endif
using DofMap = DofMapping<IndexSet, PetscInt>;
public:
DistributedCommunication(Basis const& basis)
{
update(basis);
}
/// Return the ParallelIndexSet
IndexSet const& indexSet() const { return *indexSet_; }
IndexSet& indexSet() { return *indexSet_; }
/// Return the RemoteIndices
RemoteIndices const& remoteIndices() const { return remoteIndices_; }
RemoteIndices& remoteIndices() { return remoteIndices_; }
/// Return the DofMapping
DofMap const& dofMap() const { return dofMap_; }
DofMap& dofMap() { return dofMap_; }
/// Update the indexSet, remoteIndices and dofmapping
void update(Basis const& basis)
{
indexSet_.emplace();
#if HAVE_MPI
buildParallelIndexSet(basis, *indexSet_);
remoteIndices_.setIndexSets(*indexSet_, *indexSet_, Environment::comm());
remoteIndices_.template rebuild<true>();
#else
(*indexSet_).size_ = basis.dimension();
#endif
dofMap_.update(*this);
}
protected:
Dune::Std::optional<IndexSet> indexSet_;
RemoteIndices remoteIndices_;
DofMap dofMap_;
};
template <class Basis>
struct CommunicationCreator<DistributedCommunication<Basis>>
{
using Communication = DistributedCommunication<Basis>;
static std::unique_ptr<Communication> create(Basis const& basis, std::string const& prefix = "")
{
return std::make_unique<Communication>(basis);
}
};
} // end namespace AMDiS
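For orientation (not part of this commit), a minimal usage sketch of DistributedCommunication, mirroring what PETScCommTest below does; it assumes an already constructed dune-functions basis and uses the hypothetical helper name inspectDofDistribution:
#include <amdis/Output.hpp>
#include <amdis/linearalgebra/Communication.hpp>
#include <amdis/linearalgebra/petsc/Traits.hpp>
// Sketch: build index set, remote indices and DOF mapping for a given basis
// and report how the DOFs are distributed over the ranks.
template <class Basis>
void inspectDofDistribution(Basis const& basis)
{
  using Traits = AMDiS::BackendTraits<Basis>;
  using Comm   = typename Traits::Comm;   // = DistributedCommunication<Basis>
  auto comm = AMDiS::CommunicationCreator<Comm>::create(basis, "communication");
  auto const& dofMap = comm->dofMap();
  AMDiS::msg("this rank owns {} of {} global DOFs", dofMap.localSize(), dofMap.globalSize());
}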
#pragma once
#include <vector>
#include <amdis/linearalgebra/Constraints.hpp>
#include <amdis/linearalgebra/petsc/MatrixBackend.hpp>
#include <amdis/linearalgebra/petsc/VectorBackend.hpp>
namespace AMDiS
{
template <class Traits>
struct Constraints<MatrixBackend<Traits>>
{
using Matrix = MatrixBackend<Traits>;
using Vector = VectorBackend<Traits>;
/// Dirichlet boundary conditions.
/**
* Clear the rows (columns) of the given boundary nodes that are owned by the processor.
* The columns are only cleared if the matrix is symmetric. When clearing the columns, the scaled
* solution values are subtracted from the rhs.
*
* The rhs vector is adapted to the solution values at the dirichlet nodes.
**/
template <class BitVector>
static void dirichletBC(Matrix& mat, Vector& x, Vector& b, BitVector const& nodes, bool setDiagonal = true)
{
thread_local std::vector<PetscInt> rows;
rows.clear();
auto const& dofMap = mat.dofMap();
for (std::size_t i = 0; i < nodes.size(); ++i)
if (nodes[i])
rows.push_back(dofMap.global(i));
// test for symmetry of the matrix
PetscBool set, flg;
MatIsSymmetricKnown(mat.matrix(), &set, &flg);
if (set == PETSC_TRUE && flg == PETSC_TRUE)
MatZeroRowsColumns(mat.matrix(), rows.size(), rows.data(), setDiagonal ? 1.0 : 0.0, x.vector(), b.vector());
else
MatZeroRows(mat.matrix(), rows.size(), rows.data(), setDiagonal ? 1.0 : 0.0, x.vector(), b.vector());
}
/// Periodic boundary conditions
/**
* The periodic constraints are not implemented in a symmetric fashion
**/
template <class BitVector, class Associations>
static void periodicBC(Matrix& mat, Vector& x, Vector& b, BitVector const& left, Associations const& left2right, bool setDiagonal = true)
{
thread_local std::vector<PetscInt> rows, associated_rows;
rows.clear();
associated_rows.clear();
auto const& dofMap = mat.dofMap();
for (PetscInt i = 0; i < PetscInt(left.size()); ++i) {
if (left[i]) {
// get global row index
PetscInt row_local[2] = {i, at(left2right,i)};
PetscInt row[2] = {dofMap.global(row_local[0]), dofMap.global(row_local[1])};
rows.push_back(row[0]);
associated_rows.push_back(row[1]);
// copy row to associated row
PetscInt ncols;
PetscInt const* cols;
PetscScalar const* vals;
MatGetRow(mat.matrix(), row[0], &ncols, &cols, &vals);
MatSetValues(mat.matrix(), 1, &row[1], ncols, cols, vals, ADD_VALUES);
MatRestoreRow(mat.matrix(), row[0], &ncols, &cols, &vals);
}
}
MatAssemblyBegin(mat.matrix(), MAT_FLUSH_ASSEMBLY);
MatAssemblyEnd(mat.matrix(), MAT_FLUSH_ASSEMBLY);
// clear the periodic rows and add a 1 on the diagonal
MatSetOption(mat.matrix(), MAT_NO_OFF_PROC_ZERO_ROWS, PETSC_TRUE);
MatZeroRows(mat.matrix(), rows.size(), rows.data(), setDiagonal ? 1 : 0, PETSC_NULL, PETSC_NULL);
// Insert the -1 value at the off-diagonal associated columns
// NOTE: the following is probably very slow
for (std::size_t i = 0; i < rows.size(); ++i)
MatSetValue(mat.matrix(), rows[i], associated_rows[i], -1, ADD_VALUES);
MatAssemblyBegin(mat.matrix(), MAT_FINAL_ASSEMBLY); // Maybe MAT_FLUSH_ASSEMBLY
MatAssemblyEnd(mat.matrix(), MAT_FINAL_ASSEMBLY);
std::vector<PetscScalar> data(rows.size());
// copy periodic rhs values to associated rows
VecGetValues(b.vector(), rows.size(), rows.data(), data.data());
VecSetValues(b.vector(), rows.size(), associated_rows.data(), data.data(), ADD_VALUES);
VecAssemblyBegin(b.vector());
VecAssemblyEnd(b.vector());
// set the periodic rhs components to 0
data.assign(rows.size(), 0);
VecSetValues(b.vector(), rows.size(), rows.data(), data.data(), INSERT_VALUES);
VecAssemblyBegin(b.vector());
VecAssemblyEnd(b.vector());
// symmetrize the solution vector
VecGetValues(x.vector(), rows.size(), rows.data(), data.data());
VecSetValues(x.vector(), rows.size(), associated_rows.data(), data.data(), INSERT_VALUES);
VecAssemblyBegin(x.vector());
VecAssemblyEnd(x.vector());
}
private:
template <class Associations>
static PetscInt at(Associations const& m, std::size_t idx)
{
auto it = m.find(idx);
assert(it != m.end());
return it->second;
}
};
} // end namespace AMDiS
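Stripped of the AMDiS types, the Dirichlet branch above is the standard PETSc row-clearing pattern. A minimal sketch (not part of this commit; A, x, b and rows are hypothetical inputs):
#include <vector>
#include <petscmat.h>
// Sketch: zero the listed global rows of A, put 1 on the diagonal and let PETSc
// adjust b from the boundary values already stored in x. If the matrix is known
// to be symmetric, the corresponding columns are cleared as well.
inline void applyDirichletRows(Mat A, Vec x, Vec b, std::vector<PetscInt> const& rows)
{
  PetscBool set = PETSC_FALSE, symmetric = PETSC_FALSE;
  MatIsSymmetricKnown(A, &set, &symmetric);
  if (set == PETSC_TRUE && symmetric == PETSC_TRUE)
    MatZeroRowsColumns(A, PetscInt(rows.size()), rows.data(), 1.0, x, b);
  else
    MatZeroRows(A, PetscInt(rows.size()), rows.data(), 1.0, x, b);
}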
#pragma once
#include <petscksp.h>
#include <petsc/private/kspimpl.h>
#include <amdis/Output.hpp>
namespace AMDiS
{
namespace PETSc
{
// KSP routines
/// Print the residual norm at every 10th iteration.
inline PetscErrorCode KSPMonitorCyclic(KSP ksp, PetscInt n, PetscReal rnorm, void* ctx)
{
if (n % 10 == 0)
msg("iteration {:>4}: resid {:<.6e}", n, rnorm);
return 0;
}
/// Prints the residual norm, the true residual norm, and the relative residual norm at each iteration.
inline PetscErrorCode KSPMonitorNoisy(KSP ksp, PetscInt n, PetscReal rnorm, void* ctx)
{
Vec resid;
KSPBuildResidual(ksp, NULL, NULL, &resid);
PetscReal truenorm = 0.0;
VecNorm(resid, NORM_2, &truenorm);
VecDestroy(&resid);
PetscReal bnorm = 0.0;
VecNorm(ksp->vec_rhs, NORM_2, &bnorm);
msg("iteration {:>4}: resid {:<.6e}, true resid {:<.2e}, rel. resid {:<.2e}", n, rnorm, truenorm, truenorm/bnorm);
return 0;
}
/// Prints the true residual norm as well as the preconditioned residual norm at each iteration of an iterative solver.
inline PetscErrorCode KSPMonitorTrueResidualNorm(KSP ksp, PetscInt n, PetscReal rnorm, void* ctx)
{
assert(ctx != nullptr);
#if (PETSC_VERSION_MINOR >= 7)
return ::KSPMonitorTrueResidualNorm(ksp, n, rnorm, (PetscViewerAndFormat*)(ctx));
#else
return ::KSPMonitorTrueResidualNorm(ksp, n, rnorm, ctx);
#endif
}
/// Print the residual norm at each iteration of an iterative solver.
inline PetscErrorCode KSPMonitorDefault(KSP ksp, PetscInt n, PetscReal rnorm, void* ctx)
{
assert(ctx != nullptr);
#if (PETSC_VERSION_MINOR >= 7)
return ::KSPMonitorDefault(ksp, n, rnorm, (PetscViewerAndFormat*)(ctx));
#else
return ::KSPMonitorDefault(ksp, n, rnorm, ctx);
#endif
}
/// Sets an additional function to be called at every iteration to monitor the residual/error etc.
template <class Monitor>
inline PetscErrorCode KSPMonitorSet(KSP ksp, Monitor monitor)
{
#if (PETSC_VERSION_MINOR >= 7)
PetscViewerAndFormat *vf;
PetscErrorCode ierr;
ierr = ::PetscViewerAndFormatCreate(PETSC_VIEWER_STDOUT_WORLD,PETSC_VIEWER_DEFAULT,&vf);CHKERRQ(ierr);
ierr = ::KSPMonitorSet(ksp,(PetscErrorCode (*)(KSP,PetscInt,PetscReal,void*))monitor,vf,(PetscErrorCode (*)(void**))PetscViewerAndFormatDestroy);CHKERRQ(ierr);
return ierr;
#else
return ::KSPMonitorSet(ksp, monitor, PETSC_NULL, PETSC_NULL);
#endif
}
/// Gets the matrix associated with the linear system and a (possibly) different one associated with the preconditioner.
inline PetscErrorCode KSPGetOperators(KSP ksp, Mat *Amat, Mat *Pmat)
{
#if (PETSC_VERSION_MINOR >= 5)
return ::KSPGetOperators(ksp, Amat, Pmat);
#else
return ::KSPGetOperators(ksp, Amat, Pmat, PETSC_NULL);
#endif
}
/// Sets the matrix associated with the linear system and a (possibly) different one associated with the preconditioner.
inline PetscErrorCode KSPSetOperators(KSP ksp, Mat Amat, Mat Pmat)
{
#if (PETSC_VERSION_MINOR >= 5)
return ::KSPSetOperators(ksp, Amat, Pmat);
#else
return ::KSPSetOperators(ksp, Amat, Pmat, SAME_NONZERO_PATTERN);
#endif
}
// Mat routines
/// Get vector(s) compatible with the matrix, i.e. with the same parallel layout
inline PetscErrorCode MatCreateVecs(Mat mat, Vec *right, Vec *left)
{
#if (PETSC_VERSION_MINOR >= 6)
return ::MatCreateVecs(mat, right, left);
#else
return ::MatGetVecs(mat, right, left);
#endif
}
/// Gets a single submatrix on the same number of processors as the original matrix.
inline PetscErrorCode MatCreateSubMatrix(Mat mat, IS isrow, IS iscol, MatReuse cll, Mat *newmat)
{
#if (PETSC_VERSION_MINOR >= 8)
return ::MatCreateSubMatrix(mat, isrow, iscol, cll, newmat);
#else
return ::MatGetSubMatrix(mat, isrow, iscol, cll, newmat);
#endif
}
/// Removes all the components of a null space from a vector.
inline PetscErrorCode MatNullSpaceRemove(MatNullSpace sp, Vec vec)
{
#if (PETSC_VERSION_MINOR >= 5)
return ::MatNullSpaceRemove(sp, vec);
#else
return ::MatNullSpaceRemove(sp, vec, PETSC_NULL);
#endif
}
// PC routines
#if (PETSC_VERSION_MINOR >= 9)
/// Sets the software that is used to perform the factorization
inline PetscErrorCode PCFactorSetMatSolverType(PC pc, MatSolverType stype)
{
return ::PCFactorSetMatSolverType(pc, stype);
}
#else
/// Sets the software that is used to perform the factorization
template <class MatSolverType>
inline PetscErrorCode PCFactorSetMatSolverType(PC pc, MatSolverType stype)
{
return ::PCFactorSetMatSolverPackage(pc, stype);
}
#endif
/// Prints the options that have been loaded. This is useful for debugging purposes.
inline PetscErrorCode PetscOptionsView(PetscViewer viewer)
{
#if (PETSC_VERSION_MINOR >= 7)
return ::PetscOptionsView(PETSC_NULL, viewer);
#else
return ::PetscOptionsView(viewer);
#endif
}
// PETSc routine
/// Inserts options into the database from a string
inline PetscErrorCode PetscOptionsInsertString(const char in_str[])
{
#if (PETSC_VERSION_MINOR >= 7)
return ::PetscOptionsInsertString(PETSC_NULL, in_str);
#else
return ::PetscOptionsInsertString(in_str);
#endif
}
} // end namespace PETSc
} // end namespace AMDiS
\ No newline at end of file
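The wrappers above exist so that calling code does not have to repeat the PETSC_VERSION_MINOR switches. A short usage sketch (not part of this commit; ksp and A are assumed to exist already):
#include <petscksp.h>
#include <amdis/linearalgebra/petsc/Interface.hpp>
// Sketch: version-independent calls; the wrappers dispatch internally on the PETSc version.
inline void configureKsp(KSP ksp, Mat A)
{
  AMDiS::PETSc::KSPSetOperators(ksp, A, A);                          // 3- vs 4-argument API
  AMDiS::PETSc::KSPMonitorSet(ksp, AMDiS::PETSc::KSPMonitorCyclic);  // viewer-and-format handling
}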
#pragma once
#include <algorithm>
#include <memory>
#include <vector>
#include <petscmat.h>
#include <amdis/Output.hpp>
#include <amdis/linearalgebra/SymmetryStructure.hpp>
#include <amdis/linearalgebra/petsc/MatrixNnzStructure.hpp>
namespace AMDiS
{
/// \brief The basic container that stores a base matrix
template <class T>
class MatrixBackend
{
friend struct Constraints<MatrixBackend<T>>;
using Communication = typename T::Comm;
public:
using Traits = T;
/// The matrix type of the underlying base matrix
using BaseMatrix = ::Mat;
/// The type of the elements of the DOFMatrix
using value_type = PetscScalar;
/// The index/size - type
using size_type = PetscInt;
public:
/// Constructor. Constructs new BaseMatrix.
MatrixBackend(std::shared_ptr<Communication> comm)
: comm_(std::move(comm))
{}
// disable copy and move operations
MatrixBackend(MatrixBackend const&) = delete;
MatrixBackend(MatrixBackend&&) = delete;
MatrixBackend& operator=(MatrixBackend const&) = delete;
MatrixBackend& operator=(MatrixBackend&&) = delete;
~MatrixBackend()
{
destroy();
}
/// Return a reference to the data-matrix \ref matrix
BaseMatrix& matrix()
{
return matrix_;
}
/// Return a reference to the data-matrix \ref matrix
BaseMatrix const& matrix() const
{
return matrix_;
}
/// Insert a single value into the matrix
template <class RowIndex, class ColIndex>
void insert(RowIndex const& r, ColIndex const& c, PetscScalar value)
{
PetscInt row = dofMap().global(r);
PetscInt col = dofMap().global(c);
MatSetValue(matrix_, row, col, value, ADD_VALUES);
}
/// Insert an element-matrix with row-indices == col-indices
template <class LocalInd, class LocalMatrix>
void scatter(LocalInd const& localInd, LocalMatrix const& localMat)
{
thread_local std::vector<PetscInt> idx;
idx.resize(localInd.size());
// create a vector of global indices from the local indices using the local-to-global map
std::transform(localInd.begin(), localInd.end(), idx.begin(),
[&](auto const& mi) { return this->dofMap().global(mi); });
MatSetValues(matrix_, idx.size(), idx.data(), idx.size(), idx.data(), localMat.data(), ADD_VALUES);
}
/// Insert an element-matrix
template <class RowLocalInd, class ColLocalInd, class LocalMatrix>
void scatter(RowLocalInd const& rowLocalInd, ColLocalInd const& colLocalInd, LocalMatrix const& localMat)
{
thread_local std::vector<PetscInt> ri;
thread_local std::vector<PetscInt> ci;
ri.resize(rowLocalInd.size());
ci.resize(colLocalInd.size());
// create vectors of global indices from the local indices using the local-to-global map
std::transform(rowLocalInd.begin(), rowLocalInd.end(), ri.begin(),
[&](auto const& mi) { return this->dofMap().global(mi); });
std::transform(colLocalInd.begin(), colLocalInd.end(), ci.begin(),
[&](auto const& mi) { return this->dofMap().global(mi); });
MatSetValues(matrix_, ri.size(), ri.data(), ci.size(), ci.data(), localMat.data(), ADD_VALUES);
}
/// Create and initialize the matrix
template <class RowBasis, class ColBasis>
void init(RowBasis const& rowBasis, ColBasis const& colBasis, SymmetryStructure symmetry)
{
Dune::Timer t;
// update the non-zeros structure of the matrix.
pattern_.update(rowBasis, *comm_);
info(3, " update pattern needed {} seconds", t.elapsed());
t.reset();
// destroy an old matrix if created before
destroy();
info(3, " destroy old matrix needed {} seconds", t.elapsed());
t.reset();
// create a MATAIJ or MATSEQAIJ matrix with provided sparsity pattern
MatCreateAIJ(comm(),
dofMap().localSize(), dofMap().localSize(),
dofMap().globalSize(), dofMap().globalSize(),
0, pattern_.d_nnz().data(), 0, pattern_.o_nnz().data(), &matrix_);
// keep sparsity pattern even if we delete a row / column with e.g. MatZeroRows
MatSetOption(matrix_, MAT_KEEP_NONZERO_PATTERN, PETSC_TRUE);
// set symmetry properties of the matrix
switch (symmetry) {
case SymmetryStructure::spd:
MatSetOption(matrix_, MAT_SPD, PETSC_TRUE);
break;
case SymmetryStructure::symmetric:
MatSetOption(matrix_, MAT_SYMMETRIC, PETSC_TRUE);
break;
case SymmetryStructure::hermitian:
MatSetOption(matrix_, MAT_HERMITIAN, PETSC_TRUE);
break;
case SymmetryStructure::structurally_symmetric:
MatSetOption(matrix_, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
break;
default:
/* do nothing */
break;
}
info(3, " create new matrix needed {} seconds", t.elapsed());
t.reset();
initialized_ = true;
}
/// Finish assembly. Must be called before matrix can be used in a KSP
void finish()
{
Dune::Timer t;
MatAssemblyBegin(matrix_, MAT_FINAL_ASSEMBLY);
MatAssemblyEnd(matrix_, MAT_FINAL_ASSEMBLY);
info(3, " finish matrix assembling needed {} seconds", t.elapsed());
}
/// Return the local number of nonzeros in the matrix
std::size_t nnz() const
{
MatInfo info;
MatGetInfo(matrix_, MAT_LOCAL, &info);
return std::size_t(info.nz_used);
}
private:
// The local-to-global map
auto const& dofMap() const
{
return comm_->dofMap();
}
// An MPI Communicator or PETSC_COMM_SELF
MPI_Comm comm() const
{
#if HAVE_MPI
return Environment::comm();
#else
return PETSC_COMM_SELF;
#endif
}
// Destroy the matrix if initialized before.
void destroy()
{
if (initialized_)
MatDestroy(&matrix_);
}
private:
std::shared_ptr<Communication> comm_;
/// The data-matrix
Mat matrix_;
MatrixNnzStructure pattern_;
bool initialized_ = false;
};
} // end namespace AMDiS
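The init() method above boils down to PETSc's AIJ preallocation. A stand-alone sketch with hypothetical sizes and counts (not part of this commit; the real d_nnz/o_nnz arrays come from MatrixNnzStructure):
#include <vector>
#include <petscmat.h>
// Sketch: create a parallel AIJ matrix with per-row preallocation and keep its
// nonzero pattern when rows are later zeroed (e.g. by Dirichlet constraints).
inline void createPreallocatedMatrix(Mat* A)
{
  PetscInt localRows = 100;                                          // hypothetical
  std::vector<PetscInt> d_nnz(localRows, 5), o_nnz(localRows, 2);    // hypothetical
  MatCreateAIJ(PETSC_COMM_WORLD,
               localRows, localRows,                // owned rows/columns
               PETSC_DETERMINE, PETSC_DETERMINE,    // global sizes derived from local ones
               0, d_nnz.data(),                     // nnz per row in the diagonal block
               0, o_nnz.data(),                     // nnz per row in the off-diagonal block
               A);
  MatSetOption(*A, MAT_KEEP_NONZERO_PATTERN, PETSC_TRUE);
}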
#pragma once
#include <algorithm>
#include <vector>
#include <amdis/common/parallel/Communicator.hpp>
namespace AMDiS
{
/// Sparsity pattern used to create PETSc matrices
class MatrixNnzStructure
{
public:
/// Return the number of nonzeros in the diagonal part (owner part)
std::vector<PetscInt> const& d_nnz() const
{
return dnnz_;
}
/// Return the number of nonzeros in the off-diagonal part (overlap part)
std::vector<PetscInt> const& o_nnz() const
{
return onnz_;
}
/// Calculate the matrix pattern from the local pattern of a basis.
// NOTE: this assumes a square matrix with row-basis = col-basis.
template <class Basis, class Communication>
void update(Basis const& basis, Communication const& comm);
private:
std::vector<PetscInt> dnnz_; ///< number of nonzeros in the diagonal part (owner part)
std::vector<PetscInt> onnz_; ///< number of nonzeros in the off-diagonal part (overlap part)
// use a grid change index to decide whether to update the matrix pattern or not.
unsigned long changeIndex_ = 0;
bool initialized_ = false;
const Mpi::Tag tag_{916821}; ///< Communication tag used internally
};
} // end namespace AMDiS
#include <amdis/linearalgebra/petsc/MatrixNnzStructure.inc.hpp>
#pragma once
#include <map>
#include <set>
#include <dune/common/timer.hh>
#include <dune/grid/common/partitionset.hh>
#include <amdis/GridTransferManager.hpp>
#include <amdis/Output.hpp>
#include <amdis/common/parallel/Communicator.hpp>
#include <amdis/common/parallel/RequestOperations.hpp>
#include <amdis/linearalgebra/AttributeSet.hpp>
namespace AMDiS {
template <class Basis, class Communication>
void MatrixNnzStructure::update(Basis const& basis, Communication const& c)
{
unsigned long newChangeIndex = GridTransferManager::get(basis.gridView().grid())->changeIndex();
bool gridChanged = newChangeIndex > changeIndex_;
changeIndex_ = newChangeIndex;
// update is not necessary
if (initialized_ && !gridChanged)
return;
Dune::Timer t;
auto const& dofMap = c.dofMap();
std::size_t localSize = dofMap.localSize();
test_exit(localSize > 0, "Local domain has no owned DOFs!");
std::vector<std::set<PetscInt>> d_nz(localSize), o_nz(localSize);
// column indices of row DOFs belonging to remote rank
// global-row -> {global-col}
std::map<PetscInt, std::set<PetscInt>> remote_nz;
// build up local nz structure
auto localView = basis.localView();
for (const auto& element : elements(basis.gridView(), Dune::Partitions::interior)) {
localView.bind(element);
std::size_t size = localView.size();
for (std::size_t i = 0; i < size; ++i) {
auto row = localView.index(i);
auto global_row = dofMap.global(row);
if (dofMap.owner(row)) {
auto local_row = dofMap.globalToLocal(global_row);
assert(local_row < localSize);
for (std::size_t j = 0; j < size; ++j) {
auto col = localView.index(j);
if (dofMap.owner(col))
d_nz[local_row].insert(dofMap.global(col));
else
o_nz[local_row].insert(dofMap.global(col));
}
} else {
auto& remote_row = remote_nz[global_row];
for (std::size_t j = 0; j < size; ++j) {
auto col = localView.index(j);
remote_row.insert(dofMap.global(col));
}
}
}
localView.unbind();
}
// insert size of sets into diagonal block nnz and off-diagonal block nnz
dnnz_.resize(localSize, 0);
onnz_.resize(localSize, 0);
std::transform(d_nz.begin(), d_nz.end(), dnnz_.begin(), [](auto const& s) { return s.size(); });
std::transform(o_nz.begin(), o_nz.end(), onnz_.begin(), [](auto const& s) { return s.size(); });
#if HAVE_MPI
Mpi::Communicator world(Environment::comm());
// exchange remote rows
using Attribute = DefaultAttributeSet::Type;
using RemoteNz = std::vector<PetscInt>;
std::vector<RemoteNz> sendList(world.size());
std::vector<std::size_t> receiveList(world.size(), 0);
for (auto const& rim : c.remoteIndices()) {
int p = rim.first;
auto* sourceRemoteIndexList = rim.second.first;
auto* targetRemoteIndexList = rim.second.second;
// receive from overlap
for (auto const& ri : *sourceRemoteIndexList) {
auto const& lip = ri.localIndexPair();
Attribute remoteAttr = ri.attribute();
Attribute myAttr = lip.local().attribute();
if (myAttr == Attribute::owner && remoteAttr == Attribute::overlap) {
assert(dofMap.owner(lip.local().local()));
receiveList[p]++;
}
}
// send to owner
for (auto const& ri : *targetRemoteIndexList) {
auto const& lip = ri.localIndexPair();
Attribute remoteAttr = ri.attribute();
Attribute myAttr = lip.local().attribute();
if (myAttr == Attribute::overlap && remoteAttr == Attribute::owner) {
assert(!dofMap.owner(lip.local().local()));
PetscInt global_row = dofMap.global(lip.local().local());
assert(dofMap.globalOwner(p, global_row));
PetscInt remoteDnnz = 0;
PetscInt remoteOnnz = 0;
for (PetscInt global_col : remote_nz[global_row]) {
if (dofMap.globalOwner(p, global_col))
remoteDnnz++;
else
remoteOnnz++;
}
auto& data = sendList[p];
data.push_back(global_row);
data.push_back(remoteDnnz);
data.push_back(remoteOnnz);
}
}
}
// 1. send remote nz structure to owner
std::vector<Mpi::Request> sendRequests;
for (int p = 0; p < world.size(); ++p) {
if (!sendList[p].empty()) {
sendRequests.emplace_back( world.isend(sendList[p], p, tag_) );
}
}
// 2. receive nz structure belonging to own DOFs from remote node
std::vector<Mpi::Request> recvRequests;
std::vector<RemoteNz> recvData(world.size());
for (int p = 0; p < world.size(); ++p) {
if (receiveList[p] > 0)
recvRequests.emplace_back( world.irecv(recvData[p], p, tag_) );
}
Mpi::wait_all(recvRequests.begin(), recvRequests.end());
// 3. insert all remote nz data into my local nnz sets
for (int p = 0; p < world.size(); ++p) {
auto const& data = recvData[p];
assert(data.size() == 3*receiveList[p]);
for (std::size_t i = 0; i < receiveList[p]; ++i) {
PetscInt global_row = data[3*i];
assert(dofMap.globalOwner(global_row));
PetscInt row_dnnz = data[3*i+1];
assert(row_dnnz < PetscInt(localSize));
PetscInt row_onnz = data[3*i+2];
assert(row_onnz < PetscInt(dofMap.globalSize()));
auto local_row = dofMap.globalToLocal(global_row);
assert(local_row < localSize);
dnnz_[local_row] += row_dnnz;
onnz_[local_row] += row_onnz;
}
}
// 4. check that the sum of diagonal and off-diagonal entries does not exceed the size of the matrix
// This may happen for very small size matrices
for (std::size_t i = 0; i < localSize; ++i) {
dnnz_[i] = std::min(dnnz_[i], PetscInt(localSize));
if (onnz_[i] + dnnz_[i] > PetscInt(dofMap.globalSize())) {
PetscInt diff = onnz_[i] + dnnz_[i] - dofMap.globalSize();
onnz_[i] -= diff;
}
}
Mpi::wait_all(sendRequests.begin(), sendRequests.end());
#endif // HAVE_MPI
initialized_ = true;
msg("update MatrixNnzStructure needed {} seconds", t.elapsed());
}
} // end namespace AMDiS
\ No newline at end of file
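The counting above follows PETSc's convention that every owned row is split into a diagonal block (columns owned by this rank) and an off-diagonal block (columns owned elsewhere). A tiny sequential sketch of that split (not part of this commit; owner and localRow are hypothetical stand-ins for the DofMapping queries):
#include <set>
#include <utility>
#include <vector>
// Sketch: count d_nnz/o_nnz for the locally discovered couplings; rows not owned by
// this rank are skipped here, since the real code sends them to their owner (steps 1-3 above).
template <class Owner, class LocalRow>
void countLocalNnz(std::vector<std::pair<long,long>> const& couplings, std::size_t localSize,
                   Owner owner, LocalRow localRow,
                   std::vector<long>& dnnz, std::vector<long>& onnz)
{
  std::vector<std::set<long>> d(localSize), o(localSize);
  for (auto const& rc : couplings) {
    if (!owner(rc.first))
      continue;                                   // handled by the remote exchange
    auto& block = owner(rc.second) ? d : o;       // owned column -> diagonal block
    block[localRow(rc.first)].insert(rc.second);
  }
  dnnz.assign(localSize, 0);
  onnz.assign(localSize, 0);
  for (std::size_t i = 0; i < localSize; ++i) {
    dnnz[i] = d[i].size();
    onnz[i] = o[i].size();
  }
}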
#pragma once
#include <petscpc.h>
#include <petscksp.h>
#include <dune/common/unused.hh>
#include <amdis/Initfile.hpp>
#include <amdis/linearalgebra/RunnerInterface.hpp>
#include <amdis/linearalgebra/SolverInfo.hpp>
#include <amdis/linearalgebra/petsc/Interface.hpp>
#ifndef KSPDGGMRES
#define KSPDGGMRES "dggmres"
#endif
namespace AMDiS
{
/// \brief Wrapper around PETSc KSP and PC objects to solve a linear system
/**
* Configure the KSP and PC types using initfile parameters, based on an initfile prefix `[prefix]`:
* All parameters are named like the PETSc command-line parameters, with underscores replaced by spaces
* and the *type* postfix removed, e.g. instead of `ksp_type` write `ksp`, instead of `mat_solver_type`
* write `mat solver`.
*
* **Examples:**
* ```
* [prefix]->max it: 100 % maximum solver iterations
* [prefix]->rtol: 1.e-10 % relative residual tolerance
* [prefix]->pc: bjacobi % preconditioner type
* [prefix]->pc->sub ksp: preonly % sub KSP type used on the blocks
* [prefix]->pc->sub ksp->pc: ilu % preconditioner of the sub KSP type
* ```
*
* For the configuration of sub PC types and sub KSP types, one can use the current parameter
* as new prefix, e.g. `[sub-prefix] := [prefix]->pc`, or `[sub-sub-prefix] := [prefix]->pc->sub ksp`.
* Those sub PC and sub KSP types can be configured with all the possible parameters.
*
* For configuration via command-line arguments, a ksp-prefix or pc-prefix must be
* assigned to distinguish the different KSP and PC objects. Therefore, for each initfile prefix, a
* PETSc prefix can be set: `[prefix]->prefix: name`, where `name` can be used on the commandline, e.g.
* `-name_ksp_type fgmres`.
**/
template <class Traits>
class PetscRunner
: public RunnerInterface<Traits>
{
public:
/// Constructor.
/**
* Stores the initfile prefix to configure the ksp and pc solver.
*
* Reads an info flag `[prefix]->info`:
* 0 ... No solver convergence information
* 1 ... Minimal output, print residual every 10th iteration
* 2 ... Full convergence output. Print residual, true residual and rel. residual every iteration.
**/
explicit PetscRunner(std::string const& prefix)
: prefix_(prefix)
{
Parameters::get(prefix + "->info", info_);
}
~PetscRunner()
{
exit();
}
/// Implements \ref RunnerInterface::init()
void init(typename Traits::Mat const& mat, typename Traits::Comm& comm) override
{
exit();
#if HAVE_MPI
KSPCreate(Environment::comm(), &ksp_);
#else
KSPCreate(PETSC_COMM_SELF, &ksp_);
#endif
PETSc::KSPSetOperators(ksp_, mat, mat);
initKSP(ksp_, prefix_);
initialized_ = true;
}
/// Implements \ref RunnerInterface::exit()
void exit() override
{
if (initialized_)
KSPDestroy(&ksp_);
initialized_ = false;
}
/// Implements \ref RunnerInterface::solve()
int solve(typename Traits::Mat const& A, typename Traits::Vec& x, typename Traits::Vec const& b, SolverInfo& solverInfo) override
{
assert(initialized_);
DUNE_UNUSED_PARAMETER(A);
KSPSolve(ksp_, b, x);
if (info_ > 0)
KSPReasonView(ksp_, PETSC_VIEWER_STDOUT_WORLD);
KSPConvergedReason reason;
KSPGetConvergedReason(ksp_, &reason);
return reason > 0 ? 0 : int(reason);
}
KSP ksp() { return ksp_; }
protected:
// initialize the KSP solver from the initfile
void initKSP(KSP ksp, std::string prefix) const
{
// see https://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/KSP/KSPType.html
auto kspTypeStr = Parameters::get<std::string>(prefix);
if (kspTypeStr) {
if (kspTypeStr.value() == "direct")
initDirectSolver(ksp, prefix);
else if (kspTypeStr.value() != "default") {
KSPSetType(ksp, kspTypeStr.value().c_str());
// initialize some KSP specific parameters
initKSPParameters(ksp, kspTypeStr.value().c_str(), prefix);
// set initial guess to nonzero only for non-preonly ksp type
if (kspTypeStr.value() != "preonly")
KSPSetInitialGuessNonzero(ksp, PETSC_TRUE);
}
}
// set a KSPMonitor if info > 0
int info = 0;
Parameters::get(prefix + "->info", info);
if (info == 1) {
KSPMonitorSet(ksp, PETSc::KSPMonitorCyclic, nullptr, nullptr);
} else if (info > 1) {
KSPMonitorSet(ksp, PETSc::KSPMonitorNoisy, nullptr, nullptr);
}
// set solver tolerances
auto maxIt = Parameters::get<PetscInt>(prefix + "->max it");
auto rTol = Parameters::get<PetscReal>(prefix + "->rtol");
auto aTol = Parameters::get<PetscReal>(prefix + "->atol");
auto dTol = Parameters::get<PetscReal>(prefix + "->divtol");
if (maxIt || rTol || aTol || dTol)
KSPSetTolerances(ksp,
rTol.value_or(PETSC_DEFAULT), aTol.value_or(PETSC_DEFAULT),
dTol.value_or(PETSC_DEFAULT), maxIt.value_or(PETSC_DEFAULT));
auto kspPrefixParam = Parameters::get<std::string>(prefix + "->prefix");
if (kspPrefixParam) {
std::string kspPrefix = kspPrefixParam.value() + "_";
KSPSetOptionsPrefix(ksp, kspPrefix.c_str());
KSPSetFromOptions(ksp);
}
PC pc;
KSPGetPC(ksp, &pc);
initPC(pc, prefix + "->pc");
// show details of ksp object
if (info >= 10) {
KSPView(ksp, PETSC_VIEWER_STDOUT_WORLD);
}
}
// initialize a direct solver from the initfile
void initDirectSolver(KSP ksp, std::string prefix) const
{
KSPSetInitialGuessNonzero(ksp, PETSC_TRUE);
KSPSetType(ksp, KSPRICHARDSON);
PC pc;
KSPGetPC(ksp, &pc);
PCSetType(pc, PCLU);
auto matSolverType = Parameters::get<std::string>(prefix + "->mat solver");
if (matSolverType)
PETSc::PCFactorSetMatSolverType(pc, matSolverType.value().c_str());
else {
Mat Amat, Pmat;
KSPGetOperators(ksp, &Amat, &Pmat);
MatType type;
MatGetType(Pmat, &type);
if (std::strcmp(type, MATSEQAIJ) == 0)
PETSc::PCFactorSetMatSolverType(pc, MATSOLVERUMFPACK);
else if (std::strcmp(type, MATAIJ) == 0)
PETSc::PCFactorSetMatSolverType(pc, MATSOLVERMUMPS);
}
}
// initialize the preconditioner pc from the initfile
void initPC(PC pc, std::string prefix) const
{
// see https://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/PC/PCType.html
auto pcType = Parameters::get<std::string>(prefix);
std::string pcTypeStr = "default";
if (pcType) {
pcTypeStr = pcType.value();
PCSetType(pc, pcTypeStr.c_str());
}
if (pcTypeStr == "lu") {
// configure matsolverpackage
// see https://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatSolverType.html
auto matSolverType = Parameters::get<std::string>(prefix + "->mat solver");
if (matSolverType)
PETSc::PCFactorSetMatSolverType(pc, matSolverType.value().c_str());
} else if (pcTypeStr == "bjacobi") {
// configure sub solver
PetscInt n_local, first;
KSP* ksps;
PCSetUp(pc);
PCBJacobiGetSubKSP(pc, &n_local, &first, &ksps);
for (PetscInt i = 0; i < n_local; ++i)
initKSP(ksps[i], prefix + "->sub ksp");
} else if (pcTypeStr == "ksp") {
// configure sub ksp type
KSP ksp;
PCSetUp(pc);
PCKSPGetKSP(pc, &ksp);
initKSP(ksp, prefix + "->ksp");
}
auto pcPrefixParam = Parameters::get<std::string>(prefix + "->prefix");
if (pcPrefixParam) {
std::string pcPrefix = pcPrefixParam.value() + "_";
PCSetOptionsPrefix(pc, pcPrefix.c_str());
PCSetFromOptions(pc);
}
}
// provide initfile parameters for some PETSc KSP parameters
void initKSPParameters(KSP ksp, char const* ksptype, std::string prefix) const
{
// parameters for the Richardson solver
if (std::strcmp(ksptype, KSPRICHARDSON) == 0)
{
auto scale = Parameters::get<PetscReal>(prefix + "->scale");
if (scale)
KSPRichardsonSetScale(ksp, scale.value());
}
// parameters for the gmres solver
else if (std::strcmp(ksptype, KSPGMRES) == 0 ||
std::strcmp(ksptype, KSPFGMRES) == 0 ||
std::strcmp(ksptype, KSPLGMRES) == 0 ||
std::strcmp(ksptype, KSPDGGMRES) == 0 ||
std::strcmp(ksptype, KSPPGMRES) == 0)
{
auto restart = Parameters::get<PetscInt>(prefix + "->restart");
if (restart)
KSPGMRESSetRestart(ksp, restart.value());
auto gramschmidt = Parameters::get<std::string>(prefix + "->gramschmidt");
if (gramschmidt) {
if (gramschmidt.value() == "classical") {
KSPGMRESSetOrthogonalization(ksp, KSPGMRESClassicalGramSchmidtOrthogonalization);
auto refinementType = Parameters::get<std::string>(prefix + "->refinement type");
if (refinementType) {
if (refinementType.value() == "never")
KSPGMRESSetCGSRefinementType(ksp, KSP_GMRES_CGS_REFINE_NEVER);
else if (refinementType.value() == "ifneeded")
KSPGMRESSetCGSRefinementType(ksp, KSP_GMRES_CGS_REFINE_IFNEEDED);
else if (refinementType.value() == "always")
KSPGMRESSetCGSRefinementType(ksp, KSP_GMRES_CGS_REFINE_ALWAYS);
}
} else if (gramschmidt.value() == "modified") {
KSPGMRESSetOrthogonalization(ksp, KSPGMRESModifiedGramSchmidtOrthogonalization);
}
}
}
// parameters for the BiCGStab_ell solver
else if (std::strcmp(ksptype, KSPBCGSL) == 0)
{
auto ell = Parameters::get<PetscInt>(prefix + "->ell");
if (ell)
KSPBCGSLSetEll(ksp, ell.value());
}
// parameters for the GCR solver
else if (std::strcmp(ksptype, KSPGCR) == 0)
{
auto restart = Parameters::get<PetscInt>(prefix + "->restart");
if (restart)
KSPGCRSetRestart(ksp, restart.value());
}
}
private:
std::string prefix_;
int info_ = 0;
KSP ksp_;
bool initialized_ = false;
};
} // end namespace AMDiS
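Stripped of the initfile handling, init() and solve() above amount to the usual KSP workflow. A plain-PETSc sketch against PETSc >= 3.5 (not part of this commit; A, x, b are assumed to be assembled, error checking omitted):
#include <petscksp.h>
// Sketch: roughly what "[prefix]: fgmres", "[prefix]->rtol: 1.e-10",
// "[prefix]->max it: 100" and "[prefix]->pc: bjacobi" translate to on the PETSc side.
inline int solveOnce(Mat A, Vec x, Vec b)
{
  KSP ksp;
  KSPCreate(PETSC_COMM_WORLD, &ksp);
  KSPSetOperators(ksp, A, A);
  KSPSetType(ksp, KSPFGMRES);
  KSPSetTolerances(ksp, 1.e-10, PETSC_DEFAULT, PETSC_DEFAULT, 100);
  PC pc;
  KSPGetPC(ksp, &pc);
  PCSetType(pc, PCBJACOBI);
  KSPSolve(ksp, b, x);
  KSPConvergedReason reason;
  KSPGetConvergedReason(ksp, &reason);
  KSPDestroy(&ksp);
  return reason > 0 ? 0 : int(reason);   // same convention as PetscRunner::solve()
}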
#pragma once
#include <vector>
#include <petscksp.h>
#include <amdis/CreatorMap.hpp>
#include <amdis/linearalgebra/LinearSolver.hpp>
#include <amdis/linearalgebra/petsc/PetscRunner.hpp>
namespace AMDiS
{
/// Adds default creators for linear solvers for the PETSc backend.
template <class Traits>
class DefaultCreators< LinearSolverInterface<Traits> >
{
using Solver = typename LinearSolver<Traits, PetscRunner<Traits>>::Creator;
using Map = CreatorMap<LinearSolverInterface<Traits>>;
public:
static void init()
{
// see https://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/KSP/KSPType.html
static const std::vector<std::string> kspTypes_ = {
KSPCG, KSPFCG, KSPGMRES, KSPFGMRES, KSPPREONLY, KSPMINRES, KSPCGS, KSPBCGS, KSPCGNE
#ifdef KSPCHEBYSHEV
, KSPCHEBYSHEV
#endif
#ifdef KSPGROPPCG
, KSPGROPPCG
#endif
#ifdef KSPPIPECG
, KSPPIPECG
#endif
#ifdef KSPPIPECGRR
, KSPPIPECGRR
#endif
#ifdef KSPPIPELCG
, KSPPIPELCG
#endif
#ifdef KSPCGNASH
, KSPCGNASH
#endif
#ifdef KSPCGSTCG
, KSPCGSTCG
#endif
#ifdef KSPCGGLTR
, KSPCGGLTR
#endif
#ifdef KSPPIPEFCG
, KSPPIPEFCG
#endif
#ifdef KSPPIPEFGMRES
, KSPPIPEFGMRES
#endif
#ifdef KSPLGMRES
, KSPLGMRES
#endif
#ifdef KSPDGMRES
, KSPDGMRES
#endif
#ifdef KSPPGMRES
, KSPPGMRES
#endif
#ifdef KSPTCQMR
, KSPTCQMR
#endif
#ifdef KSPIBCGS
, KSPIBCGS
#endif
#ifdef KSPFBCGS
, KSPFBCGS
#endif
#ifdef KSPFBCGSR
, KSPFBCGSR
#endif
#ifdef KSPBCGSL
, KSPBCGSL
#endif
#ifdef KSPPIPEBCGS
, KSPPIPEBCGS
#endif
#ifdef KSPTFQMR
, KSPTFQMR
#endif
#ifdef KSPCR
, KSPCR
#endif
#ifdef KSPPIPECR
, KSPPIPECR
#endif
#ifdef KSPLSQR
, KSPLSQR
#endif
#ifdef KSPQCG
, KSPQCG
#endif
#ifdef KSPBICG
, KSPBICG
#endif
#ifdef KSPSYMMLQ
, KSPSYMMLQ
#endif
#ifdef KSPLCD
, KSPLCD
#endif
#ifdef KSPPYTHON
, KSPPYTHON
#endif
#ifdef KSPGCR
, KSPGCR
#endif
#ifdef KSPPIPEGCR
, KSPPIPEGCR
#endif
#ifdef KSPTSIRM
, KSPTSIRM
#endif
#ifdef KSPCGLS
, KSPCGLS
#endif
#ifdef KSPFETIDP
, KSPFETIDP
#endif
#ifdef KSPRICHARDSON
, KSPRICHARDSON
#endif
};
auto solver = new Solver;
Map::addCreator("default", solver);
Map::addCreator("direct", solver);
for (auto const& kspType : kspTypes_)
Map::addCreator(kspType, solver);
}
};
} // end namespace AMDiS
#pragma once
#include <type_traits>
#include <petscmat.h>
#include <petscvec.h>
#include <dune/grid/common/partitionset.hh>
#include <amdis/linearalgebra/petsc/Communication.hpp>
namespace AMDiS
{
/** Traits class for a linear solver for the system AX=B using an FE space described by a dune-functions Basis
* Contains typedefs specific to the PETSc backend.
*/
template <class Basis, class T = double>
struct BackendTraits
{
static_assert(std::is_convertible<T,PetscScalar>::value, "The coefficient type T must be convertible to PetscScalar");
using Mat = ::Mat;
using Vec = ::Vec;
using CoefficientType = PetscScalar;
using Comm = DistributedCommunication<Basis>;
using PartitionSet = Dune::Partitions::Interior;
};
} // end namespace AMDiS
#pragma once
#include <petscvec.h>
#include <dune/common/ftraits.hh>
#include <amdis/Output.hpp>
#include <amdis/common/FakeContainer.hpp>
#include <amdis/functions/Interpolate.hpp>
#include <amdis/operations/Assigner.hpp>
namespace AMDiS
{
/// The basic container that stores a base vector
template <class T>
class VectorBackend
{
using Communication = typename T::Comm;
template <class A>
using AssignMode = std::conditional_t<std::is_same<A,Assigner::plus_assign>::value,
std::integral_constant<::InsertMode, ADD_VALUES>,
std::integral_constant<::InsertMode, INSERT_VALUES>>;
public:
using Traits = T;
/// The type of the base vector
using BaseVector = ::Vec;
/// The type of the elements of the DOFVector
using value_type = PetscScalar;
/// The index/size - type
using size_type = PetscInt;
public:
/// Constructor. Constructs new BaseVector.
VectorBackend(std::shared_ptr<Communication> comm)
: comm_(std::move(comm))
{}
/// Move constructor
VectorBackend(VectorBackend&& other)
{
swap(*this, other);
}
/// Copy constructor
VectorBackend(VectorBackend const& other)
: comm_(other.comm_)
, initialized_(other.initialized_)
{
if (other.initialized_) {
VecDuplicate(other.vector_, &vector_);
VecCopy(other.vector_, vector_);
}
}
/// Destructor
~VectorBackend()
{
destroy();
}
/// Move assignment operator
VectorBackend& operator=(VectorBackend&& other)
{
swap(*this, other);
return *this;
}
/// Copy assignment operator
VectorBackend& operator=(VectorBackend const& other)
{
return *this = VectorBackend(other);
}
/// Return the data-vector \ref vector_
BaseVector const& vector() const
{
return vector_;
}
/// Return the data-vector \ref vector_
BaseVector& vector()
{
return vector_;
}
/// Return the number of entries in the local part of the vector
std::size_t localSize() const
{
return dofMap().localSize();
}
/// Return the number of entries in the global vector
std::size_t globalSize() const
{
return dofMap().globalSize();
}
/// Resize the \ref vector_ to the size \p s
// [[possibly collective]]
template <class SizeInfo>
void init(SizeInfo const&, bool clear)
{
Dune::Timer t;
if (!initialized_) {
// create the vector
create();
info(3, " create new vector needed {} seconds", t.elapsed());
t.reset();
}
else {
PetscInt localSize = 0, globalSize = 0;
VecGetSize(vector_, &globalSize);
VecGetLocalSize(vector_, &localSize);
// re-create the vector
if (std::size_t(localSize) != dofMap().localSize() ||
std::size_t(globalSize) != dofMap().globalSize()) {
destroy();
create();
info(3, " destroy old and create new vector needed {} seconds", t.elapsed());
t.reset();
}
}
if (clear)
setZero();
initialized_ = true;
}
/// Finish assembly. Must be called before vector can be used in a KSP
// [[collective]]
void finish()
{
Dune::Timer t;
VecAssemblyBegin(vector_);
VecAssemblyEnd(vector_);
info(3, " finish vector assembly needed {} seconds", t.elapsed());
}
/// Update the ghost regions with values from the owning process
// [[collective]]
void synchronize()
{
Dune::Timer t;
VecGhostUpdateBegin(vector_, INSERT_VALUES, SCATTER_FORWARD);
VecGhostUpdateEnd(vector_, INSERT_VALUES, SCATTER_FORWARD);
info(3, " synchronize vector needed {} seconds", t.elapsed());
}
/// Access the entry \p i of the \ref vector with read-only-access.
// [[not collective]]
PetscScalar at(std::size_t i) const
{
PetscScalar y = 0;
gather(std::array<std::size_t,1>{i}, &y);
return y;
}
/// Access the entry \p i of the \ref vector with write-access.
// [[not collective]]
template <class Assign>
void insert(std::size_t dof, PetscScalar value, Assign)
{
assert(initialized_);
VecSetValue(vector_, dofMap().global(dof), value, AssignMode<Assign>::value);
}
/// Collect all values from this vector to indices given by `localInd` and store it into the output buffer.
// [[not collective]]
template <class IndexRange, class OutputIterator>
void gather(IndexRange const& localInd, OutputIterator buffer) const
{
forEach(localInd, [&buffer](auto&&, auto const& value) { *buffer = value; ++buffer; });
}
/// Store all values given by `localVal` with indices `localInd` in the vector
/**
* Store only those values with mask != 0. Use an assign mode for the storage. It is not allowed
* to switch the assign mode without calling VecAssemblyBegin and VecAssemblyEnd in between.
**/
// [[not collective]]
template <class IndexRange, class LocalValues, class MaskRange, class Assign>
void scatter(IndexRange const& localInd, LocalValues const& localVal, MaskRange const& mask, Assign)
{
static_assert(std::is_same<typename LocalValues::value_type, PetscScalar>::value, "");
assert(initialized_);
assert(localInd.size() == localVal.size());
// map local to global indices
std::vector<PetscInt> globalInd;
globalInd.reserve(localInd.size());
auto ind_it = std::begin(localInd);
auto mask_it = std::begin(mask);
for (; ind_it != std::end(localInd); ++mask_it, ++ind_it)
globalInd.push_back(*mask_it ? dofMap().global(*ind_it) : -1);
if (! std::is_same<MaskRange, FakeContainer<bool,true>>::value)
VecSetOption(vector_, VEC_IGNORE_NEGATIVE_INDICES, PETSC_TRUE);
VecSetValues(vector_, globalInd.size(), globalInd.data(), localVal.data(), AssignMode<Assign>::value);
VecSetOption(vector_, VEC_IGNORE_NEGATIVE_INDICES, PETSC_FALSE);
}
/// Apply the functor `f` to each vector entry with local index in the `localInd` range.
// [[not collective]]
template <class IndexRange, class Func>
void forEach(IndexRange const& localInd, Func&& f) const
{
// Obtains the local ghosted representation of a parallel vector
Vec localGhostedVec = nullptr;
VecGhostGetLocalForm(vector_, &localGhostedVec);
assert(localGhostedVec != nullptr);
// A pointer to a contiguous array that contains this processor's portion of the vector data.
PetscScalar *ptr;
VecGetArray(localGhostedVec, &ptr);
for (auto const& dof : localInd)
{
if (dofMap().owner(dof)) {
// the index is a purely local entry
f(dof, ptr[dofMap().dofToLocal(dof)]);
}
else {
// ghost entries are stored at the end of the array
f(dof, ptr[dofMap().localSize() + dofMap().dofToGhost(dof)]);
}
}
VecRestoreArray(localGhostedVec, &ptr);
VecGhostRestoreLocalForm(vector_, &localGhostedVec);
}
/// Set all entries to \p value, including the ghost entries
// [[not collective]]
void set(PetscScalar value)
{
Vec localGhostedVec = nullptr;
VecGhostGetLocalForm(vector_, &localGhostedVec);
assert(localGhostedVec != nullptr);
VecSet(localGhostedVec, value);
VecGhostRestoreLocalForm(vector_, &localGhostedVec);
}
/// Zero all entries, including the ghost entries
// [[not collective]]
void setZero()
{
Vec localGhostedVec = nullptr;
VecGhostGetLocalForm(vector_, &localGhostedVec);
assert(localGhostedVec != nullptr);
VecZeroEntries(localGhostedVec);
VecGhostRestoreLocalForm(vector_, &localGhostedVec);
}
friend void swap(VectorBackend& lhs, VectorBackend& rhs)
{
std::swap(lhs.comm_, rhs.comm_);
std::swap(lhs.vector_, rhs.vector_);
std::swap(lhs.initialized_, rhs.initialized_);
}
private:
// The local-to-global map
auto const& dofMap() const
{
return comm_->dofMap();
}
// An MPI Communicator or PETSC_COMM_SELF
MPI_Comm comm() const
{
#if HAVE_MPI
return Environment::comm();
#else
return PETSC_COMM_SELF;
#endif
}
// Create a new Vector
void create()
{
VecCreateGhost(comm(),
dofMap().localSize(), dofMap().globalSize(), dofMap().ghostSize(), dofMap().ghostIndices().data(),
&vector_);
}
// Destroy an old vector if created before
void destroy()
{
if (initialized_)
VecDestroy(&vector_);
initialized_ = false;
}
private:
std::shared_ptr<Communication> comm_;
/// The data-vector
mutable Vec vector_ = nullptr;
bool initialized_ = false;
};
} // end namespace AMDiS
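The vector backend builds on PETSc's ghosted vectors; create(), finish() and synchronize() above map directly onto the calls below. A stand-alone sketch with hypothetical sizes (not part of this commit):
#include <petscvec.h>
// Sketch: every rank owns 4 entries and additionally sees one ghost entry,
// namely the first entry owned by the next rank (wrapping around).
inline void ghostedVectorDemo()
{
  PetscMPIInt rank = 0, size = 1;
  MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
  MPI_Comm_size(PETSC_COMM_WORLD, &size);
  PetscInt nOwned = 4;
  PetscInt ghosts[1] = { ((rank + 1) % size) * nOwned };
  Vec v;
  VecCreateGhost(PETSC_COMM_WORLD, nOwned, PETSC_DECIDE, 1, ghosts, &v);
  VecSetValue(v, rank * nOwned, 1.0, INSERT_VALUES);         // write via global index
  VecAssemblyBegin(v); VecAssemblyEnd(v);                    // cf. finish()
  VecGhostUpdateBegin(v, INSERT_VALUES, SCATTER_FORWARD);    // cf. synchronize()
  VecGhostUpdateEnd(v, INSERT_VALUES, SCATTER_FORWARD);
  Vec local;
  VecGhostGetLocalForm(v, &local);   // owned entries first, ghost entries appended
  VecGhostRestoreLocalForm(v, &local);
  VecDestroy(&v);
}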
......@@ -100,7 +100,7 @@ dune_add_test(SOURCES OperationsTest.cpp
dune_add_test(SOURCES OperatorsTest.cpp
LINK_LIBRARIES amdis)
if(BACKEND STREQUAL "ISTL")
if(BACKEND STREQUAL "PETSC" OR BACKEND STREQUAL "ISTL")
foreach(_GRID RANGE 6)
dune_add_test(NAME "ParallelIndexSetTest_${_GRID}"
SOURCES ParallelIndexSetTest.cpp
......@@ -114,6 +114,20 @@ endforeach()
unset(_GRID)
endif()
if(BACKEND STREQUAL "PETSC")
foreach(_GRID RANGE 6)
dune_add_test(NAME "PETScCommTest_${_GRID}"
SOURCES PETScCommTest.cpp
COMPILE_DEFINITIONS "GRID_ID=${_GRID}"
LABELS "PETScCommTest"
LINK_LIBRARIES amdis
MPI_RANKS 2 3 4
TIMEOUT 300
CMAKE_GUARD MPI_FOUND)
endforeach()
unset(_GRID)
endif()
dune_add_test(SOURCES ProblemStatTest.cpp
LINK_LIBRARIES amdis)
......
#include <amdis/AMDiS.hpp>
#include <amdis/Integrate.hpp>
#include <amdis/LocalOperators.hpp>
#include <amdis/ProblemStat.hpp>
#include <amdis/common/parallel/Communicator.hpp>
#include <amdis/common/parallel/RequestOperations.hpp>
#include <dune/grid/onedgrid.hh>
#include <dune/grid/yaspgrid.hh>
#if HAVE_DUNE_SPGRID
#include <dune/grid/spgrid.hh>
#endif
#if HAVE_DUNE_ALUGRID
#include <dune/alugrid/grid.hh>
#endif
#if HAVE_DUNE_UGGRID
#include <dune/grid/uggrid.hh>
#endif
#include <dune/functions/functionspacebases/lagrangedgbasis.hh>
#include <dune/functions/functionspacebases/powerbasis.hh>
#include <dune/functions/functionspacebases/compositebasis.hh>
#include "Tests.hpp"
using namespace AMDiS;
template <class Basis>
void test_basis(Basis const& basis, std::string gridName, std::string basisName)
{
msg("test grid={}, basis={}...", gridName, basisName);
Mpi::Communicator world(Environment::comm());
using Traits = BackendTraits<Basis>;
using Comm = typename Traits::Comm;
auto comm = CommunicationCreator<Comm>::create(basis, "communication");
auto const& dofMap = comm->dofMap();
using GlobalIndex = typename Comm::DofMap::GlobalIndex;
std::vector<std::vector<GlobalIndex>> data(world.size());
data[world.rank()].reserve(dofMap.localSize());
auto lv = basis.localView();
for (auto const& e : elements(basis.gridView())) {
lv.bind(e);
for (std::size_t i = 0; i < lv.size(); ++i)
data[world.rank()].push_back(dofMap.global(lv.index(i)));
}
Mpi::Tag tag{3912};
if (world.rank() != 0)
world.send(data[world.rank()], 0, tag);
else {
std::vector<Mpi::Request> recvRequests;
for (int p = 1; p < world.size(); ++p)
recvRequests.emplace_back( world.irecv(data[p], p, tag) );
Mpi::wait_all(recvRequests.begin(), recvRequests.end());
std::set<GlobalIndex> globalDofs;
for (auto const& d : data)
for (auto const& g : d)
globalDofs.insert(g);
// all global DOF indices are collected
AMDIS_TEST( globalDofs.size() == dofMap.globalSize() );
// global DOFs form a consecutive range
AMDIS_TEST( *std::min_element(globalDofs.begin(), globalDofs.end()) == GlobalIndex(0u) );
AMDIS_TEST( *std::max_element(globalDofs.begin(), globalDofs.end()) == GlobalIndex(dofMap.globalSize()-1) );
}
}
std::string numCells(int dim)
{
std::string result = "";
for (int i = 0; i < dim; ++i)
result += "8 ";
return result;
}
template <class Grid>
void test_grid(std::string gridName)
{
Parameters::set("mesh->num cells", numCells(Grid::dimension)); // 8 8 8
Parameters::set("mesh->overlap", "0");
auto grid = MeshCreator<Grid>("mesh").create();
grid->loadBalance();
auto gv = grid->leafGridView();
auto const& indexSet = gv.indexSet();
bool allSimplex = true;
for (auto const& type : indexSet.types(0))
allSimplex = allSimplex && type.isSimplex();
// continuous basis
Tools::for_range<1,4>([&](auto const k) {
if (allSimplex || k < 3) {
using namespace Dune::Functions::BasisFactory;
auto basis = makeBasis(gv, lagrange<k>());
test_basis(basis, gridName, "lagrange<" + std::to_string(k.value) + ">");
}
});
// discontinuous basis
Tools::for_range<1,5>([&](auto const k) {
using namespace Dune::Functions::BasisFactory;
auto basis = makeBasis(gv, lagrangeDG<k>());
test_basis(basis, gridName, "lagrangeDG<" + std::to_string(k.value) + ">");
});
// Taylor-Hood basis
{
using namespace Dune::Functions::BasisFactory;
auto basis = makeBasis(gv, composite(power<Grid::dimensionworld>(lagrange<2>(), flatInterleaved()), lagrange<1>(), flatLexicographic()));
test_basis(basis, gridName, "TaylorHood");
}
}
int main(int argc, char** argv)
{
Environment env(argc, argv);
#if GRID_ID == 0
test_grid<Dune::YaspGrid<2>>("YaspGrid<2>");
test_grid<Dune::YaspGrid<3>>("YaspGrid<3>");
#elif GRID_ID == 1 && HAVE_DUNE_UGGRID
Parameters::set("mesh->structured", "cube");
test_grid<Dune::UGGrid<2>>("UGGrid<2,cube>");
test_grid<Dune::UGGrid<3>>("UGGrid<3,cube>");
#elif GRID_ID == 2 && HAVE_DUNE_ALUGRID
Parameters::set("mesh->structured", "cube");
// test_grid<Dune::ALUGrid<2,2,Dune::cube,Dune::nonconforming>>("ALUGrid<2,cube,nonconforming>");
// test_grid<Dune::ALUGrid<3,3,Dune::cube,Dune::nonconforming>>("ALUGrid<3,cube,nonconforming>");
//test_grid<Dune::ALUGrid<2,3,Dune::cube,Dune::nonconforming>>();
#elif GRID_ID == 3 && HAVE_DUNE_SPGRID
test_grid<Dune::SPGrid<double,2>>("SPGrid<2>");
test_grid<Dune::SPGrid<double,3>>("SPGrid<3>");
#elif GRID_ID == 4 && HAVE_DUNE_UGGRID
Parameters::set("mesh->structured", "simplex");
test_grid<Dune::UGGrid<2>>("UGGrid<2,simplex>");
test_grid<Dune::UGGrid<3>>("UGGrid<3,simplex>");
#elif GRID_ID == 5 && HAVE_DUNE_ALUGRID
Parameters::set("mesh->structured", "simplex");
// test_grid<Dune::ALUGrid<2,2,Dune::simplex,Dune::nonconforming>>("ALUGrid<2,simplex,nonconforming>");
// test_grid<Dune::ALUGrid<3,3,Dune::simplex,Dune::nonconforming>>("ALUGrid<3,simplex,nonconforming>");
//test_grid<Dune::ALUGrid<2,3,Dune::simplex,Dune::nonconforming>>();
#elif GRID_ID == 6 && HAVE_DUNE_ALUGRID
Parameters::set("mesh->structured", "simplex");
test_grid<Dune::ALUGrid<2,2,Dune::simplex,Dune::conforming>>("ALUGrid<2,simplex,conforming>");
test_grid<Dune::ALUGrid<3,3,Dune::simplex,Dune::conforming>>("ALUGrid<3,simplex,conforming>");
//test_grid<Dune::ALUGrid<2,3,Dune::simplex,Dune::conforming>>();
#endif
return env.mpiRank() == 0 ? report_errors() : 0;
}