From da4acc4274b38d1df927eb6b2b4a0a22478ca05c Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Tue, 16 Oct 2012 14:07:20 +0000
Subject: [PATCH] Add support for BDDCML 2.0 and work on scalable FETI-DP in
 3d using arithmetic constraints.

---
 AMDiS/src/DOFMatrix.cc                        |  33 +-----
 AMDiS/src/DOFMatrix.h                         |  11 --
 AMDiS/src/DOFVector.cc                        |   4 +-
 AMDiS/src/DOFVector.h                         |  29 +----
 AMDiS/src/DOFVector.hh                        |   4 -
 AMDiS/src/DirichletBC.cc                      |  30 +++--
 AMDiS/src/parallel/BddcMlSolver.cc            |   7 +-
 AMDiS/src/parallel/MeshDistributor.cc         |  52 +--------
 AMDiS/src/parallel/MeshDistributor.h          |   8 --
 AMDiS/src/parallel/PetscProblemStat.cc        |  18 ---
 AMDiS/src/parallel/PetscProblemStat.h         |   4 -
 AMDiS/src/parallel/PetscSolver.cc             |   5 +-
 AMDiS/src/parallel/PetscSolver.h              |  20 +++-
 AMDiS/src/parallel/PetscSolverFeti.cc         |  16 +--
 AMDiS/src/parallel/PetscSolverFeti.h          |   5 -
 AMDiS/src/parallel/PetscSolverGlobalMatrix.cc | 106 ++++++++++++++++--
 AMDiS/src/parallel/PetscSolverGlobalMatrix.h  |  12 +-
 17 files changed, 154 insertions(+), 210 deletions(-)

diff --git a/AMDiS/src/DOFMatrix.cc b/AMDiS/src/DOFMatrix.cc
index a0061a3d..af9d35ae 100644
--- a/AMDiS/src/DOFMatrix.cc
+++ b/AMDiS/src/DOFMatrix.cc
@@ -26,9 +26,6 @@
 #include "BoundaryManager.h"
 #include "Assembler.h"
 #include "Serializer.h"
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-#include "parallel/ParallelDofMapping.h"
-#endif
 
 namespace AMDiS {
 
@@ -209,21 +206,6 @@ namespace AMDiS {
 
     using namespace mtl;
 
-#if 0
-    if (MPI::COMM_WORLD.Get_rank() == 0) {
-      std::cout << "----- PRINT MAT " << rowElInfo->getElement()->getIndex() << "--------" << std::endl;
-      std::cout << elMat << std::endl;
-      std::cout << "rows: ";
-      for (int i = 0; i < rowIndices.size(); i++)
-        std::cout << rowIndices[i] << " ";
-      std::cout << std::endl;
-      std::cout << "cols: ";
-      for (int i = 0; i < colIndices.size(); i++)
-        std::cout << colIndices[i] << " ";
-      std::cout << std::endl;
-    }
-#endif
-
     for (int i = 0; i < nRow; i++) {
       DegreeOfFreedom row = rowIndices[i];
 
@@ -552,15 +534,12 @@ namespace AMDiS {
 
     // Do the following only in sequential code. In parallel mode, the specific
    // solver method must take care of Dirichlet boundary conditions.
-
+#ifndef HAVE_PARALLEL_DOMAIN_AMDIS
     inserter_type &ins = *inserter;
     for (std::set<int>::iterator it = dirichletDofs.begin();
          it != dirichletDofs.end(); ++it)
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      //    if (dofMap->isRankDof(*it))
-      if (dofMap->isSet(*it))
+      ins[*it][*it] = 1.0;
 #endif
-        ins[*it][*it] = 1.0;
   }
 
 
@@ -583,12 +562,4 @@ namespace AMDiS {
 
     dirichletDofs.clear();
   }
-
-
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-  void DOFMatrix::setDofMap(FeSpaceDofMap& m)
-  {
-    dofMap = &m;
-  }
-#endif
-
 }
diff --git a/AMDiS/src/DOFMatrix.h b/AMDiS/src/DOFMatrix.h
index e9d0a3cd..ec6b1914 100644
--- a/AMDiS/src/DOFMatrix.h
+++ b/AMDiS/src/DOFMatrix.h
@@ -415,10 +415,6 @@ namespace AMDiS {
     ///
     int memsize();
 
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    void setDofMap(FeSpaceDofMap& m);
-#endif
-
   protected:
     /// Pointer to a FiniteElemSpace with information about corresponding row DOFs
     /// and basis functions
@@ -480,13 +476,6 @@ namespace AMDiS {
     /// elements during assembling.
     int nnzPerRow;
 
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    /// Is used in parallel computations to check whether specific DOFs in the
-    /// row FE spaces are owned by the rank or not. This is used to ensure that
-    /// Dirichlet BC is handled correctly in parallel computations.
-    FeSpaceDofMap *dofMap;
-#endif
-
     /// Inserter object: implemented as pointer, allocated and deallocated as needed
     inserter_type *inserter;
diff --git a/AMDiS/src/DOFVector.cc b/AMDiS/src/DOFVector.cc
index 9023f6ec..358cc1e4 100644
--- a/AMDiS/src/DOFVector.cc
+++ b/AMDiS/src/DOFVector.cc
@@ -285,10 +285,10 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);
     }
 
-  #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
     double localResult = result;
     MPI::COMM_WORLD.Allreduce(&localResult, &result, 1, MPI_DOUBLE, MPI_SUM);
-  #endif
+#endif
 
     return result;
   }
diff --git a/AMDiS/src/DOFVector.h b/AMDiS/src/DOFVector.h
index e7fc40b3..d18da641 100644
--- a/AMDiS/src/DOFVector.h
+++ b/AMDiS/src/DOFVector.h
@@ -42,10 +42,6 @@
 #include "BasisFunction.h"
 #include "FiniteElemSpace.h"
 #include "SurfaceQuadrature.h"
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-#include <mpi.h>
-#include "parallel/ParallelDofMapping.h"
-#endif
 
 namespace AMDiS {
 
@@ -61,11 +57,7 @@ namespace AMDiS {
       elementVector(3),
       boundaryManager(NULL),
       nBasFcts(0)
-    {
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      dofMap = NULL;
-#endif
-    }
+    {}
 
     DOFVectorBase(const FiniteElemSpace *f, string n);
 
@@ -248,21 +240,6 @@ namespace AMDiS {
       return dirichletDofValues;
     }
 
-
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    void setDofMap(FeSpaceDofMap& m)
-    {
-      dofMap = &m;
-    }
-
-    inline bool isRankDof(DegreeOfFreedom dof)
-    {
-      TEST_EXIT_DBG(dofMap)("No rank dofs set!\n");
-
-      return dofMap->isRankDof(dof);
-    }
-#endif
-
   protected:
     ///
     const FiniteElemSpace *feSpace;
@@ -292,10 +269,6 @@ namespace AMDiS {
     int dim;
 
     map<DegreeOfFreedom, T> dirichletDofValues;
-
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    FeSpaceDofMap *dofMap;
-#endif
   };
diff --git a/AMDiS/src/DOFVector.hh b/AMDiS/src/DOFVector.hh
index 91df10cc..f70b4ecc 100644
--- a/AMDiS/src/DOFVector.hh
+++ b/AMDiS/src/DOFVector.hh
@@ -1060,10 +1060,6 @@ namespace AMDiS {
       this->boundaryManager = NULL;
     }
 
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    this->dofMap = rhs.dofMap;
-#endif
-
     return *this;
   }
diff --git a/AMDiS/src/DirichletBC.cc b/AMDiS/src/DirichletBC.cc
index 38952e59..bb9b779d 100644
--- a/AMDiS/src/DirichletBC.cc
+++ b/AMDiS/src/DirichletBC.cc
@@ -64,24 +64,20 @@ namespace AMDiS {
     const BasisFunction *basFcts = rowFeSpace->getBasisFcts();
 
     for (int i = 0; i < nBasFcts; i++) {
-
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      if (vector->isRankDof(dofIndices[i]))
-#endif
-        if (localBound[i] == boundaryType) {
-          double value = 0.0;
-          if (f) {
-            elInfo->coordToWorld(*(basFcts->getCoords(i)), worldCoords);
-            value = (*f)(worldCoords);
-          } else {
-            if (dofVec)
-              value = (*dofVec)[dofIndices[i]];
-            else
-              ERROR_EXIT("There is something wrong!\n");
-          }
-          vector->setDirichletDofValue(dofIndices[i], value);
-          (*vector)[dofIndices[i]] = value;
+      if (localBound[i] == boundaryType) {
+        double value = 0.0;
+        if (f) {
+          elInfo->coordToWorld(*(basFcts->getCoords(i)), worldCoords);
+          value = (*f)(worldCoords);
+        } else {
+          if (dofVec)
+            value = (*dofVec)[dofIndices[i]];
+          else
+            ERROR_EXIT("There is something wrong!\n");
         }
+        vector->setDirichletDofValue(dofIndices[i], value);
+        (*vector)[dofIndices[i]] = value;
+      }
     }
   }
diff --git a/AMDiS/src/parallel/BddcMlSolver.cc b/AMDiS/src/parallel/BddcMlSolver.cc
index 5629c595..f7d705fc 100644
--- a/AMDiS/src/parallel/BddcMlSolver.cc
+++ b/AMDiS/src/parallel/BddcMlSolver.cc
@@ -232,6 +232,7 @@ namespace AMDiS {
 
     // matrix type (set here to unsymmetric)
     int matrixtype = 0;
+    Parameters::get("parallel->bddcml->matrix type", matrixtype);
 
     // Non zero structure of matrix
     vector<int> i_sparse;
@@ -352,12 +353,14 @@ namespace AMDiS {
                                &luser_constraints2);
 
 
-    int use_defaults_int = 1;
+    int use_defaults_int = 0;
     int parallel_division_int = 1;
-    int use_arithmetic_int = 0;
+    int use_arithmetic_int = 1;
     int use_adaptive_int = 0;
     int use_user_constraint_int = 0;
 
+    Parameters::get("parallel->bddcml->arithmetic constraints", use_arithmetic_int);
+
     //     MSG("call to \"bddcml_setup_preconditioner\" with the following arguments (each in one line):\n");
     //     MSG("  %d\n", matrixtype);
     //     MSG("  %d\n", use_defaults_int);
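
The two BDDCML hunks above route the new solver settings through Parameters::get, so they can
be chosen at runtime instead of being hard-coded. A minimal sketch of the matching init file
entries, assuming the usual AMDiS "key: value" init file syntax with "%" comments (the value
semantics of matrixtype are those of the BDDCML interface, with 0 denoting a general
unsymmetric matrix):

    % BDDCML 2.0 options introduced by this patch
    parallel->bddcml->matrix type: 0
    parallel->bddcml->arithmetic constraints: 1

Note that use_defaults_int is now 0 and use_arithmetic_int defaults to 1, i.e. arithmetic
constraints are enabled unless the init file switches them off.
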
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 8e2bade9..90569e02 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -189,8 +189,6 @@ namespace AMDiS {
 
     updateLocalGlobalNumbering();
 
-    setRankDofs();
-
     elObjDb.setData(partitionMap, levelData);
 
 #if (DEBUG != 0)
@@ -351,9 +349,6 @@ namespace AMDiS {
 #endif
     }
 
-    // Set DOF rank information to all matrices and vectors.
-    setRankDofs();
-
     // And delete some data if there is no mesh adaptivity.
     if (!meshAdaptivity)
       elObjDb.clear();
@@ -475,10 +470,8 @@ namespace AMDiS {
     // DOFs object to the matrices and vectors of the added stationary problem,
     // and to remove the periodic boundary conditions on these objects.
 
-    if (initialized) {
-      setRankDofs(probStat, dofMap);
+    if (initialized)
       removePeriodicBoundaryConditions(probStat);
-    }
   }
 
 
@@ -731,49 +724,6 @@ namespace AMDiS {
   }
 
 
-  void MeshDistributor::setRankDofs(ProblemStatSeq *probStat,
-                                    ParallelDofMapping &rankDofMap)
-  {
-    FUNCNAME("MeshDistributor::setRankDofs()");
-
-    int nComponents = probStat->getNumComponents();
-    for (int i = 0; i < nComponents; i++) {
-      for (int j = 0; j < nComponents; j++)
-        if (probStat->getSystemMatrix(i, j)) {
-          const FiniteElemSpace *rowFeSpace =
-            probStat->getSystemMatrix(i, j)->getRowFeSpace();
-
-          TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), rowFeSpace) != feSpaces.end())
-            ("Should not happen!\n");
-
-          probStat->getSystemMatrix(i, j)->setDofMap(rankDofMap[rowFeSpace]);
-        }
-
-
-      TEST_EXIT_DBG(probStat->getRhs()->getDOFVector(i))("No RHS vector!\n");
-      TEST_EXIT_DBG(probStat->getSolution()->getDOFVector(i))
-        ("No solution vector!\n");
-      TEST_EXIT_DBG(probStat->getRhsVector(i)->getFeSpace() ==
-                    probStat->getSolution(i)->getFeSpace())
-        ("Should not happen!\n");
-
-      const FiniteElemSpace *feSpace = probStat->getRhsVector(i)->getFeSpace();
-      TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), feSpace) != feSpaces.end())
-        ("Should not happen!\n");
-
-      probStat->getRhsVector(i)->setDofMap(rankDofMap[feSpace]);
-      probStat->getSolution(i)->setDofMap(rankDofMap[feSpace]);
-    }
-  }
-
-
-  void MeshDistributor::setRankDofs()
-  {
-    for (unsigned int i = 0; i < problemStat.size(); i++)
-      setRankDofs(problemStat[i], dofMap);
-  }
-
-
   void MeshDistributor::removePeriodicBoundaryConditions()
   {
     FUNCNAME("MeshDistributor::removePeriodicBoundaryConditions()");
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index 139d1935..df2297cf 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -347,14 +347,6 @@ namespace AMDiS {
                           DofComm &dcom,
                           const FiniteElemSpace *feSpace);
 
-    /// Sets \ref isRankDof to all matrices and rhs vectors in a given
-    /// stationary problem.
-    void setRankDofs(ProblemStatSeq *probStat, ParallelDofMapping &rankDofMap);
-
-    /// Sets \ref isRankDof to all matrices and rhs vectors in all
-    /// stationary problems.
-    void setRankDofs();
-
   protected:
     void addProblemStat(ProblemStatSeq *probStat);
diff --git a/AMDiS/src/parallel/PetscProblemStat.cc b/AMDiS/src/parallel/PetscProblemStat.cc
index 44d1a01b..73dd638f 100644
--- a/AMDiS/src/parallel/PetscProblemStat.cc
+++ b/AMDiS/src/parallel/PetscProblemStat.cc
@@ -76,24 +76,6 @@ namespace AMDiS {
   }
 
 
-  void PetscProblemStat::buildAfterCoarsen(AdaptInfo *adaptInfo, Flag flag,
-                                           bool assembleMatrix,
-                                           bool assembleVector)
-  {
-    FUNCNAME("ParallelProblemStatBase::buildAfterCoarsen()");
-
-    TEST_EXIT_DBG(MeshDistributor::globalMeshDistributor != NULL)
-      ("Should not happen!\n");
-
-    MeshDistributor::globalMeshDistributor->checkMeshChange();
-    MeshDistributor::globalMeshDistributor->setRankDofs(this,
-                                                        petscSolver->getInteriorMap());
-    ProblemStatSeq::buildAfterCoarsen(adaptInfo, flag,
-                                      assembleMatrix,
-                                      assembleVector);
-  }
-
-
   void PetscProblemStat::solve(AdaptInfo *adaptInfo,
                                bool createMatrixData,
                                bool storeMatrixData)
diff --git a/AMDiS/src/parallel/PetscProblemStat.h b/AMDiS/src/parallel/PetscProblemStat.h
index 53658648..a844b5c9 100644
--- a/AMDiS/src/parallel/PetscProblemStat.h
+++ b/AMDiS/src/parallel/PetscProblemStat.h
@@ -49,10 +49,6 @@ namespace AMDiS {
       delete petscSolver;
     }
 
-    void buildAfterCoarsen(AdaptInfo *adaptInfo, Flag flag,
-                           bool assembleMatrix,
-                           bool assembleVector);
-
     void initialize(Flag initFlag,
                     ProblemStatSeq *adoptProblem = NULL,
                     Flag adoptFlag = INIT_NOTHING);
diff --git a/AMDiS/src/parallel/PetscSolver.cc b/AMDiS/src/parallel/PetscSolver.cc
index 918685ed..510d8c8f 100644
--- a/AMDiS/src/parallel/PetscSolver.cc
+++ b/AMDiS/src/parallel/PetscSolver.cc
@@ -23,9 +23,10 @@ namespace AMDiS {
   PetscSolver::PetscSolver()
     : ParallelCoarseSpaceMatVec(),
       kspPrefix(""),
-      removeRhsNullspace(false),
+      removeRhsNullspace(false),
+      hasConstantNullspace(false),
       isSymmetric(false),
-      hasConstantNullspace(false)
+      handleDirichletRows(true)
   {
     string kspStr = "";
     Parameters::get("parallel->solver->petsc->ksp", kspStr);
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index 064fa73e..0eb404ec 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -62,7 +62,8 @@ namespace AMDiS {
      */
     virtual void fillPetscMatrix(Matrix<DOFMatrix*> *mat) = 0;
 
-    ///
+    /// This function is just a small wrapper that creates a 1x1 matrix that
+    /// contains exactly one DOFMatrix and then calls \ref fillPetscMatrix
     void fillPetscMatrix(DOFMatrix* mat);
 
     /** \brief
@@ -85,11 +86,6 @@ namespace AMDiS {
     /// Destroys all vector data structures.
     virtual void destroyVectorData() = 0;
 
-    virtual ParallelDofMapping &getInteriorMap()
-    {
-      return meshDistributor->getDofMap();
-    }
-
     virtual Flag getBoundaryDofRequirement()
     {
       return 0;
@@ -139,6 +135,12 @@ namespace AMDiS {
       constNullspaceComponent.push_back(component);
     }
 
+    /// Informs the solver whether it has to handle Dirichlet rows or not.
+    void setHandleDirichletRows(bool b)
+    {
+      handleDirichletRows = b;
+    }
+
   protected:
     /** \brief
     * Copies between two PETSc vectors by using different index sets for the
@@ -177,6 +179,12 @@ namespace AMDiS {
 
     bool isSymmetric;
 
+    /// If true, Dirichlet rows are handled by the solver accordingly. Setting
+    /// this value to false only makes sense if this solver is used as a
+    /// subsolver and the surrounding main solver already handles Dirichlet
+    /// rows in some way.
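
The handleDirichletRows flag added above is intended for nested solver setups, where an outer
solver already eliminates Dirichlet rows and the inner solver must not modify them a second
time. A minimal usage sketch of the call pattern, mirroring the FETI-DP change in the following
PetscSolverFeti.cc hunk (class and member names as in this patch, shown as a fragment of a
solver setup routine):

    // The FETI-DP solver handles Dirichlet rows itself, so its interior
    // subdomain solver is told not to touch them.
    PetscSolver *subdomain = new PetscSolverGlobalMatrix();
    subdomain->setSymmetric(isSymmetric);
    subdomain->setHandleDirichletRows(false);

Solvers that are used stand-alone keep the default handleDirichletRows = true set in the
PetscSolver constructor.
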
+    bool handleDirichletRows;
+
     vector<int> constNullspaceComponent;
   };
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 22906e3f..583a1f63 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -101,6 +101,7 @@ namespace AMDiS {
     if (subdomain == NULL) {
       subdomain = new PetscSolverGlobalMatrix();
       subdomain->setSymmetric(isSymmetric);
+      subdomain->setHandleDirichletRows(false);
 
       if (meshLevel == 0) {
         subdomain->setMeshDistributor(meshDistributor,
@@ -134,11 +135,6 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::createDirichletData()");
 
-    bool removeDirichletRows = false;
-    Parameters::get("parallel->feti->remove dirichlet", removeDirichletRows);
-    if (!removeDirichletRows)
-      return;
-
     int nComponents = mat.getSize();
     for (int i = 0; i < nComponents; i++) {
       DOFMatrix* dofMat = mat[i][i];
@@ -735,13 +731,15 @@ namespace AMDiS {
     InteriorBoundary &intBound = meshDistributor->getIntBoundary();
     std::set<BoundaryObject> allEdges;
     for (InteriorBoundary::iterator it(intBound.getOwn()); !it.end(); ++it) {
-      if (it->rankObj.subObj == EDGE &&
-          testWirebasketEdge(it->rankObj, feSpaces[0]) &&
+      if ((it->rankObj.subObj == FACE ||
+           (it->rankObj.subObj == EDGE &&
+            testWirebasketEdge(it->rankObj, feSpaces[0]))) &&
           allEdges.count(it->rankObj) == 0) {
         bool dirichletOnlyEdge = true;
 
         DofContainer edgeDofs;
         it->rankObj.el->getAllDofs(feSpaces[0], it->rankObj, edgeDofs);
+
         for (DofContainer::iterator dit = edgeDofs.begin();
              dit != edgeDofs.end(); ++dit) {
           if (dirichletRows[feSpaces[0]].count(**dit) == 0) {
@@ -919,12 +917,10 @@ namespace AMDiS {
     if (creationMode == 0) {
       // matK = inv(A_BB) A_BPi
       Mat matK;
-      MSG("START SOLVE!\n");
       petsc_helper::blockMatMatSolve(subdomain->getSolver(),
                                      subdomain->getMatInteriorCoarse(),
                                      matK);
-      MSG("END SOLVE!\n");
-
+
       // mat_schur_primal = A_PiPi - A_PiB inv(A_BB) A_BPi
       //                  = A_PiPi - A_PiB matK
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 04044235..c7035a86 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -236,11 +236,6 @@ namespace AMDiS {
     /// order of: 1/h^2
     void createH2vec(DOFVector<double> &vec);
 
-    virtual ParallelDofMapping &getInteriorMap()
-    {
-      return localDofMap;
-    }
-
   protected:
     /// Mapping from primal DOF indices to a global index of primals.
     ParallelDofMapping primalDofMap;
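
For reference, the primal coarse matrix abbreviated in the comment of the PetscSolverFeti.cc
hunk above is the Schur complement of the interior block; written out in LaTeX notation:

    S_{\Pi\Pi} = A_{\Pi\Pi} - A_{\Pi B}\, A_{BB}^{-1}\, A_{B\Pi}

where matK = A_{BB}^{-1} A_{B\Pi} is computed block-wise by petsc_helper::blockMatMatSolve,
using the subdomain solver for the interior matrix A_{BB}.
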
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 3fc15695..5df5ff97 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -82,17 +82,24 @@ namespace AMDiS {
     if (!zeroStartVector)
       KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);
 
+    removeDirichletRows(seqMat, meshDistributor->getDofMap(), getMatInterior());
+
 #if (DEBUG != 0)
     MSG("Fill petsc matrix 3 needed %.5f seconds\n", MPI::Wtime() - wtime);
 #endif
-  }
-
-  void PetscSolverGlobalMatrix::fillPetscMatrix(DOFMatrix *mat)
-  {
-    Matrix<DOFMatrix*> m(1, 1);
-    m[0][0] = mat;
-    fillPetscMatrix(&m);
+    // === For debugging, allow writing the matrix to a file. ===
+
+    bool dbgWriteMatrix = false;
+    Parameters::get("parallel->debug->write matrix", dbgWriteMatrix);
+    if (dbgWriteMatrix) {
+      PetscViewer matView;
+      PetscViewerBinaryOpen(PETSC_COMM_WORLD, "mpi.mat",
+                            FILE_MODE_WRITE, &matView);
+      MatView(getMatInterior(), matView);
+      PetscViewerDestroy(&matView);
+    }
   }
 
 
@@ -249,7 +256,7 @@ namespace AMDiS {
       else
         PCFactorSetMatSolverPackage(pcInterior, MATSOLVERMUMPS);
     }
-    KSPSetFromOptions(kspInterior);
+    KSPSetFromOptions(kspInterior);
   }
 
 
@@ -271,6 +278,20 @@ namespace AMDiS {
     }
 
     vecRhsAssembly();
+
+    removeDirichletRows(vec, meshDistributor->getDofMap(), getVecRhsInterior());
+
+    // === For debugging, allow writing the rhs vector to a file. ===
+
+    bool dbgWriteRhs = false;
+    Parameters::get("parallel->debug->write rhs", dbgWriteRhs);
+    if (dbgWriteRhs) {
+      PetscViewer vecView;
+      PetscViewerBinaryOpen(PETSC_COMM_WORLD, "mpi.vec",
+                            FILE_MODE_WRITE, &vecView);
+      VecView(getVecRhsInterior(), vecView);
+      PetscViewerDestroy(&vecView);
+    }
   }
 
 
@@ -449,6 +470,77 @@ namespace AMDiS {
   }
 
 
+  void PetscSolverGlobalMatrix::removeDirichletRows(Matrix<DOFMatrix*> *seqMat,
+                                                    ParallelDofMapping &dofMap,
+                                                    Mat mpiMat)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::removeDirichletRows()");
+
+    if (!handleDirichletRows)
+      return;
+
+    int nComponents = seqMat->getSize();
+    vector<PetscInt> zeroRows;
+
+    for (int i = 0; i < nComponents; i++) {
+      DOFMatrix *dofMat = (*seqMat)[i][i];
+      if (!dofMat)
+        continue;
+
+      const FiniteElemSpace *feSpace = dofMat->getRowFeSpace();
+
+      std::set<DegreeOfFreedom> &dRows = dofMat->getDirichletRows();
+      for (std::set<DegreeOfFreedom>::iterator dofIt = dRows.begin();
+           dofIt != dRows.end(); ++dofIt) {
+        MultiIndex multiIndex;
+        if (dofMap[feSpace].find(*dofIt, multiIndex) &&
+            dofMap[feSpace].isRankDof(*dofIt))
+          zeroRows.push_back(dofMap.getMatIndex(i, multiIndex.global));
+      }
+    }
+
+    MatZeroRows(mpiMat, zeroRows.size(), &(zeroRows[0]), 1.0,
+                PETSC_NULL, PETSC_NULL);
+
+    PetscViewer view;
+    PetscViewerBinaryOpen(PETSC_COMM_WORLD, "interior.mat",
+                          FILE_MODE_WRITE, &view);
+    MatView(mpiMat, view);
+    PetscViewerDestroy(&view);
+  }
+
+
+  void PetscSolverGlobalMatrix::removeDirichletRows(SystemVector *seqVec,
+                                                    ParallelDofMapping &dofMap,
+                                                    Vec mpiVec)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::removeDirichletRows()");
+
+    if (!handleDirichletRows)
+      return;
+
+    int nComponents = seqVec->getSize();
+
+    for (int i = 0; i < nComponents; i++) {
+      DOFVector<double> *dofVec = seqVec->getDOFVector(i);
+      const FiniteElemSpace *feSpace = dofVec->getFeSpace();
+
+      map<DegreeOfFreedom, double>& dValues = dofVec->getDirichletValues();
+      for (map<DegreeOfFreedom, double>::iterator dofIt = dValues.begin();
+           dofIt != dValues.end(); ++dofIt) {
+        MultiIndex multiIndex;
+        if (dofMap[feSpace].find(dofIt->first, multiIndex) &&
+            dofMap[feSpace].isRankDof(dofIt->first))
+          VecSetValue(mpiVec, dofMap.getMatIndex(i, multiIndex.global),
+                      dofIt->second, INSERT_VALUES);
+      }
+    }
+
+    VecAssemblyBegin(mpiVec);
+    VecAssemblyEnd(mpiVec);
+  }
+
+
   void PetscSolverGlobalMatrix::createFieldSplit(PC pc)
   {
     FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
index ccdbf99c..da09a39d 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
@@ -50,10 +50,6 @@ namespace AMDiS {
 
     void fillPetscMatrix(Matrix<DOFMatrix*> *mat);
 
-    /// This function is just a small wrapper that creates a 1x1 matrix that
-    /// contains exactly one DOFMatrix and than calls \ref fillPetscMatrix
-    void fillPetscMatrix(DOFMatrix *mat);
-
     void fillPetscMatrixWithCoarseSpace(Matrix<DOFMatrix*> *mat);
 
     void fillPetscRhs(SystemVector *vec);
@@ -67,6 +63,14 @@ namespace AMDiS {
     void destroyVectorData();
 
   protected:
+    void removeDirichletRows(Matrix<DOFMatrix*> *seqMat,
+                             ParallelDofMapping &dofMap,
+                             Mat mpiMat);
+
+    void removeDirichletRows(SystemVector *seqVec,
+                             ParallelDofMapping &dofMap,
+                             Vec mpiVec);
+
     void createFieldSplit(PC pc);
 
     virtual void initPreconditioner(PC pc);
--
GitLab
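
The debug switches added in PetscSolverGlobalMatrix.cc ("parallel->debug->write matrix" and
"parallel->debug->write rhs") dump the assembled interior matrix and rhs vector in PETSc binary
format to "mpi.mat" and "mpi.vec". A small self-contained sketch of an offline reader for these
dumps, using only standard PETSc calls (PETSc >= 3.2 signatures assumed, matching the
pointer-style PetscViewerDestroy used in the patch; compiles as C or C++):

    #include <petscmat.h>
    #include <petscvec.h>

    int main(int argc, char **argv)
    {
      Mat A;
      Vec b;
      PetscViewer view;

      PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

      /* Load the interior matrix written by "parallel->debug->write matrix". */
      PetscViewerBinaryOpen(PETSC_COMM_WORLD, "mpi.mat", FILE_MODE_READ, &view);
      MatCreate(PETSC_COMM_WORLD, &A);
      MatLoad(A, view);
      PetscViewerDestroy(&view);

      /* Load the rhs vector written by "parallel->debug->write rhs". */
      PetscViewerBinaryOpen(PETSC_COMM_WORLD, "mpi.vec", FILE_MODE_READ, &view);
      VecCreate(PETSC_COMM_WORLD, &b);
      VecLoad(b, view);
      PetscViewerDestroy(&view);

      /* Print the loaded objects to stdout for inspection. */
      MatView(A, PETSC_VIEWER_STDOUT_WORLD);
      VecView(b, PETSC_VIEWER_STDOUT_WORLD);

      MatDestroy(&A);
      VecDestroy(&b);
      PetscFinalize();
      return 0;
    }

Since MatLoad and VecLoad distribute the data automatically, the reader works on any number of
MPI ranks.
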