diff --git a/AMDiS/src/DOFMatrix.cc b/AMDiS/src/DOFMatrix.cc
index a0061a3d79dfc339ad00f2a8ce0fab334531d6e8..af9d35ae1e0a3719d722efc4f359b525e399cb1c 100644
--- a/AMDiS/src/DOFMatrix.cc
+++ b/AMDiS/src/DOFMatrix.cc
@@ -26,9 +26,6 @@
 #include "BoundaryManager.h"
 #include "Assembler.h"
 #include "Serializer.h"
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-#include "parallel/ParallelDofMapping.h"
-#endif
 
 namespace AMDiS {
 
@@ -209,21 +206,6 @@ namespace AMDiS {
 
     using namespace mtl;
 
-#if 0
-    if (MPI::COMM_WORLD.Get_rank() == 0) {
-      std::cout << "----- PRINT MAT " << rowElInfo->getElement()->getIndex() << "--------" << std::endl;
-      std::cout << elMat << std::endl;
-      std::cout << "rows: ";
-      for (int i = 0; i < rowIndices.size(); i++)
-        std::cout << rowIndices[i] << " ";
-      std::cout << std::endl;
-      std::cout << "cols: ";
-      for (int i = 0; i < colIndices.size(); i++)
-        std::cout << colIndices[i] << " ";
-      std::cout << std::endl;
-    }
-#endif
-
     for (int i = 0; i < nRow; i++) {
       DegreeOfFreedom row = rowIndices[i];
 
@@ -552,15 +534,12 @@ namespace AMDiS {
 
     // Do the following only in sequential code. In parallel mode, the specific
    // solver method must care about dirichlet boundary conditions.
-
+#ifndef HAVE_PARALLEL_DOMAIN_AMDIS
     inserter_type &ins = *inserter;
     for (std::set<int>::iterator it = dirichletDofs.begin();
          it != dirichletDofs.end(); ++it)
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      // if (dofMap->isRankDof(*it))
-      if (dofMap->isSet(*it))
+      ins[*it][*it] = 1.0;
 #endif
-        ins[*it][*it] = 1.0;
   }
 
 
@@ -583,12 +562,4 @@ namespace AMDiS {
     dirichletDofs.clear();
   }
 
-
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-  void DOFMatrix::setDofMap(FeSpaceDofMap& m)
-  {
-    dofMap = &m;
-  }
-#endif
-
 }
diff --git a/AMDiS/src/DOFMatrix.h b/AMDiS/src/DOFMatrix.h
index e9d0a3cd22973298767639f4eca9d8c43fd5cb43..ec6b1914663a0256ae8d67ab02991900507a09db 100644
--- a/AMDiS/src/DOFMatrix.h
+++ b/AMDiS/src/DOFMatrix.h
@@ -415,10 +415,6 @@ namespace AMDiS {
     ///
     int memsize();
 
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    void setDofMap(FeSpaceDofMap& m);
-#endif
-
   protected:
     /// Pointer to a FiniteElemSpace with information about corresponding row DOFs
     /// and basis functions
@@ -480,13 +476,6 @@ namespace AMDiS {
     /// elemnts during assembling.
     int nnzPerRow;
 
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    /// Is used in parallel computations to check whether specific DOFs in the
-    /// row FE spaces are owned by the rank or not. This is used to ensure that
-    /// Dirichlet BC is handled correctly in parallel computations.
-    FeSpaceDofMap *dofMap;
-#endif
-
     /// Inserter object: implemented as pointer, allocated and deallocated as needed
     inserter_type *inserter;
 
diff --git a/AMDiS/src/DOFVector.cc b/AMDiS/src/DOFVector.cc
index 9023f6ec429f94ce94028e482637f16804371128..358cc1e47be4e99d8b7ff6f8ad65ed6a6514946a 100644
--- a/AMDiS/src/DOFVector.cc
+++ b/AMDiS/src/DOFVector.cc
@@ -285,10 +285,10 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);
     }
 
- #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
     double localResult = result;
     MPI::COMM_WORLD.Allreduce(&localResult, &result, 1, MPI_DOUBLE, MPI_SUM);
- #endif
+#endif
 
     return result;
   }
diff --git a/AMDiS/src/DOFVector.h b/AMDiS/src/DOFVector.h
index e7fc40b349cb5116bb33f31748aa4723b9fcdbe9..d18da64122b39c335211fd2ed99bb383a1b402d1 100644
--- a/AMDiS/src/DOFVector.h
+++ b/AMDiS/src/DOFVector.h
@@ -42,10 +42,6 @@
 #include "BasisFunction.h"
 #include "FiniteElemSpace.h"
 #include "SurfaceQuadrature.h"
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-#include <mpi.h>
-#include "parallel/ParallelDofMapping.h"
-#endif
 
 namespace AMDiS {
 
@@ -61,11 +57,7 @@ namespace AMDiS {
       elementVector(3),
       boundaryManager(NULL),
       nBasFcts(0)
-    {
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      dofMap = NULL;
-#endif
-    }
+    {}
 
     DOFVectorBase(const FiniteElemSpace *f, string n);
 
@@ -248,21 +240,6 @@ namespace AMDiS {
       return dirichletDofValues;
     }
 
-
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    void setDofMap(FeSpaceDofMap& m)
-    {
-      dofMap = &m;
-    }
-
-    inline bool isRankDof(DegreeOfFreedom dof)
-    {
-      TEST_EXIT_DBG(dofMap)("No rank dofs set!\n");
-
-      return dofMap->isRankDof(dof);
-    }
-#endif
-
   protected:
     ///
     const FiniteElemSpace *feSpace;
@@ -292,10 +269,6 @@ namespace AMDiS {
     int dim;
 
     map<DegreeOfFreedom, T> dirichletDofValues;
-
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    FeSpaceDofMap *dofMap;
-#endif
   };
 
diff --git a/AMDiS/src/DOFVector.hh b/AMDiS/src/DOFVector.hh
index 91df10cc98dcfb4d42cdd91ca8211b000b1f69ec..f70b4ecc75189bbbcc21b2498159342086d1db85 100644
--- a/AMDiS/src/DOFVector.hh
+++ b/AMDiS/src/DOFVector.hh
@@ -1060,10 +1060,6 @@ namespace AMDiS {
       this->boundaryManager = NULL;
     }
 
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-    this->dofMap = rhs.dofMap;
-#endif
-
     return *this;
   }
 
diff --git a/AMDiS/src/DirichletBC.cc b/AMDiS/src/DirichletBC.cc
index 38952e597980ab43e29916470a1b187f9eb2c5f8..bb9b779dc02489ca63ac06d9b0bd26b37a7504fc 100644
--- a/AMDiS/src/DirichletBC.cc
+++ b/AMDiS/src/DirichletBC.cc
@@ -64,24 +64,20 @@ namespace AMDiS {
     const BasisFunction *basFcts = rowFeSpace->getBasisFcts();
 
     for (int i = 0; i < nBasFcts; i++) {
-
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      if (vector->isRankDof(dofIndices[i]))
-#endif
-        if (localBound[i] == boundaryType) {
-          double value = 0.0;
-          if (f) {
-            elInfo->coordToWorld(*(basFcts->getCoords(i)), worldCoords);
-            value = (*f)(worldCoords);
-          } else {
-            if (dofVec)
-              value = (*dofVec)[dofIndices[i]];
-            else
-              ERROR_EXIT("There is something wrong!\n");
-          }
-          vector->setDirichletDofValue(dofIndices[i], value);
-          (*vector)[dofIndices[i]] = value;
+      if (localBound[i] == boundaryType) {
+        double value = 0.0;
+        if (f) {
+          elInfo->coordToWorld(*(basFcts->getCoords(i)), worldCoords);
+          value = (*f)(worldCoords);
+        } else {
+          if (dofVec)
+            value = (*dofVec)[dofIndices[i]];
+          else
+            ERROR_EXIT("There is something wrong!\n");
         }
+        vector->setDirichletDofValue(dofIndices[i], value);
+        (*vector)[dofIndices[i]] = value;
+      }
     }
   }
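
Note on the DOFMatrix/DOFVector/DirichletBC changes above: the per-DOF ownership test (isRankDof) is gone from the assembly path. DirichletBC::fillBoundaryCondition now records the boundary value for every local DOF, DOFMatrix::finishInsertion writes the unit diagonal only in sequential builds (the new #ifndef), and in parallel builds the rows are eliminated later inside the solver (see removeDirichletRows in PetscSolverGlobalMatrix.cc further down). Both paths produce the same algebra; a self-contained toy illustration in plain C++ (not AMDiS code, names are made up):

#include <cstdio>

// Enforce u[1] = 5.0 on a 3x3 system by turning row 1 into a unit row and
// writing the boundary value into the rhs. This is the effect that both the
// sequential finishInsertion() branch and the parallel MatZeroRows() path
// achieve for a Dirichlet DOF.
int main()
{
  double A[3][3] = { {4, -1, 0}, {-1, 4, -1}, {0, -1, 4} };
  double b[3] = { 1, 2, 3 };
  const int k = 1;       // Dirichlet DOF
  const double g = 5.0;  // boundary value

  for (int j = 0; j < 3; j++)
    A[k][j] = (j == k ? 1.0 : 0.0);
  b[k] = g;

  // Row k now reads "1 * u[k] = g", so any linear solver returns u[k] == g.
  for (int i = 0; i < 3; i++)
    std::printf("%5.1f %5.1f %5.1f | %5.1f\n", A[i][0], A[i][1], A[i][2], b[i]);
  return 0;
}
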
5629c595edfbbdaef5d8ef217440968e41ab475c..f7d705fccf5825f12c761d1284d41f0e6b09cee5 100644 --- a/AMDiS/src/parallel/BddcMlSolver.cc +++ b/AMDiS/src/parallel/BddcMlSolver.cc @@ -232,6 +232,7 @@ namespace AMDiS { // matrix type (set here to unsymmetric) int matrixtype = 0; + Parameters::get("parallel->bddcml->matrix type", matrixtype); // Non zero structure of matrix vector<int> i_sparse; @@ -352,12 +353,14 @@ namespace AMDiS { &luser_constraints2); - int use_defaults_int = 1; + int use_defaults_int = 0; int parallel_division_int = 1; - int use_arithmetic_int = 0; + int use_arithmetic_int = 1; int use_adaptive_int = 0; int use_user_constraint_int = 0; + Parameters::get("parallel->bddcml->arithmetic constraints", use_arithmetic_int); + // MSG("call to \"bddcml_setup_preconditioner\" with the following arguments (each in one line):\n"); // MSG(" %d\n", matrixtype); // MSG(" %d\n", use_defaults_int); diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc index 8e2bade902a032a424e9e81aed4e9a28c0b9dbf5..90569e02a62b0addce071bd9dadf5813dbc413e3 100644 --- a/AMDiS/src/parallel/MeshDistributor.cc +++ b/AMDiS/src/parallel/MeshDistributor.cc @@ -189,8 +189,6 @@ namespace AMDiS { updateLocalGlobalNumbering(); - setRankDofs(); - elObjDb.setData(partitionMap, levelData); #if (DEBUG != 0) @@ -351,9 +349,6 @@ namespace AMDiS { #endif } - // Set DOF rank information to all matrices and vectors. - setRankDofs(); - // And delete some data, we there is no mesh adaptivty. if (!meshAdaptivity) elObjDb.clear(); @@ -475,10 +470,8 @@ namespace AMDiS { // DOFs object to the matrices and vectors of the added stationary problem, // and to remove the periodic boundary conditions on these objects. - if (initialized) { - setRankDofs(probStat, dofMap); + if (initialized) removePeriodicBoundaryConditions(probStat); - } } @@ -731,49 +724,6 @@ namespace AMDiS { } - void MeshDistributor::setRankDofs(ProblemStatSeq *probStat, - ParallelDofMapping &rankDofMap) - { - FUNCNAME("MeshDistributor::setRankDofs()"); - - int nComponents = probStat->getNumComponents(); - for (int i = 0; i < nComponents; i++) { - for (int j = 0; j < nComponents; j++) - if (probStat->getSystemMatrix(i, j)) { - const FiniteElemSpace *rowFeSpace = - probStat->getSystemMatrix(i, j)->getRowFeSpace(); - - TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), rowFeSpace) != feSpaces.end()) - ("Should not happen!\n"); - - probStat->getSystemMatrix(i, j)->setDofMap(rankDofMap[rowFeSpace]); - } - - - TEST_EXIT_DBG(probStat->getRhs()->getDOFVector(i))("No RHS vector!\n"); - TEST_EXIT_DBG(probStat->getSolution()->getDOFVector(i)) - ("No solution vector!\n"); - TEST_EXIT_DBG(probStat->getRhsVector(i)->getFeSpace() == - probStat->getSolution(i)->getFeSpace()) - ("Should not happen!\n"); - - const FiniteElemSpace *feSpace = probStat->getRhsVector(i)->getFeSpace(); - TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), feSpace) != feSpaces.end()) - ("Should not happen!\n"); - - probStat->getRhsVector(i)->setDofMap(rankDofMap[feSpace]); - probStat->getSolution(i)->setDofMap(rankDofMap[feSpace]); - } - } - - - void MeshDistributor::setRankDofs() - { - for (unsigned int i = 0; i < problemStat.size(); i++) - setRankDofs(problemStat[i], dofMap); - } - - void MeshDistributor::removePeriodicBoundaryConditions() { FUNCNAME("MeshDistributor::removePeriodicBoundaryConditions()"); diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h index 139d19354b2db0cc90819fdc8c4abd750f7ed899..df2297cfcd3641674942f954bfd3da8b45f77f3f 
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 8e2bade902a032a424e9e81aed4e9a28c0b9dbf5..90569e02a62b0addce071bd9dadf5813dbc413e3 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -189,8 +189,6 @@ namespace AMDiS {
 
     updateLocalGlobalNumbering();
 
-    setRankDofs();
-
     elObjDb.setData(partitionMap, levelData);
 
 #if (DEBUG != 0)
@@ -351,9 +349,6 @@ namespace AMDiS {
 #endif
     }
 
-    // Set DOF rank information to all matrices and vectors.
-    setRankDofs();
-
     // And delete some data, we there is no mesh adaptivty.
     if (!meshAdaptivity)
       elObjDb.clear();
@@ -475,10 +470,8 @@ namespace AMDiS {
     // DOFs object to the matrices and vectors of the added stationary problem,
     // and to remove the periodic boundary conditions on these objects.
 
-    if (initialized) {
-      setRankDofs(probStat, dofMap);
+    if (initialized)
       removePeriodicBoundaryConditions(probStat);
-    }
   }
 
 
@@ -731,49 +724,6 @@ namespace AMDiS {
   }
 
 
-  void MeshDistributor::setRankDofs(ProblemStatSeq *probStat,
-                                    ParallelDofMapping &rankDofMap)
-  {
-    FUNCNAME("MeshDistributor::setRankDofs()");
-
-    int nComponents = probStat->getNumComponents();
-    for (int i = 0; i < nComponents; i++) {
-      for (int j = 0; j < nComponents; j++)
-        if (probStat->getSystemMatrix(i, j)) {
-          const FiniteElemSpace *rowFeSpace =
-            probStat->getSystemMatrix(i, j)->getRowFeSpace();
-
-          TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), rowFeSpace) != feSpaces.end())
-            ("Should not happen!\n");
-
-          probStat->getSystemMatrix(i, j)->setDofMap(rankDofMap[rowFeSpace]);
-        }
-
-
-      TEST_EXIT_DBG(probStat->getRhs()->getDOFVector(i))("No RHS vector!\n");
-      TEST_EXIT_DBG(probStat->getSolution()->getDOFVector(i))
-        ("No solution vector!\n");
-      TEST_EXIT_DBG(probStat->getRhsVector(i)->getFeSpace() ==
-                    probStat->getSolution(i)->getFeSpace())
-        ("Should not happen!\n");
-
-      const FiniteElemSpace *feSpace = probStat->getRhsVector(i)->getFeSpace();
-      TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), feSpace) != feSpaces.end())
-        ("Should not happen!\n");
-
-      probStat->getRhsVector(i)->setDofMap(rankDofMap[feSpace]);
-      probStat->getSolution(i)->setDofMap(rankDofMap[feSpace]);
-    }
-  }
-
-
-  void MeshDistributor::setRankDofs()
-  {
-    for (unsigned int i = 0; i < problemStat.size(); i++)
-      setRankDofs(problemStat[i], dofMap);
-  }
-
-
   void MeshDistributor::removePeriodicBoundaryConditions()
   {
     FUNCNAME("MeshDistributor::removePeriodicBoundaryConditions()");
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index 139d19354b2db0cc90819fdc8c4abd750f7ed899..df2297cfcd3641674942f954bfd3da8b45f77f3f 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -347,14 +347,6 @@ namespace AMDiS {
                           DofComm &dcom,
                           const FiniteElemSpace *feSpace);
 
-    /// Sets \ref isRankDof to all matrices and rhs vectors in a given
-    /// stationary problem.
-    void setRankDofs(ProblemStatSeq *probStat, ParallelDofMapping &rankDofMap);
-
-    /// Sets \ref isRankDof to all matrices and rhs vectors in all
-    /// stationary problems.
-    void setRankDofs();
-
   protected:
     void addProblemStat(ProblemStatSeq *probStat);
 
diff --git a/AMDiS/src/parallel/PetscProblemStat.cc b/AMDiS/src/parallel/PetscProblemStat.cc
index 44d1a01b93cf583afec48f32edab51dc83c511fb..73dd638f69ba9203784f5dc4bc17377d80c5a5e8 100644
--- a/AMDiS/src/parallel/PetscProblemStat.cc
+++ b/AMDiS/src/parallel/PetscProblemStat.cc
@@ -76,24 +76,6 @@ namespace AMDiS {
   }
 
 
-  void PetscProblemStat::buildAfterCoarsen(AdaptInfo *adaptInfo, Flag flag,
-                                           bool assembleMatrix,
-                                           bool assembleVector)
-  {
-    FUNCNAME("ParallelProblemStatBase::buildAfterCoarsen()");
-
-    TEST_EXIT_DBG(MeshDistributor::globalMeshDistributor != NULL)
-      ("Should not happen!\n");
-
-    MeshDistributor::globalMeshDistributor->checkMeshChange();
-    MeshDistributor::globalMeshDistributor->setRankDofs(this,
-                                                        petscSolver->getInteriorMap());
-
-    ProblemStatSeq::buildAfterCoarsen(adaptInfo, flag,
-                                      assembleMatrix,
-                                      assembleVector);
-  }
-
-
   void PetscProblemStat::solve(AdaptInfo *adaptInfo,
                                bool createMatrixData,
                                bool storeMatrixData)
diff --git a/AMDiS/src/parallel/PetscProblemStat.h b/AMDiS/src/parallel/PetscProblemStat.h
index 536586489cc9e74d1be3deadb90447b920b462dc..a844b5c9f68882eba82d4c3498013b65b62bd368 100644
--- a/AMDiS/src/parallel/PetscProblemStat.h
+++ b/AMDiS/src/parallel/PetscProblemStat.h
@@ -49,10 +49,6 @@ namespace AMDiS {
       delete petscSolver;
     }
 
-    void buildAfterCoarsen(AdaptInfo *adaptInfo, Flag flag,
-                           bool assembleMatrix,
-                           bool assembleVector);
-
     void initialize(Flag initFlag,
                     ProblemStatSeq *adoptProblem = NULL,
                     Flag adoptFlag = INIT_NOTHING);
diff --git a/AMDiS/src/parallel/PetscSolver.cc b/AMDiS/src/parallel/PetscSolver.cc
index 918685ed7c4b63fae038d5be75a985905c175aca..510d8c8ff8561dd60f58abffc8cfb672a0ce26f3 100644
--- a/AMDiS/src/parallel/PetscSolver.cc
+++ b/AMDiS/src/parallel/PetscSolver.cc
@@ -23,9 +23,10 @@ namespace AMDiS {
   PetscSolver::PetscSolver()
     : ParallelCoarseSpaceMatVec(),
       kspPrefix(""),
-      removeRhsNullspace(false),
+      removeRhsNullspace(false),
+      hasConstantNullspace(false),
       isSymmetric(false),
-      hasConstantNullspace(false)
+      handleDirichletRows(true)
   {
     string kspStr = "";
     Parameters::get("parallel->solver->petsc->ksp", kspStr);
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index 064fa73ed84b8b7fd55c8f346f7d54798fec147d..0eb404ec15439160d699ad0aef389495830fdac9 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -62,7 +62,8 @@ namespace AMDiS {
      */
     virtual void fillPetscMatrix(Matrix<DOFMatrix*> *mat) = 0;
 
-    ///
+    /// This function is just a small wrapper that creates a 1x1 matrix that
+    /// contains exactly one DOFMatrix and then calls \ref fillPetscMatrix.
     void fillPetscMatrix(DOFMatrix* mat);
 
     /** \brief
@@ -85,11 +86,6 @@ namespace AMDiS {
     /// Detroys all vector data structures.
     virtual void destroyVectorData() = 0;
 
-    virtual ParallelDofMapping &getInteriorMap()
-    {
-      return meshDistributor->getDofMap();
-    }
-
     virtual Flag getBoundaryDofRequirement()
     {
      return 0;
@@ -139,6 +135,12 @@ namespace AMDiS {
       constNullspaceComponent.push_back(component);
     }
 
+    /// Informs the solver whether it has to handle Dirichlet rows or not.
+    void setHandleDirichletRows(bool b)
+    {
+      handleDirichletRows = b;
+    }
+
   protected:
     /** \brief
     * Copies between to PETSc vectors by using different index sets for the
@@ -177,6 +179,12 @@ namespace AMDiS {
 
     bool isSymmetric;
 
+    /// If true, Dirichlet rows are handled by the solver accordingly. Setting
+    /// this value to false only makes sense if this solver is just used as a
+    /// subsolver and the surrounding solver already handles Dirichlet rows in
+    /// some way.
+    bool handleDirichletRows;
+
     vector<int> constNullspaceComponent;
   };
 
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 22906e3f3b13e94a32e552d2e2229a01150dfdcb..583a1f63e26ca6497f5a47dfeea4323250e30216 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -101,6 +101,7 @@ namespace AMDiS {
     if (subdomain == NULL) {
       subdomain = new PetscSolverGlobalMatrix();
       subdomain->setSymmetric(isSymmetric);
+      subdomain->setHandleDirichletRows(false);
 
       if (meshLevel == 0) {
         subdomain->setMeshDistributor(meshDistributor,
@@ -134,11 +135,6 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::createDirichletData()");
 
-    bool removeDirichletRows = false;
-    Parameters::get("parallel->feti->remove dirichlet", removeDirichletRows);
-    if (!removeDirichletRows)
-      return;
-
     int nComponents = mat.getSize();
     for (int i = 0; i < nComponents; i++) {
       DOFMatrix* dofMat = mat[i][i];
@@ -735,13 +731,15 @@ namespace AMDiS {
     InteriorBoundary &intBound = meshDistributor->getIntBoundary();
     std::set<BoundaryObject> allEdges;
     for (InteriorBoundary::iterator it(intBound.getOwn()); !it.end(); ++it) {
-      if (it->rankObj.subObj == EDGE &&
-          testWirebasketEdge(it->rankObj, feSpaces[0]) &&
+      if ((it->rankObj.subObj == FACE ||
+           (it->rankObj.subObj == EDGE &&
+            testWirebasketEdge(it->rankObj, feSpaces[0]))) &&
          allEdges.count(it->rankObj) == 0) {
        bool dirichletOnlyEdge = true;
 
        DofContainer edgeDofs;
        it->rankObj.el->getAllDofs(feSpaces[0], it->rankObj, edgeDofs);
+
        for (DofContainer::iterator dit = edgeDofs.begin();
             dit != edgeDofs.end(); ++dit) {
          if (dirichletRows[feSpaces[0]].count(**dit) == 0) {
@@ -919,12 +917,10 @@ namespace AMDiS {
     if (creationMode == 0) {
       // matK = inv(A_BB) A_BPi
       Mat matK;
-      MSG("START SOLVE!\n");
       petsc_helper::blockMatMatSolve(subdomain->getSolver(),
                                      subdomain->getMatInteriorCoarse(),
                                      matK);
-      MSG("END SOLVE!\n");
-
+
       // mat_schur_primal = A_PiPi - A_PiB inv(A_BB) A_BPi
       //                  = A_PiPi - A_PiB matK
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 0404423566484b44343dd3ee8ad11f07e70c77bb..c7035a86a24fea37f2a7373448a27836035fd9d7 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -236,11 +236,6 @@ namespace AMDiS {
     /// order of: 1/h^2
     void createH2vec(DOFVector<double> &vec);
 
-    virtual ParallelDofMapping &getInteriorMap()
-    {
-      return localDofMap;
-    }
-
   protected:
     /// Mapping from primal DOF indices to a global index of primals.
     ParallelDofMapping primalDofMap;
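
The new handleDirichletRows flag (default true, set in the PetscSolver constructor above) is meant for nested solver setups: exactly one solver in the stack should eliminate Dirichlet rows. The FETI change above is the intended usage pattern; condensed from PetscSolverFeti::initialize, with the surrounding setup code omitted:

// Inside PetscSolverFeti::initialize(): the FETI solver performs the
// Dirichlet elimination itself, so the interior subdomain solver must
// leave those rows untouched.
subdomain = new PetscSolverGlobalMatrix();
subdomain->setSymmetric(isSymmetric);
subdomain->setHandleDirichletRows(false);

Standalone solvers keep the old behavior automatically, since the default is true.
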
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 3fc15695d3eb8f1ddf3bef71ec35b50002bccbaa..5df5ff9738d4fc021fefae5271da066985c527cb 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -82,17 +82,24 @@ namespace AMDiS {
     if (!zeroStartVector)
       KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);
 
+    removeDirichletRows(seqMat, meshDistributor->getDofMap(), getMatInterior());
+
 #if (DEBUG != 0)
     MSG("Fill petsc matrix 3 needed %.5f seconds\n", MPI::Wtime() - wtime);
 #endif
-  }
 
-
-  void PetscSolverGlobalMatrix::fillPetscMatrix(DOFMatrix* mat)
-  {
-    Matrix<DOFMatrix*> m(1, 1);
-    m[0][0] = mat;
-    fillPetscMatrix(&m);
+    // === For debugging, allow writing the matrix to a file. ===
+
+    bool dbgWriteMatrix = false;
+    Parameters::get("parallel->debug->write matrix", dbgWriteMatrix);
+    if (dbgWriteMatrix) {
+      PetscViewer matView;
+      PetscViewerBinaryOpen(PETSC_COMM_WORLD, "mpi.mat",
+                            FILE_MODE_WRITE, &matView);
+      MatView(getMatInterior(), matView);
+      PetscViewerDestroy(&matView);
+    }
   }
 
@@ -249,7 +256,7 @@ namespace AMDiS {
       else
        PCFactorSetMatSolverPackage(pcInterior, MATSOLVERMUMPS);
     }
-    KSPSetFromOptions(kspInterior);
+    KSPSetFromOptions(kspInterior);
   }
 
@@ -271,6 +278,20 @@ namespace AMDiS {
     }
 
     vecRhsAssembly();
+
+    removeDirichletRows(vec, meshDistributor->getDofMap(), getVecRhsInterior());
+
+    // === For debugging, allow writing the rhs vector to a file. ===
+
+    bool dbgWriteRhs = false;
+    Parameters::get("parallel->debug->write rhs", dbgWriteRhs);
+    if (dbgWriteRhs) {
+      PetscViewer vecView;
+      PetscViewerBinaryOpen(PETSC_COMM_WORLD, "mpi.vec",
+                            FILE_MODE_WRITE, &vecView);
+      VecView(getVecRhsInterior(), vecView);
+      PetscViewerDestroy(&vecView);
+    }
   }
 
@@ -449,6 +470,77 @@ namespace AMDiS {
   }
 
 
+  void PetscSolverGlobalMatrix::removeDirichletRows(Matrix<DOFMatrix*> *seqMat,
+                                                    ParallelDofMapping &dofMap,
+                                                    Mat mpiMat)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::removeDirichletRows()");
+
+    if (!handleDirichletRows)
+      return;
+
+    int nComponents = seqMat->getSize();
+    vector<PetscInt> zeroRows;
+
+    for (int i = 0; i < nComponents; i++) {
+      DOFMatrix *dofMat = (*seqMat)[i][i];
+      if (!dofMat)
+        continue;
+
+      const FiniteElemSpace *feSpace = dofMat->getRowFeSpace();
+
+      std::set<DegreeOfFreedom> &dRows = dofMat->getDirichletRows();
+      for (std::set<DegreeOfFreedom>::iterator dofIt = dRows.begin();
+           dofIt != dRows.end(); ++dofIt) {
+        MultiIndex multiIndex;
+        if (dofMap[feSpace].find(*dofIt, multiIndex) &&
+            dofMap[feSpace].isRankDof(*dofIt))
+          zeroRows.push_back(dofMap.getMatIndex(i, multiIndex.global));
+      }
+    }
+
+    MatZeroRows(mpiMat, zeroRows.size(), &(zeroRows[0]), 1.0,
+                PETSC_NULL, PETSC_NULL);
+
+    PetscViewer view;
+    PetscViewerBinaryOpen(PETSC_COMM_WORLD, "interior.mat",
+                          FILE_MODE_WRITE, &view);
+    MatView(mpiMat, view);
+    PetscViewerDestroy(&view);
+  }
+
+
+  void PetscSolverGlobalMatrix::removeDirichletRows(SystemVector *seqVec,
+                                                    ParallelDofMapping &dofMap,
+                                                    Vec mpiVec)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::removeDirichletRows()");
+
+    if (!handleDirichletRows)
+      return;
+
+    int nComponents = seqVec->getSize();
+
+    for (int i = 0; i < nComponents; i++) {
+      DOFVector<double> *dofVec = seqVec->getDOFVector(i);
+      const FiniteElemSpace *feSpace = dofVec->getFeSpace();
+
+      map<DegreeOfFreedom, double>& dValues = dofVec->getDirichletValues();
+      for (map<DegreeOfFreedom, double>::iterator dofIt = dValues.begin();
+           dofIt != dValues.end(); ++dofIt) {
+        MultiIndex multiIndex;
+        if (dofMap[feSpace].find(dofIt->first, multiIndex) &&
+            dofMap[feSpace].isRankDof(dofIt->first))
+          VecSetValue(mpiVec, dofMap.getMatIndex(i, multiIndex.global),
+                      dofIt->second, INSERT_VALUES);
+      }
+    }
+
+    VecAssemblyBegin(mpiVec);
+    VecAssemblyEnd(mpiVec);
+  }
+
+
   void PetscSolverGlobalMatrix::createFieldSplit(PC pc)
   {
     FUNCNAME("PetscSolverGlobalMatrix::createFieldSplit()");
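
removeDirichletRows is where Dirichlet conditions are now enforced on the PETSc side: every owned Dirichlet row becomes a unit row via MatZeroRows with a diagonal value of 1.0 (whether the zeroed off-diagonal entries stay in the sparsity pattern is controlled by PETSc's MAT_KEEP_NONZERO_PATTERN option), and the vector overload writes the stored boundary values into the matching rhs entries, so the assembled system pins u_i = g_i. Two review remarks: the unconditional PetscViewer dump to "interior.mat" in the matrix overload looks like leftover debugging (unlike the dumps in fillPetscMatrix/fillPetscRhs it is not guarded by a parameter), and &(zeroRows[0]) is undefined when the vector is empty, i.e. when a rank owns no Dirichlet rows. A minimal standalone program showing the same two PETSc calls (toy 3x3 system, not AMDiS code; PETSc 3.x C API as used elsewhere in the patch):

#include <petsc.h>

// Assemble a small tridiagonal system, then enforce u[1] = 5.0 the same way
// removeDirichletRows() does: MatZeroRows with unit diagonal for the matrix,
// VecSetValue for the right-hand side.
int main(int argc, char **argv)
{
  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);

  Mat A;
  Vec b;
  MatCreateSeqAIJ(PETSC_COMM_SELF, 3, 3, 3, PETSC_NULL, &A);
  VecCreateSeq(PETSC_COMM_SELF, 3, &b);

  for (PetscInt i = 0; i < 3; i++) {
    MatSetValue(A, i, i, 4.0, INSERT_VALUES);
    if (i > 0)
      MatSetValue(A, i, i - 1, -1.0, INSERT_VALUES);
    if (i < 2)
      MatSetValue(A, i, i + 1, -1.0, INSERT_VALUES);
    VecSetValue(b, i, 1.0, INSERT_VALUES);
  }
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

  PetscInt row = 1;                                      // Dirichlet row
  MatZeroRows(A, 1, &row, 1.0, PETSC_NULL, PETSC_NULL);  // row becomes e_row^T
  VecSetValue(b, row, 5.0, INSERT_VALUES);               // rhs holds the value g
  VecAssemblyBegin(b);
  VecAssemblyEnd(b);

  MatDestroy(&A);
  VecDestroy(&b);
  PetscFinalize();
  return 0;
}

The two new debug switches write the assembled interior operator and rhs in PETSc binary format ("mpi.mat"/"mpi.vec"); assuming the standard AMDiS parameter syntax, they are enabled with:

parallel->debug->write matrix: 1
parallel->debug->write rhs: 1
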
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
index ccdbf99ca8cf935060a875926b80f1f7de7b81f8..da09a39da6ec3a85f8a1f9bd75eec5e7ea5b21ad 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
@@ -50,10 +50,6 @@ namespace AMDiS {
 
     void fillPetscMatrix(Matrix<DOFMatrix*> *mat);
 
-    /// This function is just a small wrapper that creates a 1x1 matrix that
-    /// contains exactly one DOFMatrix and than calls \ref fillPetscMatrix
-    void fillPetscMatrix(DOFMatrix *mat);
-
     void fillPetscMatrixWithCoarseSpace(Matrix<DOFMatrix*> *mat);
 
     void fillPetscRhs(SystemVector *vec);
@@ -67,6 +63,14 @@ namespace AMDiS {
     void destroyVectorData();
 
   protected:
+    void removeDirichletRows(Matrix<DOFMatrix*> *seqMat,
+                             ParallelDofMapping &dofMap,
+                             Mat mpiMat);
+
+    void removeDirichletRows(SystemVector *seqVec,
+                             ParallelDofMapping &dofMap,
+                             Vec mpiVec);
+
     void createFieldSplit(PC pc);
 
     virtual void initPreconditioner(PC pc);