From ecb93cc352d74ce20de9696a42e70da8c6f26021 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Tue, 23 Oct 2012 06:22:56 +0000
Subject: [PATCH] Fixed FETI-DP for Stokes problem.

---
 AMDiS/src/MatrixVector.cc                     |  19 ----
 AMDiS/src/MatrixVector.h                      |   5 -
 AMDiS/src/ProblemStat.h                       |   2 +-
 AMDiS/src/SystemVector.cc                     |  10 --
 AMDiS/src/SystemVector.h                      |   2 -
 AMDiS/src/parallel/MeshDistributor.cc         | 102 +++++++++---------
 AMDiS/src/parallel/MeshDistributor.h          |  15 +--
 .../src/parallel/ParallelCoarseSpaceMatVec.cc |   3 +-
 AMDiS/src/parallel/ParallelDebug.cc           |  16 +--
 AMDiS/src/parallel/ParallelDofMapping.h       |   2 +-
 AMDiS/src/parallel/PetscSolverFeti.cc         |  50 ++++-----
 AMDiS/src/parallel/PetscSolverFeti.h          |   7 +-
 AMDiS/src/parallel/PetscSolverFetiDebug.cc    |   8 +-
 AMDiS/src/parallel/PetscSolverGlobalMatrix.cc |   2 +-
 14 files changed, 108 insertions(+), 135 deletions(-)

diff --git a/AMDiS/src/MatrixVector.cc b/AMDiS/src/MatrixVector.cc
index 7909901e..638b88e5 100644
--- a/AMDiS/src/MatrixVector.cc
+++ b/AMDiS/src/MatrixVector.cc
@@ -16,23 +16,4 @@
 
 namespace AMDiS {
 
-  vector<const FiniteElemSpace*> getComponentFeSpaces(Matrix<DOFMatrix*> &mat)
-  {
-    FUNCNAME("getComponentFeSpaces()");
-
-    int nComponents = mat.getNumRows();
-    vector<const FiniteElemSpace*> result(nComponents);
-
-    for (int i = 0; i < nComponents; i++) {
-      for (int j = 0; j < nComponents; j++) {
-        if (mat[i][j]) {
-          result[i] = mat[i][j]->getRowFeSpace();
-          break;
-        }
-      }
-    }
-
-    return result;
-  }
-
 }
diff --git a/AMDiS/src/MatrixVector.h b/AMDiS/src/MatrixVector.h
index 3947983b..8c0e793a 100644
--- a/AMDiS/src/MatrixVector.h
+++ b/AMDiS/src/MatrixVector.h
@@ -518,11 +518,6 @@ namespace AMDiS {
     z[1] = x[2] * y[0] - x[0] * y[2];
     z[2] = x[0] * y[1] - x[1] * y[0];
   }
-
-
-  /// Returns a vector with the FE spaces of each row in the matrix. Thus, the
-  /// resulting vector may contain the same FE space multiple times.
-  vector<const FiniteElemSpace*> getComponentFeSpaces(Matrix<DOFMatrix*> &mat);
 }
 
 #endif // AMDIS_MATRIXVECTOR_H
diff --git a/AMDiS/src/ProblemStat.h b/AMDiS/src/ProblemStat.h
index ee6ead03..51f92b24 100644
--- a/AMDiS/src/ProblemStat.h
+++ b/AMDiS/src/ProblemStat.h
@@ -334,7 +334,7 @@ namespace AMDiS {
     }
 
     /// Returns \ref componentSpaces;
-    inline vector<const FiniteElemSpace*> getComponentFeSpaces()
+    inline vector<const FiniteElemSpace*> getComponentSpaces()
     {
       return componentSpaces;
     }
diff --git a/AMDiS/src/SystemVector.cc b/AMDiS/src/SystemVector.cc
index 8399b037..b83c32c0 100644
--- a/AMDiS/src/SystemVector.cc
+++ b/AMDiS/src/SystemVector.cc
@@ -35,14 +35,4 @@ namespace AMDiS {
       vectors[i]->deserialize(in);
   }
-
-  vector<const FiniteElemSpace*> SystemVector::getComponentFeSpaces() const
-  {
-    vector<const FiniteElemSpace*> result(vectors.size());
-
-    for (unsigned int i = 0; i < vectors.size(); i++)
-      result[i] = vectors[i]->getFeSpace();
-
-    return result;
-  }
 
 }
diff --git a/AMDiS/src/SystemVector.h b/AMDiS/src/SystemVector.h
index 99c8998c..24131f2d 100644
--- a/AMDiS/src/SystemVector.h
+++ b/AMDiS/src/SystemVector.h
@@ -120,8 +120,6 @@ namespace AMDiS {
       return feSpace;
     }
 
-    vector<const FiniteElemSpace*> getComponentFeSpaces() const;
-
     /// Here the system vector is interpreted as one large vector. The given
     /// is used as a global index which indicates a local vector number and
     /// a local index on this vector. The operator returns this local vector
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 96d23346..4aec9136 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -160,18 +160,18 @@ namespace AMDiS {
     bool doNext = false;
     do {
       doNext = false;
-      for (unsigned int i = 0; i < uniqueFeSpaces.size() - 1; i++) {
-        if (uniqueFeSpaces[i]->getBasisFcts()->getDegree() >
-            uniqueFeSpaces[i + 1]->getBasisFcts()->getDegree()) {
-          const FiniteElemSpace *tmp = uniqueFeSpaces[i + 1];
-          uniqueFeSpaces[i + 1] = uniqueFeSpaces[i];
-          uniqueFeSpaces[i] = tmp;
+      for (unsigned int i = 0; i < feSpaces.size() - 1; i++) {
+        if (feSpaces[i]->getBasisFcts()->getDegree() >
+            feSpaces[i + 1]->getBasisFcts()->getDegree()) {
+          const FiniteElemSpace *tmp = feSpaces[i + 1];
+          feSpaces[i + 1] = feSpaces[i];
+          feSpaces[i] = tmp;
           doNext = true;
         }
       }
     } while (doNext);
 
-    elObjDb.setFeSpace(uniqueFeSpaces[0]);
+    elObjDb.setFeSpace(feSpaces[0]);
 
     // If required, create hierarchical mesh level structure.
     createMeshLevelStructure();
@@ -193,7 +193,7 @@ namespace AMDiS {
     elObjDb.setData(partitionMap, levelData);
 
 #if (DEBUG != 0)
-    ParallelDebug::writeDebugFile(uniqueFeSpaces[uniqueFeSpaces.size() - 1],
+    ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
                                   dofMap,
                                   debugOutputDir + "mpi-dbg", "dat");
 #endif
@@ -318,7 +318,7 @@ namespace AMDiS {
     ParallelDebug::testInteriorBoundary(*this);
     ParallelDebug::followBoundary(*this);
 
-    debug::writeMesh(uniqueFeSpaces[0], -1, debugOutputDir + "macro_mesh");
+    debug::writeMesh(feSpaces[0], -1, debugOutputDir + "macro_mesh");
 
     MSG("Debug mode tests finished!\n");
 #endif
@@ -369,9 +369,9 @@ namespace AMDiS {
     TEST_EXIT(componentSpaces.size() == 0)
       ("Parallelization of coupled problems is deactived at the moment!\n");
 
-    componentSpaces = probStat->getComponentFeSpaces();
-    uniqueFeSpaces = probStat->getFeSpaces();
-    mesh = uniqueFeSpaces[0]->getMesh();
+    componentSpaces = probStat->getComponentSpaces();
+    feSpaces = probStat->getFeSpaces();
+    mesh = feSpaces[0]->getMesh();
     info = probStat->getInfo();
 
     switch (mesh->getDim()) {
@@ -668,8 +668,8 @@ namespace AMDiS {
       DofEdge dofEdge1 = edge1.first->getEdge(edge1.second);
 
       WorldVector<double> c0, c1;
-      mesh->getDofIndexCoords(dofEdge0.first, uniqueFeSpaces[0], c0);
-      mesh->getDofIndexCoords(dofEdge0.second, uniqueFeSpaces[0], c1);
+      mesh->getDofIndexCoords(dofEdge0.first, feSpaces[0], c0);
+      mesh->getDofIndexCoords(dofEdge0.second, feSpaces[0], c1);
 
       MSG("FOUND EDGE %d/%d <-> %d/%d\n",
           edge0.first->getIndex(), edge0.second,
@@ -966,7 +966,7 @@ namespace AMDiS {
          MPI::Wtime() - first);
 
 #if (DEBUG != 0)
-    debug::writeMesh(uniqueFeSpaces[0], -1, debugOutputDir + "mesh");
+    debug::writeMesh(feSpaces[0], -1, debugOutputDir + "mesh");
 #endif
 
     // Because the mesh has been changed, update the DOF numbering and mappings.
@@ -1267,7 +1267,7 @@ namespace AMDiS {
     if (writePartMesh > 0 && repartitioningCounter == 0)
       ParallelDebug::writePartitioningFile(debugOutputDir + "partitioning",
                                            repartitioningCounter,
-                                           uniqueFeSpaces[0]);
+                                           feSpaces[0]);
 
     repartitioningCounter++;
 
@@ -1298,7 +1298,7 @@ namespace AMDiS {
 
     // === Run mesh partitioner to calculate a new mesh partitioning. ===
 
-    partitioner->setLocalGlobalDofMap(&(dofMap[uniqueFeSpaces[0]].getMap()));
+    partitioner->setLocalGlobalDofMap(&(dofMap[feSpaces[0]].getMap()));
     bool partitioningSucceed = partitioner->partition(elemWeights, ADAPTIVE_REPART);
 
     if (!partitioningSucceed) {
@@ -1475,11 +1475,11 @@ namespace AMDiS {
 
     // Note that also if there are no macros to be deleted, this function will
     // update the number of elements, vertices, etc. of the mesh.
-    mesh->removeMacroElements(deleteMacroElements, uniqueFeSpaces);
+    mesh->removeMacroElements(deleteMacroElements, feSpaces);
 
     // === Remove double DOFs. ===
     MeshManipulation meshManipulation(mesh);
-    meshManipulation.deleteDoubleDofs(uniqueFeSpaces, newMacroEl, elObjDb);
+    meshManipulation.deleteDoubleDofs(feSpaces, newMacroEl, elObjDb);
 
     mesh->dofCompress();
     partitioner->createPartitionMap(partitionMap);
@@ -1515,7 +1515,7 @@ namespace AMDiS {
     ParallelDebug::writePartitioningFile(debugOutputDir + "partitioning",
                                          repartitioningCounter,
-                                         uniqueFeSpaces[0]);
+                                         feSpaces[0]);
     ParallelDebug::testAllElements(*this);
     ParallelDebug::testDoubleDofs(mesh);
     ParallelDebug::testInteriorBoundary(*this);
 
@@ -1561,11 +1561,11 @@ namespace AMDiS {
 
     // === Create DOF communicator. ===
 
-    dofComm.init(0, levelData, uniqueFeSpaces);
+    dofComm.init(0, levelData, feSpaces);
     dofComm.create(intBoundary);
 
     if (levelData.getLevelNumber() > 1) {
-      dofCommSd.init(0, levelData, uniqueFeSpaces);
+      dofCommSd.init(0, levelData, feSpaces);
       dofCommSd.create(intBoundarySd);
     }
@@ -1577,8 +1577,8 @@ namespace AMDiS {
     int nLevels = levelData.getLevelNumber();
     boundaryDofInfo.resize(nLevels);
 
-    for (unsigned int i = 0; i < uniqueFeSpaces.size(); i++) {
-      const FiniteElemSpace *feSpace = uniqueFeSpaces[i];
+    for (unsigned int i = 0; i < feSpaces.size(); i++) {
+      const FiniteElemSpace *feSpace = feSpaces[i];
 
       for (int level = 0; level < nLevels; level++) {
 
@@ -1630,7 +1630,7 @@ namespace AMDiS {
       if (partitioner->getElementInRank()[(*it)->getIndex()] == false)
         macrosToRemove.insert(*it);
 
-    mesh->removeMacroElements(macrosToRemove, uniqueFeSpaces);
+    mesh->removeMacroElements(macrosToRemove, feSpaces);
   }
 
 
@@ -1651,13 +1651,13 @@ namespace AMDiS {
     int nLevels = levelData.getLevelNumber();
     TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
 
-    dofMap.init(levelData, componentSpaces, uniqueFeSpaces);
+    dofMap.init(levelData, componentSpaces, feSpaces);
     dofMap.setMpiComm(levelData.getMpiComm(0), 0);
     dofMap.setDofComm(dofComm);
     dofMap.clear();
 
     if (nLevels > 1) {
-      dofMapSd.init(levelData, componentSpaces, uniqueFeSpaces);
+      dofMapSd.init(levelData, componentSpaces, feSpaces);
       dofMapSd.setMpiComm(levelData.getMpiComm(1), 1);
       dofMapSd.setDofComm(dofCommSd);
       dofMapSd.clear();
@@ -1665,13 +1665,13 @@ namespace AMDiS {
 
     createBoundaryDofs();
 
-    for (unsigned int i = 0; i < uniqueFeSpaces.size(); i++)
-      updateLocalGlobalNumbering(dofMap, dofComm, uniqueFeSpaces[i]);
+    for (unsigned int i = 0; i < feSpaces.size(); i++)
+      updateLocalGlobalNumbering(dofMap, dofComm, feSpaces[i]);
     dofMap.update();
 
     if (nLevels > 1) {
-      for (unsigned int i = 0; i < uniqueFeSpaces.size(); i++)
-        updateLocalGlobalNumbering(dofMapSd, dofCommSd, uniqueFeSpaces[i]);
+      for (unsigned int i = 0; i < feSpaces.size(); i++)
+        updateLocalGlobalNumbering(dofMapSd, dofCommSd, feSpaces[i]);
       dofMapSd.update();
     }
 
@@ -1685,28 +1685,28 @@ namespace AMDiS {
     MSG("------------- Debug information -------------\n");
     MSG("| number of levels: %d\n", nLevels);
-    MSG("| number of FE spaces: %d\n", uniqueFeSpaces.size());
+    MSG("| number of FE spaces: %d\n", feSpaces.size());
 
-    for (unsigned int i = 0; i < uniqueFeSpaces.size(); i++) {
-      MSG("| FE space = %d (pointer adr %p):\n", i, uniqueFeSpaces[i]);
-      MSG("| nRankDofs = %d\n", dofMap[uniqueFeSpaces[i]].nRankDofs);
-      MSG("| nOverallDofs = %d\n", dofMap[uniqueFeSpaces[i]].nOverallDofs);
-      MSG("| rStartDofs = %d\n", dofMap[uniqueFeSpaces[i]].rStartDofs);
+    for (unsigned int i = 0; i < feSpaces.size(); i++) {
+      MSG("| FE space = %d (pointer adr %p):\n", i, feSpaces[i]);
+      MSG("| nRankDofs = %d\n", dofMap[feSpaces[i]].nRankDofs);
+      MSG("| nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs);
+      MSG("| rStartDofs = %d\n", dofMap[feSpaces[i]].rStartDofs);
     }
 
     if (nLevels > 1) {
-      for (unsigned int i = 0; i < uniqueFeSpaces.size(); i++) {
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
         MSG("| FE space = %d:\n", i);
-        MSG("| nRankDofs = %d\n", dofMapSd[uniqueFeSpaces[i]].nRankDofs);
-        MSG("| nOverallDofs = %d\n", dofMapSd[uniqueFeSpaces[i]].nOverallDofs);
-        MSG("| rStartDofs = %d\n", dofMapSd[uniqueFeSpaces[i]].rStartDofs);
+        MSG("| nRankDofs = %d\n", dofMapSd[feSpaces[i]].nRankDofs);
+        MSG("| nOverallDofs = %d\n", dofMapSd[feSpaces[i]].nOverallDofs);
+        MSG("| rStartDofs = %d\n", dofMapSd[feSpaces[i]].rStartDofs);
       }
     }
 
     // debug::writeElementIndexMesh(mesh, debugOutputDir + "elementIndex-" +
     //                              lexical_cast<string>(mpiRank) + ".vtu");
 
-    ParallelDebug::writeDebugFile(uniqueFeSpaces[uniqueFeSpaces.size() - 1],
+    ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
                                   dofMap,
                                   debugOutputDir + "mpi-dbg", "dat");
 
     debug::testSortedDofs(mesh, elMap);
@@ -1718,15 +1718,15 @@ namespace AMDiS {
       ParallelDebug::testGlobalIndexByCoords(*this);
     }
 #else
-    for (unsigned int i = 0; i < uniqueFeSpaces.size(); i++)
+    for (unsigned int i = 0; i < feSpaces.size(); i++)
       MSG("FE space %d: nRankDofs = %d nOverallDofs = %d\n",
-          i, dofMap[uniqueFeSpaces[i]].nRankDofs,
-          dofMap[uniqueFeSpaces[i]].nOverallDofs);
+          i, dofMap[feSpaces[i]].nRankDofs,
+          dofMap[feSpaces[i]].nOverallDofs);
 
     int tmp = 0;
     Parameters::get(name + "->write parallel debug file", tmp);
     if (tmp)
-      ParallelDebug::writeDebugFile(uniqueFeSpaces[uniqueFeSpaces.size() - 1],
+      ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
                                     dofMap,
                                     debugOutputDir + "mpi-dbg", "dat");
 #endif
@@ -1784,8 +1784,8 @@ namespace AMDiS {
     // MPI::COMM_WORLD.Barrier(); [TODO: CHANGE BECAUSE NOT ALL RANKS HAVE PERIODIC MAP!!!]
     double first = MPI::Wtime();
 
-    for (unsigned int i = 0; i < uniqueFeSpaces.size(); i++)
-      createPeriodicMap(uniqueFeSpaces[i]);
+    for (unsigned int i = 0; i < feSpaces.size(); i++)
+      createPeriodicMap(feSpaces[i]);
 
     // MPI::COMM_WORLD.Barrier();
     INFO(info, 8)("Creation of periodic mapping needed %.5f seconds\n",
@@ -2022,14 +2022,14 @@ namespace AMDiS {
     // Create a map from DOF indices to the corresponding DOF pointers.
     map<const FiniteElemSpace*, map<int, const DegreeOfFreedom*> > dofIndexMap;
 
-    for (unsigned int i = 0; i < uniqueFeSpaces.size(); i++) {
-      ElementDofIterator elDofIter(uniqueFeSpaces[i]);
+    for (unsigned int i = 0; i < feSpaces.size(); i++) {
+      ElementDofIterator elDofIter(feSpaces[i]);
       TraverseStack stack;
       ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
       while (elInfo) {
         elDofIter.reset(elInfo->getElement());
         do {
-          dofIndexMap[uniqueFeSpaces[i]][elDofIter.getDof()] = elDofIter.getDofPtr();
+          dofIndexMap[feSpaces[i]][elDofIter.getDof()] = elDofIter.getDofPtr();
         } while (elDofIter.next());
 
         elInfo = stack.traverseNext(elInfo);
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index f82a7371..06b59bcf 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -137,20 +137,20 @@ namespace AMDiS {
     {
       FUNCNAME("MeshDistributor::getFeSpace()");
 
-      TEST_EXIT_DBG(i < uniqueFeSpaces.size())
+      TEST_EXIT_DBG(i < feSpaces.size())
         ("Try to access FE space %d, but have only %d FE spaces!\n",
-         i, uniqueFeSpaces.size());
+         i, feSpaces.size());
 
-      return uniqueFeSpaces[i];
+      return feSpaces[i];
     }
 
     /// Returns all FE spaces, thus \ref feSpaces.
     inline vector<const FiniteElemSpace*>& getFeSpaces()
     {
-      return uniqueFeSpaces;
+      return feSpaces;
     }
 
-    inline const FiniteElemSpace* getComponentFeSpace(unsigned int i = 0)
+    inline const FiniteElemSpace* getComponentSpace(unsigned int i = 0)
     {
       FUNCNAME("MeshDistributor::getFeSpace()");
 
@@ -159,7 +159,7 @@ namespace AMDiS {
       return componentSpaces[i];
     }
 
-    inline vector<const FiniteElemSpace*>& getComponentFeSpaces()
+    inline vector<const FiniteElemSpace*>& getComponentSpaces()
     {
       return componentSpaces;
     }
@@ -504,7 +504,8 @@ namespace AMDiS {
     /// Finite element spaces of the problem.
     vector<const FiniteElemSpace*> componentSpaces;
 
-    vector<const FiniteElemSpace*> uniqueFeSpaces;
+    /// Set of all different FE spaces.
+    vector<const FiniteElemSpace*> feSpaces;
 
     /// Mesh of the problem.
diff --git a/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc b/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
index 0ea17aa5..dbe6803c 100644
--- a/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
+++ b/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
@@ -115,7 +115,8 @@ namespace AMDiS {
 
     if (checkMeshChange()) {
       // Mesh has been changed, recompute interior DOF mapping.
-      vector<const FiniteElemSpace*> feSpaces = getComponentFeSpaces(seqMat);
+      vector<const FiniteElemSpace*> feSpaces =
+        meshDistributor->getComponentSpaces();
       interiorMap->setComputeMatIndex(!localMatrix);
       interiorMap->update();
 
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index f62563ac..cc51b18e 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -164,7 +164,7 @@ namespace AMDiS {
           perMap.periodicDofAssociations[feSpace].begin();
          it != perMap.periodicDofAssociations[feSpace].end(); ++it) {
       WorldVector<double> c;
-      pdb.mesh->getDofIndexCoords(it->first, pdb.uniqueFeSpaces[0], c);
+      pdb.mesh->getDofIndexCoords(it->first, pdb.feSpaces[0], c);
       int nAssoc = it->second.size();
     }
 
@@ -329,7 +329,7 @@ namespace AMDiS {
     clock_t first = clock();
 
     // Get FE space with basis functions of the highest degree
-    const FiniteElemSpace *feSpace = pdb.uniqueFeSpaces[pdb.uniqueFeSpaces.size() - 1];
+    const FiniteElemSpace *feSpace = pdb.feSpaces[pdb.feSpaces.size() - 1];
 
     int testCommonDofs = 1;
     Parameters::get("dbg->test common dofs", testCommonDofs);
@@ -471,7 +471,7 @@ namespace AMDiS {
     FUNCNAME("ParallelDebug::testGlobalIndexByCoords()");
 
     // Get FE space with basis functions of the highest degree
-    const FiniteElemSpace *feSpace = pdb.uniqueFeSpaces[pdb.uniqueFeSpaces.size() - 1];
+    const FiniteElemSpace *feSpace = pdb.feSpaces[pdb.feSpaces.size() - 1];
 
     DOFVector<WorldVector<double> > coords(feSpace, "tmp");
     pdb.mesh->getDofIndexCoords(feSpace, coords);
@@ -649,7 +649,7 @@ namespace AMDiS {
 
 #if 0
     if (rank == -1 || pdb.mpiRank == rank) {
-      const FiniteElemSpace *feSpace = pdb.uniqueFeSpaces[0];
+      const FiniteElemSpace *feSpace = pdb.feSpaces[0];
 
       cout << "====== DOF MAP LOCAL -> GLOBAL ====== " << endl;
 
@@ -700,7 +700,7 @@ namespace AMDiS {
           dofit != rankDofs.end(); ++dofit) {
        cout << " " << **dofit << endl;
        WorldVector<double> coords;
-       pdb.mesh->getDofIndexCoords(*dofit, pdb.uniqueFeSpaces[0], coords);
+       pdb.mesh->getDofIndexCoords(*dofit, pdb.feSpaces[0], coords);
        coords.print();
      }
 
@@ -709,7 +709,7 @@ namespace AMDiS {
           dofit != rankAllDofs.end(); ++dofit) {
        cout << " " << **dofit << endl;
        WorldVector<double> coords;
-       pdb.mesh->getDofIndexCoords(*dofit, pdb.uniqueFeSpaces[0], coords);
+       pdb.mesh->getDofIndexCoords(*dofit, pdb.feSpaces[0], coords);
        coords.print();
      }
    }
@@ -886,13 +886,13 @@ namespace AMDiS {
       if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
         debug::writeLocalElementDofs(pdb.mpiRank,
                                      it->rankObj.elIndex,
-                                     pdb.uniqueFeSpaces[0]);
+                                     pdb.feSpaces[0]);
 
     for (InteriorBoundary::iterator it(pdb.intBoundary.other); !it.end(); ++it)
       if (followThisBound(it->rankObj.elIndex, it->neighObj.elIndex))
         debug::writeLocalElementDofs(pdb.mpiRank,
                                      it->rankObj.elIndex,
-                                     pdb.uniqueFeSpaces[0]);
+                                     pdb.feSpaces[0]);
 
   }
 
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index f3823095..7bfae812 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -366,7 +366,7 @@ namespace AMDiS {
     /// FE space.
     ComponentDofMap& operator[](const FiniteElemSpace *feSpace)
     {
-      ERROR_EXIT("FE Space acces is not possible for component wise defined DOF mappings\n");
+      ERROR_EXIT("FE Space access is not possible for component wise defined DOF mappings\n");
     }
 
     /// Return data iterator.
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 0e224116..a1b21479 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -100,8 +100,6 @@ namespace AMDiS {
       Parameters::get("parallel->feti->pressure component", pressureComponent);
       TEST_EXIT(pressureComponent >= 0)
         ("FETI-DP in Stokes mode, no pressure component defined!\n");
-
-      pressureFeSpace = feSpaces[pressureComponent];
     }
 
     if (subdomain == NULL) {
@@ -192,8 +190,8 @@ namespace AMDiS {
       interfaceDofMap.setMpiComm(levelData.getMpiComm(0), 0);
     }
 
-    for (unsigned int i = 0; i < meshDistributor->getComponentFeSpaces().size(); i++) {
-      const FiniteElemSpace *feSpace = meshDistributor->getComponentFeSpace(i);
+    for (unsigned int i = 0; i < meshDistributor->getComponentSpaces().size(); i++) {
+      const FiniteElemSpace *feSpace = meshDistributor->getComponentSpace(i);
 
       createPrimals(i, feSpace);
 
@@ -213,8 +211,8 @@ namespace AMDiS {
     if (stokesMode)
       interfaceDofMap.update();
 
-    for (unsigned int i = 0; i < meshDistributor->getComponentFeSpaces().size(); i++) {
-      const FiniteElemSpace *feSpace = meshDistributor->getComponentFeSpace(i);
+    for (unsigned int i = 0; i < meshDistributor->getComponentSpaces().size(); i++) {
+      const FiniteElemSpace *feSpace = meshDistributor->getComponentSpace(i);
       createLagrange(i, feSpace);
       createAugmentedLagrange(i, feSpace);
     }
@@ -245,12 +243,12 @@ namespace AMDiS {
     }
 
     MSG("FETI-DP data created on mesh level %d\n", meshLevel);
 
-    for (unsigned int i = 0; i < meshDistributor->getComponentFeSpaces().size(); i++) {
-      const FiniteElemSpace *feSpace = meshDistributor->getComponentFeSpace(i);
+    for (unsigned int i = 0; i < meshDistributor->getComponentSpaces().size(); i++) {
+      const FiniteElemSpace *feSpace = meshDistributor->getComponentSpace(i);
 
       MSG("FETI-DP data for %d-ith component (FE space %p):\n", i, feSpace);
 
-      if (feSpace == pressureFeSpace) {
+      if (i == pressureComponent) {
         MSG(" nRankInterface = %d nOverallInterface = %d\n",
             interfaceDofMap[i].nRankDofs, interfaceDofMap[i].nOverallDofs);
@@ -289,7 +287,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::createPrimals()");
 
-    if (feSpace == pressureFeSpace)
+    if (component == pressureComponent)
       return;
 
     // === Define all vertices on the interior boundaries of the macro mesh ===
@@ -339,7 +337,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::createDuals()");
 
-    if (feSpace == pressureFeSpace)
+    if (component == pressureComponent)
      return;
 
    // === Create global index of the dual nodes on each rank. ===
@@ -370,7 +368,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::createInterfaceNodes()");
 
-    if (feSpace != pressureFeSpace)
+    if (component != pressureComponent)
       return;
 
     DofContainer allBoundaryDofs;
@@ -394,7 +392,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::createLagrange()");
 
-    if (feSpace == pressureFeSpace)
+    if (component == pressureComponent)
       return;
 
     boundaryDofRanks[feSpace].clear();
@@ -1270,11 +1268,14 @@ namespace AMDiS {
     if (stokesMode) {
       // === Create H2 vec ===
 
+      const FiniteElemSpace *pressureFeSpace =
+        meshDistributor->getComponentSpace(pressureComponent);
+
       DOFVector<double> tmpvec(pressureFeSpace, "tmpvec");
       createH2vec(tmpvec);
       interfaceDofMap.createVec(fetiInterfaceLumpedPreconData.h2vec);
 
-      DofMap& m = interfaceDofMap[pressureFeSpace].getMap();
+      DofMap& m = interfaceDofMap[pressureComponent].getMap();
       for (DofMap::iterator it = m.begin(); it != m.end(); ++it) {
         if (meshDistributor->getDofMap()[pressureFeSpace].isRankDof(it->first)) {
           int index = interfaceDofMap.getMatIndex(pressureComponent, it->first);
@@ -1419,10 +1420,13 @@ namespace AMDiS {
     if (!stokesMode)
       return;
 
+    const FiniteElemSpace *pressureFeSpace =
+      meshDistributor->getComponentSpace(pressureComponent);
+
     Vec ktest0, ktest1;
     localDofMap.createLocalVec(ktest0);
     localDofMap.createLocalVec(ktest1);
-    DofMap& m = localDofMap[pressureFeSpace].getMap();
+    DofMap& m = localDofMap[pressureComponent].getMap();
     for (DofMap::iterator it = m.begin(); it != m.end(); ++it) {
       if (meshDistributor->getDofMap()[pressureFeSpace].isRankDof(it->first)) {
         int index = localDofMap.getLocalMatIndex(pressureComponent, it->first);
@@ -1675,7 +1679,7 @@ namespace AMDiS {
 
     // === And copy from PETSc local vectors to the DOF vectors. ===
 
-    vector<const FiniteElemSpace*> feSpaces = vec.getComponentFeSpaces();
+    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();
     int cnt = 0;
     for (int component = 0; component < nComponents; component++) {
       DOFVector<double>& dofVec = *(vec.getDOFVector(component));
@@ -1716,10 +1720,8 @@ namespace AMDiS {
     globalIsIndex.reserve(interfaceDofMap.getLocalDofs());
     localIsIndex.reserve(interfaceDofMap.getLocalDofs());
 
-    const FiniteElemSpace *pressureFeSpace =
-      vec.getDOFVector(pressureComponent)->getFeSpace();
     int cnt = 0;
-    DofMap& dofMap = interfaceDofMap[pressureFeSpace].getMap();
+    DofMap& dofMap = interfaceDofMap[pressureComponent].getMap();
     for (DofMap::iterator it = dofMap.begin();it != dofMap.end(); ++it) {
       globalIsIndex.push_back(interfaceDofMap.getMatIndex(pressureComponent,
                                                           it->first));
@@ -1761,8 +1763,8 @@ namespace AMDiS {
 
     cnt = 0;
     DOFVector<double>& dofVec = *(vec.getDOFVector(pressureComponent));
-    for (DofMap::iterator it = interfaceDofMap[pressureFeSpace].getMap().begin();
-         it != interfaceDofMap[pressureFeSpace].getMap().end(); ++it) {
+    for (DofMap::iterator it = interfaceDofMap[pressureComponent].getMap().begin();
+         it != interfaceDofMap[pressureComponent].getMap().end(); ++it) {
       dofVec[it->first] = localSolInterface[cnt++];
     }
 
@@ -1777,7 +1779,7 @@ namespace AMDiS {
 
     // === Create all sets and indices. ===
 
-    vector<const FiniteElemSpace*> feSpaces = AMDiS::getComponentFeSpaces(*mat);
+    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();
 
     initialize(feSpaces);
 
@@ -1847,7 +1849,7 @@ namespace AMDiS {
       return;
 
     double wtime = MPI::Wtime();
-    vector<const FiniteElemSpace*> feSpaces = AMDiS::getComponentFeSpaces(*mat);
+    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();
 
    int nRowsDual = dualDofMap.getRankDofs();
    MatCreateSeqAIJ(PETSC_COMM_SELF,
@@ -2187,7 +2189,7 @@ namespace AMDiS {
 
       VecAXPY(vecRhsLagrange, -1.0, tmp_lagrange);
     } else {
-      TEST_EXIT(augmentedLagrange)("Not yet implemented!\n");
+      TEST_EXIT(!augmentedLagrange)("Not yet implemented!\n");
 
       subdomain->solveGlobal(subdomain->getVecRhsInterior(), tmp_b0);
 
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 49b61e6f..8d3ac569 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -344,10 +344,13 @@ namespace AMDiS {
 
     int nOverallEdges;
 
+    /// If true, the FETI-DP solver is applied to a Stokes like problem. Thus,
+    /// there is a pressure variable which is not part of the coarse grid
+    /// problem.
     bool stokesMode;
 
-    const FiniteElemSpace* pressureFeSpace;
-
+    /// Only used if \ref stokesMode is enabled. In this case, this variable
+    /// defines the component number of the pressure variable.
     int pressureComponent;
 
     /// Maps from component number to set of DOFs which are Dirichlet DOfs in
diff --git a/AMDiS/src/parallel/PetscSolverFetiDebug.cc b/AMDiS/src/parallel/PetscSolverFetiDebug.cc
index 8e90c4e2..283ab9ab 100644
--- a/AMDiS/src/parallel/PetscSolverFetiDebug.cc
+++ b/AMDiS/src/parallel/PetscSolverFetiDebug.cc
@@ -72,10 +72,12 @@ namespace AMDiS {
     localDofMap.createLocalVec(ktest0);
     localDofMap.createLocalVec(ktest1);
     localDofMap.createVec(ktest2, feti.nGlobalOverallInterior);
-
-    DofMap& m = localDofMap[feti.pressureFeSpace].getMap();
+
+    const FiniteElemSpace* pressureFeSpace =
+      feti.meshDistributor->getComponentSpace(feti.pressureComponent);
+    DofMap& m = localDofMap[feti.pressureComponent].getMap();
     for (DofMap::iterator it = m.begin(); it != m.end(); ++it) {
-      if (feti.meshDistributor->getDofMap()[feti.pressureFeSpace].isRankDof(it->first)) {
+      if (feti.meshDistributor->getDofMap()[pressureFeSpace].isRankDof(it->first)) {
        int index = localDofMap.getLocalMatIndex(feti.pressureComponent, it->first);
        VecSetValue(ktest0, index, 1.0, INSERT_VALUES);
       }
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 47fece85..d3fc6c8b 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -111,7 +111,7 @@ namespace AMDiS {
     TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
       ("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());
 
-    vector<const FiniteElemSpace*> feSpaces = AMDiS::getComponentFeSpaces(*seqMat);
+    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();
 
     // === Prepare traverse of sequentially created matrices. ===
-- 
GitLab
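For readers skimming the diff: the recurring change above drops the free helpers getComponentFeSpaces(Matrix<DOFMatrix*>&) and SystemVector::getComponentFeSpaces() in favour of the MeshDistributor accessors, and identifies the pressure block by its component index rather than by an FE-space pointer. The following minimal caller sketch illustrates that pattern; exampleComponentLoop and its parameters are hypothetical, while getComponentSpaces(), getComponentSpace() and the pressureComponent convention are taken from the patch itself.

// Hypothetical usage sketch, not part of the commit.
#include "parallel/MeshDistributor.h"

namespace AMDiS {

  void exampleComponentLoop(MeshDistributor *meshDistributor, int pressureComponent)
  {
    // One FE space per solution component (the same space may occur several times).
    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();

    for (unsigned int i = 0; i < feSpaces.size(); i++) {
      if (static_cast<int>(i) == pressureComponent)
        continue;  // in Stokes mode the pressure block is treated separately

      // ... build per-component data from feSpaces[i], or fetch a single
      // component via meshDistributor->getComponentSpace(i) ...
    }
  }
}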