From 06c5b791e8ad2234c5d35e14e85c9f05262dc94e Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Wed, 24 Oct 2012 10:56:38 +0000
Subject: [PATCH] Fixed a lot of small problems.

---
 AMDiS/src/parallel/MeshDistributor.cc         | 61 ++++++++++++-------
 AMDiS/src/parallel/MeshDistributor.h          |  5 ++
 AMDiS/src/parallel/ParallelDofMapping.cc      | 38 +++++++++++-
 AMDiS/src/parallel/ParallelDofMapping.h       | 19 +++++-
 AMDiS/src/parallel/PetscSolverGlobalMatrix.cc |  7 +--
 5 files changed, 100 insertions(+), 30 deletions(-)

diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 8c00dad3..444cb894 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -88,7 +88,8 @@ namespace AMDiS {
       lastMeshChangeIndex(0),
       createBoundaryDofFlag(0),
       boundaryDofInfo(1),
-      meshAdaptivity(true)
+      meshAdaptivity(true),
+      hasPeriodicBoundary(false)
   {
     FUNCNAME("MeshDistributor::ParalleDomainBase()");
 
@@ -319,9 +320,6 @@ namespace AMDiS {
     MSG("Debug mode tests finished!\n");
 #endif
 
-    // Create periodic DOF mapping, if there are periodic boundaries.
-    createPeriodicMap();
-
     // Remove periodic boundary conditions in sequential problem definition.
     removePeriodicBoundaryConditions();
 
@@ -339,9 +337,6 @@ namespace AMDiS {
 
     updateLocalGlobalNumbering();
 
-    // === Update periodic mapping, if there are periodic boundaries. ===
-
-    createPeriodicMap();
 
 #if (DEBUG != 0)
     ParallelDebug::testPeriodicBoundary(*this);
 #endif
 
@@ -990,18 +985,13 @@ namespace AMDiS {
     MPI::COMM_WORLD.Barrier();
     MSG("Parallel mesh adaption needed %.5f seconds\n", MPI::Wtime() - first);
 
-#if (DEBUG != 0)
-    debug::writeMesh(feSpaces[0], -1, debugOutputDir + "mesh");
-#endif
-    // Because the mesh has been changed, update the DOF numbering and mappings.
-    updateLocalGlobalNumbering();
-
-    // Update periodic mapping, if there are periodic boundaries.
-    createPeriodicMap();
+    // === Update the DOF numbering and mappings. ===
+    updateLocalGlobalNumbering();
 
 #if (DEBUG != 0)
+    debug::writeMesh(feSpaces[0], -1, debugOutputDir + "mesh");
     ParallelDebug::testPeriodicBoundary(*this);
 #endif
 
@@ -1518,6 +1508,7 @@ namespace AMDiS {
 
     createInteriorBoundary(false);
 
+
     updateLocalGlobalNumbering();
 
@@ -1538,9 +1529,6 @@ namespace AMDiS {
       }
     }
 
-    // === Update periodic mapping, if there are periodic boundaries. ===
-
-    createPeriodicMap();
 
 #if (DEBUG != 0)
     MSG("AMDiS runs in debug mode, so make some test ...\n");
 
@@ -1584,6 +1572,13 @@ namespace AMDiS {
       intBoundarySd.create(levelData, 1, elObjDb);
       ParallelDebug::printBoundaryInfo(intBoundarySd, 0, true);
     }
+
+    if (firstCall) {
+      int tmpSend = static_cast<int>(intBoundary.hasPeriodic());
+      int tmpRecv = 0;
+      mpiComm.Allreduce(&tmpSend, &tmpRecv, 1, MPI_INT, MPI_MAX);
+      hasPeriodicBoundary = static_cast<bool>(tmpRecv);
+    }
   }
 
@@ -1680,6 +1675,8 @@ namespace AMDiS {
     debug::createSortedDofs(mesh, elMap);
 #endif
 
+    // === Update DOF communicator objects. ===
+
     createBoundaryDofs();
 
@@ -1691,14 +1688,30 @@ namespace AMDiS {
       vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
 
       dofMaps[i]->clear();
 
+      if (hasPeriodicBoundary)
+        dofMaps[i]->setComputeMatIndex(true);
+
       for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++)
         updateLocalGlobalNumbering(*(dofMaps[i]), dofMapSpaces[j]);
 
       dofMaps[i]->update();
+#if (DEBUG != 0)
+      dofMaps[i]->printInfo();
+#endif
+    }
+
+    // === Create periodic DOF maps, if there are periodic boundaries. ===
+
+    if (hasPeriodicBoundary) {
+      createPeriodicMap();
+
+      for (int i = 0; i < static_cast<int>(dofMaps.size()); i++)
+        dofMaps[i]->updateMatIndex();
     }
 
+
     // === Update DOF admins due to new number of DOFs. ===
 
     lastMeshChangeIndex = mesh->getChangeIndex();
 
@@ -1716,10 +1729,11 @@ namespace AMDiS {
       vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
 
       for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++) {
-        MSG("| FE space = %d (pointer adr %p):\n", j, feSpaces[j]);
-        MSG("| nRankDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].nRankDofs);
-        MSG("| nOverallDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].nOverallDofs);
-        MSG("| rStartDofs = %d\n", (*(dofMaps[i]))[feSpaces[j]].rStartDofs);
+        const FiniteElemSpace *feSpace = dofMapSpaces[j];
+        MSG("| FE space = %d (pointer adr %p):\n", j, feSpace);
+        MSG("| nRankDofs = %d\n", (*(dofMaps[i]))[feSpace].nRankDofs);
+        MSG("| nOverallDofs = %d\n", (*(dofMaps[i]))[feSpace].nOverallDofs);
+        MSG("| rStartDofs = %d\n", (*(dofMaps[i]))[feSpace].rStartDofs);
       }
     }
 
@@ -1828,6 +1842,7 @@ namespace AMDiS {
       ComponentDofMap &dofMap = (*(dofMaps[0]))[feSpace];
 
       StdMpi<vector<int> > stdMpi(mpiComm, false);
+
       // === Each rank traverse its periodic boundaries and sends the DOF ===
      // === indices to the rank "on the other side" of the periodic boundary. ===
 
@@ -2035,6 +2050,7 @@ namespace AMDiS {
 
     SerUtil::serialize(out, nMeshChangesAfterLastRepartitioning);
     SerUtil::serialize(out, repartitioningCounter);
+    SerUtil::serialize(out, hasPeriodicBoundary);
   }
 
@@ -2085,6 +2101,7 @@ namespace AMDiS {
 
     SerUtil::deserialize(in, nMeshChangesAfterLastRepartitioning);
     SerUtil::deserialize(in, repartitioningCounter);
+    SerUtil::deserialize(in, hasPeriodicBoundary);
 
     deserialized = true;
   }
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index 8ed4d24b..db4b23f5 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -574,6 +574,11 @@ namespace AMDiS {
     /// is set to true, and thus no special assumption are made.
     bool meshAdaptivity;
 
+    /// Specifies whether the global domain has periodic boundaries. Thus, this
+    /// variable is not related to rank's subdomain but to the global problem
+    /// and therefore the value if the same on all ranks.
+    bool hasPeriodicBoundary;
+
     /// Set of all parallel DOF mapping object that are registered by parallel
     /// solver objects and must be updated automatically after mesh change.
     vector<ParallelDofMapping*> dofMaps;
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index f3e17b2d..7cb015e3 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -358,6 +358,14 @@ namespace AMDiS {
   }
 
 
+  void ParallelDofMapping::updateMatIndex()
+  {
+    FUNCNAME("ParallelDofMapping::updateMatIndex()");
+
+    computeMatIndex(needMatIndexFromGlobal);
+  }
+
+
   void ParallelDofMapping::computeMatIndex(bool globalIndex)
   {
     FUNCNAME("ParallelDofMapping::computeMatIndex()");
@@ -488,7 +496,7 @@ namespace AMDiS {
 
         int counter = 0;
 
-        for (; !it.endDofIter(); it.nextDof())
+        for (; !it.endDofIter(); it.nextDof()) {
           if (dofMap.count(it.getDofIndex())) {
             if (globalIndex) {
               TEST_EXIT_DBG(counter + 2 <= stdMpi.getRecvData(it.getRank()).size())
@@ -504,6 +512,7 @@ namespace AMDiS {
                                  stdMpi.getRecvData(it.getRank())[counter++]);
             }
           }
+        }
       }
     }
   }
@@ -543,10 +552,33 @@
   {
     FUNCNAME("ParallelDofMapping::printInfo()");
 
+    MSG("=== Parallel DOF mapping debug information ===\n");
     if (mode == COMPONENT_WISE) {
-      MSG("Mapping is defined by component numbers!\n");
+      MSG(" mapping is defined by component numbers!\n");
     } else {
-      MSG("Mapping is defined by FE spaces!\n");
+      MSG(" mapping is defined by FE spaces!\n");
+    }
+    MSG(" matrix index is based on global DOF indices: %d\n",
+        needMatIndexFromGlobal);
+
+    MSG(" nRankDofs = %d nLocalDofs = %d nOverallDofs = %d rStartDofs = %d\n",
+        nRankDofs, nLocalDofs, nOverallDofs, rStartDofs);
+
+    int nComponents = componentSpaces.size();
+    int nFeSpaces = feSpaces.size();
+    MSG(" number of components: %d number of different FE spaces: %d\n",
+        nComponents, nFeSpaces);
+
+    for (int i = 0; i < nComponents; i++) {
+      MSG(" component %d:\n", i);
+      MSG(" dof-to-mat-index has %d mappings\n", dofToMatIndex.getSize(i));
+      if (dofToMatIndex.getSize(i) > 0) {
+        MSG(" dof-to-mat-index starts with (%d -> %d) and ends with (%d -> %d)\n",
+            dofToMatIndex.getData(i).begin()->first,
+            dofToMatIndex.getData(i).begin()->second,
+            (dofToMatIndex.getData(i).end() - 1)->first,
+            (dofToMatIndex.getData(i).end() - 1)->second);
+      }
     }
   }
 }
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index 0db33235..45e2e902 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -50,6 +50,8 @@ namespace AMDiS {
   class DofToMatIndex
   {
   public:
+    typedef boost::container::flat_map<DegreeOfFreedom, int> MapType;
+
     DofToMatIndex() {}
 
     /// Reset the data structure.
@@ -86,6 +88,18 @@ namespace AMDiS {
       return data[component][dof];
     }
 
+    /// Returns the number of DOF mappings in one component
+    inline int getSize(int component)
+    {
+      return data[component].size();
+    }
+
+    /// Returns the whole mapping for one component
+    inline MapType& getData(int component)
+    {
+      return data[component];
+    }
+
     /// Returns for a given matrix index the component and (local or global) DOF
     /// index. As the data structure is not made for this kind of reverse
     /// search, this is very slow and should be only used for debugging.
@@ -94,7 +108,7 @@ namespace AMDiS {
   private:
     /// The mapping data. For each system component there is a specific map that
     /// maps global DOF indices to global matrix indices.
-    map<int, boost::container::flat_map<DegreeOfFreedom, int> > data;
+    map<int, MapType> data;
   };
 
@@ -754,6 +768,9 @@ namespace AMDiS {
     /// Update the mapping.
     void update();
 
+    /// Updates only the DOF to matrix index mapping
+    void updateMatIndex();
+
     /// Returns the global matrix index of a given DOF for a given
     /// component number.
     inline int getMatIndex(int ithComponent, DegreeOfFreedom d)
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index d413a2e6..94dbd8cd 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -526,12 +526,11 @@ namespace AMDiS {
       }
     }
 
-    MatZeroRows(mpiMat, dirichletRows.size(), &(dirichletRows[0]), 1.0,
+    MatZeroRows(mpiMat, dirichletRows.size(), &(dirichletRows[0]), 0.0,
                 PETSC_NULL, PETSC_NULL);
 
-    for (int i = 0; i < static_cast<int>(dirichletRows.size()); i++) {
-      MatSetValue(mpiMat, dirichletRows[i], dirichletCols[i], dirichletValues[i], INSERT_VALUES);
-    }
+    for (int i = 0; i < static_cast<int>(dirichletRows.size()); i++)
+      MatSetValue(mpiMat, dirichletRows[i], dirichletCols[i], dirichletValues[i], INSERT_VALUES);
 
     MatAssemblyBegin(mpiMat, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(mpiMat, MAT_FINAL_ASSEMBLY);
-- 
GitLab