diff --git a/AMDiS/src/ParMetisPartitioner.cc b/AMDiS/src/ParMetisPartitioner.cc index e9084ae419a9f71cb7540c431a24026634acde9f..21c473aa5a934aca44bd0588ace5f0bd691b540c 100644 --- a/AMDiS/src/ParMetisPartitioner.cc +++ b/AMDiS/src/ParMetisPartitioner.cc @@ -116,6 +116,7 @@ namespace AMDiS { } } + ParMetisMesh::~ParMetisMesh() { if (eptr) @@ -134,6 +135,7 @@ namespace AMDiS { delete [] elem_p2a; } + ParMetisGraph::ParMetisGraph(ParMetisMesh *parMesh, MPI::Intracomm *comm, int ncommonnodes) @@ -156,6 +158,7 @@ namespace AMDiS { &tmpComm); } + ParMetisGraph::~ParMetisGraph() { free(xadj); @@ -174,8 +177,11 @@ namespace AMDiS { } } + void ParMetisPartitioner::createPartitionData() { + FUNCNAME("ParMetisPartitioner::createPartitionData()"); + int mpiRank = mpiComm->Get_rank(); int mpiSize = mpiComm->Get_size(); int nLeaves = mesh->getNumberOfLeaves(); @@ -195,8 +201,10 @@ namespace AMDiS { new PartitionElementData(element->getElementData()); element->setElementData(elData); - if (element->getIndex() >= mpiRank * elPerRank && - element->getIndex() < (mpiRank + 1) * elPerRank) + if ((element->getIndex() >= mpiRank * elPerRank && + element->getIndex() < (mpiRank + 1) * elPerRank) || + (element->getIndex() >= mpiSize * elPerRank && + mpiRank == mpiSize - 1)) elData->setPartitionStatus(IN); else elData->setPartitionStatus(UNDEFINED); @@ -205,6 +213,7 @@ namespace AMDiS { } } + void ParMetisPartitioner::partition(std::map<int, double> *elemWeights, PartitionMode mode, float itr) @@ -362,6 +371,7 @@ namespace AMDiS { delete [] part; } + void ParMetisPartitioner::fillCoarsePartitionVec(std::map<int, int> *partitionVec) { TEST_EXIT(partitionVec)("no partition vector\n"); @@ -405,6 +415,7 @@ namespace AMDiS { delete [] nPartitionElements; } + void ParMetisPartitioner::distributePartitioning(int *part) { FUNCNAME("ParMetisPartitioner::distributePartitioning()"); @@ -510,6 +521,7 @@ namespace AMDiS { delete [] recvBufferOffset; } + void 
ParMetisPartitioner::descendPartitionData(Element *element) { FUNCNAME("ParMetisPartitioner::descendPartitionData()"); diff --git a/AMDiS/src/parallel/ParallelDomainBase.cc b/AMDiS/src/parallel/ParallelDomainBase.cc index 9d6143524c134f9b10ade70e633a105145596f06..559728afc919245664cc9948cabc6ae193df4cd3 100644 --- a/AMDiS/src/parallel/ParallelDomainBase.cc +++ b/AMDiS/src/parallel/ParallelDomainBase.cc @@ -35,6 +35,7 @@ namespace AMDiS { return (*dof1 < *dof2); } + ParallelDomainBase::ParallelDomainBase(ProblemVec *problemStat, ProblemInstatVec *problemInstat) : iterationIF(problemStat), @@ -126,6 +127,8 @@ namespace AMDiS { else MSG("Skip write part mesh!\n"); } + + ParallelDomainDbg::testAllElements(*this); #endif // === Create interior boundary information. === @@ -450,8 +453,6 @@ namespace AMDiS { if (elCode.getCode() != recvCodes[i].getCode()) { TEST_EXIT_DBG(refineManager)("Refinement manager is not set correctly!\n"); - // MSG("START WITH I = %d\n", i); - bool b = fitElementToMeshCode(recvCodes[i], boundIt->rankObj.el, boundIt->rankObj.ithObj, @@ -496,9 +497,7 @@ namespace AMDiS { elInfo = stack.traverseNext(elInfo); } - // MSG("------- START 0 ----------\n"); - - meshChanged = fitElementToMeshCode2(code, stack, elInfo, ithSide, elType); + meshChanged = fitElementToMeshCode2(code, stack, ithSide, elType); return meshChanged; } @@ -518,8 +517,6 @@ namespace AMDiS { } if (s1 != -1) { - // MSG("------- START 1 ----------\n"); - TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), -1, Mesh::CALL_EVERY_EL_PREORDER | Mesh::FILL_NEIGH | Mesh::FILL_BOUND); @@ -528,10 +525,8 @@ namespace AMDiS { elInfo = stack.traverseNext(elInfo); } - meshChanged |= fitElementToMeshCode2(code, stack, elInfo, s1, el->getChildType(elType)); + meshChanged |= fitElementToMeshCode2(code, stack, s1, el->getChildType(elType)); } else { - // MSG("------- START 2 --------- %d \n", el->getIndex()); - TraverseStack stack; ElInfo *elInfo = stack.traverseFirst(el->getMesh(), 
-1, Mesh::CALL_EVERY_EL_PREORDER | Mesh::FILL_NEIGH | Mesh::FILL_BOUND); @@ -540,18 +535,15 @@ namespace AMDiS { elInfo = stack.traverseNext(elInfo); } - meshChanged |= fitElementToMeshCode2(code, stack, elInfo, s2, el->getChildType(elType)); + meshChanged |= fitElementToMeshCode2(code, stack, s2, el->getChildType(elType)); } - // MSG("-------- ENDE ---------\n"); - return meshChanged; } bool ParallelDomainBase::fitElementToMeshCode2(MeshStructure &code, TraverseStack &stack, - ElInfo *aelInfo, int ithSide, int elType) { @@ -559,8 +551,6 @@ namespace AMDiS { ElInfo *elInfo = stack.getElInfo(); - // MSG("START EL WITH LEVEL = %d\n", elInfo->getLevel()); - bool value = false; if (!elInfo) return value; @@ -573,9 +563,7 @@ namespace AMDiS { do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getLevel() > level); - - // MSG("RETURN CODE LEAF: %d\n", elInfo->getLevel()); - + return value; } @@ -583,7 +571,6 @@ namespace AMDiS { return value; if (el->isLeaf()) { - // MSG("REFINE CODE NO LEAF!\n"); el->setMark(1); refineManager->setMesh(el->getMesh()); refineManager->setStack(&stack); @@ -595,34 +582,28 @@ namespace AMDiS { int s2 = el->getSideOfChild(1, ithSide, elType); if (s1 != -1) { - // MSG("STEP LEFT 1\n"); stack.traverseNext(elInfo); code.nextElement(); - value |= fitElementToMeshCode2(code, stack, elInfo, s1, el->getChildType(elType)); + value |= fitElementToMeshCode2(code, stack, s1, el->getChildType(elType)); elInfo = stack.getElInfo(); - // MSG("STEP LEFT 2\n"); } else { do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getElement() != el->getSecondChild()); - // MSG("STEP LEFT OMMIT\n"); } TEST_EXIT_DBG(elInfo->getElement() == el->getSecondChild()) ("This should not happen!\n"); if (s2 != -1) { - // MSG("STEP RIGHT 1\n"); code.nextElement(); - value |= fitElementToMeshCode2(code, stack, elInfo, s2, el->getChildType(elType)); - // MSG("STEP RIGHT 2\n"); + value |= fitElementToMeshCode2(code, stack, s2, 
el->getChildType(elType)); } else { int level = elInfo->getLevel(); do { elInfo = stack.traverseNext(elInfo); } while (elInfo && elInfo->getLevel() > level); - // MSG("STEP RIGHT OMMIT\n"); } return value; @@ -711,29 +692,13 @@ namespace AMDiS { } infile.close(); - } else { - + } else { TraverseStack stack; - ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER); + ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL); while (elInfo) { - Element *element = elInfo->getElement(); - - // get partition data - PartitionElementData *partitionData = dynamic_cast<PartitionElementData*> - (element->getElementData(PARTITION_ED)); - - if (partitionData && partitionData->getPartitionStatus() == IN) { - int elNum = -1; - if (partitionData->getLevel() == 0) - elNum = element->getIndex(); - - TEST_EXIT_DBG(elNum != -1)("invalid element number\n"); - if (element->isLeaf()) { - elemWeights[elNum] += 1.0; - localWeightSum += 1.0; - } - } - + elemWeights[elInfo->getElement()->getIndex()] = 1.0; + localWeightSum++; + elInfo = stack.traverseNext(elInfo); } } @@ -791,7 +756,7 @@ namespace AMDiS { stdMpi.send(myIntBoundary.boundary); stdMpi.recv(otherIntBoundary.boundary); stdMpi.startCommunication<int>(MPI_INT); - + // === The information about all neighbouring boundaries has been received. So === // === the rank tests if its own atomic boundaries are in the same order. If === // === not, the atomic boundaries are swaped to the correct order. === @@ -800,10 +765,11 @@ namespace AMDiS { rankIt != otherIntBoundary.boundary.end(); ++rankIt) { // === We have received from rank "rankIt->first" the ordered list of element === - // === indices. We now have to sort the corresponding list in this rank to === + // === indices. Now, we have to sort the corresponding list in this rank to === // === get the same order. === - + for (int j = 0; j < static_cast<int>(rankIt->second.size()); j++) { + // If the expected object is not at place, search for it. 
BoundaryObject &recvedBound = stdMpi.getRecvData()[rankIt->first][j].rankObj; @@ -897,7 +863,7 @@ namespace AMDiS { while (elInfo) { Element *element = elInfo->getElement(); PartitionElementData *partitionData = - dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED)); + dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED)); // Check, if the element is within rank's partition. if (partitionData->getPartitionStatus() == IN) { diff --git a/AMDiS/src/parallel/ParallelDomainBase.h b/AMDiS/src/parallel/ParallelDomainBase.h index 11900c3d5344a89cb9bb2e684fd116c972562676..4b9b9b3622f05381ed1022787d53984493e4fa74 100644 --- a/AMDiS/src/parallel/ParallelDomainBase.h +++ b/AMDiS/src/parallel/ParallelDomainBase.h @@ -300,7 +300,6 @@ namespace AMDiS { bool fitElementToMeshCode2(MeshStructure &code, TraverseStack &stack, - ElInfo *elInfo, int ithSide, int elType); diff --git a/AMDiS/src/parallel/ParallelDomainDbg.cc b/AMDiS/src/parallel/ParallelDomainDbg.cc index eaabf5cb089010b7bccd72842ea1df11d4c03545..6d5a8fe3cf5e005f853f6a356847a91274ac429f 100644 --- a/AMDiS/src/parallel/ParallelDomainDbg.cc +++ b/AMDiS/src/parallel/ParallelDomainDbg.cc @@ -1,5 +1,6 @@ #include "ParallelDomainDbg.h" #include "ParallelDomainBase.h" +#include "PartitionElementData.h" #include "ProblemVec.h" #include "DOFVector.h" #include "FixVec.h" @@ -208,6 +209,37 @@ namespace AMDiS { } + void ParallelDomainDbg::testAllElements(ParallelDomainBase &pdb) + { + FUNCNAME("ParallelDomainDbg::testAllElements()"); + + TraverseStack stack; + ElInfo *elInfo = stack.traverseFirst(pdb.mesh, -1, Mesh::CALL_LEAF_EL); + while (elInfo) { + Element *element = elInfo->getElement(); + PartitionElementData *partitionData = + dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED)); + + int sendId = 0; + if (partitionData->getPartitionStatus() == IN) + sendId = 1; + int recvId = 0; + pdb.mpiComm.Allreduce(&sendId, &recvId, 1, MPI_INT, MPI_SUM); + + if (recvId != 1 && 
pdb.mpiRank == 0) { + if (recvId == 0) + ERROR_EXIT("Element %d has no member partition!\n", element->getIndex()); + + if (recvId > 1) + ERROR_EXIT("Element %d is member of more than one partition!\n", + element->getIndex()); + } + + elInfo = stack.traverseNext(elInfo); + } + } + + + void ParallelDomainDbg::printMapLocalGlobal(ParallelDomainBase &pdb, int rank) { if (rank == -1 || pdb.mpiRank == rank) { diff --git a/AMDiS/src/parallel/ParallelDomainDbg.h b/AMDiS/src/parallel/ParallelDomainDbg.h index 614d816eaae5466ba6a7785ec3732bdc1521afff..4224adbccc42f463201ac6f8b8e3b4467517a78f 100644 --- a/AMDiS/src/parallel/ParallelDomainDbg.h +++ b/AMDiS/src/parallel/ParallelDomainDbg.h @@ -47,6 +47,13 @@ namespace AMDiS { */ static void testCommonDofs(ParallelDomainBase &pdb, bool printCoords = false); + /** \brief + * Tests if all elements in the macro mesh are member of exactly one rank. + * + * \param[in] pdb Parallel problem definition used for debugging. + */ + static void testAllElements(ParallelDomainBase &pdb); + /** \brief * This function is used for debugging only. It prints all information from * the local to global dof mapping, see \ref mapLocalGlobalDofs.