diff --git a/AMDiS/src/Element.h b/AMDiS/src/Element.h index d10d704ec3479c95d056084bd775c8ebf80ae7e6..68a437019e58573a67fd47b090065f0307458f68 100644 --- a/AMDiS/src/Element.h +++ b/AMDiS/src/Element.h @@ -123,7 +123,7 @@ namespace AMDiS { /// Returns \ref dof[i][j] which is the j-th DOF of the i-th node of Element. inline DegreeOfFreedom getDof(int i, int j) const { - TEST_EXIT_DBG(dofValid)("Should not happen!\n"); + TEST_EXIT_DBG(dofValid)("DOFs are not valid in element %d!\n", index); return dof[i][j]; } @@ -131,7 +131,7 @@ namespace AMDiS { /// Returns \ref dof[i] which is a pointer to the DOFs of the i-th node. inline const DegreeOfFreedom* getDof(int i) const { - TEST_EXIT_DBG(dofValid)("Should not happen!\n"); + TEST_EXIT_DBG(dofValid)("DOFs are not valid in element %d!\n", index); return dof[i]; } @@ -139,7 +139,7 @@ namespace AMDiS { /// Returns a pointer to the DOFs of this Element inline const DegreeOfFreedom** getDof() const { - TEST_EXIT_DBG(dofValid)("Should not happen!\n"); + TEST_EXIT_DBG(dofValid)("DOFs are not valid in element %d!\n", index); return const_cast<const DegreeOfFreedom**>(dof); } diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc index 4d546bc91739a0de393ee87aa3b3e8bd3ffbc586..7dbf208172d89a670419c5c1038115f6e7f2ec6a 100644 --- a/AMDiS/src/parallel/MeshDistributor.cc +++ b/AMDiS/src/parallel/MeshDistributor.cc @@ -1006,9 +1006,10 @@ namespace AMDiS { #if (DEBUG != 0) ParallelDebug::testDoubleDofs(mesh); - if (repartCounter == 0) { + if (repartCounter == 0) { std::stringstream oss; oss << debugOutputDir << "partitioning-" << repartCounter << ".vtu"; + MSG("Write partitioning to %s\n", oss.str().c_str()); DOFVector<double> tmpa(feSpace, "tmp"); tmpa.set(mpiRank); VtkWriter::writeFile(&tmpa, oss.str()); diff --git a/AMDiS/src/parallel/ParMetisPartitioner.cc b/AMDiS/src/parallel/ParMetisPartitioner.cc index 51df6a8fe7675b4ac9ea31f342a67fbf455c47aa..1234739ea2c8e55a9dec391d8b5c9f74325e9f1d 100644 
--- a/AMDiS/src/parallel/ParMetisPartitioner.cc +++ b/AMDiS/src/parallel/ParMetisPartitioner.cc @@ -11,7 +11,8 @@ #include <queue> -#include "ParMetisPartitioner.h" +#include "parallel/ParMetisPartitioner.h" +#include "parallel/MpiHelper.h" #include "Mesh.h" #include "Traverse.h" #include "ElInfo.h" @@ -32,7 +33,6 @@ namespace AMDiS { FUNCNAME("ParMetisMesh::ParMetisMesh()"); int mpiSize = mpiComm->Get_size(); - int nodeCounter = 0; int elementCounter = 0; int dow = Global::getGeo(WORLD); @@ -51,7 +51,7 @@ namespace AMDiS { // allocate memory eptr = new int[nElements + 1]; - eind = new int[nElements * (dim + 1)]; + eind = new int[nElements * (mesh->getGeo(VERTEX))]; elmdist = new int[mpiSize + 1]; elem_p2a = new int[nElements]; @@ -74,10 +74,11 @@ namespace AMDiS { elmdist[i] += elmdist[i - 1]; // traverse mesh and fill distributed ParMETIS data - DimVec<double> bary(dim, DEFAULT_VALUE, 1.0 / (dim + 1)); + DimVec<double> bary(dim, DEFAULT_VALUE, 1.0 / mesh->getGeo(VERTEX)); WorldVector<double> world; elementCounter = 0; + int nodeCounter = 0; elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL | Mesh::FILL_COORDS); while (elInfo) { @@ -91,19 +92,16 @@ namespace AMDiS { setAMDiSIndex(elementCounter, index); // write eptr entry - nodeCounter += dim + 1; + nodeCounter += mesh->getGeo(VERTEX); *ptr_eptr = nodeCounter; ptr_eptr++; // write eind entries (element nodes) for (int i = 0; i < dim + 1; i++) { - if (mapLocalGlobal) { - TEST_EXIT_DBG(mapLocalGlobal->count(element->getDof(i, 0))) - ("Should not happen!\n"); + if (mapLocalGlobal) *ptr_eind = (*mapLocalGlobal)[element->getDof(i, 0)]; - } else { - *ptr_eind = element->getDof(i, 0); - } + else + *ptr_eind = element->getDof(i, 0); ptr_eind++; } @@ -112,7 +110,7 @@ namespace AMDiS { if (ptr_xyz) { elInfo->coordToWorld(bary, world); for (int i = 0; i < dim; i++) { - *ptr_xyz = static_cast<float>(world[i]); + *ptr_xyz = static_cast<float>(world[i]); ptr_xyz++; } } @@ -179,6 +177,50 @@ namespace AMDiS { } + void 
ParMetisGraph::print() + { + FUNCNAME("ParMetisGraph::print()"); + + std::stringstream oss; + for (int i = 0; i <= MPI::COMM_WORLD.Get_size(); i++) + oss << parMetisMesh->getElementDist()[i] << " "; + + MSG("Element dist = %s\n", oss.str().c_str()); + + int mpiRank = MPI::COMM_WORLD.Get_rank(); + int nElements = parMetisMesh->getElementDist()[mpiRank + 1] - + parMetisMesh->getElementDist()[mpiRank]; + + MSG("nElements = %d in index range %d - %d\n", + nElements, + parMetisMesh->getElementDist()[mpiRank], + parMetisMesh->getElementDist()[mpiRank + 1]); + + oss.str(""); + oss.clear(); + + for (int i = 0; i <= nElements; i++) + oss << xadj[i] << ", "; + + MSG("xadj = {%s}\n", oss.str().c_str()); + + oss.str(""); + oss.clear(); + + for (int i = 0; i <= xadj[nElements] - 1; i++) + oss << adjncy[i] << ", "; + + MSG("adjncy = {%s}\n", oss.str().c_str()); + } + + + ParMetisPartitioner::~ParMetisPartitioner() + { + if (parMetisMesh) + delete parMetisMesh; + } + + void ParMetisPartitioner::createPartitionData() { FUNCNAME("ParMetrisPartitioner::createPartitionData()"); @@ -218,7 +260,9 @@ namespace AMDiS { int mpiSize = mpiComm->Get_size(); - // === create parmetis mesh === + + // === Create parmetis mesh === + if (parMetisMesh) delete parMetisMesh; @@ -228,7 +272,9 @@ namespace AMDiS { int nElements = parMetisMesh->getNumElements(); - // === create weight array === + + // === Create weight array === + std::vector<int> wgts(nElements); std::vector<float> floatWgts(nElements); unsigned int floatWgtsPos = 0; @@ -257,10 +303,14 @@ namespace AMDiS { mpiComm->Allreduce(&maxWgt, &tmp, 1, MPI_FLOAT, MPI_MAX); maxWgt = tmp; - // === create dual graph === + + // === Create dual graph === + ParMetisGraph parMetisGraph(parMetisMesh, mpiComm); - // === partitioning of dual graph === + + // === Partitioning of dual graph === + int wgtflag = 2; // weights at vertices only! int numflag = 0; // c numbering style! int ncon = 1; // one weight at each vertex! 
@@ -275,11 +325,31 @@ namespace AMDiS { // set tpwgts for (int i = 0; i < mpiSize; i++) tpwgts[i] = 1.0 / nparts; - + float scale = 10000.0 / maxWgt; - // scale wgts - for (int i = 0; i < nElements; i++) + + + // === Scale element weights. === + + double weightSum = 0.0; + + for (int i = 0; i < nElements; i++) { wgts[i] = static_cast<int>(floatWgts[i] * scale); + weightSum += wgts[i]; + } + + mpi::globalAdd(weightSum); + weightSum /= mpiSize; + + for (int i = 0; i < nElements; i++) { + if (wgts[i] > weightSum) { + MSG("Found element weight %d larger than average weight %f!\n", wgts[i], weightSum); + wgts[i] = static_cast<int>(weightSum); + } + } + + + // === Start ParMETIS. === MPI_Comm tmpComm = MPI_Comm(*mpiComm); @@ -348,7 +418,9 @@ namespace AMDiS { ERROR_EXIT("unknown partitioning mode\n"); } - // === distribute new partition data === + + // === Distribute new partition data. === + distributePartitioning(&(part[0])); } @@ -357,7 +429,7 @@ namespace AMDiS { { FUNCNAME("ParMetisPartitioner::fillCoarsePartitionVec()"); - TEST_EXIT_DBG(partitionVec)("no partition vector\n"); + TEST_EXIT_DBG(partitionVec)("No partition vector!\n"); partitionVec->clear(); diff --git a/AMDiS/src/parallel/ParMetisPartitioner.h b/AMDiS/src/parallel/ParMetisPartitioner.h index 38d5e7b199c92749c450becfb9d77652fb9557cc..8fe6254115f09c09968e15bebdbd47672686d22b 100644 --- a/AMDiS/src/parallel/ParMetisPartitioner.h +++ b/AMDiS/src/parallel/ParMetisPartitioner.h @@ -158,6 +158,8 @@ namespace AMDiS { return adjncy; } + void print(); + protected: ParMetisMesh *parMetisMesh; @@ -177,6 +179,8 @@ namespace AMDiS { mapLocalGlobal(NULL) {} + ~ParMetisPartitioner(); + void partition(std::map<int, double> &elemWeights, PartitionMode mode = INITIAL, float itr = 1000000.0);