diff --git a/AMDiS/src/DOFAdmin.h b/AMDiS/src/DOFAdmin.h
index 56ba2d14e37a9520a28e58e428b18859745b015e..3aa48152f144677ec6a68af75fe4ae03b0e8c970 100644
--- a/AMDiS/src/DOFAdmin.h
+++ b/AMDiS/src/DOFAdmin.h
@@ -262,7 +262,7 @@ namespace AMDiS {
      */
     int getDOFIndex();
 
-    /// Frees index dof. Used by Mesh::getDof()
+    /// Frees the DOF index. Used by Mesh::getDof()
     void freeDofIndex(int dof);
 
     ///
@@ -287,10 +287,10 @@ namespace AMDiS {
     /// allocated size of managed vectors and matrices
     int size;
 
-    /// number of used dof indices
+    /// number of used DOF indices
     int usedCount;
 
-    /// number of FREED dof indices (NOT size-sizeUsed)
+    /// number of FREED DOF indices (NOT size - sizeUsed)
     int holeCount;
 
     /// > max. index of a used entry
@@ -302,7 +302,7 @@ namespace AMDiS {
      */
     DimVec<int> nDof;
 
-    /// Dofs from previous DOFAdmins
+    /// DOFs from previous DOFAdmins
     DimVec<int> nPreDof;
 
     /// List of all managed DOFIndexed objects.
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index e84bcb6cebf8556784ec57e25ebf11d01e13c06b..bc0c712f978bbd51705fc39f5c48e1852d7eb0a1 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -1685,42 +1685,35 @@ namespace AMDiS {
     recvDofs.clear();
 
     if (createBoundaryDofFlag.isSet(BOUNDARY_SUBOBJ_SORTED)) {
-      DofContainer dofs;
+      for (int geo = FACE; geo >= VERTEX; geo--)
+        boundaryDofInfo.geoDofs[static_cast<GeoIndex>(geo)].clear();
 
       for (int geo = FACE; geo >= VERTEX; geo--) {
-        std::set<const DegreeOfFreedom*> &dofSet =
-          boundaryDofInfo.geoDofs[static_cast<GeoIndex>(geo)];
-        dofSet.clear();
-
         for (InteriorBoundary::iterator it(myIntBoundary); !it.end(); ++it) {
           if (it->rankObj.subObj == geo) {
-            dofs.clear();
+            DofContainer dofs;
             it->rankObj.el->getAllDofs(feSpace, it->rankObj, dofs);
 
             DofContainer& tmp = sendDofs[it.getRank()];
             tmp.insert(tmp.end(), dofs.begin(), dofs.end());
 
             if (createBoundaryDofFlag.isSet(BOUNDARY_FILL_INFO_SEND_DOFS))
-              dofSet.insert(dofs.begin(), dofs.end());
+              boundaryDofInfo.geoDofs[static_cast<GeoIndex>(geo)].insert(dofs.begin(), dofs.end());
           }
         }
       }
 
       for (int geo = FACE; geo >= VERTEX; geo--) {
-        std::set<const DegreeOfFreedom*> &dofSet =
-          boundaryDofInfo.geoDofs[static_cast<GeoIndex>(geo)];
-        dofSet.clear();
-
         for (InteriorBoundary::iterator it(otherIntBoundary); !it.end(); ++it) {
           if (it->rankObj.subObj == geo) {
-            dofs.clear();
+            DofContainer dofs;
             it->rankObj.el->getAllDofs(feSpace, it->rankObj, dofs);
 
             DofContainer& tmp = recvDofs[it.getRank()];
             tmp.insert(tmp.end(), dofs.begin(), dofs.end());
 
             if (createBoundaryDofFlag.isSet(BOUNDARY_FILL_INFO_RECV_DOFS))
-              dofSet.insert(dofs.begin(), dofs.end());
+              boundaryDofInfo.geoDofs[static_cast<GeoIndex>(geo)].insert(dofs.begin(), dofs.end());
           }
         }
       }
diff --git a/AMDiS/src/parallel/ParallelTypes.h b/AMDiS/src/parallel/ParallelTypes.h
index 4fd6aa3f4ea515981c66f2f1b288274f0ab966d2..5f2c69c3b9eb6e45aa4dca57ee6b47422f4378dc 100644
--- a/AMDiS/src/parallel/ParallelTypes.h
+++ b/AMDiS/src/parallel/ParallelTypes.h
@@ -33,11 +33,17 @@ namespace AMDiS {
 
   using namespace std;
 
+  /// Defines a set of DOF indices.
+  typedef std::set<DegreeOfFreedom> DofIndexSet;
+
   /// Defines a mapping type from DOFs to rank numbers.
   typedef map<const DegreeOfFreedom*, int> DofToRank;
 
   /// Defines a mapping type from DOFs to a set of rank numbers.
   typedef map<const DegreeOfFreedom*, std::set<int> > DofToPartitions;
+
+  /// Defines a mapping type from DOF indices to a set of rank numbers.
+  typedef map<DegreeOfFreedom, std::set<int> > DofIndexToPartitions;
 
   /// Defines a mapping type from rank numbers to sets of DOFs.
   typedef map<int, DofContainer> RankToDofContainer;
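
[Editor's note — illustrative sketch, not part of the patch. The new DofIndexSet
and DofIndexToPartitions typedefs hold DOF *indices*, whereas the existing
DofToRank/DofToPartitions types hold DOF *pointers*. Converting between the two
is a plain dereference, as PetscSolverFeti::createPrimals() does further below.
The helper name toIndexSet is invented for this sketch, and the int typedef
stands in for AMDiS' int-based DegreeOfFreedom index type.]

#include <map>
#include <set>

typedef int DegreeOfFreedom;  // stand-in for AMDiS' DOF index type
typedef std::set<DegreeOfFreedom> DofIndexSet;
typedef std::map<DegreeOfFreedom, std::set<int> > DofIndexToPartitions;

// Turn a set of DOF pointers into a set of DOF indices by dereferencing.
DofIndexSet toIndexSet(const std::set<const DegreeOfFreedom*>& dofPtrs)
{
  DofIndexSet result;
  for (std::set<const DegreeOfFreedom*>::const_iterator it = dofPtrs.begin();
       it != dofPtrs.end(); ++it)
    result.insert(**it);
  return result;
}
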
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index 496959d395fc3c6d7f6cca17e95618df4f623c6d..6dc58ee1c84a6dffb5efd398db8de3e8bc348643 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -46,7 +46,8 @@ namespace AMDiS {
   {
   public:
     PetscSolver()
-      : meshDistributor(NULL)
+      : meshDistributor(NULL),
+        mpiRank(-1)
     {}
 
     virtual ~PetscSolver() {}
@@ -54,6 +55,7 @@ namespace AMDiS {
     void setMeshDistributor(MeshDistributor *m)
     {
       meshDistributor = m;
+      mpiRank = meshDistributor->getMpiRank();
     }
 
     /** \brief
@@ -81,6 +83,8 @@ namespace AMDiS {
   protected:
     MeshDistributor *meshDistributor;
+
+    int mpiRank;
 
     /// Petsc's matrix structure.
     Mat petscMatrix;
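
[Editor's note — illustrative sketch, not part of the patch. The FETI code in
the next file calls mpi::getDofNumbering(comm, nRank, rStart, nOverall) several
times to turn a per-rank count into a contiguous global numbering. The sketch
below (function name invented, plain C MPI instead of the C++ bindings AMDiS
uses) shows the assumed contract: rStart is the exclusive prefix sum of the
rank-local counts, nOverall their global sum.]

#include <mpi.h>

void getDofNumberingSketch(MPI_Comm comm, int nRank, int &rStart, int &nOverall)
{
  // Inclusive prefix sum, then subtract the own count to make it exclusive:
  // rStart is the global index of this rank's first entry.
  MPI_Scan(&nRank, &rStart, 1, MPI_INT, MPI_SUM, comm);
  rStart -= nRank;

  // Total number of entries over all ranks.
  MPI_Allreduce(&nRank, &nOverall, 1, MPI_INT, MPI_SUM, comm);
}
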
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 359ce100b06e7e00f49b3e512582cbf96c558de6..217ec1bb4481af3b9ad3b85d3ee6e9a2c2147890 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -20,33 +20,49 @@ namespace AMDiS {
 
 #ifdef HAVE_PETSC_DEV
 
-  void PetscSolverFeti::updateDofData(int nComponents)
+  void PetscSolverFeti::updateDofData()
   {
     FUNCNAME("PetscSolverFeti::updateDofData()");
+
+    TEST_EXIT(meshDistributor->getMesh()->getDim() == 2)
+      ("Works for 2D problems only!\n");
+
+    TEST_EXIT(meshDistributor->getFeSpace()->getBasisFcts()->getDegree() == 1)
+      ("Works for linear basis functions only!\n");
 
-    definePrimals();
+    createPrimals();
+
+    createDuals();
+
+    createLagrange();
+
+    createIndexB();
   }
 
 
-  void PetscSolverFeti::definePrimals()
+  void PetscSolverFeti::createPrimals()
   {
-    FUNCNAME("PetscSolverFeti::definePrimals()");
+    FUNCNAME("PetscSolverFeti::createPrimals()");
 
-    TEST_EXIT_DBG(meshDistributor->getMesh()->getDim() == 2)
-      ("Works for 2D problems only!");
+    primals.clear();
+    DofContainerSet& vertices =
+      meshDistributor->getBoundaryDofInfo().geoDofs[VERTEX];
+    TEST_EXIT_DBG(vertices.size())("No primal vertices on this rank!\n");
+    for (DofContainerSet::iterator it = vertices.begin();
+         it != vertices.end(); ++it)
+      primals.insert(**it);
 
-    primals = meshDistributor->getBoundaryDofInfo().geoDofs[VERTEX];
     globalPrimalIndex.clear();
-
-    int nRankPrimals = 0;
-    for (DofContainerSet::iterator it = primals.begin();
-         it != primals.end(); ++it)
-      if (meshDistributor->getIsRankDof(**it)) {
-        globalPrimalIndex[**it] = nRankPrimals;
+    nRankPrimals = 0;
+    for (DofIndexSet::iterator it = primals.begin(); it != primals.end(); ++it)
+      if (meshDistributor->getIsRankDof(*it)) {
+        globalPrimalIndex[*it] = nRankPrimals;
         nRankPrimals++;
       }
 
-    int nOverallPrimals = 0, rStartPrimals = 0;
+
+    nOverallPrimals = 0;
+    int rStartPrimals = 0;
     mpi::getDofNumbering(meshDistributor->getMpiComm(),
                          nRankPrimals, rStartPrimals, nOverallPrimals);
@@ -56,18 +72,480 @@ namespace AMDiS {
 
     MSG_DBG("nRankPrimals = %d nOverallPrimals = %d\n",
             nRankPrimals, nOverallPrimals);
 
+    StdMpi<vector<int> > stdMpi(meshDistributor->getMpiComm());
+    RankToDofContainer& sendDofs = meshDistributor->getSendDofs();
+    for (RankToDofContainer::iterator it = sendDofs.begin();
+         it != sendDofs.end(); ++it)
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt)
+        if (globalPrimalIndex.count(**dofIt))
+          stdMpi.getSendData(it->first).push_back(globalPrimalIndex[**dofIt]);
+    stdMpi.updateSendDataSize();
+
+    RankToDofContainer& recvDofs = meshDistributor->getRecvDofs();
+    for (RankToDofContainer::iterator it = recvDofs.begin();
+         it != recvDofs.end(); ++it) {
+      bool recvFromRank = false;
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt)
+        if (primals.count(**dofIt) &&
+            meshDistributor->getIsRankDof(**dofIt) == false) {
+          recvFromRank = true;
+          break;
+        }
+
+      if (recvFromRank)
+        stdMpi.recv(it->first);
+    }
+    stdMpi.startCommunication();
+
+    for (RankToDofContainer::iterator it = recvDofs.begin();
+         it != recvDofs.end(); ++it) {
+      int i = 0;
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt) {
+        if (primals.count(**dofIt) &&
+            meshDistributor->getIsRankDof(**dofIt) == false)
+          globalPrimalIndex[**dofIt] = stdMpi.getRecvData(it->first)[i++];
+      }
+    }
+
+    TEST_EXIT_DBG(primals.size() == globalPrimalIndex.size())
+      ("Number of primals %d, but number of global primals on this rank is %d!\n",
+       primals.size(), globalPrimalIndex.size());
+
+    TEST_EXIT_DBG(nOverallPrimals > 0)
+      ("There are zero primal nodes in the domain!\n");
+  }
+
+
+  void PetscSolverFeti::createDuals()
+  {
+    FUNCNAME("PetscSolverFeti::createDuals()");
+
+    // === Create for each dual node the set of ranks that contain this ===
+    // === node (denoted by W(x_j)).                                    ===
+
+    boundaryDofRanks.clear();
+
+    RankToDofContainer& sendDofs = meshDistributor->getSendDofs();
+    for (RankToDofContainer::iterator it = sendDofs.begin();
+         it != sendDofs.end(); ++it) {
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt) {
+        // If the DOF is not primal, it is a dual node.
+        if (primals.count(**dofIt) == 0) {
+          boundaryDofRanks[**dofIt].insert(mpiRank);
+          boundaryDofRanks[**dofIt].insert(it->first);
+        }
+      }
+    }
+
+    StdMpi<vector<std::set<int> > > stdMpi(meshDistributor->getMpiComm());
+    for (RankToDofContainer::iterator it = sendDofs.begin();
+         it != sendDofs.end(); ++it)
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt)
+        if (primals.count(**dofIt) == 0)
+          stdMpi.getSendData(it->first).push_back(boundaryDofRanks[**dofIt]);
+
+    stdMpi.updateSendDataSize();
+
+    RankToDofContainer& recvDofs = meshDistributor->getRecvDofs();
+    for (RankToDofContainer::iterator it = recvDofs.begin();
+         it != recvDofs.end(); ++it) {
+      bool recvFromRank = false;
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt)
+        if (primals.count(**dofIt) == 0) {
+          recvFromRank = true;
+          break;
+        }
+
+      if (recvFromRank)
+        stdMpi.recv(it->first);
+    }
+    stdMpi.startCommunication();
+
+    for (RankToDofContainer::iterator it = recvDofs.begin();
+         it != recvDofs.end(); ++it) {
+      int i = 0;
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt)
+        if (primals.count(**dofIt) == 0)
+          boundaryDofRanks[**dofIt] = stdMpi.getRecvData(it->first)[i++];
+    }
+
+
+    // === Create global index of the dual nodes on each rank. ===
+
+    duals.clear();
+    globalDualIndex.clear();
+
+    int nRankAllDofs = meshDistributor->getFeSpace()->getAdmin()->getUsedDofs();
+    nRankB = nRankAllDofs - primals.size();
+    nOverallB = 0;
+    rStartB = 0;
+    mpi::getDofNumbering(meshDistributor->getMpiComm(),
+                         nRankB, rStartB, nOverallB);
+    DofContainer allBoundaryDofs;
+    meshDistributor->getAllBoundaryDofs(allBoundaryDofs);
+    int nRankInteriorDofs = nRankAllDofs - allBoundaryDofs.size();
+
+    int nRankDuals = 0;
+    for (DofContainer::iterator it = allBoundaryDofs.begin();
+         it != allBoundaryDofs.end(); ++it) {
+      if (primals.count(**it) == 0) {
+        duals.insert(**it);
+        globalDualIndex[**it] = rStartB + nRankInteriorDofs + nRankDuals;
+        nRankDuals++;
+      }
+    }
+
+    int nOverallDuals = nRankDuals;
+    mpi::globalAdd(nOverallDuals);
+
+    MSG_DBG("nRankDuals = %d nOverallDuals = %d\n",
+            nRankDuals, nOverallDuals);
+  }
+
+
+  void PetscSolverFeti::createLagrange()
+  {
+    FUNCNAME("PetscSolverFeti::createLagrange()");
+
+    nRankLagrange = 0;
+    for (DofIndexSet::iterator it = duals.begin(); it != duals.end(); ++it) {
+      if (meshDistributor->getIsRankDof(*it)) {
+        dofFirstLagrange[*it] = nRankLagrange;
+        int degree = boundaryDofRanks[*it].size();
+        nRankLagrange += (degree * (degree - 1)) / 2;
+      }
+    }
+
+    nOverallLagrange = 0;
+    int rStartLagrange = 0;
+    mpi::getDofNumbering(meshDistributor->getMpiComm(),
+                         nRankLagrange, rStartLagrange, nOverallLagrange);
+
+    for (DofIndexSet::iterator it = duals.begin(); it != duals.end(); ++it)
+      if (meshDistributor->getIsRankDof(*it))
+        dofFirstLagrange[*it] += rStartLagrange;
+
+    MSG_DBG("nRankLagrange = %d nOverallLagrange = %d\n",
+            nRankLagrange, nOverallLagrange);
+
+
+    // === Communicate dofFirstLagrange to ranks not owning these dual DOFs. ===
+
+    StdMpi<vector<int> > stdMpi(meshDistributor->getMpiComm());
+    RankToDofContainer& sendDofs = meshDistributor->getSendDofs();
+    for (RankToDofContainer::iterator it = sendDofs.begin();
+         it != sendDofs.end(); ++it)
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt) {
+        if (primals.count(**dofIt) == 0) {
+          TEST_EXIT_DBG(dofFirstLagrange.count(**dofIt))("Should not happen!\n");
+          stdMpi.getSendData(it->first).push_back(dofFirstLagrange[**dofIt]);
+        }
+      }
+    stdMpi.updateSendDataSize();
+
+    RankToDofContainer& recvDofs = meshDistributor->getRecvDofs();
+    for (RankToDofContainer::iterator it = recvDofs.begin();
+         it != recvDofs.end(); ++it) {
+      bool recvData = false;
+      for (DofContainer::iterator dofIt = it->second.begin();
+           dofIt != it->second.end(); ++dofIt)
+        if (primals.count(**dofIt) == 0) {
+          recvData = true;
+          break;
+        }
+
+      if (recvData)
+        stdMpi.recv(it->first);
+    }
+
+    stdMpi.startCommunication();
+
+    for (RankToDofContainer::iterator it = recvDofs.begin();
+         it != recvDofs.end(); ++it) {
+      int counter = 0;
+      for (unsigned int i = 0; i < it->second.size(); i++)
+        if (primals.count(*(it->second[i])) == 0)
+          dofFirstLagrange[*(it->second[i])] = stdMpi.getRecvData(it->first)[counter++];
+    }
+  }
+
+
+  void PetscSolverFeti::createIndexB()
+  {
+    FUNCNAME("PetscSolverFeti::createIndexB()");
+
+    globalIndexB.clear();
+    DOFAdmin* admin = meshDistributor->getFeSpace()->getAdmin();
+
+    // Mark all DOFs that are neither primal nor dual, i.e., the interior DOFs.
+    for (int i = 0; i < admin->getUsedSize(); i++)
+      if (admin->isDofFree(i) == false &&
+          primals.count(i) == 0 && duals.count(i) == 0)
+        globalIndexB[i] = -1;
+
+    int nInterior = 0;
+    for (DofMapping::iterator it = globalIndexB.begin();
+         it != globalIndexB.end(); ++it) {
+      it->second = nInterior + rStartB;
+      nInterior++;
+    }
+
+    TEST_EXIT_DBG(nInterior + primals.size() + duals.size() ==
+                  static_cast<unsigned int>(admin->getUsedDofs()))
+      ("Should not happen!\n");
+
+    for (DofIndexSet::iterator it = duals.begin();
+         it != duals.end(); ++it)
+      globalIndexB[*it] = globalDualIndex[*it];
   }
 
 
-  void PetscSolverFeti::fillPetscMatrix(Matrix<DOFMatrix*> *mat, SystemVector *vec)
+  void PetscSolverFeti::createMatLagrange(int nComponents)
+  {
+    FUNCNAME("PetscSolverFeti::createMatLagrange()");
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nRankLagrange * nComponents, nRankB * nComponents,
+                    nOverallLagrange * nComponents, nOverallB * nComponents,
+                    2, PETSC_NULL, 2, PETSC_NULL,
+                    &mat_lagrange);
+
+    for (DofIndexSet::iterator it = duals.begin(); it != duals.end(); ++it) {
+      TEST_EXIT_DBG(dofFirstLagrange.count(*it))("Should not happen!\n");
+      TEST_EXIT_DBG(boundaryDofRanks.count(*it))("Should not happen!\n");
+
+      // Row of the first Lagrange multiplier of this dual DOF.
+      int index = dofFirstLagrange[*it];
+      vector<int> W(boundaryDofRanks[*it].begin(), boundaryDofRanks[*it].end());
+      int degree = W.size();
+
+      TEST_EXIT_DBG(globalDualIndex.count(*it))("Should not happen!\n");
+      int dualCol = globalDualIndex[*it];
+
+      for (int i = 0; i < degree; i++) {
+        for (int j = i + 1; j < degree; j++) {
+          if (W[i] == mpiRank || W[j] == mpiRank) {
+            for (int k = 0; k < nComponents; k++) {
+              int rowIndex = index * nComponents + k;
+              int colIndex = dualCol * nComponents + k;
+              double value = (W[i] == mpiRank ? 1.0 : -1.0);
+              MatSetValue(mat_lagrange, rowIndex, colIndex, value,
+                          INSERT_VALUES);
+            }
+          }
+
+          index++;
+        }
+      }
+    }
+
+    MatAssemblyBegin(mat_lagrange, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(mat_lagrange, MAT_FINAL_ASSEMBLY);
+  }
+
+
+  void PetscSolverFeti::fillPetscMatrix(Matrix<DOFMatrix*> *mat,
+                                        SystemVector *vec)
   {
     FUNCNAME("PetscSolverFeti::fillPetscMatrix()");
+
+    updateDofData();
+
+    int nComponents = vec->getSize();
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nRankB * nComponents, nRankB * nComponents,
+                    nOverallB * nComponents, nOverallB * nComponents,
+                    100, PETSC_NULL, 100, PETSC_NULL,
+                    &mat_b_b);
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nRankPrimals * nComponents, nRankPrimals * nComponents,
+                    nOverallPrimals * nComponents, nOverallPrimals * nComponents,
+                    10, PETSC_NULL, 10, PETSC_NULL,
+                    &mat_primal_primal);
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nRankB * nComponents, nRankPrimals * nComponents,
+                    nOverallB * nComponents, nOverallPrimals * nComponents,
+                    100, PETSC_NULL, 100, PETSC_NULL,
+                    &mat_b_primal);
+
+    MatCreateMPIAIJ(PETSC_COMM_WORLD,
+                    nRankPrimals * nComponents, nRankB * nComponents,
+                    nOverallPrimals * nComponents, nOverallB * nComponents,
+                    100, PETSC_NULL, 100, PETSC_NULL,
+                    &mat_primal_b);
+
+    using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
+    namespace traits = mtl::traits;
+    typedef DOFMatrix::base_matrix_type Matrix;
+
+    typedef traits::range_generator<row, Matrix>::type cursor_type;
+    typedef traits::range_generator<nz, cursor_type>::type icursor_type;
+
+    vector<int> cols, colsOther;
+    vector<double> values, valuesOther;
+    cols.reserve(300);
+    colsOther.reserve(300);
+    values.reserve(300);
+    valuesOther.reserve(300);
+
+    for (int i = 0; i < nComponents; i++)
+      for (int j = 0; j < nComponents; j++)
+        if ((*mat)[i][j]) {
+          traits::col<Matrix>::type col((*mat)[i][j]->getBaseMatrix());
+          traits::const_value<Matrix>::type value((*mat)[i][j]->getBaseMatrix());
+
+          for (cursor_type cursor = begin<row>((*mat)[i][j]->getBaseMatrix()),
+                 cend = end<row>((*mat)[i][j]->getBaseMatrix()); cursor != cend; ++cursor) {
+            bool rowPrimal = primals.count(*cursor) != 0;
+
+            cols.clear();
+            values.clear();
+
+            colsOther.clear();
+            valuesOther.clear();
+
+            for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
+                 icursor != icend; ++icursor) {
+              if (primals.count(col(*icursor)) != 0) {
+                TEST_EXIT_DBG(globalPrimalIndex.count(col(*icursor)))
+                  ("No global primal index for DOF %d!\n", col(*icursor));
+
+                int colIndex = globalPrimalIndex[col(*icursor)] * nComponents + j;
+
+                if (rowPrimal) {
+                  cols.push_back(colIndex);
+                  values.push_back(value(*icursor));
+                } else {
+                  colsOther.push_back(colIndex);
+                  valuesOther.push_back(value(*icursor));
+                }
+              } else {
+                TEST_EXIT_DBG(globalIndexB.count(col(*icursor)))
+                  ("No global B index for DOF %d!\n", col(*icursor));
+
+                int colIndex = globalIndexB[col(*icursor)] * nComponents + j;
+
+                if (rowPrimal) {
+                  colsOther.push_back(colIndex);
+                  valuesOther.push_back(value(*icursor));
+                } else {
+                  cols.push_back(colIndex);
+                  values.push_back(value(*icursor));
+                }
+              }
+            }
+
+            if (rowPrimal) {
+              TEST_EXIT_DBG(globalPrimalIndex.count(*cursor))
+                ("Should not happen!\n");
+
+              int rowIndex = globalPrimalIndex[*cursor] * nComponents + i;
+              MatSetValues(mat_primal_primal, 1, &rowIndex, cols.size(),
+                           &(cols[0]), &(values[0]), ADD_VALUES);
+
+              if (colsOther.size())
+                MatSetValues(mat_primal_b, 1, &rowIndex, colsOther.size(),
+                             &(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
+            } else {
+              TEST_EXIT_DBG(globalIndexB.count(*cursor))
+                ("Should not happen!\n");
+
+              int rowIndex = globalIndexB[*cursor] * nComponents + i;
+              MatSetValues(mat_b_b, 1, &rowIndex, cols.size(),
+                           &(cols[0]), &(values[0]), ADD_VALUES);
+
+              if (colsOther.size())
+                MatSetValues(mat_b_primal, 1, &rowIndex, colsOther.size(),
+                             &(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
+            }
+          }
+        }
+
+
+    MatAssemblyBegin(mat_b_b, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(mat_b_b, MAT_FINAL_ASSEMBLY);
+
+    MatAssemblyBegin(mat_primal_primal, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(mat_primal_primal, MAT_FINAL_ASSEMBLY);
+
+    MatAssemblyBegin(mat_b_primal, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(mat_b_primal, MAT_FINAL_ASSEMBLY);
+
+    MatAssemblyBegin(mat_primal_b, MAT_FINAL_ASSEMBLY);
+    MatAssemblyEnd(mat_primal_b, MAT_FINAL_ASSEMBLY);
+
+
+    // === Create and fill the rhs vectors of the B and primal variables. ===
+
+    VecCreate(PETSC_COMM_WORLD, &vec_b);
+    VecSetSizes(vec_b, nRankB * nComponents, nOverallB * nComponents);
+    VecSetType(vec_b, VECMPI);
+
+    VecCreate(PETSC_COMM_WORLD, &vec_primal);
+    VecSetSizes(vec_primal, nRankPrimals * nComponents,
+                nOverallPrimals * nComponents);
+    VecSetType(vec_primal, VECMPI);
+
+    for (int i = 0; i < nComponents; i++) {
+      DOFVector<double>::Iterator dofIt(vec->getDOFVector(i), USED_DOFS);
+      for (dofIt.reset(); !dofIt.end(); ++dofIt) {
+        int index = dofIt.getDOFIndex();
+        if (primals.count(index)) {
+          TEST_EXIT_DBG(globalPrimalIndex.count(index))
+            ("Should not happen!\n");
+
+          index = globalPrimalIndex[index] * nComponents + i;
+          double value = *dofIt;
+          VecSetValues(vec_primal, 1, &index, &value, ADD_VALUES);
+        } else {
+          TEST_EXIT_DBG(globalIndexB.count(index))
+            ("Should not happen!\n");
+
+          index = globalIndexB[index] * nComponents + i;
+          double value = *dofIt;
+          VecSetValues(vec_b, 1, &index, &value, ADD_VALUES);
+        }
+      }
+    }
+
+    VecAssemblyBegin(vec_b);
+    VecAssemblyEnd(vec_b);
+
+    VecAssemblyBegin(vec_primal);
+    VecAssemblyEnd(vec_primal);
+
+
+    createMatLagrange(nComponents);
+
+    MSG("FINISHED!\n");
+    exit(0);
   }
 
 
   void PetscSolverFeti::solvePetscMatrix(SystemVector &vec, AdaptInfo *adaptInfo)
   {
     FUNCNAME("PetscSolverFeti::solvePetscMatrix()");
+
+    MatDestroy(mat_b_b);
+    MatDestroy(mat_primal_primal);
+    MatDestroy(mat_b_primal);
+    MatDestroy(mat_primal_b);
+    MatDestroy(mat_lagrange);
+
+    VecDestroy(vec_b);
+    VecDestroy(vec_primal);
   }
 
 #endif
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 07479356b3c1490cbf15a3c42f710944201f611d..c22bf2d4b516212e83343b273b134ff0a3dfc50b 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -51,14 +51,58 @@ namespace AMDiS {
         MeshDistributor::BOUNDARY_FILL_INFO_RECV_DOFS;
     }
 
   protected:
-    void updateDofData(int nComponents);
+    void updateDofData();
 
-    void definePrimals();
+    void createPrimals();
+
+    void createDuals();
+
+    void createLagrange();
+
+    void createIndexB();
+
+    void createMatLagrange(int nComponents);
 
   protected:
-    DofContainerSet primals;
+    /// Indices of the primal (coarse space) DOFs.
+    DofIndexSet primals;
 
+    /// Maps primal DOF indices to their global primal index.
     DofMapping globalPrimalIndex;
+
+    /// Number of primal DOFs owned by this rank.
+    int nRankPrimals;
+
+    /// Overall number of primal DOFs.
+    int nOverallPrimals;
+
+    /// Indices of the dual (shared but non-primal) DOFs.
+    DofIndexSet duals;
+
+    /// Maps dual DOF indices to their global index in B.
+    DofMapping globalDualIndex;
+
+    /// Stores for each dual boundary DOF the set of ranks W(x_j) sharing it.
+    DofIndexToPartitions boundaryDofRanks;
+
+    /// Maps dual DOF indices to the global index of their first Lagrange
+    /// multiplier.
+    DofMapping dofFirstLagrange;
+
+    /// Number of Lagrange multipliers owned by this rank.
+    int nRankLagrange;
+
+    /// Overall number of Lagrange multipliers.
+    int nOverallLagrange;
+
+    /// Maps interior and dual DOF indices to their global index in B.
+    DofMapping globalIndexB;
+
+    /// Number of interior plus dual DOFs owned by this rank.
+    int nRankB;
+
+    /// Overall number of interior plus dual DOFs.
+    int nOverallB;
+
+    /// Global index of the first B DOF of this rank.
+    int rStartB;
+
+    Mat mat_b_b;
+
+    Mat mat_primal_primal;
+
+    Mat mat_b_primal, mat_primal_b;
+
+    /// Jump operator matrix of the Lagrange multiplier constraints.
+    Mat mat_lagrange;
+
+    Vec vec_b;
+
+    Vec vec_primal;
   };
 
 #endif
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 602ef981e4649fb01a6a6241ad36f1314e8e356c..cc79543d22aeebaf8926c2c5bfdfbe6863cc6dc4 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -195,7 +195,7 @@ namespace AMDiS {
     TEST_EXIT(mat)("No DOFMatrix!\n");
 
     using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
-    namespace traits= mtl::traits;
+    namespace traits = mtl::traits;
     typedef DOFMatrix::base_matrix_type Matrix;
 
     traits::col<Matrix>::type col(mat->getBaseMatrix());
@@ -408,8 +408,11 @@
   }
 
 
-  void PetscSolverGlobalMatrix::setDofVector(Vec& petscVec, DOFVector<double>* vec,
-                                             int dispMult, int dispAdd, bool rankOnly)
+  void PetscSolverGlobalMatrix::setDofVector(Vec& petscVec,
+                                             DOFVector<double>* vec,
+                                             int dispMult,
+                                             int dispAdd,
+                                             bool rankOnly)
   {
     FUNCNAME("PetscSolverGlobalMatrix::setDofVector()");
 
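
[Editor's note — illustrative sketch, not part of the patch. The StdMpi.cc
changes below add serialization for std::set<int> and vector<std::set<int> >.
The vector-of-sets encoding is a flat, length-prefixed buffer:
buf = [nSets, size_0, elems_0..., size_1, elems_1..., ...]. This standalone
round-trip demonstrates the layout and the size formula of getBufferSize().]

#include <cassert>
#include <set>
#include <vector>

int main()
{
  std::vector<std::set<int> > data(2);
  data[0].insert(3); data[0].insert(7);
  data[1].insert(42);

  // Encode: one header word, then one size word per set plus its elements.
  std::vector<int> buf;
  buf.push_back(data.size());
  for (unsigned int i = 0; i < data.size(); i++) {
    buf.push_back(data[i].size());
    for (std::set<int>::iterator it = data[i].begin(); it != data[i].end(); ++it)
      buf.push_back(*it);
  }
  // getBufferSize(): 1 + sum over all sets of (size + 1)
  assert(buf.size() == 1u + (2 + 1) + (1 + 1));

  // Decode in the same order, as makeFromBuffer() does.
  std::vector<std::set<int> > out(buf[0]);
  int counter = 1;
  for (int i = 0; i < buf[0]; i++) {
    int setSize = buf[counter++];
    for (int j = 0; j < setSize; j++)
      out[i].insert(buf[counter++]);
  }
  assert(out == data);
  return 0;
}
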
diff --git a/AMDiS/src/parallel/StdMpi.cc b/AMDiS/src/parallel/StdMpi.cc
index 2cb17e027a48e4c4addffb36e3c9bcbefd5b18a0..4bde99ca70dd3ab5537c72e0784a6125c40c9433 100644
--- a/AMDiS/src/parallel/StdMpi.cc
+++ b/AMDiS/src/parallel/StdMpi.cc
@@ -15,15 +15,17 @@ namespace AMDiS {
 
   MPI_Datatype StdMpiHelper<int>::mpiDataType = MPI_INT;
 
-  MPI_Datatype StdMpiHelper<std::vector<int> >::mpiDataType = MPI_INT;
-  MPI_Datatype StdMpiHelper<std::vector<double> >::mpiDataType = MPI_DOUBLE;
-  MPI_Datatype StdMpiHelper<std::vector<std::vector<double> > >::mpiDataType = MPI_DOUBLE;
-  MPI_Datatype StdMpiHelper<std::vector<MeshStructure> >::mpiDataType = MPI_UNSIGNED_LONG;
-  MPI_Datatype StdMpiHelper<std::vector<AtomicBoundary> >::mpiDataType = MPI_INT;
-  MPI_Datatype StdMpiHelper<std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > >::mpiDataType = MPI_INT;
-  MPI_Datatype StdMpiHelper<std::vector<std::pair<int, int> > >::mpiDataType = MPI_INT;
-  MPI_Datatype StdMpiHelper<std::vector<WorldVector<double> > >::mpiDataType = MPI_DOUBLE;
-  MPI_Datatype StdMpiHelper<std::map<WorldVector<double>, int> >::mpiDataType = MPI_DOUBLE;
+  MPI_Datatype StdMpiHelper<vector<int> >::mpiDataType = MPI_INT;
+  MPI_Datatype StdMpiHelper<std::set<int> >::mpiDataType = MPI_INT;
+  MPI_Datatype StdMpiHelper<vector<std::set<int> > >::mpiDataType = MPI_INT;
+  MPI_Datatype StdMpiHelper<vector<double> >::mpiDataType = MPI_DOUBLE;
+  MPI_Datatype StdMpiHelper<vector<vector<double> > >::mpiDataType = MPI_DOUBLE;
+  MPI_Datatype StdMpiHelper<vector<MeshStructure> >::mpiDataType = MPI_UNSIGNED_LONG;
+  MPI_Datatype StdMpiHelper<vector<AtomicBoundary> >::mpiDataType = MPI_INT;
+  MPI_Datatype StdMpiHelper<map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > >::mpiDataType = MPI_INT;
+  MPI_Datatype StdMpiHelper<vector<std::pair<int, int> > >::mpiDataType = MPI_INT;
+  MPI_Datatype StdMpiHelper<vector<WorldVector<double> > >::mpiDataType = MPI_DOUBLE;
+  MPI_Datatype StdMpiHelper<map<WorldVector<double>, int> >::mpiDataType = MPI_DOUBLE;
 
 
   // T = int
 
@@ -45,25 +47,25 @@ namespace AMDiS {
 
 
-  // T = std::vector<int>
+  // T = vector<int>
 
-  int StdMpiHelper<std::vector<int> >::getBufferSize(std::vector<int> &data)
+  int StdMpiHelper<vector<int> >::getBufferSize(vector<int> &data)
   {
     return data.size();
   }
 
-  int StdMpiHelper<std::vector<int> >::getBufferSize(std::vector<const int*> &data)
+  int StdMpiHelper<vector<int> >::getBufferSize(vector<const int*> &data)
   {
     return data.size();
   }
 
-  void StdMpiHelper<std::vector<int> >::createBuffer(std::vector<int> &data, int *buf)
+  void StdMpiHelper<vector<int> >::createBuffer(vector<int> &data, int *buf)
   {
     for (unsigned int i = 0; i < data.size(); i++)
       buf[i] = data[i];
   }
 
-  void StdMpiHelper<std::vector<int> >::makeFromBuffer(std::vector<int> &data, int *buf, int bufSize)
+  void StdMpiHelper<vector<int> >::makeFromBuffer(vector<int> &data, int *buf, int bufSize)
   {
     data.resize(bufSize);
 
@@ -73,20 +75,86 @@ namespace AMDiS {
 
 
-  // T = std::vector<double>
+  // T = std::set<int>
 
-  int StdMpiHelper<std::vector<double> >::getBufferSize(std::vector<double> &data)
+  int StdMpiHelper<std::set<int> >::getBufferSize(std::set<int> &data)
   {
     return data.size();
   }
 
-  void StdMpiHelper<std::vector<double> >::createBuffer(std::vector<double> &data, double *buf)
+  void StdMpiHelper<std::set<int> >::createBuffer(std::set<int> &data, int *buf)
+  {
+    int i = 0;
+    for (std::set<int>::iterator it = data.begin(); it != data.end(); ++it)
+      buf[i++] = *it;
+  }
+
+  void StdMpiHelper<std::set<int> >::makeFromBuffer(std::set<int> &data, int *buf, int bufSize)
+  {
+    data.clear();
+
+    for (int i = 0; i < bufSize; i++)
+      data.insert(buf[i]);
+  }
+
+
+  // T = vector<std::set<int> >
+
+  int StdMpiHelper<vector<std::set<int> > >::getBufferSize(vector<std::set<int> > &data)
+  {
+    int dataSize = 1;
+    for (unsigned int i = 0; i < data.size(); i++)
+      dataSize += data[i].size() + 1;
+
+    return dataSize;
+  }
+
+  void StdMpiHelper<vector<std::set<int> > >::createBuffer(vector<std::set<int> > &data, int *buf)
+  {
+    int counter = 1;
+    buf[0] = data.size();
+
+    for (unsigned int i = 0; i < data.size(); i++) {
+      buf[counter++] = data[i].size();
+      for (std::set<int>::iterator it = data[i].begin(); it != data[i].end(); ++it)
+        buf[counter++] = *it;
+    }
+  }
+
+  void StdMpiHelper<vector<std::set<int> > >::makeFromBuffer(vector<std::set<int> > &data, int *buf, int bufSize)
+  {
+    FUNCNAME("StdMpiHelper<vector<set<int> > >::makeFromBuffer()");
+
+    int counter = 1;
+    data.resize(buf[0]);
+
+    for (int i = 0; i < buf[0]; i++) {
+      data[i].clear();
+      int setSize = buf[counter++];
+      for (int j = 0; j < setSize; j++)
+        data[i].insert(buf[counter++]);
+    }
+
+    TEST_EXIT(counter == bufSize)("Counter is %d, but buffer size is %d!\n",
+                                  counter, bufSize);
+  }
+
+
+  // T = vector<double>
+
+  int StdMpiHelper<vector<double> >::getBufferSize(vector<double> &data)
+  {
+    return data.size();
+  }
+
+  void StdMpiHelper<vector<double> >::createBuffer(vector<double> &data, double *buf)
   {
     for (unsigned int i = 0; i < data.size(); i++)
       buf[i] = data[i];
   }
 
-  void StdMpiHelper<std::vector<double> >::makeFromBuffer(std::vector<double> &data, double *buf, int bufSize)
+  void StdMpiHelper<vector<double> >::makeFromBuffer(vector<double> &data, double *buf, int bufSize)
   {
     data.resize(bufSize);
 
@@ -96,9 +164,9 @@ namespace AMDiS {
 
 
-  // T = std::vector<std::vector<double> >
+  // T = vector<vector<double> >
 
-  int StdMpiHelper<std::vector<std::vector<double> > >::getBufferSize(std::vector<std::vector<double> > &data)
+  int StdMpiHelper<vector<vector<double> > >::getBufferSize(vector<vector<double> > &data)
   {
     int size = 1;
 
@@ -108,7 +176,7 @@
     return size;
   }
 
-  void StdMpiHelper<std::vector<std::vector<double> > >::createBuffer(std::vector<std::vector<double> > &data, double *buf)
+  void StdMpiHelper<vector<vector<double> > >::createBuffer(vector<vector<double> > &data, double *buf)
   {
     buf[0] = data.size();
     int counter = 1;
@@ -120,7 +188,7 @@
     }
   }
 
-  void StdMpiHelper<std::vector<std::vector<double> > >::makeFromBuffer(std::vector<std::vector<double> > &data, double *buf, int bufSize)
+  void StdMpiHelper<vector<vector<double> > >::makeFromBuffer(vector<vector<double> > &data, double *buf, int bufSize)
   {
     data.resize(static_cast<unsigned int>(buf[0]));
     int counter = 1;
@@ -135,9 +203,9 @@ namespace AMDiS {
 
 
-  // T = std::vector<MeshStructure>
+  // T = vector<MeshStructure>
 
-  int StdMpiHelper<std::vector<MeshStructure> >::getBufferSize(std::vector<MeshStructure> &data)
+  int StdMpiHelper<vector<MeshStructure> >::getBufferSize(vector<MeshStructure> &data)
   {
     int size = 0;
     for (unsigned int i = 0; i < data.size(); i++)
@@ -146,7 +214,7 @@
     return size;
   }
 
-  void StdMpiHelper<std::vector<MeshStructure> >::createBuffer(std::vector<MeshStructure> &data, uint64_t *buf)
+  void StdMpiHelper<vector<MeshStructure> >::createBuffer(vector<MeshStructure> &data, uint64_t *buf)
   {
     int pos = 0;
     for (unsigned int i = 0; i < data.size(); i++) {
@@ -158,7 +226,7 @@
     }
   }
 
-  void StdMpiHelper<std::vector<MeshStructure> >::makeFromBuffer(std::vector<MeshStructure> &data,
+  void StdMpiHelper<vector<MeshStructure> >::makeFromBuffer(vector<MeshStructure> &data,
                                                              uint64_t *buf, int bufSize)
   {
     int pos = 0;
 
@@ -166,7 +234,7 @@
     while (pos < bufSize) {
       int codeSize = buf[pos++];
      int nElements = buf[pos++];
-      std::vector<uint64_t> code;
+      vector<uint64_t> code;
       code.resize(codeSize);
       for (int i = 0; i < codeSize; i++)
         code[i] = buf[pos++];
@@ -180,14 +248,14 @@ namespace AMDiS {
 
 
-  // T = std::vector<AtomicBoundary>
+  // T = vector<AtomicBoundary>
 
-  int StdMpiHelper<std::vector<AtomicBoundary> >::getBufferSize(std::vector<AtomicBoundary> &data)
+  int StdMpiHelper<vector<AtomicBoundary> >::getBufferSize(vector<AtomicBoundary> &data)
   {
     return data.size() * 6;
   }
 
-  void StdMpiHelper<std::vector<AtomicBoundary> >::createBuffer(std::vector<AtomicBoundary> &data, int *buf)
+  void StdMpiHelper<vector<AtomicBoundary> >::createBuffer(vector<AtomicBoundary> &data, int *buf)
   {
     for (unsigned int i = 0; i < data.size(); i++) {
       buf[i * 6] = data[i].rankObj.elIndex;
@@ -199,7 +267,7 @@
     }
   }
 
-  void StdMpiHelper<std::vector<AtomicBoundary> >::makeFromBuffer(std::vector<AtomicBoundary> &data, int *buf, int bufSize)
+  void StdMpiHelper<vector<AtomicBoundary> >::makeFromBuffer(vector<AtomicBoundary> &data, int *buf, int bufSize)
   {
     if (bufSize == 0)
       return;
 
@@ -219,13 +287,13 @@ namespace AMDiS {
 
 
-  // T = std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> >
+  // T = map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> >
 
-  int StdMpiHelper<std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > >::getBufferSize(std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > &data)
+  int StdMpiHelper<map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > >::getBufferSize(map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > &data)
   {
     int size = 1;
 
-    for (std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> >::iterator it = data.begin();
+    for (map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> >::iterator it = data.begin();
          it != data.end(); ++it) {
       size += 2 + it->second.size() * 2;
     }
 
@@ -233,17 +301,17 @@
     return size;
   }
 
-  void StdMpiHelper<std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > >::createBuffer(std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > &data, int *buf)
+  void StdMpiHelper<map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > >::createBuffer(map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > &data, int *buf)
   {
     buf[0] = data.size();
 
     int counter = 1;
-    for (std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> >::iterator it = data.begin();
+    for (map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> >::iterator it = data.begin();
         it != data.end(); ++it) {
       buf[counter++] = it->first;
       buf[counter++] = it->second.size();
-      for (std::map<DegreeOfFreedom, DegreeOfFreedom>::iterator it2 = it->second.begin();
+      for (map<DegreeOfFreedom, DegreeOfFreedom>::iterator it2 = it->second.begin();
           it2 != it->second.end(); ++it2) {
         buf[counter++] = it2->first;
         buf[counter++] = it2->second;
@@ -251,7 +319,7 @@
     }
   }
 
-  void StdMpiHelper<std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > >::makeFromBuffer(std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > &data, int *buf, int bufSize)
+  void StdMpiHelper<map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > >::makeFromBuffer(map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > &data, int *buf, int bufSize)
   {
     data.clear();
 
@@ -262,7 +330,7 @@
 
     for (int i = 0; i < buf[0]; i++) {
       BoundaryType bound = buf[counter++];
-      std::map<DegreeOfFreedom, DegreeOfFreedom> dofs;
+      map<DegreeOfFreedom, DegreeOfFreedom> dofs;
 
       int nDofs = buf[counter++];
       for (int j = 0; j < nDofs; j++) {
@@ -280,14 +348,14 @@ namespace AMDiS {
 
 
-  // T = std::vector<std::pair<int, int> >
+  // T = vector<std::pair<int, int> >
 
-  int StdMpiHelper<std::vector<std::pair<int, int> > >::getBufferSize(std::vector<std::pair<int, int> > &data)
+  int StdMpiHelper<vector<std::pair<int, int> > >::getBufferSize(vector<std::pair<int, int> > &data)
   {
     return data.size() * 2;
   }
 
-  void StdMpiHelper<std::vector<std::pair<int, int> > >::createBuffer(std::vector<std::pair<int, int> > &data, int *buf)
+  void StdMpiHelper<vector<std::pair<int, int> > >::createBuffer(vector<std::pair<int, int> > &data, int *buf)
   {
     for (unsigned int i = 0; i < data.size(); i++) {
       buf[i * 2] = data[i].first;
@@ -295,7 +363,7 @@
     }
   }
 
-  void StdMpiHelper<std::vector<std::pair<int, int> > >::makeFromBuffer(std::vector<std::pair<int, int> > &data,
+  void StdMpiHelper<vector<std::pair<int, int> > >::makeFromBuffer(vector<std::pair<int, int> > &data,
                                                                   int *buf,
                                                                   int bufSize)
   {
     if (bufSize == 0)
       return;
 
@@ -312,14 +380,14 @@ namespace AMDiS {
 
 
-  // T = std::vector<WorldVector<double> >
+  // T = vector<WorldVector<double> >
 
-  int StdMpiHelper<std::vector<WorldVector<double> > >::getBufferSize(std::vector<WorldVector<double> > &data)
+  int StdMpiHelper<vector<WorldVector<double> > >::getBufferSize(vector<WorldVector<double> > &data)
   {
     return data.size() * Global::getGeo(WORLD);
   }
 
-  void StdMpiHelper<std::vector<WorldVector<double> > >::createBuffer(std::vector<WorldVector<double> > &data, double *buf)
+  void StdMpiHelper<vector<WorldVector<double> > >::createBuffer(vector<WorldVector<double> > &data, double *buf)
   {
     int dimOfWorld = Global::getGeo(WORLD);
     int pos = 0;
@@ -328,7 +396,7 @@
         buf[pos++] = data[i][j];
   }
 
-  void StdMpiHelper<std::vector<WorldVector<double> > >::makeFromBuffer(std::vector<WorldVector<double> > &data, double *buf, int bufSize)
+  void StdMpiHelper<vector<WorldVector<double> > >::makeFromBuffer(vector<WorldVector<double> > &data, double *buf, int bufSize)
   {
     int dimOfWorld = Global::getGeo(WORLD);
     TEST_EXIT(bufSize % Global::getGeo(WORLD) == 0)("This should not happen!\n");
@@ -342,17 +410,17 @@ namespace AMDiS {
 
 
-  // T = std::map<WorldVector<double>, int>
+  // T = map<WorldVector<double>, int>
 
-  int StdMpiHelper<std::map<WorldVector<double>, int> >::getBufferSize(std::map<WorldVector<double>, int> &data)
+  int StdMpiHelper<map<WorldVector<double>, int> >::getBufferSize(map<WorldVector<double>, int> &data)
   {
     return data.size() * (Global::getGeo(WORLD) + 1);
   }
 
-  void StdMpiHelper<std::map<WorldVector<double>, int> >::createBuffer(std::map<WorldVector<double>, int> &data, double* buf)
+  void StdMpiHelper<map<WorldVector<double>, int> >::createBuffer(map<WorldVector<double>, int> &data, double* buf)
   {
     int i = 0;
-    for (std::map<WorldVector<double>, int>::iterator it = data.begin();
+    for (map<WorldVector<double>, int>::iterator it = data.begin();
          it != data.end(); ++it) {
       for (int j = 0; j < Global::getGeo(WORLD); j++)
         buf[i++] = it->first[j];
@@ -360,7 +428,7 @@
     }
   }
 
-  void StdMpiHelper<std::map<WorldVector<double>, int> >::makeFromBuffer(std::map<WorldVector<double>, int> &data, double* buf, int bufSize)
+  void StdMpiHelper<map<WorldVector<double>, int> >::makeFromBuffer(map<WorldVector<double>, int> &data, double* buf, int bufSize)
   {
     if (bufSize == 0)
       return;
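
[Editor's note — illustrative sketch, not part of the patch. The WorldVector
serializers above pack points into a flat, dimension-strided double buffer:
point i occupies buf[i*dim .. i*dim + dim - 1], which is why makeFromBuffer()
checks that bufSize is divisible by Global::getGeo(WORLD).]

#include <cassert>
#include <vector>

int main()
{
  const int dim = 2;  // stands in for Global::getGeo(WORLD)
  double points[2][2] = { {0.0, 1.0}, {2.5, 3.5} };

  std::vector<double> buf;
  for (int i = 0; i < 2; i++)
    for (int j = 0; j < dim; j++)
      buf.push_back(points[i][j]);

  assert(buf.size() % dim == 0);     // the invariant checked by TEST_EXIT
  assert(buf[1 * dim + 1] == 3.5);   // component 1 of point 1
  return 0;
}
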
diff --git a/AMDiS/src/parallel/StdMpi.h b/AMDiS/src/parallel/StdMpi.h
index 433c43c734f1dfa7263d12e894beab6e76cf3eeb..d3516aaafd79e45e3325db2c7049d055c6fd4cec 100644
--- a/AMDiS/src/parallel/StdMpi.h
+++ b/AMDiS/src/parallel/StdMpi.h
@@ -30,6 +30,8 @@
 #include "parallel/InteriorBoundary.h"
 
 namespace AMDiS {
+
+  using namespace std;
 
   /** \brief
    * The class StdMpiHelper defines for a type a set of variables, types and functions
@@ -62,138 +64,166 @@ namespace AMDiS {
 
   template<>
-  struct StdMpiHelper<std::vector<int> > {
+  struct StdMpiHelper<vector<int> > {
     static MPI_Datatype mpiDataType;
 
     typedef int cppDataType;
 
-    static int getBufferSize(std::vector<int> &data);
+    static int getBufferSize(vector<int> &data);
+
+    static int getBufferSize(vector<const int*> &data);
+
+    static void createBuffer(vector<int> &data, int *buf);
+
+    static void makeFromBuffer(vector<int> &data, int *buf, int bufSize);
+  };
+
-    static int getBufferSize(std::vector<const int*> &data);
+  template<>
+  struct StdMpiHelper<std::set<int> > {
+    static MPI_Datatype mpiDataType;
+
+    typedef int cppDataType;
+
+    static int getBufferSize(std::set<int> &data);
 
-    static void createBuffer(std::vector<int> &data, int *buf);
+    static void createBuffer(std::set<int> &data, int *buf);
 
-    static void makeFromBuffer(std::vector<int> &data, int *buf, int bufSize);
+    static void makeFromBuffer(std::set<int> &data, int *buf, int bufSize);
   };
 
+  template<>
+  struct StdMpiHelper<vector<std::set<int> > > {
+    static MPI_Datatype mpiDataType;
+
+    typedef int cppDataType;
+
+    static int getBufferSize(vector<std::set<int> > &data);
+
+    static void createBuffer(vector<std::set<int> > &data, int *buf);
+
+    static void makeFromBuffer(vector<std::set<int> > &data,
+                               int *buf, int bufSize);
+  };
+
   template<>
-  struct StdMpiHelper<std::vector<double> > {
+  struct StdMpiHelper<vector<double> > {
     static MPI_Datatype mpiDataType;
 
     typedef double cppDataType;
 
-    static int getBufferSize(std::vector<double> &data);
+    static int getBufferSize(vector<double> &data);
 
-    static void createBuffer(std::vector<double> &data, double *buf);
+    static void createBuffer(vector<double> &data, double *buf);
 
-    static void makeFromBuffer(std::vector<double> &data, double *buf, int bufSize);
+    static void makeFromBuffer(vector<double> &data, double *buf, int bufSize);
   };
 
 
   template<>
-  struct StdMpiHelper<std::vector<std::vector<double> > > {
+  struct StdMpiHelper<vector<vector<double> > > {
     static MPI_Datatype mpiDataType;
 
     typedef double cppDataType;
 
-    static int getBufferSize(std::vector<std::vector<double> > &data);
+    static int getBufferSize(vector<vector<double> > &data);
 
-    static void createBuffer(std::vector<std::vector<double> > &data, double *buf);
+    static void createBuffer(vector<vector<double> > &data, double *buf);
 
-    static void makeFromBuffer(std::vector<std::vector<double> > &data, double *buf, int bufSize);
+    static void makeFromBuffer(vector<vector<double> > &data, double *buf, int bufSize);
   };
 
 
   // MeshDistributor::MeshCodeVec
   template<>
-  struct StdMpiHelper<std::vector<MeshStructure> > {
+  struct StdMpiHelper<vector<MeshStructure> > {
     static MPI_Datatype mpiDataType;
 
     typedef uint64_t cppDataType;
 
-    static int getBufferSize(std::vector<MeshStructure> &data);
+    static int getBufferSize(vector<MeshStructure> &data);
 
-    static void createBuffer(std::vector<MeshStructure> &data, uint64_t *buf);
+    static void createBuffer(vector<MeshStructure> &data, uint64_t *buf);
 
-    static void makeFromBuffer(std::vector<MeshStructure> &data,
+    static void makeFromBuffer(vector<MeshStructure> &data,
                                uint64_t *buf, int bufSize);
   };
 
 
   template<>
-  struct StdMpiHelper<std::vector<AtomicBoundary> > {
+  struct StdMpiHelper<vector<AtomicBoundary> > {
     static MPI_Datatype mpiDataType;
 
     typedef int cppDataType;
 
-    static int getBufferSize(std::vector<AtomicBoundary> &data);
+    static int getBufferSize(vector<AtomicBoundary> &data);
 
-    static void createBuffer(std::vector<AtomicBoundary> &data, int *buf);
+    static void createBuffer(vector<AtomicBoundary> &data, int *buf);
 
-    static void makeFromBuffer(std::vector<AtomicBoundary> &data, int *buf, int bufSize);
+    static void makeFromBuffer(vector<AtomicBoundary> &data, int *buf, int bufSize);
   };
 
 
   // MeshDistributor::PeriodicDofMap
   template<>
-  struct StdMpiHelper<std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > > {
+  struct StdMpiHelper<map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > > {
     static MPI_Datatype mpiDataType;
 
     typedef int cppDataType;
 
-    static int getBufferSize(std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > &data);
+    static int getBufferSize(map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > &data);
 
-    static void createBuffer(std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > &data, int *buf);
+    static void createBuffer(map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > &data, int *buf);
 
-    static void makeFromBuffer(std::map<BoundaryType, std::map<DegreeOfFreedom, DegreeOfFreedom> > &data, int *buf, int bufSize);
+    static void makeFromBuffer(map<BoundaryType, map<DegreeOfFreedom, DegreeOfFreedom> > &data, int *buf, int bufSize);
   };
 
 
   // PetscSolver::createPetscNnzStructure::MatrixNnzEntry
   template<>
-  struct StdMpiHelper<std::vector<std::pair<int, int> > > {
+  struct StdMpiHelper<vector<std::pair<int, int> > > {
     static MPI_Datatype mpiDataType;
 
     typedef int cppDataType;
 
-    static int getBufferSize(std::vector<std::pair<int, int> > &data);
+    static int getBufferSize(vector<std::pair<int, int> > &data);
 
-    static void createBuffer(std::vector<std::pair<int, int> > &data, int *buf);
+    static void createBuffer(vector<std::pair<int, int> > &data, int *buf);
 
-    static void makeFromBuffer(std::vector<std::pair<int, int> > &data,
+    static void makeFromBuffer(vector<std::pair<int, int> > &data,
                                int *buf, int bufSize);
   };
 
 
   // ParallelDebug::CoordsVec
   template<>
-  struct StdMpiHelper<std::vector<WorldVector<double> > > {
+  struct StdMpiHelper<vector<WorldVector<double> > > {
     static MPI_Datatype mpiDataType;
 
     typedef double cppDataType;
 
-    static int getBufferSize(std::vector<WorldVector<double> > &data);
+    static int getBufferSize(vector<WorldVector<double> > &data);
 
-    static void createBuffer(std::vector<WorldVector<double> > &data, double *buf);
+    static void createBuffer(vector<WorldVector<double> > &data, double *buf);
 
-    static void makeFromBuffer(std::vector<WorldVector<double> > &data, double *buf, int bufSize);
+    static void makeFromBuffer(vector<WorldVector<double> > &data, double *buf, int bufSize);
   };
 
 
   // ParallelDebug::testGlobalIndexByCoords::CoordsIndexMap
   template<>
-  struct StdMpiHelper<std::map<WorldVector<double>, int> > {
+  struct StdMpiHelper<map<WorldVector<double>, int> > {
     static MPI_Datatype mpiDataType;
 
     typedef double cppDataType;
 
-    static int getBufferSize(std::map<WorldVector<double>, int> &data);
+    static int getBufferSize(map<WorldVector<double>, int> &data);
 
-    static void createBuffer(std::map<WorldVector<double>, int> &data, double* buf);
+    static void createBuffer(map<WorldVector<double>, int> &data, double* buf);
 
-    static void makeFromBuffer(std::map<WorldVector<double>, int> &data, double* buf, int bufSize);
+    static void makeFromBuffer(map<WorldVector<double>, int> &data, double* buf, int bufSize);
   };
 
@@ -255,13 +285,13 @@ namespace AMDiS {
      * \param[in] data Maps rank number to the data that should be send to
      *                 these ranks.
      */
-    void send(std::map<int, SendT>& data)
+    void send(map<int, SendT>& data)
     {
       FUNCNAME("StdMpi::send()");
 
       TEST_EXIT_DBG(commPrepared)("Communication is not prepared!\n");
 
-      for (typename std::map<int, SendT>::iterator it = data.begin();
+      for (typename map<int, SendT>::iterator it = data.begin();
            it != data.end(); ++it) {
         sendData[it->first] = it->second;
         sendDataSize[it->first] = StdMpiHelper<SendT>::getBufferSize(it->second);
@@ -270,7 +300,7 @@
 
 
     /// Returns sending data, see \ref sendData.
-    std::map<int, SendT>& getSendData()
+    map<int, SendT>& getSendData()
     {
       return sendData;
     }
@@ -286,9 +316,9 @@ namespace AMDiS {
 
     /// Updates the buffer sizes for all sending data.
     void updateSendDataSize()
     {
-      for (typename std::map<int, SendT>::iterator it = sendData.begin();
+      for (typename map<int, SendT>::iterator it = sendData.begin();
            it != sendData.end(); ++it)
-        sendDataSize[it->first] = StdMpiHelper<SendT>::getBufferSize(it->second);
+        sendDataSize[it->first] = StdMpiHelper<SendT>::getBufferSize(it->second);
     }
 
@@ -322,13 +352,13 @@ namespace AMDiS {
 
     /// Is used to specify from which rank data will be received.
     template<class T>
-    void recv(std::map<int, T> &fromRanks)
+    void recv(map<int, T> &fromRanks)
     {
       FUNCNAME("StdMpi::recv()");
 
       TEST_EXIT_DBG(commPrepared)("Communication is not prepared!\n");
 
-      for (typename std::map<int, T>::iterator it = fromRanks.begin();
+      for (typename map<int, T>::iterator it = fromRanks.begin();
            it != fromRanks.end(); ++it)
         recvDataSize[it->first] =
           (exchangeDataSize ? -1 : StdMpiHelper<SendT>::getBufferSize(it->second));
@@ -336,7 +366,7 @@
 
 
     /// Returns received data, see \ref recvData.
-    std::map<int, RecvT>& getRecvData()
+    map<int, RecvT>& getRecvData()
     {
       return recvData;
     }
@@ -353,12 +383,14 @@ namespace AMDiS {
     /// this will be done in this function.
     void commDataSize()
     {
+      FUNCNAME("StdMpi::commDataSize()");
+
       MPI::Request request[sendData.size() + recvDataSize.size()];
-      std::vector<int> sendBuffers, recvBuffers;
+      vector<int> sendBuffers, recvBuffers;
       int requestCounter = 0;
 
       int i = 0;
-      for (typename std::map<int, int>::iterator sendIt = sendDataSize.begin();
+      for (typename map<int, int>::iterator sendIt = sendDataSize.begin();
           sendIt != sendDataSize.end(); ++sendIt) {
         sendBuffers.push_back(sendIt->second);
         request[requestCounter++] =
           mpiComm.Isend(&(sendBuffers[i]), 1, MPI_INT, sendIt->first, 0);
         i++;
       }
 
-      for (std::map<int, int>::iterator recvIt = recvDataSize.begin();
+      for (map<int, int>::iterator recvIt = recvDataSize.begin();
           recvIt != recvDataSize.end(); ++recvIt)
         request[requestCounter++] =
           mpiComm.Irecv(&(recvIt->second), 1, MPI_INT, recvIt->first, 0);
@@ -391,9 +423,9 @@ namespace AMDiS {
       MPI::Request request[sendData.size() + recvDataSize.size()];
       int requestCounter = 0;
-      std::vector<cppDataType*> sendBuffers, recvBuffers;
+      vector<cppDataType*> sendBuffers, recvBuffers;
 
-      for (typename std::map<int, SendT>::iterator sendIt = sendData.begin();
+      for (typename map<int, SendT>::iterator sendIt = sendData.begin();
           sendIt != sendData.end(); ++sendIt) {
         int bufferSize = sendDataSize[sendIt->first];
         cppDataType* buf = new cppDataType[bufferSize];
@@ -405,8 +437,12 @@
         sendBuffers.push_back(buf);
       }
 
-      for (std::map<int, int>::iterator recvIt = recvDataSize.begin();
+      for (map<int, int>::iterator recvIt = recvDataSize.begin();
           recvIt != recvDataSize.end(); ++recvIt) {
+        TEST_EXIT_DBG(recvIt->second > 0)
+          ("Zero MPI buffer size for receiving data from rank %d!\n",
+           recvIt->first);
+
         cppDataType* buf = new cppDataType[recvIt->second];
 
         request[requestCounter++] =
@@ -422,7 +458,7 @@
       sendBuffers.clear();
 
       int i = 0;
-      for (std::map<int, int>::iterator recvIt = recvDataSize.begin();
+      for (map<int, int>::iterator recvIt = recvDataSize.begin();
           recvIt != recvDataSize.end(); ++recvIt) {
         StdMpiHelper<SendT>::makeFromBuffer(recvData[recvIt->first],
@@ -440,14 +476,14 @@ namespace AMDiS {
     MPI::Intracomm mpiComm;
 
     ///
-    std::map<int, SendT> sendData;
+    map<int, SendT> sendData;
 
     ///
-    std::map<int, RecvT> recvData;
+    map<int, RecvT> recvData;
 
-    std::map<int, int> sendDataSize;
+    map<int, int> sendDataSize;
 
-    std::map<int, int> recvDataSize;
+    map<int, int> recvDataSize;
 
     bool commPrepared;
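
[Editor's note — illustrative sketch, not part of the patch. In the FETI-DP
setup above, createLagrange() reserves degree*(degree-1)/2 Lagrange multipliers
for each dual DOF shared by the rank set W(x_j), and createMatLagrange() writes
a +1 entry when the local rank is the first member of a pair and a -1 when it
is the second, leaving rows of pairs it does not belong to untouched. The
standalone sketch below (helper names RankSet, nLagrangeForDof and
printJumpRows are invented) reproduces that counting and sign pattern.]

#include <cstdio>
#include <set>
#include <vector>

typedef std::set<int> RankSet;

// One Lagrange multiplier per unordered pair of ranks sharing the dual DOF.
int nLagrangeForDof(const RankSet &ranks)
{
  int degree = ranks.size();
  return (degree * (degree - 1)) / 2;
}

// Sign pattern a given rank writes for one dual DOF, mirroring the double
// loop in PetscSolverFeti::createMatLagrange().
void printJumpRows(const RankSet &ranks, int myRank, int firstRow)
{
  std::vector<int> W(ranks.begin(), ranks.end());
  int row = firstRow;
  for (unsigned int i = 0; i < W.size(); i++)
    for (unsigned int j = i + 1; j < W.size(); j++) {
      if (W[i] == myRank)
        std::printf("row %d: +1 (pair %d/%d)\n", row, W[i], W[j]);
      else if (W[j] == myRank)
        std::printf("row %d: -1 (pair %d/%d)\n", row, W[i], W[j]);
      row++;  // every pair consumes a row, whether this rank writes it or not
    }
}

int main()
{
  RankSet W;
  W.insert(0); W.insert(1); W.insert(2);  // a dual DOF shared by three ranks
  std::printf("multipliers: %d\n", nLagrangeForDof(W));  // 3 = 3*2/2
  // Rank 1 writes -1 for pair (0,1) and +1 for pair (1,2); the row of
  // pair (0,2) is written by ranks 0 and 2.
  printJumpRows(W, 1, 0);
  return 0;
}
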