diff --git a/AMDiS/src/ProblemStat.h b/AMDiS/src/ProblemStat.h
index 51f92b24ecdd362254c423fa705c9d99f64fac3e..9bec32ec2d5a5c92e48d96b1131b49e64a8f52d3 100644
--- a/AMDiS/src/ProblemStat.h
+++ b/AMDiS/src/ProblemStat.h
@@ -328,13 +328,13 @@ namespace AMDiS {
     }

     /// Returns \ref feSpaces.
-    inline vector<const FiniteElemSpace*> getFeSpaces()
+    inline vector<const FiniteElemSpace*>& getFeSpaces()
     {
       return feSpaces;
     }

     /// Returns \ref componentSpaces;
-    inline vector<const FiniteElemSpace*> getComponentSpaces()
+    inline vector<const FiniteElemSpace*>& getComponentSpaces()
     {
       return componentSpaces;
     }
diff --git a/AMDiS/src/parallel/BddcMlSolver.cc b/AMDiS/src/parallel/BddcMlSolver.cc
index f7d705fccf5825f12c761d1284d41f0e6b09cee5..d08605d30d52f2d057ad0c77bb1fbe627b7e3bd0 100644
--- a/AMDiS/src/parallel/BddcMlSolver.cc
+++ b/AMDiS/src/parallel/BddcMlSolver.cc
@@ -89,7 +89,6 @@ namespace AMDiS {
     MSG("nelem = %d\n", nelem);

     // global number of nodes
-    ParallelDofMapping &dofMap = meshDistributor->getDofMap();
     int nnod = dofMap[feSpace].nOverallDofs;
     MSG("nnod = %d\n", nnod);

@@ -443,8 +442,6 @@ namespace AMDiS {
     typedef traits::range_generator<row, Matrix>::type cursor_type;
     typedef traits::range_generator<nz, cursor_type>::type icursor_type;

-    ParallelDofMapping &dofMap = meshDistributor->getDofMap();
-
     for (cursor_type cursor = begin<row>(dmat->getBaseMatrix()),
           cend = end<row>(dmat->getBaseMatrix()); cursor != cend; ++cursor) {
       for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor);
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index 4aec91366afda1a4b5d98abea3df121a3177e618..8c00dad3ccee86d766f69bfe8bcd3b5ba103a45a 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -74,13 +74,9 @@ namespace AMDiS {
     : problemStat(0),
       initialized(false),
       name("parallel"),
-      componentSpaces(0),
       mesh(NULL),
       refineManager(NULL),
-      info(10),
       partitioner(NULL),
-      dofMap(FESPACE_WISE),
-      dofMapSd(FESPACE_WISE),
       deserialized(false),
       writeSerializationFile(false),
       repartitioningAllowed(false),
@@ -149,8 +145,7 @@ namespace AMDiS {
     TEST_EXIT(mpiSize > 1)
       ("Parallelization does not work with only one process!\n");
-
-    TEST_EXIT(componentSpaces.size() > 0)
+    TEST_EXIT(feSpaces.size() > 0)
       ("No FE space has been defined for the mesh distributor!\n");
     TEST_EXIT(mesh)("No mesh has been defined for the mesh distributor!\n");
@@ -193,8 +188,9 @@ namespace AMDiS {
     elObjDb.setData(partitionMap, levelData);

 #if (DEBUG != 0)
+    TEST_EXIT_DBG(dofMaps.size())("No DOF mapping defined!\n");
     ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
-                                  dofMap,
+                                  *(dofMaps[0]),
                                   debugOutputDir + "mpi-dbg", "dat");
 #endif
@@ -366,13 +362,25 @@ namespace AMDiS {
     TEST_EXIT_DBG(probStat->getFeSpaces().size())
       ("No FE spaces in stationary problem!\n");

-    TEST_EXIT(componentSpaces.size() == 0)
-      ("Parallelization of coupled problems is deactived at the moment!\n");
-    componentSpaces = probStat->getComponentSpaces();
-    feSpaces = probStat->getFeSpaces();
-    mesh = feSpaces[0]->getMesh();
-    info = probStat->getInfo();
+
+    // === Add all FE spaces from stationary problem. ===
+
+    vector<const FiniteElemSpace*> newFeSpaces = probStat->getFeSpaces();
+    for (int i = 0; i < static_cast<int>(newFeSpaces.size()); i++)
+      if (find(feSpaces.begin(), feSpaces.end(), newFeSpaces[i]) ==
+          feSpaces.end())
+        feSpaces.push_back(newFeSpaces[i]);
+
+
+    // === Add mesh of stationary problem and create a corresponding ===
+    // === refinement manager object.                                ===
+
+    if (mesh != NULL) {
+      TEST_EXIT(mesh == probStat->getMesh())
+        ("There is no support for different meshes yet!\n");
+    } else {
+      mesh = probStat->getMesh();
+    }

     switch (mesh->getDim()) {
     case 2:
@@ -388,6 +396,9 @@ namespace AMDiS {

     partitioner->setMesh(mesh);

+
+    // === Check whether the stationary problem should be serialized. ===
+
     // Create parallel serialization file writer, if needed.
     int writeSerialization = 0;
     Parameters::get(probStat->getName() + "->output->write serialization",
@@ -407,6 +418,9 @@ namespace AMDiS {
       writeSerializationFile = true;
     }

+
+    // === Check whether the stationary problem should be deserialized. ===
+
     int readSerialization = 0;
     Parameters::get(probStat->getName() + "->input->read serialization",
                     readSerialization);
@@ -455,9 +469,9 @@ namespace AMDiS {

     problemStat.push_back(probStat);

-    // If the mesh distributor is already initialized, don't forget to set rank
-    // DOFs object to the matrices and vectors of the added stationary problem,
-    // and to remove the periodic boundary conditions on these objects.
+
+    // === If the mesh distributor is already initialized, don't forget to ===
+    // === remove the periodic boundary conditions on these objects.       ===

     if (initialized)
       removePeriodicBoundaryConditions(probStat);
@@ -478,6 +492,18 @@ namespace AMDiS {
   void MeshDistributor::exitParallelization()
   {}

+
+  void MeshDistributor::registerDofMap(ParallelDofMapping &dofMap)
+  {
+    FUNCNAME("MeshDistributor::registerDofMap()");
+
+    TEST_EXIT(find(dofMaps.begin(), dofMaps.end(), &dofMap) ==
+              dofMaps.end())
+      ("Parallel DOF mapping already registered in mesh distributor object!\n");
+
+    dofMaps.push_back(&dofMap);
+  }
+

   void MeshDistributor::testForMacroMesh()
   {
@@ -962,8 +988,7 @@ namespace AMDiS {
     }

     MPI::COMM_WORLD.Barrier();
-    INFO(info, 8)("Parallel mesh adaption needed %.5f seconds\n",
-                  MPI::Wtime() - first);
+    MSG("Parallel mesh adaption needed %.5f seconds\n", MPI::Wtime() - first);

 #if (DEBUG != 0)
     debug::writeMesh(feSpaces[0], -1, debugOutputDir + "mesh");
@@ -1178,6 +1203,10 @@ namespace AMDiS {
                      map<int, map<const FiniteElemSpace*, DofContainer> > &data,
                      map<const FiniteElemSpace*, map<int, const DegreeOfFreedom*> > &dofIndexMap)
   {
+    FUNCNAME("MeshDistributor::deserialize()");
+
+    ERROR_EXIT("Must be reimplemented!\n");
+#if 0
     data.clear();

     int mapSize = 0;
@@ -1191,6 +1220,7 @@ namespace AMDiS {
                   data[rank][componentSpaces[j]],
                   dofIndexMap[componentSpaces[j]]);
     }
+#endif
   }
@@ -1298,6 +1328,8 @@ namespace AMDiS {

     // === Run mesh partitioner to calculate a new mesh partitioning. ===

+    TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
+    ParallelDofMapping &dofMap = *(dofMaps[0]);
     partitioner->setLocalGlobalDofMap(&(dofMap[feSpaces[0]].getMap()));
     bool partitioningSucceed =
       partitioner->partition(elemWeights, ADAPTIVE_REPART);
@@ -1648,33 +1680,25 @@ namespace AMDiS {
     debug::createSortedDofs(mesh, elMap);
 #endif

-    int nLevels = levelData.getLevelNumber();
-    TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
-
-    dofMap.init(levelData, componentSpaces, feSpaces);
-    dofMap.setMpiComm(levelData.getMpiComm(0), 0);
-    dofMap.setDofComm(dofComm);
-    dofMap.clear();
-
-    if (nLevels > 1) {
-      dofMapSd.init(levelData, componentSpaces, feSpaces);
-      dofMapSd.setMpiComm(levelData.getMpiComm(1), 1);
-      dofMapSd.setDofComm(dofCommSd);
-      dofMapSd.clear();
-    }
-
     createBoundaryDofs();

-    for (unsigned int i = 0; i < feSpaces.size(); i++)
-      updateLocalGlobalNumbering(dofMap, dofComm, feSpaces[i]);
-    dofMap.update();

-    if (nLevels > 1) {
-      for (unsigned int i = 0; i < feSpaces.size(); i++)
-        updateLocalGlobalNumbering(dofMapSd, dofCommSd, feSpaces[i]);
-      dofMapSd.update();
+
+    // === Update all registered DOF mapping objects. ===
+
+    TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");
+
+    for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
+      vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
+
+      dofMaps[i]->clear();
+
+      for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++)
+        updateLocalGlobalNumbering(*(dofMaps[i]), dofMapSpaces[j]);
+
+      dofMaps[i]->update();
     }

+    // === Update DOF admins due to new number of DOFs. ===
+
     lastMeshChangeIndex = mesh->getChangeIndex();
@@ -1684,30 +1708,25 @@ namespace AMDiS {
     ParallelDebug::testDofContainerCommunication(*this);

     MSG("------------- Debug information -------------\n");
-    MSG("| number of levels: %d\n", nLevels);
+    MSG("| number of levels: %d\n", levelData.getLevelNumber());
     MSG("| number of FE spaces: %d\n", feSpaces.size());
-    for (unsigned int i = 0; i < feSpaces.size(); i++) {
-      MSG("| FE space = %d (pointer adr %p):\n", i, feSpaces[i]);
-      MSG("|   nRankDofs    = %d\n", dofMap[feSpaces[i]].nRankDofs);
-      MSG("|   nOverallDofs = %d\n", dofMap[feSpaces[i]].nOverallDofs);
-      MSG("|   rStartDofs   = %d\n", dofMap[feSpaces[i]].rStartDofs);
-    }
-
-    if (nLevels > 1) {
-      for (unsigned int i = 0; i < feSpaces.size(); i++) {
-        MSG("| FE space = %d:\n", i);
-        MSG("|   nRankDofs    = %d\n", dofMapSd[feSpaces[i]].nRankDofs);
-        MSG("|   nOverallDofs = %d\n", dofMapSd[feSpaces[i]].nOverallDofs);
-        MSG("|   rStartDofs   = %d\n", dofMapSd[feSpaces[i]].rStartDofs);
+    for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
+      vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
+
+      for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++) {
+        MSG("| FE space = %d (pointer adr %p):\n", j, dofMapSpaces[j]);
+        MSG("|   nRankDofs    = %d\n", (*(dofMaps[i]))[dofMapSpaces[j]].nRankDofs);
+        MSG("|   nOverallDofs = %d\n", (*(dofMaps[i]))[dofMapSpaces[j]].nOverallDofs);
+        MSG("|   rStartDofs   = %d\n", (*(dofMaps[i]))[dofMapSpaces[j]].rStartDofs);
       }
     }
-
+
     // debug::writeElementIndexMesh(mesh, debugOutputDir + "elementIndex-" +
     //                              lexical_cast<string>(mpiRank) + ".vtu");

     ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
-                                  dofMap,
+                                  *(dofMaps[0]),
                                   debugOutputDir + "mpi-dbg", "dat");

     debug::testSortedDofs(mesh, elMap);
@@ -1718,16 +1737,20 @@ namespace AMDiS {
       ParallelDebug::testGlobalIndexByCoords(*this);
     }
 #else
-    for (unsigned int i = 0; i < feSpaces.size(); i++)
-      MSG("FE space %d: nRankDofs = %d nOverallDofs = %d\n",
-          i, dofMap[feSpaces[i]].nRankDofs,
-          dofMap[feSpaces[i]].nOverallDofs);
+    for (int i = 0; i < static_cast<int>(dofMaps.size()); i++) {
+      vector<const FiniteElemSpace*>& dofMapSpaces = dofMaps[i]->getFeSpaces();
+
+      for (int j = 0; j < static_cast<int>(dofMapSpaces.size()); j++)
+        MSG("FE space %d: nRankDofs = %d nOverallDofs = %d\n", j,
+            (*(dofMaps[i]))[dofMapSpaces[j]].nRankDofs,
+            (*(dofMaps[i]))[dofMapSpaces[j]].nOverallDofs);
+    }

     int tmp = 0;
     Parameters::get(name + "->write parallel debug file", tmp);
     if (tmp)
       ParallelDebug::writeDebugFile(feSpaces[feSpaces.size() - 1],
-                                    dofMap,
+                                    *(dofMaps[0]),
                                     debugOutputDir + "mpi-dbg", "dat");
 #endif
@@ -1736,12 +1759,13 @@ namespace AMDiS {
   }


-  void MeshDistributor::updateLocalGlobalNumbering(ParallelDofMapping &dmap,
-                                                   DofComm &dcom,
+  void MeshDistributor::updateLocalGlobalNumbering(ParallelDofMapping &dofMap,
                                                    const FiniteElemSpace *feSpace)
   {
     FUNCNAME("MeshDistributor::updateLocalGlobalNumbering()");

+    DofComm &dcom = dofMap.getDofComm();
+
     // === Get all DOFs in ranks partition. ===

     std::set<const DegreeOfFreedom*> rankDofSet;
@@ -1750,7 +1774,9 @@ namespace AMDiS {
     DofContainer rankDofs(rankDofSet.begin(), rankDofSet.end());
     sort(rankDofs.begin(), rankDofs.end(), cmpDofsByValue);

+
     // === Traverse interior boundaries and get all DOFs on them. ===
+
     DofContainerSet nonRankDofs;
     for (DofComm::Iterator it(dcom.getRecvDofs(), 0, feSpace);
          !it.end(); it.nextRank())
@@ -1759,11 +1785,11 @@ namespace AMDiS {

     for (unsigned int i = 0; i < rankDofs.size(); i++)
       if (nonRankDofs.count(rankDofs[i]) == 0)
-        dmap[feSpace].insertRankDof(*(rankDofs[i]));
+        dofMap[feSpace].insertRankDof(*(rankDofs[i]));

     for (DofContainerSet::iterator it = nonRankDofs.begin();
          it != nonRankDofs.end(); ++it)
-      dmap[feSpace].insertNonRankDof(**it);
+      dofMap[feSpace].insertNonRankDof(**it);
   }
@@ -1788,8 +1814,7 @@ namespace AMDiS {
       createPeriodicMap(feSpaces[i]);

     // MPI::COMM_WORLD.Barrier();
-    INFO(info, 8)("Creation of periodic mapping needed %.5f seconds\n",
-                  MPI::Wtime() - first);
+    MSG("Creation of periodic mapping needed %.5f seconds\n", MPI::Wtime() - first);
   }
@@ -1797,8 +1822,10 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::createPeriodicMap()");

-    DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
+    TEST_EXIT(dofMaps.size())("No DOF mapping defined!\n");

+    DofComm::LevelDataType &periodicDofs = dofComm.getPeriodicDofs();
+    ComponentDofMap &dofMap = (*(dofMaps[0]))[feSpace];
     StdMpi<vector<int> > stdMpi(mpiComm, false);

     // === Each rank traverse its periodic boundaries and sends the DOF ===
@@ -1832,8 +1859,8 @@ namespace AMDiS {
         BoundaryType type = bound.type;

         for (unsigned int j = 0; j < dofs0.size(); j++) {
-          DegreeOfFreedom globalDof0 = dofMap[feSpace][*(dofs0[j])].global;
-          DegreeOfFreedom globalDof1 = dofMap[feSpace][*(dofs1[j])].global;
+          DegreeOfFreedom globalDof0 = dofMap[*(dofs0[j])].global;
+          DegreeOfFreedom globalDof1 = dofMap[*(dofs1[j])].global;

           if (!periodicMap.isPeriodicOnBound(feSpace, type, globalDof0))
             periodicMap.add(feSpace, type, globalDof0, globalDof1);
@@ -1858,7 +1885,7 @@ namespace AMDiS {
       // Send the global indices to the rank on the other side.
       stdMpi.getSendData(it->first).reserve(dofs.size());
       for (unsigned int i = 0; i < dofs.size(); i++)
-        stdMpi.getSendData(it->first).push_back(dofMap[feSpace][*(dofs[i])].global);
+        stdMpi.getSendData(it->first).push_back(dofMap[*(dofs[i])].global);

       // Receive from this rank the same number of dofs.
       stdMpi.recv(it->first, dofs.size());
@@ -1884,7 +1911,7 @@ namespace AMDiS {

       // Added the received DOFs to the mapping.
       for (unsigned int i = 0; i < dofs.size(); i++) {
-        int globalDofIndex = dofMap[feSpace][*(dofs[i])].global;
+        int globalDofIndex = dofMap[*(dofs[i])].global;
         int mapGlobalDofIndex = stdMpi.getRecvData(it->first)[i];
         BoundaryType type = types[i];
@@ -1917,7 +1944,7 @@ namespace AMDiS {
         boundIt->rankObj.el->getAllDofs(feSpace, boundIt->rankObj, dofs);

         for (unsigned int i = 0; i < dofs.size(); i++) {
-          DegreeOfFreedom globalDof = dofMap[feSpace][*dofs[i]].global;
+          DegreeOfFreedom globalDof = dofMap[*dofs[i]].global;

           std::set<BoundaryType>& assoc =
             periodicMap.getAssociations(feSpace, globalDof);
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index 06b59bcf2fc9e4e69e26031e2daa625038e741a2..8ed4d24b0cb5eceb5505bddbee81c90b08cec107 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -63,10 +63,20 @@ namespace AMDiS {
   public:
     ~MeshDistributor();

+    /// Initialization of mesh distributor.
     void initParallelization();

+    /// Clean up procedure for the mesh distributor and attached objects.
     void exitParallelization();

+    /** \brief
+     * Register a parallel DOF mapping. This DOF mapping object will then be
+     * updated automatically by the mesh distributor after mesh changes.
+     *
+     * \param[in]  dofMap  Parallel DOF mapping object.
+     */
+    void registerDofMap(ParallelDofMapping &dofMap);
+
     /// Adds a DOFVector to the set of \ref interchangeVecs. Thus, this vector
     /// will be automatically interchanged between ranks when mesh is
     /// repartitioned.
@@ -132,49 +142,6 @@ namespace AMDiS {
       return mesh;
     }

-    /// Returns an FE space from \ref feSpaces.
-    inline const FiniteElemSpace* getFeSpace(unsigned int i = 0)
-    {
-      FUNCNAME("MeshDistributor::getFeSpace()");
-
-      TEST_EXIT_DBG(i < feSpaces.size())
-        ("Try to access FE space %d, but have only %d FE spaces!\n",
-         i, feSpaces.size());
-
-      return feSpaces[i];
-    }
-
-    /// Returns all FE spaces, thus \ref feSpaces.
-    inline vector<const FiniteElemSpace*>& getFeSpaces()
-    {
-      return feSpaces;
-    }
-
-    inline const FiniteElemSpace* getComponentSpace(unsigned int i = 0)
-    {
-      FUNCNAME("MeshDistributor::getFeSpace()");
-
-      TEST_EXIT_DBG(i < componentSpaces.size())("Should not happen!\n");
-
-      return componentSpaces[i];
-    }
-
-    inline vector<const FiniteElemSpace*>& getComponentSpaces()
-    {
-      return componentSpaces;
-    }
-
-    /// Returns the DOF mapping object, \ref dofMap.
-    inline ParallelDofMapping& getDofMap()
-    {
-      return dofMap;
-    }
-
-    inline ParallelDofMapping& getDofMapSd()
-    {
-      return dofMapSd;
-    }
-
     /// Returns the periodic mapping handler, \ref periodicMap.
     inline PeriodicMap& getPeriodicMap()
     {
@@ -360,7 +327,6 @@ namespace AMDiS {
     /// Updates the local and global DOF numbering after the mesh has been
     /// changed.
     void updateLocalGlobalNumbering(ParallelDofMapping &dmap,
-                                    DofComm &dcom,
                                     const FiniteElemSpace *feSpace);

   protected:
@@ -501,14 +467,12 @@ namespace AMDiS {
     /// Name of the problem (as used in the init files)
     string name;

-    /// Finite element spaces of the problem.
-    vector<const FiniteElemSpace*> componentSpaces;
-
     /// Set of all different FE spaces.
     vector<const FiniteElemSpace*> feSpaces;

-
-    /// Mesh of the problem.
+    /// Pointer to the only mesh. Note that we do not yet support the
+    /// multi-mesh method; thus, even for coupled problems, all problems must
+    /// be defined on the same mesh.
     Mesh *mesh;

     /// A refinement manager that should be used on the mesh. It is used to
@@ -516,9 +480,6 @@ namespace AMDiS {
     /// elements on the other side of the interior boundary.
     RefinementManager *refineManager;

-    /// Info level.
-    int info;
-
     /// Pointer to a mesh partitioner that is used to partition the mesh to
     /// the ranks.
     MeshPartitioner *partitioner;
@@ -531,11 +492,6 @@ namespace AMDiS {
     /// macro element.
     map<int, int> partitionMap;

-    /// Mapping object to map from local DOF indices to global ones.
-    ParallelDofMapping dofMap;
-
-    ParallelDofMapping dofMapSd;
-
     /// Database to store and query all sub-objects of all elements of the
     /// macro mesh.
     ElementObjectDatabase elObjDb;
@@ -608,6 +564,8 @@ namespace AMDiS {
     /// all boundary DOFs.
     vector<map<const FiniteElemSpace*, BoundaryDofInfo> > boundaryDofInfo;

+    /// Stores information about the hierarchical decomposition of the mesh
+    /// levels. It is used to specify multilevel solver methods.
     MeshLevelData levelData;

     /// If there is no mesh adaptivity, the mesh distributor can remove some
@@ -616,6 +574,10 @@ namespace AMDiS {
     /// is set to true, and thus no special assumption are made.
     bool meshAdaptivity;

+    /// Set of all parallel DOF mapping objects that are registered by parallel
+    /// solver objects and must be updated automatically after mesh changes.
+    vector<ParallelDofMapping*> dofMaps;
+
   public:
     static bool sebastianMode;
diff --git a/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc b/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
index dbe6803c0e5067cc81c087f58a13568d317be661..1c703b38de364933dba0d07962d95651b21b3f4f 100644
--- a/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
+++ b/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
@@ -115,8 +115,6 @@ namespace AMDiS {

     if (checkMeshChange()) {
       // Mesh has been changed, recompute interior DOF mapping.
-      vector<const FiniteElemSpace*> feSpaces =
-        meshDistributor->getComponentSpaces();
       interiorMap->setComputeMatIndex(!localMatrix);
       interiorMap->update();
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index cc51b18e11181e656fa256edc7fbb4b5837e5a68..241ddb148e6492893e7fd417ba39dec6fa9a6900 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -462,7 +462,7 @@ namespace AMDiS {
     mpi::globalAdd(foundError);
     TEST_EXIT(foundError == 0)("Error found on at least on rank!\n");

-    INFO(pdb.info, 8)("Test common dofs needed %.5f seconds\n", TIME_USED(first, clock()));
+    MSG("Test common dofs needed %.5f seconds\n", TIME_USED(first, clock()));
   }
@@ -481,7 +481,7 @@ namespace AMDiS {

     DOFIterator<WorldVector<double> > it(&coords, USED_DOFS);
     for (it.reset(); !it.end(); ++it) {
-      coordsToIndex[(*it)] = pdb.dofMap[feSpace][it.getDOFIndex()].global;
+      coordsToIndex[(*it)] = (*(pdb.dofMaps[0]))[feSpace][it.getDOFIndex()].global;
       // MSG(" CHECK FOR DOF %d AT COORDS %f %f %f\n",
       //     coordsToIndex[(*it)], (*it)[0], (*it)[1], (*it)[2]);
     }
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index c42017ce29c262747026d88a22988ad764e1173e..d0c65bfaf50786b4afd0394818036f2ae678ffa9 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -243,6 +243,7 @@ namespace AMDiS {
     levelData = &ldata;
     globalMapping = b;
     componentSpaces = fe;
+    feSpaces = uniqueFe;

     data->init(fe, uniqueFe, globalMapping, ldata);
   }
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index 7bfae81237f445eb2856b5a53a0b25e19f9a842a..9ed7f481f689dc2220a16ea5e5f2570db46815ee 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -151,7 +151,8 @@ namespace AMDiS {
     {
       FUNCNAME("ComponentDofMap::insertRankDof()");

-      TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
+      TEST_EXIT_DBG(dofMap.count(dof0) == 0)
+        ("DOF %d is already defined in mapping!\n", dof0);

       dofMap[dof0].local = dof1;
       nLocalDofs++;
@@ -637,17 +638,17 @@ namespace AMDiS {
      * Initialize the parallel DOF mapping.
      *
      * \param[in]  m               MPI communicator.
-     * \param[in]  fe              The FE spaces of all components of the
+     * \param[in]  componentSpaces The FE spaces of all components of the
      *                             PDE to be solved.
-     * \param[in]  uniqueFe        Unique list of FE spaces. Thus, two
+     * \param[in]  feSpaces        Unique list of FE spaces. Thus, two
      *                             arbitrary elements of this list are always
      *                             different.
      * \param[in]  globalMapping   If true, at least one rank's mapping con-
      *                             taines DOFs that are not owend by the rank.
      */
     void init(MeshLevelData& mld,
-              vector<const FiniteElemSpace*> &fe,
-              vector<const FiniteElemSpace*> &uniqueFe,
+              vector<const FiniteElemSpace*> &componentSpaces,
+              vector<const FiniteElemSpace*> &feSpaces,
               bool globalMapping = true);

     /// In the case of having only one FE space, this init function can be used.
@@ -774,6 +775,12 @@ namespace AMDiS {
       return dofToMatIndex.get(ithComponent, d) - rStartDofs;
     }

+    /// Returns the set of unique FE spaces.
+    inline vector<const FiniteElemSpace*>& getFeSpaces()
+    {
+      return feSpaces;
+    }
+
     // Writes all data of this object to an output stream.
     void serialize(ostream &out)
     {
@@ -871,8 +878,11 @@ namespace AMDiS {
     /// consideration of possibly multiple components.
     DofToMatIndex dofToMatIndex;

-    /// FE spaces of all components
+    /// FE spaces of all components.
     vector<const FiniteElemSpace*> componentSpaces;
+
+    /// Set of unique FE spaces.
+    vector<const FiniteElemSpace*> feSpaces;
   };

 }
diff --git a/AMDiS/src/parallel/PetscProblemStat.cc b/AMDiS/src/parallel/PetscProblemStat.cc
index 73dd638f69ba9203784f5dc4bc17377d80c5a5e8..0d4424682b2eaf17ce6d914c4bdee6aac9f3437d 100644
--- a/AMDiS/src/parallel/PetscProblemStat.cc
+++ b/AMDiS/src/parallel/PetscProblemStat.cc
@@ -70,9 +70,14 @@ namespace AMDiS {
                                         ProblemStatSeq* adoptProblem,
                                         Flag adoptFlag)
   {
-    ParallelProblemStatBase::initialize(initFlag, adoptProblem, adoptFlag);
+    ParallelProblemStatBase::initialize(initFlag, adoptProblem, adoptFlag);
+
+    meshDistributor->setBoundaryDofRequirement(petscSolver->getBoundaryDofRequirement());

-    meshDistributor->setBoundaryDofRequirement(petscSolver->getBoundaryDofRequirement());
+    petscSolver->setMeshDistributor(meshDistributor,
+                                    meshDistributor->getMpiComm(),
+                                    PETSC_COMM_SELF);
+    petscSolver->init(this->getComponentSpaces(), this->getFeSpaces());
   }
@@ -86,13 +91,8 @@ namespace AMDiS {

     double wtime = MPI::Wtime();

-    if (createMatrixData) {
-      petscSolver->setMeshDistributor(meshDistributor,
-                                      meshDistributor->getMpiComm(),
-                                      PETSC_COMM_SELF);
-      petscSolver->setDofMapping(&(meshDistributor->getDofMap()));
+    if (createMatrixData)
       petscSolver->fillPetscMatrix(systemMatrix);
-    }

     petscSolver->fillPetscRhs(rhs);
diff --git a/AMDiS/src/parallel/PetscSolver.cc b/AMDiS/src/parallel/PetscSolver.cc
index 510d8c8ff8561dd60f58abffc8cfb672a0ce26f3..58bdacd9cb900032ff56cae481e332724c34a401 100644
--- a/AMDiS/src/parallel/PetscSolver.cc
+++ b/AMDiS/src/parallel/PetscSolver.cc
@@ -21,13 +21,17 @@ namespace AMDiS {
   using namespace std;

   PetscSolver::PetscSolver()
-    : ParallelCoarseSpaceMatVec(),
+    : ParallelCoarseSpaceMatVec(),
+      dofMap(FESPACE_WISE),
+      dofMapSd(FESPACE_WISE),
       kspPrefix(""),
       removeRhsNullspace(false),
       hasConstantNullspace(false),
       isSymmetric(false),
       handleDirichletRows(true)
   {
+    setDofMapping(&dofMap);
+
     string kspStr = "";
     Parameters::get("parallel->solver->petsc->ksp", kspStr);
     if (kspStr != "")
@@ -40,6 +44,38 @@ namespace AMDiS {
   }


+  void PetscSolver::init(vector<const FiniteElemSpace*> &fe0,
+                         vector<const FiniteElemSpace*> &fe1)
+  {
+    FUNCNAME("PetscSolver::init()");
+
+    TEST_EXIT(meshDistributor)("No mesh distributor object defined!\n");
+    TEST_EXIT(fe0.size())("No component spaces in PETSc solver object!\n");
+    TEST_EXIT(fe1.size())("No FE spaces in PETSc solver object!\n");
+
+    componentSpaces = fe0;
+    feSpaces = fe1;
+
+    MeshLevelData& levelData = meshDistributor->getMeshLevelData();
+    int nLevels = levelData.getLevelNumber();
+    TEST_EXIT_DBG(nLevels >= 1)("Should not happen!\n");
+
+    dofMap.init(levelData, componentSpaces, feSpaces);
+    dofMap.setMpiComm(levelData.getMpiComm(0), 0);
+    dofMap.setDofComm(meshDistributor->getDofComm());
+    dofMap.clear();
+    meshDistributor->registerDofMap(dofMap);
+
+    if (nLevels > 1) {
+      dofMapSd.init(levelData, componentSpaces, feSpaces);
+      dofMapSd.setMpiComm(levelData.getMpiComm(1), 1);
+      dofMapSd.setDofComm(meshDistributor->getDofCommSd());
+      dofMapSd.clear();
+      meshDistributor->registerDofMap(dofMapSd);
+    }
+  }
+
+
   void PetscSolver::fillPetscMatrix(DOFMatrix* mat)
   {
     Matrix<DOFMatrix*> sysMat(1, 1);
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index 0eb404ec15439160d699ad0aef389495830fdac9..31d0267476d797766b33faa708b01644e90043b5 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -54,6 +54,9 @@ namespace AMDiS {

     virtual ~PetscSolver() {}

+    virtual void init(vector<const FiniteElemSpace*> &componentSpaces,
+                      vector<const FiniteElemSpace*> &feSpaces);
+
     /** \brief
      * Create a PETSc matrix. The given DOF matrices are used to create the nnz
      * structure of the PETSc matrix and the values are transfered to it.
@@ -159,6 +162,16 @@ namespace AMDiS {
     /// Run test, if matrix is symmetric.
     bool testMatrixSymmetric(Mat mat, bool advancedTest = false);

   protected:
+    /// FE spaces of all components for the stationary problem the specific
+    /// solver object is registered to.
+    vector<const FiniteElemSpace*> componentSpaces;
+
+    /// Set of unique FE spaces in \ref componentSpaces.
+    vector<const FiniteElemSpace*> feSpaces;
+
+    /// Parallel DOF mappings of the solver; \ref dofMapSd is used in addition
+    /// to \ref dofMap if there is more than one mesh level.
+    ParallelDofMapping dofMap, dofMapSd;
+
     /// PETSc solver object
     KSP kspInterior;
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index a1b21479064aa087cc8100d690bd2daa5e3ed098..253ecd46855b054c767a57feb9455e0ac3c7f58b 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -83,7 +83,7 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::initialize(vector<const FiniteElemSpace*> feSpaces)
+  void PetscSolverFeti::initialize()
   {
     FUNCNAME("PetscSolverFeti::initialize()");

@@ -91,8 +91,6 @@ namespace AMDiS {
       ("Mesh hierarchy does not contain %d levels!\n", meshLevel + 1);

     MeshLevelData& levelData = meshDistributor->getMeshLevelData();
-    vector<const FiniteElemSpace*>& uniqueFe = meshDistributor->getFeSpaces();
-
     stokesMode = false;
     Parameters::get("parallel->feti->stokes mode", stokesMode);

@@ -118,19 +116,19 @@ namespace AMDiS {
       }
     }

-    primalDofMap.init(levelData, feSpaces, uniqueFe);
-    dualDofMap.init(levelData, feSpaces, uniqueFe, false);
-    localDofMap.init(levelData, feSpaces, uniqueFe, meshLevel != 0);
-    lagrangeMap.init(levelData, feSpaces, uniqueFe);
+    primalDofMap.init(levelData, componentSpaces, feSpaces);
+    dualDofMap.init(levelData, componentSpaces, feSpaces, false);
+    localDofMap.init(levelData, componentSpaces, feSpaces, meshLevel != 0);
+    lagrangeMap.init(levelData, componentSpaces, feSpaces);

     if (stokesMode)
-      interfaceDofMap.init(levelData, feSpaces, uniqueFe);
+      interfaceDofMap.init(levelData, componentSpaces, feSpaces);

     if (fetiPreconditioner == FETI_DIRICHLET) {
       TEST_EXIT(meshLevel == 0)
         ("Dirichlet preconditioner not yet implemented for multilevel FETI-DP\n");

-      interiorDofMap.init(levelData, feSpaces, uniqueFe, false);
+      interiorDofMap.init(levelData, componentSpaces, feSpaces, false);
     }
   }
@@ -156,10 +154,6 @@ namespace AMDiS {

     double timeCounter = MPI::Wtime();

-    TEST_EXIT(meshDistributor)("No mesh distributor object defined!\n");
-    TEST_EXIT(meshDistributor->getFeSpaces().size() > 0)
-      ("No FE space defined in mesh distributor!\n");
-
     MeshLevelData& levelData = meshDistributor->getMeshLevelData();

     primalDofMap.clear();
@@ -190,16 +184,12 @@ namespace AMDiS {
       interfaceDofMap.setMpiComm(levelData.getMpiComm(0), 0);
     }

-    for (unsigned int i = 0; i < meshDistributor->getComponentSpaces().size(); i++) {
-      const FiniteElemSpace *feSpace = meshDistributor->getComponentSpace(i);
-
-      createPrimals(i, feSpace);
-
-      createDuals(i, feSpace);
-
-      createInterfaceNodes(i, feSpace);
-
-      createIndexB(i, feSpace);
+    int nComponents = componentSpaces.size();
+    for (int component = 0; component < nComponents; component++) {
+      createPrimals(component);
+      createDuals(component);
+      createInterfaceNodes(component);
+      createIndexB(component);
     }

     primalDofMap.update();
@@ -211,10 +201,9 @@ namespace AMDiS {
     if (stokesMode)
       interfaceDofMap.update();

-    for (unsigned int i = 0; i < meshDistributor->getComponentSpaces().size(); i++) {
-      const FiniteElemSpace *feSpace = meshDistributor->getComponentSpace(i);
-      createLagrange(i, feSpace);
-      createAugmentedLagrange(i, feSpace);
+    for (int component = 0; component < nComponents; component++) {
+      createLagrange(component);
+      createAugmentedLagrange(component);
     }

     lagrangeMap.update();
@@ -243,8 +232,8 @@ namespace AMDiS {
     }

     MSG("FETI-DP data created on mesh level %d\n", meshLevel);
-    for (unsigned int i = 0; i < meshDistributor->getComponentSpaces().size(); i++) {
-      const FiniteElemSpace *feSpace = meshDistributor->getComponentSpace(i);
+    for (unsigned int i = 0; i < componentSpaces.size(); i++) {
+      const FiniteElemSpace *feSpace = componentSpaces[i];

       MSG("FETI-DP data for %d-ith component (FE space %p):\n", i, feSpace);
@@ -282,14 +271,15 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::createPrimals(int component,
-                                      const FiniteElemSpace *feSpace)
+  void PetscSolverFeti::createPrimals(int component)
   {
     FUNCNAME("PetscSolverFeti::createPrimals()");

     if (component == pressureComponent)
       return;

+    const FiniteElemSpace *feSpace = componentSpaces[component];
+
     // === Define all vertices on the interior boundaries of the macro mesh ===
     // === to be primal variables.                                          ===
@@ -325,21 +315,22 @@ namespace AMDiS {
     // === create local indices of the primals starting at zero. ===

     for (DofIndexSet::iterator it = primals.begin(); it != primals.end(); ++it)
-      if (meshDistributor->getDofMap()[feSpace].isRankDof(*it)) {
+      if (dofMap[feSpace].isRankDof(*it)) {
        primalDofMap[component].insertRankDof(*it);
       } else
        primalDofMap[component].insertNonRankDof(*it);
   }


-  void PetscSolverFeti::createDuals(int component,
-                                    const FiniteElemSpace *feSpace)
+  void PetscSolverFeti::createDuals(int component)
   {
     FUNCNAME("PetscSolverFeti::createDuals()");

     if (component == pressureComponent)
       return;

+    const FiniteElemSpace *feSpace = componentSpaces[component];
+
     // === Create global index of the dual nodes on each rank. ===

     DofContainer allBoundaryDofs;
@@ -356,21 +347,21 @@ namespace AMDiS {
       if (meshLevel == 0) {
        dualDofMap[component].insertRankDof(**it);
       } else {
-       if (meshDistributor->getDofMapSd()[feSpace].isRankDof(**it))
+       if (dofMapSd[feSpace].isRankDof(**it))
          dualDofMap[component].insertRankDof(**it);
       }
     }
   }


-  void PetscSolverFeti::createInterfaceNodes(int component,
-                                             const FiniteElemSpace *feSpace)
+  void PetscSolverFeti::createInterfaceNodes(int component)
   {
     FUNCNAME("PetscSolverFeti::createInterfaceNodes()");

     if (component != pressureComponent)
       return;

+    const FiniteElemSpace *feSpace = componentSpaces[component];
     DofContainer allBoundaryDofs;
     meshDistributor->getAllBoundaryDofs(feSpace, meshLevel, allBoundaryDofs);

@@ -379,7 +370,7 @@ namespace AMDiS {
       if (dirichletRows[component].count(**it))
        continue;

-      if (meshDistributor->getDofMap()[feSpace].isRankDof(**it))
+      if (dofMap[feSpace].isRankDof(**it))
        interfaceDofMap[component].insertRankDof(**it);
       else
        interfaceDofMap[component].insertNonRankDof(**it);
@@ -387,14 +378,14 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::createLagrange(int component,
-                                       const FiniteElemSpace *feSpace)
+  void PetscSolverFeti::createLagrange(int component)
   {
     FUNCNAME("PetscSolverFeti::createLagrange()");

     if (component == pressureComponent)
       return;

+    const FiniteElemSpace *feSpace = componentSpaces[component];
     boundaryDofRanks[feSpace].clear();

     // Stores for all rank owned communication DOFs, if the counterpart is
@@ -413,7 +404,7 @@ namespace AMDiS {
        subdomainRankDofs.reserve(it.getDofs().size());

        for (; !it.endDofIter(); it.nextDof()) {
-         if (meshDistributor->getDofMapSd()[feSpace].isRankDof(it.getDofIndex()))
+         if (dofMapSd[feSpace].isRankDof(it.getDofIndex()))
           subdomainRankDofs.push_back(1);
          else
           subdomainRankDofs.push_back(0);
@@ -483,7 +474,7 @@ namespace AMDiS {
          if (!isPrimal(component, it.getDofIndex())) {
           if (meshLevel == 0 ||
               (meshLevel > 0 &&
-               meshDistributor->getDofMapSd()[feSpace].isRankDof(it.getDofIndex()))) {
+               dofMapSd[feSpace].isRankDof(it.getDofIndex()))) {
             recvFromRank = true;
             break;
           }
@@ -503,7 +494,7 @@ namespace AMDiS {
          if (!isPrimal(component, it.getDofIndex()))
           if (meshLevel == 0 ||
               (meshLevel > 0 &&
-               meshDistributor->getDofMapSd()[feSpace].isRankDof(it.getDofIndex())))
+               dofMapSd[feSpace].isRankDof(it.getDofIndex())))
             boundaryDofRanks[feSpace][it.getDofIndex()] =
               stdMpi.getRecvData(it.getRank())[i++];
           else
@@ -517,7 +508,7 @@ namespace AMDiS {
     int nRankLagrange = 0;
     DofMap& dualMap = dualDofMap[component].getMap();
     for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
-      if (meshDistributor->getDofMap()[feSpace].isRankDof(it->first)) {
+      if (dofMap[feSpace].isRankDof(it->first)) {
        lagrangeMap[component].insertRankDof(it->first, nRankLagrange);
        int degree = boundaryDofRanks[feSpace][it->first].size();
        nRankLagrange += (degree * (degree - 1)) / 2;
@@ -529,8 +520,7 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::createAugmentedLagrange(int component,
-                                                const FiniteElemSpace *feSpace)
+  void PetscSolverFeti::createAugmentedLagrange(int component)
   {
     FUNCNAME("PetscSolverFeti::createAugmentedLagrange()");

@@ -539,11 +529,12 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::createIndexB(int component,
-                                     const FiniteElemSpace *feSpace)
+  void PetscSolverFeti::createIndexB(int component)
   {
     FUNCNAME("PetscSolverFeti::createIndexB()");

+    const FiniteElemSpace *feSpace = componentSpaces[component];
     DOFAdmin* admin = feSpace->getAdmin();

     // === To ensure that all interior node on each rank are listen first in ===
@@ -567,7 +558,7 @@ namespace AMDiS {
          nLocalInterior++;
        } else {
-         if (meshDistributor->getDofMapSd()[feSpace].isRankDof(i))
+         if (dofMapSd[feSpace].isRankDof(i))
           localDofMap[component].insertRankDof(i);
          else
           localDofMap[component].insertNonRankDof(i);
@@ -584,7 +575,7 @@ namespace AMDiS {
       if (meshLevel == 0) {
        localDofMap[component].insertRankDof(it->first);
       } else {
-       if (meshDistributor->getDofMapSd()[feSpace].isRankDof(it->first))
+       if (dofMapSd[feSpace].isRankDof(it->first))
          localDofMap[component].insertRankDof(it->first);
        else
          localDofMap[component].insertNonRankDof(it->first);
@@ -593,7 +584,7 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::createMatLagrange(vector<const FiniteElemSpace*> &feSpaces)
+  void PetscSolverFeti::createMatLagrange()
   {
     FUNCNAME("PetscSolverFeti::createMatLagrange()");
@@ -619,19 +610,19 @@ namespace AMDiS {
     // ===   m == r, than the rank sets -1.0 for the corresponding ===
     // ===   constraint.                                           ===

-    for (unsigned int component = 0; component < feSpaces.size(); component++) {
+    for (unsigned int component = 0; component < componentSpaces.size(); component++) {
       DofMap &dualMap = dualDofMap[component].getMap();

       for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
-       TEST_EXIT_DBG(boundaryDofRanks[feSpaces[component]].count(it->first))
+       TEST_EXIT_DBG(boundaryDofRanks[componentSpaces[component]].count(it->first))
          ("Should not happen!\n");

        // Global index of the first Lagrange constriant for this node.
        int index = lagrangeMap.getMatIndex(component, it->first);

        // Copy set of all ranks that contain this dual node.
-       vector<int> W(boundaryDofRanks[feSpaces[component]][it->first].begin(),
-                     boundaryDofRanks[feSpaces[component]][it->first].end());
+       vector<int> W(boundaryDofRanks[componentSpaces[component]][it->first].begin(),
+                     boundaryDofRanks[componentSpaces[component]][it->first].end());

        // Number of ranks that contain this dual node.
        int degree = W.size();
@@ -654,7 +645,7 @@ namespace AMDiS {
        // === Create scaling factors for scaling the lagrange matrix, which ===
        // === is required for FETI-DP preconditioners.                      ===

-       if (meshDistributor->getDofMap()[feSpaces[component]].isRankDof(it->first)) {
+       if (dofMap[componentSpaces[component]].isRankDof(it->first)) {
          int nConstraints = (degree * (degree - 1)) / 2;
          for (int i = 0; i < nConstraints; i++) {
           VecSetValue(vec_scale_lagrange,
@@ -723,7 +714,7 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::createMatAugmentedLagrange(vector<const FiniteElemSpace*> &feSpaces)
+  void PetscSolverFeti::createMatAugmentedLagrange()
   {
     FUNCNAME("PetscSolverFeti::createMatAugmentedLagrange()");

@@ -764,9 +755,9 @@ namespace AMDiS {

     MSG("nRankEdges = %d, nOverallEdges = %d\n", nRankEdges, nOverallEdges);

-    nRankEdges *= feSpaces.size();
-    rStartEdges *= feSpaces.size();
-    nOverallEdges *= feSpaces.size();
+    nRankEdges *= componentSpaces.size();
+    rStartEdges *= componentSpaces.size();
+    nOverallEdges *= componentSpaces.size();

     MatCreateAIJ(mpiCommGlobal,
                 nRankEdges, lagrangeMap.getRankDofs(),
@@ -778,9 +769,9 @@ namespace AMDiS {
     int rowCounter = rStartEdges;
     for (std::set<BoundaryObject>::iterator edgeIt = allEdges.begin();
         edgeIt != allEdges.end(); ++edgeIt) {
-      for (int component = 0; component < feSpaces.size(); component++) {
+      for (int component = 0; component < componentSpaces.size(); component++) {
        DofContainer edgeDofs;
-       edgeIt->el->getAllDofs(feSpaces[component], *edgeIt, edgeDofs);
+       edgeIt->el->getAllDofs(componentSpaces[component], *edgeIt, edgeDofs);

        for (DofContainer::iterator it = edgeDofs.begin();
             it != edgeDofs.end(); ++it) {
@@ -815,7 +806,7 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::createSchurPrimalKsp(vector<const FiniteElemSpace*> &feSpaces)
+  void PetscSolverFeti::createSchurPrimalKsp()
   {
     FUNCNAME("PetscSolverFeti::createSchurPrimalKsp()");

@@ -1105,7 +1096,7 @@ namespace AMDiS {
   }


-  void PetscSolverFeti::createFetiKsp(vector<const FiniteElemSpace*> &feSpaces)
+  void PetscSolverFeti::createFetiKsp()
   {
     FUNCNAME("PetscSolverFeti::createFetiKsp()");

@@ -1209,7 +1200,7 @@ namespace AMDiS {
       TEST_EXIT_DBG(meshLevel == 0)
        ("Should not happen, check usage of localDofMap!\n");

-      for (unsigned int component = 0; component < feSpaces.size(); component++) {
+      for (unsigned int component = 0; component < componentSpaces.size(); component++) {
        DofMap &dualMap = dualDofMap[component].getMap();
        for (DofMap::iterator it = dualMap.begin(); it != dualMap.end(); ++it) {
          DegreeOfFreedom d = it->first;
@@ -1252,7 +1243,7 @@ namespace AMDiS {
       MatGetVecs(mat_duals_duals, PETSC_NULL,
                 &(lumpedData->tmp_vec_duals1));

-      for (unsigned int component = 0; component < feSpaces.size(); component++) {
+      for (unsigned int component = 0; component < componentSpaces.size(); component++) {
        if (stokesMode && component == pressureComponent)
          continue;

@@ -1269,7 +1260,7 @@ namespace AMDiS {
       // === Create H2 vec ===

       const FiniteElemSpace *pressureFeSpace =
-       meshDistributor->getComponentSpace(pressureComponent);
+       componentSpaces[pressureComponent];

       DOFVector<double> tmpvec(pressureFeSpace, "tmpvec");
       createH2vec(tmpvec);
@@ -1277,7 +1268,7 @@ namespace AMDiS {

       DofMap& m = interfaceDofMap[pressureComponent].getMap();
       for (DofMap::iterator it = m.begin(); it != m.end(); ++it) {
-       if (meshDistributor->getDofMap()[pressureFeSpace].isRankDof(it->first)) {
+       if (dofMap[pressureFeSpace].isRankDof(it->first)) {
          int index = interfaceDofMap.getMatIndex(pressureComponent, it->first);
          VecSetValue(fetiInterfaceLumpedPreconData.h2vec,
                      index, tmpvec[it->first], INSERT_VALUES);
@@ -1420,15 +1411,14 @@ namespace AMDiS {
     if (!stokesMode)
       return;

-    const FiniteElemSpace *pressureFeSpace =
-      meshDistributor->getComponentSpace(pressureComponent);
+    const FiniteElemSpace *pressureFeSpace = componentSpaces[pressureComponent];
     Vec ktest0, ktest1;
     localDofMap.createLocalVec(ktest0);
     localDofMap.createLocalVec(ktest1);
     DofMap& m = localDofMap[pressureComponent].getMap();
     for (DofMap::iterator it = m.begin(); it != m.end(); ++it) {
-      if (meshDistributor->getDofMap()[pressureFeSpace].isRankDof(it->first)) {
+      if (dofMap[pressureFeSpace].isRankDof(it->first)) {
        int index = localDofMap.getLocalMatIndex(pressureComponent, it->first);
        VecSetValue(ktest0, index, 1.0, INSERT_VALUES);
       }
@@ -1679,7 +1669,6 @@ namespace AMDiS {

     // === And copy from PETSc local vectors to the DOF vectors. ===

-    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();
     int cnt = 0;
     for (int component = 0; component < nComponents; component++) {
       DOFVector<double>& dofVec = *(vec.getDOFVector(component));
@@ -1690,7 +1679,7 @@ namespace AMDiS {
          int petscIndex = localDofMap.getLocalMatIndex(component, it->first);
          dofVec[it->first] = localSolB[petscIndex];
        } else {
-         if (meshDistributor->getDofMapSd()[feSpaces[component]].isRankDof(it->first)) {
+         if (dofMapSd[componentSpaces[component]].isRankDof(it->first)) {
           int petscIndex = localDofMap.getLocalMatIndex(component, it->first);
           TEST_EXIT(petscIndex < bsize)("Should not happen!\n");
           dofVec[it->first] = localSolB[petscIndex];
@@ -1779,9 +1768,7 @@ namespace AMDiS {

     // === Create all sets and indices. ===

-    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();
-
-    initialize(feSpaces);
+    initialize();

     createDirichletData(*mat);

@@ -1817,16 +1804,16 @@ namespace AMDiS {
     createPreconditionerMatrix(mat);

     // === Create and fill PETSc matrix for Lagrange constraints. ===
-    createMatLagrange(feSpaces);
+    createMatLagrange();

     // ===  ===
-    createMatAugmentedLagrange(feSpaces);
+    createMatAugmentedLagrange();

     // === Create PETSc solver for the Schur complement on primal variables. ===
-    createSchurPrimalKsp(feSpaces);
+    createSchurPrimalKsp();

     // === Create PETSc solver for the FETI-DP operator. ===
-    createFetiKsp(feSpaces);
+    createFetiKsp();

     // === If required, run debug tests. ===

     dbgMatrix(mat);
@@ -1849,7 +1836,6 @@ namespace AMDiS {
       return;

     double wtime = MPI::Wtime();
-    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();

     int nRowsDual = dualDofMap.getRankDofs();

     MatCreateSeqAIJ(PETSC_COMM_SELF,
@@ -1906,14 +1892,19 @@ namespace AMDiS {

       if (!dofMat)
        continue;

+      TEST_EXIT_DBG(dofMat->getRowFeSpace() == componentSpaces[rowComponent])
+        ("Wrong matrix row FE space!\n");
+      TEST_EXIT_DBG(dofMat->getColFeSpace() == componentSpaces[colComponent])
+        ("Wrong matrix col FE space!\n");
+
       if (stokesMode &&
          (rowComponent == pressureComponent ||
           colComponent == pressureComponent))
        continue;

-      const FiniteElemSpace *rowFeSpace = feSpaces[rowComponent];
-      const FiniteElemSpace *colFeSpace = feSpaces[colComponent];
+      const FiniteElemSpace *rowFeSpace = dofMat->getRowFeSpace();
+      const FiniteElemSpace *colFeSpace = dofMat->getColFeSpace();

       traits::col<Matrix>::type col(dofMat->getBaseMatrix());
       traits::const_value<Matrix>::type value(dofMat->getBaseMatrix());
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 8d3ac569c9856528cdc7af2c90b970c1f1fca0bc..e10d725b1a776f389cd33b0b2bb99a9b5e72aa9a 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -69,9 +69,8 @@ namespace AMDiS {
        MeshDistributor::BOUNDARY_FILL_INFO_RECV_DOFS;
     }

-    /// Initialization of the data structures with a given list of the FE
-    /// spaces of all components.
-    void initialize(vector<const FiniteElemSpace*> feSpaces);
+    /// Initialization of the FETI-DP data structures.
+    void initialize();

     int getNumberOfPrimals()
     {
@@ -109,35 +108,29 @@ namespace AMDiS {

     /// Defines which boundary nodes are primal. Creates global index of
     /// the primal variables.
-    void createPrimals(int component,
-                       const FiniteElemSpace *feSpace);
+    void createPrimals(int component);

     /// Defines the set of dual variables and creates the global index of
     /// dual variables.
-    void createDuals(int component,
-                     const FiniteElemSpace *feSpace);
+    void createDuals(int component);

     ///
-    void createInterfaceNodes(int component,
-                              const FiniteElemSpace *feSpace);
+    void createInterfaceNodes(int component);

     /// Create Lagrange multiplier variables corresponding to the dual
     /// variables.
-    void createLagrange(int component,
-                        const FiniteElemSpace *feSpace);
+    void createLagrange(int component);

-    void createAugmentedLagrange(int component,
-                                 const FiniteElemSpace *feSpace);
+    void createAugmentedLagrange(int component);

     /// Creates a global index of the B variables.
-    void createIndexB(int component,
-                      const FiniteElemSpace *feSpace);
+    void createIndexB(int component);

     /// Creates the Lagrange multiplier constraints and assembles them
     /// to \ref mat_lagrange.
-    void createMatLagrange(vector<const FiniteElemSpace*> &feSpaces);
+    void createMatLagrange();

-    void createMatAugmentedLagrange(vector<const FiniteElemSpace*> &feSpaces);
+    void createMatAugmentedLagrange();

     bool testWirebasketEdge(BoundaryObject &edge, const FiniteElemSpace *feSpace);

@@ -146,7 +139,7 @@ namespace AMDiS {

     /// Creates PETSc KSP solver object for solving the Schur complement
     /// system on the primal variables, \ref ksp_schur_primal
-    void createSchurPrimalKsp(vector<const FiniteElemSpace*> &feSpaces);
+    void createSchurPrimalKsp();

     ///
     void createMatExplicitSchurPrimal();
@@ -158,7 +151,7 @@ namespace AMDiS {
     void destroySchurPrimalKsp();

     /// Creates PETSc KSP solver object for the FETI-DP operator, \ref ksp_feti
-    void createFetiKsp(vector<const FiniteElemSpace*> &feSpaces);
+    void createFetiKsp();

     /// Destroys FETI-DP operator, \ref ksp_feti
     void destroyFetiKsp();
diff --git a/AMDiS/src/parallel/PetscSolverFetiDebug.cc b/AMDiS/src/parallel/PetscSolverFetiDebug.cc
index 283ab9abbdc2ced9507dedc5e57d733fc8a5ab50..ad575e210fd4f95e2dc5a367c734b09876ac509f 100644
--- a/AMDiS/src/parallel/PetscSolverFetiDebug.cc
+++ b/AMDiS/src/parallel/PetscSolverFetiDebug.cc
@@ -74,10 +74,10 @@ namespace AMDiS {
     localDofMap.createVec(ktest2, feti.nGlobalOverallInterior);

     const FiniteElemSpace* pressureFeSpace =
-      feti.meshDistributor->getComponentSpace(feti.pressureComponent);
+      feti.componentSpaces[feti.pressureComponent];
     DofMap& m = localDofMap[feti.pressureComponent].getMap();
     for (DofMap::iterator it = m.begin(); it != m.end(); ++it) {
-      if (feti.meshDistributor->getDofMap()[pressureFeSpace].isRankDof(it->first)) {
+      if (feti.dofMap[pressureFeSpace].isRankDof(it->first)) {
        int index = localDofMap.getLocalMatIndex(feti.pressureComponent, it->first);
        VecSetValue(ktest0, index, 1.0, INSERT_VALUES);
       }
diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
index 8476ef41dad503c0beaa7ff589d24034b39cb8d0..a40896fcfbe6ddd696d1a91bf5228cf49000cce1 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.cc
@@ -28,7 +28,7 @@ namespace AMDiS {

     createMatVec(*seqMat);

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];
     nComponents = seqMat->getNumRows();
     int nRankRows = (*interiorMap)[feSpace].nRankDofs;
     int nOverallRows = (*interiorMap)[feSpace].nOverallDofs;
@@ -109,7 +109,7 @@ namespace AMDiS {
     TEST_EXIT_DBG(vec)("NO DOF vector defined!\n");

     nComponents = vec->getSize();
-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];
     int nRankRows = (*interiorMap)[feSpace].nRankDofs;
     int nOverallRows = (*interiorMap)[feSpace].nOverallDofs;

@@ -139,7 +139,7 @@ namespace AMDiS {
     KSPGetPC(kspInterior, &pcInterior);
     setBlockPreconditioner(pcInterior);

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];
     VecDuplicate(getVecRhsInterior(), &petscSolVec); // PETSc.
@@ -205,7 +205,7 @@ namespace AMDiS {
     TEST_EXIT(petscMat)("No PETSc matrix!\n");
     TEST_EXIT(seqMat)("No DOFMatrix!\n");

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];

     using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
     namespace traits = mtl::traits;
@@ -262,7 +262,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverGlobalBlockMatrix::setDofVector()");

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];

     // Traverse all used DOFs in the dof vector.
     DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index d3fc6c8bc4c93702511399456af6f00702d9cba1..d4cdceeab3783153ae5a5691636d5c023930e131 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -82,7 +82,7 @@ namespace AMDiS {
     if (!zeroStartVector)
       KSPSetInitialGuessNonzero(kspInterior, PETSC_TRUE);

-    removeDirichletRows(seqMat, meshDistributor->getDofMap(), getMatInterior());
+    removeDirichletRows(seqMat, dofMap, getMatInterior());

 #if (DEBUG != 0)
     MSG("Fill petsc matrix 3 needed %.5f seconds\n", MPI::Wtime() - wtime);
@@ -111,8 +111,6 @@ namespace AMDiS {
     TEST_EXIT_DBG(coarseSpaceMap.size() == seqMat->getSize())
       ("Wrong sizes %d %d\n", coarseSpaceMap.size(), seqMat->getSize());

-    vector<const FiniteElemSpace*> feSpaces = meshDistributor->getComponentSpaces();
-
     // === Prepare traverse of sequentially created matrices. ===

     using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
@@ -277,7 +275,7 @@

     vecRhsAssembly();

-    removeDirichletRows(vec, meshDistributor->getDofMap(), getVecRhsInterior());
+    removeDirichletRows(vec, dofMap, getVecRhsInterior());

     // === For debugging allow to write the rhs vector to a file. ===
diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc
index 77b692f38e38fd8e2c1cf4d1bc6c2d5ae0380de5..1c0db4e6b7c6a1d5721e6098156d3797d6068761 100644
--- a/AMDiS/src/parallel/PetscSolverSchur.cc
+++ b/AMDiS/src/parallel/PetscSolverSchur.cc
@@ -25,7 +25,7 @@ namespace AMDiS {
     TEST_EXIT_DBG(meshDistributor)("No mesh distributor object defined!\n");
     TEST_EXIT_DBG(interiorMap)("No parallel DOF map defined!\n");

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];
     typedef map<int, DofContainer> RankToDofContainer;
     typedef map<DegreeOfFreedom, bool> DofIndexToBool;

@@ -182,7 +182,7 @@ namespace AMDiS {

     createMatVec(*seqMat);

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];
     int nComponents = seqMat->getNumRows();
     updateDofData(nComponents);

@@ -257,7 +257,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverSchur::fillPetscRhs()");

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];
     int nComponents = vec->getSize();
     int nRankRows = (*interiorMap)[feSpace].nRankDofs * nComponents;
     int nOverallRows = (*interiorMap)[feSpace].nOverallDofs * nComponents;

@@ -276,7 +276,7 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverSchur::solvePetscMatrix()");

-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];
     int nComponents = vec.getSize();

     KSPCreate(mpiCommGlobal, &kspInterior);

@@ -345,7 +345,7 @@ namespace AMDiS {

     TEST_EXIT(seqMat)("No DOFMatrix!\n");

-    const FiniteElemSpace* feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace* feSpace = componentSpaces[0];

     using mtl::tag::row; using mtl::tag::nz; using mtl::begin; using mtl::end;
     namespace traits= mtl::traits;
     typedef DOFMatrix::base_matrix_type Matrix;

@@ -431,7 +431,7 @@ namespace AMDiS {
   void PetscSolverSchur::setDofVector(Vec& petscVec, DOFVector<double>* vec,
                                      int dispMult, int dispAdd, bool rankOnly)
   {
-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+    const FiniteElemSpace *feSpace = componentSpaces[0];

     DOFVector<double>::Iterator dofIt(vec, USED_DOFS);
     for (dofIt.reset(); !dofIt.end(); ++dofIt) {
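
Usage sketch (illustrative only, not part of the patch): with these changes a PETSc solver owns its parallel DOF mappings (dofMap, dofMapSd) and registers them with the mesh distributor through MeshDistributor::registerDofMap(), instead of reading a shared mapping via the removed MeshDistributor::getDofMap(). A minimal sketch of the new wiring, assuming that meshDistributor and a stationary problem probStat have already been created and initialized elsewhere:

    // The PetscSolver constructor creates dofMap/dofMapSd (FESPACE_WISE) and
    // calls setDofMapping(&dofMap) on itself, see PetscSolver::PetscSolver().
    PetscSolver *solver = new PetscSolverGlobalMatrix();

    // Attach the mesh distributor and the communicators first; init() needs them.
    solver->setMeshDistributor(meshDistributor,
                               meshDistributor->getMpiComm(),
                               PETSC_COMM_SELF);

    // init() initializes the solver-owned DOF mappings and registers them via
    // meshDistributor->registerDofMap(), so that both mappings are updated
    // automatically after every mesh change (see
    // MeshDistributor::updateLocalGlobalNumbering()).
    solver->init(probStat->getComponentSpaces(), probStat->getFeSpaces());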