From 499eda349bafd50f90b73555621c7593432cffef Mon Sep 17 00:00:00 2001
From: "Praetorius, Simon" <simon.praetorius@tu-dresden.de>
Date: Wed, 28 Aug 2019 12:54:38 +0200
Subject: [PATCH] update UniqueBorderPartition to support UGGrid

---
 .../linearalgebra/istl/Communication.inc.hpp |  64 +++---
 src/amdis/utility/UniqueBorderPartition.hpp  | 203 ++++++++++++++++--
 test/UniqueBorderPartitionTest.cpp           |  13 +-
 3 files changed, 228 insertions(+), 52 deletions(-)

diff --git a/src/amdis/linearalgebra/istl/Communication.inc.hpp b/src/amdis/linearalgebra/istl/Communication.inc.hpp
index 299a1ce3..89c60224 100644
--- a/src/amdis/linearalgebra/istl/Communication.inc.hpp
+++ b/src/amdis/linearalgebra/istl/Communication.inc.hpp
@@ -83,18 +83,20 @@ void ISTLCommunication<Basis>
   auto const& gv = basis.gridView();
 
   // make disjoint partition of border entities
-  EntitySet borderEntities;
-  using DataHandle = UniqueBorderPartitionDataHandle<Grid>;
-  DataHandle handle(gv.comm().rank(), borderEntities, gv.grid().globalIdSet());
-  gv.communicate(handle,
-                 Dune::InterfaceType::InteriorBorder_InteriorBorder_Interface,
-                 Dune::CommunicationDirection::ForwardCommunication);
+  using DataHandle = UniqueBorderPartition<Grid>;
+  DataHandle borderEntities(gv.comm().rank(), gv.grid());
+  for (int i = 0; i < borderEntities.numIterations(); ++i) {
+    gv.communicate(borderEntities,
+                   Dune::InterfaceType::InteriorBorder_All_Interface,
+                   Dune::CommunicationDirection::ForwardCommunication);
+  }
 
   if (gv.overlapSize(0) + gv.ghostSize(0) == 0)
     // TODO(FM): Add support for this special case
     error_exit("Using grids with ghostSize(0) + overlapSize(0) == 0 is not supported\n");
 
   auto lv = basis.localView();
+  std::vector<bool> visited(basis.dimension(), false);
   GlobalBasisIdSet<Basis> dofIdSet(basis);
 
   pis.beginResize();
@@ -104,28 +106,34 @@ void ISTLCommunication<Basis>
     dofIdSet.bind(e);
     for (std::size_t i = 0; i < dofIdSet.size(); ++i)
     {
-      LocalCommIdType localId = lv.index(i);
-      GlobalCommIdType globalId = dofIdSet.id(i);
-      PType attribute = dofIdSet.partitionType(i);
-      switch (attribute)
-      {
-      case PType::InteriorEntity:
-        pis.add(globalId, LI(localId, Attribute::owner, true));
-        break;
-      case PType::BorderEntity:
-        // mark border entity as owned iff it is part of the process's borderEntities set
-        if (borderEntities.count(dofIdSet.entityId(i)) == 1)
-          pis.add(globalId, LI(localId, Attribute::owner, true));
-        else
-          pis.add(globalId, LI(localId, Attribute::overlap, true));
-        break;
-      case PType::OverlapEntity:
-      case PType::FrontEntity:
-        pis.add(globalId, LI(localId, Attribute::overlap, true));
-        break;
-      case PType::GhostEntity:
-        pis.add(globalId, LI(localId, Attribute::copy, true));
-        break;
+      LocalCommIdType localIndex = lv.index(i);
+      if (!visited[localIndex]) {
+        GlobalCommIdType globalId = dofIdSet.id(i);
+        PType attribute = dofIdSet.partitionType(i);
+        switch (attribute)
+        {
+        case PType::InteriorEntity:
+          pis.add(globalId, LI(localIndex, Attribute::owner, true));
+          break;
+        case PType::BorderEntity:
+          // mark border entity as owned iff it is part of the process's borderEntities set
+          if (borderEntities.contains(dofIdSet.entityId(i)))
+            pis.add(globalId, LI(localIndex, Attribute::owner, true));
+          else
+            pis.add(globalId, LI(localIndex, Attribute::overlap, true));
+          break;
+        case PType::OverlapEntity:
+        case PType::FrontEntity:
+          pis.add(globalId, LI(localIndex, Attribute::overlap, true));
+          break;
+        case PType::GhostEntity:
+          pis.add(globalId, LI(localIndex, Attribute::copy, true));
+          break;
+        default:
+          error_exit("Unknown partition type.");
+        }
+
+        visited[localIndex] = true;
       }
     }
     dofIdSet.unbind();
diff --git a/src/amdis/utility/UniqueBorderPartition.hpp b/src/amdis/utility/UniqueBorderPartition.hpp
index 6a3000b2..f5fc1b67 100644
--- a/src/amdis/utility/UniqueBorderPartition.hpp
+++ b/src/amdis/utility/UniqueBorderPartition.hpp
@@ -6,6 +6,12 @@
 #include <dune/common/unused.hh>
 #include <dune/grid/common/datahandleif.hh>
 
+namespace Dune
+{
+  // forward declaration
+  template <int dim> class UGGrid;
+}
+
 namespace AMDiS
 {
   /// \brief Determine for each border entity which processor owns it
@@ -18,8 +24,8 @@ namespace AMDiS
    * be communicated. Here we assign the entity to the processor with the lowest rank.
    **/
   template <class Grid>
-  class UniqueBorderPartitionDataHandle
-      : public Dune::CommDataHandleIF<UniqueBorderPartitionDataHandle<Grid>, int>
+  class UniqueBorderPartition
+      : public Dune::CommDataHandleIF<UniqueBorderPartition<Grid>, int>
   {
     using IdSet = typename Grid::GlobalIdSet;
     using IdType = typename IdSet::IdType;
@@ -32,22 +38,19 @@ namespace AMDiS
     /// communicator.
     /**
      * \param rank The own processor rank
-     * \param borderEntities A set of entity ids filled with all border entities
-     *        owned by this processor after communication.
      * \param idSet The id set of entity ids to store in borderEntities,
      *        typically the grid globalIdSet.
      *
      * NOTE: Since idSet is stored by reference it must not go out of scope
      * until all calls to \ref gather and \ref scatter are finished.
      **/
-    UniqueBorderPartitionDataHandle(int rank, EntitySet& borderEntities, IdSet const& idSet)
+    UniqueBorderPartition(int rank, Grid const& grid)
      : myrank_(rank)
-      , borderEntities_(&borderEntities)
-      , idSet_(idSet)
+      , idSet_(grid.globalIdSet())
     {}
 
-    // Communicate all entities except for grid elements
-    bool contains(int /*dim*/, int codim) const { return codim != 0; }
+    // Communicate all entities
+    bool contains(int /*dim*/, int /*codim*/) const { return true; }
 
     // communicate exactly one integer, the rank
     bool fixedSize(int /*dim*/, int /*codim*/) const { return true; }
@@ -60,27 +63,197 @@ namespace AMDiS
     void gather(MessageBuffer& buff, Entity const& e) const
     {
       buff.write(myrank_);
-      // insert all border entities
-      borderEntities_->insert(idSet_.id(e));
     }
 
     template <class MessageBuffer, class Entity>
     void scatter(MessageBuffer& buff, Entity const& e, std::size_t n)
+    {
+      scatterImpl(buff, e, n, int_t<Entity::codimension>{});
+    }
+
+    /// Returns whether id is owned by this rank
+    bool contains(IdType const& id) const
+    {
+      return notOwner_.count(id) == 0;
+    }
+
+    /// Number of iterations to perform the communication in order to collect all neighboring entities
+    int numIterations() const
+    {
+      return 1;
+    }
+
+  private:
+
+    template <class MessageBuffer, class Entity, int cd>
+    void scatterImpl(MessageBuffer& buff, Entity const& e, std::size_t n, int_t<cd>)
+    {
+      DUNE_UNUSED_PARAMETER(n); // n == 1
+      assert(n == 1);
+
+      int rank = 0;
+      buff.read(rank);
+
+      // insert only border entities that are owned by other processors, i.e. rank > myrank
+      // Those entities are not owned by this rank.
+      if (rank > myrank_)
+        notOwner_.insert(idSet_.id(e));
+    }
+
+    template <class MessageBuffer, class Entity>
+    void scatterImpl(MessageBuffer& buff, Entity const& e, std::size_t n, int_t<0>)
+    {
+      DUNE_UNUSED_PARAMETER(n); // n == 1
+      assert(n == 1);
+
+      int rank = 0;
+      buff.read(rank);
+
+      // insert only border entities that are owned by other processors, i.e. rank > myrank
+      // Those entities are not owned by this rank.
+      if (rank > myrank_) {
+        for (int codim = 1; codim <= Grid::dimension; ++codim) {
+          for (int i = 0; i < int(e.subEntities(codim)); ++i) {
+            notOwner_.insert(idSet_.subId(e, i, codim));
+          }
+        }
+      }
+    }
+
+  private:
+    int myrank_;
+    EntitySet notOwner_;
+    IdSet const& idSet_;
+  };
+
+
+  /// Specialization for UGGrid, which cannot communicate over edges.
+  template <int dim>
+  class UniqueBorderPartition<Dune::UGGrid<dim>>
+      : public Dune::CommDataHandleIF<UniqueBorderPartition<Dune::UGGrid<dim>>, int>
+  {
+    using Grid = Dune::UGGrid<dim>;
+    using IdSet = typename Grid::LocalIdSet;
+    using IdType = typename IdSet::IdType;
+
+  public:
+    using EntitySet = std::set<IdType>;
+
+  public:
+    /// \brief Construct a UniqueBorderPartition DataHandle to be used in a GridView
+    /// communicator.
+    /**
+     * \param rank The own processor rank
+     * \param grid The grid whose localIdSet is used to identify the entities
+     *        that are assigned an owner rank.
+     *
+     * NOTE: Since the grid's id set is stored by reference, the grid must not go
+     * out of scope until all calls to \ref gather and \ref scatter are finished.
+     **/
+    UniqueBorderPartition(int rank, Grid const& grid)
+      : myrank_(rank)
+      , idSet_(grid.localIdSet())
+    {}
+
+    // Communicate all entities
+    bool contains(int /*dim*/, int /*codim*/) const { return true; }
+
+    // see size()
+    bool fixedSize(int /*dim*/, int codim) const { return codim != 0; }
+
+    // for codim=0 elements communicate data for all subEntities, otherwise communicate just the owner rank
+    template <class Entity>
+    std::size_t size(Entity const& e) const
+    {
+      if (Entity::codimension != 0)
+        return 1;
+
+      std::size_t s = 0;
+      for (int codim = 1; codim <= Grid::dimension; ++codim)
+        s += e.subEntities(codim);
+      return s;
+    }
+
+    template <class MessageBuffer, class Entity>
+    void gather(MessageBuffer& buff, Entity const& e) const
+    {
+      gatherImpl(buff, e, int_t<Entity::codimension>{});
+    }
+
+    template <class MessageBuffer, class Entity>
+    void scatter(MessageBuffer& buff, Entity const& e, std::size_t n)
+    {
+      scatterImpl(buff, e, n, int_t<Entity::codimension>{});
+    }
+
+    /// Returns whether id is owned by this rank
+    bool contains(IdType const& id) const
+    {
+      return entityToRank_[id] == myrank_;
+    }
+
+    /// Number of iterations to perform the communication in order to collect all neighboring entities
+    int numIterations() const
+    {
+      return Grid::dimension;
+    }
+
+  private:
+    template <class MessageBuffer, class Entity, int cd>
+    void gatherImpl(MessageBuffer& buff, Entity const& e, int_t<cd>) const
+    {
+      int& rank = entityToRank_[idSet_.id(e)];
+      rank = std::max(rank, myrank_);
+      buff.write(rank);
+    }
+
+    template <class MessageBuffer, class Entity>
+    void gatherImpl(MessageBuffer& buff, Entity const& e, int_t<0>) const
+    {
+      // maybe use global unique numbering (?)
+      for (int codim = 1; codim <= Grid::dimension; ++codim) {
+        for (int i = 0; i < int(e.subEntities(codim)); ++i) {
+          int& rank = entityToRank_[idSet_.subId(e, i, codim)];
+          rank = std::max(rank, myrank_);
+          buff.write(rank);
+        }
+      }
+    }
+
+    template <class MessageBuffer, class Entity, int cd>
+    void scatterImpl(MessageBuffer& buff, Entity const& e, std::size_t n, int_t<cd>)
     {
       DUNE_UNUSED_PARAMETER(n); // n == 1
       assert(n == 1);
 
       int rank = 0;
       buff.read(rank);
-      // remove all border entities with rank < myrank_
-      if (rank < myrank_)
-        borderEntities_->erase(idSet_.id(e));
+
+      int& storedRank = entityToRank_[idSet_.id(e)];
+      storedRank = std::max(rank, storedRank);
+    }
+
+    template <class MessageBuffer, class Entity>
+    void scatterImpl(MessageBuffer& buff, Entity const& e, std::size_t n, int_t<0>)
+    {
+      std::size_t j = 0;
+      for (int codim = 1; codim <= Grid::dimension; ++codim) {
+        for (int i = 0; i < int(e.subEntities(codim)); ++i, ++j) {
+          assert(j < n);
+          int rank = 0;
+          buff.read(rank);
+
+          int& storedRank = entityToRank_[idSet_.subId(e, i, codim)];
+          storedRank = std::max(rank, storedRank);
+        }
+      }
     }
 
   private:
     int myrank_;
-    EntitySet* borderEntities_;
+    mutable std::map<IdType, int> entityToRank_;
     IdSet const& idSet_;
   };
 
 } // end namespace AMDiS
+
diff --git a/test/UniqueBorderPartitionTest.cpp b/test/UniqueBorderPartitionTest.cpp
index e23f9522..14def3d9 100644
--- a/test/UniqueBorderPartitionTest.cpp
+++ b/test/UniqueBorderPartitionTest.cpp
@@ -11,17 +11,12 @@ template <class GridView>
 void test(GridView const& gv)
 {
   using Grid = typename GridView::Grid;
-  using DataHandle = UniqueBorderPartitionDataHandle<Grid>;
+  using DataHandle = UniqueBorderPartition<Grid>;
 
-  using EntitySet = typename DataHandle::EntitySet;
-  EntitySet borderEntities;
-
-  DataHandle handle(gv.comm().rank(), borderEntities, gv.grid().globalIdSet());
-  gv.communicate(handle,
-                 Dune::InterfaceType::InteriorBorder_InteriorBorder_Interface,
+  DataHandle borderEntities(gv.comm().rank(), gv.grid());
+  gv.communicate(borderEntities,
+                 Dune::InterfaceType::InteriorBorder_All_Interface,
                  Dune::CommunicationDirection::ForwardCommunication);
-
-  msg("#borderEntities = {}", borderEntities.size());
 }
 
 int main(int argc, char** argv)
-- 
GitLab