Commit 3d761c3f authored by Oliver Sander, committed by sander

Use GlobalIndexSet class from dune-grid, instead of the local copy

The local copy was there to get the feature developed and working.  Now that it is
working, we upstream it into dune-grid.

[[Imported from SVN: r9922]]
parent a7b59f97
/** \file
*
* \brief Implement the functionality that provides a globally unique index for
* all kinds of entities of a Dune grid. Such functionality is relevant
* for a number of cases:
* e.g. to map a degree of freedom associated with an entity to its
* location in a global matrix or global vector; e.g. to associate data
* with entities in an adaptive mesh refinement scheme.
*
* Nota bene: the functionality to provide a globally unique index for an entity over all
* processes in a distributed memory parallel compute environment is at present
* not available in the Dune framework; therefore, we implement such functionality in a class
* that is templatized by the GridView type and the codimension of the entity for which the
* globally unique index shall be computed.
*
* Method: (1) we use the UniqueEntityPartition class that reports if a specific entity belongs
* to a specific process;
*
* (2) we compute the number of entities that are owned by the specific process;
*
* (3) we communicate the index of entities that are owned by the process to processes
* that also contain these entities but do not own them, so that on a non-owner process
* we have information on the index of the entity that it got from the owner-process;
*
* Thus, we can access the same element in a global, distributed matrix that is managed
* by all processes; a typical case would be using matrix and vector routines from the PETSc
* or Trilinos parallel linear algebra packages for distributed memory parallel computers.
*
* Copyright by Benedikt Oswald and Patrick Leidenberger, 2002-2010. All rights reserved.
*
* \author Benedikt Oswald, Patrick Leidenberger
*
* \warning None
*
* \attention globally unique indices are ONLY provided for entities of the
* InteriorBorder_Partition type, NOT for the Ghost_Partition type !!!
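*
* \par Example
* A minimal usage sketch (not part of the original source); the grid view object and the
* vertex entity are assumed to be provided by the surrounding application code:
* \code
* // build a globally unique index for every vertex (codim == dimension) of the grid view
* Dune::GlobalIndexSet<GridView, GridView::dimension> vertexIndexSet(gridView);
*
* // total number of vertices over all processes, counted without duplicates
* unsigned int numGlobalVertices = vertexIndexSet.nGlobalEntity();
*
* // globally unique index of a given vertex entity
* int globalVertexIndex = vertexIndexSet.index(vertex);
* \endcode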
*/
#ifndef GLOBALUNIQUEINDEX_HH_
#define GLOBALUNIQUEINDEX_HH_
/** \brief Include standard header files. */
#include <vector>
#include <iostream>
#include <fstream>
#include <map>
#include <utility>
/** include base class functionality for the communication interface */
#include <dune/grid/common/datahandleif.hh>
// Include proprietary header files.
#include "uniqueentitypartition.hh"
#include <dune/common/version.hh>
/** include parallel capability */
#if HAVE_MPI
#include <dune/common/parallel/mpihelper.hh>
#endif
/*********************************************************************************************/
/* calculate a globally unique index, over all processes, for all entities of a given      */
/* codim in a given GridView of a Dune grid that is distributed over the processes of a    */
/* distributed memory parallel environment, assuming they all have the same geometry,      */
/* i.e. codim, type                                                                         */
/*********************************************************************************************/
namespace Dune {
template<class GridView, int CODIM>
class GlobalIndexSet
{
private:
/** define data types */
typedef typename GridView::Grid Grid;
typedef typename GridView::Grid::GlobalIdSet GlobalIdSet;
typedef typename GridView::Grid::GlobalIdSet::IdType IdType;
typedef typename GridView::Traits::template Codim<0>::Iterator Iterator;
typedef typename GridView::Traits::template Codim<CODIM>::Entity Entity;
#if HAVE_MPI
/** define data type related to collective communication */
typedef Dune::CollectiveCommunication<MPI_Comm> CollectiveCommunication;
#else
typedef typename Grid::CollectiveCommunication CollectiveCommunication;
#endif
typedef std::map<IdType,int> MapId2Index;
typedef std::map<int,int> IndexMap;
typedef UniqueEntityPartition<GridView,CODIM> UniqueEntityPartitionType;
private:
/* A DataHandle class to communicate the global index from the
* owning to the non-owning entity; the class is based on the MinimumExchange
* class in the parallelsolver.hh header file.
*/
template<class GlobalIdSet, class IdType, class Map, typename ValueType>
class IndexExchange : public Dune::CommDataHandleIF<IndexExchange<GlobalIdSet,IdType,Map,ValueType>,ValueType>
{
public:
//! returns true if data for this codim should be communicated
bool contains (int dim, int codim) const
{
return(codim==CODIM);
}
//! returns true if size per entity of given dim and codim is a constant
bool fixedsize (int dim, int codim) const
{
return(true);
}
/*! how many objects of type DataType have to be sent for a given entity
*
* Note: Only the sender side needs to know this size. */
template<class EntityType>
size_t size (EntityType& e) const
{
return(1);
}
/*! pack data from user to message buffer */
template<class MessageBuffer, class EntityType>
void gather (MessageBuffer& buff, const EntityType& e) const
{
IdType id=globalidset_.id(e);
buff.write((*mapid2entity_.find(id)).second);
}
/*! unpack data from message buffer to user
n is the number of objects sent by the sender
*/
template<class MessageBuffer, class EntityType>
void scatter (MessageBuffer& buff, const EntityType& entity, size_t n)
{
ValueType x;
buff.read(x);
/** only if the incoming index is a valid one,
i.e. if it is non-negative, will it be
inserted as the global index; the enclosing
class, i.e. GlobalIndexSet, makes sure
that non-owning processes use -1 to mark an entity
that they do not own.
*/
if(x >= 0)
{
IdType id = globalidset_.id(entity);
mapid2entity_.erase(id);
mapid2entity_.insert(std::make_pair(id,x));
const int lindex = indexSet_.index(entity);
localGlobalMap_[lindex] = x;
globalLocalMap_[x] = lindex;
}
}
//! constructor
IndexExchange (const GlobalIdSet& globalidset, Map& mapid2entity, int &rank,
const typename GridView::IndexSet& localIndexSet, IndexMap& localGlobal, IndexMap& globalLocal)
: globalidset_(globalidset),
mapid2entity_(mapid2entity),
rank_(rank),
indexSet_(localIndexSet),
localGlobalMap_(localGlobal),
globalLocalMap_(globalLocal)
{
}
private:
const GlobalIdSet& globalidset_;
Map& mapid2entity_;
int& rank_;
const typename GridView::IndexSet& indexSet_;
IndexMap& localGlobalMap_;
IndexMap& globalLocalMap_;
};
public:
/**********************************************************************************************************************/
/* \brief class constructor - when the class is instantiated by passing a const reference to a GridView object,      */
/* we calculate the complete set of globally unique indices so that we can                                           */
/* later query the global index by directly passing the entity in question: the respective global                    */
/* index is then returned.                                                                                            */
/**********************************************************************************************************************/
GlobalIndexSet(const GridView& gridview)
: gridview_(gridview),
uniqueEntityPartition_(gridview)
{
int rank = gridview.comm().rank();
int size = gridview.comm().size();
nLocalEntity_=0;
nGlobalEntity_=0;
nLocalEntity_ = uniqueEntityPartition_.numOwners();
/** compute the global, non-redundant number of entities, i.e. the number of entities in the set
* without duplicate (redundant) entities on the interprocessor boundary, via a global reduce. */
const CollectiveCommunication& collective = gridview_.comm();
nGlobalEntity_ = collective.template sum<int>(nLocalEntity_);
/** communicate the number of locally owned entities to all other processes so that the respective offset
* can be calculated on the respective processor; we use the Dune mpi collective communication facility
* for this; first, we gather the number of locally owned entities on the root process and, second, we
* broadcast the array to all processes where the respective offset can be calculated. */
std::vector<int> offset(size);
std::fill(offset.begin(), offset.end(), 0);
/** gather number of locally owned entities on root process */
collective.template gather<int>(&nLocalEntity_,offset.data(),1,0);
/** broadcast the array containing the number of locally owned entities to all processes */
collective.template broadcast<int>(offset.data(),size,0);
indexOffset_.clear();
indexOffset_.resize(size,0);
for(unsigned int ii=0; ii<indexOffset_.size(); ++ii)
for(unsigned int jj=0; jj < ii; ++jj)
indexOffset_[ii] += offset[jj];
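/** e.g., if the gathered per-rank counts in 'offset' are {4, 3, 5} for ranks 0, 1, 2,
* then indexOffset_ becomes {0, 4, 7}; these are purely illustrative values, not part of the original code. */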
/** compute globally unique index over all processes; the idea of the algorithm is as follows: if
* an entity is owned by the process, it is assigned an index that is the addition of the offset
* specific for this process and a consecutively incremented counter; if the entity is not owned
* by the process, it is assigned -1, which signals that this specific entity will get its global
* unique index through communication afterwards;
*
* thus, the calculation of the globally unique index is divided into 2 stages:
*
* (1) we calculate the global index independently;
*
* (2) we achieve parallel adjustment by communicating the index
* from the owning entity to the non-owning entity.
*
*/
/** 1st stage of global index calculation: calculate global index for owned entities */
/** initialize the map that stores an entity's global index with its globally unique id as key */
MapId2Index globalIndex;
const typename GridView::IndexSet& indexSet = gridview.indexSet();
const GlobalIdSet &globalIdSet=gridview_.grid().globalIdSet(); /** retrieve globally unique Id set */
int myoffset=indexOffset_[rank];
int globalcontrib=0; /** initialize contribution for the global index */
std::vector<bool> firstTime(gridview_.size(CODIM));
std::fill(firstTime.begin(), firstTime.end(), true);
for(Iterator iter = gridview_.template begin<0>();iter!=gridview_.template end<0>(); ++iter)
{
#if DUNE_VERSION_NEWER(DUNE_GRID,2,4)
for (size_t i=0; i<iter->subEntities(CODIM); i++)
#else
for (size_t i=0; i<iter->template count<CODIM>(); i++)
#endif
{
IdType id=globalIdSet.subId(*iter,i,CODIM); /** retrieve the entity's id */
int idx = gridview_.indexSet().subIndex(*iter,i,CODIM);
if (! firstTime[idx] )
continue;
firstTime[idx] = false;
if(uniqueEntityPartition_.owner(idx) == true) /** if the entity is owned by the process, go ahead with computing the global index */
{
const int gindex = myoffset + globalcontrib; /** compute global index */
globalIndex.insert(std::make_pair(id,gindex)); /** insert pair (key, value) into the map */
const int lindex = idx;
localGlobalMap_[lindex] = gindex;
globalLocalMap_[gindex] = lindex;
globalcontrib++; /** increment contribution to global index */
}
else /** if entity is not owned, insert -1 to signal not yet calculated global index */
{
globalIndex.insert(std::make_pair(id,-1));
}
}
}
/** 2nd stage of global index calculation: communicate global index for non-owned entities */
// Create the data handle and communicate.
IndexExchange<GlobalIdSet,IdType,MapId2Index,int> dataHandle(globalIdSet,globalIndex,rank,indexSet,localGlobalMap_,globalLocalMap_);
gridview_.communicate(dataHandle, Dune::All_All_Interface, Dune::ForwardCommunication);
}
/** \brief Given a local index, retrieve its globally unique index over all processes. */
int index(const int& localIndex) const {
return localGlobalMap_.find(localIndex)->second;
}
int localIndex(const int& globalIndex) const {
return globalLocalMap_.find(globalIndex)->second;
}
int index(const typename GridView::template Codim<CODIM>::Entity& entity) const
{
return localGlobalMap_.find(gridview_.indexSet().index(entity))->second;
}
int localIndex(const typename GridView::template Codim<CODIM>::Entity& entity) const {
return gridview_.indexSet().index(entity);
}
inline unsigned int nGlobalEntity() const
{
return(nGlobalEntity_);
}
inline unsigned int nOwnedLocalEntity() const
{
return(nLocalEntity_);
}
protected:
/** store data members */
const GridView gridview_; /** store a const reference to a gridview */
UniqueEntityPartitionType uniqueEntityPartition_;
int nLocalEntity_; /** store the number of entities that are owned by the local process */
int nGlobalEntity_; /** store the global number of entities, i.e. the number of entities without redundant entities on interprocessor boundaries */
std::vector<int> indexOffset_; /** store offset of entity index on every process */
IndexMap localGlobalMap_;
IndexMap globalLocalMap_;
};
} // namespace Dune
#endif /* GLOBALUNIQUEINDEX_HH_ */
@@ -13,7 +13,6 @@
// Include Dune header files
#include <dune/common/version.hh>
#include "uniqueentitypartition.hh"
/** include parallel capability */
#if HAVE_MPI
@@ -35,9 +34,9 @@ namespace Dune {
P2BasisMapper<GridView> p2Mapper(gridView);
-GlobalIndexSet<GridView,2> globalVertexIndex(gridView);
-GlobalIndexSet<GridView,1> globalEdgeIndex(gridView);
-GlobalIndexSet<GridView,0> globalElementIndex(gridView);
+GlobalIndexSet<GridView> globalVertexIndex(gridView,2);
+GlobalIndexSet<GridView> globalEdgeIndex(gridView,1);
+GlobalIndexSet<GridView> globalElementIndex(gridView,0);
// total number of degrees of freedom
nGlobalEntity_ = globalVertexIndex.nGlobalEntity() + globalEdgeIndex.nGlobalEntity() + globalElementIndex.nGlobalEntity();
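The hunk above reflects the interface of the upstreamed class: the dune-grid GlobalIndexSet takes the codimension as a constructor argument instead of a template parameter. A minimal before/after sketch, assuming a gridView object as in the surrounding code (variable names are illustrative only):

// before: local dune-gfe copy, codimension as a template parameter
GlobalIndexSet<GridView,2> vertexIndexOld(gridView);

// after: dune-grid class, codimension as a constructor argument
GlobalIndexSet<GridView> vertexIndexNew(gridView, 2);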
@@ -5,7 +5,7 @@
#include <dune/istl/matrixindexset.hh>
-#include <dune/gfe/parallel/globalindex.hh>
+#include <dune/grid/utility/globalindexset.hh>
#include <dune/gfe/parallel/mpifunctions.hh>
/** \file
* \brief implement a universally usable, unique partitioning of entities (vertex, edge, face, element)
* in a Dune grid; it is a general problem in parallel computations to assign entities to processes
* in a unique way; thus, we need a unique criterion; here, we use the rule that an entity
* is always assigned to the process with the lowest rank.
*
*
* In particular, it is the objective to provide this functionality in a unique way so that it can
* be used in a wide range of an application's needs: e.g., it can be used to assign faces,
* situated on the interprocessor boundary, in a unique way, i.e. the face is always assigned to
* the process with the lower rank. A concrete application of this mechanism is the implementation
* of the transparent integral boundary condition to remove redundant triangular faces of the
* fictitious boundary surface. Indeed, the implementation of the UniqueEntityPartition class has
* been motivated by this need. However, its usage is much wider. It will also be the underlying
* infrastructure to calculate global, consecutive, zero-starting indices for grid entities over all
* the processes over which the grid is partitioned.
*
*
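* \par Example
* A minimal usage sketch (not part of the original source); the grid view and the face entity
* are assumed to come from the surrounding application code:
* \code
* // decide, for every face (codim 1) of the grid view, whether this process owns it
* UniqueEntityPartition<GridView,1> facePartition(gridView);
*
* size_t numOwnedFaces = facePartition.numOwners(); // number of faces owned by this rank
* bool   ownsThisFace  = facePartition.owner(face); // true if this rank owns 'face'
* \endcode
*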
* \author Benedikt Oswald, Patrick Leidenberger
*
* \warning None
*
* \attention
*
* \bug
*
* \todo
*/
#ifndef ENTITY_PARTITIONING_HH
#define ENTITY_PARTITIONING_HH
/** \brief Include standard header files. */
#include <vector>
#include <iostream>
#include <fstream>
#include <map>
#include <utility>
#include <dune/common/version.hh>
/** Include base class functionality for the communication interface */
#include <dune/grid/common/datahandleif.hh>
/*********************************************************************************************/
/* calculate unique partitioning for all entities of a given codim in a given GridView, */
/* assuming they all have the same geometry, i.e. codim, type */
/*********************************************************************************************/
template<class GridView, int CODIM>
class UniqueEntityPartition
{
private:
/* A DataHandle class to calculate the minimum of a std::vector that is accompanied by an index set */
template<class IS, class V> // mapper type and vector type
class MinimumExchange : public Dune::CommDataHandleIF<MinimumExchange<IS,V>,typename V::value_type>
{
public:
//! export type of data for message buffer
typedef typename V::value_type DataType;
//! returns true if data for this codim should be communicated
bool contains (int dim, int codim) const
{
return(codim==CODIM);
}
//! returns true if size per entity of given dim and codim is a constant
bool fixedsize (int dim, int codim) const
{
return(true);
}
/*! how many objects of type DataType have to be sent for a given entity
*
* Note: Only the sender side needs to know this size. */
template<class EntityType>
size_t size (EntityType& e) const
{
return(1);
}
/*! pack data from user to message buffer */
template<class MessageBuffer, class EntityType>
void gather (MessageBuffer& buff, const EntityType& e) const
{
buff.write(v_[indexset_.index(e)]);
}
/*! unpack data from message buffer to user
n is the number of objects sent by the sender
*/
template<class MessageBuffer, class EntityType>
void scatter (MessageBuffer& buff, const EntityType& e, size_t n)
{
DataType x; buff.read(x);
if (x>=0) // a value of -1 from the other side means it does not claim ownership
{
v_[indexset_.index(e)] = std::min(x,v_[indexset_.index(e)]);
}
}
//! constructor
MinimumExchange (const IS& indexset, V& v)
: indexset_(indexset),
v_(v)
{
;
}
private:
const IS& indexset_;
V& v_;
};
public:
/** declare often used types */
typedef typename GridView::Traits::template Codim<CODIM>::Iterator Iterator;
typedef typename GridView::Traits::template Codim<CODIM>::Entity Entity;
/*! \brief Constructor needs to know the grid function space
*/
UniqueEntityPartition (const GridView& gridview)
: gridview_(gridview),
assignment_(gridview.size(CODIM)),
rank_(gridview_.comm().rank())
{
/** extract types from the GridView data type */
typedef typename GridView::IndexSet IndexSet;
// assign own rank to entities that I might have
for(auto it = gridview_.template begin<0>();it!=gridview_.template end<0>(); ++it)
#if DUNE_VERSION_NEWER(DUNE_GRID,2,4)
for (int i=0; i<it->subEntities(CODIM); i++)
#else
for (int i=0; i<it->template count<CODIM>(); i++)
#endif
{
assignment_[gridview_.indexSet().template subIndex(*it,i,CODIM)]
= ( (it->template subEntity<CODIM>(i)->partitionType()==Dune::InteriorEntity) || (it->template subEntity<CODIM>(i)->partitionType()==Dune::BorderEntity) )
? rank_ // set to own rank
: - 1; // it is a ghost entity, I cannot possibly own it.
}
/** exchange entity index through communication */
MinimumExchange<IndexSet,std::vector<int> > dh(gridview_.indexSet(),assignment_);
gridview_.communicate(dh,Dune::All_All_Interface,Dune::ForwardCommunication);
/* convert vector of minimum ranks to assignment vector */
for (size_t i=0; i<assignment_.size(); i++)
assignment_[i] = (assignment_[i] == rank_) ? 1 : 0;
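// e.g. on rank 0, minimum ranks {0, 2, 0, 1} turn into ownership flags {1, 0, 1, 0} (illustrative values only)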
}
/** answer the question whether the entity with the given local index belongs to this process */
bool owner(size_t i)
{
return assignment_[i];
}
size_t numOwners() const
{
return std::accumulate(assignment_.begin(), assignment_.end(), 0);
}
/** answer the question whether the given entity belongs to this process */
bool owner(const Entity& entity)
{
return(assignment_[gridview_.indexSet().template index(entity)]);
}
private:
/** declare private data members */
const GridView& gridview_;
std::vector<int> assignment_;
int rank_;
};
#endif /** ENTITY_PARTITIONING_HH */
@@ -3,7 +3,7 @@
#include <vector>
-#include <dune/gfe/parallel/globalindex.hh>
+#include <dune/grid/utility/globalindexset.hh>
#include <dune/gfe/parallel/mpifunctions.hh>
@@ -183,8 +183,8 @@ setup(const GridType& grid,
// If we are on more than 1 processors, join all local transfer matrices on rank 0,
// and construct a single global transfer operator there.
-typedef Dune::GlobalIndexSet<typename GridType::LeafGridView, gridDim> LeafP1GUIndex;
-LeafP1GUIndex p1Index(grid_->leafGridView());
+typedef Dune::GlobalIndexSet<typename GridType::LeafGridView> LeafP1GUIndex;
+LeafP1GUIndex p1Index(grid_->leafGridView(), gridDim);
typedef typename TruncatedCompressedMGTransfer<CorrectionType>::TransferOperatorType TransferOperatorType;
MatrixCommunicator<GUIndex,
@@ -203,9 +203,9 @@ setup(const GridType& grid,
// If we are on more than 1 processors, join all local transfer matrices on rank 0,
// and construct a single global transfer operator there.
-typedef Dune::GlobalIndexSet<typename GridType::LevelGridView, gridDim> LevelGUIndex;
-LevelGUIndex fineGUIndex(grid_->levelGridView(i+2));
-LevelGUIndex coarseGUIndex(grid_->levelGridView(i+1));
+typedef Dune::GlobalIndexSet<typename GridType::LevelGridView> LevelGUIndex;
+LevelGUIndex fineGUIndex(grid_->levelGridView(i+2), gridDim);
+LevelGUIndex coarseGUIndex(grid_->levelGridView(i+1), gridDim);
typedef typename TruncatedCompressedMGTransfer<CorrectionType>::TransferOperatorType TransferOperatorType;
MatrixCommunicator<LevelGUIndex,
@@ -18,7 +18,7 @@
#include <dune/fufem/functionspacebases/p3nodalbasis.hh>
#include "geodesicfeassembler.hh"
-#include <dune/gfe/parallel/globalindex.hh>
+#include <dune/grid/utility/globalindexset.hh>
#include <dune/gfe/parallel/globalp2mapper.hh>
/** \brief Riemannian trust-region solver for geodesic finite-element problems */