diff --git a/AMDiS/CMakeLists.txt b/AMDiS/CMakeLists.txt
index d9860f9f8d60762709290967deb3c20cead2fe24..072224bb85c4998c07f276c1a17d18d1df7a616c 100644
--- a/AMDiS/CMakeLists.txt
+++ b/AMDiS/CMakeLists.txt
@@ -220,7 +220,7 @@ if(ENABLE_PARALLEL_DOMAIN)
 
 	list(APPEND COMPILEFLAGS "-DHAVE_PARALLEL_DOMAIN_AMDIS=1")
 	SET(PARALLEL_DOMAIN_AMDIS_SRC
-               	${SOURCE_DIR}/parallel/ParMetisPartitioner.cc
+               	${SOURCE_DIR}/parallel/DofComm.cc
 		${SOURCE_DIR}/parallel/CheckerPartitioner.cc
 		${SOURCE_DIR}/parallel/ElementObjectData.cc
 		${SOURCE_DIR}/parallel/MeshDistributor.cc 
diff --git a/AMDiS/src/parallel/DofComm.cc b/AMDiS/src/parallel/DofComm.cc
new file mode 100644
index 0000000000000000000000000000000000000000..28a5109140ac7480d902a56d0e6e69ec038f661f
--- /dev/null
+++ b/AMDiS/src/parallel/DofComm.cc
@@ -0,0 +1,60 @@
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology 
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+
+#include "DofComm.h"
+
+namespace AMDiS {
+
+  using namespace std;
+
+  void DofComm::removeEmpty()
+  {
+    for (DataIter dit = data.begin(); dit != data.end(); ++dit) {
+      FeMapIter it = dit->second.begin();
+      while (it != dit->second.end()) {
+	if (it->second.size() == 0) {
+	  const FiniteElemSpace *fe = it->first;
+	  ++it;
+	  dit->second.erase(fe);
+	} else
+	  ++it;
+      }
+    }
+  }
+
+  
+  void DofComm::Iterator::setNextFeMap()
+  {
+    if (dataIter != dofComm.data.end()) {
+      feMapIter = dataIter->second.begin();
+      
+      if (traverseFeSpace != NULL) {
+	TEST_EXIT_DBG(dataIter->second.count(traverseFeSpace))
+	  ("Should not happen!\n");
+	
+	while (feMapIter != dataIter->second.end() &&
+	       feMapIter->first != traverseFeSpace)
+	  ++feMapIter;
+	
+	TEST_EXIT_DBG(feMapIter != dataIter->second.end() &&
+		      feMapIter->first == traverseFeSpace)
+	  ("Should not happen!\n");
+      }
+      
+      if (feMapIter != dataIter->second.end())
+	dofIter = feMapIter->second.begin();
+      
+      dofCounter = 0;
+    }
+  }
+
+}
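
[Editor's note, not part of the patch: removeEmpty() above erases map entries while iterating over the same map, which is only safe because the iterator is advanced past an element before that element is removed (here by saving the key and erasing after the increment). A minimal standalone sketch of the same idiom in its saved-iterator form, using placeholder STL types instead of the AMDiS FeMapType:

#include <map>
#include <vector>

// Remove all entries whose value is empty without invalidating the loop
// iterator: remember the current position, advance, then erase the old one.
void removeEmptyEntries(std::map<int, std::vector<int> > &data)
{
  std::map<int, std::vector<int> >::iterator it = data.begin();
  while (it != data.end()) {
    if (it->second.empty()) {
      std::map<int, std::vector<int> >::iterator old = it;
      ++it;             // step to the next element first ...
      data.erase(old);  // ... then erase the previous one; 'it' stays valid
    } else {
      ++it;
    }
  }
}
]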
diff --git a/AMDiS/src/parallel/DofComm.h b/AMDiS/src/parallel/DofComm.h
new file mode 100644
index 0000000000000000000000000000000000000000..8543beabc6fc76af0895faacc4c80b319c29cac8
--- /dev/null
+++ b/AMDiS/src/parallel/DofComm.h
@@ -0,0 +1,182 @@
+// ============================================================================
+// ==                                                                        ==
+// == AMDiS - Adaptive multidimensional simulations                          ==
+// ==                                                                        ==
+// ==  http://www.amdis-fem.org                                              ==
+// ==                                                                        ==
+// ============================================================================
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology 
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+
+
+/** \file DofComm.h */
+
+#ifndef AMDIS_DOF_COMM_H
+#define AMDIS_DOF_COMM_H
+
+#include <map>
+#include "FiniteElemSpace.h"
+#include "Global.h"
+
+namespace AMDiS {
+
+  using namespace std;
+
+  class DofComm
+  {
+  public:
+    DofComm() {}
+    
+    typedef map<const FiniteElemSpace*, DofContainer> FeMapType;
+    typedef FeMapType::iterator FeMapIter;
+    typedef map<int, FeMapType> DataType;
+    typedef DataType::iterator DataIter;
+
+    DofContainer& getDofCont(int rank, const FiniteElemSpace *feSpace)
+    {
+      return data[rank][feSpace];
+    }
+
+    void removeEmpty();
+
+    void clear()
+    {
+      data.clear();
+    }
+
+    DataType& getData()
+    {
+      return data;
+    }
+
+  protected:
+    DataType data;
+
+    friend class Iterator;
+
+  public:
+    class Iterator
+    {
+    public:
+      Iterator(DofComm &dc,
+	       const FiniteElemSpace *fe = NULL)
+	: dofComm(dc),
+	  dofCounter(-1),
+	  traverseFeSpace(fe)
+      {
+	FUNCNAME("DofComm::Iterator::Iterator()");
+
+	dataIter = dofComm.data.begin();
+
+	setNextFeMap();
+      }
+
+      inline bool end()
+      {
+	return (dataIter == dofComm.data.end());
+      }
+      
+      inline void nextRank()
+      {
+	++dataIter;
+
+	setNextFeMap();
+      }
+
+      inline void nextFeSpace()
+      {
+	++feMapIter;
+      }
+
+      inline void beginDofIter(const FiniteElemSpace *fe = NULL)
+      {
+	FUNCNAME("DofComm::Iterator::beginDofIter()");
+
+	if (fe != NULL) {
+	  feMapIter = dataIter->second.begin();
+
+	  while (feMapIter != dataIter->second.end() &&
+		 feMapIter->first != fe)
+	    ++feMapIter;
+	}
+
+	TEST_EXIT_DBG(feMapIter != dataIter->second.end())
+	  ("Should not happen!\n");
+
+	dofIter = feMapIter->second.begin();
+	dofCounter = 0;
+      }
+
+      inline bool endDofIter()
+      {
+	return (dofIter == feMapIter->second.end());
+      }
+      
+      inline void nextDof()
+      {
+	++dofIter;
+	++dofCounter;
+      }
+
+      inline int getRank()
+      {
+	return dataIter->first;
+      }
+
+      inline const FiniteElemSpace* getFeSpace()
+      {
+	return feMapIter->first;
+      }
+
+      inline DofContainer& getDofs()
+      {
+	return feMapIter->second;
+      }
+
+      inline const DegreeOfFreedom* getDof()
+      {
+	return *dofIter;
+      }
+
+      inline DegreeOfFreedom getDofIndex()
+      {
+	return **dofIter;
+      }
+
+      inline int getDofCounter()
+      {
+	return dofCounter;
+      }
+
+    protected:
+      void setNextFeMap();
+
+    protected:
+      DofComm &dofComm;
+      
+      DofComm::DataIter dataIter;
+      
+      DofComm::FeMapIter feMapIter;
+
+      DofContainer::iterator dofIter;
+
+      int dofCounter;
+
+      const FiniteElemSpace *traverseFeSpace;
+    };
+
+
+  };
+
+}
+
+#endif // AMDIS_DOF_COMM_H
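
[Editor's note, not part of the patch: the hunks below in MeshDistributor, ParallelDebug and the PETSc solvers all traverse DofComm through this nested Iterator. A minimal sketch of the intended traversal, assuming the AMDiS headers above and a DofComm instance filled by the mesh distributor (e.g. its sendDofs or recvDofs member); the function name is made up for illustration:

#include <iostream>
#include "parallel/DofComm.h"

// Visit every boundary DOF stored for one finite element space, rank by rank.
void printBoundaryDofs(AMDiS::DofComm &comm,
                       const AMDiS::FiniteElemSpace *feSpace)
{
  // Outer loop: one iteration per neighbouring rank that shares DOFs.
  for (AMDiS::DofComm::Iterator it(comm, feSpace); !it.end(); it.nextRank())
    // Inner loop: all DOFs exchanged with that rank in the given FE space.
    for (; !it.endDofIter(); it.nextDof())
      std::cout << "rank " << it.getRank()
                << ": boundary DOF index " << it.getDofIndex() << std::endl;
}
]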
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index db3769d5ac32bba92545fa7c6e900919c4fa1948..7206dc5cd2ed18a68059b236b194c2430497d1ca 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -28,6 +28,7 @@
 #include "parallel/SimplePartitioner.h"
 #include "parallel/CheckerPartitioner.h"
 #include "parallel/MpiHelper.h"
+#include "parallel/DofComm.h"
 #include "io/ElementFileWriter.h"
 #include "io/MacroInfo.h"
 #include "io/VtkWriter.h"
@@ -522,44 +523,34 @@ namespace AMDiS {
   {
     FUNCNAME("MeshDistributor::synchVector()");
 
-    int nComponents = vec.getSize();
     StdMpi<vector<double> > stdMpi(mpiComm);
 
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-
-    for (it_type sendIt = sendDofs.begin(); sendIt != sendDofs.end(); ++sendIt) {
+    for (DofComm::Iterator it(sendDofs); !it.end(); it.nextRank()) {
       vector<double> dofs;
 
-      for (int i = 0; i < nComponents; i++) {
-	TEST_EXIT_DBG(sendIt->second.count(vec.getFeSpace(i)))
-	  ("Should not happen!\n");
-
-	DofContainer &feDofs = sendIt->second[vec.getFeSpace(i)];
-	DOFVector<double>& dofVec = *(vec.getDOFVector(i));
+      for (int i = 0; i < vec.getSize(); i++) {
+	DOFVector<double> &dofVec = *(vec.getDOFVector(i));
 
-	int nFeDofs = feDofs.size();
-	for (int j = 0; j < nFeDofs; j++) 
-	  dofs.push_back(dofVec[*(feDofs[j])]);
-      }	
+	for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
+	  dofs.push_back(dofVec[it.getDofIndex()]);
+      }
 
-      stdMpi.send(sendIt->first, dofs);
+      stdMpi.send(it.getRank(), dofs);
     }
-
-    for (it_type recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt)
-      stdMpi.recv(recvIt->first);
+	   
+    for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
+      stdMpi.recv(it.getRank());
 
     stdMpi.startCommunication();
 
-    for (it_type recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) {
-
+    for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank()) {
       int counter = 0;
-      for (int i = 0; i < nComponents; i++) {
-	DofContainer &feDofs = recvIt->second[vec.getFeSpace(i)];
-	DOFVector<double>& dofVec = *(vec.getDOFVector(i));
-	int nFeDofs = feDofs.size();
-	
-	for (int j = 0; j < nFeDofs; j++)
-	  dofVec[*(feDofs[j])] = stdMpi.getRecvData(recvIt->first)[counter++];
+
+      for (int i = 0; i < vec.getSize(); i++) {
+	DOFVector<double> &dofVec = *(vec.getDOFVector(i));
+
+	for (it.beginDofIter(vec.getFeSpace(i)); !it.endDofIter(); it.nextDof())
+	  dofVec[it.getDofIndex()] = stdMpi.getRecvData(it.getRank())[counter++];
       }
     }
   }
@@ -685,15 +676,11 @@ namespace AMDiS {
     FUNCNAME("MeshDistributor::getAllBoundaryDofs()");
 
     DofContainerSet dofSet;
+    for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank())
+      dofSet.insert(it.getDofs().begin(), it.getDofs().end());
+    for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank())
+      dofSet.insert(it.getDofs().begin(), it.getDofs().end());
 
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-
-    for (it_type it = sendDofs.begin(); it != sendDofs.end(); ++it)
-      dofSet.insert(it->second[feSpace].begin(), it->second[feSpace].end());
-
-    for (it_type it = recvDofs.begin(); it != recvDofs.end(); ++it)
-      dofSet.insert(it->second[feSpace].begin(), it->second[feSpace].end());
-    
     dofs.clear();
     dofs.insert(dofs.begin(), dofSet.begin(), dofSet.end());
   }
@@ -993,17 +980,13 @@ namespace AMDiS {
 
     boundaryDofs.clear();
 
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-
-    for (it_type it = sendDofs.begin(); it != sendDofs.end(); ++it)
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].begin(); ++dofIt)
-	boundaryDofs.insert(**dofIt);
+    for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof())
+	boundaryDofs.insert(it.getDofIndex());
 
-    for (it_type it = recvDofs.begin(); it != recvDofs.end(); ++it)
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].begin(); ++dofIt)
-	boundaryDofs.insert(**dofIt);
+    for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof())
+	boundaryDofs.insert(it.getDofIndex());
   }
 
 
@@ -1820,7 +1803,7 @@ namespace AMDiS {
 	    DofContainer dofs;
 	    it->rankObj.el->getAllDofs(feSpace, it->rankObj, dofs);
 
-	    DofContainer& tmp = sendDofs[it.getRank()][feSpace];
+	    DofContainer& tmp = sendDofs.getDofCont(it.getRank(), feSpace);
 	    tmp.insert(tmp.end(), dofs.begin(), dofs.end());
 
 	    if (createBoundaryDofFlag.isSet(BOUNDARY_FILL_INFO_SEND_DOFS))
@@ -1836,7 +1819,7 @@ namespace AMDiS {
 	    DofContainer dofs;
 	    it->rankObj.el->getAllDofs(feSpace, it->rankObj, dofs);
 
-	    DofContainer& tmp = recvDofs[it.getRank()][feSpace];
+	    DofContainer& tmp = recvDofs.getDofCont(it.getRank(), feSpace);
 	    tmp.insert(tmp.end(), dofs.begin(), dofs.end());
 
 	    if (createBoundaryDofFlag.isSet(BOUNDARY_FILL_INFO_RECV_DOFS))
@@ -1847,41 +1830,18 @@ namespace AMDiS {
     } else {
       for (InteriorBoundary::iterator it(myIntBoundary); !it.end(); ++it)
 	it->rankObj.el->getAllDofs(feSpace, it->rankObj, 
-				   sendDofs[it.getRank()][feSpace]);
+				   sendDofs.getDofCont(it.getRank(), feSpace));
       
       for (InteriorBoundary::iterator it(otherIntBoundary); !it.end(); ++it)
 	it->rankObj.el->getAllDofs(feSpace, it->rankObj, 
-				   recvDofs[it.getRank()][feSpace]);
+				   recvDofs.getDofCont(it.getRank(), feSpace));
     }
 
 
     // === Delete all empty DOF send and recv positions ===
 
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-    for (it_type sendIt = sendDofs.begin(); sendIt != sendDofs.end(); ++sendIt) {
-      map<const FiniteElemSpace*, DofContainer>::iterator it = 
-	sendIt->second.begin();      
-      while (it != sendIt->second.end()) {
-	if (it->second.size() == 0) {
-	  const FiniteElemSpace* fe = it->first;
-	  ++it;
-	  sendIt->second.erase(fe);
-	} else
-	  ++it;
-      }
-    }
-    for (it_type recvIt = recvDofs.begin(); recvIt != recvDofs.end(); ++recvIt) {
-      map<const FiniteElemSpace*, DofContainer>::iterator it = 
-	recvIt->second.begin();      
-      while (it != recvIt->second.end()) {
-	if (it->second.size() == 0) {
-	  const FiniteElemSpace* fe = it->first;
-	  ++it;
-	  recvIt->second.erase(fe);
-	} else
-	  ++it;
-      }
-    }
+    sendDofs.removeEmpty();
+    recvDofs.removeEmpty();
   }
 
 
@@ -1964,18 +1924,14 @@ namespace AMDiS {
 
     // All DOFs that must be received are DOFs not owned by rank and have 
     // therefore to be removed from the set 'rankDofs'.
-    for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator recvIt = recvDofs.begin();
-	 recvIt != recvDofs.end(); ++recvIt) {
-      DofContainer &rDofs = recvIt->second[feSpace];
-      for (DofContainer::iterator dofIt = rDofs.begin();
-	   dofIt != rDofs.end(); ++dofIt) {
+    for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank()) {
+      for (; !it.endDofIter(); it.nextDof()) {
 	DofContainer::iterator eraseIt = 
-	  find(rankDofs.begin(), rankDofs.end(), *dofIt);
-	if (eraseIt != rankDofs.end()) 
+	  find(rankDofs.begin(), rankDofs.end(), it.getDof());
+	if (eraseIt != rankDofs.end())
 	  rankDofs.erase(eraseIt);
       }
     }
-
 	
     // Get displacement for global rank DOF ordering and global DOF number.
     dofFeData[feSpace].nRankDofs = rankDofs.size();
@@ -1994,26 +1950,25 @@ namespace AMDiS {
     // === Send and receive new DOF indices. ===
 
 #if (DEBUG != 0)
-    ParallelDebug::testDofContainerCommunication(*this, sendDofs, recvDofs);
+    ParallelDebug::testDofContainerCommunication(*this, 
+						 sendDofs.getData(), 
+						 recvDofs.getData());
 #endif
     
     StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm);
-    for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator sendIt = sendDofs.begin();
-	 sendIt != sendDofs.end(); ++sendIt) {
-      DofContainer &sDofs = sendIt->second[feSpace];
-
-      stdMpi.getSendData(sendIt->first).resize(0);
-      stdMpi.getSendData(sendIt->first).reserve(sDofs.size());
-      for (DofContainer::iterator dofIt = sDofs.begin();
-	   dofIt != sDofs.end(); ++dofIt)
-	stdMpi.getSendData(sendIt->first).push_back(rankDofsNewGlobalIndex[*dofIt]);
+    for (DofComm::Iterator it(sendDofs, feSpace); !it.end(); it.nextRank()) {
+      stdMpi.getSendData(it.getRank()).resize(0);
+      stdMpi.getSendData(it.getRank()).reserve(it.getDofs().size());
+
+      for (; !it.endDofIter(); it.nextDof())
+	stdMpi.getSendData(it.getRank()).
+	  push_back(rankDofsNewGlobalIndex[it.getDof()]);
     }
 
     stdMpi.updateSendDataSize();
 
-    for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator recvIt = recvDofs.begin();
-	 recvIt != recvDofs.end(); ++recvIt)
-      stdMpi.recv(recvIt->first);
+    for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
+      stdMpi.recv(it.getRank());
 
     stdMpi.startCommunication();
 
@@ -2024,15 +1979,11 @@ namespace AMDiS {
     for (int i = 0; i < nRankAllDofs; i++)
       dofFeData[feSpace].isRankDof[i] = true;    
 
-
-    for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator recvIt = recvDofs.begin();
-	 recvIt != recvDofs.end(); ++recvIt) {
-      DofContainer &rDofs = recvIt->second[feSpace];
-      int i = 0;
-      for (DofContainer::iterator dofIt = rDofs.begin();
-	   dofIt != rDofs.end(); ++dofIt) {
-	rankDofsNewGlobalIndex[*dofIt] = stdMpi.getRecvData(recvIt->first)[i++];
-	dofFeData[feSpace].isRankDof[**dofIt] = false;
+    for (DofComm::Iterator it(recvDofs, feSpace); !it.end(); it.nextRank()) {
+      for (; !it.endDofIter(); it.nextDof()) {
+	rankDofsNewGlobalIndex[it.getDof()] = 
+	  stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
+	dofFeData[feSpace].isRankDof[it.getDofIndex()] = false;
       }
     }
 
@@ -2296,8 +2247,8 @@ namespace AMDiS {
     otherIntBoundary.serialize(out);
     periodicBoundary.serialize(out);
 
-    serialize(out, sendDofs);
-    serialize(out, recvDofs);
+    serialize(out, sendDofs.getData());
+    serialize(out, recvDofs.getData());
 
     // === Serialize FE space dependent data ===
 
@@ -2368,8 +2319,8 @@ namespace AMDiS {
     otherIntBoundary.deserialize(in, elIndexMap);
     periodicBoundary.deserialize(in, elIndexMap);
 
-    deserialize(in, sendDofs, dofMap);
-    deserialize(in, recvDofs, dofMap);
+    deserialize(in, sendDofs.getData(), dofMap);
+    deserialize(in, recvDofs.getData(), dofMap);
 
     // === Deserialize FE space dependent data ===
     
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index 154e4db667ed3337e0d539f67204b823ca02c4fa..57806d240af54fc5f381404978594cdaedc5da1a 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -25,6 +25,7 @@
 
 
 #include <mpi.h>
+#include "parallel/DofComm.h"
 #include "parallel/ElementObjectData.h"
 #include "parallel/ParallelTypes.h"
 #include "parallel/MeshPartitioner.h"
@@ -74,7 +75,8 @@ namespace AMDiS {
     /// Maps local dof indices to real dof indices.
     DofMapping mapLocalDofIndex;  
   };
-  
+
+
   class MeshDistributor
   {
   private:
@@ -275,12 +277,12 @@ namespace AMDiS {
       return (periodicDof[type].count(globalDofIndex) > 0);
     }
 
-    map<int, map<const FiniteElemSpace*, DofContainer> >& getSendDofs()
+    DofComm& getSendDofs()
     {
       return sendDofs;
     }
 
-    map<int, map<const FiniteElemSpace*, DofContainer> >& getRecvDofs()
+    DofComm& getRecvDofs()
     {
       return recvDofs;
     }
@@ -348,30 +350,25 @@ namespace AMDiS {
 
       const FiniteElemSpace *fe = vec.getFeSpace();
 
-      typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-
-      for (it_type sendIt = sendDofs.begin(); 
-	   sendIt != sendDofs.end(); ++sendIt) {
+      for (DofComm::Iterator it(sendDofs, fe); !it.end(); it.nextRank()) {
 	vector<T> dofs;
-	int nSendDofs = sendIt->second[fe].size();
-	dofs.reserve(nSendDofs);
+	dofs.reserve(it.getDofs().size());
 	
-	for (int i = 0; i < nSendDofs; i++)
-	  dofs.push_back(vec[*((sendIt->second[fe])[i])]);
+	for (; !it.endDofIter(); it.nextDof())
+	  dofs.push_back(vec[it.getDofIndex()]);
 	
-	stdMpi.send(sendIt->first, dofs);
+	stdMpi.send(it.getRank(), dofs);
       }
-      
-      for (it_type recvIt = recvDofs.begin();
-	   recvIt != recvDofs.end(); ++recvIt)
-	stdMpi.recv(recvIt->first, recvIt->second[fe].size());
-      
+	     
+      for (DofComm::Iterator it(recvDofs); !it.end(); it.nextRank())
+        stdMpi.recv(it.getRank());
+	     
       stdMpi.startCommunication();
-      
-      for (it_type recvIt = recvDofs.begin();
-	   recvIt != recvDofs.end(); ++recvIt)
-	for (unsigned int i = 0; i < recvIt->second.size(); i++)
-	  vec[*(recvIt->second[fe])[i]] = stdMpi.getRecvData(recvIt->first)[i];
+
+      for (DofComm::Iterator it(recvDofs, fe); !it.end(); it.nextRank())
+	for (; !it.endDofIter(); it.nextDof())
+	  vec[it.getDofIndex()] = 
+	     stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
     }
     
     /** \brief
@@ -633,16 +630,14 @@ namespace AMDiS {
      * This map contains for each rank the list of DOFs the current rank must 
      * send to exchange solution DOFs at the interior boundaries.
      */
-    //    map<FiniteElemSpace, RankToDofContainer> sendDofs;
-    map<int, map<const FiniteElemSpace*, DofContainer> > sendDofs;
+    DofComm sendDofs;
 
     /** \brief
      * This map contains on each rank the list of DOFs from which the current 
      * rank will receive DOF values (i.e., these are all DOFs at an interior
      * boundary). The DOF indices are given in rank's local numbering.
      */
-    //    map<FiniteElemSpace, RankToDofContainer> recvDofs;
-    map<int, map<const FiniteElemSpace*, DofContainer> > recvDofs;
+    DofComm recvDofs;
 
     /** \brief
      * If periodic boundaries are used, this map stores, for each periodic 
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index 7eeb7b84da1c492c87881754eec3001cc7c8841e..f61f32de9c166d55036c01abccdc63f369b4c495 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -349,16 +349,13 @@ namespace AMDiS {
     DOFVector<WorldVector<double> > coords(feSpace, "dofCorrds");
     pdb.mesh->getDofIndexCoords(feSpace, coords);
 
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-    for (it_type it = pdb.sendDofs.begin(); it != pdb.sendDofs.end(); ++it)
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin(); 
-	   dofIt != it->second[feSpace].end(); ++dofIt)
-	sendCoords[it->first].push_back(coords[**dofIt]);
+    for (DofComm::Iterator it(pdb.sendDofs, feSpace); !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof())
+	sendCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
 
-    for (it_type it = pdb.recvDofs.begin(); it != pdb.recvDofs.end(); ++it)
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt)
-	recvCoords[it->first].push_back(coords[**dofIt]);
+    for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof())
+	recvCoords[it.getRank()].push_back(coords[it.getDofIndex()]);
 
     vector<int> sendSize(pdb.mpiSize, 0);
     vector<int> recvSize(pdb.mpiSize, 0);
@@ -446,7 +443,8 @@ namespace AMDiS {
 	    oss << ")";
 	    MSG("%s\n", oss.str().c_str());
 	    
-	    debug::printInfoByDof(feSpace, *(pdb.recvDofs[it->first][feSpace][i]));
+	    debug::printInfoByDof(feSpace, 
+				  *(pdb.recvDofs.getDofCont(it->first, feSpace)[i]));
 	  }
 	  ERROR("Wrong DOFs in rank %d!\n", pdb.mpiRank);
 	  foundError = 1;
@@ -479,17 +477,16 @@ namespace AMDiS {
 	pdb.dofFeData[feSpace].mapLocalGlobalDofs[it.getDOFIndex()];    
 
     StdMpi<CoordsIndexMap> stdMpi(pdb.mpiComm, true);
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-    for (it_type it = pdb.sendDofs.begin(); it != pdb.sendDofs.end(); ++it)
-      stdMpi.send(it->first, coordsToIndex);
-    for (it_type it = pdb.recvDofs.begin(); it != pdb.recvDofs.end(); ++it)
-      stdMpi.recv(it->first);
+    for (DofComm::Iterator it(pdb.sendDofs, feSpace); !it.end(); it.nextRank())
+      stdMpi.send(it.getRank(), coordsToIndex);
+    for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank())
+      stdMpi.recv(it.getRank());
    
     stdMpi.startCommunication();
 
     int foundError = 0;
-    for (it_type it = pdb.recvDofs.begin(); it != pdb.recvDofs.end(); ++it) {
-      CoordsIndexMap& otherCoords = stdMpi.getRecvData(it->first);
+    for (DofComm::Iterator it(pdb.recvDofs, feSpace); !it.end(); it.nextRank()) {
+      CoordsIndexMap& otherCoords = stdMpi.getRecvData(it.getRank());
 
       for (CoordsIndexMap::iterator coordsIt = otherCoords.begin();
 	   coordsIt != otherCoords.end(); ++coordsIt) {
@@ -503,7 +500,7 @@ namespace AMDiS {
 	  oss << " do not fit together on rank " 
 	      << pdb.getMpiRank() << " (global index: " 
 	      << coordsToIndex[coordsIt->first] << " and on rank "
-	      << it->first << " (global index: " << coordsIt->second << ")";
+	      << it.getRank() << " (global index: " << coordsIt->second << ")";
 
 	  MSG("[DBG] %s\n", oss.str().c_str());
 	  foundError = 1;
@@ -648,20 +645,15 @@ namespace AMDiS {
 	pdb.mesh->getDofIndexCoords(it->first, feSpace, coords);
 	coords.print();
 
-	typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-	for (it_type rankit = pdb.sendDofs.begin(); rankit != pdb.sendDofs.end(); ++rankit) {
-	  for (DofContainer::iterator dofit = rankit->second[feSpace].begin();
-	       dofit != rankit->second[feSpace].end(); ++dofit)
-	    if (**dofit == it->first)
-	      cout << "SEND DOF TO " << rankit->first << endl;	  
-	}
-
-	for (it_type rankit = pdb.recvDofs.begin(); rankit != pdb.recvDofs.end(); ++rankit) {
-	  for (DofContainer::iterator dofit = rankit->second[feSpace].begin();
-	       dofit != rankit->second[feSpace].end(); ++dofit)
-	    if (**dofit == it->first)
-	      cout << "RECV DOF FROM " << rankit->first << endl;	  
-	}
+	for (DofComm::Iterator rit(pdb.sendDofs, feSpace); !rit.end(); rit.nextRank())
+	  for (; !rit.endDofIter(); rit.nextDof())
+	    if (it->first == rit.getDofIndex())
+	      cout << "SEND DOF TO " << rit.getRank() << endl;
+	
+	for (DofComm::Iterator rit(pdb.recvDofs, feSpace); !rit.end(); rit.nextRank())
+	  for (; !rit.endDofIter(); rit.nextDof())
+	    if (it->first == rit.getDofIndex())
+	      cout << "RECV DOF FROM " << rit.getRank() << endl;
 
 	cout << "------" << endl;
       }
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 1a58c810da13614a1af2f5308e1dc1de5ce47c8a..1ef21bb1e0fd8df01f3494dabf1ad025f946e89e 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -283,38 +283,39 @@ namespace AMDiS {
     typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
 
     StdMpi<vector<int> > stdMpi(meshDistributor->getMpiComm());
-    for (it_type it = meshDistributor->getSendDofs().begin();
-	 it != meshDistributor->getSendDofs().end(); ++it)
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt)
-	if (globalPrimalIndex.count(**dofIt))
-	  stdMpi.getSendData(it->first).push_back(globalPrimalIndex[**dofIt]);
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+	 !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof())
+	if (globalPrimalIndex.count(it.getDofIndex()))
+	  stdMpi.getSendData(it.getRank()).push_back(globalPrimalIndex[it.getDofIndex()]);
+
     stdMpi.updateSendDataSize();
 
-    for (it_type it = meshDistributor->getRecvDofs().begin();
-	 it != meshDistributor->getRecvDofs().end(); ++it) {
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+	 !it.end(); it.nextRank()) {
       bool recvFromRank = false;
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt)
-	if (primals.count(**dofIt) && 
-	    meshDistributor->getIsRankDof(feSpace, **dofIt) == false) {
+      for (; !it.endDofIter(); it.nextDof()) {
+	if (primals.count(it.getDofIndex()) && 
+	    meshDistributor->getIsRankDof(feSpace, it.getDofIndex()) == false) {
 	  recvFromRank = true;
 	  break;
 	}
+      }
 
       if (recvFromRank) 
-	stdMpi.recv(it->first);
+	stdMpi.recv(it.getRank());
     }
+
     stdMpi.startCommunication();
 
-    for (it_type it = meshDistributor->getRecvDofs().begin();
-	 it != meshDistributor->getRecvDofs().end(); ++it) {
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+	 !it.end(); it.nextRank()) {
       int i = 0;
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt) {
-	if (primals.count(**dofIt) && 
-	    meshDistributor->getIsRankDof(feSpace, **dofIt) == false)
-	  globalPrimalIndex[**dofIt] = stdMpi.getRecvData(it->first)[i++];
+      for (; !it.endDofIter(); it.nextDof()) {
+	if (primals.count(it.getDofIndex()) && 
+	    meshDistributor->getIsRankDof(feSpace, it.getDofIndex()) == false)
+	  globalPrimalIndex[it.getDofIndex()] = 
+	    stdMpi.getRecvData(it.getRank())[i++];
       }
     }
 
@@ -335,59 +336,55 @@ namespace AMDiS {
 
     boundaryDofRanks.clear();
 
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-
-    for (it_type it = meshDistributor->getSendDofs().begin();
-	 it != meshDistributor->getSendDofs().end(); ++it) {
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt) {
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace); 
+	 !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof()) {
 	// If DOF is not primal, i.e., its a dual node
-	if (globalPrimalIndex.count(**dofIt) == 0) {
-	  boundaryDofRanks[**dofIt].insert(mpiRank);
-	  boundaryDofRanks[**dofIt].insert(it->first);
+	if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
+	  boundaryDofRanks[it.getDofIndex()].insert(mpiRank);
+	  boundaryDofRanks[it.getDofIndex()].insert(it.getRank());
 	}
       }
-    }
-
+	
 
     // === Communicate these sets for all rank owned dual nodes to other ===
     // === ranks that also have this node.                               ===
 
     StdMpi<vector<std::set<int> > > stdMpi(meshDistributor->getMpiComm());
-    for (it_type it = meshDistributor->getSendDofs().begin();
-	 it != meshDistributor->getSendDofs().end(); ++it)
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt)
-	if (globalPrimalIndex.count(**dofIt) == 0)
-	  stdMpi.getSendData(it->first).push_back(boundaryDofRanks[**dofIt]);
+
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+	 !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof())
+	if (globalPrimalIndex.count(it.getDofIndex()) == 0)
+	  stdMpi.getSendData(it.getRank()).push_back(boundaryDofRanks[it.getDofIndex()]);
 
     stdMpi.updateSendDataSize();
 
-    for (it_type it = meshDistributor->getRecvDofs().begin();
-	 it != meshDistributor->getRecvDofs().end(); ++it) {
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace); 
+	 !it.end(); it.nextRank()) {
       bool recvFromRank = false;
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt)
-	if (globalPrimalIndex.count(**dofIt) == 0) {
+      for (; !it.endDofIter(); it.nextDof()) {
+	if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
 	  recvFromRank = true;
 	  break;
 	}
+      }
 
       if (recvFromRank)
-	stdMpi.recv(it->first);
+	stdMpi.recv(it.getRank());
     }
+
     stdMpi.startCommunication();
 
-    for (it_type it = meshDistributor->getRecvDofs().begin();
-	 it != meshDistributor->getRecvDofs().end(); ++it) {
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace); 
+	 !it.end(); it.nextRank()) {
       int i = 0;
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt)	
-	if (globalPrimalIndex.count(**dofIt) == 0)
-	  boundaryDofRanks[**dofIt] = stdMpi.getRecvData(it->first)[i++];
+      for (; !it.endDofIter(); it.nextDof())
+	if (globalPrimalIndex.count(it.getDofIndex()) == 0)
+	  boundaryDofRanks[it.getDofIndex()] = 
+	    stdMpi.getRecvData(it.getRank())[i++];
     }
 
-
     // === Create global index of the dual nodes on each rank. ===
 
     duals.clear();
@@ -460,43 +457,41 @@ namespace AMDiS {
     // === Communicate dofFirstLagrange to all other ranks. ===
 
     StdMpi<vector<int> > stdMpi(meshDistributor->getMpiComm());
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
 
-    for (it_type it = meshDistributor->getSendDofs().begin();
-	 it != meshDistributor->getSendDofs().end(); ++it)
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt) {
-	if (globalPrimalIndex.count(**dofIt) == 0) {
-	  TEST_EXIT_DBG(dofFirstLagrange.count(**dofIt))("Should not happen!\n");
-	  stdMpi.getSendData(it->first).push_back(dofFirstLagrange[**dofIt]);
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+	 !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof())
+	if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
+	  TEST_EXIT_DBG(dofFirstLagrange.count(it.getDofIndex()))
+	    ("Should not happen!\n");
+	  stdMpi.getSendData(it.getRank()).push_back(dofFirstLagrange[it.getDofIndex()]);
 	}
-      }
+
     stdMpi.updateSendDataSize();
 
-    for (it_type it = meshDistributor->getRecvDofs().begin();
-	 it != meshDistributor->getRecvDofs().end(); ++it) {
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+	 !it.end(); it.nextRank()) {
       bool recvData = false;
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt)
-	if (globalPrimalIndex.count(**dofIt) == 0) {
+      for (; !it.endDofIter(); it.nextDof())
+	if (globalPrimalIndex.count(it.getDofIndex()) == 0) {
 	  recvData = true;
 	  break;
 	}
 	  
       if (recvData)
-	stdMpi.recv(it->first);
+	stdMpi.recv(it.getRank());
     }
 
     stdMpi.startCommunication();
 
-    for (it_type it = meshDistributor->getRecvDofs().begin();
-	 it != meshDistributor->getRecvDofs().end(); ++it) {
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+	 !it.end(); it.nextRank()) {
       int counter = 0;
-      DofContainer &dc = it->second[feSpace];
-      for (unsigned int i = 0; i < dc.size(); i++)
-	if (globalPrimalIndex.count(*(dc[i])) == 0)
-	  dofFirstLagrange[*(dc[i])] = stdMpi.getRecvData(it->first)[counter++];
-    }     
+      for (; !it.endDofIter(); it.nextDof())
+	if (globalPrimalIndex.count(it.getDofIndex()) == 0)
+	  dofFirstLagrange[it.getDofIndex()] = 
+	    stdMpi.getRecvData(it.getRank())[counter++];
+    }
   }
 
 
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 8f49a34ddc86bd42ea9dd2b686199e60d9a11846..89264924ddd22f1b3267643c66f1cd126966f006 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -494,23 +494,21 @@ namespace AMDiS {
     map<DegreeOfFreedom, int> sendDofToRank;
 
 
-    // First, create for all ranks we send data to MatrixNnzEntry object with 0 entries.
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-    for (it_type it = meshDistributor->getRecvDofs().begin();
-	 it != meshDistributor->getRecvDofs().end(); ++it) {
-      sendMatrixEntry[it->first].resize(0);
-
-      for (DofContainer::iterator dofIt = it->second[feSpace].begin();
-	   dofIt != it->second[feSpace].end(); ++dofIt)
-	sendDofToRank[**dofIt] = it->first;
+    // First, for every rank we send data to, create a MatrixNnzEntry
+    // object with 0 entries.
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+	 !it.end(); it.nextRank()) {
+      sendMatrixEntry[it.getRank()].resize(0); 
+
+      for (; !it.endDofIter(); it.nextDof())
+	sendDofToRank[it.getDofIndex()] = it.getRank();
     }
 
 
     std::set<int> recvFromRank;
-    for (it_type it = meshDistributor->getSendDofs().begin();
-	 it != meshDistributor->getSendDofs().end(); ++it)
-      recvFromRank.insert(it->first);
-
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+	 !it.end(); it.nextRank())
+      recvFromRank.insert(it.getRank());
 
     for (int i = 0; i < nComponents; i++) {
       for (int j = 0; j < nComponents; j++) {
diff --git a/AMDiS/src/parallel/PetscSolverSchur.cc b/AMDiS/src/parallel/PetscSolverSchur.cc
index c0dd58d34ff558e29ecdfd4b7c184309b606ffa9..b46f365779d9ac2b4d29cdc67fe315378a69cafe 100644
--- a/AMDiS/src/parallel/PetscSolverSchur.cc
+++ b/AMDiS/src/parallel/PetscSolverSchur.cc
@@ -32,19 +32,14 @@ namespace AMDiS {
 
     boundaryDofs.clear();
     std::set<DegreeOfFreedom> boundaryLocalDofs;
-    typedef map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it_type;
-    for (it_type rankIt = meshDistributor->getSendDofs().begin();
-	 rankIt != meshDistributor->getSendDofs().end(); ++rankIt) {
-      for (DofContainer::iterator dofIt = rankIt->second[feSpace].begin(); 
-	   dofIt != rankIt->second[feSpace].end(); ++dofIt) {
-	boundaryLocalDofs.insert(**dofIt);
-	  
-	boundaryDofs.insert(meshDistributor->mapLocalToGlobal(feSpace, **dofIt));
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+	 !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof()) {
+	boundaryLocalDofs.insert(it.getDofIndex());	  
+	boundaryDofs.insert(meshDistributor->mapLocalToGlobal(feSpace, it.getDofIndex()));
       }
-    }
-      
-
 
+      
     nBoundaryDofs = boundaryDofs.size();
     mpi::getDofNumbering(mpiComm, nBoundaryDofs, 
 			 rStartBoundaryDofs, nOverallBoundaryDofs);
@@ -98,13 +93,11 @@ namespace AMDiS {
 #endif
 
 
-
     std::set<DegreeOfFreedom> otherBoundaryLocalDofs;
-    for (it_type rankIt = meshDistributor->getRecvDofs().begin();
-	 rankIt != meshDistributor->getRecvDofs().end(); ++rankIt)
-      for (DofContainer::iterator dofIt = rankIt->second[feSpace].begin(); 
-	   dofIt != rankIt->second[feSpace].end(); ++dofIt)
-	otherBoundaryLocalDofs.insert(**dofIt);
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+	 !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof())
+	otherBoundaryLocalDofs.insert(it.getDofIndex());
       
     interiorDofs.clear();
     DofIndexToBool& isRankDof = meshDistributor->getIsRankDof(feSpace);
@@ -133,39 +126,39 @@ namespace AMDiS {
 
 
     StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm);
-    for (it_type sendIt = meshDistributor->getSendDofs().begin();
-	 sendIt != meshDistributor->getSendDofs().end(); ++sendIt) {
-      stdMpi.getSendData(sendIt->first).resize(0);
-      stdMpi.getSendData(sendIt->first).reserve(sendIt->second.size());
-      for (DofContainer::iterator dofIt = sendIt->second[feSpace].begin();
-	   dofIt != sendIt->second[feSpace].end(); ++dofIt) {
-	int globalSendDof = meshDistributor->mapLocalToGlobal(feSpace, **dofIt);
+    for (DofComm::Iterator it(meshDistributor->getSendDofs(), feSpace);
+	 !it.end(); it.nextRank()) {
+      stdMpi.getSendData(it.getRank()).resize(0);
+      stdMpi.getSendData(it.getRank()).reserve(it.getDofs().size());
+
+      for (; !it.endDofIter(); it.nextDof()) {
+	int globalSendDof = 
+	  meshDistributor->mapLocalToGlobal(feSpace, it.getDofIndex());
 
 	TEST_EXIT_DBG(mapGlobalBoundaryDof.count(globalSendDof))
 	  ("No mapping for boundary DOF %d!\n", globalSendDof);
 
-	stdMpi.getSendData(sendIt->first).push_back(mapGlobalBoundaryDof[globalSendDof]);
+	stdMpi.getSendData(it.getRank()).push_back(mapGlobalBoundaryDof[globalSendDof]);
       }
-    }     
+    }
 
     stdMpi.updateSendDataSize();
-    for (it_type recvIt = meshDistributor->getRecvDofs().begin();
-	 recvIt != meshDistributor->getRecvDofs().end(); ++recvIt)
-      stdMpi.recv(recvIt->first);
 
-    stdMpi.startCommunication();
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+	 !it.end(); it.nextRank())
+      stdMpi.recv(it.getRank());
 
-    for (it_type recvIt = meshDistributor->getRecvDofs().begin();
-	 recvIt != meshDistributor->getRecvDofs().end(); ++recvIt) {
-      int i = 0;
-      for (DofContainer::iterator dofIt = recvIt->second[feSpace].begin();
-	   dofIt != recvIt->second[feSpace].end(); ++dofIt) {
-	int globalRecvDof = meshDistributor->mapLocalToGlobal(feSpace, **dofIt);
-	mapGlobalBoundaryDof[globalRecvDof] = stdMpi.getRecvData(recvIt->first)[i++];
+    stdMpi.startCommunication();
 
+    for (DofComm::Iterator it(meshDistributor->getRecvDofs(), feSpace);
+	 !it.end(); it.nextRank())
+      for (; !it.endDofIter(); it.nextDof()) {
+	int globalRecvDof = 
+	  meshDistributor->mapLocalToGlobal(feSpace, it.getDofIndex());
+	mapGlobalBoundaryDof[globalRecvDof] = 
+	  stdMpi.getRecvData(it.getRank())[it.getDofCounter()];
 	boundaryDofs.insert(globalRecvDof);
       }
-    }
 
 
     // === Create PETSc IS structures for interior and boundary DOFs. ===