From 6f81ca98142828c9b3b7556575121ccbb1569857 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Wed, 8 Jul 2009 14:35:40 +0000
Subject: [PATCH] Work on pdd, it now mostly works for the stationary case,
 also for higher order elements. Just some small bugs remain :)

---
 AMDiS/src/DataCollector.cc         |   9 +-
 AMDiS/src/ParallelDomainProblem.cc | 307 +++++++++++++++++------------
 AMDiS/src/ParallelDomainProblem.h  |  13 +-
 AMDiS/src/VtkWriter.hh             |  15 +-
 4 files changed, 204 insertions(+), 140 deletions(-)

diff --git a/AMDiS/src/DataCollector.cc b/AMDiS/src/DataCollector.cc
index 733964f8..c158402f 100644
--- a/AMDiS/src/DataCollector.cc
+++ b/AMDiS/src/DataCollector.cc
@@ -236,11 +236,10 @@ namespace AMDiS {
       
       // get dof index of this vertex
       vertexDOF = dof[i][nPreDofs];
-     
+
       // search for coords at this dof
       std::list<VertexInfo>::iterator it =
-	  find((*vertexInfos)[vertexDOF].begin(),
-	       (*vertexInfos)[vertexDOF].end(),
+	  find((*vertexInfos)[vertexDOF].begin(), (*vertexInfos)[vertexDOF].end(),
 	       vertexCoords);
       
       // coords not yet in list?
@@ -250,7 +249,7 @@ namespace AMDiS {
 
 	// add new vertex info to list
 	(*vertexInfos)[vertexDOF].push_front(newVertexInfo);
-	
+
 	// set iterator to new vertex info
 	it = (*vertexInfos)[vertexDOF].begin();	
 
@@ -318,7 +317,7 @@ namespace AMDiS {
 	// search for interpolation point coordinates, and insert them to the 
 	// dof-entry, if not contained in the list
 	std::list<WorldVector<double> >::iterator it =
-		find((*interpPointCoords)[dofi].begin(),
+		find((*interpPointCoords)[dofi].begin(), 
 		     (*interpPointCoords)[dofi].end(),
 		     *vertexCoords);
 	
diff --git a/AMDiS/src/ParallelDomainProblem.cc b/AMDiS/src/ParallelDomainProblem.cc
index 773b72d1..06c5ac3d 100644
--- a/AMDiS/src/ParallelDomainProblem.cc
+++ b/AMDiS/src/ParallelDomainProblem.cc
@@ -24,8 +24,8 @@ namespace AMDiS {
 
   PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
   {
-    if (iter % 100 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
-      std::cout << "  Iteration " << iter << ": " << rnorm << "\n";
+    if (iter % 1 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
+      std::cout << "  Iteration " << iter << ": " << rnorm << std::endl;
 
     return 0;
   }
@@ -103,21 +103,11 @@ namespace AMDiS {
     DbgTestInteriorBoundary();
 #endif
 
-    // === Reset all DOFAdmins of the mesh. ===
 
-    int nAdmins = mesh->getNumberOfDOFAdmin();
-    for (int i = 0; i < nAdmins; i++) {
-      DOFAdmin& admin = const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));
+    // === Reset all DOFAdmins of the mesh. ===
 
-      for (int j = 0; j < admin.getSize(); j++)
-	admin.setDOFFree(j, true);
-      for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
- 	admin.setDOFFree(j, false);
+    updateDofAdmins();
 
-      admin.setUsedSize(mapLocalGlobalDOFs.size());
-      admin.setUsedCount(mapLocalGlobalDOFs.size());
-      admin.setFirstHole(mapLocalGlobalDOFs.size());
-    }
 
     // === Global refinements. ===
 
@@ -127,13 +117,25 @@ namespace AMDiS {
     if (globalRefinement > 0) {
       refinementManager->globalRefine(mesh, globalRefinement);
 
+#if (DEBUG != 0)
+      elMap.clear();
+      DbgCreateElementMap(elMap);
+#endif
+
       updateLocalGlobalNumbering(nRankDOFs, nOverallDOFs);
+
+      updateDofAdmins();
+
+#if (DEBUG != 0)
+      DbgTestElementMap(elMap);
+#endif
     }
 
 #if (DEBUG != 0)
     DbgTestCommonDofs();
 #endif
 
+
     // === Create petsc matrix. ===
 
     int ierr;
@@ -155,6 +157,24 @@ namespace AMDiS {
   {
   }
 
+  
+  void ParallelDomainBase::updateDofAdmins()
+  {
+    int nAdmins = mesh->getNumberOfDOFAdmin();
+    for (int i = 0; i < nAdmins; i++) {
+      DOFAdmin& admin = const_cast<DOFAdmin&>(mesh->getDOFAdmin(i));
+      
+      for (int j = 0; j < admin.getSize(); j++)
+	admin.setDOFFree(j, true);
+      for (int j = 0; j < static_cast<int>(mapLocalGlobalDOFs.size()); j++)
+ 	admin.setDOFFree(j, false);
+
+      admin.setUsedSize(mapLocalGlobalDOFs.size());
+      admin.setUsedCount(mapLocalGlobalDOFs.size());
+      admin.setFirstHole(mapLocalGlobalDOFs.size());
+    }
+  }
+
 
   void ParallelDomainBase::testForMacroMesh()
   {
@@ -229,10 +249,11 @@ namespace AMDiS {
     KSPCreate(PETSC_COMM_WORLD, &ksp);
     KSPSetOperators(ksp, petscMatrix, petscMatrix, DIFFERENT_NONZERO_PATTERN);
     KSPGetPC(ksp, &pc);
-    PCSetType(pc, PCNONE);
+    //    PCSetType(pc, PCNONE);
+    PCSetType(pc, PCJACOBI);
     KSPSetTolerances(ksp, 1.e-7, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
-    //    KSPSetType(ksp, KSPBCGS);
-    KSPSetType(ksp, KSPCG);
+    KSPSetType(ksp, KSPBCGS);
+    //KSPSetType(ksp, KSPCG);
     KSPMonitorSet(ksp, myKSPMonitor, PETSC_NULL, 0);
     KSPSolve(ksp, petscRhsVec, petscSolVec);
 
@@ -244,14 +265,12 @@ namespace AMDiS {
 
     PetscScalar *vecPointer;
     VecGetArray(petscSolVec, &vecPointer);
-    vec->set(1.0);
 
     for (int i = 0; i < nRankDOFs; i++)
-      (*vec)[mapGlobalLocalDOFs[i]] = vecPointer[i];
+      (*vec)[mapLocalToDofIndex[i]] = vecPointer[i];
 
     VecRestoreArray(petscSolVec, &vecPointer);
 
-#if 1
     std::vector<double*> sendBuffers(sendDofs.size());
     std::vector<double*> recvBuffers(recvDofs.size());
 
@@ -286,7 +305,6 @@ namespace AMDiS {
 
     MPI::Request::Waitall(requestCounter, request);
 
-    
     i = 0;
     for (RankToDofContainer::iterator recvIt = recvDofs.begin();
 	 recvIt != recvDofs.end();
@@ -299,7 +317,6 @@ namespace AMDiS {
     
     for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
       delete [] sendBuffers[i];
-#endif
   }
 
 
@@ -533,27 +550,45 @@ namespace AMDiS {
     nRankDOFs = rankDOFs.size();
     nOverallDOFs = partitionDOFs.size();
 
+
     // === Get starting position for global rank dof ordering. ====
 
-    //    int rstart = 0;
+    rstart = 0;
     mpiComm.Scan(&nRankDOFs, &rstart, 1, MPI_INT, MPI_SUM);
     rstart -= nRankDOFs;
 
+    
+    // === Create new indices for all dofs in the rank's partition. The new    ===
+    // === indices must start at 0, must be continuous, and must have the same ===
+    // === order as the indices had before.                                    ===
 
-    rankDofsNewLocalIndex.clear();
-    rankDofsNewGlobalIndex.clear();
 
+    // Do not change the indices now, but create a new indexing and store it here.
+    DofIndexMap rankDofsNewLocalIndex;
+    isRankDof.clear();
     int i = 0;
     for (DofContainer::iterator dofIt = rankAllDofs.begin();
 	 dofIt != rankAllDofs.end(); ++dofIt) {
-      rankDofsNewLocalIndex[*dofIt] = i++;
+      rankDofsNewLocalIndex[*dofIt] = i;
+      // First, we set all dofs in the rank's partition to be owned by the rank.
+      // Later, the dofs that are owned by other ranks are set to false.
       isRankDof[i] = true;
+      i++;
     }
 
+
+    // === Create a new global indexing for all rank-owned dofs.                ===
+
+    // Stores a new global index for all rank-owned dofs.
+    DofIndexMap rankOwnedDofsNewGlobalIndex;
+    // Stores a continuous local index for all rank-owned dofs.
+    DofIndexMap rankOwnedDofsNewLocalIndex;
+
     i = 0;
     for (DofContainer::iterator dofIt = rankDOFs.begin();
 	 dofIt != rankDOFs.end(); ++dofIt) {
-      rankDofsNewGlobalIndex[*dofIt] = i + rstart;
+      rankOwnedDofsNewGlobalIndex[*dofIt] = i + rstart;
+      rankOwnedDofsNewLocalIndex[*dofIt] = i;
       i++;
     }
 
@@ -580,10 +615,10 @@ namespace AMDiS {
 	     itRanks != partitionDOFs[it->first].end();
 	     ++itRanks) {
 	  if (*itRanks != mpiRank) {
-	    TEST_EXIT_DBG(rankDofsNewGlobalIndex.count(it->first) == 1)
+	    TEST_EXIT_DBG(rankOwnedDofsNewGlobalIndex.count(it->first) == 1)
 	      ("DOF Key not found!\n");
 
-	    sendNewDofs[*itRanks][it->first] = rankDofsNewGlobalIndex[it->first];
+	    sendNewDofs[*itRanks][it->first] = rankOwnedDofsNewGlobalIndex[it->first];
 	  }
 	}
       } else {
@@ -654,8 +689,6 @@ namespace AMDiS {
     // === Change dof indices for rank partition. ===
 
     mapLocalGlobalDOFs.clear();
-    mapGlobalLocalDOFs.clear();
-    isRankDof.clear();
 
     // === Change dof indices at boundary from other ranks. ===
 
@@ -692,7 +725,7 @@ namespace AMDiS {
 	    dofChanged[dofIt->first] = true;
 
 	    recvDofs[recvIt->first].push_back(dofIt->first);
-	    rankDofsNewGlobalIndex[dofIt->first] = newGlobalDof;
+	    rankOwnedDofsNewGlobalIndex[dofIt->first] = newGlobalDof;
 	    isRankDof[rankDofsNewLocalIndex[dofIt->first]] = false;
 
 	    found = true;
@@ -706,42 +739,22 @@ namespace AMDiS {
       delete [] recvBuffers[i];
     }
 
+    
+    // === Now create the local-to-global index mapping, and vice versa.      ===
+
     for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin();
 	 dofIt != rankDofsNewLocalIndex.end(); ++dofIt) {
       DegreeOfFreedom localDof = dofIt->second;
-      DegreeOfFreedom globalDof = rankDofsNewGlobalIndex[dofIt->first];
+      DegreeOfFreedom globalDof = rankOwnedDofsNewGlobalIndex[dofIt->first];
 
       *const_cast<DegreeOfFreedom*>(dofIt->first) = localDof;
       mapLocalGlobalDOFs[localDof] = globalDof;
-      mapGlobalLocalDOFs[globalDof] = localDof;
-    }
-
-#if 0
-    if (mpiRank == 0) {
-      for (DofContainer::iterator dofIt = recvDofs[1].begin();
-	   dofIt != recvDofs[1].end(); ++dofIt) {
-	std::cout << "RECV " << **dofIt << std::endl;
-      }
-    }
-
-    exit(0);
-#endif
-
-#if 0
-    if (mpiRank == 0) {
-      for (DofContainer::iterator dofIt = rankAllDofs.begin();
-	   dofIt != rankAllDofs.end(); ++dofIt) {
-	std::cout << "DOF = " << **dofIt << " GLOBAL index = " 
-		  << rankDofsNewGlobalIndex[*dofIt] << " FROM MAP = " 
-		  << mapLocalGlobalDOFs[**dofIt]
-		  << "  AND BACK "
-		  << mapGlobalLocalDOFs[mapLocalGlobalDOFs[**dofIt]] 
-		  << std::endl;
-      }
     }
 
-    exit(0);
-#endif
+    mapLocalToDofIndex.clear();
+    for (DofIndexMap::iterator dofIt = rankOwnedDofsNewLocalIndex.begin();
+	 dofIt != rankOwnedDofsNewLocalIndex.end(); ++dofIt)
+      mapLocalToDofIndex[dofIt->second] = *(dofIt->first);
   }
 
 
@@ -749,29 +762,35 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelDomainBase::updateLocalGlobalNumbering()");
 
-    std::set<const DegreeOfFreedom*> rankDOFs;
-    DofToRank newBoundaryDOFs;
-
+    typedef std::set<const DegreeOfFreedom*> DofSet;
 
     // === Get all DOFs in ranks partition. ===
 
     ElementDofIterator elDofIt(feSpace);
-
+    DofSet rankDOFSet;
     TraverseStack stack;
     ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
     while (elInfo) {
       Element *element = elInfo->getElement();     
       elDofIt.reset(element);
       do {
-	rankDOFs.insert(elDofIt.getDofPtr());
+	rankDOFSet.insert(elDofIt.getDofPtr());
       } while(elDofIt.next());
 
       elInfo = stack.traverseNext(elInfo);
     }
 
+    DofContainer rankAllDofs;
+    for (DofSet::iterator dofIt = rankDOFSet.begin(); dofIt != rankDOFSet.end(); ++dofIt)
+      rankAllDofs.push_back(*dofIt);
+    sort(rankAllDofs.begin(), rankAllDofs.end(), cmpDofsByValue);
+    DofContainer rankDOFs = rankAllDofs;
+
+
     // === Traverse on interior boundaries and move all not ranked owned DOFs from ===
     // === rankDOFs to boundaryDOFs.                                               ===
 
+    DofToRank newBoundaryDOFs;
     RankToDofContainer sendNewDofs;
     RankToDofContainer recvNewDofs;
 
@@ -781,8 +800,7 @@ namespace AMDiS {
       for (std::vector<AtomicBoundary>::iterator boundIt = it->second.begin();
 	   boundIt != it->second.end(); ++boundIt) {
 
-	const DegreeOfFreedom *dof1 = NULL;
-	const DegreeOfFreedom *dof2 = NULL;
+	const DegreeOfFreedom *dof1, *dof2;
 
 	switch (boundIt->rankObject.ithObjAtBoundary) {
 	case 0:
@@ -809,15 +827,14 @@ namespace AMDiS {
 	newBoundaryDOFs[dof1] = boundaryDOFs[dof1];
 	newBoundaryDOFs[dof2] = boundaryDOFs[dof2];
 
-  	if (find(sendNewDofs[it->first].begin(), sendNewDofs[it->first].end(), dof1) ==
-  	    sendNewDofs[it->first].end())
- 	  sendNewDofs[it->first].push_back(dof1);
-  	if (find(sendNewDofs[it->first].begin(), sendNewDofs[it->first].end(), dof2) ==
-  	    sendNewDofs[it->first].end())
- 	  sendNewDofs[it->first].push_back(dof2);
-		
+	DofContainer &dofsToSend = sendNewDofs[it->first];
+
+  	if (find(dofsToSend.begin(), dofsToSend.end(), dof1) == dofsToSend.end())
+ 	  dofsToSend.push_back(dof1);
+  	if (find(dofsToSend.begin(), dofsToSend.end(), dof2) == dofsToSend.end())
+ 	  dofsToSend.push_back(dof2);
+
 	DofContainer boundDOFs;
-	
 	addAllVertexDOFs(boundIt->rankObject.el, 
 			 boundIt->rankObject.ithObjAtBoundary,
 			 boundDOFs);	
@@ -827,7 +844,7 @@ namespace AMDiS {
 
 	for (int i = 0; i < static_cast<int>(boundDOFs.size()); i++) {
 	  newBoundaryDOFs[boundDOFs[i]] = mpiRank;
-	  sendNewDofs[it->first].push_back(boundDOFs[i]);
+	  dofsToSend.push_back(boundDOFs[i]);
 	}
 	
       }
@@ -839,8 +856,7 @@ namespace AMDiS {
       for (std::vector<AtomicBoundary>::iterator boundIt = it->second.begin();
 	   boundIt != it->second.end(); ++boundIt) {
 
-	const DegreeOfFreedom *dof1 = NULL;
-	const DegreeOfFreedom *dof2 = NULL;
+	const DegreeOfFreedom *dof1, *dof2;
 
 	switch (boundIt->rankObject.ithObjAtBoundary) {
 	case 0:
@@ -864,20 +880,23 @@ namespace AMDiS {
 	TEST_EXIT_DBG(boundaryDOFs.find(dof2) != boundaryDOFs.end())
 	  ("Should never happen!\n");
 
-	rankDOFs.erase(dof1);
-	rankDOFs.erase(dof2);
+	DofContainer::iterator eraseIt = find(rankDOFs.begin(), rankDOFs.end(), dof1);
+	if (eraseIt != rankDOFs.end())
+	  rankDOFs.erase(eraseIt);
+	eraseIt = find(rankDOFs.begin(), rankDOFs.end(), dof2);
+	if (eraseIt != rankDOFs.end())
+	  rankDOFs.erase(eraseIt);
+
 	newBoundaryDOFs[dof1] = boundaryDOFs[dof1];
 	newBoundaryDOFs[dof2] = boundaryDOFs[dof2];
 
-  	if (find(recvNewDofs[it->first].begin(), recvNewDofs[it->first].end(), dof1) ==
-  	    recvNewDofs[it->first].end())
- 	  recvNewDofs[it->first].push_back(dof1);
-  	if (find(recvNewDofs[it->first].begin(), recvNewDofs[it->first].end(), dof2) ==
-  	    recvNewDofs[it->first].end())
- 	  recvNewDofs[it->first].push_back(dof2);
-	
-	DofContainer boundDOFs;
-	
+	DofContainer &dofsToRecv = recvNewDofs[it->first];
+  	if (find(dofsToRecv.begin(), dofsToRecv.end(), dof1) == dofsToRecv.end())
+ 	  dofsToRecv.push_back(dof1);
+  	if (find(dofsToRecv.begin(), dofsToRecv.end(), dof2) == dofsToRecv.end())
+ 	  dofsToRecv.push_back(dof2);
+
+	DofContainer boundDOFs;	
 	addAllEdgeDOFs(boundIt->rankObject.el, 
  		       boundIt->rankObject.ithObjAtBoundary,
  		       boundDOFs);
@@ -886,9 +905,15 @@ namespace AMDiS {
 			 boundDOFs);
 
 	for (int i = static_cast<int>(boundDOFs.size()) - 1; i >= 0; i--) {
-	  rankDOFs.erase(boundDOFs[i]);
+	  TEST_EXIT_DBG(find(rankDOFs.begin(), rankDOFs.end(), boundDOFs[i]) != rankDOFs.end())
+	    ("Should never happen!\n");
+
+	  eraseIt = find(rankDOFs.begin(), rankDOFs.end(), boundDOFs[i]);
+	  if (eraseIt != rankDOFs.end())
+	    rankDOFs.erase(eraseIt);
+
 	  newBoundaryDOFs[boundDOFs[i]] = it->first;
-	  recvNewDofs[it->first].push_back(boundDOFs[i]);
+	  dofsToRecv.push_back(boundDOFs[i]);
 	}
       }
     }
@@ -907,21 +932,36 @@ namespace AMDiS {
     mpiComm.Allreduce(&nRankDOFs, &nOverallDOFs, 1, MPI_INT, MPI_SUM);
 
 
-    // === Create new local DOF index numbering. ===
+    // === Create a new local DOF index numbering. ===
 
-    mapLocalGlobalDOFs.clear();
-    mapGlobalLocalDOFs.clear();
-    isRankDof.clear();
 
+    // Do not change the indices now, but create a new indexing and store it here.
+    DofIndexMap rankDofsNewLocalIndex;
+    isRankDof.clear();
     int i = 0;
-    for (std::set<const DegreeOfFreedom*>::iterator dofIt = rankDOFs.begin();
-	 dofIt != rankDOFs.end(); ++dofIt, i++) {
-      *(const_cast<DegreeOfFreedom*>(*dofIt)) = i;
-      mapLocalGlobalDOFs[i] = rstart + i;
-      mapGlobalLocalDOFs[rstart + i] = i;
+    for (DofContainer::iterator dofIt = rankAllDofs.begin();
+	 dofIt != rankAllDofs.end(); ++dofIt) {
+      rankDofsNewLocalIndex[*dofIt] = i;
+      // First, we set all dofs in the rank's partition to be owned by the rank.
+      // Later, the dofs that are owned by other ranks are set to false.
       isRankDof[i] = true;
+      i++;
     }
 
+    // Stores a new global index for all rank-owned dofs.
+    DofIndexMap rankOwnedDofsNewGlobalIndex;
+    // Stores a continuous local index for all rank-owned dofs.
+    DofIndexMap rankOwnedDofsNewLocalIndex;
+
+    i = 0;
+    for (DofContainer::iterator dofIt = rankDOFs.begin();
+	 dofIt != rankDOFs.end(); ++dofIt) {
+      rankOwnedDofsNewGlobalIndex[*dofIt] = i + rstart;
+      rankOwnedDofsNewLocalIndex[*dofIt] = i;
+      i++;
+    }
+
+
     // === Send new DOF indices. ===
 
     std::vector<int*> sendBuffers(sendNewDofs.size());
@@ -938,7 +978,7 @@ namespace AMDiS {
       int c = 0;
       for (DofContainer::iterator dofIt = sendIt->second.begin();
 	   dofIt != sendIt->second.end(); ++dofIt)
-	sendBuffers[i][c++] = (*dofIt)[0] + rstart;
+	sendBuffers[i][c++] = rankOwnedDofsNewGlobalIndex[*dofIt];
 
       request[requestCounter++] = 
 	mpiComm.Isend(sendBuffers[i], nSendDofs, MPI_INT, sendIt->first, 0);
@@ -956,23 +996,18 @@ namespace AMDiS {
 
     MPI::Request::Waitall(requestCounter, request);
 
-    i = 0;
-    for (RankToDofContainer::iterator sendIt = sendNewDofs.begin();
-	 sendIt != sendNewDofs.end(); ++sendIt)
-      delete [] sendBuffers[i++];
+    for (int j = 0; j < static_cast<int>(sendBuffers.size()); j++)
+      delete [] sendBuffers[j];
 
     i = 0;
-    int newDofIndex = nRankDOFs;
     for (RankToDofContainer::iterator recvIt = recvNewDofs.begin();
 	 recvIt != recvNewDofs.end(); ++recvIt) {      
       int j = 0;
       for (DofContainer::iterator dofIt = recvIt->second.begin();
 	   dofIt != recvIt->second.end(); ++dofIt) {
-	(*const_cast<DegreeOfFreedom*>(*dofIt)) = newDofIndex;
-	mapLocalGlobalDOFs[newDofIndex] = recvBuffers[i][j];
-	mapGlobalLocalDOFs[recvBuffers[i][j]] = newDofIndex;
-	isRankDof[newDofIndex] = false;
-	newDofIndex++;
+
+	rankOwnedDofsNewGlobalIndex[*dofIt] = recvBuffers[i][j];
+	isRankDof[rankOwnedDofsNewLocalIndex[*dofIt]] = false;
 	j++;
       }
 
@@ -984,6 +1019,24 @@ namespace AMDiS {
     sendDofs = sendNewDofs;
     recvDofs = recvNewDofs;
 
+
+    // === Now create the local-to-global index mapping, and vice versa.      ===
+
+    mapLocalGlobalDOFs.clear();
+
+    for (DofIndexMap::iterator dofIt = rankDofsNewLocalIndex.begin();
+	 dofIt != rankDofsNewLocalIndex.end(); ++dofIt) {
+      DegreeOfFreedom localDof = dofIt->second;
+      DegreeOfFreedom globalDof = rankOwnedDofsNewGlobalIndex[dofIt->first];
+
+      *const_cast<DegreeOfFreedom*>(dofIt->first) = localDof;
+      mapLocalGlobalDOFs[localDof] = globalDof;
+    }
+
+    mapLocalToDofIndex.clear();
+    for (DofIndexMap::iterator dofIt = rankOwnedDofsNewLocalIndex.begin();
+	 dofIt != rankOwnedDofsNewLocalIndex.end(); ++dofIt)
+      mapLocalToDofIndex[dofIt->second] = *(dofIt->first);
   }
 
 
@@ -1252,7 +1305,7 @@ namespace AMDiS {
   }
   
 
-  void ParallelDomainBase::DbgTestCommonDofs()
+  void ParallelDomainBase::DbgTestCommonDofs(bool printCoords)
   {
     FUNCNAME("ParallelDomainBase::DbgTestCommonDofs()");
 
@@ -1359,24 +1412,28 @@ namespace AMDiS {
     for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it)
       delete [] sendCoordsBuffer[it->first];
 
+    double eps = 1e-14;
+
     for (RankToCoords::iterator it = recvCoords.begin(); it != recvCoords.end(); ++it) {
       for (int i = 0; i < static_cast<int>(it->second.size()); i++) {
 
-	std::cout << "[DBG] i = " << i << std::endl;
-
-	std::cout.precision(4);
-	std::cout << "[DBG]  "
-		  << "Rank " << mpiRank << " from rank " << it->first
-		  << " expect coords (" 
-		  << (it->second)[i][0] << " , " <<  (it->second)[i][1] 
-		  << ")  received coords ("
-	          << recvCoordsBuffer[it->first][i * 2] << " , " 
-		  << recvCoordsBuffer[it->first][i * 2 + 1] << ")" << std::endl;
+	if (printCoords) {
+	  std::cout << "[DBG] i = " << i << std::endl;
+	  
+	  std::cout.precision(20);
+	  std::cout << "[DBG]  "
+		    << "Rank " << mpiRank << " from rank " << it->first
+		    << " expect coords (" 
+		    << (it->second)[i][0] << " , " <<  (it->second)[i][1] 
+		    << ")  received coords ("
+		    << recvCoordsBuffer[it->first][i * 2] << " , " 
+		    << recvCoordsBuffer[it->first][i * 2 + 1] << ")" << std::endl;
+	}
 
-	bool c0 = (it->second)[i][0] == recvCoordsBuffer[it->first][i * 2];
-	bool c1 = (it->second)[i][1] == recvCoordsBuffer[it->first][i * 2 + 1];
+	bool c0 = fabs((it->second)[i][0] - recvCoordsBuffer[it->first][i * 2]) < eps;
+	bool c1 = fabs((it->second)[i][1] - recvCoordsBuffer[it->first][i * 2 + 1]) < eps;
 
-	TEST_EXIT(c0 && c1)("Wrong DOFs!\n");
+	TEST_EXIT(c0 && c1)("Wrong DOFs in rank %d!\n", mpiRank);
       } 
 
       delete [] recvCoordsBuffer[it->first];
diff --git a/AMDiS/src/ParallelDomainProblem.h b/AMDiS/src/ParallelDomainProblem.h
index 32a46349..796a39fe 100644
--- a/AMDiS/src/ParallelDomainProblem.h
+++ b/AMDiS/src/ParallelDomainProblem.h
@@ -87,6 +87,8 @@ namespace AMDiS {
 
     void exitParallelization(AdaptInfo *adaptInfo);
 
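+    /** \brief
+     * Resets the DOFAdmins of the mesh: the first mapLocalGlobalDOFs.size() dofs
+     * are marked as used, all remaining dofs as free, and the used size of the
+     * admins is set accordingly.
+     */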
+    void updateDofAdmins();
+
     /** \brief
      * Test, if the mesh consists of macro elements only. The mesh partitioning of
      * the parallelization works for macro meshes only and would fail, if the mesh
@@ -218,8 +220,11 @@ namespace AMDiS {
      * and compares the dof indices on them with the dof indices of the boundarys
      * neighbours. The function fails, when dof indices on an interior boundary does
      * not fit together.
+     *
+     * \param  printCoords   If true, the coords of all common dofs are printed to
+     *                       the screen.
      */
-    void DbgTestCommonDofs();
+    void DbgTestCommonDofs(bool printCoords = false);
 
     inline void orderDOFs(const DegreeOfFreedom* dof1,
 			  const DegreeOfFreedom* dof2,
@@ -352,8 +357,8 @@ namespace AMDiS {
     /// Maps local to global dof indices.
     DofMapping mapLocalGlobalDOFs;
 
-    /// Maps global to local dof indices.
-    DofMapping mapGlobalLocalDOFs;
+    /// Maps local dof indices to real dof indices.
+    DofMapping mapLocalToDofIndex;  
 
     /** \brief
      * Maps all DOFs in ranks partition to a bool value. If it is true, the DOF is
@@ -363,8 +368,6 @@ namespace AMDiS {
     DofToBool isRankDof;
 
     int rstart;
-
-    DofIndexMap rankDofsNewLocalIndex, rankDofsNewGlobalIndex;
   };
 
   bool cmpDofsByValue(const DegreeOfFreedom* dof1, const DegreeOfFreedom* dof2)
diff --git a/AMDiS/src/VtkWriter.hh b/AMDiS/src/VtkWriter.hh
index ee37b0c4..29c0d47e 100644
--- a/AMDiS/src/VtkWriter.hh
+++ b/AMDiS/src/VtkWriter.hh
@@ -86,15 +86,17 @@ namespace AMDiS {
   template<typename T>
   void VtkWriter::writeVertexCoords(T &file)
   {
-    DOFVector< std::list<VertexInfo> > *vertexInfos = (*dataCollector)[0]->getVertexInfos();
+    DOFVector< std::list<VertexInfo> > *vertexInfos = 
+      (*dataCollector)[0]->getVertexInfos();
     DOFVector< std::list<VertexInfo> >::Iterator it(vertexInfos, USED_DOFS);
     int counter = 0;
 
+    int i = 0;
+
     // For all DOFs of vertices, write the coordinates.
     for (it.reset(); !it.end(); ++it) {
       // for all vertex infos of this DOF
-      std::list<VertexInfo>::iterator it2;
-      for (it2 = it->begin(); it2 != it->end(); ++it2) {
+      for (std::list<VertexInfo>::iterator it2 = it->begin(); it2 != it->end(); ++it2) {
 	it2->outputIndex = counter++;
 	writeCoord(file, it2->coords);
       }     
@@ -105,10 +107,13 @@ namespace AMDiS {
       DOFVector< std::list< WorldVector<double> > > *interpPointCoords = (*dataCollector)[0]->getInterpPointCoords();
       DOFVector< std::list< WorldVector<double> > >::Iterator pointIt(interpPointCoords, USED_DOFS);
       
+      counter = 0;
       for (pointIt.reset(); !pointIt.end(); ++pointIt) {
-	std::list< WorldVector<double> >::iterator it2;
-	for (it2 = pointIt->begin(); it2 != pointIt->end(); ++it2)
+	for (std::list< WorldVector<double> >::iterator it2 = pointIt->begin(); 
+	     it2 != pointIt->end(); ++it2) {
+	  counter++;
 	  writeCoord(file, *it2);
+	}
       }
     }
   }
-- 
GitLab