From fd574ac4555c67a4102a2f794f1a5d7bc0126c7d Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Wed, 4 Jan 2012 09:26:35 +0000
Subject: [PATCH] Fixed a bug in the parallelization when sendDofs and
 recvDofs are empty for some ranks.
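
When a rank shares an interior boundary with a neighbour but no DOFs are
actually exchanged across it, sendDofs or recvDofs could end up holding an
empty DOF container for that neighbour. The subsequent exchange of the new
global DOF indices via StdMpi then set up communication for these empty
entries, which broke the parallel run for such partitionings. The fix erases
all empty entries from sendDofs and recvDofs directly after they have been
filled.

For reference, a minimal self-contained sketch (not AMDiS code; the map type
and sample data are made up for illustration) of the erase-while-iterating
idiom used for this cleanup:

    // Minimal sketch, not AMDiS code: drop map entries whose value is an
    // empty vector while iterating (pre-C++11 safe via post-increment).
    #include <iostream>
    #include <map>
    #include <vector>

    int main()
    {
      std::map<int, std::vector<int> > dofsPerRank;   // hypothetical data
      dofsPerRank[0] = std::vector<int>(3, 1);        // rank 0: 3 DOFs
      dofsPerRank[1] = std::vector<int>();            // rank 1: empty
      dofsPerRank[2] = std::vector<int>(2, 7);        // rank 2: 2 DOFs

      std::map<int, std::vector<int> >::iterator it = dofsPerRank.begin();
      while (it != dofsPerRank.end()) {
        if (it->second.empty())
          dofsPerRank.erase(it++);   // erase current entry, advance first
        else
          ++it;
      }

      std::cout << "remaining ranks: " << dofsPerRank.size() << std::endl; // 2
      return 0;
    }

The same pattern is applied verbatim to sendDofs and recvDofs in
MeshDistributor.cc below.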

---
 AMDiS/src/parallel/MeshDistributor.cc | 36 +++++++++++++++++++++++----
 AMDiS/src/parallel/ParallelDebug.cc   |  2 +-
 AMDiS/src/parallel/StdMpi.h           |  2 +-
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index cb6ef646..e894e2a5 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -352,11 +352,11 @@ namespace AMDiS {
     if (feSpace != NULL) {
       vector<FiniteElemSpace*> feSpaces = probStat->getFeSpaces();
       for (unsigned int i = 0; i < feSpaces.size(); i++) {
-	MSG("MESH %p <-> %p   BF %p <-> %p\n",
-	    feSpace->getMesh(),
-	    feSpaces[i]->getMesh(),
-	    feSpace->getBasisFcts(),
-	    feSpaces[i]->getBasisFcts());
+// 	MSG("MESH %p <-> %p   BF %p <-> %p\n",
+// 	    feSpace->getMesh(),
+// 	    feSpaces[i]->getMesh(),
+// 	    feSpace->getBasisFcts(),
+// 	    feSpaces[i]->getBasisFcts());
 	TEST_EXIT(feSpace == feSpaces[i])
 	  ("Parallelizaton is not supported for multiple FE spaces!\n");
       }
@@ -906,6 +906,7 @@ namespace AMDiS {
     stdMpi.send(sendCodes);
     for (RankToBoundMap::iterator it = allBound.begin(); it != allBound.end(); ++it)
       stdMpi.recv(it->first);
+    
     stdMpi.startCommunication();
  
     // === Compare received mesh structure codes. ===
@@ -1779,6 +1780,29 @@ namespace AMDiS {
 	it->rankObj.el->getAllDofs(feSpace, it->rankObj, 
 				   recvDofs[it.getRank()]);
     }
+
+
+    // === Delete all empty DOF send and recv positions ===
+
+    {
+      RankToDofContainer::iterator it = sendDofs.begin();
+      while (it != sendDofs.end()) {
+	if (it->second.size() == 0)
+	  sendDofs.erase(it++);
+	else
+	  ++it;
+      }
+    }
+
+    {
+      RankToDofContainer::iterator it = recvDofs.begin();
+      while (it != recvDofs.end()) {
+	if (it->second.size() == 0)
+	  recvDofs.erase(it++);
+	else
+	  ++it;
+      }
+    }
   }
 
 
@@ -1855,12 +1879,14 @@ namespace AMDiS {
     StdMpi<vector<DegreeOfFreedom> > stdMpi(mpiComm, false);
     for (RankToDofContainer::iterator sendIt = sendDofs.begin();
 	 sendIt != sendDofs.end(); ++sendIt) {
+
       stdMpi.getSendData(sendIt->first).resize(0);
       stdMpi.getSendData(sendIt->first).reserve(sendIt->second.size());
       for (DofContainer::iterator dofIt = sendIt->second.begin();
 	   dofIt != sendIt->second.end(); ++dofIt)
 	stdMpi.getSendData(sendIt->first).push_back(rankDofsNewGlobalIndex[*dofIt]);
     }
+
     stdMpi.updateSendDataSize();
     stdMpi.recv(recvDofs);
     stdMpi.startCommunication();
diff --git a/AMDiS/src/parallel/ParallelDebug.cc b/AMDiS/src/parallel/ParallelDebug.cc
index 2973cf97..6967bf6e 100644
--- a/AMDiS/src/parallel/ParallelDebug.cc
+++ b/AMDiS/src/parallel/ParallelDebug.cc
@@ -728,7 +728,7 @@ namespace AMDiS {
 
     int tmp = 0;
     Parameters::get("parallel->debug->print boundary info", tmp);
-    if (tmp <= 1)
+    if (tmp <= 0)
       return;
 
     for (InteriorBoundary::iterator it(pdb.myIntBoundary); !it.end(); ++it) {
diff --git a/AMDiS/src/parallel/StdMpi.h b/AMDiS/src/parallel/StdMpi.h
index 3c5eda1c..de677c86 100644
--- a/AMDiS/src/parallel/StdMpi.h
+++ b/AMDiS/src/parallel/StdMpi.h
@@ -318,7 +318,7 @@ namespace AMDiS {
     {
       for (typename map<int, SendT>::iterator it = sendData.begin(); 
 	   it != sendData.end(); ++it)
-	sendDataSize[it->first] = StdMpiHelper<SendT>::getBufferSize(it->second);            
+	sendDataSize[it->first] = StdMpiHelper<SendT>::getBufferSize(it->second);
     }
 
 
-- 
GitLab