From a1b5b1b92313bdc52f7c6282c01d6ac91439a006 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Fri, 10 Jul 2009 08:55:04 +0000
Subject: [PATCH] Several bug fixes for the ParallelDomainVec parallelization.

Make the PETSc solution vector size check account for nComponents, make the
nComponents factor explicit when sizing and sending the per-rank MPI exchange
buffers, relax the coordinate comparison tolerance from 1e-14 to 1e-13, and
re-enable ParallelDomainVec::solve() by removing the surrounding "#if 0"
block.
---
 AMDiS/src/ParallelDomainBase.cc | 20 +++++++++++---------
 AMDiS/src/ParallelDomainVec.cc  |  5 +----
 2 files changed, 12 insertions(+), 13 deletions(-)
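
Note for reviewers (not part of the commit; placed above the diff so it is
ignored when the patch is applied): the ParallelDomainBase.cc hunks make the
nComponents factor explicit wherever per-rank data is exchanged: the local
PETSc solution vector holds nRankDOFs * nComponents entries, and each send
buffer packs all nComponents solution components of every DOF shared with a
neighbouring rank. Below is a minimal, self-contained sketch of that packing
step, with hypothetical names (packAndSend, components, dofIndices) and the
plain C MPI API instead of the MPI C++ bindings used in AMDiS:

  #include <mpi.h>
  #include <vector>

  // components[j] holds solution component j for all local DOFs;
  // dofIndices lists the DOFs shared with one neighbouring rank.
  void packAndSend(const std::vector<std::vector<double> > &components,
                   const std::vector<int> &dofIndices,
                   int destRank, MPI_Comm comm,
                   std::vector<double> &sendBuffer, MPI_Request &request)
  {
    int nComponents = static_cast<int>(components.size());
    int nSendDOFs = static_cast<int>(dofIndices.size());

    // nComponents values per shared DOF, hence nSendDOFs * nComponents entries.
    sendBuffer.resize(nSendDOFs * nComponents);

    int counter = 0;
    for (int j = 0; j < nComponents; j++)
      for (int k = 0; k < nSendDOFs; k++)
        sendBuffer[counter++] = components[j][dofIndices[k]];

    // The message length must match the packed size, again nSendDOFs * nComponents.
    MPI_Isend(&sendBuffer[0], nSendDOFs * nComponents, MPI_DOUBLE,
              destRank, 0, comm, &request);
  }

The caller has to keep sendBuffer alive until the request completes, just as
the AMDiS code keeps its per-rank sendBuffers around until the nonblocking
requests have been waited on.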

diff --git a/AMDiS/src/ParallelDomainBase.cc b/AMDiS/src/ParallelDomainBase.cc
index 5560dc0a..50ef534f 100644
--- a/AMDiS/src/ParallelDomainBase.cc
+++ b/AMDiS/src/ParallelDomainBase.cc
@@ -21,7 +21,7 @@
 namespace AMDiS {
 
   PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
-  {
+  {    
     if (iter % 100 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
       std::cout << "  Iteration " << iter << ": " << rnorm << std::endl;
 
@@ -381,7 +381,8 @@ namespace AMDiS {
 #if (DEBUG != 0)
     int size = 0;
     VecGetLocalSize(petscSolVec, &size);
-    TEST_EXIT(size == nRankDOFs)("Vector and rank DOFs does not fit together!\n");
+    TEST_EXIT(size == nRankDOFs * nComponents)
+      ("Vector and rank DOFs do not fit together!\n");
 #endif
 
     PetscScalar *vecPointer;
@@ -395,6 +396,7 @@ namespace AMDiS {
 
     VecRestoreArray(petscSolVec, &vecPointer);
 
+
     std::vector<double*> sendBuffers(sendDofs.size());
     std::vector<double*> recvBuffers(recvDofs.size());
 
@@ -404,8 +406,8 @@ namespace AMDiS {
     int i = 0;
     for (RankToDofContainer::iterator sendIt = sendDofs.begin();
 	 sendIt != sendDofs.end(); ++sendIt, i++) {
-      int nSendDOFs = sendIt->second.size() * nComponents;
-      sendBuffers[i] = new double[nSendDOFs];
+      int nSendDOFs = sendIt->second.size();
+      sendBuffers[i] = new double[nSendDOFs * nComponents];
 
       int counter = 0;
       for (int j = 0; j < nComponents; j++) {
@@ -414,8 +416,8 @@ namespace AMDiS {
 	  sendBuffers[i][counter++] = (*dofvec)[*((sendIt->second)[k])];
       }
 
-      request[requestCounter++] =
-	mpiComm.Isend(sendBuffers[i], nSendDOFs, MPI_DOUBLE, sendIt->first, 0);
+      request[requestCounter++] = mpiComm.Isend(sendBuffers[i], nSendDOFs * nComponents,
+						MPI_DOUBLE, sendIt->first, 0);
     }
 
     i = 0;
@@ -439,8 +441,8 @@ namespace AMDiS {
       int counter = 0;
       for (int j = 0; j < nComponents; j++) {
 	DOFVector<double> *dofvec = vec->getDOFVector(j);
-	for (int k = 0; k < nRecvDOFs; k++)
-	  (*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
+ 	for (int k = 0; k < nRecvDOFs; k++)
+ 	  (*dofvec)[*(recvIt->second)[k]] = recvBuffers[i][counter++];
       }
 
       delete [] recvBuffers[i];
@@ -1494,7 +1496,7 @@ namespace AMDiS {
     for (RankToCoords::iterator it = sendCoords.begin(); it != sendCoords.end(); ++it)
       delete [] sendCoordsBuffer[it->first];
 
-    double eps = 1e-14;
+    double eps = 1e-13;
 
     for (RankToCoords::iterator it = recvCoords.begin(); it != recvCoords.end(); ++it) {
       for (int i = 0; i < static_cast<int>(it->second.size()); i++) {
diff --git a/AMDiS/src/ParallelDomainVec.cc b/AMDiS/src/ParallelDomainVec.cc
index 6e79a466..d6e234d0 100644
--- a/AMDiS/src/ParallelDomainVec.cc
+++ b/AMDiS/src/ParallelDomainVec.cc
@@ -38,12 +38,11 @@ namespace AMDiS {
 	  probVec->getSystemMatrix(i, j)->setRankDofs(isRankDof);
   }
 
+
   void ParallelDomainVec::solve()
   {
     FUNCNAME("ParallelDomainVec::solve()");
 
-#if 0
-
 #ifdef _OPENMP
     double wtime = omp_get_wtime();
 #endif
@@ -59,8 +58,6 @@ namespace AMDiS {
 #else
     INFO(info, 8)("solution of discrete system needed %.5f seconds\n",
 		   TIME_USED(first, clock()));
-#endif
-
 #endif
   }
 
-- 
GitLab