diff --git a/AMDiS/src/reinit/HL_SignedDistTraverse.cc b/AMDiS/src/reinit/HL_SignedDistTraverse.cc
index 23f8cf75cd8fa874997e7739d2c0a6e59a1ed386..278818b1b1821dcc86e41ccc12bc2d6e3d37be86 100644
--- a/AMDiS/src/reinit/HL_SignedDistTraverse.cc
+++ b/AMDiS/src/reinit/HL_SignedDistTraverse.cc
@@ -109,6 +109,12 @@ void HL_SignedDistTraverse::initializeBoundary()
     
     elInfo = stack.traverseNext(elInfo);
   }  // end of: mesh traverse 
+  
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+  // In parallel AMDiS, synchronize bound_DOF with the max_assigner and sD_DOF with the
+  // min_to_zero_assigner on the interior domain boundaries.
+  AMDiS::Parallel::MeshDistributor::globalMeshDistributor->synchVector(*bound_DOF, max_assigner());
+  AMDiS::Parallel::MeshDistributor::globalMeshDistributor->synchVector(*sD_DOF, min_to_zero_assigner());
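+  // With the max_assigner a shared DOF ends up with the largest value any rank computed for it;
+  // with the min_to_zero_assigner it ends up with the value of smallest magnitude, i.e. the
+  // distance candidate closest to the interface.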
+#endif
 }
 
 
@@ -123,6 +129,11 @@ void HL_SignedDistTraverse::HL_updateIteration()
   sDOld_DOF = new DOFVector<double>(feSpace, "sDOld_DOF");
   sDOld_DOF->copy(const_cast<DOFVector<double> &>(*sD_DOF));
   
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+  // Update sDOld_DOF on interior domain boundaries
+  AMDiS::Parallel::MeshDistributor::globalMeshDistributor->synchVector(*sDOld_DOF, min_to_zero_assigner());
+#endif
+
   // ===== Gauss-Seidel or Jacobi iteration ? =====
   if (GaussSeidelFlag)
     update_DOF = sD_DOF;  
@@ -147,8 +158,23 @@ void HL_SignedDistTraverse::HL_updateIteration()
       elInfo = stack.traverseNext(elInfo);
     }
     
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+    // Update sD_DOF on interior domain boundaries
+    AMDiS::Parallel::MeshDistributor::globalMeshDistributor->synchVector(*sD_DOF, min_to_zero_assigner());
+#endif
+
     // ===== Is tolerance reached ? =====
     tol_reached = checkTol();
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+    // Convert the bool to an int so that MPI_INT can be used in the MPI_Allreduce() call below.
+    int int_tol_reached = (tol_reached ? 1 : 0);
+    int int_tol_reached_overall;
+    // MPI_MIN is used in the MPI_Allreduce() call so that every processor runs the iteration again
+    // until all processors have reached the tolerance. Otherwise some ranks would leave the loop
+    // early and the collective synchVector() call above would fail.
+    MPI_Allreduce(&int_tol_reached, &int_tol_reached_overall, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
+    tol_reached = (int_tol_reached_overall != 0);
+#endif
     
     sDOld_DOF->copy(const_cast<DOFVector<double> &>(*sD_DOF));
   }
diff --git a/AMDiS/src/reinit/HL_SignedDistTraverse.h b/AMDiS/src/reinit/HL_SignedDistTraverse.h
index 90950aafddc59305e0fcc928be917d9d8691f1d6..57acb6d0438c93642d1cbe61a64cc3351fc1f0c6 100644
--- a/AMDiS/src/reinit/HL_SignedDistTraverse.h
+++ b/AMDiS/src/reinit/HL_SignedDistTraverse.h
@@ -39,6 +39,32 @@ namespace reinit {
 
 using namespace AMDiS;
 
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+  /**
+   * Functor which assigns the maximum of a and b to a.
+   * (Used for synchronizing DOFVectors on interior boundaries in parallel AMDiS)
+   */
+  struct max_assigner: FunctorBase
+  {
+    int getDegree(int d0) const { return d0; }
+    static void eval(double& v0, const double& v1) { v0 = std::max(v0, v1); }
+    void operator()(double &v0, const double& v1) { eval(v0, v1); }
+  };
+#endif
+
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+  /**
+   * Functor which assigns to a whichever of a and b is closer to 0 (smaller absolute value).
+   * (Used for synchronizing DOFVectors on interior boundaries in parallel AMDiS)
+   */
+  struct min_to_zero_assigner: FunctorBase
+  {
+    int getDegree(int d0) const { return d0; }
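+    // Keep whichever of the two values has the smaller absolute value, i.e. the one closer to zero.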
+    static void eval(double& v0, const double& v1) { v0 = (std::abs(v0) < std::abs(v1) ? (v0) : (v1)); }
+    void operator()(double &v0, const double& v1) { eval(v0, v1); }
+  };
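+  // Both functors are meant to be passed as assigners to MeshDistributor::synchVector(), e.g.
+  //   AMDiS::Parallel::MeshDistributor::globalMeshDistributor->synchVector(*sD_DOF, min_to_zero_assigner());
+  // (see HL_SignedDistTraverse.cc)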
+#endif
+
 class HL_SignedDistTraverse : public HL_SignedDist
 {
 public: