diff --git a/AMDiS/src/parallel/MpiHelper.cc b/AMDiS/src/parallel/MpiHelper.cc
index 629fa2cfb75ffa8b3cce1585c6141ed0c50f10a2..1f315256f0435a5d2f1a1b596f0a46be9da352c6 100644
--- a/AMDiS/src/parallel/MpiHelper.cc
+++ b/AMDiS/src/parallel/MpiHelper.cc
@@ -35,6 +35,12 @@ namespace AMDiS {
       double valCopy = value;
       MPI::COMM_WORLD.Allreduce(&valCopy, &value, 1, MPI_DOUBLE, MPI_MIN);
     }
+
+    void globalMin(int &value)
+    {
+      int valCopy = value;
+      MPI::COMM_WORLD.Allreduce(&valCopy, &value, 1, MPI_INT, MPI_MIN);
+    }
     
     void globalMax(double &value)
     {
@@ -42,6 +48,12 @@ namespace AMDiS {
       MPI::COMM_WORLD.Allreduce(&valCopy, &value, 1, MPI_DOUBLE, MPI_MAX);
     }
 
+    void globalMax(int &value)
+    {
+      int valCopy = value;
+      MPI::COMM_WORLD.Allreduce(&valCopy, &value, 1, MPI_INT, MPI_MAX);
+    }
+
   }
 }
 
diff --git a/AMDiS/src/parallel/MpiHelper.h b/AMDiS/src/parallel/MpiHelper.h
index de15d0395d0b680193eebe51b271b1d5f2f014cf..e178af3e266f701bb10e3374eb0bb98073662676 100644
--- a/AMDiS/src/parallel/MpiHelper.h
+++ b/AMDiS/src/parallel/MpiHelper.h
@@ -38,8 +38,12 @@ namespace AMDiS {
 
     void globalMin(double &value);
 
+    void globalMin(int &value);
+
     void globalMax(double &value);
 
+    void globalMax(int &value);
+
     inline void startRand()
     {
       srand(time(NULL) * (MPI::COMM_WORLD.Get_rank() + 1));
diff --git a/AMDiS/src/parallel/ParMetisPartitioner.cc b/AMDiS/src/parallel/ParMetisPartitioner.cc
index f8790db344a64159a592664bc9ad74143bd35629..b1f889000eb596b15f0a522f4caa9333506ca934 100644
--- a/AMDiS/src/parallel/ParMetisPartitioner.cc
+++ b/AMDiS/src/parallel/ParMetisPartitioner.cc
@@ -331,8 +331,52 @@ namespace AMDiS {
 
     // === Scale element weights. ===
 
-    for (int i = 0; i < nElements; i++)
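+    // Collect global min/max/sum of the scaled weights for the debug output below.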
+    int smin = 9999999;
+    int smax = 0;
+    int ssum = 0;
+
+    for (int i = 0; i < nElements; i++) {
       wgts[i] = static_cast<int>(floatWgts[i] * scale);
+      smin = std::min(smin, wgts[i]);
+      smax = std::max(smax, wgts[i]);
+      ssum += wgts[i];
+    }
+
+    mpi::globalMin(smin);
+    mpi::globalMax(smax);
+    mpi::globalAdd(ssum);
+
+    MSG("Scaled weights:   SMIN = %d   SMAX = %d   SSUM = %d\n", smin, smax, ssum);
+
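+    // kpart is the average weight per rank. All element weights are clamped to
+    // kpartMax, the largest weight that is still below this average.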
+    int kpart = ssum / mpiSize;
+    int kpartMax = 0;
+    for (int i = 0; i < nElements; i++)
+      if (wgts[i] < kpart)
+	kpartMax = std::max(kpartMax, wgts[i]);
+
+    mpi::globalMax(kpartMax);
+
+    MSG("KPART MAX: %d\n", kpartMax);
+
+    smin = 9999999;
+    smax = 0;
+    ssum = 0;
+
+    for (int i = 0; i < nElements; i++) {
+      wgts[i] = std::min(wgts[i], kpartMax);
+      smin = std::min(smin, wgts[i]);
+      smax = std::max(smax, wgts[i]);
+      ssum += wgts[i];
+    }
+
+    mpi::globalMin(smin);
+    mpi::globalMax(smax);
+    mpi::globalAdd(ssum);
+
+    MSG("Clamped weights:  SMIN = %d   SMAX = %d   SSUM = %d\n", smin, smax, ssum);
 
 
     // === Start ParMETIS. ===
@@ -359,6 +403,8 @@
       break;
     case ADAPTIVE_REPART:
       {
+	//	parMetisGraph.print();
+
 	std::vector<int> vsize(nElements);
 	for (int i = 0; i < nElements; i++)
 	  vsize[i] = static_cast<int>(floatWgts[i]);
@@ -407,7 +453,64 @@
 
     // === Distribute new partition data. ===
 
-    return distributePartitioning(&(part[0]));
+    bool b = distributePartitioning(&(part[0]));
+
+    if (!b) {
+      MSG("RETRY ParMETIS!\n");
+
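+      // Retry the adaptive repartitioning with increasingly relaxed load
+      // imbalance tolerances (ubvec) until the result can be distributed.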
+      std::vector<float> testub(14);
+      
+      testub[0] = 1.001;
+      testub[1] = 1.01;
+      testub[2] = 1.02;
+      testub[3] = 1.03;
+      testub[4] = 1.04;
+      testub[5] = 1.06;
+      testub[6] = 1.07;
+      testub[7] = 1.08;
+      testub[8] = 1.09;
+      testub[9] = 1.1;
+      testub[10] = 1.25;
+      testub[11] = 1.5;
+      testub[12] = 2.0;
+      testub[13] = 2.5;
+
+      for (unsigned int jj = 0; jj < testub.size(); jj++) {
+	ubvec = testub[jj];
+
+	std::vector<int> vsize(nElements);
+	for (int i = 0; i < nElements; i++)
+	  vsize[i] = static_cast<int>(floatWgts[i]);
+
+	ParMETIS_V3_AdaptiveRepart(parMetisMesh->getElementDist(),
+				   parMetisGraph.getXAdj(),
+				   parMetisGraph.getAdjncy(),
+				   &(wgts[0]),
+				   NULL,
+				   &(vsize[0]),
+				   &wgtflag,
+				   &numflag,
+				   &ncon,
+				   &nparts,
+				   &(tpwgts[0]),
+				   &ubvec,
+				   &itr,
+				   options,
+				   &edgecut,
+				   &(part[0]),
+				   &tmpComm);
+
+	b = distributePartitioning(&(part[0]));
+	MSG("ParMETIS retry with ubvec = %f: success = %d\n", ubvec, b);
+	// Stop retrying as soon as the partitioning could be distributed.
+	if (b)
+	  break;
+      }
+    }
+
+    return b;
   }