diff --git a/AMDiS/src/DOFMatrix.cc b/AMDiS/src/DOFMatrix.cc
index 7b78a901e2e74271fe6ce99f853c38fcbbe785c1..a0061a3d79dfc339ad00f2a8ce0fab334531d6e8 100644
--- a/AMDiS/src/DOFMatrix.cc
+++ b/AMDiS/src/DOFMatrix.cc
@@ -557,7 +557,8 @@ namespace AMDiS {
     for (std::set<int>::iterator it = dirichletDofs.begin(); 
 	 it != dirichletDofs.end(); ++it)
 #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-      if (dofMap->isRankDof(*it))
+      //      if (dofMap->isRankDof(*it))
+      if (dofMap->isSet(*it))
 #endif
 	ins[*it][*it] = 1.0;    
   }
diff --git a/AMDiS/src/parallel/BddcMlSolver.cc b/AMDiS/src/parallel/BddcMlSolver.cc
index 54d69bbcac3cf60ce8de32bb0453e3760e74a4de..5629c595edfbbdaef5d8ef217440968e41ab475c 100644
--- a/AMDiS/src/parallel/BddcMlSolver.cc
+++ b/AMDiS/src/parallel/BddcMlSolver.cc
@@ -100,10 +100,10 @@ namespace AMDiS {
     MSG("ndof = %d\n", ndof);
 
     // space dimension
-    int ndim = 2;
+    int ndim = mesh->getDim();
 
     // mesh dimension
-    int meshdim = 2;
+    int meshdim = mesh->getDim();
 
     // global index of subdomain
     int isub = meshDistributor->getMpiRank();
@@ -121,8 +121,10 @@ namespace AMDiS {
 
     MSG("local nnods %d     ndofs %d\n", nnods, ndofs);
 
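+    // Number of vertices per element.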
+    int nVertices = mesh->getGeo(VERTEX);
+
     // Length of array inet
-    int linet = nelems * 3;
+    int linet = nelems * nVertices;
     
     // Local array with indices of nodes on each element
     int inet[linet];
@@ -132,8 +134,8 @@ namespace AMDiS {
 	("Should not happen!\n");
 
       int localElIndex = mapElIndex[elInfo->getElement()->getIndex()];
-      for (int i = 0; i < 3; i++)
-	inet[localElIndex * 3 + i] = elInfo->getElement()->getDof(i, 0);
+      for (int i = 0; i < nVertices; i++)
+	inet[localElIndex * nVertices + i] = elInfo->getElement()->getDof(i, 0);
       elInfo = stack.traverseNext(elInfo);
     }
 
@@ -141,7 +143,7 @@ namespace AMDiS {
     // local array of number of nodes per element
     int nnet[nelems];
     for (int i = 0; i < nelems; i++)
-      nnet[i] = 3;
+      nnet[i] = nVertices;
 
     // local array with number of DOFs per node.
     int nndf[nnods];
@@ -172,7 +174,7 @@ namespace AMDiS {
 
 
     int lxyz1 = nnods;
-    int lxyz2 = 2;
+    int lxyz2 = mesh->getDim();
     // local array with coordinates of nodes
     double xyz[lxyz1 * lxyz2];
     
@@ -185,6 +187,9 @@ namespace AMDiS {
 	  xyz[i * nnods + j] = coordDofs[j][i];
     }
 
+    
+    // === Fill in Dirichlet boundary conditions. ===
+
     // local array of indices denoting dirichlet boundary data
     int ifix[ndofs];
     for (int i = 0; i < ndofs; i++)
@@ -195,6 +200,20 @@ namespace AMDiS {
     for (int i = 0; i < ndofs; i++)
       fixv[i] = 0.0;
 
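+    // Mark Dirichlet DOFs and store their boundary values. The subdomain DOFs
+    // are numbered interleaved by component: index = dof * nComponents + component.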
+    for (int i = 0; i < rhsVec->getSize(); i++) {
+      map<DegreeOfFreedom, double>& dcValues = 
+	rhsVec->getDOFVector(i)->getDirichletValues();
+
+      for (map<DegreeOfFreedom, double>::iterator it = dcValues.begin();
+	   it != dcValues.end(); ++it) {
+	int index = it->first * nComponents + i;
+	TEST_EXIT_DBG(index < ndofs)("Should not happen!\n");
+	ifix[index] = 1;
+	fixv[index] = it->second;
+      }
+    }
+
+
     // local rhs data
     double rhs[ndofs];
     for (int i = 0; i < nComponents; i++) {
@@ -233,6 +252,15 @@ namespace AMDiS {
     
     // Matrix is assembled
     int is_assembled_int = 0;
+
+
+    // User constraints (none are used, so both length parameters are zero)
+    double user_constraints = 0.0;
+   
+    int luser_constraints1 = 0;
+
+    int luser_constraints2 = 0;
+
     /*
     string tmp="";
 
@@ -318,13 +346,18 @@ namespace AMDiS {
 				 &(j_sparse[0]),
 				 &(a_sparse[0]),
 				 &la,
-				 &is_assembled_int);
+				 &is_assembled_int,
+ 				 &user_constraints,
+ 				 &luser_constraints1,
+ 				 &luser_constraints2);
 
 
     int use_defaults_int = 1;
     int parallel_division_int = 1;
     int use_arithmetic_int = 0;
     int use_adaptive_int = 0;
+    int use_user_constraint_int = 0;
+
     // MSG("call to \"bddcml_setup_preconditioner\" with the following arguments (each in one line):\n");
 //     MSG("  %d\n", matrixtype);
 //     MSG("  %d\n", use_defaults_int);
@@ -336,7 +369,8 @@ namespace AMDiS {
 				&use_defaults_int,
 				&parallel_division_int,
 				&use_arithmetic_int,
-				&use_adaptive_int);
+				&use_adaptive_int,
+				&use_user_constraint_int);
 
     int method = 1;
     double tol = 1.e-6;
@@ -410,21 +444,11 @@ namespace AMDiS {
 
     for (cursor_type cursor = begin<row>(dmat->getBaseMatrix()), 
 	   cend = end<row>(dmat->getBaseMatrix()); cursor != cend; ++cursor) {
-      int rowIndex = 
-	dofMap[feSpace][*cursor].global * nComponents + ithRowComponent;
-
       for (icursor_type icursor = begin<nz>(cursor), icend = end<nz>(cursor); 
 	   icursor != icend; ++icursor) {	
-	int colIndex = 
-	  dofMap[feSpace][col(*icursor)].global * nComponents + ithColComponent;
-
-	double val = value(*icursor);
-	
-	// 	i_sparse.push_back(rowIndex);
-	// 	j_sparse.push_back(colIndex);
 	i_sparse.push_back(*cursor * nComponents + ithRowComponent);
 	j_sparse.push_back(col(*icursor) * nComponents + ithColComponent);
-	a_sparse.push_back(val);
+	a_sparse.push_back(value(*icursor));
       }
     }
 
diff --git a/AMDiS/src/parallel/MeshDistributor.cc b/AMDiS/src/parallel/MeshDistributor.cc
index ee172ccbd996ef3587ba921731e656802c5f8cde..8e2bade902a032a424e9e81aed4e9a28c0b9dbf5 100644
--- a/AMDiS/src/parallel/MeshDistributor.cc
+++ b/AMDiS/src/parallel/MeshDistributor.cc
@@ -476,7 +476,7 @@ namespace AMDiS {
     // and to remove the periodic boundary conditions on these objects.
 
     if (initialized) {
-      setRankDofs(probStat);
+      setRankDofs(probStat, dofMap);
       removePeriodicBoundaryConditions(probStat);
     }
   }
@@ -731,7 +731,8 @@ namespace AMDiS {
   }
 
 
-  void MeshDistributor::setRankDofs(ProblemStatSeq *probStat)
+  void MeshDistributor::setRankDofs(ProblemStatSeq *probStat,
+				    ParallelDofMapping &rankDofMap)
   {
     FUNCNAME("MeshDistributor::setRankDofs()");
 
@@ -745,7 +746,7 @@ namespace AMDiS {
 	  TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), rowFeSpace) != feSpaces.end())
 	    ("Should not happen!\n");
 
-	  probStat->getSystemMatrix(i, j)->setDofMap(dofMap[rowFeSpace]);
+	  probStat->getSystemMatrix(i, j)->setDofMap(rankDofMap[rowFeSpace]);
 	}
     
 
@@ -760,8 +761,8 @@ namespace AMDiS {
       TEST_EXIT(find(feSpaces.begin(), feSpaces.end(), feSpace) != feSpaces.end())
 	("Should not happen!\n");
 
-      probStat->getRhsVector(i)->setDofMap(dofMap[feSpace]);
-      probStat->getSolution(i)->setDofMap(dofMap[feSpace]);
+      probStat->getRhsVector(i)->setDofMap(rankDofMap[feSpace]);
+      probStat->getSolution(i)->setDofMap(rankDofMap[feSpace]);
     }    
   }
 
@@ -769,7 +770,7 @@ namespace AMDiS {
   void MeshDistributor::setRankDofs()
   {
     for (unsigned int i = 0; i < problemStat.size(); i++) 
-      setRankDofs(problemStat[i]);
+      setRankDofs(problemStat[i], dofMap);
   }
 
 
diff --git a/AMDiS/src/parallel/MeshDistributor.h b/AMDiS/src/parallel/MeshDistributor.h
index b2e372cd0def7c347ef9b309d64d0bdd7fdf0cf9..139d19354b2db0cc90819fdc8c4abd750f7ed899 100644
--- a/AMDiS/src/parallel/MeshDistributor.h
+++ b/AMDiS/src/parallel/MeshDistributor.h
@@ -347,6 +347,14 @@ namespace AMDiS {
 				    DofComm &dcom,
 				    const FiniteElemSpace *feSpace);
 
+    /// Sets \ref isRankDof to all matrices and rhs vectors in a given 
+    /// stationary problem.
+    void setRankDofs(ProblemStatSeq *probStat, ParallelDofMapping &rankDofMap);
+
+    /// Sets \ref isRankDof to all matrices and rhs vectors in all 
+    /// stationary problems.
+    void setRankDofs();
+
   protected:
     void addProblemStat(ProblemStatSeq *probStat);
 
@@ -396,14 +404,6 @@ namespace AMDiS {
      */
     bool checkAndAdaptBoundary(RankToBoundMap &allBound);
   
-    /// Sets \ref isRankDof to all matrices and rhs vectors in a given 
-    /// stationary problem.
-    void setRankDofs(ProblemStatSeq *probStat);
-
-    /// Sets \ref isRankDof to all matrices and rhs vectors in all 
-    /// stationary problems.
-    void setRankDofs();
-
     /// Removes all periodic boundary condition information from all matrices and
     /// vectors of all stationary problems and from the mesh itself.
     void removePeriodicBoundaryConditions();
diff --git a/AMDiS/src/parallel/ParallelProblemStatBase.cc b/AMDiS/src/parallel/ParallelProblemStatBase.cc
index d32277e2380fe06fc6b39f7ae04a4646fcfebba5..6fd3382fa88cdf80c50b002c7657be1cb6c47bdc 100644
--- a/AMDiS/src/parallel/ParallelProblemStatBase.cc
+++ b/AMDiS/src/parallel/ParallelProblemStatBase.cc
@@ -30,20 +30,6 @@ namespace AMDiS {
     ProblemStatSeq::buildAfterCoarsen(adaptInfo, flag, 
 				      assembleMatrix, 
 				      assembleVector);
-
-#if (DEBUG != 0)
-    double vm, rss;
-    processMemUsage(vm, rss);   
-    vm /= 1024.0;
-    rss /= 1024.0;
-    
-    MSG("My memory usage is VM = %.1f MB    RSS = %.1f MB\n", vm, rss);
-    
-    mpi::globalAdd(vm);
-    mpi::globalAdd(rss);
-
-    MSG("Overall memory usage is VM = %.1f MB    RSS = %.1f MB\n", vm, rss);
-#endif
   }
 
 
diff --git a/AMDiS/src/parallel/PetscProblemStat.cc b/AMDiS/src/parallel/PetscProblemStat.cc
index 73dd638f69ba9203784f5dc4bc17377d80c5a5e8..44d1a01b93cf583afec48f32edab51dc83c511fb 100644
--- a/AMDiS/src/parallel/PetscProblemStat.cc
+++ b/AMDiS/src/parallel/PetscProblemStat.cc
@@ -76,6 +76,24 @@ namespace AMDiS {
   }
 
 
+  void PetscProblemStat::buildAfterCoarsen(AdaptInfo *adaptInfo, Flag flag,
+					   bool assembleMatrix,
+					   bool assembleVector)
+  {
+    FUNCNAME("PetscProblemStat::buildAfterCoarsen()");
+
+    TEST_EXIT_DBG(MeshDistributor::globalMeshDistributor != NULL)
+      ("Should not happen!\n");
+
+    MeshDistributor::globalMeshDistributor->checkMeshChange();
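+    // Set the PETSc solver's interior DOF mapping on all matrices and vectors.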
+    MeshDistributor::globalMeshDistributor->setRankDofs(this, 
+							petscSolver->getInteriorMap());
+    ProblemStatSeq::buildAfterCoarsen(adaptInfo, flag, 
+				      assembleMatrix, 
+				      assembleVector);
+  }
+
+
   void PetscProblemStat::solve(AdaptInfo *adaptInfo,
 			       bool createMatrixData,
 			       bool storeMatrixData)
diff --git a/AMDiS/src/parallel/PetscProblemStat.h b/AMDiS/src/parallel/PetscProblemStat.h
index a844b5c9f68882eba82d4c3498013b65b62bd368..536586489cc9e74d1be3deadb90447b920b462dc 100644
--- a/AMDiS/src/parallel/PetscProblemStat.h
+++ b/AMDiS/src/parallel/PetscProblemStat.h
@@ -49,6 +49,10 @@ namespace AMDiS {
 	delete petscSolver;
     }
 
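+    /// Sets the PETSc solver's interior DOF mapping on all matrices and
+    /// vectors of this problem before the system is assembled.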
+    void buildAfterCoarsen(AdaptInfo *adaptInfo, Flag flag,
+			   bool assembleMatrix,
+			   bool assembleVector);
+
     void initialize(Flag initFlag,
 		    ProblemStatSeq *adoptProblem = NULL,
 		    Flag adoptFlag = INIT_NOTHING);
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index 2dd50a340c6a1db3e40567f9391c72bc39eb822b..064fa73ed84b8b7fd55c8f346f7d54798fec147d 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -85,6 +85,11 @@ namespace AMDiS {
     /// Destroys all vector data structures.
     virtual void destroyVectorData() = 0;
 
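+    /// Returns the DOF mapping of the interior DOFs. By default this is the
+    /// mesh distributor's DOF map; derived solvers may return another mapping
+    /// (e.g., PetscSolverFeti returns its local DOF map).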
+    virtual ParallelDofMapping &getInteriorMap()
+    {
+      return meshDistributor->getDofMap();
+    }
+
     virtual Flag getBoundaryDofRequirement()
     {
       return 0;
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 3dd47123efcf8793e50707d26faa48406bf48b3c..22906e3f3b13e94a32e552d2e2229a01150dfdcb 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -316,6 +316,7 @@ namespace AMDiS {
     DofIndexSet primals;
     for (DofContainerSet::iterator it = vertices.begin(); 
 	 it != vertices.end(); ++it) {
+      
       if (dirichletRows[feSpace].count(**it))
 	continue;
 
@@ -340,9 +341,9 @@ namespace AMDiS {
     // === create local indices of the primals starting at zero.          ===
 
     for (DofIndexSet::iterator it = primals.begin(); it != primals.end(); ++it)
-      if (meshDistributor->getDofMap()[feSpace].isRankDof(*it))
+      if (meshDistributor->getDofMap()[feSpace].isRankDof(*it)) {
 	primalDofMap[feSpace].insertRankDof(*it);
-      else
+      } else
   	primalDofMap[feSpace].insertNonRankDof(*it);
   }
 
@@ -767,7 +768,7 @@ namespace AMDiS {
     MatCreateAIJ(mpiCommGlobal,
 		 nRankEdges, lagrangeMap.getRankDofs(),
 		 nOverallEdges, lagrangeMap.getOverallDofs(),
-		 1, PETSC_NULL, 1, PETSC_NULL, 
+		 2, PETSC_NULL, 2, PETSC_NULL, 
 		 &mat_augmented_lagrange);
     MatSetOption(mat_augmented_lagrange, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
 
@@ -918,9 +919,11 @@ namespace AMDiS {
     if (creationMode == 0) {
       // matK = inv(A_BB) A_BPi
       Mat matK;
+      MSG("START SOLVE!\n");
       petsc_helper::blockMatMatSolve(subdomain->getSolver(), 
 				     subdomain->getMatInteriorCoarse(),
 				     matK);
+      MSG("END SOLVE!\n");
       
       
       // mat_schur_primal = A_PiPi - A_PiB inv(A_BB) A_BPi
@@ -1459,7 +1462,7 @@ namespace AMDiS {
   }
 
 
-  void PetscSolverFeti::dbgMatrix()
+  void PetscSolverFeti::dbgMatrix(Matrix<DOFMatrix*> *mat)
   {
     FUNCNAME("PetscSolverFeti::dbgMatrix()");
 
@@ -1531,6 +1534,37 @@ namespace AMDiS {
       PetscViewerDestroy(&petscView);
     }
 
+    bool checkInteriorMatrix = false;
+    Parameters::get("parallel->debug->check interior matrix",
+		    checkInteriorMatrix);
+    if (checkInteriorMatrix) {
+      int nZeroRows = PetscSolverFetiDebug::testZeroRows(subdomain->getMatInterior());
+      MSG("Interior matrix has %d zero rows!\n", nZeroRows);
+    }
+
+    bool printDirichlet = false;
+    Parameters::get("parallel->debug->print dirichlet information",
+		    printDirichlet);
+    if (printDirichlet) {
+      int nComponents = mat->getSize();
+      for (int i = 0; i < nComponents; i++) {
+	DOFMatrix *seqMat = (*mat)[i][i];
+	if (!seqMat)
+	  continue;
+
+	const FiniteElemSpace *feSpace = seqMat->getRowFeSpace();
+	TEST_EXIT(feSpace)("Should not happen!\n");
+	std::set<DegreeOfFreedom> &dirichletRows = seqMat->getDirichletRows();
+	for (std::set<DegreeOfFreedom>::iterator it = dirichletRows.begin();
+	     it != dirichletRows.end(); ++it) {
+	  if (localDofMap[feSpace].isSet(*it)) {
+	    MSG("Dirichlet dof %d in component %d with interior mat index %d\n",
+		*it, i, localDofMap.getMatIndex(i, *it));
+	  }
+	}
+      }
+    }
+
 
     int writeCoarseMatrix = 0;
     Parameters::get("parallel->debug->write coarse matrix", 
@@ -1744,7 +1778,7 @@ namespace AMDiS {
   
     subdomain->fillPetscMatrix(mat);
 
-    dbgMatrix();
+    dbgMatrix(mat);
 
     if (printTimings) {
       MPI::COMM_WORLD.Barrier();
@@ -1778,7 +1812,7 @@ namespace AMDiS {
     createFetiKsp(feSpaces);
 
     // === If required, run debug tests. ===
-    dbgMatrix();
+    dbgMatrix(mat);
   }
 
 
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index f52cf2532ac51f1a451d8cb896742a9126cea0aa..0404423566484b44343dd3ee8ad11f07e70c77bb 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -163,7 +163,7 @@ namespace AMDiS {
 
     /// In debug modes, this function runs some debug tests on the FETI
     /// matrices. In optimized mode, nothing is done here.
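+    /// The system matrix \p mat is used for additional Dirichlet boundary
+    /// debug output.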
-    void dbgMatrix();
+    void dbgMatrix(Matrix<DOFMatrix*> *mat);
 
     /** \brief
      * Recovers AMDiS solution vector from PETSc's solution vectors of the
@@ -236,6 +236,11 @@ namespace AMDiS {
     /// order of: 1/h^2
     void createH2vec(DOFVector<double> &vec);
 
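+    /// Returns \ref localDofMap, which describes the interior DOFs of the
+    /// FETI-DP subdomain.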
+    virtual ParallelDofMapping &getInteriorMap()
+    {
+      return localDofMap;
+    }
+
   protected:
     /// Mapping from primal DOF indices to a global index of primals.
     ParallelDofMapping primalDofMap;
diff --git a/AMDiS/src/parallel/PetscSolverFetiDebug.cc b/AMDiS/src/parallel/PetscSolverFetiDebug.cc
index b234568d3f922affebfd700e958dfc1aafa8273e..8e90c4e218ea316c6938a494df987302f7e1da6a 100644
--- a/AMDiS/src/parallel/PetscSolverFetiDebug.cc
+++ b/AMDiS/src/parallel/PetscSolverFetiDebug.cc
@@ -529,4 +529,38 @@ namespace AMDiS {
 
     VecDestroy(&rhsVec);
   }
+
+  
+  int PetscSolverFetiDebug::testZeroRows(Mat mat)
+  {
+    FUNCNAME("PetscSolverFetiDebug::testZeroRows()");
+
+    int nZeroRows = 0;
+
+    int firstIndex, lastIndex;
+    MatGetOwnershipRange(mat, &firstIndex, &lastIndex);
+
+    for (int i = firstIndex; i < lastIndex; i++) {
+      PetscInt nCols;
+      const PetscScalar *vals;
+
+      MatGetRow(mat, i, &nCols, PETSC_NULL, &vals);
+      bool nonZeroValues = false;
+      for (int j = 0; j < nCols; j++) {
+	if (vals[j] != 0.0) {
+	  nonZeroValues = true;
+	  break;
+	}
+      }
+
+      if (!nonZeroValues) {
+	MSG("Zero matrix row for global row index %d\n", i);
+	nZeroRows++;
+      }
+
+      MatRestoreRow(mat, i, &nCols, PETSC_NULL, &vals);
+    }
+
+    return nZeroRows;
+  }
 }
diff --git a/AMDiS/src/parallel/PetscSolverFetiDebug.h b/AMDiS/src/parallel/PetscSolverFetiDebug.h
index a74992c12e66f49e30e062dfc3a0935858837722..e151158e98980df37f8be2760f9ca8215b008337 100644
--- a/AMDiS/src/parallel/PetscSolverFetiDebug.h
+++ b/AMDiS/src/parallel/PetscSolverFetiDebug.h
@@ -127,6 +127,16 @@ namespace AMDiS {
      */
     static void debugFeti(PetscSolverFeti &feti, 
 			  Vec dbgRhsVec);
+
+    /** \brief
+     * This function checks a PETSc matrix for zero rows. Each zero row index
+     * is printed to the screen.
+     *
+     * \param[in]  mat    PETSc matrix which should be checked for zero rows.
+     *
+     * \return     int    Number of zero rows in the matrix.
+     */
+    static int testZeroRows(Mat mat);
   };
 
 }