From 0498e5fc42f56fece8c5b512ad6bd3e0988af9f0 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Tue, 14 Feb 2012 12:57:00 +0000
Subject: [PATCH] On the way to an efficient FETI-DP implementation for mixed
 finite elements.

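This commit prepares the FETI-DP solver for several finite element
spaces: the GlobalDofMap member functions move to a new file
FeSpaceMapping.cc, the functions createPrimals(), createDuals(),
createLagrange() and createIndexB() now work on one FE space at a time,
the counters nRankB/nOverallB/rStartB are replaced by the per-FE-space
localDofMap, and FeSpaceData gains getRankDofs(), getOverallDofs() and
getStartDofs() to accumulate DOF counts over a set of FE spaces.

As a rough sketch of where this is heading (most call sites below still
scale a single FE space by nComponents for now), the accumulated counts
are intended to be used along these lines:

    vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
    VecCreateMPI(PETSC_COMM_WORLD,
                 localDofMap.getRankDofs(feSpaces),
                 localDofMap.getOverallDofs(feSpaces),
                 &f_b);
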
---
 AMDiS/CMakeLists.txt                  |   1 +
 AMDiS/src/parallel/FeSpaceMapping.cc  |  49 ++++++++
 AMDiS/src/parallel/FeSpaceMapping.h   |  61 ++++++----
 AMDiS/src/parallel/PetscSolverFeti.cc | 155 +++++++++++++-------------
 AMDiS/src/parallel/PetscSolverFeti.h  |  20 ++--
 5 files changed, 170 insertions(+), 116 deletions(-)
 create mode 100644 AMDiS/src/parallel/FeSpaceMapping.cc

diff --git a/AMDiS/CMakeLists.txt b/AMDiS/CMakeLists.txt
index 8524b8e5..9ebae66d 100644
--- a/AMDiS/CMakeLists.txt
+++ b/AMDiS/CMakeLists.txt
@@ -225,6 +225,7 @@ if(ENABLE_PARALLEL_DOMAIN)
                	${SOURCE_DIR}/parallel/DofComm.cc
 		${SOURCE_DIR}/parallel/CheckerPartitioner.cc
 		${SOURCE_DIR}/parallel/ElementObjectDatabase.cc
+		${SOURCE_DIR}/parallel/FeSpaceMapping.cc
 		${SOURCE_DIR}/parallel/MeshDistributor.cc 
 		${SOURCE_DIR}/parallel/MeshManipulation.cc
 		${SOURCE_DIR}/parallel/MeshPartitioner.cc
diff --git a/AMDiS/src/parallel/FeSpaceMapping.cc b/AMDiS/src/parallel/FeSpaceMapping.cc
new file mode 100644
index 00000000..ebd75d91
--- /dev/null
+++ b/AMDiS/src/parallel/FeSpaceMapping.cc
@@ -0,0 +1,49 @@
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology 
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+
+#include "parallel/FeSpaceMapping.h"
+
+namespace AMDiS {
+
+  using namespace std;
+
+  void GlobalDofMap::clear()
+  {
+    dofMap.clear();
+    nRankDofs = 0;
+    nOverallDofs = 0;
+    rStartDofs = 0;
+  }
+
+
+  void GlobalDofMap::update(bool add)
+  {
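+    // DOFs that are still mapped to -1 have not been assigned a local index
+    // yet; give them consecutive rank-local indices.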
+    for (DofMapping::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
+      if (it->second == -1)
+	it->second = nRankDofs++;
+    
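+    // Compute this rank's offset in the global numbering (rStartDofs) and
+    // the total number of DOFs over all ranks (nOverallDofs).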
+    nOverallDofs = 0;
+    rStartDofs = 0;
+    mpi::getDofNumbering(*mpiComm, nRankDofs, rStartDofs, nOverallDofs);
+    
+    if (add)
+      addOffset(rStartDofs);
+  }
+
+
+  void GlobalDofMap::addOffset(int offset)
+  {
+    for (DofMapping::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
+      it->second += offset;
+  }
+
+}
diff --git a/AMDiS/src/parallel/FeSpaceMapping.h b/AMDiS/src/parallel/FeSpaceMapping.h
index 2344459e..9538b9ee 100644
--- a/AMDiS/src/parallel/FeSpaceMapping.h
+++ b/AMDiS/src/parallel/FeSpaceMapping.h
@@ -20,6 +20,7 @@
 
 /** \file FeSpaceMapping.h */
 
+#include <vector>
 #include <map>
 #include "parallel/MpiHelper.h"
 #include "parallel/ParallelTypes.h"
@@ -40,13 +41,7 @@ namespace AMDiS {
 	rStartDofs(0)
     {}
     
-    void clear()
-    {
-      dofMap.clear();
-      nRankDofs = 0;
-      nOverallDofs = 0;
-      rStartDofs = 0;
-    }
+    void clear();
     
     DegreeOfFreedom operator[](DegreeOfFreedom d)
     {
@@ -61,8 +56,7 @@ namespace AMDiS {
       
       TEST_EXIT_DBG(dofMap.count(dof0) == 0)("Should not happen!\n");
       
-      dofMap[dof0] = (dof1 >= 0 ? dof1 : nRankDofs);
-      nRankDofs++;
+      dofMap[dof0] = dof1;
     }
     
     void insert(DegreeOfFreedom dof0, DegreeOfFreedom dof1)
@@ -89,21 +83,9 @@ namespace AMDiS {
       return dofMap;
     }
     
-    void update(bool add = true)
-    {
-      nOverallDofs = 0;
-      rStartDofs = 0;
-      mpi::getDofNumbering(*mpiComm, nRankDofs, rStartDofs, nOverallDofs);
-
-      if (add)
-	addOffset(rStartDofs);
-    }
+    void update(bool add = true);
 
-    void addOffset(int offset)
-    {
-      for (DofMapping::iterator it = dofMap.begin(); it != dofMap.end(); ++it)
-	it->second += offset;
-    }
+    void addOffset(int offset);
 
   private:
     MPI::Intracomm* mpiComm;
@@ -147,6 +129,39 @@ namespace AMDiS {
 	data.insert(make_pair(feSpace, T(mpiComm)));
     }    
 
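+    /// Returns the number of DOFs owned by this rank, summed over all
+    /// given FE spaces.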
+    int getRankDofs(vector<const FiniteElemSpace*> &feSpaces)
+    {
+      int result = 0;
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
+	TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
+	result += data.find(feSpaces[i])->second.nRankDofs;
+      }
+
+      return result;
+    }
+
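+    /// Returns the overall (global) number of DOFs, summed over all
+    /// given FE spaces.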
+    int getOverallDofs(vector<const FiniteElemSpace*> &feSpaces)
+    {
+      int result = 0;
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
+	TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
+	result += data.find(feSpaces[i])->second.nOverallDofs;
+      }
+
+      return result;
+    }
+
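+    /// Returns the sum of this rank's start offsets (rStartDofs) over all
+    /// given FE spaces.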
+    int getStartDofs(vector<const FiniteElemSpace*> &feSpaces)
+    {
+      int result = 0;
+      for (unsigned int i = 0; i < feSpaces.size(); i++) {
+	TEST_EXIT_DBG(data.count(feSpaces[i]))("Should not happen!\n");
+	result += data.find(feSpaces[i])->second.rStartDofs;
+      }
+
+      return result;
+    }
+
   private:
     MPI::Intracomm* mpiComm;
     
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 853a96d1..3110e994 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -216,22 +216,21 @@ namespace AMDiS {
     TEST_EXIT(meshDistributor->getFeSpace()->getBasisFcts()->getDegree() == 1)
       ("Works for linear basis functions only!\n");
    
-    createPrimals();
+    for (unsigned int i = 0; i < meshDistributor->getFeSpaces().size(); i++) {
+      const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(i);
 
-    createDuals();
-
-    createLagrange();
-    
-    createIndexB();
+      createPrimals(feSpace);      
+      createDuals(feSpace);
+      createLagrange(feSpace);      
+      createIndexB(feSpace);
+    }
   }
 
 
-  void PetscSolverFeti::createPrimals()
+  void PetscSolverFeti::createPrimals(const FiniteElemSpace *feSpace)
   {
     FUNCNAME("PetscSolverFeti::createPrimals()");  
 
-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-
     // === Define all vertices on the interior boundaries of the macro mesh ===
     // === to be primal variables.                                          ===
 
@@ -313,12 +312,10 @@ namespace AMDiS {
   }
 
 
-  void PetscSolverFeti::createDuals()
+  void PetscSolverFeti::createDuals(const FiniteElemSpace *feSpace)
   {
     FUNCNAME("PetscSolverFeti::createDuals()");
 
-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
-    
     // === Create for each dual node that is owned by the rank, the set ===
     // === of ranks that contain this node (denoted by W(x_j)).         ===
 
@@ -377,15 +374,8 @@ namespace AMDiS {
 
     dualDofMap.addFeSpace(feSpace);
 
-    int nRankAllDofs = feSpace->getAdmin()->getUsedDofs();
-    nRankB = nRankAllDofs - primalDofMap[feSpace].size();
-    nOverallB = 0;
-    rStartB = 0;
-    mpi::getDofNumbering(meshDistributor->getMpiComm(),
-			 nRankB, rStartB, nOverallB);
     DofContainer allBoundaryDofs;
     meshDistributor->getAllBoundaryDofs(feSpace, allBoundaryDofs);
-    int nRankInteriorDofs = nRankAllDofs - allBoundaryDofs.size();
 
     for (DofContainer::iterator it = allBoundaryDofs.begin();
 	 it != allBoundaryDofs.end(); ++it)
@@ -393,24 +383,20 @@ namespace AMDiS {
 	dualDofMap[feSpace].insertRankDof(**it);
 
     dualDofMap[feSpace].update(false);
-    dualDofMap[feSpace].addOffset(rStartB + nRankInteriorDofs);
 
     MSG("nRankDuals = %d   nOverallDuals = %d\n",
 	dualDofMap[feSpace].nRankDofs, 
 	dualDofMap[feSpace].nOverallDofs);
-
-    nLocalDuals = dualDofMap[feSpace].size();
   }
 
   
-  void PetscSolverFeti::createLagrange()
+  void PetscSolverFeti::createLagrange(const FiniteElemSpace *feSpace)
   {
     FUNCNAME("PetscSolverFeti::createLagrange()");
 
     // === Reserve for each dual node, on the rank that owns this node, the ===
     // === appropriate number of Lagrange constraints.                      ===
 
-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     lagrangeMap.addFeSpace(feSpace);
 
     int nRankLagrange = 0;
@@ -473,11 +459,10 @@ namespace AMDiS {
   }
 
 
-  void PetscSolverFeti::createIndexB()
+  void PetscSolverFeti::createIndexB(const FiniteElemSpace *feSpace)
   {
     FUNCNAME("PetscSolverFeti::createIndexB()");
 
-    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     localDofMap.addFeSpace(feSpace);
     DOFAdmin* admin = feSpace->getAdmin();
 
@@ -495,23 +480,28 @@ namespace AMDiS {
       }
     }
 
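+    // Number the interior DOFs first so that they occupy the leading
+    // positions of the local B index space; the dual DOFs are appended below.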
+    localDofMap[feSpace].update(false);
+
     TEST_EXIT_DBG(nLocalInterior + 
 		  primalDofMap[feSpace].size() + 
-		  nLocalDuals == 
+		  dualDofMap[feSpace].size() == 
 		  static_cast<unsigned int>(admin->getUsedDofs()))
       ("Should not happen!\n");
 
-
     // === And finally, add the global indices of all dual nodes. ===
 
     for (DofMapping::iterator it = dualDofMap[feSpace].getMap().begin();
 	 it != dualDofMap[feSpace].getMap().end(); ++it)
-      localDofMap[feSpace].insert(it->first, it->second - rStartB);
-      //      localDofMap[feSpace].insertRankDof(it->first);
+      localDofMap[feSpace].insertRankDof(it->first);
+
+    localDofMap[feSpace].update(false);
+
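+    // Shift the dual DOF indices so that they follow this rank's interior
+    // DOFs within the global B numbering.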
+    dualDofMap[feSpace].addOffset(localDofMap[feSpace].rStartDofs + 
+				  nLocalInterior);
   }
 
 
-  void PetscSolverFeti::createMatLagrange()
+  void PetscSolverFeti::createMatLagrange(vector<const FiniteElemSpace*> &feSpaces)
   {
     FUNCNAME("PetscSolverFeti::createMatLagrange()");
 
@@ -520,10 +510,11 @@ namespace AMDiS {
     // === Create distributed matrix for Lagrange constraints. ===
 
     MatCreateMPIAIJ(PETSC_COMM_WORLD,
-		    lagrangeMap[feSpace].nRankDofs * nComponents, 
-		    nRankB * nComponents,
+		    lagrangeMap.getRankDofs(feSpaces),
+		    //		    lagrangeMap[feSpace].nRankDofs * nComponents, 
+		    localDofMap[feSpace].nRankDofs * nComponents,
 		    lagrangeMap[feSpace].nOverallDofs * nComponents, 
-		    nOverallB * nComponents,
+		    localDofMap[feSpace].nOverallDofs * nComponents,
 		    2, PETSC_NULL, 2, PETSC_NULL,
 		    &mat_lagrange);
 
@@ -585,7 +576,7 @@ namespace AMDiS {
       schurPrimalData.fetiSolver = this;
       
       VecCreateMPI(PETSC_COMM_WORLD, 
-		   nRankB * nComponents, nOverallB * nComponents,
+		   localDofMap[feSpace].nRankDofs * nComponents, localDofMap[feSpace].nOverallDofs * nComponents,
 		   &(schurPrimalData.tmp_vec_b));
       VecCreateMPI(PETSC_COMM_WORLD, 
 		   primalDofMap[feSpace].nRankDofs * nComponents, primalDofMap[feSpace].nOverallDofs * nComponents,
@@ -612,8 +603,8 @@ namespace AMDiS {
 
       int nRowsRankPrimal = primalDofMap[feSpace].nRankDofs * nComponents;
       int nRowsOverallPrimal = primalDofMap[feSpace].nOverallDofs * nComponents;
-      int nRowsRankB = nRankB * nComponents;
-      int nRowsOverallB = nOverallB * nComponents;
+      int nRowsRankB = localDofMap[feSpace].nRankDofs * nComponents;
+      int nRowsOverallB = localDofMap[feSpace].nOverallDofs * nComponents;
 
       Mat matBPi;
       MatCreateMPIAIJ(PETSC_COMM_WORLD,
@@ -628,8 +619,8 @@ namespace AMDiS {
 
       map<int, vector<pair<int, double> > > mat_b_primal_cols;
 
-      for (int i = 0; i < nRankB * nComponents; i++) {
-	PetscInt row = rStartB * nComponents + i;
+      for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++) {
+	PetscInt row = localDofMap[feSpace].rStartDofs * nComponents + i;
 	MatGetRow(mat_b_primal, row, &nCols, &cols, &values);
 
 	for (int j = 0; j < nCols; j++)
@@ -647,7 +638,7 @@ namespace AMDiS {
       for (map<int, vector<pair<int, double> > >::iterator it = mat_b_primal_cols.begin();
 	   it != mat_b_primal_cols.end(); ++it) {
 	Vec tmpVec;
-	VecCreateSeq(PETSC_COMM_SELF, nRankB * nComponents, &tmpVec);
+	VecCreateSeq(PETSC_COMM_SELF, localDofMap[feSpace].nRankDofs * nComponents, &tmpVec);
 
  	for (unsigned int i = 0; i < it->second.size(); i++) 
  	  VecSetValue(tmpVec, 
@@ -660,9 +651,9 @@ namespace AMDiS {
 
 	PetscScalar *tmpValues;
 	VecGetArray(tmpVec, &tmpValues);
-	for (int i  = 0; i < nRankB * nComponents; i++)
+	for (int i  = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++)
 	  MatSetValue(matBPi, 
-		      rStartB * nComponents + i,
+		      localDofMap[feSpace].rStartDofs * nComponents + i,
 		      it->first,
 		      tmpValues[i],
 		      ADD_VALUES);
@@ -731,7 +722,7 @@ namespace AMDiS {
     fetiData.ksp_schur_primal = &ksp_schur_primal;
 
     VecCreateMPI(PETSC_COMM_WORLD, 
-		 nRankB * nComponents, nOverallB * nComponents,
+		 localDofMap[feSpace].nRankDofs * nComponents, localDofMap[feSpace].nOverallDofs * nComponents,
 		 &(fetiData.tmp_vec_b));
     VecCreateMPI(PETSC_COMM_WORLD,
 		 lagrangeMap[feSpace].nRankDofs * nComponents, 
@@ -779,7 +770,7 @@ namespace AMDiS {
       fetiDirichletPreconData.ksp_interior = &ksp_interior;
       
       VecCreateMPI(PETSC_COMM_WORLD, 
-		   nRankB * nComponents, nOverallB * nComponents,
+		   localDofMap[feSpace].nRankDofs * nComponents, localDofMap[feSpace].nOverallDofs * nComponents,
 		   &(fetiDirichletPreconData.tmp_vec_b));      
       MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiDirichletPreconData.tmp_vec_duals0));
       MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiDirichletPreconData.tmp_vec_duals1));
@@ -797,7 +788,7 @@ namespace AMDiS {
       fetiLumpedPreconData.mat_duals_duals = &mat_duals_duals;
 
       VecCreateMPI(PETSC_COMM_WORLD, 
-		   nRankB * nComponents, nOverallB * nComponents,
+		   localDofMap[feSpace].nRankDofs * nComponents, localDofMap[feSpace].nOverallDofs * nComponents,
 		   &(fetiLumpedPreconData.tmp_vec_b));
       MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiLumpedPreconData.tmp_vec_duals0));
       MatGetVecs(mat_duals_duals, PETSC_NULL, &(fetiLumpedPreconData.tmp_vec_duals1));
@@ -958,6 +949,8 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::fillPetscMatrix()");   
 
+    vector<const FiniteElemSpace*> feSpaces = getFeSpaces(mat);
+
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
     nComponents = mat->getSize();
 
@@ -971,12 +964,12 @@ namespace AMDiS {
 
     // === Create matrices for the FETI-DP method. ===
 
-    int nRowsRankB = nRankB * nComponents;
-    int nRowsOverallB = nOverallB * nComponents;
+    int nRowsRankB = localDofMap[feSpace].nRankDofs * nComponents;
+    int nRowsOverallB = localDofMap[feSpace].nOverallDofs * nComponents;
     int nRowsRankPrimal = primalDofMap[feSpace].nRankDofs * nComponents;
     int nRowsOverallPrimal = primalDofMap[feSpace].nOverallDofs * nComponents;
     int nRowsInterior = nLocalInterior * nComponents;
-    int nRowsDual = nLocalDuals * nComponents;
+    int nRowsDual = dualDofMap[feSpace].size() * nComponents;
 
     MatCreateSeqAIJ(PETSC_COMM_SELF, nRowsRankB, nRowsRankB, 30, PETSC_NULL,
 		    &mat_b_b);
@@ -1094,7 +1087,7 @@ namespace AMDiS {
 	      // Column is not a primal variable.
 
 	      int colIndex = 
-		(localDofMap[feSpace][col(*icursor)] + rStartB) * nComponents + j;
+		(localDofMap[feSpace][col(*icursor)] + localDofMap[feSpace].rStartDofs) * nComponents + j;
 
 	      if (rowPrimal) {
 		colsOther.push_back(colIndex);
@@ -1157,13 +1150,13 @@ namespace AMDiS {
 	  } else {
 	    int localRowIndex = localDofMap[feSpace][*cursor] * nComponents + i; 
 	    for (unsigned int k = 0; k < cols.size(); k++)
-	      cols[k] -= rStartB * nComponents;
+	      cols[k] -= localDofMap[feSpace].rStartDofs * nComponents;
 	    MatSetValues(mat_b_b, 1, &localRowIndex, cols.size(),
 			 &(cols[0]), &(values[0]), ADD_VALUES);
 
 	    if (colsOther.size()) {
 	      int globalRowIndex = 
-		(localDofMap[feSpace][*cursor] + rStartB) * nComponents + i; 
+		(localDofMap[feSpace][*cursor] + localDofMap[feSpace].rStartDofs) * nComponents + i; 
 	      MatSetValues(mat_b_primal, 1, &globalRowIndex, colsOther.size(),
 			   &(colsOther[0]), &(valuesOther[0]), ADD_VALUES);
 	    }
@@ -1258,7 +1251,7 @@ namespace AMDiS {
 
     // === Create and fill PETSc matrix for Lagrange constraints. ===
 
-    createMatLagrange();
+    createMatLagrange(feSpaces);
 
 
     // === Create solver for the non primal (thus local) variables. ===
@@ -1288,8 +1281,8 @@ namespace AMDiS {
     int nComponents = vec->getSize();
 
     VecCreateMPI(PETSC_COMM_WORLD, 
-		 nRankB * nComponents, 
-		 nOverallB * nComponents, &f_b);
+		 localDofMap[feSpace].nRankDofs * nComponents, 
+		 localDofMap[feSpace].nOverallDofs * nComponents, &f_b);
     VecCreateMPI(PETSC_COMM_WORLD, 
 		 primalDofMap[feSpace].nRankDofs * nComponents, 
 		 primalDofMap[feSpace].nOverallDofs * nComponents, &f_primal);
@@ -1303,7 +1296,7 @@ namespace AMDiS {
 	  double value = *dofIt;
 	  VecSetValues(f_primal, 1, &index, &value, ADD_VALUES);
 	} else {
-	  index = (localDofMap[feSpace][index] + rStartB) * nComponents + i;
+	  index = (localDofMap[feSpace][index] + localDofMap[feSpace].rStartDofs) * nComponents + i;
 	  VecSetValue(f_b, index, *dofIt, INSERT_VALUES);
 	}      
       }
@@ -1321,15 +1314,17 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::solveLocalProblem()");
 
+    const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
+
     Vec tmp;
-    VecCreateSeq(PETSC_COMM_SELF, nRankB * nComponents, &tmp);
+    VecCreateSeq(PETSC_COMM_SELF, localDofMap[feSpace].nRankDofs * nComponents, &tmp);
     PetscScalar *tmpValues;
     VecGetArray(tmp, &tmpValues);
 
     PetscScalar *rhsValues;
     VecGetArray(rhs, &rhsValues);
 
-    for (int i = 0; i < nRankB * nComponents; i++)
+    for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++)
       tmpValues[i] = rhsValues[i];
 
     VecRestoreArray(rhs, &rhsValues);
@@ -1341,7 +1336,7 @@ namespace AMDiS {
     PetscScalar *solValues;
     VecGetArray(sol, &solValues);
 
-    for (int i = 0; i < nRankB * nComponents; i++) 
+    for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++) 
       solValues[i] = tmpValues[i];
 
     VecRestoreArray(sol, &solValues);
@@ -1358,7 +1353,7 @@ namespace AMDiS {
     const FiniteElemSpace *feSpace = meshDistributor->getFeSpace(0);
 
     int nOverallNest = 
-      (nOverallB + 
+      (localDofMap[feSpace].nOverallDofs + 
        primalDofMap[feSpace].nOverallDofs + 
        lagrangeMap[feSpace].nOverallDofs) * nComponents;
 
@@ -1377,8 +1372,8 @@ namespace AMDiS {
     const PetscScalar *vals;
 
     // === mat_b_b ===
-    for (int i = 0; i < nRankB * nComponents; i++) {
-      int rowIndex = rStartB * nComponents + i;
+    for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++) {
+      int rowIndex = localDofMap[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_b_b, i, &nCols, &cols, &vals);
       MatSetValues(A_global, 1, &rowIndex, nCols, cols, vals, ADD_VALUES);
       MatRestoreRow(mat_b_b, rowIndex, &nCols, &cols, &vals);
@@ -1386,8 +1381,8 @@ namespace AMDiS {
 
     PetscScalar *g_f_b;
     VecGetArray(f_b, &g_f_b);
-    for (int i = 0; i < nRankB * nComponents; i++)
-      VecSetValue(A_global_rhs, rStartB * nComponents + i, g_f_b[i], INSERT_VALUES);
+    for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++)
+      VecSetValue(A_global_rhs, localDofMap[feSpace].rStartDofs * nComponents + i, g_f_b[i], INSERT_VALUES);
     VecRestoreArray(f_b, &g_f_b);
 
 
@@ -1398,10 +1393,10 @@ namespace AMDiS {
       int rowIndex = primalDofMap[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_primal_primal, rowIndex, &nCols, &cols, &vals);
 
-      int rowIndexA = nOverallB * nComponents + rowIndex;
+      int rowIndexA = localDofMap[feSpace].nOverallDofs * nComponents + rowIndex;
       vector<int> colsA(nCols);
       for (int j = 0; j < nCols; j++)
-	colsA[j] = nOverallB * nComponents + cols[j];
+	colsA[j] = localDofMap[feSpace].nOverallDofs * nComponents + cols[j];
 
       MatSetValues(A_global, 1, &rowIndexA, nCols, &(colsA[0]), vals, ADD_VALUES);
       MatRestoreRow(mat_primal_primal, rowIndex, &nCols, &cols, &vals);
@@ -1411,20 +1406,20 @@ namespace AMDiS {
     VecGetArray(f_primal, &g_f_primal);
     for (int i = 0; i < primalDofMap[feSpace].nRankDofs * nComponents; i++)
       VecSetValue(A_global_rhs, 
-		  (nOverallB + primalDofMap[feSpace].rStartDofs) * nComponents + i, g_f_primal[i],
+		  (localDofMap[feSpace].nOverallDofs + primalDofMap[feSpace].rStartDofs) * nComponents + i, g_f_primal[i],
 		  INSERT_VALUES);
     VecRestoreArray(f_primal, &g_f_primal);
 
 
     // === mat_b_primal ===
 
-    for (int i = 0; i < nRankB * nComponents; i++) {
-      int rowIndex = rStartB * nComponents + i;
+    for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++) {
+      int rowIndex = localDofMap[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_b_primal, rowIndex, &nCols, &cols, &vals);
 
       vector<int> colsA(nCols);
       for (int j = 0; j < nCols; j++)
-	colsA[j] = nOverallB * nComponents + cols[j];
+	colsA[j] = localDofMap[feSpace].nOverallDofs * nComponents + cols[j];
 
       MatSetValues(A_global, 1, &rowIndex, nCols, &(colsA[0]), vals, ADD_VALUES);
       MatRestoreRow(mat_b_primal, rowIndex, &nCols, &cols, &vals);
@@ -1436,7 +1431,7 @@ namespace AMDiS {
       int rowIndex = primalDofMap[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_primal_b, rowIndex, &nCols, &cols, &vals);
 
-      int rowIndexA = nOverallB * nComponents + rowIndex;
+      int rowIndexA = localDofMap[feSpace].nOverallDofs * nComponents + rowIndex;
 
       MatSetValues(A_global, 1, &rowIndexA, nCols, cols, vals, ADD_VALUES);
       MatRestoreRow(mat_primal_b, rowIndex, &nCols, &cols, &vals);
@@ -1448,7 +1443,7 @@ namespace AMDiS {
       int rowIndex = lagrangeMap[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_lagrange, rowIndex, &nCols, &cols, &vals);
 
-      int rowIndexA = (nOverallB + primalDofMap[feSpace].nOverallDofs) * nComponents + rowIndex;
+      int rowIndexA = (localDofMap[feSpace].nOverallDofs + primalDofMap[feSpace].nOverallDofs) * nComponents + rowIndex;
 
       MatSetValues(A_global, 1, &rowIndexA, nCols, cols, vals, ADD_VALUES);
       MatRestoreRow(mat_lagrange, rowIndex, &nCols, &cols, &vals);
@@ -1456,13 +1451,13 @@ namespace AMDiS {
     
     // === mat_lagrange_transpose ===
 
-    for (int i = 0; i < nRankB * nComponents; i++) {
-      int rowIndex = rStartB * nComponents + i;
+    for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++) {
+      int rowIndex = localDofMap[feSpace].rStartDofs * nComponents + i;
       MatGetRow(mat_lagrange_transpose, rowIndex, &nCols, &cols, &vals);
 
       vector<int> colsA(nCols);
       for (int j = 0; j < nCols; j++)
-	colsA[j] = (nOverallB + primalDofMap[feSpace].nOverallDofs) * nComponents + cols[j];
+	colsA[j] = (localDofMap[feSpace].nOverallDofs + primalDofMap[feSpace].nOverallDofs) * nComponents + cols[j];
 
       MatSetValues(A_global, 1, &rowIndex, nCols, &(colsA[0]), vals, ADD_VALUES);
       MatRestoreRow(mat_lagrange_transpose, rowIndex, &nCols, &cols, &vals);
@@ -1488,12 +1483,12 @@ namespace AMDiS {
     VecDuplicate(f_primal, &u_primal);
 
     vector<PetscInt> localBIndex, globalBIndex;
-    localBIndex.reserve(nRankB * nComponents);
-    globalBIndex.reserve(nRankB * nComponents);
+    localBIndex.reserve(localDofMap[feSpace].nRankDofs * nComponents);
+    globalBIndex.reserve(localDofMap[feSpace].nRankDofs * nComponents);
 
-    for (int i = 0; i < nRankB * nComponents; i++) {
-      localBIndex.push_back(rStartB * nComponents + i);
-      globalBIndex.push_back(rStartB * nComponents + i);
+    for (int i = 0; i < localDofMap[feSpace].nRankDofs * nComponents; i++) {
+      localBIndex.push_back(localDofMap[feSpace].rStartDofs * nComponents + i);
+      globalBIndex.push_back(localDofMap[feSpace].rStartDofs * nComponents + i);
     }
     IS localBIs, globalBIs;
     ISCreateGeneral(PETSC_COMM_SELF,
@@ -1526,7 +1521,7 @@ namespace AMDiS {
 
     for (int i = 0; i < primalDofMap[feSpace].nRankDofs * nComponents; i++) {
       localBIndex.push_back(primalDofMap[feSpace].rStartDofs * nComponents + i);
-      globalBIndex.push_back(nOverallB * nComponents + primalDofMap[feSpace].rStartDofs * nComponents + i);
+      globalBIndex.push_back(localDofMap[feSpace].nOverallDofs * nComponents + primalDofMap[feSpace].rStartDofs * nComponents + i);
     }
     ISCreateGeneral(PETSC_COMM_SELF,
 		    localBIndex.size(),
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 15425922..0b118680 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -75,22 +75,22 @@ namespace AMDiS {
 
     /// Defines which boundary nodes are primal. Creates global index of
     /// the primal variables.
-    void createPrimals();
+    void createPrimals(const FiniteElemSpace *feSpace);
 
     /// Defines the set of dual variables and creates the global index of
     /// dual variables.
-    void createDuals();
+    void createDuals(const FiniteElemSpace *feSpace);
 
     /// Create Lagrange multiplier variables corresponding to the dual 
     /// variables.
-    void createLagrange();
+    void createLagrange(const FiniteElemSpace *feSpace);
 
     /// Creates a global index of the B variables.
-    void createIndexB();
+    void createIndexB(const FiniteElemSpace *feSpace);
 
     /// Creates the Lagrange multiplier constraints and assembles them 
     /// to \ref mat_lagrange.
-    void createMatLagrange();
+    void createMatLagrange(vector<const FiniteElemSpace*> &feSpaces);
 
     /// Creates PETSc KSP solver object for solving the Schur complement
     /// system on the primal variables, \ref ksp_schur_primal
@@ -158,13 +158,10 @@ namespace AMDiS {
     /// constraint that is assigned to this DOF.
     FeSpaceData<GlobalDofMap> lagrangeMap;
     
-    /// Index for each non primal variables to the global index of
+    /// Maps each non-primal DOF to the global index of the
     /// B variables.
     FeSpaceData<GlobalDofMap> localDofMap;
 
-    /// Number of non primal, thus B, variables on rank and globally.
-    int nRankB, nOverallB, rStartB;    
-
     /// Stores, for each dual boundary DOF, the set of ranks that contain
     /// this DOF.
     DofIndexToPartitions boundaryDofRanks;
@@ -232,10 +229,7 @@ namespace AMDiS {
 
     KSP ksp_interior;
 
-    int nLocalInterior;
-    
-    // Number of local nodes that are duals.
-    int nLocalDuals;
+    int nLocalInterior;    
   };
 
 }
-- 
GitLab