From 169b9d9aa42adcd2530c9733505c61dd7066cf51 Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Wed, 22 Aug 2012 14:52:50 +0000
Subject: [PATCH] Added more features to FETI-DP and split the solver into
 several files.

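The repeated VecCreateMPI() calls for the FETI-DP temporary and coarse-space
vectors are replaced by the new createVec() helpers of ParallelDofMapping.
A minimal sketch of the intended call pattern (the wrapper function and its
parameter names are illustrative only; the mappings and the global interior
size are set up by PetscSolverFeti as in the diff below):

    #include <petsc.h>
    #include "parallel/ParallelDofMapping.h"

    // Creates the two temporary vectors used by the FETI-DP operator:
    // an interior vector embedded into the (possibly larger) subdomain
    // space and a primal coarse-space vector sized by its own mapping.
    static void createFetiTmpVecs(AMDiS::ParallelDofMapping &localDofMap,
                                  AMDiS::ParallelDofMapping &primalDofMap,
                                  int nGlobalOverallInterior,
                                  Vec &tmp_b, Vec &tmp_primal)
    {
      localDofMap.createVec(tmp_b, nGlobalOverallInterior);
      primalDofMap.createVec(tmp_primal);
    }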
---
 AMDiS/CMakeLists.txt                          |   2 +
 AMDiS/src/DOFVector.h                         |   1 +
 AMDiS/src/LeafData.h                          |   6 +-
 AMDiS/src/parallel/DofComm.cc                 |   1 +
 AMDiS/src/parallel/DofComm.h                  |   1 +
 AMDiS/src/parallel/ElementObjectDatabase.cc   |   1 +
 AMDiS/src/parallel/InteriorBoundary.h         |   1 -
 AMDiS/src/parallel/MeshLevelData.h            |   7 +-
 AMDiS/src/parallel/MpiHelper.cc               |   8 +-
 AMDiS/src/parallel/MpiHelper.h                |  13 +-
 .../src/parallel/ParallelCoarseSpaceMatVec.cc |  17 +-
 AMDiS/src/parallel/ParallelDofMapping.cc      |  12 +
 AMDiS/src/parallel/ParallelDofMapping.h       |  39 ++-
 AMDiS/src/parallel/PetscSolver.h              |   8 +-
 AMDiS/src/parallel/PetscSolverFeti.cc         | 301 +++---------------
 AMDiS/src/parallel/PetscSolverFeti.h          |  24 +-
 .../src/parallel/PetscSolverFetiOperators.cc  | 206 ++++++++++++
 AMDiS/src/parallel/PetscSolverFetiOperators.h |  44 +++
 AMDiS/src/parallel/PetscSolverFetiStructs.h   |  13 +-
 AMDiS/src/parallel/PetscSolverFetiTimings.cc  |  31 ++
 AMDiS/src/parallel/PetscSolverFetiTimings.h   |  46 +++
 21 files changed, 449 insertions(+), 333 deletions(-)
 create mode 100644 AMDiS/src/parallel/PetscSolverFetiOperators.cc
 create mode 100644 AMDiS/src/parallel/PetscSolverFetiOperators.h
 create mode 100644 AMDiS/src/parallel/PetscSolverFetiTimings.cc
 create mode 100644 AMDiS/src/parallel/PetscSolverFetiTimings.h
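
Note (not part of the commit message): the FETI-DP timing counters are moved
from PetscSolverFeti.{h,cc} into the new files PetscSolverFetiTimings.{h,cc}.
A minimal sketch of how the static counters are meant to be used around a
timed code section (the wrapper function is illustrative only; the
accumulation pattern matches petscMultMatFeti()):

    #include <mpi.h>
    #include "parallel/PetscSolverFetiTimings.h"

    static void timedFetiApply()
    {
      AMDiS::FetiTimings::reset();              // zero all counters

      double wtime = MPI::Wtime();
      // ... apply the FETI-DP operator here ...
      AMDiS::FetiTimings::fetiSolve += (MPI::Wtime() - wtime);

      // The accumulated times can then be read directly from the static
      // members fetiSolve, fetiSolve01, fetiSolve02 and fetiPreconditioner.
    }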

diff --git a/AMDiS/CMakeLists.txt b/AMDiS/CMakeLists.txt
index f2575d35..a6f4ce88 100644
--- a/AMDiS/CMakeLists.txt
+++ b/AMDiS/CMakeLists.txt
@@ -254,6 +254,8 @@ if(ENABLE_PARALLEL_DOMAIN)
 			${SOURCE_DIR}/parallel/PetscSolver.cc
 			${SOURCE_DIR}/parallel/PetscProblemStat.cc
 			${SOURCE_DIR}/parallel/PetscSolverFeti.cc
+			${SOURCE_DIR}/parallel/PetscSolverFetiOperators.cc
+			${SOURCE_DIR}/parallel/PetscSolverFetiTimings.cc
 			${SOURCE_DIR}/parallel/PetscSolverGlobalMatrix.cc
 			${SOURCE_DIR}/parallel/PetscSolverGlobalBlockMatrix.cc
 			${SOURCE_DIR}/parallel/PetscSolverSchur.cc)
diff --git a/AMDiS/src/DOFVector.h b/AMDiS/src/DOFVector.h
index f8f43606..16a33727 100644
--- a/AMDiS/src/DOFVector.h
+++ b/AMDiS/src/DOFVector.h
@@ -43,6 +43,7 @@
 #include "FiniteElemSpace.h"
 #include "SurfaceQuadrature.h"
 #ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+#include <mpi.h>
 #include "parallel/ParallelDofMapping.h"
 #endif
 
diff --git a/AMDiS/src/LeafData.h b/AMDiS/src/LeafData.h
index 788c7949..4837f655 100644
--- a/AMDiS/src/LeafData.h
+++ b/AMDiS/src/LeafData.h
@@ -67,10 +67,8 @@ namespace AMDiS {
 	errorEstimate(0.0)
     {}
 
-    /** \brief
-     * Refinement of parent to child1 and child2.
-     * @return true: must this ElementData, else not allowed to delete it
-     */
+    /// Refinement of parent to child1 and child2.
+    /// @return true if this ElementData must be deleted, otherwise it is not allowed to delete it.
     bool refineElementData(Element* parent, 
 			   Element* child1,
 			   Element* child2,
diff --git a/AMDiS/src/parallel/DofComm.cc b/AMDiS/src/parallel/DofComm.cc
index c5baa1e1..216094c9 100644
--- a/AMDiS/src/parallel/DofComm.cc
+++ b/AMDiS/src/parallel/DofComm.cc
@@ -12,6 +12,7 @@
 
 #include "parallel/DofComm.h"
 #include "parallel/InteriorBoundary.h"
+#include "parallel/MeshLevelData.h"
 #include "FiniteElemSpace.h"
 
 namespace AMDiS {
diff --git a/AMDiS/src/parallel/DofComm.h b/AMDiS/src/parallel/DofComm.h
index eacea516..c71eded6 100644
--- a/AMDiS/src/parallel/DofComm.h
+++ b/AMDiS/src/parallel/DofComm.h
@@ -23,6 +23,7 @@
 #ifndef AMDIS_DOF_COMM_H
 #define AMDIS_DOF_COMM_H
 
+#include <mpi.h>
 #include <map>
 #include "parallel/ParallelTypes.h"
 #include "FiniteElemSpace.h"
diff --git a/AMDiS/src/parallel/ElementObjectDatabase.cc b/AMDiS/src/parallel/ElementObjectDatabase.cc
index 36ed5e54..cd35fb3b 100644
--- a/AMDiS/src/parallel/ElementObjectDatabase.cc
+++ b/AMDiS/src/parallel/ElementObjectDatabase.cc
@@ -12,6 +12,7 @@
 
 #include "VertexVector.h"
 #include "parallel/ElementObjectDatabase.h"
+#include "parallel/MeshLevelData.h"
 
 namespace AMDiS {
 
diff --git a/AMDiS/src/parallel/InteriorBoundary.h b/AMDiS/src/parallel/InteriorBoundary.h
index c5a211bb..2660253f 100644
--- a/AMDiS/src/parallel/InteriorBoundary.h
+++ b/AMDiS/src/parallel/InteriorBoundary.h
@@ -28,7 +28,6 @@
 
 #include "AMDiS_fwd.h"
 #include "BoundaryObject.h"
-#include "parallel/MeshLevelData.h"
 #include "parallel/ParallelTypes.h"
 
 namespace AMDiS {
diff --git a/AMDiS/src/parallel/MeshLevelData.h b/AMDiS/src/parallel/MeshLevelData.h
index 5b1a0962..66f826ab 100644
--- a/AMDiS/src/parallel/MeshLevelData.h
+++ b/AMDiS/src/parallel/MeshLevelData.h
@@ -20,15 +20,16 @@
 
 /** \file MeshLevelData.h */
 
+#ifndef AMDIS_MESH_LEVEL_DATA_H
+#define AMDIS_MESH_LEVEL_DATA_H
+
+
 #include <iostream>
 #include <set>
 #include <vector>
 #include <mpi.h>
 #include "Global.h"
 
-#ifndef AMDIS_MESH_LEVEL_DATA_H
-#define AMDIS_MESH_LEVEL_DATA_H
-
 namespace AMDiS {
 
   using namespace std;
diff --git a/AMDiS/src/parallel/MpiHelper.cc b/AMDiS/src/parallel/MpiHelper.cc
index 1f315256..008a7c05 100644
--- a/AMDiS/src/parallel/MpiHelper.cc
+++ b/AMDiS/src/parallel/MpiHelper.cc
@@ -10,10 +10,9 @@
 // See also license.opensource.txt in the distribution.
 
 
+#include <mpi.h>
 #include "MpiHelper.h"
 
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-
 namespace AMDiS {
 
   namespace mpi {
@@ -54,7 +53,10 @@ namespace AMDiS {
       MPI::COMM_WORLD.Allreduce(&valCopy, &value, 1, MPI_INT, MPI_MAX);
     }
 
+    void startRand()
+    {
+      srand(time(NULL) * (MPI::COMM_WORLD.Get_rank() + 1));
+    }
   }
 }
 
-#endif
diff --git a/AMDiS/src/parallel/MpiHelper.h b/AMDiS/src/parallel/MpiHelper.h
index 3f0c5c20..a83c4af8 100644
--- a/AMDiS/src/parallel/MpiHelper.h
+++ b/AMDiS/src/parallel/MpiHelper.h
@@ -23,12 +23,10 @@
 #ifndef AMDIS_MPIHELPER_H
 #define AMDIS_MPIHELPER_H
 
-#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
-
-#include "Global.h"
+#include <mpi.h>
 #include <time.h>
 #include <stdlib.h>
-#include <mpi.h>
+#include "Global.h"
 
 namespace AMDiS {
 
@@ -63,10 +61,7 @@ namespace AMDiS {
       WARNING("Unknown type for globalMax. Can not determine maximal value of all processors!\n");
     }
     
-    inline void startRand()
-    {
-      srand(time(NULL) * (MPI::COMM_WORLD.Get_rank() + 1));
-    }
+    void startRand();
 
     /** \brief
      * In many situations a rank computes a number of local DOFs. Then all
@@ -97,5 +92,3 @@ namespace AMDiS {
 }
 
 #endif
-
-#endif
diff --git a/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc b/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
index cb0c67de..19f9f752 100644
--- a/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
+++ b/AMDiS/src/parallel/ParallelCoarseSpaceMatVec.cc
@@ -191,13 +191,8 @@ namespace AMDiS {
 		   nOverallCoarseRows, nOverallCoarseRows,
 		   0, nnz[i + 1][i + 1].dnnz, 0, nnz[i + 1][i + 1].onnz,
 		   &mat[i + 1][i + 1]);
-      MSG("REMOVE THIS!\n");
-      MatSetOption(mat[i + 1][i + 1], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-
-      VecCreateMPI(mpiCommGlobal, nRankCoarseRows, nOverallCoarseRows, 
-		   &vecSol[i + 1]);
-      VecCreateMPI(mpiCommGlobal, nRankCoarseRows, nOverallCoarseRows, 
-		   &vecRhs[i + 1]);
+      cMap->createVec(vecSol[i + 1]);
+      cMap->createVec(vecRhs[i + 1]);
     }
 
     for (int i = 0; i < nCoarseMap + 1; i++) {
@@ -215,18 +210,12 @@ namespace AMDiS {
 		     nRowsRankMat, nColsRankMat,
 		     nRowsOverallMat, nColsOverallMat,
 		     0, nnz[i][j].dnnz, 0, nnz[i][j].onnz,
-		     &mat[i][j]);
-	MSG("REMOVE THIS!\n");
-	MatSetOption(mat[i][j], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-	  
+		     &mat[i][j]);	  
 	MatCreateAIJ(mpiCommGlobal,
 		     nColsRankMat, nRowsRankMat,
 		     nColsOverallMat, nRowsOverallMat,
 		     0, nnz[j][i].dnnz, 0, nnz[j][i].onnz,
 		     &mat[j][i]);
-	MSG("REMOVE THIS!\n");
-	MatSetOption(mat[j][i], MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-
       }
     }    
   }
diff --git a/AMDiS/src/parallel/ParallelDofMapping.cc b/AMDiS/src/parallel/ParallelDofMapping.cc
index b65631c3..5ec7a83d 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.cc
+++ b/AMDiS/src/parallel/ParallelDofMapping.cc
@@ -11,6 +11,7 @@
 
 
 #include "parallel/ParallelDofMapping.h"
+#include "parallel/MeshLevelData.h"
 #include "parallel/StdMpi.h"
 
 namespace AMDiS {
@@ -37,6 +38,17 @@ namespace AMDiS {
   }
 
 
+  FeSpaceDofMap::FeSpaceDofMap(MeshLevelData* ld)
+    : levelData(ld),
+      dofComm(NULL),
+      feSpace(NULL),
+      needGlobalMapping(false),
+      isNonLocal(false)
+  {
+    clear();
+  }
+
+
   void FeSpaceDofMap::clear()
   {
     dofMap.clear();
diff --git a/AMDiS/src/parallel/ParallelDofMapping.h b/AMDiS/src/parallel/ParallelDofMapping.h
index 9d799d39..ab54b44a 100644
--- a/AMDiS/src/parallel/ParallelDofMapping.h
+++ b/AMDiS/src/parallel/ParallelDofMapping.h
@@ -20,20 +20,22 @@
 
 /** \file FeSpaceMapping.h */
 
+#ifndef AMDIS_FE_SPACE_MAPPING_H
+#define AMDIS_FE_SPACE_MAPPING_H
+
+#include <mpi.h>
 #include <vector>
 #include <map>
 #include <set>
+#include <petsc.h>
+#include <petscis.h>
+
+#include "AMDiS_fwd.h"
 #include "parallel/DofComm.h"
-#include "parallel/MeshLevelData.h"
 #include "parallel/MpiHelper.h"
 #include "parallel/ParallelTypes.h"
 #include "parallel/StdMpi.h"
 
-#include <petscis.h>
-
-#ifndef AMDIS_FE_SPACE_MAPPING_H
-#define AMDIS_FE_SPACE_MAPPING_H
-
 namespace AMDiS {
 
   using namespace std;
@@ -110,15 +112,7 @@ namespace AMDiS {
     }
      
     /// This is the only valid constructur to be used. 
-    FeSpaceDofMap(MeshLevelData* ld)
-      : levelData(ld),
-	dofComm(NULL),
-	feSpace(NULL),
-	needGlobalMapping(false),
-	isNonLocal(false)
-    {
-      clear();
-    }
+    FeSpaceDofMap(MeshLevelData* ld);
 
     /// Clears all data of the mapping.
     void clear();
@@ -483,6 +477,21 @@ namespace AMDiS {
 			int firstComponent, 
 			int nComponents);
 
+    /// Create a parallel distributed PETSc vector based on this mapping.
+    inline void createVec(Vec &vec)
+    {
+      VecCreateMPI(mpiComm, getRankDofs(), getOverallDofs(), &vec);
+    }
+
+    /// Create a parallel distributed PETSc vector based on this mapping but
+    /// with a different (larger) global size. This is used in the multi-level
+    /// method to embed a local vector into a subdomain spanned by several
+    /// ranks.
+    inline void createVec(Vec &vec, int nGlobalRows)
+    {
+      VecCreateMPI(mpiComm, getRankDofs(), nGlobalRows, &vec);
+    }
+
   protected:
     /// Insert a new FE space DOF mapping for a given FE space.
     void addFeSpace(const FiniteElemSpace* feSpace);
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index fd0b66ef..11565913 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -26,6 +26,10 @@
 #include <set>
 #include <map>
 #include <mpi.h>
+#include <petsc.h>
+#include <petscsys.h>
+#include <petscao.h>
+#include <petscksp.h>
 
 #include "AMDiS_fwd.h"
 #include "Global.h"
@@ -33,10 +37,6 @@
 #include "DOFMatrix.h"
 #include "parallel/MeshDistributor.h"
 #include "parallel/ParallelCoarseSpaceMatVec.h"
-#include <petsc.h>
-#include <petscsys.h>
-#include <petscao.h>
-#include <petscksp.h>
 
 namespace AMDiS {
 
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 74e20ca5..5085a695 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -14,6 +14,8 @@
 #include "MatrixVector.h"
 #include "parallel/PetscSolverFeti.h"
 #include "parallel/PetscSolverFetiStructs.h"
+#include "parallel/PetscSolverFetiOperators.h"
+#include "parallel/PetscSolverFetiTimings.h"
 #include "parallel/StdMpi.h"
 #include "parallel/MpiHelper.h"
 #include "parallel/PetscSolverGlobalMatrix.h"
@@ -23,201 +25,6 @@ namespace AMDiS {
 
   using namespace std;
 
-  double FetiTimings::fetiSolve = 0.0;
-  double FetiTimings::fetiSolve01 = 0.0;
-  double FetiTimings::fetiSolve02 = 0.0;
-  double FetiTimings::fetiPreconditioner = 0.0;
-
-
-  // y = mat * x
-  int petscMultMatSchurPrimal(Mat mat, Vec x, Vec y)
-  {
-    // S_PiPi = K_PiPi - K_PiB inv(K_BB) K_BPi
-
-    void *ctx;
-    MatShellGetContext(mat, &ctx);
-    SchurPrimalData* data = static_cast<SchurPrimalData*>(ctx);
-
-    MatMult(data->subSolver->getMatInteriorCoarse(), x, data->tmp_vec_b);
-    data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
-    MatMult(data->subSolver->getMatCoarseInterior(), data->tmp_vec_b, 
-	    data->tmp_vec_primal);
-    MatMult(data->subSolver->getMatCoarse(), x, y);
-    VecAXPBY(y, -1.0, 1.0, data->tmp_vec_primal);
-
-    return 0;
-  }
-
-
-  // y = mat * x
-  int petscMultMatFeti(Mat mat, Vec x, Vec y)
-  {
-    FUNCNAME("petscMultMatFeti()");
-
-    //    F = L inv(K_BB) trans(L) + L inv(K_BB) K_BPi inv(S_PiPi) K_PiB inv(K_BB) trans(L)
-    // => F = L [I + inv(K_BB) K_BPi inv(S_PiPi) K_PiB] inv(K_BB) trans(L)
-
-    double wtime = MPI::Wtime();
-
-    void *ctx;
-    MatShellGetContext(mat, &ctx);
-    FetiData* data = static_cast<FetiData*>(ctx);
-
-    MatMultTranspose(*(data->mat_lagrange), x, data->tmp_vec_b);
-
-    double wtime01 = MPI::Wtime();
-    data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
-
-    FetiTimings::fetiSolve01 += (MPI::Wtime() - wtime01);
-
-    MatMult(*(data->mat_lagrange), data->tmp_vec_b, data->tmp_vec_lagrange);
-
-    MatMult(data->subSolver->getMatCoarseInterior(), 
-	    data->tmp_vec_b, data->tmp_vec_primal);
-
-    wtime01 = MPI::Wtime();
-    KSPSolve(*(data->ksp_schur_primal), data->tmp_vec_primal, data->tmp_vec_primal);
-    FetiTimings::fetiSolve02 += (MPI::Wtime() - wtime01);
-
-    MatMult(data->subSolver->getMatInteriorCoarse(), 
-	    data->tmp_vec_primal, data->tmp_vec_b);
-
-    wtime01 = MPI::Wtime();
-    data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
-    FetiTimings::fetiSolve01 += (MPI::Wtime() - wtime01);
-
-    MatMult(*(data->mat_lagrange), data->tmp_vec_b, y);
-
-    VecAXPBY(y, 1.0, 1.0, data->tmp_vec_lagrange);
-
-    FetiTimings::fetiSolve += (MPI::Wtime() - wtime);
-
-    return 0;
-  }
-
-
-  // y = PC * x
-  PetscErrorCode petscApplyFetiDirichletPrecon(PC pc, Vec x, Vec y)
-  {
-    double wtime = MPI::Wtime();
-
-    // Get data for the preconditioner
-    void *ctx;
-    PCShellGetContext(pc, &ctx);
-    FetiDirichletPreconData* data = static_cast<FetiDirichletPreconData*>(ctx);
-
-    // Multiply with scaled Lagrange constraint matrix.
-    MatMultTranspose(*(data->mat_lagrange_scaled), x, data->tmp_vec_b);
-
-
-    // === Restriction of the B nodes to the boundary nodes. ===
-
-    int nLocalB;
-    int nLocalDuals;
-    VecGetLocalSize(data->tmp_vec_b, &nLocalB);
-    VecGetLocalSize(data->tmp_vec_duals0, &nLocalDuals);
-
-    PetscScalar *local_b, *local_duals;
-    VecGetArray(data->tmp_vec_b, &local_b);
-    VecGetArray(data->tmp_vec_duals0, &local_duals);
-
-    for (map<int, int>::iterator it = data->localToDualMap.begin();
-	 it != data->localToDualMap.end(); ++it)
-      local_duals[it->second] = local_b[it->first];
-
-    VecRestoreArray(data->tmp_vec_b, &local_b);
-    VecRestoreArray(data->tmp_vec_duals0, &local_duals);
-
-
-    // === K_DD - K_DI inv(K_II) K_ID ===
-
-    MatMult(*(data->mat_duals_duals), data->tmp_vec_duals0, data->tmp_vec_duals1);
-
-    MatMult(*(data->mat_interior_duals), data->tmp_vec_duals0, data->tmp_vec_interior);
-    KSPSolve(*(data->ksp_interior), data->tmp_vec_interior, data->tmp_vec_interior);
-    MatMult(*(data->mat_duals_interior), data->tmp_vec_interior, data->tmp_vec_duals0);
-
-    VecAXPBY(data->tmp_vec_duals0, 1.0, -1.0, data->tmp_vec_duals1);
-
-
-    // === Prolongation from local dual nodes to B nodes.
-
-    VecGetArray(data->tmp_vec_b, &local_b);
-    VecGetArray(data->tmp_vec_duals0, &local_duals);
-
-    for (map<int, int>::iterator it = data->localToDualMap.begin();
-	 it != data->localToDualMap.end(); ++it)
-      local_b[it->first] = local_duals[it->second];
-
-    VecRestoreArray(data->tmp_vec_b, &local_b);
-    VecRestoreArray(data->tmp_vec_duals0, &local_duals);
-
-
-    // Multiply with scaled Lagrange constraint matrix.
-    MatMult(*(data->mat_lagrange_scaled), data->tmp_vec_b, y);
-
-    FetiTimings::fetiPreconditioner += (MPI::Wtime() - wtime);
-
-    return 0;
-  }
-
-
-  // y = PC * x
-  PetscErrorCode petscApplyFetiLumpedPrecon(PC pc, Vec x, Vec y)
-  {
-    // Get data for the preconditioner
-    void *ctx;
-    PCShellGetContext(pc, &ctx);
-    FetiLumpedPreconData* data = static_cast<FetiLumpedPreconData*>(ctx);
-
-    // Multiply with scaled Lagrange constraint matrix.
-    MatMultTranspose(*(data->mat_lagrange_scaled), x, data->tmp_vec_b);
-
-
-    // === Restriction of the B nodes to the boundary nodes. ===
-
-    int nLocalB;
-    int nLocalDuals;
-    VecGetLocalSize(data->tmp_vec_b, &nLocalB);
-    VecGetLocalSize(data->tmp_vec_duals0, &nLocalDuals);
-
-    PetscScalar *local_b, *local_duals;
-    VecGetArray(data->tmp_vec_b, &local_b);
-    VecGetArray(data->tmp_vec_duals0, &local_duals);
-
-    for (map<int, int>::iterator it = data->localToDualMap.begin();
-	 it != data->localToDualMap.end(); ++it)
-      local_duals[it->second] = local_b[it->first];
-
-    VecRestoreArray(data->tmp_vec_b, &local_b);
-    VecRestoreArray(data->tmp_vec_duals0, &local_duals);
-
-
-    // === K_DD ===
-
-    MatMult(*(data->mat_duals_duals), data->tmp_vec_duals0, data->tmp_vec_duals1);
-
-
-    // === Prolongation from local dual nodes to B nodes.
-
-    VecGetArray(data->tmp_vec_b, &local_b);
-    VecGetArray(data->tmp_vec_duals1, &local_duals);
-
-    for (map<int, int>::iterator it = data->localToDualMap.begin();
-	 it != data->localToDualMap.end(); ++it)
-      local_b[it->first] = local_duals[it->second];
-
-    VecRestoreArray(data->tmp_vec_b, &local_b);
-    VecRestoreArray(data->tmp_vec_duals0, &local_duals);
-
-
-    // Multiply with scaled Lagrange constraint matrix.
-    MatMult(*(data->mat_lagrange_scaled), data->tmp_vec_b, y);
-
-    return 0;
-  }
-
-
   PetscSolverFeti::PetscSolverFeti()
     : PetscSolver(),
       schurPrimalSolver(0),
@@ -804,14 +611,8 @@ namespace AMDiS {
 
       schurPrimalData.subSolver = subdomain;
       
-      VecCreateMPI(mpiCommGlobal, 
-		   localDofMap.getRankDofs(), 
-		   nGlobalOverallInterior,
-		   &(schurPrimalData.tmp_vec_b));
-      VecCreateMPI(mpiCommGlobal, 
-		   primalDofMap.getRankDofs(), 
-		   primalDofMap.getOverallDofs(),
-		   &(schurPrimalData.tmp_vec_primal));
+      localDofMap.createVec(schurPrimalData.tmp_vec_b, nGlobalOverallInterior);
+      primalDofMap.createVec(schurPrimalData.tmp_vec_primal);
 
       MatCreateShell(mpiCommGlobal,
 		     primalDofMap.getRankDofs(), 
@@ -967,18 +768,9 @@ namespace AMDiS {
     fetiData.subSolver = subdomain;
     fetiData.ksp_schur_primal = &ksp_schur_primal;
 
-    VecCreateMPI(mpiCommGlobal, 
-		 localDofMap.getRankDofs(), 
-		 nGlobalOverallInterior,
-		 &(fetiData.tmp_vec_b));
-    VecCreateMPI(mpiCommGlobal,
-		 lagrangeMap.getRankDofs(), 
-		 lagrangeMap.getOverallDofs(),
-		 &(fetiData.tmp_vec_lagrange));
-    VecCreateMPI(mpiCommGlobal, 
-		 primalDofMap.getRankDofs(), 
-		 primalDofMap.getOverallDofs(),
-		 &(fetiData.tmp_vec_primal));
+    localDofMap.createVec(fetiData.tmp_vec_b, nGlobalOverallInterior);
+    lagrangeMap.createVec(fetiData.tmp_vec_lagrange);
+    primalDofMap.createVec(fetiData.tmp_vec_primal);
 
     MatCreateShell(mpiCommGlobal,
 		   lagrangeMap.getRankDofs(), 
@@ -1027,10 +819,8 @@ namespace AMDiS {
       fetiDirichletPreconData.mat_duals_interior = &mat_duals_interior;
       fetiDirichletPreconData.ksp_interior = &ksp_interior;
       
-      VecCreateMPI(mpiCommGlobal, 
-		   localDofMap.getRankDofs(),
-		   nGlobalOverallInterior,
-		   &(fetiDirichletPreconData.tmp_vec_b));      
+      localDofMap.createVec(fetiDirichletPreconData.tmp_vec_b, 
+			    nGlobalOverallInterior);
       MatGetVecs(mat_duals_duals, PETSC_NULL, 
 		 &(fetiDirichletPreconData.tmp_vec_duals0));
       MatGetVecs(mat_duals_duals, PETSC_NULL, 
@@ -1084,10 +874,7 @@ namespace AMDiS {
 	}
       }
 
-      VecCreateMPI(mpiCommGlobal, 
-		   localDofMap.getRankDofs(),
-		   localDofMap.getOverallDofs(),
-		   &(fetiLumpedPreconData.tmp_vec_b));
+      localDofMap.createVec(fetiLumpedPreconData.tmp_vec_b);
       MatGetVecs(mat_duals_duals, PETSC_NULL, 
 		 &(fetiLumpedPreconData.tmp_vec_duals0));
       MatGetVecs(mat_duals_duals, PETSC_NULL, 
@@ -1219,12 +1006,8 @@ namespace AMDiS {
 		   lagrangeMap.getOverallDofs(), 
 		   PETSC_NULL, &fetiMat);
 
-      VecCreateMPI(mpiCommGlobal,
-		   lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(), 
-		   &unitVector);
-      VecCreateMPI(mpiCommGlobal,
-		   lagrangeMap.getRankDofs(), lagrangeMap.getOverallDofs(), 
-		   &resultVector);
+      lagrangeMap.createVec(unitVector);
+      lagrangeMap.createVec(resultVector);
 
       PetscInt low, high;
       VecGetOwnershipRange(unitVector, &low, &high);
@@ -1631,39 +1414,43 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverFeti::solveReducedFetiMatrix()");
 
-    // RHS vector.
-    Vec vec_rhs, vec_sol;
-
     // Some temporary vectors.
     Vec tmp_b0, tmp_b1, tmp_lagrange0, tmp_primal0, tmp_primal1;
+    localDofMap.createVec(tmp_b0, nGlobalOverallInterior);
+    localDofMap.createVec(tmp_b1, nGlobalOverallInterior);
+    primalDofMap.createVec(tmp_primal0);
+    primalDofMap.createVec(tmp_primal1);
 
-    VecCreateMPI(mpiCommGlobal, 
-		 localDofMap.getRankDofs(), 
-		 nGlobalOverallInterior, 
-		 &tmp_b0);
-    VecCreateMPI(mpiCommGlobal, 
-		 localDofMap.getRankDofs(), 
-		 nGlobalOverallInterior, 
-		 &tmp_b1);
-    VecCreateMPI(mpiCommGlobal,
-		 primalDofMap.getRankDofs(), 
-		 primalDofMap.getOverallDofs(), &tmp_primal0);
-    VecCreateMPI(mpiCommGlobal,
-		 primalDofMap.getRankDofs(), 
-		 primalDofMap.getOverallDofs(), &tmp_primal1);
-
+    // RHS vector.
+    Vec vecRhs, vecSol;
     MatGetVecs(mat_lagrange, PETSC_NULL, &tmp_lagrange0);
-    MatGetVecs(mat_lagrange, PETSC_NULL, &vec_rhs);
-    MatGetVecs(mat_lagrange, PETSC_NULL, &vec_sol);
+    MatGetVecs(mat_lagrange, PETSC_NULL, &vecRhs);
+    MatGetVecs(mat_lagrange, PETSC_NULL, &vecSol);
+
+
+
+    Vec vecRhsInterface, vecSolInterface;
+    Mat matInteriorInterface, matInterfaceInterior;
+    Mat matPrimalInterface, matInterfacePrimal;
+    if (enableStokesMode) {
+      vecRhsInterface = getVecRhsCoarseByComponent(2);
+      vecSolInterface = getVecSolCoarseByComponent(2);
+
+      matInteriorInterface = getMatInteriorCoarseByComponent(2);
+      matInterfaceInterior = getMatCoarseInteriorByComponent(2);
+      
+      matPrimalInterface = getMatCoarse(0, 1);
+      matInterfacePrimal = getMatCoarse(1, 0);
+    }
 
     // === Create new rhs ===
 
     double wtime = MPI::Wtime();
 
     //    d = L inv(K_BB) f_B - L inv(K_BB) K_BPi inv(S_PiPi) [f_Pi - K_PiB inv(K_BB) f_B]
-    // vec_rhs = L * inv(K_BB) * f_B
+    // vecRhs = L * inv(K_BB) * f_B
     subdomain->solveGlobal(subdomain->getVecRhsInterior(), tmp_b0);
-    MatMult(mat_lagrange, tmp_b0, vec_rhs);
+    MatMult(mat_lagrange, tmp_b0, vecRhs);
 
     // tmp_primal0 = M_PiB * inv(K_BB) * f_B
     MatMult(subdomain->getMatCoarseInterior(), tmp_b0, tmp_primal0);
@@ -1685,7 +1472,7 @@ namespace AMDiS {
     MatMult(mat_lagrange, tmp_b0, tmp_lagrange0);
 
     //
-    VecAXPBY(vec_rhs, -1.0, 1.0, tmp_lagrange0);
+    VecAXPBY(vecRhs, -1.0, 1.0, tmp_lagrange0);
 
     if (printTimings) {
       MPI::COMM_WORLD.Barrier();
@@ -1697,7 +1484,7 @@ namespace AMDiS {
 
 
     // === Solve with FETI-DP operator. ===
-    KSPSolve(ksp_feti, vec_rhs, vec_sol);
+    KSPSolve(ksp_feti, vecRhs, vecSol);
 
 
     if (printTimings) {
@@ -1718,7 +1505,7 @@ namespace AMDiS {
     subdomain->solveGlobal(subdomain->getVecRhsInterior(), tmp_b0);
     MatMult(subdomain->getMatCoarseInterior(), tmp_b0, tmp_primal1);
     VecAXPBY(tmp_primal0, -1.0, 1.0, tmp_primal1);
-    MatMultTranspose(mat_lagrange, vec_sol, tmp_b0);
+    MatMultTranspose(mat_lagrange, vecSol, tmp_b0);
     subdomain->solveGlobal(tmp_b0, tmp_b0);
     MatMult(subdomain->getMatCoarseInterior(), tmp_b0, tmp_primal1);
 
@@ -1729,7 +1516,7 @@ namespace AMDiS {
     // === Solve for u_b. ===
 
     VecCopy(subdomain->getVecRhsInterior(), tmp_b0);
-    MatMultTranspose(mat_lagrange, vec_sol, tmp_b1);
+    MatMultTranspose(mat_lagrange, vecSol, tmp_b1);
     VecAXPBY(tmp_b0, -1.0, 1.0, tmp_b1);
 
     MatMult(subdomain->getMatInteriorCoarse(), tmp_primal0, tmp_b1);
@@ -1746,8 +1533,8 @@ namespace AMDiS {
 	  MPI::Wtime() - wtime);
     }
 
-    VecDestroy(&vec_rhs);
-    VecDestroy(&vec_sol);
+    VecDestroy(&vecRhs);
+    VecDestroy(&vecSol);
     VecDestroy(&tmp_b0);
     VecDestroy(&tmp_b1);
     VecDestroy(&tmp_lagrange0);
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index 241b625a..b6c4988c 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -262,6 +262,9 @@ namespace AMDiS {
     /// Data for MatMult operation in matrix \ref mat_feti
     FetiData fetiData;
 
+    /// Data for MatMult operation in matrix \ref mat_feti when using Stokes mode
+    FetiDataInterface fetiDataInterface;
+
     /// Defines which preconditioner should be used to solve the reduced
     /// FETI-DP system.
     FetiPreconditioner fetiPreconditioner;
@@ -297,27 +300,6 @@ namespace AMDiS {
     bool enableStokesMode;
   };
 
-
-  class FetiTimings {
-  private:
-    FetiTimings() {}
-
-  public:
-    static void reset()
-    {
-      fetiSolve = 0.0;
-      fetiSolve01 = 0.0;
-      fetiSolve02 = 0.0;
-      fetiPreconditioner = 0.0;
-    }
-
-  public:
-    static double fetiSolve;
-    static double fetiSolve01;
-    static double fetiSolve02;
-
-    static double fetiPreconditioner;
-  };
 }
 
 #endif
diff --git a/AMDiS/src/parallel/PetscSolverFetiOperators.cc b/AMDiS/src/parallel/PetscSolverFetiOperators.cc
new file mode 100644
index 00000000..5a4a75c3
--- /dev/null
+++ b/AMDiS/src/parallel/PetscSolverFetiOperators.cc
@@ -0,0 +1,206 @@
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology 
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+#include "parallel/PetscSolverFetiOperators.h"
+#include "parallel/PetscSolverFetiStructs.h"
+#include "parallel/PetscSolverFetiTimings.h"
+
+namespace AMDiS {
+
+  // y = mat * x
+  int petscMultMatSchurPrimal(Mat mat, Vec x, Vec y)
+  {
+    // S_PiPi = K_PiPi - K_PiB inv(K_BB) K_BPi
+
+    void *ctx;
+    MatShellGetContext(mat, &ctx);
+    SchurPrimalData* data = static_cast<SchurPrimalData*>(ctx);
+
+    MatMult(data->subSolver->getMatInteriorCoarse(), x, data->tmp_vec_b);
+    data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
+    MatMult(data->subSolver->getMatCoarseInterior(), data->tmp_vec_b, 
+	    data->tmp_vec_primal);
+    MatMult(data->subSolver->getMatCoarse(), x, y);
+    VecAXPBY(y, -1.0, 1.0, data->tmp_vec_primal);
+
+    return 0;
+  }
+
+
+  // y = mat * x
+  int petscMultMatFeti(Mat mat, Vec x, Vec y)
+  {
+    FUNCNAME("petscMultMatFeti()");
+
+    //    F = L inv(K_BB) trans(L) + L inv(K_BB) K_BPi inv(S_PiPi) K_PiB inv(K_BB) trans(L)
+    // => F = L [I + inv(K_BB) K_BPi inv(S_PiPi) K_PiB] inv(K_BB) trans(L)
+
+    double wtime = MPI::Wtime();
+
+    void *ctx;
+    MatShellGetContext(mat, &ctx);
+    FetiData* data = static_cast<FetiData*>(ctx);
+
+    MatMultTranspose(*(data->mat_lagrange), x, data->tmp_vec_b);
+
+    double wtime01 = MPI::Wtime();
+    data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
+
+    FetiTimings::fetiSolve01 += (MPI::Wtime() - wtime01);
+
+    MatMult(*(data->mat_lagrange), data->tmp_vec_b, data->tmp_vec_lagrange);
+
+    MatMult(data->subSolver->getMatCoarseInterior(), 
+	    data->tmp_vec_b, data->tmp_vec_primal);
+
+    wtime01 = MPI::Wtime();
+    KSPSolve(*(data->ksp_schur_primal), data->tmp_vec_primal, data->tmp_vec_primal);
+    FetiTimings::fetiSolve02 += (MPI::Wtime() - wtime01);
+
+    MatMult(data->subSolver->getMatInteriorCoarse(), 
+	    data->tmp_vec_primal, data->tmp_vec_b);
+
+    wtime01 = MPI::Wtime();
+    data->subSolver->solveGlobal(data->tmp_vec_b, data->tmp_vec_b);
+    FetiTimings::fetiSolve01 += (MPI::Wtime() - wtime01);
+
+    MatMult(*(data->mat_lagrange), data->tmp_vec_b, y);
+
+    VecAXPBY(y, 1.0, 1.0, data->tmp_vec_lagrange);
+
+    FetiTimings::fetiSolve += (MPI::Wtime() - wtime);
+
+    return 0;
+  }
+
+
+  // y = PC * x
+  PetscErrorCode petscApplyFetiDirichletPrecon(PC pc, Vec x, Vec y)
+  {
+    double wtime = MPI::Wtime();
+
+    // Get data for the preconditioner
+    void *ctx;
+    PCShellGetContext(pc, &ctx);
+    FetiDirichletPreconData* data = static_cast<FetiDirichletPreconData*>(ctx);
+
+    // Multiply with scaled Lagrange constraint matrix.
+    MatMultTranspose(*(data->mat_lagrange_scaled), x, data->tmp_vec_b);
+
+
+    // === Restriction of the B nodes to the boundary nodes. ===
+
+    int nLocalB;
+    int nLocalDuals;
+    VecGetLocalSize(data->tmp_vec_b, &nLocalB);
+    VecGetLocalSize(data->tmp_vec_duals0, &nLocalDuals);
+
+    PetscScalar *local_b, *local_duals;
+    VecGetArray(data->tmp_vec_b, &local_b);
+    VecGetArray(data->tmp_vec_duals0, &local_duals);
+
+    for (map<int, int>::iterator it = data->localToDualMap.begin();
+	 it != data->localToDualMap.end(); ++it)
+      local_duals[it->second] = local_b[it->first];
+
+    VecRestoreArray(data->tmp_vec_b, &local_b);
+    VecRestoreArray(data->tmp_vec_duals0, &local_duals);
+
+
+    // === K_DD - K_DI inv(K_II) K_ID ===
+
+    MatMult(*(data->mat_duals_duals), data->tmp_vec_duals0, data->tmp_vec_duals1);
+
+    MatMult(*(data->mat_interior_duals), data->tmp_vec_duals0, data->tmp_vec_interior);
+    KSPSolve(*(data->ksp_interior), data->tmp_vec_interior, data->tmp_vec_interior);
+    MatMult(*(data->mat_duals_interior), data->tmp_vec_interior, data->tmp_vec_duals0);
+
+    VecAXPBY(data->tmp_vec_duals0, 1.0, -1.0, data->tmp_vec_duals1);
+
+
+    // === Prolongation from local dual nodes to B nodes.
+
+    VecGetArray(data->tmp_vec_b, &local_b);
+    VecGetArray(data->tmp_vec_duals0, &local_duals);
+
+    for (map<int, int>::iterator it = data->localToDualMap.begin();
+	 it != data->localToDualMap.end(); ++it)
+      local_b[it->first] = local_duals[it->second];
+
+    VecRestoreArray(data->tmp_vec_b, &local_b);
+    VecRestoreArray(data->tmp_vec_duals0, &local_duals);
+
+
+    // Multiply with scaled Lagrange constraint matrix.
+    MatMult(*(data->mat_lagrange_scaled), data->tmp_vec_b, y);
+
+    FetiTimings::fetiPreconditioner += (MPI::Wtime() - wtime);
+
+    return 0;
+  }
+
+
+  // y = PC * x
+  PetscErrorCode petscApplyFetiLumpedPrecon(PC pc, Vec x, Vec y)
+  {
+    // Get data for the preconditioner
+    void *ctx;
+    PCShellGetContext(pc, &ctx);
+    FetiLumpedPreconData* data = static_cast<FetiLumpedPreconData*>(ctx);
+
+    // Multiply with scaled Lagrange constraint matrix.
+    MatMultTranspose(*(data->mat_lagrange_scaled), x, data->tmp_vec_b);
+
+
+    // === Restriction of the B nodes to the boundary nodes. ===
+
+    int nLocalB;
+    int nLocalDuals;
+    VecGetLocalSize(data->tmp_vec_b, &nLocalB);
+    VecGetLocalSize(data->tmp_vec_duals0, &nLocalDuals);
+
+    PetscScalar *local_b, *local_duals;
+    VecGetArray(data->tmp_vec_b, &local_b);
+    VecGetArray(data->tmp_vec_duals0, &local_duals);
+
+    for (map<int, int>::iterator it = data->localToDualMap.begin();
+	 it != data->localToDualMap.end(); ++it)
+      local_duals[it->second] = local_b[it->first];
+
+    VecRestoreArray(data->tmp_vec_b, &local_b);
+    VecRestoreArray(data->tmp_vec_duals0, &local_duals);
+
+
+    // === K_DD ===
+
+    MatMult(*(data->mat_duals_duals), data->tmp_vec_duals0, data->tmp_vec_duals1);
+
+
+    // === Prolongation from local dual nodes to B nodes.
+
+    VecGetArray(data->tmp_vec_b, &local_b);
+    VecGetArray(data->tmp_vec_duals1, &local_duals);
+
+    for (map<int, int>::iterator it = data->localToDualMap.begin();
+	 it != data->localToDualMap.end(); ++it)
+      local_b[it->first] = local_duals[it->second];
+
+    VecRestoreArray(data->tmp_vec_b, &local_b);
+    VecRestoreArray(data->tmp_vec_duals1, &local_duals);
+
+
+    // Multiply with scaled Lagrange constraint matrix.
+    MatMult(*(data->mat_lagrange_scaled), data->tmp_vec_b, y);
+
+    return 0;
+  }
+
+}
diff --git a/AMDiS/src/parallel/PetscSolverFetiOperators.h b/AMDiS/src/parallel/PetscSolverFetiOperators.h
new file mode 100644
index 00000000..69a391b6
--- /dev/null
+++ b/AMDiS/src/parallel/PetscSolverFetiOperators.h
@@ -0,0 +1,44 @@
+// ============================================================================
+// ==                                                                        ==
+// == AMDiS - Adaptive multidimensional simulations                          ==
+// ==                                                                        ==
+// ==  http://www.amdis-fem.org                                              ==
+// ==                                                                        ==
+// ============================================================================
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology 
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+
+
+/** \file PetscSolverFetiOperators.h */
+
+#ifndef AMDIS_PETSC_SOLVER_FETI_OPERATORS_H
+#define AMDIS_PETSC_SOLVER_FETI_OPERATORS_H
+
+#include <mpi.h>
+#include <petsc.h>
+
+namespace AMDiS {
+
+  /// Schur primal operator, y = mat * x
+  int petscMultMatSchurPrimal(Mat mat, Vec x, Vec y);
+
+  /// FETI-DP operator
+  int petscMultMatFeti(Mat mat, Vec x, Vec y);
+
+  /// y = PC * x
+  PetscErrorCode petscApplyFetiDirichletPrecon(PC pc, Vec x, Vec y);
+
+  /// y = PC * x
+  PetscErrorCode petscApplyFetiLumpedPrecon(PC pc, Vec x, Vec y);
+}
+
+#endif
diff --git a/AMDiS/src/parallel/PetscSolverFetiStructs.h b/AMDiS/src/parallel/PetscSolverFetiStructs.h
index e65eb3a4..086b3c79 100644
--- a/AMDiS/src/parallel/PetscSolverFetiStructs.h
+++ b/AMDiS/src/parallel/PetscSolverFetiStructs.h
@@ -72,11 +72,22 @@ namespace AMDiS {
   };
 
 
+  /// Extends the FETI-DP data set with interface variables; it is therefore
+  /// used when the Stokes mode is enabled.
+  struct FetiDataInterface : FetiData {
+    Mat mat_interior_interface, mat_interface_interior;
+
+    Mat mat_primal_interface, mat_interface_primal;
+  };
+
+
   struct FetiDirichletPreconData {
     /// Matrix of scaled Lagrange variables.
     Mat *mat_lagrange_scaled;
 
-    Mat *mat_interior_interior, *mat_duals_duals, *mat_interior_duals, *mat_duals_interior;
+    Mat *mat_interior_interior, *mat_duals_duals;
+
+    Mat *mat_interior_duals, *mat_duals_interior;
 
     /// Pointer to the solver for \ref PetscSolverFeti::mat_bb.
     KSP *ksp_interior;
diff --git a/AMDiS/src/parallel/PetscSolverFetiTimings.cc b/AMDiS/src/parallel/PetscSolverFetiTimings.cc
new file mode 100644
index 00000000..5e1496c0
--- /dev/null
+++ b/AMDiS/src/parallel/PetscSolverFetiTimings.cc
@@ -0,0 +1,31 @@
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology 
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+
+#include "parallel/PetscSolverFetiTimings.h"
+
+namespace AMDiS {
+
+  double FetiTimings::fetiSolve = 0.0;
+  double FetiTimings::fetiSolve01 = 0.0;
+  double FetiTimings::fetiSolve02 = 0.0;
+  double FetiTimings::fetiPreconditioner = 0.0;
+
+
+  void FetiTimings::reset()
+  {
+    fetiSolve = 0.0;
+    fetiSolve01 = 0.0;
+    fetiSolve02 = 0.0;
+    fetiPreconditioner = 0.0;
+  }
+
+}
diff --git a/AMDiS/src/parallel/PetscSolverFetiTimings.h b/AMDiS/src/parallel/PetscSolverFetiTimings.h
new file mode 100644
index 00000000..8f1b339d
--- /dev/null
+++ b/AMDiS/src/parallel/PetscSolverFetiTimings.h
@@ -0,0 +1,46 @@
+// ============================================================================
+// ==                                                                        ==
+// == AMDiS - Adaptive multidimensional simulations                          ==
+// ==                                                                        ==
+// ==  http://www.amdis-fem.org                                              ==
+// ==                                                                        ==
+// ============================================================================
+//
+// Software License for AMDiS
+//
+// Copyright (c) 2010 Dresden University of Technology 
+// All rights reserved.
+// Authors: Simon Vey, Thomas Witkowski et al.
+//
+// This file is part of AMDiS
+//
+// See also license.opensource.txt in the distribution.
+
+
+
+/** \file PetscSolverFetiTimings.h */
+
+
+#ifndef AMDIS_PETSC_SOLVER_FETI_TIMINGS_H
+#define AMDIS_PETSC_SOLVER_FETI_TIMINGS_H
+
+namespace AMDiS {
+
+  class FetiTimings {
+  private:
+    FetiTimings() {}
+
+  public:
+    static void reset();
+
+  public:
+    static double fetiSolve;
+    static double fetiSolve01;
+    static double fetiSolve02;
+
+    static double fetiPreconditioner;
+  };
+
+}
+
+#endif
-- 
GitLab