diff --git a/AMDiS/libtool b/AMDiS/libtool
index db01bc59718f48797b60d36d16c9ae7d8fd0eab2..4f4afd67949bd025c4883ad68bf263c224fa9462 100755
--- a/AMDiS/libtool
+++ b/AMDiS/libtool
@@ -30,10 +30,10 @@
 # the same distribution terms that you use for the rest of that program.
 
 # A sed program that does not truncate output.
-SED="/bin/sed"
+SED="/usr/bin/sed"
 
 # Sed that helps us avoid accidentally triggering echo(1) options like -n.
-Xsed="/bin/sed -e 1s/^X//"
+Xsed="/usr/bin/sed -e 1s/^X//"
 
 # The HP-UX ksh and POSIX shell print the target directory to stdout
 # if CDPATH is set.
@@ -44,7 +44,7 @@ available_tags=" CXX F77"
 
 # ### BEGIN LIBTOOL CONFIG
 
-# Libtool was configured on host NWRW15:
+# Libtool was configured on host p2s103:
 
 # Shell to use when invoking shell scripts.
 SHELL="/bin/sh"
@@ -66,12 +66,12 @@ fast_install=yes
 
 # The host system.
 host_alias=
-host=i686-redhat-linux-gnu
+host=x86_64-unknown-linux-gnu
 host_os=linux-gnu
 
 # The build system.
 build_alias=
-build=i686-redhat-linux-gnu
+build=x86_64-unknown-linux-gnu
 build_os=linux-gnu
 
 # An echo program that does not interpret backslashes.
@@ -82,25 +82,25 @@ AR="ar"
 AR_FLAGS="cru"
 
 # A C compiler.
-LTCC="gcc"
+LTCC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicc"
 
 # LTCC compiler flags.
 LTCFLAGS="-g -O2"
 
 # A language-specific compiler.
-CC="gcc"
+CC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicc"
 
 # Is the compiler the GNU C compiler?
 with_gcc=yes
 
-gcc_dir=`gcc -print-file-name=. | /bin/sed 's,/\.$,,'`
+gcc_dir=`gcc -print-file-name=. | /usr/bin/sed 's,/\.$,,'`
 gcc_ver=`gcc -dumpversion`
 
 # An ERE matcher.
 EGREP="grep -E"
 
 # The linker used to build libraries.
-LD="/usr/bin/ld"
+LD="/usr/x86_64-suse-linux/bin/ld -m elf_x86_64"
 
 # Whether we need hard or soft links.
 LN_S="ln -s"
@@ -174,7 +174,7 @@ dlopen_self=unknown
 dlopen_self_static=unknown
 
 # Compiler flag to prevent dynamic linking.
-link_static_flag="-static"
+link_static_flag=""
 
 # Compiler flag to turn off builtin functions.
 no_builtin_flag=" -fno-builtin"
@@ -328,10 +328,10 @@ variables_saved_for_relink="PATH LD_LIBRARY_PATH LD_RUN_PATH GCC_EXEC_PREFIX COM
 link_all_deplibs=unknown
 
 # Compile-time system search path for libraries
-sys_lib_search_path_spec=`echo " /usr/lib/gcc/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../../i386-redhat-linux/lib/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../../i386-redhat-linux/lib/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../ /lib/i386-redhat-linux/4.1.2/ /lib/ /usr/lib/i386-redhat-linux/4.1.2/ /usr/lib/" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
+sys_lib_search_path_spec=`echo "/lib64 /usr/lib64 /usr/local/lib64" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
 
 # Run-time system search path for libraries
-sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/lib/octave-2.9.9 /usr/lib/qt-3.3/lib "
+sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /usr/X11R6/lib64/Xaw3d /usr/X11R6/lib64 /usr/X11R6/lib/Xaw3d /usr/X11R6/lib /usr/x86_64-suse-linux/lib /usr/local/lib64 /usr/local/lib /opt/kde3/lib64 /opt/kde3/lib /opt/gnome/lib64 /opt/gnome/lib /lib64 /lib /usr/lib64 /usr/lib /opt/cluster/intel/cce/9.1.042/lib /opt/cluster/intel/fce/9.1.036/lib /opt/cluster/Pathscale3.0/lib/2.9.99 /opt/cluster/Pathscale3.0/lib/2.9.99/32 /work/licsoft/compilers/pgi/linux86-64/6.2/lib /work/licsoft/compilers/pgi/linux86-64/6.2/libso "
 
 # Fix the shell variable $srcfile for the compiler.
 fix_srcfile_path=""
@@ -7266,7 +7266,7 @@ disable_libs=static
 # End:
 # ### BEGIN LIBTOOL TAG CONFIG: CXX
 
-# Libtool was configured on host NWRW15:
+# Libtool was configured on host p2s103:
 
 # Shell to use when invoking shell scripts.
 SHELL="/bin/sh"
@@ -7288,12 +7288,12 @@ fast_install=yes
 
 # The host system.
 host_alias=
-host=i686-redhat-linux-gnu
+host=x86_64-unknown-linux-gnu
 host_os=linux-gnu
 
 # The build system.
 build_alias=
-build=i686-redhat-linux-gnu
+build=x86_64-unknown-linux-gnu
 build_os=linux-gnu
 
 # An echo program that does not interpret backslashes.
@@ -7304,25 +7304,25 @@ AR="ar"
 AR_FLAGS="cru"
 
 # A C compiler.
-LTCC="gcc"
+LTCC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicc"
 
 # LTCC compiler flags.
 LTCFLAGS="-g -O2"
 
 # A language-specific compiler.
-CC="g++"
+CC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpiCC"
 
 # Is the compiler the GNU C compiler?
 with_gcc=yes
 
-gcc_dir=`gcc -print-file-name=. | /bin/sed 's,/\.$,,'`
+gcc_dir=`gcc -print-file-name=. | /usr/bin/sed 's,/\.$,,'`
 gcc_ver=`gcc -dumpversion`
 
 # An ERE matcher.
 EGREP="grep -E"
 
 # The linker used to build libraries.
-LD="/usr/bin/ld"
+LD="/usr/x86_64-suse-linux/bin/ld -m elf_x86_64"
 
 # Whether we need hard or soft links.
 LN_S="ln -s"
@@ -7396,7 +7396,7 @@ dlopen_self=unknown
 dlopen_self_static=unknown
 
 # Compiler flag to prevent dynamic linking.
-link_static_flag="-static"
+link_static_flag=""
 
 # Compiler flag to turn off builtin functions.
 no_builtin_flag=" -fno-builtin"
@@ -7451,11 +7451,11 @@ striplib="strip --strip-unneeded"
 
 # Dependencies to place before the objects being linked to create a
 # shared library.
-predep_objects=`echo "/usr/lib/gcc/i386-redhat-linux/4.1.2/../../../crti.o /usr/lib/gcc/i386-redhat-linux/4.1.2/crtbeginS.o" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
+predep_objects=`echo "/usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../lib64/crti.o /usr/lib64/gcc/x86_64-suse-linux/4.1.2/crtbeginS.o" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
 
 # Dependencies to place after the objects being linked to create a
 # shared library.
-postdep_objects=`echo "/usr/lib/gcc/i386-redhat-linux/4.1.2/crtendS.o /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../crtn.o" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
+postdep_objects=`echo "/usr/lib64/gcc/x86_64-suse-linux/4.1.2/crtendS.o /usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../lib64/crtn.o" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
 
 # Dependencies to place before the objects being linked to create a
 # shared library.
@@ -7463,11 +7463,11 @@ predeps=""
 
 # Dependencies to place after the objects being linked to create a
 # shared library.
-postdeps="-lstdc++ -lm -lgcc_s -lc -lgcc_s"
+postdeps="-lmpi_cxx -lmpi -lopen-rte -lopen-pal -libverbs -lrt -lnuma -ldl -lnsl -lutil -ldl -lstdc++ -lm -lgcc_s -lpthread -lc -lgcc_s"
 
 # The library search path used internally by the compiler when linking
 # a shared library.
-compiler_lib_search_path=`echo "-L/usr/lib/gcc/i386-redhat-linux/4.1.2 -L/usr/lib/gcc/i386-redhat-linux/4.1.2 -L/usr/lib/gcc/i386-redhat-linux/4.1.2/../../.." | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
+compiler_lib_search_path=`echo "-L/usr/lib64 -L/licsoft/libraries/openmpi/1.2.6/64bit/lib -L/usr/lib64/gcc/x86_64-suse-linux/4.1.2 -L/usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../lib64 -L/lib/../lib64 -L/usr/lib/../lib64 -L/usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../../../x86_64-suse-linux/lib -L/usr/lib64/gcc/x86_64-suse-linux/4.1.2/../../.." | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
 
 # Method to check whether dependent libraries are shared objects.
 deplibs_check_method="pass_all"
@@ -7547,10 +7547,10 @@ variables_saved_for_relink="PATH LD_LIBRARY_PATH LD_RUN_PATH GCC_EXEC_PREFIX COM
 link_all_deplibs=unknown
 
 # Compile-time system search path for libraries
-sys_lib_search_path_spec=`echo " /usr/lib/gcc/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../../i386-redhat-linux/lib/i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../../i386-redhat-linux/lib/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../i386-redhat-linux/4.1.2/ /usr/lib/gcc/i386-redhat-linux/4.1.2/../../../ /lib/i386-redhat-linux/4.1.2/ /lib/ /usr/lib/i386-redhat-linux/4.1.2/ /usr/lib/" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
+sys_lib_search_path_spec=`echo "/lib64 /usr/lib64 /usr/local/lib64" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
 
 # Run-time system search path for libraries
-sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/lib/octave-2.9.9 /usr/lib/qt-3.3/lib "
+sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /usr/X11R6/lib64/Xaw3d /usr/X11R6/lib64 /usr/X11R6/lib/Xaw3d /usr/X11R6/lib /usr/x86_64-suse-linux/lib /usr/local/lib64 /usr/local/lib /opt/kde3/lib64 /opt/kde3/lib /opt/gnome/lib64 /opt/gnome/lib /lib64 /lib /usr/lib64 /usr/lib /opt/cluster/intel/cce/9.1.042/lib /opt/cluster/intel/fce/9.1.036/lib /opt/cluster/Pathscale3.0/lib/2.9.99 /opt/cluster/Pathscale3.0/lib/2.9.99/32 /work/licsoft/compilers/pgi/linux86-64/6.2/lib /work/licsoft/compilers/pgi/linux86-64/6.2/libso "
 
 # Fix the shell variable $srcfile for the compiler.
 fix_srcfile_path=""
@@ -7574,7 +7574,7 @@ include_expsyms=""
 
 # ### BEGIN LIBTOOL TAG CONFIG: F77
 
-# Libtool was configured on host NWRW15:
+# Libtool was configured on host p2s103:
 
 # Shell to use when invoking shell scripts.
 SHELL="/bin/sh"
@@ -7596,12 +7596,12 @@ fast_install=yes
 
 # The host system.
 host_alias=
-host=i686-redhat-linux-gnu
+host=x86_64-unknown-linux-gnu
 host_os=linux-gnu
 
 # The build system.
 build_alias=
-build=i686-redhat-linux-gnu
+build=x86_64-unknown-linux-gnu
 build_os=linux-gnu
 
 # An echo program that does not interpret backslashes.
@@ -7612,7 +7612,7 @@ AR="ar"
 AR_FLAGS="cru"
 
 # A C compiler.
-LTCC="gcc"
+LTCC="/licsoft/libraries/openmpi/1.2.6/64bit/bin/mpicc"
 
 # LTCC compiler flags.
 LTCFLAGS="-g -O2"
@@ -7621,16 +7621,16 @@ LTCFLAGS="-g -O2"
 CC="g77"
 
 # Is the compiler the GNU C compiler?
-with_gcc=yes
+with_gcc=
 
-gcc_dir=`gcc -print-file-name=. | /bin/sed 's,/\.$,,'`
+gcc_dir=`gcc -print-file-name=. | /usr/bin/sed 's,/\.$,,'`
 gcc_ver=`gcc -dumpversion`
 
 # An ERE matcher.
 EGREP="grep -E"
 
 # The linker used to build libraries.
-LD="/usr/bin/ld"
+LD="/usr/x86_64-suse-linux/bin/ld -m elf_x86_64"
 
 # Whether we need hard or soft links.
 LN_S="ln -s"
@@ -7858,10 +7858,10 @@ variables_saved_for_relink="PATH LD_LIBRARY_PATH LD_RUN_PATH GCC_EXEC_PREFIX COM
 link_all_deplibs=unknown
 
 # Compile-time system search path for libraries
-sys_lib_search_path_spec=`echo " /usr/lib/gcc/i386-redhat-linux/3.4.6/ /usr/lib/gcc/i386-redhat-linux/3.4.6/ /usr/lib/gcc/i386-redhat-linux/3.4.6/../../../../i386-redhat-linux/lib/i386-redhat-linux/3.4.6/ /usr/lib/gcc/i386-redhat-linux/3.4.6/../../../../i386-redhat-linux/lib/ /usr/lib/gcc/i386-redhat-linux/3.4.6/../../../i386-redhat-linux/3.4.6/ /usr/lib/gcc/i386-redhat-linux/3.4.6/../../../ /lib/i386-redhat-linux/3.4.6/ /lib/ /usr/lib/i386-redhat-linux/3.4.6/ /usr/lib/" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
+sys_lib_search_path_spec=`echo "/lib64 /usr/lib64 /usr/local/lib64" | $SED -e "s@${gcc_dir}@\${gcc_dir}@g;s@${gcc_ver}@\${gcc_ver}@g"`
 
 # Run-time system search path for libraries
-sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/lib/octave-2.9.9 /usr/lib/qt-3.3/lib "
+sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /usr/X11R6/lib64/Xaw3d /usr/X11R6/lib64 /usr/X11R6/lib/Xaw3d /usr/X11R6/lib /usr/x86_64-suse-linux/lib /usr/local/lib64 /usr/local/lib /opt/kde3/lib64 /opt/kde3/lib /opt/gnome/lib64 /opt/gnome/lib /lib64 /lib /usr/lib64 /usr/lib /opt/cluster/intel/cce/9.1.042/lib /opt/cluster/intel/fce/9.1.036/lib /opt/cluster/Pathscale3.0/lib/2.9.99 /opt/cluster/Pathscale3.0/lib/2.9.99/32 /work/licsoft/compilers/pgi/linux86-64/6.2/lib /work/licsoft/compilers/pgi/linux86-64/6.2/libso "
 
 # Fix the shell variable $srcfile for the compiler.
 fix_srcfile_path=""
diff --git a/AMDiS/src/MeshStructure.cc b/AMDiS/src/MeshStructure.cc
index 5e9451f67f68252f8e6f387e98eec2fa749d27bd..eb432902ea489f5ec061d99003a23e613ab2a970 100644
--- a/AMDiS/src/MeshStructure.cc
+++ b/AMDiS/src/MeshStructure.cc
@@ -14,14 +14,14 @@ namespace AMDiS {
 
   void MeshStructure::insertElement(bool isLeaf) {
     // overflow? -> next index
-    if(pos_ >= unsignedLongSize_) {
+    if (pos_ >= unsignedLongSize_) {
       code_.push_back(currentCode_);
       pos_ = 0;
       currentCode_ = 0;
     }
 
     // insert element in binary code
-    if(!isLeaf) {
+    if (!isLeaf) {
       unsigned long int one = 1;
       currentCode_ += (one << pos_);
     } 
@@ -44,7 +44,7 @@ namespace AMDiS {
     clear();
     TraverseStack stack;
     ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
-    while(elInfo) {
+    while (elInfo) {
       insertElement(elInfo->getElement()->isLeaf());
       elInfo = stack.traverseNext(elInfo);
     }
@@ -84,7 +84,7 @@ namespace AMDiS {
 
   bool MeshStructure::skipBranch(MeshStructure *insert)
   {
-    if(isLeafElement()) {
+    if (isLeafElement()) {
       return nextElement(insert);
     } else {
       bool cont = nextElement(insert);
@@ -104,13 +104,13 @@ namespace AMDiS {
     structure2->reset();
 
     bool cont = true;
-    while(cont) {
+    while (cont) {
       bool cont1, cont2;
-      if(structure1->isLeafElement() == structure2->isLeafElement()) {
+      if (structure1->isLeafElement() == structure2->isLeafElement()) {
 	cont1 = structure1->nextElement(result);
 	cont2 = structure2->nextElement();
       } else {
-	if(structure1->isLeafElement()) {
+	if (structure1->isLeafElement()) {
 	  cont1 = structure1->nextElement();
 	  cont2 = structure2->skipBranch(result);
 	} else {
@@ -127,7 +127,8 @@ namespace AMDiS {
 
   void MeshStructure::fitMeshToStructure(Mesh *mesh,
 					 RefinementManager *manager,
-					 bool checkPartition) 
+					 bool checkPartition,
+					 bool debugMode) 
   {
     FUNCNAME("MeshStructure::fitMeshToStructure()");
 
@@ -140,32 +141,32 @@ namespace AMDiS {
     // decorate leaf data
     reset();
     elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
-    while(elInfo) {
+    while (elInfo) {
       TEST_EXIT_DBG(cont)("unexpected structure code end!\n");
 
       Element *element = elInfo->getElement();
 
-      if(isLeafElement()) {
+      if (isLeafElement()) {
 	TEST_EXIT_DBG(element->isLeaf())("mesh finer than code\n");
       };
 
-      if(element->isLeaf() && !isLeafElement()) {
+      if (element->isLeaf() && !isLeafElement()) {
 	MeshStructure *structure = NEW MeshStructure();
 	cont = skipBranch(structure);
 	structure->commit();
 
 	bool decorate = true;
-	if(checkPartition) {
+	if (checkPartition) {
 	  PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
 	    (element->getElementData(PARTITION_ED));
 	  TEST_EXIT_DBG(partitionData)("no partition element data\n");
 	  PartitionStatus status = partitionData->getPartitionStatus();
-	  if(status == OUT || status == UNDEFINED) {
+	  if ((debugMode == false) && (status == OUT || status == UNDEFINED)) {
 	    decorate = false;
 	  }
 	}
 
-	if(decorate) {
+	if (decorate) {
 	  MeshStructure_ED *elData = NEW MeshStructure_ED(element->getElementData());
 	  elData->setStructure(structure);
 	  element->setElementData(elData);
@@ -185,9 +186,9 @@ namespace AMDiS {
     do {
       finished = true;
       elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
-      while(elInfo) {
+      while (elInfo) {
 	Element *element = elInfo->getElement();
-	if(element->getElementData(MESH_STRUCTURE) != NULL) {
+	if (element->getElementData(MESH_STRUCTURE) != NULL) {
 	  element->setMark(1);
 	  finished = false;
 	} else {
@@ -196,7 +197,7 @@ namespace AMDiS {
 	elInfo = stack.traverseNext(elInfo);
       }
       manager->refineMesh(mesh);
-    } while(!finished);
+    } while (!finished);
   }
 
 }
diff --git a/AMDiS/src/MeshStructure.h b/AMDiS/src/MeshStructure.h
index 4690391fe4bc51ad65f17f9e47a640c3179d7218..ca6fcb8e8d32fb2a452ff6bd6500e87a98d278a3 100644
--- a/AMDiS/src/MeshStructure.h
+++ b/AMDiS/src/MeshStructure.h
@@ -46,11 +46,12 @@ namespace AMDiS {
   
     void clear();
 
+    /** \brief
+     * Creates a mesh structure code from a Mesh object by traversing it in preorder.
+     */
     void init(Mesh *mesh);
 
-    void init(const std::vector<unsigned long int>& code,
-	      int numElements) 
-    {
+    void init(const std::vector<unsigned long int>& code, int numElements) {
       code_ = code;
       numElements_ = numElements;
       reset();
@@ -58,10 +59,8 @@ namespace AMDiS {
 
     void reset();
 
-    void insertElement(bool isLeaf);
-
     inline void commit() {
-      if(pos_ > 0) {
+      if (pos_ > 0) {
 	code_.push_back(currentCode_);
       }
       reset();
@@ -77,25 +76,37 @@ namespace AMDiS {
       return (currentCode_ & 1) == 0;
     };
 
+    /** \brief
+     * Merges a mesh structure code with its own mesh structure code. The
+     * result overwrites the own mesh structure code.
+     */
     void merge(MeshStructure *structure) {
       MeshStructure temp(*this);
       merge(&temp, structure, this);
     };
 
-    static void merge(MeshStructure *structure1,
-		      MeshStructure *structure2,
-		      MeshStructure *result);
-
+    /** \brief
+     * Fits a given mesh to the mesh structure code. 
+     *
+     * \param debugMode     In debugMode, the whole mesh is fitted to the mesh
+     *      structure code. Otherwise, the mesh is fitted only on the partition
+     *      of the current process.
+     */
     void fitMeshToStructure(Mesh *mesh,
 			    RefinementManager *manager,
-			    bool checkPartition = false);
+			    bool checkPartition = false,
+			    bool debugMode = false);
 
+    /** \brief
+     * Prints the mesh structure code.
+     */
     void print() {
       FUNCNAME("MeshStructure::print()");
+
       reset();
       bool cont = true;
-      while(cont) {
-	if(isLeafElement()) {
+      while (cont) {
+	if (isLeafElement()) {
 	  MSG("0");
 	} else {
 	  MSG("1");
@@ -105,6 +116,9 @@ namespace AMDiS {
       MSG("\n");
     };
 
+    /** \brief
+     * Returns the mesh structure code.
+     */
     inline const std::vector<unsigned long int>& getCode() {
       return code_;
     };
@@ -117,6 +131,20 @@ namespace AMDiS {
       return currentElement_;
     };
 
+  protected:
+    /** \brief
+     * Insert a new element to the structure code. Is used by the init 
+     * function.
+     */
+    void insertElement(bool isLeaf);
+
+    /** \brief
+     * Merges two mesh structure codes to one structure code.     
+     */
+    void merge(MeshStructure *structure1,
+	       MeshStructure *structure2,
+	       MeshStructure *result);
+
   protected:
     std::vector<unsigned long int> code_;
 
diff --git a/AMDiS/src/ParallelProblem.cc b/AMDiS/src/ParallelProblem.cc
index ae098f789797c3e73e42ee477901688cddcbe106..b5b7f46bb091ac4562fac2e20370d4aa020eee13 100644
--- a/AMDiS/src/ParallelProblem.cc
+++ b/AMDiS/src/ParallelProblem.cc
@@ -30,15 +30,26 @@ namespace AMDiS {
 
   bool elementInPartition(ElInfo *elInfo)
   {
-      PartitionElementData *elementData = dynamic_cast<PartitionElementData*>
-	(elInfo->getElement()->getElementData(PARTITION_ED));
-      if(elementData && elementData->getPartitionStatus() == IN) {
-	return true;
-      } else {
-	return false;
-      }
+    PartitionElementData *elementData = dynamic_cast<PartitionElementData*>
+      (elInfo->getElement()->getElementData(PARTITION_ED));
+    if (elementData && elementData->getPartitionStatus() == IN) {
+      return true;
+    } else {
+      return false;
+    }
   }
 
+  bool elementInPartitionDbg(ElInfo *elInfo) 
+  {
+    // In debug mode, the first partition has to write the whole domain.
+    if (MPI::COMM_WORLD.Get_rank() == 0) {
+      return true;
+    }
+
+    return elementInPartition(elInfo);
+  }
+      
+
   class MyDualTraverse : public DualTraverse
   {
   public:
@@ -72,8 +83,8 @@ namespace AMDiS {
     : iterationIF_(iterationIF),
       timeIF_(timeIF)
   {
-    mpiRank_ = MPI::COMM_WORLD.Get_rank();
-    mpiSize_ = MPI::COMM_WORLD.Get_size();
+    mpiRank = MPI::COMM_WORLD.Get_rank();
+    mpiSize = MPI::COMM_WORLD.Get_size();
   }
 
   // =========================================================================
@@ -89,9 +100,9 @@ namespace AMDiS {
 				   CoarseningManager *cm)
     : ParallelProblemBase(iterationIF, timeIF),
       name_(name),
-      mesh_(mesh),
-      refinementManager_(rm),
-      coarseningManager_(cm),
+      mesh(mesh),
+      refinementManager(rm),
+      coarseningManager(cm),
       repartitionSteps_(1),
       puEveryTimestep_(false),
       dofVectors_(vectors),
@@ -103,7 +114,8 @@ namespace AMDiS {
       adaptiveThresholds_(0),
       thresholdIncFactor_(2.0),
       thresholdDecFactor_(0.5),
-      repartTimeFactor_(10.0)
+      repartTimeFactor_(10.0),
+      debugMode(0)
   {
     GET_PARAMETER(0, name_ + "->upper part threshold", "%f", 
 		  &upperPartThreshold_);
@@ -120,7 +132,7 @@ namespace AMDiS {
     TEST_EXIT(localCoarseGridLevel_ >= globalCoarseGridLevel_)
       ("local coarse grid level < global coarse grid level\n");
 
-    partitioner_ = NEW ParMetisPartitioner(mesh_);
+    partitioner_ = NEW ParMetisPartitioner(mesh);
 
     GET_PARAMETER(0, name_ + "->adaptive thresholds", "%d", 
 		  &adaptiveThresholds_);
@@ -141,56 +153,54 @@ namespace AMDiS {
     }
     minUpperTH_ = upperPartThreshold_;
     maxLowerTH_ = lowerPartThreshold_;
+
+    GET_PARAMETER(0, name_ + "->debug mode", "%d", &debugMode);
+
+    if (debugMode) {
+      dbgMesh = NEW Mesh(mesh->getName(), mesh->getDim());
+    }
   }
 
   ParallelProblem::~ParallelProblem() 
   {
     DELETE partitioner_;
+
+    if (debugMode) {
+      DELETE dbgMesh;
+    }
   }
 
   bool ParallelProblem::doPartitioning(AdaptInfo *adaptInfo, double localWeightSum) 
   {
     FUNCNAME("ParallelProblem::doPartitioning()");
 
-    double *weightSum = GET_MEMORY(double, mpiSize_);
-    int *partArray = GET_MEMORY(int, mpiSize_);
-    int part;
-
-    //weightSum[mpiRank_] = localWeightSum;
+    double *weightSum = GET_MEMORY(double, mpiSize);
+    int *partArray = GET_MEMORY(int, mpiSize);
+    int part = 0;
 
     MPI::COMM_WORLD.Gather(&localWeightSum, 1, MPI_DOUBLE,
 			   weightSum, 1, MPI_DOUBLE, 0);
 
-    if(mpiRank_ == 0) {
+    if (mpiRank == 0) {
 
       double average = 0.0;
-      int i;
-      for(i = 0; i < mpiSize_; i++) {
+      for (int i = 0; i < mpiSize; i++) {
 	average += weightSum[i];
       }
 
-      average /= mpiSize_;
-
-      //MSG("average weight: %f\n", average);
+      average /= mpiSize;
 
-      part = 0;
-      for(i = 0; i < mpiSize_; i++) {
-	//MSG("weight sum %d: %f\n", i, weightSum[i]);
-
-	if((weightSum[i] / average) > upperPartThreshold_) {
+      for (int i = 0; i < mpiSize; i++) {
+	if ((weightSum[i] / average) > upperPartThreshold_) {
 	  part = 1;
 	  break;
 	}
-	if((weightSum[i] / average) < lowerPartThreshold_) {
+	if ((weightSum[i] / average) < lowerPartThreshold_) {
 	  part = 1;
 	  break;
 	}
       }
 
-      //MSG("upper threshold initial: %f\n", upperPartThreshold_);
-      //MSG("lower threshold initial: %f\n", lowerPartThreshold_);
-      //MSG("part initial: %d\n", part);
-
       double computationTime = TIME_USED(computationStart, clock());
       if (adaptiveThresholds_) {
 
@@ -215,36 +225,22 @@ namespace AMDiS {
 	}
       }
 
-      //MSG("comp time: %f\n", computationTime);
-      //MSG("part time: %f\n", partitioningTime);
-      //MSG("time quotient: %f\n", computationTime/partitioningTime);
-      //MSG("upper threshold final: %f\n", upperPartThreshold_);
-      //MSG("lower threshold final: %f\n", lowerPartThreshold_);
-      //MSG("part final: %d\n", part);
-
-      for(i = 0; i < mpiSize_; i++) {
+      for (int i = 0; i < mpiSize; i++) {
 	partArray[i] = part;
       }      
     }
 
     MPI::COMM_WORLD.Scatter(partArray, 1, MPI_INT,
 			    &part, 1, MPI_INT, 0);
-
     
-    //MSG("rank %d: part: %d\n", mpiRank_, part);
-
-
-    FREE_MEMORY(weightSum, double, mpiSize_);
-    FREE_MEMORY(partArray, int, mpiSize_);
+    FREE_MEMORY(weightSum, double, mpiSize);
+    FREE_MEMORY(partArray, int, mpiSize);
 
     return (part == 1);
   }
 
   bool ParallelProblem::doBuildGlobalSolution(AdaptInfo *adaptInfo) {
     return true;
-//       (puEveryTimestep_ || !timeIF_ ||
-// 	    (adaptInfo->getTimestepNumber() % repartitionSteps_ == 0) ||
-// 	    adaptInfo->getTime() >= adaptInfo->getEndTime());
   }
 
   void ParallelProblem::partitionMesh(AdaptInfo *adaptInfo)
@@ -264,7 +260,7 @@ namespace AMDiS {
 
   void ParallelProblem::refineOverlap(AdaptInfo *adaptInfo)
   {
-    int i, dim = mesh_->getDim();
+    int i, dim = mesh->getDim();
 
     bool finished = (localCoarseGridLevel_ == 0);
 
@@ -274,7 +270,7 @@ namespace AMDiS {
 
       // mark in/out/border dofs
       TraverseStack stack;
-      ElInfo *elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_LEAF_EL);
+      ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
       while(elInfo) {
 	Element *element = elInfo->getElement();
 	PartitionElementData *partitionData = 
@@ -302,7 +298,7 @@ namespace AMDiS {
       // refine overlap-border and inner elements
       finished = true;
       bool marked = false;
-      elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_LEAF_EL);
+      elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
       while(elInfo) {
 	Element *element = elInfo->getElement();
 	PartitionElementData *partitionData = 
@@ -331,14 +327,14 @@ namespace AMDiS {
 
 	elInfo = stack.traverseNext(elInfo);
       }
-      if(marked) refinementManager_->refineMesh(mesh_);
+      if(marked) refinementManager->refineMesh(mesh);
     }
   }
 
   void ParallelProblem::globalRefineOutOfPartition(AdaptInfo *adaptInfo)
   {
     TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_LEAF_EL);
+    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
     while(elInfo) {
       PartitionElementData *partitionData = 
 	dynamic_cast<PartitionElementData*>(elInfo->getElement()->getElementData(PARTITION_ED));
@@ -347,7 +343,7 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);
     }
 
-    refinementManager_->refineMesh(mesh_);
+    refinementManager->refineMesh(mesh);
   }
 
   void ParallelProblem::coarsenOutOfPartition(AdaptInfo *adaptInfo)
@@ -355,7 +351,7 @@ namespace AMDiS {
     Flag meshCoarsened = 1;    
     while(meshCoarsened.getFlags() != 0) {
       TraverseStack stack;
-      ElInfo *elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_LEAF_EL);
+      ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
       while(elInfo) {
 	Element *element = elInfo->getElement();
 	PartitionElementData *partitionData = 
@@ -368,64 +364,63 @@ namespace AMDiS {
 	}
 	elInfo = stack.traverseNext(elInfo);
       }
-      meshCoarsened = coarseningManager_->coarsenMesh(mesh_);
+      meshCoarsened = coarseningManager->coarsenMesh(mesh);
     }
     MPI::COMM_WORLD.Barrier();
   }
 
   void ParallelProblem::exchangeMeshStructureCodes(MeshStructure *structures)
   {
-    structures[mpiRank_].init(mesh_);
-    const std::vector<unsigned long int>& myCode = structures[mpiRank_].getCode();
-    int rank;
-
+    // every process creates a mesh structure code from its mesh.
+    structures[mpiRank].init(mesh);
+    const std::vector<unsigned long int>& myCode = structures[mpiRank].getCode();
 
     // broadcast code sizes
-    int *codeSize = GET_MEMORY(int, mpiSize_);
+    int *codeSize = GET_MEMORY(int, mpiSize);
     int tmp = static_cast<int>(myCode.size());
 
     MPI::COMM_WORLD.Allgather(&tmp, 1, MPI_INT,
 			      codeSize, 1, MPI_INT);
     
     // broadcast number of elements
-    int *elements = GET_MEMORY(int, mpiSize_);
-    tmp = structures[mpiRank_].getNumElements();
+    int *elements = GET_MEMORY(int, mpiSize);
+    tmp = structures[mpiRank].getNumElements();
     MPI::COMM_WORLD.Allgather(&tmp, 1, MPI_INT,
 			      elements, 1, MPI_INT);
 
     // broadcast codes
-    int *codeOffset = GET_MEMORY(int, mpiSize_);
+    int *codeOffset = GET_MEMORY(int, mpiSize);
     int codeSizeSum = 0;
-    for(rank = 0; rank < mpiSize_; rank++) {
+    for (int rank = 0; rank < mpiSize; rank++) {
       codeOffset[rank] = codeSizeSum;
       codeSizeSum += codeSize[rank];
     }
 
     unsigned long int *code = GET_MEMORY(unsigned long int, codeSizeSum);
-    unsigned long int *localCode = GET_MEMORY(unsigned long int, codeSize[mpiRank_]);
+    unsigned long int *localCode = GET_MEMORY(unsigned long int, codeSize[mpiRank]);
    
     unsigned long int *ptr;
     std::vector<unsigned long int>::const_iterator it, end = myCode.end();
   
-    for(ptr = localCode, it = myCode.begin();
-	it != end; 
-	++it, ++ptr) 
+    for (ptr = localCode, it = myCode.begin();
+	 it != end; 
+	 ++it, ++ptr) 
     {
       *ptr = *it;
     }
   
-    MPI::COMM_WORLD.Allgatherv(localCode, codeSize[mpiRank_], 
+    MPI::COMM_WORLD.Allgatherv(localCode, codeSize[mpiRank], 
 			       MPI_UNSIGNED_LONG, 
 			       code, codeSize, codeOffset,
 			       MPI_UNSIGNED_LONG);
     
-    for(rank = 0; rank < mpiSize_; rank++) {
-      if(rank != mpiRank_) {
+    for (int rank = 0; rank < mpiSize; rank++) {
+      if (rank != mpiRank) {
 	std::vector<unsigned long int> remoteCode;
 	unsigned long int *ptr;
 	unsigned long int *begin = code + codeOffset[rank]; 
 	unsigned long int *end = begin + codeSize[rank];
-	for(ptr = begin; ptr != end; ++ptr) {
+	for (ptr = begin; ptr != end; ++ptr) {
 	  remoteCode.push_back(*ptr);
 	}
 	structures[rank].init(remoteCode, elements[rank]);
@@ -433,11 +428,11 @@ namespace AMDiS {
     }
 
     // free memory
-    FREE_MEMORY(elements, int, mpiSize_);
+    FREE_MEMORY(elements, int, mpiSize);
     FREE_MEMORY(code, unsigned long int, codeSizeSum);
-    FREE_MEMORY(localCode, unsigned long int, codeSize[mpiRank_]);
-    FREE_MEMORY(codeOffset, int, mpiSize_);
-    FREE_MEMORY(codeSize, int, mpiSize_);
+    FREE_MEMORY(localCode, unsigned long int, codeSize[mpiRank]);
+    FREE_MEMORY(codeOffset, int, mpiSize);
+    FREE_MEMORY(codeSize, int, mpiSize);
     
   }
 
@@ -445,41 +440,34 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelProblem::synchronizeMeshes()");
 
-    MeshStructure *structures = NEW MeshStructure[mpiSize_];
+    MeshStructure *structures = NEW MeshStructure[mpiSize];
 
-    // === build composite mesh structure ===
+    // build composite mesh structure
     exchangeMeshStructureCodes(structures);
 
     // merge codes
-    for (int rank = 0; rank < mpiSize_; rank++) {
-      if (rank != mpiRank_) {
-	structures[mpiRank_].merge(&structures[rank]);
+    for (int rank = 0; rank < mpiSize; rank++) {
+      if (rank != mpiRank) {
+	structures[mpiRank].merge(&structures[rank]);
       }
     }
-
-    // === build finest mesh ===
-    structures[mpiRank_].fitMeshToStructure(mesh_,
-					    refinementManager_,
-					    true);
-
-    DELETE [] structures;
-
-#if 0
-    // === count partition elements (only for debug) ===
-    int partitionElements = 0;
-    TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_LEAF_EL);
-    while(elInfo) {
-      Element *element = elInfo->getElement();
-      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
-	(element->getElementData(PARTITION_ED));
-      if(partitionData && partitionData->getPartitionStatus() == IN) {
-	partitionElements++;
+  
+    // build finest mesh on the rank partition
+    structures[mpiRank].fitMeshToStructure(mesh,
+					   refinementManager,
+					   true);
+
+    // In debug mode, process 0 builds the global solution mesh.
+    if (debugMode) {
+      dbgMesh = mesh;
+      if (mpiRank == 0) {
+	structures[mpiRank].fitMeshToStructure(dbgMesh,
+					       refinementManager,
+					       true, true);
       }
-      elInfo = stack.traverseNext(elInfo);
     }
-    MSG("rank %d partition elements: %d\n", mpiRank_, partitionElements);
-#endif
+
+    DELETE [] structures;
   }
 
 
@@ -488,15 +476,16 @@ namespace AMDiS {
     Element *element = elInfo->getElement();
     PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
       (element->getElementData(PARTITION_ED));
-    TEST_EXIT(partitionData)("no partition data\n");
+    TEST_EXIT_DBG(partitionData)("no partition data\n");
     PartitionStatus status = partitionData->getPartitionStatus();
-    if(status == IN) 
+    if (status == IN) 
       return true;
     else
       return false;
   }
 
   void ParallelProblem::exchangeRankSolutions(AdaptInfo *adaptInfo,
+					      Mesh *workMesh,
 					      std::vector<DOFVector<double>*> rankSolutions)
   {
     FUNCNAME("ParallelProblem::exchangeRankSolutions()");
@@ -509,28 +498,22 @@ namespace AMDiS {
 
 
     const FiniteElemSpace *feSpace = rankSolutions[0]->getFESpace();
-    TEST_EXIT(feSpace->getMesh() == mesh_)("invalid mesh\n");
-    int dim = mesh_->getDim();
+    int dim = workMesh->getDim();
     const BasisFunction *basFcts = feSpace->getBasisFcts();
     int numFcts = basFcts->getNumber();
     DegreeOfFreedom *coarseDOFs = GET_MEMORY(DegreeOfFreedom, numFcts);
     DegreeOfFreedom *fineDOFs = GET_MEMORY(DegreeOfFreedom, numFcts);
     DOFAdmin *admin =  feSpace->getAdmin();
-    int partition;
-
-    std::vector<std::vector<DegreeOfFreedom> > sendOrder;
-    std::vector<std::vector<DegreeOfFreedom> > recvOrder;
-    sendOrder.resize(mpiSize_);
-    recvOrder.resize(mpiSize_);
 
-    std::set<int>::iterator setIt, setBegin, setEnd;
+    std::vector<std::vector<DegreeOfFreedom> > sendOrder(mpiSize);
+    std::vector<std::vector<DegreeOfFreedom> > recvOrder(mpiSize);
 
     elementPartitions_.clear();
 
     int elementPartition = -1;
     Element *coarseElement = NULL;
     TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_EVERY_EL_PREORDER);
+    ElInfo *elInfo = stack.traverseFirst(workMesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
     while (elInfo) {
       Element *element = elInfo->getElement();
 
@@ -546,16 +529,14 @@ namespace AMDiS {
 
  	if (status != OUT) {
 	  if (partitionData->getLevel() == localCoarseGridLevel_) {
-	    basFcts->getLocalIndices(element,
-				     admin,
-				     coarseDOFs);
+	    basFcts->getLocalIndices(element, admin, coarseDOFs);
 
 	    // collect other partitions element belongs to
 	    for (int i = 0; i < dim + 1; i++) {
-	      setBegin = vertexPartitions_[coarseDOFs[i]].begin();
-	      setEnd = vertexPartitions_[coarseDOFs[i]].end();
-	      for (setIt = setBegin; setIt != setEnd; ++setIt) {
-		elementPartitions_[element].insert(*setIt/* - 1*/);
+	      std::set<int>::iterator setBegin = vertexPartitions_[coarseDOFs[i]].begin();
+	      std::set<int>::iterator setEnd = vertexPartitions_[coarseDOFs[i]].end();
+	      for (std::set<int>::iterator setIt = setBegin; setIt != setEnd; ++setIt) {
+		elementPartitions_[element].insert(*setIt);
 	      }	
 	    }
 
@@ -564,9 +545,7 @@ namespace AMDiS {
 
 
 	  if (element->isLeaf()) {
-	    basFcts->getLocalIndices(element,
-				     admin,
-				     fineDOFs);
+	    basFcts->getLocalIndices(element, admin, fineDOFs);
 
 	    for (int i = 0; i < numFcts; i++) {
 	      if (status == OVERLAP) {
@@ -575,11 +554,11 @@ namespace AMDiS {
 	      } 
 	      if (status == IN) {
 		// recv dofs
-		TEST_EXIT(elementPartition == mpiRank_)("???\n");
-		setBegin = elementPartitions_[coarseElement].begin();
-		setEnd = elementPartitions_[coarseElement].end();
-		for (setIt = setBegin; setIt != setEnd; ++setIt) {
-		  if (*setIt != mpiRank_) {
+		TEST_EXIT(elementPartition == mpiRank)("???\n");
+		std::set<int>::iterator setBegin = elementPartitions_[coarseElement].begin();
+		std::set<int>::iterator setEnd = elementPartitions_[coarseElement].end();
+		for (std::set<int>::iterator setIt = setBegin; setIt != setEnd; ++setIt) {
+		  if (*setIt != mpiRank) {
  		    recvOrder[*setIt].push_back(fineDOFs[i]);
 		  }
 		}
@@ -592,94 +571,16 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);
     }
 
-
-#if 0
-    MyDualTraverse dualTraverse(localCoarseGridLevel_);//localCoarseGridLevel_);
-
-    ElInfo *elInfo1, *elInfo2;
-    ElInfo *large, *small;
-
-    bool cont = dualTraverse.traverseFirst(mesh_, mesh_,
-					   -1, -1,
-					   Mesh::CALL_EVERY_EL_PREORDER |
-					   Mesh::FILL_COORDS | 
-					   Mesh::FILL_DET,
-					   Mesh::CALL_LEAF_EL | 
-					   Mesh::FILL_COORDS,
-					   &elInfo1, &elInfo2,
-					   &small, &large);
-
-    while (cont) {
-      TEST_EXIT(elInfo1 == large && elInfo2 == small)
-	("error in dual traverse\n");
-
-      Element *element1 = elInfo1->getElement();
-      Element *element2 = elInfo2->getElement();
-    
-      // get partition status of element2
-      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
-	(element2->getElementData(PARTITION_ED));
-      TEST_EXIT(partitionData)("no partition data\n");
-      PartitionStatus status = partitionData->getPartitionStatus();
-
-      if(status != OUT) {
-	basFcts->getLocalIndices(element1,
-				 admin,
-				 coarseDOFs);
-	basFcts->getLocalIndices(element2,
-				 admin,
-				 fineDOFs);
-
-	// collect other partitions element2 belongs to
-	for(i = 0; i < dim + 1; i++) {
-	  setBegin = vertexPartitions_[coarseDOFs[i]].begin();
-	  setEnd = vertexPartitions_[coarseDOFs[i]].end();
-	  for(setIt = setBegin; setIt != setEnd; ++setIt) {
-	    elementPartitions_[element1].insert(*setIt/* - 1*/);
-	  }	
-	}
-      
-	int elementPartition = partitionVec_[element1->getIndex()];
-
-	// collect send- and recv-dofs for every rank 
-	WorldVector<double> worldCoords;
-	for(i = 0; i < numFcts; i++) {
-	  elInfo2->coordToWorld(*(basFcts->getCoords(i)), &worldCoords);
-	  if (status == OVERLAP) {
-	    // send dofs
-	    //sortedSendDOFs[elementPartition][worldCoords] = fineDOFs[i];
-	    sendOrder[elementPartition].push_back(fineDOFs[i]);
-	  } 
-	  if (status == IN) {
-	    // recv dofs
-	    TEST_EXIT(elementPartition == mpiRank_)("???\n");
-	    setBegin = elementPartitions_[element1].begin();
-	    setEnd = elementPartitions_[element1].end();
-	    for(setIt = setBegin; setIt != setEnd; ++setIt) {
-	      //sortedRecvDOFs[*setIt][worldCoords] = fineDOFs[i];
-	      if(*setIt != mpiRank_) {
-		recvOrder[*setIt].push_back(fineDOFs[i]);
-	      }
-	    }
-	  }
-	}
-      }
-      
-      cont = dualTraverse.traverseNext(&elInfo1, &elInfo2,
-				       &small, &large);
-    }
-#endif
-
     // create send and recv buffers and fill send buffers
-    DOFVector<double> *solution = rankSolutions[mpiRank_];
+    DOFVector<double> *solution = rankSolutions[mpiRank];
 
     std::map<int, double*> sendBuffer;
     std::map<int, double*> recvBuffer;
     std::map<int, int> sendBufferSize;
     std::map<int, int> recvBufferSize;
 
-    for (partition = 0; partition < mpiSize_; partition++) {
-      if (partition != mpiRank_) {
+    for (int partition = 0; partition < mpiSize; partition++) {
+      if (partition != mpiRank) {
 	int sendSize = static_cast<int>(sendOrder[partition].size());
 	int recvSize = static_cast<int>(recvOrder[partition].size());
 
@@ -705,8 +606,8 @@ namespace AMDiS {
     }
 
     // non-blocking sends
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
+    for (int partition = 0; partition < mpiSize; partition++) {
+      if (partition != mpiRank) {
 	if(sendBufferSize[partition] > 0) {
 	  MPI::COMM_WORLD.Isend(sendBuffer[partition],
 				sendBufferSize[partition],
@@ -716,11 +617,11 @@ namespace AMDiS {
 	}
       }
     }    
-  
+   
     // blocking recieves
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
-	if(recvBufferSize[partition] > 0) {
+    for (int partition = 0; partition < mpiSize; partition++) {
+      if (partition != mpiRank) {
+	if (recvBufferSize[partition] > 0) {
 	  MPI::COMM_WORLD.Recv(recvBuffer[partition],
 			       recvBufferSize[partition],
 			       MPI_DOUBLE,
@@ -734,20 +635,19 @@ namespace AMDiS {
     MPI::COMM_WORLD.Barrier();
 
     // copy values into rank solutions
-    for (partition = 0; partition < mpiSize_; partition++) {
-      if (partition != mpiRank_) {
+    for (int partition = 0; partition < mpiSize; partition++) {
+      if (partition != mpiRank) {
 	std::vector<DegreeOfFreedom>::iterator dofIt = recvOrder[partition].begin();
-	for (i = 0; i < recvBufferSize[partition]; i++) {
-	  (*(rankSolutions[partition]))[*dofIt] = 
-	    recvBuffer[partition][i];
+	for (int i = 0; i < recvBufferSize[partition]; i++) {
+	  (*(rankSolutions[partition]))[*dofIt] = recvBuffer[partition][i];
 	  ++dofIt;
 	}
       }
     }    
-    
+
     // free send and recv buffers
-    for (partition = 0; partition < mpiSize_; partition++) {
-      if (partition != mpiRank_) {
+    for (int partition = 0; partition < mpiSize; partition++) {
+      if (partition != mpiRank) {
 	if (sendBufferSize[partition] > 0)
 	  FREE_MEMORY(sendBuffer[partition], 
 		      double,
@@ -758,150 +658,11 @@ namespace AMDiS {
 		      recvBufferSize[partition]);
       }
     }    
+
     FREE_MEMORY(coarseDOFs, DegreeOfFreedom, numFcts);
     FREE_MEMORY(fineDOFs, DegreeOfFreedom, numFcts);
   }
 
-#if 0
-  void ParallelProblem::exchangeElementMarkings(AdaptInfo *adaptInfo)
-  {
-    FUNCNAME("ParallelProblem::exchangeElementMarkings()");
-    int i;
-
-    partitioner_->fillLeafPartitionVec(&oldPartitionVec_, &oldPartitionVec_);
-    partitioner_->fillLeafPartitionVec(&partitionVec_, &partitionVec_);
-
-    //std::map<int, Element*> elementWithIndex;
-
-    // === get send and recieve orders ===
-    std::vector<std::vector<Element*> > recvOrder;
-    std::vector<std::vector<int> > markings;
-
-    markings.resize(mpiSize_);
-    recvOrder.resize(mpiSize_);
-
-    TraverseStack stack;
-    ElInfo *elInfo;
-
-    elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_LEAF_EL);
-    while(elInfo) {
-      Element *element = elInfo->getElement();
-      int index = element->getIndex();
-      int oldPartition = oldPartitionVec_[index];
-      int newPartition = partitionVec_[index];
-      int mark = element->getMark();
-
-//       if(mark!=0)
-// 	MSG("rank %d: index %d  mark %d\n",
-// 	    mpiRank_, index,  mark);
-
-      if(oldPartition != newPartition) {
-	if(oldPartition == mpiRank_) {
-	  markings[newPartition].push_back(mark);
-// 	  MSG("rank %d: index %d   %d -> %d   mark %d\n",
-// 	      mpiRank_, index, oldPartition, newPartition, mark);
-	}
-	if(newPartition == mpiRank_) {
-	  recvOrder[oldPartition].push_back(element);
-// 	  MSG("rank %d: index %d   %d <- %d   mark %d\n",
-// 	      mpiRank_, index, oldPartition, newPartition, mark);
-	  //elementWithIndex[index] = element;
-	}
-      }
-      elInfo = stack.traverseNext(elInfo);
-    }
-
-    // === create send and recv buffers and fill send buffers ===
-    std::map<int, int*> sendBuffer;
-    std::map<int, int*> recvBuffer;
-    std::map<int, int> sendBufferSize;
-    std::map<int, int> recvBufferSize;
-
-    int partition;
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
-	int sendSize = static_cast<int>(markings[partition].size());
-	int recvSize = static_cast<int>(recvOrder[partition].size());
-      
-	sendBufferSize[partition] = sendSize;	
-	recvBufferSize[partition] = recvSize;
-	if(sendSize > 0) {
-	  sendBuffer[partition] = GET_MEMORY(int, sendSize);
-	  std::vector<int>::iterator it;
-	  it = markings[partition].begin();
-	  int *bufferIt, *bufferBegin, *bufferEnd;
-	  bufferBegin = sendBuffer[partition];
-	  bufferEnd = bufferBegin + sendSize;
-	  for(bufferIt = bufferBegin; 
-	      bufferIt < bufferEnd; 
-	      ++bufferIt, ++it) 
-	  {
-	    *bufferIt = *it;
-	  }
-	}
-	if(recvSize > 0)
-	  recvBuffer[partition] = GET_MEMORY(int, recvSize);
-      }
-    }
-
-    // === non-blocking sends ===
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
-	if(sendBufferSize[partition] > 0) {
-	  MPI::COMM_WORLD.Isend(sendBuffer[partition],
-				sendBufferSize[partition],
-				MPI_INT,
-				partition,
-				0);
-	}
-      }
-    }    
-    
-    // === blocking receives ===
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
-	if(recvBufferSize[partition] > 0) {
-	  MPI::COMM_WORLD.Recv(recvBuffer[partition],
-			       recvBufferSize[partition],
-			       MPI_INT,
-			       partition,
-			       0);
-	}
-      }
-    }    
-
-    // === wait for end of MPI communication ===
-    MPI::COMM_WORLD.Barrier();
-
-    // === copy received values into elements ===
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
-	std::vector<Element*>::iterator it = recvOrder[partition].begin();
-	for(i = 0; i < recvBufferSize[partition]; i++) {
-	  (*it)->setMark(recvBuffer[partition][i]);
-// 	  MSG(" *** rank %d, index %d from %d mark %d\n",
-// 	      mpiRank_, (*it)->getIndex(), i, recvBuffer[partition][i]);
-	  ++it;
-	}
-      }
-    }    
-
-    // === free send and receive buffers ===
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
-	if(sendBufferSize[partition] > 0)
-	  FREE_MEMORY(sendBuffer[partition], 
-		      int,
-		      sendBufferSize[partition]);
-	if(recvBufferSize[partition] > 0)
-	  FREE_MEMORY(recvBuffer[partition], 
-		      int,
-		      recvBufferSize[partition]);
-      }
-    }
-  }
-#endif
-
   void ParallelProblem::exchangeDOFVector(AdaptInfo *adaptInfo,
 					  DOFVector<double> *values)
   {
@@ -911,8 +672,8 @@ namespace AMDiS {
     // === get send and recieve orders ===
     std::vector<std::vector<DegreeOfFreedom> > sendOrder;
     std::vector<std::vector<DegreeOfFreedom> > recvOrder;
-    sendOrder.resize(mpiSize_);
-    recvOrder.resize(mpiSize_);
+    sendOrder.resize(mpiSize);
+    recvOrder.resize(mpiSize);
 
     int i;
     const FiniteElemSpace *feSpace = values->getFESpace();
@@ -936,13 +697,13 @@ namespace AMDiS {
 	// get dof indices
 	basFcts->getLocalIndices(element, admin, dofs);
 
-	if(oldPartition == mpiRank_) {
+	if(oldPartition == mpiRank) {
 	  for(i = 0; i < numFcts; i++) {
 	    // send element values to new partition
 	    sendOrder[newPartition].push_back(dofs[i]);
 	  }
 	}
-	if(newPartition == mpiRank_) {
+	if(newPartition == mpiRank) {
 	  for(i = 0; i < numFcts; i++) {
 	    // recv element values from old partition
 	    recvOrder[oldPartition].push_back(dofs[i]);
@@ -962,8 +723,8 @@ namespace AMDiS {
     std::map<int, int> recvBufferSize;
 
     int partition;
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
+    for(partition = 0; partition < mpiSize; partition++) {
+      if(partition != mpiRank) {
 	int sendSize = static_cast<int>(sendOrder[partition].size());
 	int recvSize = static_cast<int>(recvOrder[partition].size());
       
@@ -989,7 +750,7 @@ namespace AMDiS {
     }
 
 //     MSG("rank %d: send %d %d %d %d    recv %d %d %d %d\n", 
-// 	mpiRank_,
+// 	mpiRank,
 // 	sendBufferSize[0],
 // 	sendBufferSize[1],
 // 	sendBufferSize[2],
@@ -1000,8 +761,8 @@ namespace AMDiS {
 // 	recvBufferSize[3]);
 
     // === non-blocking sends ===
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
+    for(partition = 0; partition < mpiSize; partition++) {
+      if(partition != mpiRank) {
 	if(sendBufferSize[partition] > 0) {
 	  MPI::COMM_WORLD.Isend(sendBuffer[partition],
 				sendBufferSize[partition],
@@ -1013,8 +774,8 @@ namespace AMDiS {
     }    
     
     // === blocking receives ===
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
+    for(partition = 0; partition < mpiSize; partition++) {
+      if(partition != mpiRank) {
 	if(recvBufferSize[partition] > 0) {
 	  MPI::COMM_WORLD.Recv(recvBuffer[partition],
 			       recvBufferSize[partition],
@@ -1029,8 +790,8 @@ namespace AMDiS {
     MPI::COMM_WORLD.Barrier();
 
     // === copy received values into DOFVector ===
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
+    for(partition = 0; partition < mpiSize; partition++) {
+      if(partition != mpiRank) {
 	std::vector<DegreeOfFreedom>::iterator dofIt = recvOrder[partition].begin();
 	for(i = 0; i < recvBufferSize[partition]; i++) {
 	  (*values)[*dofIt] = recvBuffer[partition][i];
@@ -1040,8 +801,8 @@ namespace AMDiS {
     }    
 
     // === free send and receive buffers ===
-    for(partition = 0; partition < mpiSize_; partition++) {
-      if(partition != mpiRank_) {
+    for(partition = 0; partition < mpiSize; partition++) {
+      if(partition != mpiRank) {
 	if(sendBufferSize[partition] > 0)
 	  FREE_MEMORY(sendBuffer[partition], 
 		      double,
@@ -1061,7 +822,7 @@ namespace AMDiS {
     FUNCNAME("ParallelProblem::buildGlobalSolution()");
 
     const FiniteElemSpace *feSpace = globalSolution->getFESpace();
-    int i, dim = mesh_->getDim();
+    int dim = mesh->getDim();
     const BasisFunction *basFcts = feSpace->getBasisFcts();
     int numFcts = basFcts->getNumber();
     DegreeOfFreedom *coarseDOFs = GET_MEMORY(DegreeOfFreedom, numFcts);
@@ -1072,17 +833,6 @@ namespace AMDiS {
 
     MSG("Building global solution\n");
 
-    int j;
-
-//     DOFVector<double> wtest0(feSpace, "wtest0");
-//     wtest0.set(0.0);
-//     DOFVector<double> wtest1(feSpace, "wtest1");
-//     wtest1.set(0.0);
-//     DOFVector<double> wtest2(feSpace, "wtest2");
-//     wtest2.set(0.0);
-//     DOFVector<double> wtest3(feSpace, "wtest3");
-//     wtest3.set(0.0);
-
     // compute w[DOF][partition]->value
     std::map<DegreeOfFreedom, std::map<int, double> > w;
     std::map<DegreeOfFreedom, std::map<int, double> >::iterator wIt, wBegin, wEnd;
@@ -1099,75 +849,19 @@ namespace AMDiS {
     std::map<DegreeOfFreedom, bool> visited;
 
 
-#if 0    
-    TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh_, -1, 
-					 Mesh::CALL_EVERY_EL_PREORDER |
-					 Mesh::FILL_COORDS);
-    while(elInfo) {
-      Element *element = elInfo->getElement();
-
-      PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
-	(element->getElementData(PARTITION_ED));
-
-      if(partitionData) {
-	PartitionStatus status = partitionData->getPartitionStatus();
-
-	if(status == IN) {
-	  if(partitionData->getLevel() == localCoarseGridLevel_) {
-	    basFcts->getLocalIndices(element, admin, coarseDOFs);
-	  }
-
-	  if(element->isLeaf()) {
-	    if(elementPartitions_[element].size() > 1) {
-	      // get fine dofs
-	      basFcts->getLocalIndices(element, admin, fineDOFs);
-      
-	      // for all fine DOFs
-	      for(i = 0; i < numFcts; i++) {
-		if(!visited[fineDOFs[i]]) {
-		  visited[fineDOFs[i]] = true;
-
-		  elInfo2->coordToWorld(*(basFcts->getCoords(i)), &worldCoord);
-		  elInfo1->worldToCoord(worldCoord, &baryCoord);
-
-		  // for all coarse vertex DOFs
-		  for(j = 0; j < dim + 1; j++) {
-		    partBegin = (*vertexPartitions)[coarseDOFs[j]].begin();
-		    partEnd = (*vertexPartitions)[coarseDOFs[j]].end();
-		    for(partIt = partBegin; partIt != partEnd; ++partIt) {
-		      int partition = *partIt/* - 1*/;
-		      double val = (*(linearFunctions->getPhi(j)))(baryCoord);
-		      w[fineDOFs[i]][partition] += val;
-		      sumW[fineDOFs[i]] += val;
-		    }
-		  }
-		}
-	      }
-	    }
-	  }
-	}
-      }
-      
-      elInfo = stack.traverseNext(elInfo);
-    }
-#endif
-
-    MyDualTraverse dualTraverse(localCoarseGridLevel_);//localCoarseGridLevel_);
+    MyDualTraverse dualTraverse(localCoarseGridLevel_);
     ElInfo *elInfo1, *elInfo2;
     ElInfo *large, *small;
 
-    bool cont;
-
-    cont = dualTraverse.traverseFirst(mesh_, mesh_,
-				      -1, -1,
-				      Mesh::CALL_EVERY_EL_PREORDER |
-				      Mesh::FILL_COORDS | 
-				      Mesh::FILL_DET,
-				      Mesh::CALL_LEAF_EL | 
-				      Mesh::FILL_COORDS,
-				      &elInfo1, &elInfo2,
-				      &small, &large);
+    bool cont = dualTraverse.traverseFirst(mesh, mesh,
+					   -1, -1,
+					   Mesh::CALL_EVERY_EL_PREORDER |
+					   Mesh::FILL_COORDS | 
+					   Mesh::FILL_DET,
+					   Mesh::CALL_LEAF_EL | 
+					   Mesh::FILL_COORDS,
+					   &elInfo1, &elInfo2,
+					   &small, &large);
 
     while (cont) {
       Element *element1 = elInfo1->getElement();
@@ -1175,48 +869,35 @@ namespace AMDiS {
       PartitionElementData *partitionData = 
 	dynamic_cast<PartitionElementData*>
 	(element1->getElementData(PARTITION_ED));
-      if(partitionData->getPartitionStatus() == IN) {
+      if (partitionData->getPartitionStatus() == IN) {
 
 	// get coarse dofs
-	if(element1 != lastCoarseElement) {
+	if (element1 != lastCoarseElement) {
 	  basFcts->getLocalIndices(element1, admin, coarseDOFs);
 	  lastCoarseElement = element1;
 	}
       
-	if(elementPartitions_[element1].size() > 1) {
+	if (elementPartitions_[element1].size() > 1) {
 	  // get fine dofs
 	  basFcts->getLocalIndices(element2, admin, fineDOFs);
       
 	  // for all fine DOFs
-	  for(i = 0; i < numFcts; i++) {
-	    if(!visited[fineDOFs[i]]) {
+	  for (int i = 0; i < numFcts; i++) {
+	    if (!visited[fineDOFs[i]]) {
 	      visited[fineDOFs[i]] = true;
 
 	      elInfo2->coordToWorld(*(basFcts->getCoords(i)), &worldCoord);
 	      elInfo1->worldToCoord(worldCoord, &baryCoord);
 
 	      // for all coarse vertex DOFs
-	      for(j = 0; j < dim + 1; j++) {
+	      for (int j = 0; j < dim + 1; j++) {
 		partBegin = vertexPartitions_[coarseDOFs[j]].begin();
 		partEnd = vertexPartitions_[coarseDOFs[j]].end();
-		for(partIt = partBegin; partIt != partEnd; ++partIt) {
+		for (partIt = partBegin; partIt != partEnd; ++partIt) {
 		  int partition = *partIt/* - 1*/;
 		  double val = (*(linearFunctions->getPhi(j)))(baryCoord);
 		  w[fineDOFs[i]][partition] += val;
 
-// 		  if(partition == 0) {
-// 		    wtest0[fineDOFs[i]] += val;
-// 		  }
-// 		  if(partition == 1) {
-// 		    wtest1[fineDOFs[i]] += val;
-// 		  }
-// 		  if(partition == 2) {
-// 		    wtest2[fineDOFs[i]] += val;
-// 		  }
-// 		  if(partition == 3) {
-// 		    wtest3[fineDOFs[i]] += val;
-// 		  }
-
 		  sumW[fineDOFs[i]] += val;
 		}
 	      }
@@ -1228,18 +909,6 @@ namespace AMDiS {
 				       &small, &large);
     }
 
-
-//     if(mpiRank_ == 0) {
-//       MacroWriter::writeMacro(feSpace, "output/wtest0.mesh");
-//       ValueWriter::writeValues(&wtest0, "output/wtest0.dat");
-//       MacroWriter::writeMacro(feSpace, "output/wtest1.mesh");
-//       ValueWriter::writeValues(&wtest1, "output/wtest1.dat");
-//       MacroWriter::writeMacro(feSpace, "output/wtest2.mesh");
-//       ValueWriter::writeValues(&wtest2, "output/wtest2.dat");
-//       MacroWriter::writeMacro(feSpace, "output/wtest3.mesh");
-//       ValueWriter::writeValues(&wtest3, "output/wtest3.dat");
-//     }
-
     FREE_MEMORY(coarseDOFs, DegreeOfFreedom, numFcts);
     FREE_MEMORY(fineDOFs, DegreeOfFreedom, numFcts);
 
@@ -1248,18 +917,18 @@ namespace AMDiS {
     wBegin = w.begin();
     wEnd = w.end();
 
-    for(wIt = wBegin; wIt != wEnd; ++wIt) {
+    for (wIt = wBegin; wIt != wEnd; ++wIt) {
       DegreeOfFreedom dof = wIt->first;
       (*globalSolution)[dof] = 0.0;
     }
     
-    for(wIt = wBegin; wIt != wEnd; ++wIt) {
+    for (wIt = wBegin; wIt != wEnd; ++wIt) {
       DegreeOfFreedom dof = wIt->first;
       std::map<int, double>::iterator partIt, partBegin, partEnd;
       partBegin = wIt->second.begin();
       partEnd = wIt->second.end();
     
-      for(partIt = partBegin; partIt != partEnd; ++partIt) {
+      for (partIt = partBegin; partIt != partEnd; ++partIt) {
 	int partition = partIt->first;
 	double wDOF = partIt->second;
 	(*globalSolution)[dof] += wDOF / sumW[dof] * (*(rankSolutions[partition]))[dof];
@@ -1277,7 +946,7 @@ namespace AMDiS {
     if(!add) errVec.clear();
 
     TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh_, 
+    ElInfo *elInfo = stack.traverseFirst(mesh, 
 					 -1,
 					 Mesh::CALL_EVERY_EL_PREORDER);
     totalErr = 0.0;
@@ -1302,50 +971,9 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);
     }
 
-//     double total_sum = 0.0;
-//     MPI::COMM_WORLD.Allreduce(&totalErr,
-// 			      &total_sum,
-// 			      1,
-// 			      MPI_DOUBLE,
-// 			      MPI_SUM);
-//     total_sum = sqrt(total_sum);
-
-//     MSG("error rank %d = %e (total %e)\n", mpiRank_, totalErr, total_sum);
-
     return totalErr;
   }
 
-//   void ParallelProblem::writeRankMacroAndValues(DOFVector<double> *vec, 
-// 						const char *name, 
-// 						double time)
-//   {
-//     char number[3];
-//     sprintf(number, "%d", MPI::COMM_WORLD.Get_rank());
-    
-//     std::string macroFile(name);
-//     macroFile += number;
-//     macroFile += ".mesh";
-
-//     ConditionalMacroWriter::writeMacro(vec->getFESpace(),
-// 				       const_cast<char*>(macroFile.c_str()),
-// 				       time,
-// 				       -1,
-// 				       Mesh::CALL_LEAF_EL,
-// 				       &writeElement);
-
-//     std::string valueFile(name);
-//     valueFile += number;
-//     valueFile += ".dat";
-
-//     ConditionalValueWriter::writeValues(vec,
-// 					const_cast<char*>(valueFile.c_str()),
-// 					time,
-// 					-1,
-// 					Mesh::CALL_LEAF_EL,
-// 					&writeElement);
-//   }
-
-
   double ParallelProblem::setElemWeights(AdaptInfo *adaptInfo) 
   {
     double localWeightSum = 0.0;
@@ -1354,7 +982,7 @@ namespace AMDiS {
     elemWeights_.clear();
 
     TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh_, 
+    ElInfo *elInfo = stack.traverseFirst(mesh, 
 					 -1,
 					 Mesh::CALL_EVERY_EL_PREORDER);
     while(elInfo) {
@@ -1388,7 +1016,7 @@ namespace AMDiS {
       return;
 
     TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_LEAF_EL);
+    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_LEAF_EL);
     while (elInfo) {
       Element *element = elInfo->getElement();
       PartitionElementData *partitionData = dynamic_cast<PartitionElementData*>
@@ -1401,21 +1029,21 @@ namespace AMDiS {
       elInfo = stack.traverseNext(elInfo);
     }
 
-    refinementManager_->refineMesh(mesh_);
+    refinementManager->refineMesh(mesh);
   }
 
 
   void ParallelProblem::createOverlap(int level, int overlap, bool openOverlap,
 				      std::map<Element*, int> &overlapDistance)
   {
-    int i, dim = mesh_->getDim();
+    int i, dim = mesh->getDim();
 
     // === create dual graph (one common node) and prepare breadth-first search ===
     std::map<DegreeOfFreedom, std::vector<Element*> > vertexElements;
     std::queue<Element*> overlapQueue;
 
     TraverseStack stack;
-    ElInfo *elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_EVERY_EL_PREORDER);
+    ElInfo *elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
     while(elInfo) {
       Element *element = elInfo->getElement();
       PartitionElementData *partitionData = 
@@ -1478,7 +1106,7 @@ namespace AMDiS {
 					     bool openOverlap,
 					     std::map<Element*, int> &overlapDistance)
   {
-    int dim = mesh_->getDim();
+    int dim = mesh->getDim();
 
     TraverseStack stack;
     ElInfo *elInfo;
@@ -1488,7 +1116,7 @@ namespace AMDiS {
 
     // first: partition elements ...
     int index, partition;
-    elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_EVERY_EL_PREORDER);
+    elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
     while (elInfo) {
       Element *element = elInfo->getElement();
       PartitionElementData *partitionData = 
@@ -1509,42 +1137,42 @@ namespace AMDiS {
 
     if (overlap > 1 || openOverlap == false) {
       // exchange mesh structure codes
-      MeshStructure *structures = NEW MeshStructure[mpiSize_];
+      MeshStructure *structures = NEW MeshStructure[mpiSize];
       exchangeMeshStructureCodes(structures);
 
       // merge codes
       int rank;
-      for(rank = 0; rank < mpiSize_; rank++) {
-	if(rank != mpiRank_) {
-	  structures[mpiRank_].merge(&structures[rank]);
+      for(rank = 0; rank < mpiSize; rank++) {
+	if(rank != mpiRank) {
+	  structures[mpiRank].merge(&structures[rank]);
 	}
       }
     
-      MeshStructure &compositeStructure = structures[mpiRank_];
+      MeshStructure &compositeStructure = structures[mpiRank];
       compositeStructure.reset();
 
       // get composite indices of local overlap elements
       std::map<int, Element*> indexElement;
       std::vector<int> innerOverlapElements; // not at open overlap boundary
 
-      elInfo = stack.traverseFirst(mesh_, -1, Mesh::CALL_EVERY_EL_PREORDER);
-      while(elInfo) {
+      elInfo = stack.traverseFirst(mesh, -1, Mesh::CALL_EVERY_EL_PREORDER);
+      while (elInfo) {
 	Element *element = elInfo->getElement();
 	PartitionElementData *partitionData = 
 	  dynamic_cast<PartitionElementData*>(element->getElementData(PARTITION_ED));
 	
-	if(partitionData && partitionData->getLevel() == level) {
+	if (partitionData && partitionData->getLevel() == level) {
 	  int compositeIndex = compositeStructure.getCurrentElement();
 	  indexElement[compositeIndex] = element;
-	  if(partitionData->getPartitionStatus() == OVERLAP) {
+	  if (partitionData->getPartitionStatus() == OVERLAP) {
 	    int distance = overlapDistance[element];
-	    if(distance < overlap || !openOverlap) {
+	    if (distance < overlap || !openOverlap) {
 	      innerOverlapElements.push_back(compositeIndex);
 	    } 
 	  }
 	}
 	
-	if(element->isLeaf()) {
+	if (element->isLeaf()) {
 	  compositeStructure.skipBranch();
 	} else {
 	  compositeStructure.nextElement();
@@ -1555,21 +1183,21 @@ namespace AMDiS {
       // === exchange 'inner' overlap elements ===
 
       // exchange number of overlap elements
-      int *numOverlapElements = GET_MEMORY(int, mpiSize_);
+      int *numOverlapElements = GET_MEMORY(int, mpiSize);
       int tmp = static_cast<int>(innerOverlapElements.size());
       MPI::COMM_WORLD.Allgather(&tmp, 1, MPI_INT,
 			      numOverlapElements, 1, MPI_INT);
       
       // exchange overlap elements
-      int *offset = GET_MEMORY(int, mpiSize_);
+      int *offset = GET_MEMORY(int, mpiSize);
       int sum = 0;
-      for(rank = 0; rank < mpiSize_; rank++) {
+      for(rank = 0; rank < mpiSize; rank++) {
 	offset[rank] = sum;
 	sum += numOverlapElements[rank];
       }
 
       int *recvBuffer = GET_MEMORY(int, sum);
-      int *sendBuffer = GET_MEMORY(int, numOverlapElements[mpiRank_]);
+      int *sendBuffer = GET_MEMORY(int, numOverlapElements[mpiRank]);
    
       int *ptr;
       std::vector<int>::iterator elemIt, elemEnd = innerOverlapElements.end();
@@ -1581,7 +1209,7 @@ namespace AMDiS {
 	*ptr = *elemIt;
       }
   
-      MPI::COMM_WORLD.Allgatherv(sendBuffer, numOverlapElements[mpiRank_], 
+      MPI::COMM_WORLD.Allgatherv(sendBuffer, numOverlapElements[mpiRank], 
 				 MPI_INT, 
 				 recvBuffer, numOverlapElements, offset,
 				 MPI_INT);
@@ -1590,14 +1218,14 @@ namespace AMDiS {
 
       // fill vertexPartitions for 'inner' overlap elements
       int el;
-      for(rank = 0; rank < mpiSize_; rank++) {
+      for (rank = 0; rank < mpiSize; rank++) {
 	int numElements = numOverlapElements[rank];
 	//       MSG("rank %d overlap elements with %d: %d\n", 
-	// 	  mpiRank_, rank, numOverlapElements[rank]);
+	// 	  mpiRank, rank, numOverlapElements[rank]);
 	int *elements = recvBuffer + offset[rank];
-	for(el = 0; el < numElements; el++) {
+	for (el = 0; el < numElements; el++) {
 	  Element *element = indexElement[elements[el]];
-	  for(i = 0; i < dim + 1; i++) {
+	  for (int i = 0; i < dim + 1; i++) {
 	    vertexPartitions_[element->getDOF(i, 0)].insert(rank/* + 1*/);
 	  }
 	}
@@ -1606,8 +1234,8 @@ namespace AMDiS {
       // free memory
       DELETE [] structures;
       FREE_MEMORY(recvBuffer, int, sum);
-      FREE_MEMORY(sendBuffer, int, numOverlapElements[mpiRank_]);
-      FREE_MEMORY(numOverlapElements, int, mpiSize_);
+      FREE_MEMORY(sendBuffer, int, numOverlapElements[mpiRank]);
+      FREE_MEMORY(numOverlapElements, int, mpiSize);
     }
   }
 
@@ -1617,9 +1245,6 @@ namespace AMDiS {
     bool openOverlap = true;
 
     ParallelProblem::createOverlap(level, overlap, openOverlap, overlapDistance_);
-
-//     ParallelProblem::fillVertexPartitions(level, overlap, openOverlap, 
-//  					  overlapDistance_);
   }
 
   // =========================================================================
@@ -1627,50 +1252,67 @@ namespace AMDiS {
   // =========================================================================
   
   ParallelProblemScal::ParallelProblemScal(const std::string& name,
-					   ProblemScal *problem,
+					   ProblemScal *prob,
 					   ProblemInstatScal *problemInstat,
 					   std::vector<DOFVector<double>*> vectors)
     : ParallelProblem(name,
-		      problem,
+		      prob,
 		      problemInstat,
 		      vectors,
-		      problem->getMesh(),
-		      problem->getRefinementManager(),
-		      problem->getCoarseningManager()),
-      problem_(problem),
+		      prob->getMesh(),
+		      prob->getRefinementManager(),
+		      prob->getCoarseningManager()),
+      problem(prob),
       problemInstat_(problemInstat),
-      oldEstimator_(NULL),
-      oldMarker_(NULL),
-      rankSolution_(mpiSize_),
-      usersEstimator_(NULL),
-      usersMarker_(NULL)
+      oldEstimator(NULL),
+      oldMarker(NULL),
+      rankSolution(mpiSize),
+      usersEstimator(NULL),
+      usersMarker(NULL)
   {
-    int i;
-    for(i = 0; i < mpiSize_; i++) {
-      rankSolution_[i] = NEW DOFVector<double>(problem_->getFESpace(), "rank solution");
+    for (int i = 0; i < mpiSize; i++) {
+      rankSolution[i] = NEW DOFVector<double>(problem->getFESpace(), "rank solution");
     }
 
-    //vertexPartitions_ = NEW std::map<DegreeOfFreedom, std::set<int> >(problem_->getFESpace(), 
-    //						  "partition dof"); 
+    // Create vectors for debugging information
+    if (debugMode) {
+      dbgRankSolution.resize(mpiSize);
+      for (int i = 0; i < mpiSize; i++) {
+	dbgRankSolution[i] = NEW DOFVector<double>(problem->getFESpace(), "debug rank solution");
+      }
+      
+      if (mpiRank == 0) {
+	dbgSolution = NEW DOFVector<double>(problem->getFESpace(), "debug solution");
+      }
+    }
 
-    if(problemInstat_) {
+    
+    if (problemInstat_) {
       dofVectors_.push_back(problemInstat_->getOldSolution());
     }
   }
 
   ParallelProblemScal::~ParallelProblemScal()
   {
-    int i;
-    for(i = 0; i < mpiSize_; i++) {
-      DELETE rankSolution_[i];
+    for (int i = 0; i < mpiSize; i++) {
+      DELETE rankSolution[i];
+    }
+
+    if (debugMode) {
+      for (int i = 0; i < mpiSize; i++) {
+	DELETE dbgRankSolution[i];
+      }
+
+      if (mpiRank == 0) {
+	DELETE dbgSolution;
+      }
     }
-    //DELETE vertexPartitions_;
   }
 
 
   void ParallelProblemScal::initParallelization(AdaptInfo *adaptInfo)
   {
-    if (mpiSize_ > 1) {
+    if (mpiSize > 1) {
       clock_t partitioningStart = clock();
 
       partitioner_->createPartitionData();
@@ -1684,29 +1326,29 @@ namespace AMDiS {
 
       globalRefinements();
 
-      oldEstimator_ = problem_->getEstimator();
-      oldMarker_ = problem_->getMarker();
+      oldEstimator = problem->getEstimator();
+      oldMarker = problem->getMarker();
 
       ConditionalEstimator *condEstimator = NULL;
-      if (usersEstimator_) {
-	problem_->setEstimator(usersEstimator_);
+      if (usersEstimator) {
+	problem->setEstimator(usersEstimator);
       } else {
-	condEstimator = NEW ConditionalEstimator(oldEstimator_);
-	problem_->setEstimator(condEstimator);
+	condEstimator = NEW ConditionalEstimator(oldEstimator);
+	problem->setEstimator(condEstimator);
       }
 
-      if (usersMarker_) {
-	problem_->setMarker(usersMarker_);
+      if (usersMarker) {
+	problem->setMarker(usersMarker);
       } else {
 	ConditionalMarker *newMarker = NEW ConditionalMarker("parallel marker",
 							     -1,
-							     oldMarker_,
+							     oldMarker,
 							     globalCoarseGridLevel_,
 							     localCoarseGridLevel_);
-	problem_->setMarker(newMarker);
+	problem->setMarker(newMarker);
       }
 
-      if (mpiRank_ == 0) {
+      if (mpiRank == 0) {
 	clock_t partitioningEnd = clock();
 	partitioningTime = TIME_USED(partitioningStart, 
 				     partitioningEnd);
@@ -1715,12 +1357,12 @@ namespace AMDiS {
 
       // modify file writers
       char number[10];
-      sprintf(number, "%d", MPI::COMM_WORLD.Get_rank());
-      ::std::list<FileWriterInterface*> fileWriters = problem_->getFileWriterList();
-      ::std::list<FileWriterInterface*>::iterator fwIt, fwBegin, fwEnd;
+      sprintf(number, "%d", mpiRank);
+      ::std::vector<FileWriterInterface*> fileWriters = problem->getFileWriterList();
+      ::std::vector<FileWriterInterface*>::iterator fwIt, fwBegin, fwEnd;
       fwBegin = fileWriters.begin();
       fwEnd = fileWriters.end();
-      for(fwIt = fwBegin; fwIt != fwEnd; ++fwIt) {
+      for (fwIt = fwBegin; fwIt != fwEnd; ++fwIt) {
 	(*fwIt)->setFilename((*fwIt)->getFilename() + "_proc" + 
 			     ::std::string(number) + "_");
 	(*fwIt)->setTraverseProperties(-1, 0, elementInPartition);
@@ -1730,18 +1372,21 @@ namespace AMDiS {
 
   void ParallelProblemScal::exitParallelization(AdaptInfo *adaptInfo)
   {
-    if (mpiSize_ > 1) {
+    if (mpiSize > 1) {
       ParallelProblem::exitParallelization(adaptInfo);
 
-      if (!timeIF_) problem_->writeFiles(adaptInfo, true);
+      if (!timeIF_) 
+	problem->writeFiles(adaptInfo, true);
 
       partitioner_->deletePartitionData();
 
-      if (!usersEstimator_) DELETE problem_->getEstimator();
-      if (!usersMarker_) DELETE problem_->getMarker();
+      if (!usersEstimator) 
+	DELETE problem->getEstimator();
+      if (!usersMarker) 
+	DELETE problem->getMarker();
 
-      problem_->setEstimator(oldEstimator_);
-      problem_->setMarker(oldMarker_);    
+      problem->setEstimator(oldEstimator);
+      problem->setMarker(oldMarker);    
     }
   }
 
@@ -1749,12 +1394,12 @@ namespace AMDiS {
 
   ProblemStatBase *ParallelProblemScal::getProblem(int number) 
   { 
-    return problem_; 
+    return problem; 
   }
 
   ProblemStatBase *ParallelProblemScal::getProblem(const std::string& name)
   { 
-    return problem_;
+    return problem;
   }
 
 
@@ -1763,52 +1408,37 @@ namespace AMDiS {
     std::vector<DOFVector<double>*>::iterator it, itBegin, itEnd;
     itBegin = dofVectors_.begin();
     itEnd = dofVectors_.end();
-    for(it = itBegin; it != itEnd; ++it) {
+    for (it = itBegin; it != itEnd; ++it) {
       ParallelProblem::exchangeDOFVector(adaptInfo, *it);
     }
   }
 
   void ParallelProblemScal::exchangeRankSolutions(AdaptInfo *adaptInfo)
   {
-    rankSolution_[mpiRank_]->copy(*(problem_->getSolution()));
+    rankSolution[mpiRank]->copy(*(problem->getSolution()));
     ParallelProblem::exchangeRankSolutions(adaptInfo,
-					   rankSolution_);
+					   mesh,
+					   rankSolution);
+
+    if (debugMode) {
+      dbgRankSolution[mpiRank]->copy(*(problem->getSolution()));
+      ParallelProblem::exchangeRankSolutions(adaptInfo,
+					     dbgMesh,
+					     dbgRankSolution);
+    }
   }
 
   void ParallelProblemScal::buildGlobalSolution(AdaptInfo *adaptInfo)
   {
     ParallelProblem::buildGlobalSolution(adaptInfo,
-					 rankSolution_,
-					 problem_->getSolution());
-
-//     ParallelProblem::writeRankMacroAndValues(problem_->getSolution(),
-// 					     "output/debug_rank");
-
-
-//    {      
-//       char number[3];
-//       sprintf(number, "%d", MPI::COMM_WORLD.Get_rank());
-//       std::string meshfile = "output/debug_" + std::string(number) + ".mesh";
-//       std::string datfile = "output/debug_" + std::string(number) + ".dat";
-
-//       MacroWriter::writeMacro(problem_->getFESpace(), (char*)meshfile.c_str());
-//       ValueWriter::writeValues(problem_->getSolution(), (char*)datfile.c_str());
-
-//       if(mpiRank_ == 0) {
-// 	std::map<int, int> finePartitionVec;
-// 	partitioner_->fillLeafPartitionVec(&partitionVec_,
-// 					   &finePartitionVec);
-//  	ElementFileWriter elemFW("partition",
-//  				 problem_->getMesh(),
-//  				 problem_->getFESpace(),
-//  				 finePartitionVec);
-//  	elemFW.writeFiles(NULL, true);
-	
-// 	bool wait=true;
-// 	while(wait) {}
-//       }
+					 rankSolution,
+					 problem->getSolution());
 
-//     }
+    if (debugMode && mpiRank == 0) {
+      ParallelProblem::buildGlobalSolution(adaptInfo,
+					   dbgRankSolution,
+					   dbgSolution);
+    }
   }
 
   void ParallelProblemScal::coarsenOutOfPartition(AdaptInfo *adaptInfo)
@@ -1817,38 +1447,6 @@ namespace AMDiS {
     adaptInfo->allowCoarsening(true, 0);
     ParallelProblem::coarsenOutOfPartition(adaptInfo);
     adaptInfo->allowCoarsening(coarsenAllowed, 0);
-
-//     int level = localCoarseGridLevel_, overlap = 1;
-//     bool openOverlap = true;
-
-//     problem_->getMesh()->dofCompress();
-
-//     ParallelProblem::fillVertexPartitions(level, overlap, openOverlap, 
-// 					  overlapDistance_);
-
-//     overlapDistance_.clear();
-//     if(mpiRank_ == 0) { 
-//       problem_->writeFiles(adaptInfo, true);
-//       bool wait=true;
-//       while(wait) {}
-//     }
-
-//     ParallelProblem::writeRankMacroAndValues(problem_->getSolution(),
-// 					     "output/debug_rank",
-// 					     0.0);
-
-//     if(mpiRank_ == 0) {
-//       std::map<int, int> finePartitionVec;
-//       partitioner_->fillLeafPartitionVec(&partitionVec_,
-// 					 &finePartitionVec);
-//       ElementFileWriter elemFW("partition",
-// 			       problem_->getMesh(),
-// 			       problem_->getFESpace(),
-// 			       finePartitionVec);
-//       elemFW.writeFiles(NULL, true);
-//       bool wait = true;
-//       while(wait) {}
-//     }
   }
 
 
@@ -1857,36 +1455,48 @@ namespace AMDiS {
   // =========================================================================
   
   ParallelProblemVec::ParallelProblemVec(const std::string& name,
-					 ProblemVec *problem,
+					 ProblemVec *prob,
 					 ProblemInstatVec *problemInstat,
 					 std::vector<DOFVector<double>*> vectors)
     : ParallelProblem(name,
-		      problem,
+		      prob,
 		      problemInstat,
 		      vectors,
-		      problem->getMesh(0),
-		      problem->getRefinementManager(0),
-		      problem->getCoarseningManager(0)),
-      problem_(problem),
+		      prob->getMesh(0),
+		      prob->getRefinementManager(0),
+		      prob->getCoarseningManager(0)),
+      problem(prob),
       problemInstat_(problemInstat)
   {
-    numComponents_ = problem_->getNumComponents();
+    nComponents = problem->getNumComponents();
 
-    std::vector<FiniteElemSpace*> feSpaces(numComponents_);
-    int i, j;
-    for(i = 0; i < numComponents_; i++) {
-      feSpaces[i] = problem_->getFESpace(i);
+    std::vector<FiniteElemSpace*> feSpaces(nComponents);
+    for (int i = 0; i < nComponents; i++) {
+      feSpaces[i] = problem->getFESpace(i);
     }
-    rankSolution_.resize(mpiSize_);
-    for(i = 0; i < mpiSize_; i++) {
-      rankSolution_[i] = NEW SystemVector("rank solution", feSpaces, numComponents_);
-      for(j = 0; j < numComponents_; j++) {
-	rankSolution_[i]->setDOFVector(j, NEW DOFVector<double>(feSpaces[j], 
+
+    rankSolution.resize(mpiSize);
+    for (int i = 0; i < mpiSize; i++) {
+      rankSolution[i] = NEW SystemVector("rank solution", feSpaces, nComponents);
+      for (int j = 0; j < nComponents; j++) {
+	rankSolution[i]->setDOFVector(j, NEW DOFVector<double>(feSpaces[j], 
 								"rank solution"));
       }
     }
-    if(problemInstat_) {
-      for(i = 0; i < numComponents_; i++) {
+
+    if (debugMode) {
+      dbgRankSolution.resize(mpiSize);
+      for (int i = 0; i < mpiSize; i++) {
+	dbgRankSolution[i] = NEW SystemVector(*(rankSolution[i]));
+      }
+
+      if (mpiRank == 0) {
+	dbgSolution = NEW SystemVector(*(problem->getSolution()));
+      }
+    }
+
+    if (problemInstat_) {
+      for (int i = 0; i < nComponents; i++) {
 	dofVectors_.push_back(problemInstat_->getOldSolution()->getDOFVector(i));
       }
     }
@@ -1894,12 +1504,27 @@ namespace AMDiS {
 
   ParallelProblemVec::~ParallelProblemVec()
   {
-    int i, j;
-    for(i = 0; i < mpiSize_; i++) {
-      for(j = 0; j < numComponents_; j++) {
-	DELETE rankSolution_[i]->getDOFVector(j);
+    for (int i = 0; i < mpiSize; i++) {
+      for (int j = 0; j < nComponents; j++) {
+	DELETE rankSolution[i]->getDOFVector(j);
+      }
+      DELETE rankSolution[i];
+    }
+
+    if (debugMode) {
+      for (int i = 0; i < mpiSize; i++) {
+	for (int j = 0; j < nComponents; j++) {
+	  DELETE dbgRankSolution[i]->getDOFVector(j);
+	}
+	DELETE dbgRankSolution[i];
+      }
+
+      if (mpiRank == 0) {
+	for (int i = 0; i < nComponents; i++) {
+	  DELETE dbgSolution->getDOFVector(i);
+	}
+	DELETE dbgSolution;
       }
-      DELETE rankSolution_[i];
     }
   }
 
@@ -1907,8 +1532,7 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelProblem::initParallelization()");
 
-    if(mpiSize_ > 1) {
-      int i;
+    if (mpiSize > 1) {
       partitioner_->createPartitionData();
       setElemWeights(adaptInfo);
       partitionMesh(adaptInfo);
@@ -1916,45 +1540,45 @@ namespace AMDiS {
       refineOverlap(adaptInfo);
       createOverlap(adaptInfo);
 
-      oldEstimator_ = problem_->getEstimator();
-      oldMarker_ = problem_->getMarker();
+      oldEstimator = problem->getEstimator();
+      oldMarker = problem->getMarker();
 
       std::vector<ConditionalEstimator*> condEstimator;
-      if(static_cast<int>(usersEstimator_.size()) == numComponents_) {
-	problem_->setEstimator(usersEstimator_);
+      if (static_cast<int>(usersEstimator.size()) == nComponents) {
+	problem->setEstimator(usersEstimator);
       } else {
-	for(i = 0; i < numComponents_; i++) {
-	  condEstimator.push_back(NEW ConditionalEstimator(oldEstimator_[i]));
-	  problem_->setEstimator(condEstimator[i], i);
+	for (int i = 0; i < nComponents; i++) {
+	  condEstimator.push_back(NEW ConditionalEstimator(oldEstimator[i]));
+	  problem->setEstimator(condEstimator[i], i);
 	}
       }
 
-      if(static_cast<int>(usersMarker_.size()) == numComponents_) {
-	for(i = 0; i < numComponents_; i++) {
-	  problem_->setMarker(usersMarker_[i], i);
+      if (static_cast<int>(usersMarker.size()) == nComponents) {
+	for (int i = 0; i < nComponents; i++) {
+	  problem->setMarker(usersMarker[i], i);
 	}
       } else {
-	TEST_EXIT(static_cast<int>(condEstimator.size()) == numComponents_)
+	TEST_EXIT(static_cast<int>(condEstimator.size()) == nComponents)
 	  ("use conditional marker only together with conditional estimator\n");
-	for(i = 0; i < numComponents_; i++) {
+	for (int i = 0; i < nComponents; i++) {
 	  ConditionalMarker *newMarker = 
 	    NEW ConditionalMarker("parallel marker",
 				  i,
-				  oldMarker_[i],
+				  oldMarker[i],
 				  globalCoarseGridLevel_,
 				  localCoarseGridLevel_);
-	  problem_->setMarker(newMarker, i);
+	  problem->setMarker(newMarker, i);
 	}
       }
 
       // modify file writers
       char number[10];
       sprintf(number, "%d", MPI::COMM_WORLD.Get_rank());
-      ::std::list<FileWriterInterface*> fileWriters = problem_->getFileWriterList();
-      ::std::list<FileWriterInterface*>::iterator fwIt, fwBegin, fwEnd;
+      ::std::vector<FileWriterInterface*> fileWriters = problem->getFileWriterList();
+      ::std::vector<FileWriterInterface*>::iterator fwIt, fwBegin, fwEnd;
       fwBegin = fileWriters.begin();
       fwEnd = fileWriters.end();
-      for(fwIt = fwBegin; fwIt != fwEnd; ++fwIt) {
+      for (fwIt = fwBegin; fwIt != fwEnd; ++fwIt) {
 	(*fwIt)->setFilename((*fwIt)->getFilename() + "_proc" + 
 			     ::std::string(number) + "_");
 	(*fwIt)->setTraverseProperties(-1, 0, elementInPartition);
@@ -1966,26 +1590,26 @@ namespace AMDiS {
   {
     FUNCNAME("ParallelProblem::exitParallelization()");
 
-    if(mpiSize_ > 1) {
+    if (mpiSize > 1) {
       ParallelProblem::exitParallelization(adaptInfo);
 
-      if(!timeIF_) problem_->writeFiles(adaptInfo, true);
+      if (!timeIF_) 
+	problem->writeFiles(adaptInfo, true);
 
       partitioner_->deletePartitionData();
 
-      int i;
-      for(i = 0; i < numComponents_; i++) {
-	if(static_cast<int>(usersEstimator_.size()) == numComponents_) 
-	  DELETE problem_->getEstimator(i);
-	if(static_cast<int>(usersMarker_.size()) == numComponents_) 
-	  DELETE problem_->getMarker(i);
+      for (int i = 0; i < nComponents; i++) {
+	if (static_cast<int>(usersEstimator.size()) == nComponents) 
+	  DELETE problem->getEstimator(i);
+	if (static_cast<int>(usersMarker.size()) == nComponents) 
+	  DELETE problem->getMarker(i);
 
-	problem_->setEstimator(oldEstimator_[i], i);
-	problem_->setMarker(oldMarker_[i], i);    
+	problem->setEstimator(oldEstimator[i], i);
+	problem->setMarker(oldMarker[i], i);    
       }
 
-      usersEstimator_.resize(0);
-      usersMarker_.resize(0);
+      usersEstimator.resize(0);
+      usersMarker.resize(0);
     }
   }
 
@@ -1994,66 +1618,88 @@ namespace AMDiS {
     std::vector<DOFVector<double>*>::iterator it, itBegin, itEnd;
     itBegin = dofVectors_.begin();
     itEnd = dofVectors_.end();
-    for(it = itBegin; it != itEnd; ++it) {
+    for (it = itBegin; it != itEnd; ++it) {
       ParallelProblem::exchangeDOFVector(adaptInfo, *it);
     }
   }
 
   void ParallelProblemVec::exchangeRankSolutions(AdaptInfo *adaptInfo) 
   {
-    rankSolution_[mpiRank_]->copy(*(problem_->getSolution()));
+    rankSolution[mpiRank]->copy(*(problem->getSolution()));
+    if (debugMode) {
+      dbgRankSolution[mpiRank]->copy(*(problem->getSolution()));
+    }
 
-    std::vector<DOFVector<double>*> rankSol(mpiSize_);
+    std::vector<DOFVector<double>*> rankSol(mpiSize);
 
-    for (int i = 0; i < numComponents_; i++) {
-      for (int j = 0; j < mpiSize_; j++) {
-	rankSol[j] = rankSolution_[j]->getDOFVector(i);
+    for (int i = 0; i < nComponents; i++) {
+      for (int j = 0; j < mpiSize; j++) {
+	rankSol[j] = rankSolution[j]->getDOFVector(i);
       }
-
       ParallelProblem::exchangeRankSolutions(adaptInfo,
+					     mesh,
 					     rankSol);
+
+      if (debugMode) {
+	for (int j = 0; j < mpiSize; j++) {
+	  rankSol[j] = dbgRankSolution[j]->getDOFVector(i);
+	}
+	ParallelProblem::exchangeRankSolutions(adaptInfo,
+					       dbgMesh,
+					       rankSol);	
+      }
     }
   }
 
   void ParallelProblemVec::buildGlobalSolution(AdaptInfo *adaptInfo) 
   {
-    std::vector<DOFVector<double>*> rankSol(mpiSize_);
+    std::vector<DOFVector<double>*> rankSol(mpiSize);
 
-    int i, j;
-    for(i = 0; i < numComponents_; i++) {
-      for(j = 0; j < mpiSize_; j++) {
-	rankSol[j] = rankSolution_[j]->getDOFVector(i);
+    for (int i = 0; i < nComponents; i++) {
+      for (int j = 0; j < mpiSize; j++) {
+	rankSol[j] = rankSolution[j]->getDOFVector(i);
       }
-
       ParallelProblem::buildGlobalSolution(adaptInfo,
 					   rankSol,
-					   problem_->getSolution()->getDOFVector(i));
+					   problem->getSolution()->getDOFVector(i));
+
+      if (debugMode && mpiRank == 0) {
+	for (int j = 0; j < mpiSize; j++) {
+	  rankSol[j] = dbgRankSolution[j]->getDOFVector(i);
+	}
+	ParallelProblem::buildGlobalSolution(adaptInfo,
+					     rankSol,
+					     dbgSolution->getDOFVector(i));	
+      }
     }
   }
 
-  int ParallelProblemVec::getNumProblems() { return 1; }
+  int ParallelProblemVec::getNumProblems() 
+  { 
+    return 1; 
+  }
 
   ProblemStatBase *ParallelProblemVec::getProblem(int number) 
   { 
-    return problem_; 
+    return problem; 
   }
 
   ProblemStatBase *ParallelProblemVec::getProblem(const std::string& name)
   { 
-    return problem_;
+    return problem;
   }
 
   void ParallelProblemVec::coarsenOutOfPartition(AdaptInfo *adaptInfo)
   {
-    std::vector<bool> coarsenAllowed(numComponents_);
-    int i;
-    for(i = 0; i < numComponents_; i++) {
+    std::vector<bool> coarsenAllowed(nComponents);
+
+    for (int i = 0; i < nComponents; i++) {
       coarsenAllowed[i] = adaptInfo->isCoarseningAllowed(i);
       adaptInfo->allowCoarsening(true, i);
     }
 
     ParallelProblem::coarsenOutOfPartition(adaptInfo);
-    for(i = 0; i < numComponents_; i++) {
+    for (int i = 0; i < nComponents; i++) {
       adaptInfo->allowCoarsening(coarsenAllowed[i], i);
     }
   }
diff --git a/AMDiS/src/ParallelProblem.h b/AMDiS/src/ParallelProblem.h
index f4665ac5eed7a62657c028e44b3095dcb7a5a900..74518172e561b211d4f5d8f9b965c8dc7c82f791 100644
--- a/AMDiS/src/ParallelProblem.h
+++ b/AMDiS/src/ParallelProblem.h
@@ -42,6 +42,7 @@ namespace AMDiS {
   class ParallelProblemInterface
   {
   public:
+    virtual ~ParallelProblemInterface() {};
     virtual void initParallelization(AdaptInfo *adaptInfo) = 0;
     virtual void exitParallelization(AdaptInfo *adaptInfo) = 0;
   };
@@ -63,17 +64,41 @@ namespace AMDiS {
 
     virtual ~ParallelProblemBase() {};
 
+    /** \brief
+     * Must return true, if a new partitioning of the domain (due to unbalanced
+     * calculation times) have to be done.
+     */
     virtual bool doPartitioning(AdaptInfo *adaptInfo, double localWeightSum) = 0;
+
     virtual bool doBuildGlobalSolution(AdaptInfo *adaptInfo) = 0;
+
     virtual double setElemWeights(AdaptInfo *adaptInfo) = 0;
+
     virtual void partitionMesh(AdaptInfo *adaptInfo) = 0;
+
     virtual void refineOverlap(AdaptInfo *adaptInfo) = 0;    
+
     virtual void globalRefineOutOfPartition(AdaptInfo *adaptInfo) = 0;    
+
     virtual void createOverlap(AdaptInfo *adaptInfo) = 0;
+
     virtual void exchangeDOFVectors(AdaptInfo *adaptInfo) = 0;
+
     virtual void coarsenOutOfPartition(AdaptInfo *adaptInfo) = 0;
+
+    /** \brief
+     *
+     */
     virtual void synchronizeMeshes(AdaptInfo *adaptInfo) = 0;
+
+    /** \brief
+     *
+     */
     virtual void exchangeRankSolutions(AdaptInfo *adaptInfo) = 0;
+
+    /** \brief
+     *
+     */
     virtual void buildGlobalSolution(AdaptInfo *adaptInfo) = 0;
 
     virtual void exitParallelization(AdaptInfo *adaptInfo)
@@ -96,11 +121,12 @@ namespace AMDiS {
 
     virtual void closeTimestep(AdaptInfo *adaptInfo)
     {
-      if (mpiSize_ > 1 && doBuildGlobalSolution(adaptInfo)) {
-	synchronizeMeshes(adaptInfo);
+      if (mpiSize > 1 && doBuildGlobalSolution(adaptInfo)) {
+	synchronizeMeshes(adaptInfo);	
  	exchangeRankSolutions(adaptInfo);
  	buildGlobalSolution(adaptInfo);
       }
+
       if (timeIF_) 
 	timeIF_->closeTimestep(adaptInfo);
     };
@@ -127,12 +153,11 @@ namespace AMDiS {
     {
       Flag flag;
 
-      if(mpiSize_ > 1 && toDo.isSet(MARK | ADAPT)) {
+      if (mpiSize > 1 && toDo.isSet(MARK | ADAPT)) {
  	flag = iterationIF_->oneIteration(adaptInfo, MARK | ADAPT);
 
 	double localWeightSum = setElemWeights(adaptInfo);
-	if(doPartitioning(adaptInfo, localWeightSum)) {
-
+	if (doPartitioning(adaptInfo, localWeightSum)) {
 	  clock_t partitioningStart = clock();
 
 	  synchronizeMeshes(adaptInfo);
@@ -155,31 +180,42 @@ namespace AMDiS {
       }
 
       // synchronize adaption flag
-      unsigned long *flagBuffer = GET_MEMORY(unsigned long, mpiSize_);
+      unsigned long *flagBuffer = GET_MEMORY(unsigned long, mpiSize);
 
       unsigned long localFlag = flag.getFlags();
 
       MPI::COMM_WORLD.Allgather(&localFlag, 1, MPI_UNSIGNED_LONG,
 				flagBuffer, 1, MPI_UNSIGNED_LONG);
-      int i;
-      for(i = 0; i < mpiSize_; i++) {
+      for (int i = 0; i < mpiSize; i++) {
 	flag.setFlag(flagBuffer[i]);
       }
-      FREE_MEMORY(flagBuffer, unsigned long, mpiSize_);
+      FREE_MEMORY(flagBuffer, unsigned long, mpiSize);
+
       return flag;
     };
 
-    virtual void endIteration(AdaptInfo *adaptInfo) 
-    {
+    virtual void endIteration(AdaptInfo *adaptInfo) {
       iterationIF_->endIteration(adaptInfo);
     };
 
+    virtual void startDelayedTimestepCalculation() {};
+
+    virtual bool existsDelayedCalculation() {
+      return false;
+    };
+
+
   protected:
-    int mpiRank_;
-    int mpiSize_;
+    int mpiRank;
+
+    int mpiSize;
+
     ProblemIterationInterface *iterationIF_;
+
     ProblemTimeInterface *timeIF_;
+
     clock_t computationStart;
+
     double partitioningTime;
   };
 
@@ -220,7 +256,11 @@ namespace AMDiS {
 
     virtual void globalRefinements();
 
+    /** \brief
+     *
+     */
     void exchangeRankSolutions(AdaptInfo *adaptInfo,
+			       Mesh *workMesh,
 			       std::vector<DOFVector<double>*> rankSolutions);
 
     void exchangeDOFVector(AdaptInfo *adaptInfo,
@@ -236,51 +276,61 @@ namespace AMDiS {
     void fillVertexPartitions(int level, int overlap, bool openOverlap,
 			      std::map<Element*, int> &overlapDistance);
 
-    void setRepartitionSteps(int steps) { repartitionSteps_ = steps; };
+    void setRepartitionSteps(int steps) { 
+      repartitionSteps_ = steps; 
+    };
 
-    void puEveryTimestep(bool pu) { puEveryTimestep_ = pu; };
+    void puEveryTimestep(bool pu) { 
+      puEveryTimestep_ = pu; 
+    };
 
-    void addDOFVector(DOFVector<double> *vec) 
-    {
+    void addDOFVector(DOFVector<double> *vec) {
       dofVectors_.push_back(vec);
     };
 
+    /** \brief
+     * Every process creates the mesh structure code of its mesh, and all
+     * processes broadcast their mesh structure codes.
+     */
     void exchangeMeshStructureCodes(MeshStructure *structures);
 
     static bool writeElement(ElInfo *elInfo);
 
-    //     static void writeRankMacroAndValues(DOFVector<double> *vec, 
-    // 					const char *name = NULL, 
-    // 					double time = 0.0);
-
+    Mesh* getDbgMesh() {
+      return dbgMesh;
+    }
 
     virtual void serialize(std::ostream&) {};
 
     virtual void deserialize(std::istream&) {};
 
   protected:
+    /** \brief
+     *
+     */
     double errors2map(std::map<int, double> &errMap, int comp, bool add);
 
-  protected:
+    std::vector<int> iList;
+
     /** \brief
      *
      */
     std::string name_;
 
     /** \brief
-     *
+     * Mesh of the problem.
      */
-    Mesh *mesh_;
+    Mesh *mesh;
 
     /** \brief
      *
      */
-    RefinementManager *refinementManager_;
+    RefinementManager *refinementManager;
 
     /** \brief
      *
      */
-    CoarseningManager *coarseningManager_;
+    CoarseningManager *coarseningManager;
 
     /** \brief
      * Pointer to the paritioner which is used to devide a mesh into partitions.
@@ -387,6 +437,20 @@ namespace AMDiS {
      *
      */
     double maxLowerTH_;
+
+    /** \brief
+     * Defines the debug mode. If it is 1, the processor with rank 0
+     * fills \ref dbgMesh, which than contains the whole mesh domain after 
+     * each timestep. This makes it easy to check the whole result without 
+     * manually fitting the subresults together.
+     */
+    int debugMode;
+
+    /** \brief
+     * In debug mode on process 0, this mesh contains the composition of all
+     * rank meshes.
+     */
+    Mesh *dbgMesh;
   };
 
   // =========================================================================
@@ -428,11 +492,11 @@ namespace AMDiS {
     virtual ProblemStatBase *getProblem(const std::string& name);
 
     void setEstimator(Estimator *est) { 
-      usersEstimator_ = est; 
+      usersEstimator = est; 
     };
 
     void setMarker(Marker *marker) { 
-      usersMarker_ = marker; 
+      usersMarker = marker; 
     };
 
     inline virtual const ::std::string& getName() { 
@@ -440,19 +504,40 @@ namespace AMDiS {
     };
 
   protected:
-    ProblemScal *problem_;
+    ProblemScal *problem;
 
     ProblemInstatScal *problemInstat_;
 
-    Estimator *oldEstimator_;
+    Estimator *oldEstimator;
+
+    Marker *oldMarker;
+
+    /** \brief
+     * Vector of all process' solution DOFVectors.
+     */
+    std::vector<DOFVector<double>*> rankSolution;
 
-    Marker *oldMarker_;
+    /** \brief
+     * Is used in debug mode in the same context as \ref rankSolution.
+     * But the solution is built using \ref dbgMesh.
+     */
+    std::vector<DOFVector<double>*> dbgRankSolution;
 
-    std::vector<DOFVector<double>*> rankSolution_;
+    /** \brief
+     * In debug mode and on process 0 only, this vector will be initialized
+     * to store the overall solution of all processes.
+     */
+    DOFVector<double>* dbgSolution;
 
-    Estimator *usersEstimator_;
+    /** \brief
+     *
+     */
+    Estimator *usersEstimator;
 
-    Marker *usersMarker_;
+    /** \brief
+     *
+     */
+    Marker *usersMarker;
   };
 
   // =========================================================================
@@ -492,33 +577,70 @@ namespace AMDiS {
     virtual ProblemStatBase *getProblem(const std::string& name);
 
     void setEstimator(std::vector<Estimator*> est) { 
-      usersEstimator_ = est; 
+      usersEstimator = est; 
     };
 
     void setMarker(std::vector<Marker*> marker) {
-      usersMarker_ = marker; 
+      usersMarker = marker; 
     };
 
     inline virtual const ::std::string& getName() { 
       return name_; 
     };
 
+    SystemVector* getDbgSolution() {
+      return dbgSolution;
+    };
+
   protected:
-    ProblemVec *problem_;
+    ProblemVec *problem;
 
+    /** \brief
+     *
+     */
     ProblemInstatVec *problemInstat_;
 
-    std::vector<Estimator*> oldEstimator_;
+    /** \brief
+     *
+     */
+    std::vector<Estimator*> oldEstimator;
+
+    /** \brief
+     *
+     */
+    std::vector<Marker*> oldMarker;
+
+    /** \brief
+     * Vector of all process' solution SystemVectors.
+     */
+    std::vector<SystemVector*> rankSolution;
 
-    std::vector<Marker*> oldMarker_;
+    /** \brief
+     * Is used in debug mode in the same context as \ref rankSolution.
+     * But the solution is built using \ref dbgMesh.
+     */
+    std::vector<SystemVector*> dbgRankSolution;
 
-    std::vector<SystemVector*> rankSolution_;
+    /** \brief
+     * In debug mode and on process 0 only, this vector will be initialized
+     * to store the overall solution of all processes.
+     */
+    SystemVector* dbgSolution;
 
-    std::vector<Estimator*> usersEstimator_;
+    /** \brief
+     *
+     */
+    std::vector<Estimator*> usersEstimator;
 
-    std::vector<Marker*> usersMarker_;
+    /** \brief
+     *
+     */
+    std::vector<Marker*> usersMarker;
 
-    int numComponents_;
+    /** \brief
+     * Number of components of the vectorial problem.
+     */
+    int nComponents;
   };
 
 }