From 2529675cd960d71851fab7796ce0af65bc69871b Mon Sep 17 00:00:00 2001
From: Thomas Witkowski <thomas.witkowski@gmx.de>
Date: Tue, 28 Jul 2009 12:09:36 +0000
Subject: [PATCH] Updates estimator to work with parallel version.

---
 AMDiS/src/Estimator.h           |  6 +++---
 AMDiS/src/ParallelDomainBase.cc | 24 +++++++++++++++++-------
 AMDiS/src/ParallelDomainBase.h  |  2 ++
 AMDiS/src/ResidualEstimator.cc  | 16 ++++++++++++++++
 4 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/AMDiS/src/Estimator.h b/AMDiS/src/Estimator.h
index e36d5588..95069c62 100755
--- a/AMDiS/src/Estimator.h
+++ b/AMDiS/src/Estimator.h
@@ -171,15 +171,15 @@ namespace AMDiS {
     /// Sum of all error estimates
     double est_sum;
 
+    /// Maximal error estimate
+    double est_max;
+
     /// Sum of all time error estimates
     double est_t_sum;
 
     /// Max of all time error estimates
     double est_t_max;
 
-    /// Maximal error estimate
-    double est_max;
-
     /** \brief
      * Vector of DOFMatrix pointers. There can be more than one
      * DOFMatrix, if the Estimator is part of a vector valued
diff --git a/AMDiS/src/ParallelDomainBase.cc b/AMDiS/src/ParallelDomainBase.cc
index 68b3ad19..2e61a006 100644
--- a/AMDiS/src/ParallelDomainBase.cc
+++ b/AMDiS/src/ParallelDomainBase.cc
@@ -23,7 +23,7 @@ namespace AMDiS {
 
   PetscErrorCode myKSPMonitor(KSP ksp, PetscInt iter, PetscReal rnorm, void *)
   {
-    // if (iter % 100 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
+    if (iter % 100 == 0 && MPI::COMM_WORLD.Get_rank() == 0)
       std::cout << " Petsc-Iteration " << iter << ": " << rnorm << std::endl;
 
     return 0;
@@ -154,6 +154,10 @@ namespace AMDiS {
     VecCreate(PETSC_COMM_WORLD, &petscSolVec);
     VecSetSizes(petscSolVec, nRankRows, nOverallRows);
     VecSetType(petscSolVec, VECMPI);
+
+    VecCreate(PETSC_COMM_WORLD, &petscTmpVec);
+    VecSetSizes(petscTmpVec, nRankRows, nOverallRows);
+    VecSetType(petscTmpVec, VECMPI);
   }
 
 
@@ -161,6 +165,7 @@ namespace AMDiS {
   {
     VecDestroy(petscRhsVec);
     VecDestroy(petscSolVec);
+    VecDestroy(petscTmpVec);
   }
 
 
@@ -441,7 +446,6 @@ namespace AMDiS {
     FUNCNAME("ParallelDomainBase::solvePetscMatrix()");
 
     KSP solver;
-    PC pc;
 
     KSPCreate(PETSC_COMM_WORLD, &solver);
     KSPSetOperators(solver, petscMatrix, petscMatrix, SAME_NONZERO_PATTERN);
@@ -449,11 +453,8 @@ namespace AMDiS {
     KSPSetTolerances(solver, 1.e-8, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
     KSPSetType(solver, KSPBCGS);
     KSPMonitorSet(solver, myKSPMonitor, PETSC_NULL, 0);
+    KSPSetFromOptions(solver);
 
-    KSPGetPC(solver, &pc);
-    PCSetType(pc, PCKSP);
-
-    KSPSetUp(solver);
     KSPSolve(solver, petscRhsVec, petscSolVec);
 
     PetscScalar *vecPointer;
@@ -522,8 +523,17 @@ namespace AMDiS {
     for (int i = 0; i < static_cast<int>(sendBuffers.size()); i++)
       delete [] sendBuffers[i];
 
+    int iterations = 0;
+    KSPGetIterationNumber(solver, &iterations);
+    MSG(" Number of iterations: %d\n", iterations);
+
+    double norm = 0.0;
+    MatMult(petscMatrix, petscSolVec, petscTmpVec);
+    VecAXPY(petscTmpVec, -1.0, petscRhsVec);
+    VecNorm(petscTmpVec, NORM_2, &norm);
+    MSG(" Residual norm: %e\n", norm);
+
     MatDestroy(petscMatrix);
-    PCDestroy(pc);
     KSPDestroy(solver);
   }
 
diff --git a/AMDiS/src/ParallelDomainBase.h b/AMDiS/src/ParallelDomainBase.h
index 8e6d0f6f..7543ead1 100644
--- a/AMDiS/src/ParallelDomainBase.h
+++ b/AMDiS/src/ParallelDomainBase.h
@@ -359,6 +359,8 @@ namespace AMDiS {
 
     Vec petscSolVec;
 
+    Vec petscTmpVec;
+
     /// Number of DOFs in the rank mesh.
     int nRankDOFs;
 
diff --git a/AMDiS/src/ResidualEstimator.cc b/AMDiS/src/ResidualEstimator.cc
index fab865ff..35a9cee0 100644
--- a/AMDiS/src/ResidualEstimator.cc
+++ b/AMDiS/src/ResidualEstimator.cc
@@ -6,6 +6,10 @@
 #include "Traverse.h"
 #include "Parameters.h"
 
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+#include "mpi.h"
+#endif
+
 namespace AMDiS {
 
   ResidualEstimator::ResidualEstimator(std::string name, int r)
@@ -126,6 +130,18 @@ namespace AMDiS {
   {
     FUNCNAME("ResidualEstimator::exit()");
 
+#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
+    double send_est_sum = est_sum;
+    double send_est_max = est_max;
+    double send_est_t_sum = est_t_sum;
+    double send_est_t_max = est_t_max;
+
+    MPI::COMM_WORLD.Allreduce(&send_est_sum, &est_sum, 1, MPI_DOUBLE, MPI_SUM);
+    MPI::COMM_WORLD.Allreduce(&send_est_max, &est_max, 1, MPI_DOUBLE, MPI_MAX);
+    MPI::COMM_WORLD.Allreduce(&send_est_t_sum, &est_t_sum, 1, MPI_DOUBLE, MPI_SUM);
+    MPI::COMM_WORLD.Allreduce(&send_est_t_max, &est_t_max, 1, MPI_DOUBLE, MPI_MAX);
+#endif
+
     est_sum = sqrt(est_sum);
     est_t_sum = sqrt(est_t_sum);
 
-- 
GitLab
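
Reviewer note (not part of the patch): the core of the change is the reduction of rank-local error estimates to global values in ResidualEstimator::exit(). The following standalone sketch illustrates that pattern with the MPI-2 C++ bindings used in the diff; it compiles only against an MPI-2 era implementation (the C++ bindings were removed in MPI-3), and the initial estimate values are made up for illustration. Separate send buffers are used because MPI forbids aliasing the send and receive buffers of Allreduce, which is why the patch introduces the send_est_* temporaries.

// Standalone sketch (illustrative only): reduce per-rank error estimates to
// global values, as done in ResidualEstimator::exit() above. Sums are combined
// with MPI_SUM, maxima with MPI_MAX, so afterwards every rank holds the
// global estimates.
#include <mpi.h>
#include <cmath>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI::Init(argc, argv);

  // Hypothetical rank-local (squared) estimates standing in for the values
  // accumulated during the element traversal.
  double est_sum = 1.0, est_max = 0.5, est_t_sum = 0.1, est_t_max = 0.05;

  // Copy into send buffers; Allreduce must not alias send and receive buffers.
  double send_est_sum   = est_sum;
  double send_est_max   = est_max;
  double send_est_t_sum = est_t_sum;
  double send_est_t_max = est_t_max;

  MPI::COMM_WORLD.Allreduce(&send_est_sum,   &est_sum,   1, MPI_DOUBLE, MPI_SUM);
  MPI::COMM_WORLD.Allreduce(&send_est_max,   &est_max,   1, MPI_DOUBLE, MPI_MAX);
  MPI::COMM_WORLD.Allreduce(&send_est_t_sum, &est_t_sum, 1, MPI_DOUBLE, MPI_SUM);
  MPI::COMM_WORLD.Allreduce(&send_est_t_max, &est_t_max, 1, MPI_DOUBLE, MPI_MAX);

  // As in the estimator, the summed squared contributions become the global
  // estimate only after taking the square root of the reduced sums.
  est_sum   = std::sqrt(est_sum);
  est_t_sum = std::sqrt(est_t_sum);

  if (MPI::COMM_WORLD.Get_rank() == 0)
    std::printf("est_sum = %e, est_max = %e\n", est_sum, est_max);

  MPI::Finalize();
  return 0;
}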