diff --git a/AMDiS/src/SystemVector.cc b/AMDiS/src/SystemVector.cc
index b83c32c03f626e0142edbb050789c32e3062a2eb..6aca05292ac9be9c6542b0c983a9e211fd007fc9 100644
--- a/AMDiS/src/SystemVector.cc
+++ b/AMDiS/src/SystemVector.cc
@@ -30,7 +30,7 @@ namespace AMDiS {
     SerUtil::deserialize(in, size);
     vectors.resize(size);
     for (int i = oldSize; i < size; i++)
-      vectors[i] = new DOFVector<double>(feSpace[i], "");
+      vectors[i] = new DOFVector<double>(componentSpaces[i], "");
     for (int i = 0; i < size; i++)
       vectors[i]->deserialize(in);
   }
diff --git a/AMDiS/src/SystemVector.h b/AMDiS/src/SystemVector.h
index 24131f2d71cc8e4f81536bf8da022fd1ce2f95f1..5ae1289906cad48bf914faadda4316903ef51c91 100644
--- a/AMDiS/src/SystemVector.h
+++ b/AMDiS/src/SystemVector.h
@@ -37,16 +37,21 @@ namespace AMDiS {
     /// Constructor.
     SystemVector(std::string name_,
                  std::vector<const FiniteElemSpace*> feSpace_,
-                 int size)
+                 int size,
+                 bool createVec = false)
       : name(name_),
-        feSpace(feSpace_),
+        componentSpaces(feSpace_),
        vectors(size)
-    {}
+    {
+      if (createVec)
+        for (int i = 0; i < size; i++)
+          vectors[i] = new DOFVector<double>(componentSpaces[i], "tmp");
+    }
 
     /// Copy Constructor.
     SystemVector(const SystemVector& rhs)
       : name(rhs.getName()),
-        feSpace(rhs.getFeSpaces()),
+        componentSpaces(rhs.getFeSpaces()),
        vectors(rhs.getNumVectors())
     {
       for (unsigned int i = 0; i < vectors.size(); i++)
@@ -111,13 +116,13 @@ namespace AMDiS {
     /// Returns the fe space for a given component.
     inline const FiniteElemSpace *getFeSpace(int i) const
     {
-      return feSpace[i];
+      return componentSpaces[i];
     }
 
     /// Returns the fe spaces for all components.
     inline std::vector<const FiniteElemSpace*> getFeSpaces() const
     {
-      return feSpace;
+      return componentSpaces;
     }
 
     /// Here the system vector is interpreted as one large vector. The given
@@ -228,7 +233,7 @@ namespace AMDiS {
     std::string name;
 
     /// Finite element space.
-    std::vector<const FiniteElemSpace*> feSpace;
+    std::vector<const FiniteElemSpace*> componentSpaces;
 
     /// Local dof vectors.
     std::vector<DOFVector<double>*> vectors;
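The new createVec flag lets a SystemVector allocate its component DOFVectors itself instead of leaving that to the caller. A minimal usage sketch (hypothetical; feSpaces stands for whatever vector of component FE spaces the calling code already has):

    // Temporary system vector that owns one DOFVector per component.
    SystemVector tmpVec("tmp", feSpaces, feSpaces.size(), true);
    tmpVec.set(0.0);                      // zero all components
    tmpVec.getDOFVector(0)->set(1.0);     // then set a single component

This is the pattern PetscSolverNavierStokes::initSolver() uses further below to build the constant-pressure null space basis.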
diff --git a/AMDiS/src/parallel/BddcMlSolver.h b/AMDiS/src/parallel/BddcMlSolver.h
index 2dcb5853fe5bf02eaf5e57614d8075f85b54b344..3950dd4083761ab00965a5bbc724899c4e630230 100644
--- a/AMDiS/src/parallel/BddcMlSolver.h
+++ b/AMDiS/src/parallel/BddcMlSolver.h
@@ -35,8 +35,8 @@ namespace AMDiS {
   class BddcMlSolver : public PetscSolver
   {
   public:
-    BddcMlSolver()
-      : PetscSolver(),
+    BddcMlSolver(string name)
+      : PetscSolver(name),
        rhsVec(NULL),
        mat(NULL)
     {}
diff --git a/AMDiS/src/parallel/PetscProblemStat.cc b/AMDiS/src/parallel/PetscProblemStat.cc
index 4aa37d3799a41a96e960fe48bfd485b54beaa1db..846cc6e097096290b68c0a2e020123bcc093f40e 100644
--- a/AMDiS/src/parallel/PetscProblemStat.cc
+++ b/AMDiS/src/parallel/PetscProblemStat.cc
@@ -37,21 +37,21 @@ namespace AMDiS {
     Parameters::get(initFileStr, tmp);
 
     if (tmp == "petsc-schur") {
-      petscSolver = new PetscSolverSchur();
+      petscSolver = new PetscSolverSchur(initFileStr);
     } else if (tmp == "petsc-feti") {
       petscSolver = new PetscSolverFeti(initFileStr);
     } else if (tmp == "petsc-block") {
-      petscSolver = new PetscSolverGlobalBlockMatrix();
+      petscSolver = new PetscSolverGlobalBlockMatrix(initFileStr);
     } else if (tmp == "petsc") {
-      petscSolver = new PetscSolverGlobalMatrix();
+      petscSolver = new PetscSolverGlobalMatrix(initFileStr);
     } else if (tmp == "bddcml") {
 #ifdef HAVE_BDDC_ML
-      petscSolver = new BddcMlSolver();
+      petscSolver = new BddcMlSolver(initFileStr);
 #else
       ERROR_EXIT("AMDiS was compiled without BDDC-ML support!\n");
 #endif
     } else if (tmp == "petsc-stokes") {
-      petscSolver = new PetscSolverNavierStokes();
+      petscSolver = new PetscSolverNavierStokes(initFileStr);
     } else {
       ERROR_EXIT("No parallel solver %s available!\n", tmp.c_str());
     }
diff --git a/AMDiS/src/parallel/PetscSolver.cc b/AMDiS/src/parallel/PetscSolver.cc
index e8f0300ad817e4b03f21dc8004dd1ef95fd05e90..9cad407caa61258918d80fd3b3205897cdd1fce9 100644
--- a/AMDiS/src/parallel/PetscSolver.cc
+++ b/AMDiS/src/parallel/PetscSolver.cc
@@ -20,8 +20,9 @@ namespace AMDiS {
 
   using namespace std;
 
-  PetscSolver::PetscSolver()
+  PetscSolver::PetscSolver(string name)
     : ParallelCoarseSpaceMatVec(),
+      initFileStr(name),
       dofMap(FESPACE_WISE, true),
       dofMapSd(FESPACE_WISE, true),
       kspPrefix(""),
diff --git a/AMDiS/src/parallel/PetscSolver.h b/AMDiS/src/parallel/PetscSolver.h
index a227fbf65d92ab4a23b1b20b645d157c7beedf41..463c5ac1365ba1a5e645647faad7fcb7ca3bc56a 100644
--- a/AMDiS/src/parallel/PetscSolver.h
+++ b/AMDiS/src/parallel/PetscSolver.h
@@ -50,7 +50,7 @@ namespace AMDiS {
   class PetscSolver : public ParallelCoarseSpaceMatVec
   {
   public:
-    PetscSolver();
+    PetscSolver(string name);
 
     virtual ~PetscSolver() {}
 
@@ -145,6 +145,11 @@ namespace AMDiS {
       handleDirichletRows = b;
     }
 
+    ParallelDofMapping& getDofMap()
+    {
+      return dofMap;
+    }
+
   protected:
     /** \brief
      * Copies between two PETSc vectors by using different index sets for the
@@ -163,6 +168,9 @@ namespace AMDiS {
     /// Run test, if matrix is symmetric.
     bool testMatrixSymmetric(Mat mat, bool advancedTest = false);
 
   protected:
+    /// Prefix string for parameters in init file.
+    string initFileStr;
+
     /// FE spaces of all components for the stationary problem the specific
     /// solver object is registered to.
     vector<const FiniteElemSpace*> componentSpaces;
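With the constructor argument now stored in PetscSolver::initFileStr, every solver subclass can read its own parameters relative to the prefix it was created with, instead of PetscSolverFeti keeping a private copy. A sketch of the intended pattern inside a subclass (the key shown is the one the Navier-Stokes solver below actually reads; any other key under the same prefix works the same way):

    int component = -1;
    Parameters::get(initFileStr + "->stokes->pressure component", component);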
diff --git a/AMDiS/src/parallel/PetscSolverFeti.cc b/AMDiS/src/parallel/PetscSolverFeti.cc
index 58dbf068b38737e4485676bc5a7237362458d18b..8eeb8a71a2d6a503c34d30e7761cc40dd12fb095 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.cc
+++ b/AMDiS/src/parallel/PetscSolverFeti.cc
@@ -29,8 +29,7 @@ namespace AMDiS {
   using namespace std;
 
   PetscSolverFeti::PetscSolverFeti(string name)
-    : PetscSolver(),
-      initFileStr(name),
+    : PetscSolver(name),
       primalDofMap(COMPONENT_WISE),
       dualDofMap(COMPONENT_WISE),
       interfaceDofMap(COMPONENT_WISE),
@@ -108,7 +107,7 @@ namespace AMDiS {
     MeshLevelData& levelData = meshDistributor->getMeshLevelData();
 
     if (subdomain == NULL) {
-      subdomain = new PetscSolverGlobalMatrix();
+      subdomain = new PetscSolverGlobalMatrix("");
 
       subdomain->setSymmetric(isSymmetric);
       subdomain->setHandleDirichletRows(false);
@@ -1302,7 +1301,7 @@ namespace AMDiS {
     massMatrix.assembleOperator(op);
 
     if (!massMatrixSolver) {
-      massMatrixSolver = new PetscSolverGlobalMatrix;
+      massMatrixSolver = new PetscSolverGlobalMatrix("");
 
       massMatrixSolver->setKspPrefix("mass_");
       massMatrixSolver->setMeshDistributor(meshDistributor,
                                            mpiCommGlobal,
diff --git a/AMDiS/src/parallel/PetscSolverFeti.h b/AMDiS/src/parallel/PetscSolverFeti.h
index e43984976a82132469f22a927e0429aa5a92fac7..f761230e2c082ec6ebaa6b227d35d5168750dff0 100644
--- a/AMDiS/src/parallel/PetscSolverFeti.h
+++ b/AMDiS/src/parallel/PetscSolverFeti.h
@@ -230,9 +230,6 @@ namespace AMDiS {
     }
 
   protected:
-    /// Prefix string for parameters in init file.
-    string initFileStr;
-
     /// Mapping from primal DOF indices to a global index of primals.
     ParallelDofMapping primalDofMap;
diff --git a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.h
index ef428934d5a9621c6a5d8387ba916c68bd42769d..e5269406dda335b17eacc393d5ce99c1b2f927bc 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.h
+++ b/AMDiS/src/parallel/PetscSolverGlobalBlockMatrix.h
@@ -34,8 +34,8 @@ namespace AMDiS {
   class PetscSolverGlobalBlockMatrix : public PetscSolver
   {
   public:
-    PetscSolverGlobalBlockMatrix()
-      : PetscSolver(),
+    PetscSolverGlobalBlockMatrix(string name)
+      : PetscSolver(name),
        nComponents(0),
        nBlocks(-1)
     {}
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
index 8d4feaa49cec385688e13484f53bdef7db3541b5..7700d26c447132a9b17f8f4b1f59cf784c6fad66 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.cc
@@ -321,8 +321,7 @@ namespace AMDiS {
     if (nullspace.size() > 0) {
       VecDuplicate(getVecSolInterior(), &nullspaceBasis);
-      for (int i = 0; i < nComponents; i++)
-        setDofVector(nullspaceBasis, nullspace[0]->getDOFVector(i), i, true);
+      setDofVector(nullspaceBasis, *(nullspace[0]), true);
 
       VecAssemblyBegin(nullspaceBasis);
       VecAssemblyEnd(nullspaceBasis);
@@ -882,4 +881,27 @@ namespace AMDiS {
     }
   }
 
+
+  PetscSolver* PetscSolverGlobalMatrix::createSubSolver(int component,
+                                                        string kspPrefix)
+  {
+    FUNCNAME("PetscSolverGlobalMatrix::createSubSolver()");
+
+    vector<const FiniteElemSpace*> fe;
+    fe.push_back(componentSpaces[component]);
+
+    PetscSolver* subSolver = new PetscSolverGlobalMatrix("");
+    subSolver->setKspPrefix(kspPrefix.c_str());
+    subSolver->setMeshDistributor(meshDistributor,
+                                  mpiCommGlobal,
+                                  mpiCommLocal);
+    subSolver->init(fe, fe);
+
+    ParallelDofMapping &subDofMap = subSolver->getDofMap();
+    subDofMap[0] = dofMap[component];
+    subDofMap.update();
+
+    return subSolver;
+  }
+
 }
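createSubSolver() builds a single-component PetscSolverGlobalMatrix that reuses the parent's mesh distributor and, through the new getDofMap() accessor, the parent's DOF mapping for that component, so auxiliary matrices living on one FE space can be assembled and solved independently. A sketch of the intended use in a derived solver (pressureComponent and massMatrix are assumed to exist in the calling code):

    PetscSolver *massSolver = createSubSolver(pressureComponent, "mass_");
    massSolver->fillPetscMatrix(&massMatrix);
    KSP kspMass = massSolver->getSolver();   // tunable via the "mass_" option prefix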
diff --git a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
index 8f3f7ef172b1e9a44b35f33908e604d68e3508be..e35a3bd951029ca8b567a40273095fc21cb68750 100644
--- a/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
+++ b/AMDiS/src/parallel/PetscSolverGlobalMatrix.h
@@ -39,8 +39,8 @@ namespace AMDiS {
   class PetscSolverGlobalMatrix : public PetscSolver
   {
   public:
-    PetscSolverGlobalMatrix()
-      : PetscSolver(),
+    PetscSolverGlobalMatrix(string name)
+      : PetscSolver(name),
        zeroStartVector(false),
        printMatInfo(false)
     {
@@ -88,6 +88,14 @@ namespace AMDiS {
      */
     void createFieldSplit(PC pc, string splitName, vector<int> &components);
 
+    /// Wrapper to create field split from only one component.
+    void createFieldSplit(PC pc, string splitName, int component)
+    {
+      vector<int> components;
+      components.push_back(component);
+      createFieldSplit(pc, splitName, components);
+    }
+
     virtual void initSolver(KSP &ksp);
 
     virtual void exitSolver(KSP ksp);
@@ -112,6 +120,15 @@ namespace AMDiS {
       setDofVector(vecInterior, PETSC_NULL, vec, nRowVec, rankOnly);
     }
 
+    inline void setDofVector(Vec vecInterior,
+                             SystemVector &vec,
+                             bool rankOnly = false)
+    {
+      for (int i = 0; i < vec.getSize(); i++)
+        setDofVector(vecInterior, PETSC_NULL, vec.getDOFVector(i), i, rankOnly);
+    }
+
+    PetscSolver* createSubSolver(int component, string kspPrefix);
 
   protected:
     bool zeroStartVector;
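The new setDofVector() overload copies a whole SystemVector into an interior PETSc vector component by component; together with the single-component createFieldSplit() wrapper it keeps derived solvers free of index bookkeeping. A sketch (basisVec stands for any assembled SystemVector available in the calling code):

    Vec basis;
    VecDuplicate(getVecSolInterior(), &basis);
    setDofVector(basis, basisVec, true);     // rank-owned entries only
    VecAssemblyBegin(basis);
    VecAssemblyEnd(basis);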
diff --git a/AMDiS/src/parallel/PetscSolverNavierStokes.cc b/AMDiS/src/parallel/PetscSolverNavierStokes.cc
index 25d2b15bea41ab027ad3b32baff9c3a874e7455a..6b2cf09976167f5977ae2bff0713863c98a7903b 100644
--- a/AMDiS/src/parallel/PetscSolverNavierStokes.cc
+++ b/AMDiS/src/parallel/PetscSolverNavierStokes.cc
@@ -17,13 +17,70 @@ namespace AMDiS {
 
   using namespace std;
 
+
+  int petscMultNavierStokesSchur(Mat mat, Vec x, Vec y)
+  {
+    void *ctx;
+    MatShellGetContext(mat, &ctx);
+    MatShellNavierStokesSchur* data = static_cast<MatShellNavierStokesSchur*>(ctx);
+
+    switch (data->solverMode) {
+    case 0:
+      {
+        Vec vec0, vec1;
+        VecDuplicate(x, &vec0);
+        MatGetVecs(data->A00, &vec1, PETSC_NULL);
+
+        MatMult(data->A11, x, vec0);
+        MatMult(data->A01, x, vec1);
+        KSPSolve(data->kspVelocity, vec1, vec1);
+        MatMult(data->A10, vec1, y);
+        VecAYPX(y, -1.0, vec0);
+
+        VecDestroy(&vec0);
+        VecDestroy(&vec1);
+      }
+      break;
+    case 1:
+      {
+        Vec vec0, vec1;
+        VecDuplicate(x, &vec0);
+        VecDuplicate(x, &vec1);
+
+        KSPSolve(data->kspLaplace, x, vec0);
+        MatMult(data->matConDif, vec0, vec1);
+        KSPSolve(data->kspMass, vec1, y);
+
+        VecDestroy(&vec0);
+        VecDestroy(&vec1);
+      }
+      break;
+    default:
+      ERROR_EXIT("Wrong solver mode %d\n", data->solverMode);
+    }
+
+    return 0;
+  }
+
+
+  PetscSolverNavierStokes::PetscSolverNavierStokes(string name)
+    : PetscSolverGlobalMatrix(name),
+      pressureComponent(-1),
+      massMatrixSolver(NULL),
+      laplaceMatrixSolver(NULL),
+      conDifMatrixSolver(NULL)
+  {
+    Parameters::get(initFileStr + "->stokes->pressure component",
+                    pressureComponent);
+
+    TEST_EXIT(pressureComponent >= 0)
+      ("For using PETSc stokes solver you must define a pressure component!\n");
+
+    TEST_EXIT(pressureComponent == 2)("Fixed for pressure component = 2\n");
+  }
+
+
   void PetscSolverNavierStokes::initSolver(KSP &ksp)
   {
     FUNCNAME("PetscSolverNavierStokes::initSolver()");
 
-    MSG("RUN NAVIER STOKES SOLVER INIT!\n");
-
     KSPCreate(mpiCommGlobal, &ksp);
     KSPSetOperators(ksp, getMatInterior(), getMatInterior(),
                     SAME_NONZERO_PATTERN);
@@ -31,7 +88,25 @@ namespace AMDiS {
     KSPSetType(ksp, KSPGMRES);
     KSPSetOptionsPrefix(ksp, "ns_");
     KSPSetFromOptions(ksp);
-    KSPMonitorSet(ksp, KSPMonitorDefault, PETSC_NULL, PETSC_NULL);
+    KSPMonitorSet(ksp, KSPMonitorTrueResidualNorm, PETSC_NULL, PETSC_NULL);
+
+
+    // === Create null space information. ===
+
+    Vec nullSpaceBasis;
+    VecDuplicate(getVecSolInterior(), &nullSpaceBasis);
+
+    SystemVector basisVec("tmp", componentSpaces, componentSpaces.size(), true);
+    basisVec.set(0.0);
+    basisVec.getDOFVector(pressureComponent)->set(1.0);
+
+    setDofVector(nullSpaceBasis, basisVec, true);
+    VecAssemblyBegin(nullSpaceBasis);
+    VecAssemblyEnd(nullSpaceBasis);
+    VecNormalize(nullSpaceBasis, PETSC_NULL);
+
+    MatNullSpace matNullSpace;
+    MatNullSpaceCreate(mpiCommGlobal, PETSC_FALSE, 1, &nullSpaceBasis, &matNullSpace);
   }
@@ -39,13 +114,9 @@ namespace AMDiS {
   {
     FUNCNAME("PetscSolverNavierStokes::initPreconditioner()");
 
-    MSG("RUN NAVIER STOKES PRECONDITIONER INIT!\n");
-
     vector<int> velocityComponents;
     velocityComponents.push_back(0);
     velocityComponents.push_back(1);
-    vector<int> pressureComponent;
-    pressureComponent.push_back(2);
 
     PCSetType(pc, PCFIELDSPLIT);
     PCFieldSplitSetType(pc, PC_COMPOSITE_SCHUR);
@@ -59,42 +130,126 @@ namespace AMDiS {
     int nSubKsp;
     PCFieldSplitGetSubKSP(pc, &nSubKsp, &subKsp);
 
-    TEST_EXIT(nSubKsp == 2)("Wrong numer of KSPs inside of the fieldsplit preconditioner!\n");
+    TEST_EXIT(nSubKsp == 2)
+      ("Wrong number of KSPs inside of the fieldsplit preconditioner!\n");
 
-    KSP velocityKsp = subKsp[0];
-    KSP schurKsp = subKsp[1];
+    KSP kspVelocity = subKsp[0];
+    KSP kspSchur = subKsp[1];
     PetscFree(subKsp);
 
     Mat A00, A01, A10, A11;
     PCFieldSplitGetSchurBlocks(pc, &A00, &A01, &A10, &A11);
 
+    matShellContext.A00 = A00;
+    matShellContext.A01 = A01;
+    matShellContext.A10 = A10;
+    matShellContext.A11 = A11;
+    matShellContext.kspVelocity = kspVelocity;
+
+    Mat matShell;
+    MatDuplicate(A11, MAT_DO_NOT_COPY_VALUES, &matShell);
+    MatSetType(matShell, MATSHELL);
+    MatShellSetContext(matShell, &matShellContext);
+    MatShellSetOperation(matShell, MATOP_MULT, (void(*)(void))petscMultNavierStokesSchur);
+    MatSetUp(matShell);
+
+    KSPSetType(kspVelocity, KSPPREONLY);
+    PC pcSub;
+    KSPGetPC(kspVelocity, &pcSub);
+    PCSetType(pcSub, PCLU);
+    PCFactorSetMatSolverPackage(pcSub, MATSOLVERMUMPS);
 
-    MatInfo minfo;
-    PetscInt nRow, nCol;
-    MatGetSize(A00, &nRow, &nCol);
-    MatGetInfo(A00, MAT_GLOBAL_SUM, &minfo);
-    MSG("A00: %d x %d with %d nnz\n", nRow, nCol, static_cast<int>(minfo.nz_used));
 
-    MatGetSize(A01, &nRow, &nCol);
-    MatGetInfo(A01, MAT_GLOBAL_SUM, &minfo);
-    MSG("A01: %d x %d with %d nnz\n", nRow, nCol, static_cast<int>(minfo.nz_used));
+    KSPSetType(kspSchur, KSPGMRES);
+    KSPSetTolerances(kspSchur, 0, 1e-14, 1e+3, 1000);
+    KSPSetOperators(kspSchur, matShell, matShell, SAME_NONZERO_PATTERN);
+    KSPGetPC(kspSchur, &pcSub);
+    PCSetType(pcSub, PCNONE);
 
-    MatGetSize(A10, &nRow, &nCol);
-    MatGetInfo(A10, MAT_GLOBAL_SUM, &minfo);
-    MSG("A10: %d x %d with %d nnz\n", nRow, nCol, static_cast<int>(minfo.nz_used));
 
-    MatGetSize(A11, &nRow, &nCol);
-    MatGetInfo(A11, MAT_GLOBAL_SUM, &minfo);
-    MSG("A11: %d x %d with %d nnz\n", nRow, nCol, static_cast<int>(minfo.nz_used));
+    // === Mass matrix solver ===
 
-    KSPSetType(velocityKsp, KSPPREONLY);
-    PC pcSub;
-    KSPGetPC(velocityKsp, &pcSub);
+    MSG("Initialize mass matrix solver ...\n");
+
+    const FiniteElemSpace *pressureFeSpace = componentSpaces[pressureComponent];
+    DOFMatrix massMatrix(pressureFeSpace, pressureFeSpace);
+    Operator massOp(pressureFeSpace, pressureFeSpace);
+    Simple_ZOT zot;
+    massOp.addTerm(&zot);
+    massMatrix.assembleOperator(massOp);
+
+    massMatrixSolver = createSubSolver(pressureComponent, "mass_");
+    massMatrixSolver->fillPetscMatrix(&massMatrix);
+
+    MSG("... OK!\n");
+
+
+    // === Laplace matrix solver ===
+
+    MSG("Initialize Laplace matrix solver ...\n");
+
+    DOFMatrix laplaceMatrix(pressureFeSpace, pressureFeSpace);
+    Operator laplaceOp(pressureFeSpace, pressureFeSpace);
+    Simple_SOT sot;
+    laplaceOp.addTerm(&sot);
+    laplaceMatrix.assembleOperator(laplaceOp);
+
+    laplaceMatrixSolver = createSubSolver(pressureComponent, "laplace_");
+    laplaceMatrixSolver->fillPetscMatrix(&laplaceMatrix);
+
+    MSG("... OK!\n");
+
+
+    // === Create convection-diffusion operator ===
+
+    MSG("Initialize pressure convection-diffusion operator ...\n");
+
+    double timestep = 1.0;
+    Parameters::get("navierstokes->adapt->timestep", timestep);
+    timestep = 1.0 / timestep;
+
+    double nu = 0.01;
+    Parameters::get("nu", nu);
+
+    DOFMatrix conDifMatrix(pressureFeSpace, pressureFeSpace);
+    Operator conDifOp(pressureFeSpace, pressureFeSpace);
+    Simple_ZOT conDif0(timestep);
+    conDifOp.addTerm(&conDif0);
+    Simple_SOT conDif1(nu);
+    conDifOp.addTerm(&conDif1);
+    Vector_FOT conDif2(0);
+    //    conDifOp.addTerm(&conDif2, GRD_PHI);
+    Vector_FOT conDif3(1);
+    //    conDifOp.addTerm(&conDif3, GRD_PHI);
+
+    conDifMatrix.assembleOperator(conDifOp);
+
+    conDifMatrixSolver = createSubSolver(pressureComponent, "condif_");
+    conDifMatrixSolver->fillPetscMatrix(&conDifMatrix);
+
+    MSG("... OK!\n");
+
+    matShellContext.kspMass = massMatrixSolver->getSolver();
+    matShellContext.kspLaplace = laplaceMatrixSolver->getSolver();
+    matShellContext.matConDif = conDifMatrixSolver->getMatInterior();
+
+    KSPSetType(matShellContext.kspMass, KSPRICHARDSON);
+    KSPSetTolerances(matShellContext.kspMass, 0, 1e-13, 1e+3, 1);
+    KSPGetPC(matShellContext.kspMass, &pcSub);
     PCSetType(pcSub, PCLU);
     PCFactorSetMatSolverPackage(pcSub, MATSOLVERMUMPS);
+    KSPSetFromOptions(matShellContext.kspMass);
+
+    KSPSetType(matShellContext.kspLaplace, KSPCG);
+    KSPSetTolerances(matShellContext.kspLaplace, 0, 1e-13, 1e+3, 10000);
+    KSPGetPC(matShellContext.kspLaplace, &pcSub);
+    PCSetType(pcSub, PCJACOBI);
+    KSPSetFromOptions(matShellContext.kspLaplace);
+
+    matShellContext.solverMode = 1;
 
-    KSPView(velocityKsp, PETSC_VIEWER_STDOUT_WORLD);
-    KSPView(schurKsp, PETSC_VIEWER_STDOUT_WORLD);
+    KSPSetFromOptions(kspVelocity);
+    KSPSetFromOptions(kspSchur);
   }
 }
diff --git a/AMDiS/src/parallel/PetscSolverNavierStokes.h b/AMDiS/src/parallel/PetscSolverNavierStokes.h
index 4d710047143016b0f69f695399cae119541b0958..6f97c66c9a8a3433d30a941bf54749aa8db0dafb 100644
--- a/AMDiS/src/parallel/PetscSolverNavierStokes.h
+++ b/AMDiS/src/parallel/PetscSolverNavierStokes.h
@@ -29,17 +29,39 @@ namespace AMDiS {
 
   using namespace std;
 
+  struct MatShellNavierStokesSchur {
+    int solverMode;
+
+    // === Data for mode 0 (solve the Schur complement directly) ===
+
+    Mat A00, A01, A10, A11;
+
+    KSP kspVelocity;
+
+
+    // === Data for mode 1 ===
+
+    KSP kspMass, kspLaplace;
+
+    Mat matConDif;
+  };
+
   class PetscSolverNavierStokes : public PetscSolverGlobalMatrix
   {
   public:
-    PetscSolverNavierStokes()
-      : PetscSolverGlobalMatrix()
-    {}
+    PetscSolverNavierStokes(string name);
 
   protected:
     void initSolver(KSP &ksp);
 
     void initPreconditioner(PC pc);
+
+  private:
+    int pressureComponent;
+
+    PetscSolver *massMatrixSolver, *laplaceMatrixSolver, *conDifMatrixSolver;
+
+    MatShellNavierStokesSchur matShellContext;
   };
 }
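For reference, the two modes of the shell matrix configured above approximate the pressure Schur complement of the saddle point system as follows (a sketch in the notation of MatShellNavierStokesSchur). solverMode == 0 applies the exact operator

    S = A11 - A10 * inv(A00) * A01,

with inv(A00) realized by kspVelocity, while solverMode == 1 (the mode selected in initPreconditioner) applies the pressure convection-diffusion approximation

    inv(S) ~= inv(Mp) * Fp * inv(Ap),

where Mp is the pressure mass matrix (kspMass), Ap the pressure Laplace matrix (kspLaplace), and Fp the pressure convection-diffusion operator (matConDif).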
diff --git a/AMDiS/src/parallel/PetscSolverSchur.h b/AMDiS/src/parallel/PetscSolverSchur.h
index 598a2943b4aaca923cada3b248007ff8c7fab00c..5116027dc4f789e5e4c5a200ccfac789ea785e97
--- a/AMDiS/src/parallel/PetscSolverSchur.h
+++ b/AMDiS/src/parallel/PetscSolverSchur.h
@@ -33,8 +33,8 @@ namespace AMDiS {
   class PetscSolverSchur : public PetscSolver
   {
   public:
-    PetscSolverSchur()
-      : PetscSolver()
+    PetscSolverSchur(string name)
+      : PetscSolver(name)
     {}
 
     void fillPetscMatrix(Matrix<DOFMatrix*> *mat);
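A hypothetical init-file fragment for selecting the new solver. The prefix in front of "->stokes" depends on how PetscProblemStat builds initFileStr for the problem at hand and is not part of this patch; the parameter names themselves are the ones read by the code above:

    myProblem->solver:                               petsc-stokes
    myProblem->solver->stokes->pressure component:   2
    navierstokes->adapt->timestep:                   0.01
    nu:                                              0.01

The outer KSP uses the option prefix ns_, and the sub-solvers use mass_, laplace_, and condif_, so each of them can be tuned further through the PETSc options database.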