Commit 7c30c9c8 authored by Thomas Witkowski

Fixed memory issue in Navier-Stokes solver, added new mode for checker partitioner.

parent 74d7b9b5
Showing with 1141 additions and 495 deletions
......@@ -101,8 +101,14 @@ namespace AMDiS {
for (int i = 0; i < nBasFcts; i++)
uh[i] = operator[](localIndices[i]);
value = basFcts->evalUh(lambda, uh);
} else
} else {
#ifdef HAVE_PARALLEL_DOMAIN_AMDIS
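// In parallel computations the point may lie outside the rank's local
// subdomain, so return 0 here instead of aborting.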
return 0.0;
#else
throw(std::runtime_error("Cannot evaluate DOFVector at point p, because the point is outside the geometry."));
#endif
}
if (oldElInfo == NULL)
delete elInfo;
......
......@@ -15,7 +15,35 @@
namespace AMDiS {
CheckerPartitioner::CheckerPartitioner(MPI::Intracomm *comm)
: MeshPartitioner(comm),
mpiRank(mpiComm->Get_rank()),
mpiSize(mpiComm->Get_size()),
mode(0),
multilevel(false)
{
string modestr = "";
Parameters::get("parallel->partitioner->mode", modestr);
if (modestr == "x-stripes")
mode = 1;
else if (modestr == "y-stripes")
mode = 2;
else if (modestr == "z-stripes")
mode = 3;
else if (modestr == "tetrahedron-stripes")
mode = 4;
else if (modestr == "multilevel")
multilevel = true;
else {
if (modestr != "") {
ERROR_EXIT("No partitioner mode \"%s\"!\n", modestr.c_str());
}
}
}
bool CheckerPartitioner::createInitialPartitioning()
{
FUNCNAME("CheckerPartitioner::createInitialPartitioning()");
......@@ -47,6 +75,13 @@ namespace AMDiS {
TEST_EXIT(MPI::COMM_WORLD.Get_size() == 16)
("Multilevel partitioning is implemented for 16 nodes only!\n");
}
if (mode == 4) {
TEST_EXIT(mesh->getDim() == 3)("Works only in 3D!\n");
createTetrahedronStripes();
}
int dim = mesh->getDim();
TraverseStack stack;
......@@ -114,6 +149,31 @@ namespace AMDiS {
break;
case 4:
// tetrahedron-stripes
{
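// The element's rank is the index of the precomputed tetrahedron stripe
// that contains it, see createTetrahedronStripes().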
int inStripe = -1;
int stripePos = -1;
for (int stripe = 0; stripe < elInStripe.size(); stripe++) {
for (int pos = 0; pos < elInStripe[stripe].size(); pos++) {
if (elInStripe[stripe][pos] == elIndex) {
inStripe = stripe;
stripePos = pos;
break;
}
}
if (inStripe >= 0)
break;
}
TEST_EXIT(inStripe >= 0)("Should not happen!\n");
elInRank = inStripe;
}
break;
default:
ERROR_EXIT("Mode %d does not exists for checker based mesh partitioning!\n",
mode);
......@@ -131,4 +191,115 @@ namespace AMDiS {
return true;
}
void CheckerPartitioner::createTetrahedronStripes()
{
FUNCNAME("CheckerPartitioner::createTetrahedronStripes()");
vector<vector<MacroElement*> > stripes;
int nElements = 0;
TraverseStack stack;
ElInfo *elInfo =
stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL | Mesh::FILL_COORDS);
while (elInfo) {
TEST_EXIT(elInfo->getLevel() == 0)("Should not happen!\n");
Element *el = elInfo->getElement();
int elIndex = el->getIndex();
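// Count how many vertices of this macro element lie in the z = 0 plane;
// an element with a full bottom face there starts a new stripe.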
int zeroCoordCounter = 0;
for (int i = 0; i < mesh->getGeo(VERTEX); i++)
if (fabs(elInfo->getCoord(i)[2]) < 1e-10)
zeroCoordCounter++;
if (zeroCoordCounter == 3) {
vector<MacroElement*> tmp;
tmp.push_back(elInfo->getMacroElement());
stripes.push_back(tmp);
vector<int> tmpIndex;
tmpIndex.push_back(elInfo->getMacroElement()->getIndex());
elInStripe.push_back(tmpIndex);
}
nElements++;
elInfo = stack.traverseNext(elInfo);
}
TEST_EXIT(mpiSize % stripes.size() == 0)
("Should not happen! mpiSize = %d but %d bottom elements found!\n",
mpiSize, stripes.size());
int testElementCounter = 0;
for (int stripe = 0; stripe < stripes.size(); stripe++) {
MacroElement *mel = stripes[stripe][0];
set<int> localDofs;
for (int i = 0; i < mesh->getGeo(VERTEX); i++)
if (fabs(mel->getCoord(i)[2]) < 1e-10)
localDofs.insert(i);
TEST_EXIT(localDofs.size() == 3)("Should not happen!\n");
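// Walk the stripe upwards: in each step, swap the tracked vertex that
// matches in x and y but differs in z, take the face spanned by the three
// tracked vertices, and continue with the macro element behind that face.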
while (mel != NULL) {
int replaceDof = -1;
for (int i = 0; i < mesh->getGeo(VERTEX); i++)
if (localDofs.count(i) == 0)
replaceDof = i;
bool found = false;
for (std::set<int>::iterator dit = localDofs.begin();
dit != localDofs.end(); ++dit) {
WorldVector<double> c0 = mel->getCoord(*dit);
WorldVector<double> c1 = mel->getCoord(replaceDof);
if (fabs(c0[0] - c1[0]) < 1e-10 &&
fabs(c0[1] - c1[1]) < 1e-10 &&
fabs(c0[2] - c1[2]) > 1e-10) {
found = true;
localDofs.erase(dit);
localDofs.insert(replaceDof);
break;
}
}
TEST_EXIT(found)("Should not happen!\n");
set<DegreeOfFreedom> faceDofs;
for (std::set<int>::iterator dit = localDofs.begin();
dit != localDofs.end(); ++dit)
faceDofs.insert(mel->getElement()->getDof(*dit, 0));
TEST_EXIT(faceDofs.size() == 3)("Should not happen!\n");
int localFace = -1;
for (int i = 0; i < mesh->getGeo(FACE); i++) {
DofFace face = mel->getElement()->getFace(i);
bool allVertexInFace =
faceDofs.count(face.get<0>()) &&
faceDofs.count(face.get<1>()) &&
faceDofs.count(face.get<2>());
if (allVertexInFace) {
localFace = i;
break;
}
}
TEST_EXIT(localFace >= 0)("Should not happen!\n");
MacroElement *mel_neigh = mel->getNeighbour(localFace);
if (mel_neigh) {
stripes[stripe].push_back(mel_neigh);
elInStripe[stripe].push_back(mel_neigh->getIndex());
}
mel = mel_neigh;
}
testElementCounter += stripes[stripe].size();
}
TEST_EXIT(testElementCounter == nElements)("Should not happen!\n");
}
}
......@@ -35,35 +35,14 @@ namespace AMDiS {
class CheckerPartitioner : public MeshPartitioner
{
public:
CheckerPartitioner(MPI::Intracomm *comm)
: MeshPartitioner(comm),
mpiRank(mpiComm->Get_rank()),
mpiSize(mpiComm->Get_size()),
mode(0),
multilevel(false)
{
string modestr = "";
Parameters::get("parallel->partitioner->mode", modestr);
if (modestr == "x-stripes")
mode = 1;
else if (modestr == "y-stripes")
mode = 2;
else if (modestr == "z-stripes")
mode = 3;
else if (modestr == "multilevel")
multilevel = true;
else {
if (modestr != "") {
ERROR_EXIT("No partitioner mode \"%s\"!\n", modestr.c_str());
}
}
}
CheckerPartitioner(MPI::Intracomm *comm);
~CheckerPartitioner() {}
bool createInitialPartitioning();
void createTetrahedronStripes();
/// \ref MeshPartitioner::partition
bool partition(map<int, double> &elemWeights, PartitionMode mode = INITIAL)
{
......@@ -81,8 +60,13 @@ namespace AMDiS {
/// 0: standard mode: each node gets one box
/// 1: x-stripes: each node gets one x-stripe of boxes
/// 2: y-stripes: each node gets one y-stripe of boxes
/// 3: z-stripes: each node gets one z-stripe of boxes
/// 4: tetrahedron-stripes: a.k.a. Hieram mode :)
int mode;
/// Only used in mode 4.
vector<vector<int> > elInStripe;
bool multilevel;
};
}
......
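For completeness, the stripe modes documented above are selected through the init-file parameter read in the CheckerPartitioner constructor. A minimal sketch (only the mode key is shown; the key name is taken from the Parameters::get call above, all other init-file content is assumed):

parallel->partitioner->mode: tetrahedron-stripes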
......@@ -579,10 +579,8 @@ namespace AMDiS {
vector<ParallelDofMapping*>::iterator it =
find(dofMaps.begin(), dofMaps.end(), &dofMap);
TEST_EXIT(it != dofMaps.end())
("Cannot find Parallel DOF mapping object which should be removed!\n");
dofMaps.erase(it);
if (it != dofMaps.end())
dofMaps.erase(it);
}
......@@ -908,7 +906,7 @@ namespace AMDiS {
{
FUNCNAME("MeshDistributor::createMeshLevelStructure()");
int levelMode = -1;
int levelMode = 0;
Parameters::get("parallel->level mode", levelMode);
TEST_EXIT(levelMode >= 0)("Wrong level mode %d!\n", levelMode);
......@@ -1305,7 +1303,8 @@ namespace AMDiS {
{
int mapSize = data.size();
SerUtil::serialize(out, mapSize);
for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it = data.begin(); it != data.end(); ++it) {
for (map<int, map<const FiniteElemSpace*, DofContainer> >::iterator it = data.begin();
it != data.end(); ++it) {
int rank = it->first;
SerUtil::serialize(out, rank);
......
......@@ -69,16 +69,7 @@ namespace AMDiS {
meshLevel = level;
meshDistributor = md;
#if 0
mpiCommGlobal = meshDistributor->getMpiComm(meshLevel);
if (meshLevel + 1 <
meshDistributor->getMeshLevelData().getNumberOfLevels())
mpiCommLocal = meshDistributor->getMpiComm(meshLevel + 1);
else
mpiCommLocal = MPI::COMM_SELF;
#endif
domainComm = meshDistributor->getMpiComm(meshLevel);
if (meshLevel >= 1)
coarseSpaceComm = meshDistributor->getMpiComm(meshLevel - 1);
}
......@@ -316,6 +307,7 @@ namespace AMDiS {
FUNCNAME("ParallelCoarseSpaceSolver::vecDestroy()");
int nVec = vecSol.size();
for (int i = 0; i < nVec; i++) {
VecDestroy(&vecSol[i]);
VecDestroy(&vecRhs[i]);
......@@ -391,7 +383,7 @@ namespace AMDiS {
int groupRowsInterior = 0;
if (domainComm.Get_rank() == 0)
groupRowsInterior = interiorMap->getOverallDofs();
mpi::getDofNumbering(coarseSpaceComm, groupRowsInterior,
rStartInterior, nGlobalOverallInterior);
......@@ -400,7 +392,7 @@ namespace AMDiS {
tmp = rStartInterior;
domainComm.Allreduce(&tmp, &rStartInterior, 1, MPI_INT, MPI_SUM);
}
}
}
}
......@@ -99,6 +99,20 @@ namespace AMDiS {
/// Destroys PETSc vector objects.
void vecDestroy();
/// Just for super trick
vector<vector<Mat> >& getMat()
{
return mat;
}
/// Just for super trick
vector<Vec>& getVecRhs()
{
return vecRhs;
}
/// Get interior matrix.
inline Mat& getMatInterior()
{
......@@ -243,7 +257,7 @@ namespace AMDiS {
/// zero structure.
bool checkMeshChange();
private:
protected:
/// Matrix of PETSc matrices. mat[0][0] is the interior discretization
/// matrix, mat[1][1] corresponds to the first coarse space and so on.
/// mat[i][j], with i not equal to j, are the coupling between the interior
......@@ -291,7 +305,6 @@ namespace AMDiS {
/// some phase fields.
bool alwaysCreateNnzStructure;
protected:
/// Prefix string for parameters in init file.
string initFileStr;
......
......@@ -766,10 +766,10 @@ namespace AMDiS {
{
FUNCNAME("ParallelDebug::printBoundaryInfo()");
// int tmp = 0;
// Parameters::get("parallel->debug->print boundary info", tmp);
// if (tmp <= 0 && force == false)
// return;
int tmp = 0;
Parameters::get("parallel->debug->print boundary info", tmp);
if (tmp <= 0 && force == false)
return;
MSG("Interior boundary info:\n");
......@@ -1018,6 +1018,8 @@ namespace AMDiS {
{
FUNCNAME("ParallelDebug::writeCsvElementMap()");
return;
MSG("writing local Element map to CSV File \n");
Mesh *mesh = feSpace->getMesh();
......
......@@ -74,7 +74,7 @@ namespace AMDiS {
nOverallDofs = 0;
rStartDofs = 0;
mpi::getDofNumbering(mpiComm, nRankDofs, rStartDofs, nOverallDofs);
// === If required, compute also the global indices. ===
if (globalMapping) {
......
......@@ -74,7 +74,7 @@ namespace AMDiS {
}
void blockMatMatSolve(KSP ksp, Mat mat0, Mat &mat1)
void blockMatMatSolve(MPI::Intracomm mpiComm, KSP ksp, Mat mat0, Mat &mat1)
{
// === We have to calculate mat1 = ksp mat0: ===
// === - get all local column vectors from mat0 ===
......@@ -86,7 +86,7 @@ namespace AMDiS {
MatGetLocalSize(mat0, &localRows, &localCols);
MatGetSize(mat0, &globalRows, &globalCols);
MatCreateAIJ(PETSC_COMM_WORLD,
MatCreateAIJ(mpiComm,
localRows, localCols, globalRows, globalCols,
150, PETSC_NULL, 150, PETSC_NULL, &mat1);
MatSetOption(mat1, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
......@@ -253,7 +253,7 @@ namespace AMDiS {
const char* kspPrefix,
KSPType kspType,
PCType pcType,
MatSolverPackage matSolverPackage,
const MatSolverPackage matSolverPackage,
PetscReal rtol,
PetscReal atol,
PetscInt maxIt)
......
......@@ -77,11 +77,15 @@ namespace AMDiS {
* task. The overall number of rows of local matrices A must be the
* number of distributed rows in B.
*
* \param[in] ksp inv(A) matrix given by a PETSc solver object.
* \param[in] mat0 matrix B
* \param[out] mat1 resulting matrix C, is created inside the function
* \param[in] mpiComm MPI communicator object (must match the communicator used by ksp)
* \param[in] ksp inv(A) matrix given by a PETSc solver object.
* \param[in] mat0 matrix B
* \param[out] mat1 resulting matrix C, is created inside the function
*/
void blockMatMatSolve(KSP ksp, Mat mat0, Mat &mat1);
void blockMatMatSolve(MPI::Intracomm mpiComm,
KSP ksp,
Mat mat0,
Mat &mat1);
/** \brief
* Converts a 2x2 nested matrix to a MATAIJ matrix (thus not nested).
......@@ -92,10 +96,10 @@ namespace AMDiS {
void matNestConvert(Mat matNest, Mat &mat);
void setSolverWithLu(KSP ksp,
const char* kspPrefix,
const char* kspPrefix,
KSPType kspType,
PCType pcType,
MatSolverPackage matSolverPackage,
const MatSolverPackage matSolverPackage,
PetscReal rtol = PETSC_DEFAULT,
PetscReal atol = PETSC_DEFAULT,
PetscInt maxIt = PETSC_DEFAULT);
......
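As an aside, the doc comment above spells out the column-by-column algorithm behind blockMatMatSolve. The following is a minimal standalone sketch of that idea, not the AMDiS implementation: only the MatCreateAIJ call with its fixed preallocation of 150 nonzeros per row is taken from the .cc hunk above; the function name and the rest of the loop structure are illustrative assumptions.

// Sketch only: compute mat1 = inv(A) * mat0 column by column, where inv(A)
// is applied through the given KSP object.
void blockMatMatSolveSketch(MPI::Intracomm mpiComm, KSP ksp, Mat mat0, Mat &mat1)
{
  PetscInt localRows, localCols, globalRows, globalCols;
  MatGetLocalSize(mat0, &localRows, &localCols);
  MatGetSize(mat0, &globalRows, &globalCols);

  MatCreateAIJ(mpiComm, localRows, localCols, globalRows, globalCols,
               150, PETSC_NULL, 150, PETSC_NULL, &mat1);
  MatSetOption(mat1, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);

  Vec b, x;
  VecCreateMPI(mpiComm, localRows, globalRows, &b);
  VecDuplicate(b, &x);

  PetscInt rStart, rEnd;
  VecGetOwnershipRange(x, &rStart, &rEnd);

  for (PetscInt col = 0; col < globalCols; col++) {
    MatGetColumnVector(mat0, b, col);   // local part of the col-th column of B
    KSPSolve(ksp, b, x);                // x = inv(A) * b

    PetscScalar *vals;
    VecGetArray(x, &vals);
    for (PetscInt row = rStart; row < rEnd; row++)
      MatSetValue(mat1, row, col, vals[row - rStart], INSERT_VALUES);
    VecRestoreArray(x, &vals);
  }

  MatAssemblyBegin(mat1, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(mat1, MAT_FINAL_ASSEMBLY);
  VecDestroy(&b);
  VecDestroy(&x);
}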
......@@ -23,7 +23,7 @@ namespace AMDiS {
PetscSolver::PetscSolver(string name)
: ParallelCoarseSpaceSolver(name),
dofMap(FESPACE_WISE, true),
dofMapSd(FESPACE_WISE, true),
dofMapSubDomain(FESPACE_WISE, true),
parallelDofMappingsRegistered(false),
kspPrefix(""),
removeRhsNullspace(false),
......@@ -73,19 +73,21 @@ namespace AMDiS {
parallelDofMappingsRegistered = true;
dofMap.init(componentSpaces, feSpaces);
dofMap.setMpiComm(levelData.getMpiComm(0));
dofMap.setDofComm(meshDistributor->getDofComm(0));
dofMap.setMpiComm(levelData.getMpiComm(meshLevel));
dofMap.setDofComm(meshDistributor->getDofComm(meshLevel));
dofMap.clear();
meshDistributor->registerDofMap(dofMap);
if (nLevels > 1 && levelData.getMpiComm(1) != MPI::COMM_SELF) {
MSG("WARNING: MAKE GENERAL!\n");
dofMapSd.init(componentSpaces, feSpaces);
dofMapSd.setMpiComm(levelData.getMpiComm(1));
dofMapSd.setDofComm(meshDistributor->getDofComm(1));
dofMapSd.clear();
meshDistributor->registerDofMap(dofMapSd);
if (meshLevel + 1 < nLevels &&
levelData.getMpiComm(meshLevel + 1) != MPI::COMM_SELF) {
dofMapSubDomain.init(componentSpaces, feSpaces);
dofMapSubDomain.setMpiComm(levelData.getMpiComm(meshLevel + 1));
dofMapSubDomain.setDofComm(meshDistributor->getDofComm(meshLevel + 1));
dofMapSubDomain.clear();
meshDistributor->registerDofMap(dofMapSubDomain);
}
meshDistributor->updateParallelDofMappings();
}
}
......@@ -114,6 +116,12 @@ namespace AMDiS {
{
FUNCNAME("PetscSolver::solveGlobal()");
int s, ls;
VecGetSize(rhs, &s);
VecGetLocalSize(rhs, &ls);
MSG("Solve global %d %d\n", ls, s);
ERROR_EXIT("Not implemented!\n");
}
......
......@@ -181,7 +181,10 @@ namespace AMDiS {
vector<const FiniteElemSpace*> feSpaces;
///
ParallelDofMapping dofMap, dofMapSd;
ParallelDofMapping dofMap;
///
ParallelDofMapping dofMapSubDomain;
/// If the parallel DOF mappings of this solver are registered to the
/// mesh distributor object, this variable is set to true to remove them
......
......@@ -72,6 +72,9 @@ namespace AMDiS {
/// Solve the system using FETI-DP method.
void solvePetscMatrix(SystemVector &vec, AdaptInfo *adaptInfo);
/// Just for the super trick
void solveGlobal(Vec &rhs, Vec &sol);
/// Destroys all matrix data structures.
void destroyMatrixData();
......@@ -150,7 +153,8 @@ namespace AMDiS {
void createMatAugmentedLagrange();
bool testWirebasketEdge(BoundaryObject &edge, const FiniteElemSpace *feSpace);
bool testWirebasketEdge(BoundaryObject &edge,
const FiniteElemSpace *feSpace);
///
void createPreconditionerMatrix(Matrix<DOFMatrix*> *mat);
......@@ -171,9 +175,33 @@ namespace AMDiS {
/// Creates PETSc KSP solver object for the FETI-DP operator, \ref ksp_feti
void createFetiKsp();
///
void createFetiExactKsp();
///
void createFetiInexactKsp();
///
void createFetiInexactReducedKsp();
///
void createFetiPreconLumped(PC pc);
///
void createFetiPreconDirichlet(PC pc);
/// Destroys FETI-DP operator, \ref ksp_feti
void destroyFetiKsp();
///
void destroyFetiExactKsp();
///
void destroyFetiInexactKsp();
///
void destroyFetiInexactReducedKsp();
/// Create the null space of the FETI-DP operator (if there is one) and
/// attaches it to the corresponding matrices and KSP objects.
void createNullSpace();
......@@ -204,25 +232,26 @@ namespace AMDiS {
void recoverInterfaceSolution(Vec& vecInterface,
SystemVector &vec);
/** \brief
* Solves the FETI-DP system globally, thus without reducing it to the
* Lagrange multipliers. This should be used for debugging only to test
* if the FETI-DP system is set up correctly.
*
* \param[out] vec Solution DOF vectors.
*/
void solveFetiMatrix(SystemVector &vec);
///
void solveFeti(Vec &rhsInterior, Vec &rhsCoarse,
Vec &solInterior, Vec &solCoarse);
/** \brief
* Solves the FETI-DP system by first reducing it to the Lagrange
* multipliers. This is what one expects when using the FETI-DP method :)
*
* \param[out] vec Solution DOF vectors.
*/
void solveReducedFetiMatrix(SystemVector &vec);
///
void solveFetiExact(Vec &rhsInterior, Vec &rhsCoarse,
Vec &solInterior, Vec &solCoarse);
///
void solveFetiInexact(Vec &rhsInterior, Vec &rhsCoarse,
Vec &solInterior, Vec &solCoarse);
///
void solveFetiInexactReduced(Vec &rhsInterior, Vec &rhsCoarse,
Vec &solInterior, Vec &solCoarse);
///
void resetStatistics();
///
void printStatistics();
/// Checks whether a given DOF is a primal DOF in a given component.
......@@ -247,6 +276,9 @@ namespace AMDiS {
}
protected:
/// Type of FETI-DP solver, i.e., exact or some inexact version
FetiSolverType fetiSolverType;
/// Mapping from primal DOF indices to a global index of primals.
ParallelDofMapping primalDofMap;
......@@ -309,6 +341,12 @@ namespace AMDiS {
/// Data for MatMult operation in matrix \ref mat_feti
FetiData fetiData;
///
FetiInexactData fetiInexactData;
///
FetiInexactPreconData fetiInexactPreconData;
/// Defines which preconditioner should be used to solve the reduced
/// FETI-DP system.
FetiPreconditioner fetiPreconditioner;
......@@ -338,11 +376,10 @@ namespace AMDiS {
PetscSolver *subdomain;
PetscSolver *massMatrixSolver;
int rStartInterior;
// Just a trick for multi level things, should be removed and generalized!
PetscSolver *mlSubdomain;
int nGlobalOverallInterior;
PetscSolver *massMatrixSolver;
bool printTimings;
......
......@@ -15,6 +15,26 @@
namespace AMDiS {
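/// Copies the values of a global PETSc vector into a local vector; both
/// vectors must have the same local size.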
void copyGlobalLocal(Vec globalB, Vec localB)
{
double *valueGlobal, *valueLocal;
VecGetArray(globalB, &valueGlobal);
VecGetArray(localB, &valueLocal);
int nGlobal, nLocal;
VecGetLocalSize(globalB, &nGlobal);
VecGetLocalSize(localB, &nLocal);
TEST_EXIT(nGlobal == nLocal)("Should not happen!\n");
for (int i = 0; i < nGlobal; i++)
valueLocal[i] = valueGlobal[i];
VecRestoreArray(globalB, &valueGlobal);
VecRestoreArray(localB, &valueLocal);
}
int petscMultMatSchurPrimal(Mat mat, Vec x, Vec y)
{
// S_PiPi = K_PiPi - K_PiB inv(K_BB) K_BPi
......@@ -194,6 +214,173 @@ namespace AMDiS {
}
// y = mat * x
int petscMultMatFetiInexact(Mat mat, Vec x, Vec y)
{
FUNCNAME("petscMultMatFetiInexact()");
void *ctx;
MatShellGetContext(mat, &ctx);
FetiInexactData* data = static_cast<FetiInexactData*>(ctx);
Vec xInterior, xPrimal, xLagrange;
Vec yInterior, yPrimal, yLagrange;
VecNestGetSubVec(x, 0, &xInterior);
VecNestGetSubVec(x, 1, &xPrimal);
VecNestGetSubVec(x, 2, &xLagrange);
VecNestGetSubVec(y, 0, &yInterior);
VecNestGetSubVec(y, 1, &yPrimal);
VecNestGetSubVec(y, 2, &yLagrange);
Vec tmpInterior;
VecDuplicate(xInterior, &tmpInterior);
MatMult(*(data->matBPi), xPrimal, tmpInterior);
MatMultTranspose(*(data->mat_lagrange), xLagrange, yInterior);
VecAXPY(yInterior, 1.0, tmpInterior);
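// Interior block: copy the interior part of x into the work vector,
// apply K_BB, and add the result to yInterior.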
{
{
double *valueRhs, *valueTmp;
VecGetArray(xInterior, &valueRhs);
VecGetArray(data->tmp_vec_b0, &valueTmp);
int nRhsLocal, nTmpLocal;
VecGetLocalSize(xInterior, &nRhsLocal);
VecGetLocalSize(data->tmp_vec_b0, &nTmpLocal);
TEST_EXIT(nRhsLocal == nTmpLocal)("Should not happen!\n");
for (int i = 0; i < nRhsLocal; i++)
valueTmp[i] = valueRhs[i];
VecRestoreArray(xInterior, &valueRhs);
VecRestoreArray(data->tmp_vec_b0, &valueTmp);
}
MatMult(*(data->matBB), data->tmp_vec_b0, data->tmp_vec_b1);
{
double *valueRhs, *valueTmp;
VecGetArray(tmpInterior, &valueRhs);
VecGetArray(data->tmp_vec_b1, &valueTmp);
int nRhsLocal, nTmpLocal;
VecGetLocalSize(yInterior, &nRhsLocal);
VecGetLocalSize(data->tmp_vec_b1, &nTmpLocal);
TEST_EXIT(nRhsLocal == nTmpLocal)("Should not happen!\n");
for (int i = 0; i < nRhsLocal; i++)
valueRhs[i] = valueTmp[i];
VecRestoreArray(tmpInterior, &valueRhs);
VecRestoreArray(data->tmp_vec_b1, &valueTmp);
}
VecAXPY(yInterior, 1.0, tmpInterior);
}
{
Vec tmpPrimal;
VecDuplicate(xPrimal, &tmpPrimal);
MatMult(*(data->matPiB), xInterior, tmpPrimal);
MatMult(*(data->matPiPi), xPrimal, yPrimal);
VecAXPY(yPrimal, 1.0, tmpPrimal);
VecDestroy(&tmpPrimal);
}
MatMult(*(data->mat_lagrange), xInterior, yLagrange);
VecDestroy(&tmpInterior);
return 0;
}
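// Shell routine applying the inexact FETI-DP preconditioner: block
// substitution with the interior solver (ksp_interior), the primal Schur
// complement solver (ksp_schur), and the standard FETI-DP preconditioner
// (pc_feti) for the Lagrange multiplier block.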
PetscErrorCode pcInexactFetiShell(PC pc, Vec x, Vec y)
{
void *ctx;
PCShellGetContext(pc, &ctx);
FetiInexactPreconData* data = static_cast<FetiInexactPreconData*>(ctx);
Vec xInterior, xPrimal, xLagrange;
Vec yInterior, yPrimal, yLagrange;
VecNestGetSubVec(x, 0, &xInterior);
VecNestGetSubVec(x, 1, &xPrimal);
VecNestGetSubVec(x, 2, &xLagrange);
VecNestGetSubVec(y, 0, &yInterior);
VecNestGetSubVec(y, 1, &yPrimal);
VecNestGetSubVec(y, 2, &yLagrange);
Vec tmpPrimal;
VecDuplicate(xPrimal, &tmpPrimal);
Vec tmpInterior0, tmpInterior1;
VecDuplicate(xInterior, &tmpInterior0);
VecDuplicate(xInterior, &tmpInterior1);
{
// === First Block ===
copyGlobalLocal(xInterior, data->tmp_vec_b0);
KSPSolve(data->ksp_interior, data->tmp_vec_b0, data->tmp_vec_b0);
copyGlobalLocal(data->tmp_vec_b0, tmpInterior0);
MatMult(*(data->matPiB), tmpInterior0, tmpPrimal);
VecAYPX(tmpPrimal, -1.0, xPrimal);
// === Second Block ===
// tmpInterior0 already calculated
KSPSolve(data->ksp_schur, tmpPrimal, tmpPrimal);
// === Third Block ===
// tmpPrimal already calculated
MatMult(*(data->matBPi), tmpPrimal, tmpInterior1);
copyGlobalLocal(tmpInterior1, data->tmp_vec_b0);
KSPSolve(data->ksp_interior, data->tmp_vec_b0, data->tmp_vec_b0);
copyGlobalLocal(data->tmp_vec_b0, tmpInterior1);
VecAXPY(tmpInterior0, -1.0, tmpInterior1);
VecCopy(tmpInterior0, yInterior);
VecCopy(tmpPrimal, yPrimal);
}
{
Vec tmpLagrange;
VecDuplicate(xLagrange, &tmpLagrange);
MatMult(*(data->mat_lagrange), yInterior, tmpLagrange);
PCApply(data->pc_feti, tmpLagrange, yLagrange);
PCApply(data->pc_feti, xLagrange, tmpLagrange);
VecAXPY(yLagrange, -1.0, tmpLagrange);
VecDestroy(&tmpLagrange);
}
VecDestroy(&tmpPrimal);
VecDestroy(&tmpInterior0);
VecDestroy(&tmpInterior1);
return 0;
}
// y = mat * x
int petscMultMatFetiAugmented(Mat mat, Vec x, Vec y)
{
......@@ -429,6 +616,7 @@ namespace AMDiS {
// === K_DD ===
MatMult(*(data->mat_duals_duals), data->tmp_vec_duals0, data->tmp_vec_duals1);
......
......@@ -28,6 +28,8 @@
namespace AMDiS {
void copyGlobalLocal(Vec globalB, Vec localB);
///
int petscMultMatSchurPrimal(Mat mat, Vec x, Vec y);
......@@ -37,6 +39,12 @@ namespace AMDiS {
/// FETI-DP operator
int petscMultMatFeti(Mat mat, Vec x, Vec y);
/// Inexact FETI-DP operator
int petscMultMatFetiInexact(Mat mat, Vec x, Vec y);
/// Inexact FETI-DP preconditioner
PetscErrorCode pcInexactFetiShell(PC pc, Vec x, Vec y);
/// FETI-DP operator with augmented Lagrange constraints
int petscMultMatFetiAugmented(Mat mat, Vec x, Vec y);
......
......@@ -32,6 +32,16 @@ namespace AMDiS {
class PetscSolverFeti;
enum FetiSolverType {
// Standard exact FETI-DP system
EXACT,
// Inexact FETI-DP
INEXACT,
// Inexact reduced FETI-DP
INEXACT_REDUCED
};
/** \brief
* This structure is used when defining the MatShell operation for solving
* primal schur complement. \ref petscMultMatSchurPrimal
......@@ -98,6 +108,32 @@ namespace AMDiS {
};
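/// Context data of the MatShell used by the inexact FETI-DP operator,
/// see \ref petscMultMatFetiInexact.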
struct FetiInexactData {
Mat *matBB, *matBPi, *matPiB, *matPiPi;
Mat *mat_lagrange;
Vec tmp_vec_b0, tmp_vec_b1;
};
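/// Context data of the PCShell used by the inexact FETI-DP preconditioner,
/// see \ref pcInexactFetiShell.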
struct FetiInexactPreconData {
KSP ksp_schur;
KSP ksp_interior;
KSP ksp_pc_feti;
PC pc_feti;
Mat *matPiB, *matBPi;
Mat *mat_lagrange;
Vec tmp_vec_b0;
};
struct FetiDirichletPreconData {
/// Matrix of scaled Lagrange variables.
Mat *mat_lagrange_scaled;
......
......@@ -413,9 +413,6 @@ namespace AMDiS {
{
FUNCNAME("PetscSolverGlobalMatrix::solveGlobal()");
double wtime = MPI::Wtime();
double t0 = 0.0, t1 = 0.0;
Vec tmp;
if (domainComm.Get_size() == 1)
interiorMap->createLocalVec(tmp);
......@@ -432,13 +429,7 @@ namespace AMDiS {
VecRestoreArray(rhs, &rhsValues);
VecRestoreArray(tmp, &tmpValues);
t0 = MPI::Wtime() - wtime;
wtime = MPI::Wtime();
KSPSolve(kspInterior, tmp, tmp);
t1 = MPI::Wtime() - wtime;
wtime = MPI::Wtime();
VecGetArray(tmp, &tmpValues);
VecGetArray(sol, &rhsValues);
......@@ -450,9 +441,6 @@ namespace AMDiS {
VecRestoreArray(tmp, &tmpValues);
VecDestroy(&tmp);
t0 += MPI::Wtime() - wtime;
// MSG("TIMEING: %.5f %.5f\n", t0, t1);
}
......@@ -540,7 +528,7 @@ namespace AMDiS {
}
void PetscSolverGlobalMatrix::exitSolver(KSP ksp)
void PetscSolverGlobalMatrix::exitSolver(KSP &ksp)
{
FUNCNAME("PetscSolverGlobalMatrix::exitSolver()");
......@@ -871,6 +859,8 @@ namespace AMDiS {
MatNullSpaceCreate(domainComm, PETSC_FALSE, 1, &nullSpaceBasis, &matNullSpace);
KSPSetNullSpace(ksp, matNullSpace);
MatNullSpaceDestroy(&matNullSpace);
VecDestroy(&nullSpaceBasis);
}
......
......@@ -100,7 +100,7 @@ namespace AMDiS {
virtual void initSolver(KSP &ksp);
virtual void exitSolver(KSP ksp);
virtual void exitSolver(KSP &ksp);
virtual void initPreconditioner(PC pc);
......
......@@ -104,6 +104,8 @@ namespace AMDiS {
FUNCNAME("PetscSolverNavierStokes::initSolver()");
// Create FGMRES based outer solver
MSG("CREATE POS 1: %p\n", &ksp);
KSPCreate(domainComm, &ksp);
KSPSetOperators(ksp, getMatInterior(), getMatInterior(), SAME_NONZERO_PATTERN);
KSPMonitorSet(ksp, KSPMonitorTrueResidualNorm, PETSC_NULL, PETSC_NULL);
......@@ -136,6 +138,7 @@ namespace AMDiS {
PCFieldSplitSetSchurFactType(pc, PC_FIELDSPLIT_SCHUR_FACT_FULL);
createFieldSplit(pc, "velocity", velocityComponents);
createFieldSplit(pc, "pressure", pressureComponent);
PCSetFromOptions(pc);
KSPSetUp(kspInterior);
......@@ -174,7 +177,6 @@ namespace AMDiS {
if (pressureNullSpace)
setConstantNullSpace(kspSchur);
// === Mass matrix solver ===
const FiniteElemSpace *pressureFeSpace = componentSpaces[pressureComponent];
......@@ -324,8 +326,11 @@ namespace AMDiS {
FUNCNAME("PetscSolverNavierStokes::exitPreconditioner()");
massMatrixSolver->destroyMatrixData();
massMatrixSolver->destroyVectorData();
laplaceMatrixSolver->destroyMatrixData();
laplaceMatrixSolver->destroyVectorData();
conDifMatrixSolver->destroyMatrixData();
conDifMatrixSolver->destroyVectorData();
delete massMatrixSolver;
massMatrixSolver = NULL;
......