// ParallelIndexSet.hpp
#pragma once

#include <cassert>
#include <vector>

#include <dune/common/timer.hh>

#include <amdis/Output.hpp>
#include <amdis/linearalgebra/AttributeSet.hpp>

#if HAVE_MPI
#include <dune/grid/common/gridenums.hh>
#include <dune/grid/common/partitionset.hh>

#include <amdis/Environment.hpp>
#include <amdis/functions/GlobalIdSet.hpp>
#include <amdis/utility/UniqueBorderPartition.hpp>
#endif

namespace AMDiS
{
  /// Fills the parallel index set \p parallelIndexSet with the DOF indices of the global \p basis.
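  ///
  /// A minimal usage sketch (an assumption for illustration: the index set is a
  /// Dune::ParallelIndexSet from dune-common with the owner/overlap/copy
  /// attributes from dune-istl; the concrete global id type depends on the
  /// linear-algebra backend, std::size_t is used here only as a placeholder):
  /// \code
  ///   using Attribute = Dune::OwnerOverlapCopyAttributeSet::AttributeSet;
  ///   using LI  = Dune::ParallelLocalIndex<Attribute>;
  ///   using PIS = Dune::ParallelIndexSet<std::size_t, LI>;
  ///
  ///   PIS pis;
  ///   buildParallelIndexSet(basis, pis);
  ///   // pis can now be used to set up remote-index information,
  ///   // e.g. with Dune::RemoteIndices, for interprocess communication.
  /// \endcode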
  template <class Basis, class PIS>
  inline void buildParallelIndexSet(Basis const& basis, PIS& parallelIndexSet)
  {
    Dune::Timer t;
    using Attribute = typename AttributeSet<PIS>::type;
    using GI = typename PIS::GlobalIndex;
    using LI = typename PIS::LocalIndex;

#if HAVE_MPI
    if (Environment::mpiSize() > 1) // parallel indexset
    {
      auto const& gv = basis.gridView();
      auto lv = basis.localView();

      // create a disjoint partition of the border entities among the ranks
      using GridView = typename Basis::GridView;
      using Grid = typename GridView::Grid;
      using DataHandle = UniqueBorderPartition<Grid>;
      DataHandle borderEntities(gv.comm().rank(), gv.grid());
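      // Let the data handle decide a unique owner rank for every border
      // entity; several communication rounds may be required until this
      // decision has propagated to all neighboring ranks.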
      for (int i = 0; i < borderEntities.numIterations(); ++i) {
        gv.communicate(borderEntities,
          Dune::InterfaceType::InteriorBorder_All_Interface,
          Dune::CommunicationDirection::ForwardCommunication);
      }

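      // The GlobalBasisIdSet provides a globally unique id for each basis
      // function. Since a DOF may be seen from several elements, 'visited'
      // guarantees that it is added to the index set only once.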
      std::vector<bool> visited(basis.dimension(), false);
      GlobalBasisIdSet<Basis> dofIdSet(basis);
      parallelIndexSet.beginResize();
      for (auto const& e : elements(gv))
      {
        lv.bind(e);
        dofIdSet.bind(e);
        for (std::size_t i = 0; i < dofIdSet.size(); ++i)
        {
          auto localIndex = lv.index(i);
          if (!visited[localIndex]) {
            auto globalId = dofIdSet.id(i);
            using PType = Dune::PartitionType;
            PType pt = dofIdSet.partitionType(i);
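            // Map the partition type of the entity the DOF is attached to
            // onto a parallel index attribute: interior DOFs are owned by
            // this rank, border DOFs by exactly one of the sharing ranks,
            // overlap DOFs are marked as overlap, and front/ghost DOFs are
            // remote copies. The last constructor argument marks the index
            // as public, i.e., potentially known to other ranks.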
            switch (pt)
            {
            case PType::InteriorEntity:
              parallelIndexSet.add(globalId, LI(localIndex, Attribute::owner, true));
              break;
            case PType::BorderEntity:
              if (borderEntities.contains(dofIdSet.entityId(i)))
                parallelIndexSet.add(globalId, LI(localIndex, Attribute::owner, true));
              else
                parallelIndexSet.add(globalId, LI(localIndex, Attribute::overlap, true));
              break;
            case PType::OverlapEntity:
              parallelIndexSet.add(globalId, LI(localIndex, Attribute::overlap, true));
              break;
            case PType::FrontEntity:
            case PType::GhostEntity:
              parallelIndexSet.add(globalId, LI(localIndex, Attribute::copy, true));
              break;
            default:
              error_exit("Unknown partition type.");
            }

            visited[localIndex] = true;
          }
        }
        dofIdSet.unbind();
        lv.unbind();
      }
      parallelIndexSet.endResize();
    }
    else // sequential indexset
#endif // HAVE_MPI
    {
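      // In a sequential run this rank owns all DOFs and the global index
      // coincides with the local one.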
      parallelIndexSet.beginResize();
      for (std::size_t localIndex = 0; localIndex < basis.dimension(); ++localIndex)
      {
        GI globalId{std::size_t(localIndex)};
        parallelIndexSet.add(globalId, LI(localIndex, Attribute::owner, true));
      }
      parallelIndexSet.endResize();
    }
    // check that all indices have been inserted into the index set
    assert(parallelIndexSet.size() == basis.dimension());

    info(2, "building the ParallelIndexSet needed {} seconds", t.elapsed());
  }

} // end namespace AMDiS