// DOFMapping.inc.hpp

#pragma once

#include <iostream>
#include <numeric>
#include <utility>
#include <vector>

#include <dune/common/timer.hh>

#if HAVE_MPI
#include <amdis/common/parallel/Collective.hpp>
#include <amdis/common/parallel/RequestOperations.hpp>
#endif

namespace AMDiS {

#if HAVE_MPI

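/// (Re)build the mapping from local to global DOF indices.
/// The communication object `c` must provide the parallel index set and the
/// remote indices. Owner DOFs are numbered consecutively on each rank, the
/// local sizes are gathered to compute per-rank offsets, and the global indices
/// of owner DOFs are exchanged with neighboring ranks to number the ghost
/// (non-owner) DOFs.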
template <class PIS, class GI>
  template <class Communication>
void ParallelDofMapping<PIS,GI>::
update(Communication& c)
{
  Dune::Timer t;
  Mpi::Communicator world(Environment::comm());

  // clear all vectors and reset sizes
  reset();

  // 1. insert and number owner DOFs
  globalIndices_.resize(c.indexSet().size(), 0);
  owner_.resize(c.indexSet().size(), false);
  localSize_ = 0;
  ghostSize_ = 0;
  for (auto const& ip : c.indexSet()) {
    if (ip.local().attribute() == Attribute::owner) {
      size_type idx = ip.local();
      globalIndices_[idx] = localSize_++;
      owner_[idx] = true;
    } else {
      ghostSize_++;
    }
  }

  // communicate the local sizes from all processors
  Mpi::all_gather(world, localSize_, sizes_);

  // at which global index do the local partitions start
  starts_.resize(world.size() + 1);
  starts_[0] = 0;
  std::partial_sum(sizes_.begin(), sizes_.end(), starts_.begin()+1);
  globalSize_ = starts_.back();
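  // Example with 3 ranks: sizes_ = {4, 3, 5}  =>  starts_ = {0, 4, 7, 12},
  // i.e., rank 1 owns the global indices [4, 7) and globalSize_ = 12.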

  // obtain global indices by shifting the local indices by the start position of this rank
  for (auto& i : globalIndices_)
    i += starts_[world.rank()];

  // Build up the communication of overlap DOFs that do not yet have a global index
  // assigned: send the global index of every already numbered owner DOF to the
  // neighboring remote processors and receive, in return, the global indices of
  // their owner DOFs.
  using GlobalAssoc = std::pair<typename ParallelIndexSet::GlobalIndex, size_type>; // {globalId, globalIndex}
  std::vector<std::vector<GlobalAssoc>> sendList(world.size());
  std::vector<std::size_t> receiveList(world.size(), 0);

  // Use the attributes stored in the remote indices to decide, for each neighboring
  // rank, which owner DOFs to send and how many ghost DOFs to receive
  for (auto const& rim : c.remoteIndices()) {
    int p = rim.first;

    auto* sourceRemoteIndexList = rim.second.first;
    auto* targetRemoteIndexList = rim.second.second;

    // send to overlap
    for (auto const& ri : *sourceRemoteIndexList) {
      auto const& lip = ri.localIndexPair();
      Attribute remoteAttr = ri.attribute();
      Attribute myAttr = lip.local().attribute();
      if (myAttr == Attribute::owner && remoteAttr != Attribute::owner) {
        size_type globalIndex = globalIndices_[size_type(lip.local())];
        sendList[p].push_back({lip.global(), globalIndex});
      }
    }

    // receive from owner
    for (auto const& ri : *targetRemoteIndexList) {
      auto const& lip = ri.localIndexPair();
      Attribute remoteAttr = ri.attribute();
      Attribute myAttr = lip.local().attribute();
      if (myAttr != Attribute::owner && remoteAttr == Attribute::owner) {
        receiveList[p]++;
      }
    }
  }
  // all ghost DOFs must be communicated!
  assert(ghostSize_ == std::accumulate(receiveList.begin(), receiveList.end(), std::size_t(0)));

  // send {globalId, globalIndex} to remote processors
  std::vector<Mpi::Request> sendRequests;
  for (int p = 0; p < world.size(); ++p) {
    if (!sendList[p].empty()) {
      sendRequests.emplace_back( world.isend(sendList[p], p, tag_) );
    }
  }

  // receive {globalId, globalIndex} from remote processors
  std::vector<Mpi::Request> recvRequests;
  std::vector<std::vector<GlobalAssoc>> recvData(world.size());
  for (int p = 0; p < world.size(); ++p) {
    if (receiveList[p] > 0)
      recvRequests.emplace_back( world.irecv(recvData[p], p, tag_) );
  }

  Mpi::wait_all(recvRequests.begin(), recvRequests.end());

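  // ghostIndices_ stores the global index of each ghost DOF in the order it is
  // received; ghostLocalIndices_ maps a local index to its position in this list
  // and remains LocalIndex(-1) for owner DOFs.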
  ghostIndices_.reserve(ghostSize_);
  ghostLocalIndices_.resize(c.indexSet().size(), LocalIndex(-1));

  // insert all remote global indices into the map
  std::size_t counter = 0;
  for (int p = 0; p < world.size(); ++p) {
    auto const& data = recvData[p];
    assert(data.size() == receiveList[p]);
    for (auto const& d : data) {
      typename PIS::IndexPair const& l = c.indexSet().at(d.first);
      assert(!owner_[size_type(l.local())]);

      globalIndices_[size_type(l.local())] = d.second;
      ghostIndices_.push_back(d.second);
      ghostLocalIndices_[size_type(l.local())] = counter++;
    }
  }
  assert(counter == ghostSize_);
  assert(ghostSize_ + localSize_ == c.indexSet().size());

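  // wait until all sends have completed, so that the send buffers can be released safely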
  Mpi::wait_all(sendRequests.begin(), sendRequests.end());
  msg("update DofMapping need {} sec", t.elapsed());
}


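/// Print the sizes of the internal index containers of this rank to std::cout.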
template <class PIS, class GI>
void ParallelDofMapping<PIS,GI>::
debug() const
{
  int p = Environment::mpiRank();
  std::cout << "[" << p << "]  sizes_.size()=" << sizes_.size() << ", my_size=" << sizes_[p] << std::endl;
  std::cout << "[" << p << "]  starts_.size()=" << starts_.size() << ", my_start=" << starts_[p] << std::endl;
  std::cout << "[" << p << "]  localSize_=" << localSize_ << ", globalSize_=" << globalSize_ << ", ghostSize_=" << ghostSize_ << std::endl;
  std::cout << "[" << p << "]  globalIndices_.size()=" << globalIndices_.size() << std::endl;
  std::cout << "[" << p << "]  ghostIndices_.size()=" << ghostIndices_.size() << std::endl;
  std::cout << "[" << p << "]  ghostLocalIndices_.size()=" << ghostLocalIndices_.size() << std::endl;
  std::cout << "[" << p << "]  owner_.size()=" << owner_.size() << std::endl;
}
#endif

} // end namespace AMDiS