Commit ca0597ef authored by Simon Praetorius
Browse files

add missing utilities

parent 60733ab7
......@@ -24,3 +24,4 @@ add_subdirectory(geometries)
add_subdirectory(gridfunctions)
add_subdirectory(surfacedistance)
add_subdirectory(test)
add_subdirectory(utility)
# Install the utility headers so downstream modules can include them as
# <dune/curvedsurfacegrid/utility/...>.
# NOTE(review): the header added by this commit carries the include guard
# DUNE_CURVEDSURFACEGRID_VERTEXCACHE_HH (i.e. vertexcache.hh), but the install
# list names vertexcount.hh — verify this is not a typo, otherwise the new
# header will not be installed.
install(FILES
kdtree.hh
vertexcount.hh
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dune/curvedsurfacegrid/utility)
#ifndef DUNE_CURVEDSURFACEGRID_KDTREE_HH
#define DUNE_CURVEDSURFACEGRID_KDTREE_HH
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

#include <nanoflann.hpp>

#include <dune/common/fvector.hh>
#include <dune/common/timer.hh>
namespace Dune {
/// \brief A simple vector-of-vectors adaptor for nanoflann, without duplicating the storage.
template <class T>
class KDTree
{
using WorldVector = FieldVector<T,3>;
/// \brief Type of the container to store the points
using VectorOfVectorsType = std::vector<WorldVector>;
/// \brief The type of the point coordinates (typically, double or float).
using num_t = T;
/// \brief The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc.
using Distance = nanoflann::metric_L2;
/// \brief The type for indices in the KD-tree index (typically, size_t of int)
using IndexType = std::size_t;
/// \brief If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations.
static constexpr int DIM = WorldVector::size();
private:
using self_t = KDTree;
using metric_t = typename Distance::template traits<num_t,self_t>::distance_t;
using index_t = nanoflann::KDTreeSingleIndexAdaptor<metric_t,self_t,DIM,IndexType>;
private:
/// \brief The kd-tree index for the user to call its methods as usual with any other FLANN index.
std::unique_ptr<index_t> index_;
/// \brief The container storing all the points
const VectorOfVectorsType* data_;
/// \brief bounding box
WorldVector minCorner_;
WorldVector maxCorner_;
public:
/// Constructor: takes a const ref to the vector of vectors object with the data points
KDTree (const VectorOfVectorsType& mat, const WorldVector& minCorner, const WorldVector& maxCorner, int leaf_max_size = 10)
: data_(&mat)
, minCorner_(minCorner)
, maxCorner_(maxCorner)
{
index_ = std::make_unique<index_t>(DIM, *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size));
}
/// \brief Must be called before the index can be used
void update ()
{
Timer t;
assert(data_->size() != 0 && (*data_)[0].size() != 0);
index_->buildIndex();
std::cout << "update kdtree needed " << t.elapsed() << " seconds" << std::endl;
}
void update (const VectorOfVectorsType& mat)
{
data_ = &mat;
}
/// \brief Query for the numClosest closest points to a given queryPoint
void query (const WorldVector& queryPoint, std::size_t numClosest, std::vector<IndexType>& outIndices, std::vector<num_t>& outDistancesSq) const
{
nanoflann::KNNResultSet<num_t,IndexType> resultSet(numClosest);
resultSet.init(outIndices.data(), outDistancesSq.data());
index_->findNeighbors(resultSet, queryPoint.data(), nanoflann::SearchParams());
}
/// \brief Search for all point in a given radius around the queryPoint. Store the indices and the squared distance in the `matches` output vector
void query (const WorldVector& queryPoint, num_t radius, std::vector<std::pair<IndexType,num_t>>& matches) const
{
index_->radiusSearch(queryPoint.data(), radius, matches, nanoflann::SearchParams());
}
/** @name Interface expected by KDTreeSingleIndexAdaptor
* @{ */
const self_t& derived () const { return *this; }
self_t& derived () { return *this; }
// Must return the number of data points
std::size_t kdtree_get_point_count () const
{
return data_->size();
}
// Returns the dim'th component of the idx'th point in the class:
num_t kdtree_get_pt (std::size_t idx, std::size_t dim) const
{
return (*data_)[idx][dim];
}
// Optional bounding-box computation: return false to default to a standard bbox computation loop.
// Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again.
// Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds)
template <class BBOX>
bool kdtree_get_bbox (BBOX& bb) const
{
for (int i = 0; i < DIM; ++i) {
bb[i].low = minCorner_[i];
bb[i].high = maxCorner_[i];
}
return true;
}
/** @} */
}; // end of KDTree
} // end namespace Dune
#endif // DUNE_CURVEDSURFACEGRID_KDTREE_HH
#ifndef DUNE_CURVEDSURFACEGRID_VERTEXCACHE_HH
#define DUNE_CURVEDSURFACEGRID_VERTEXCACHE_HH
#include <dune/common/fvector.hh>
#define VERTEXCACHE_DEBUG_MODE 0
namespace Dune {
/// \brief Octree-based spatial map from 3d vertex coordinates to values of type `T`
/// with epsilon-tolerant lookup — behaves like a fuzzy map<Vertex, T>.
///
/// Entries are stored flat in a leaf node until `partition_threshold` is exceeded;
/// the leaf is then split into 8 octants (up to `depth_threshold` levels deep).
/// `find` compares coordinates component-wise with tolerance `epsilon`, so two
/// vertices that differ only by floating-point noise resolve to the same entry.
template <class T, class FT>
class VertexCache
{
using Self = VertexCache<T, FT>;
using Vertex = FieldVector<FT, 3>;

// maximum number of entries a leaf may hold before it is split into octants
const unsigned partition_threshold = 64;
// maximum octree depth; leaves at this depth keep growing instead of splitting
const unsigned depth_threshold = 7;
// component-wise tolerance used when comparing vertex coordinates in find()
const FT epsilon = std::sqrt(std::numeric_limits<FT>::epsilon());

unsigned depth_;  // depth of this node in the octree (root = 0)
Vertex cbeg_, cmid_, cend_; //< coordinates: box corners and precomputed center
std::vector<VertexCache<T, FT>> partitions_;  // the 8 octants once split; empty while leaf
std::vector<std::pair<Vertex, T>> entries_; //< VertexCache behaves like a map<Vertex, T>

public:
/// \brief Construct a (sub-)cache covering the axis-aligned box [cbeg, cend].
VertexCache (const Vertex& cbeg, const Vertex& cend, unsigned depth = 0)
: depth_(depth)
, cbeg_(cbeg)
, cmid_((cbeg + cend) / 2.0)
, cend_(cend)
{}

/// \brief Copy constructor: deep-copies sub-partitions and entries.
VertexCache (const Self& other)
: depth_(other.depth_)
, cbeg_(other.cbeg_)
, cmid_(other.cmid_)
, cend_(other.cend_)
, partitions_(other.partitions_)
, entries_(other.entries_)
{}

/// \brief Move constructor: steals sub-partitions and entries.
VertexCache (Self&& other)
: depth_(other.depth_)
, cbeg_(other.cbeg_)
, cmid_(other.cmid_)
, cend_(other.cend_)
, partitions_(std::move(other.partitions_))
, entries_(std::move(other.entries_))
{}

// assignment is disabled (the class carries const configuration members,
// so compiler-generated assignment would be ill-formed anyway)
Self& operator= (const Self&) = delete;
Self& operator= (Self&&) = delete;

/// \brief Drop all entries and sub-partitions; the bounding box is kept.
void clear ()
{
entries_.clear();
partitions_.clear();
}

/// \brief Split this leaf into 8 octants and redistribute its entries.
/// The octants are created in the fixed order p0..p7 that insert()/find()
/// rely on when indexing partitions_ by octant number.
void split ()
{
//p0: (beg, beg, beg) - (mid, mid, mid)
//p1: (mid, beg, beg) - (end, mid, mid)
//p2: (beg, mid, beg) - (mid, end, mid)
//p3: (mid, mid, beg) - (end, end, mid)
//p4: (beg, beg, mid) - (mid, mid, end)
//p5: (mid, beg, mid) - (end, mid, end)
//p6: (beg, mid, mid) - (mid, end, end)
//p7: (mid, mid, mid) - (end, end, end)
partitions_.reserve(8);
Vertex c0 = cbeg_, c1 = cmid_;
partitions_.emplace_back(c0, c1, depth_ + 1); //p0
c0[0] = cmid_[0];
c1[0] = cend_[0];
partitions_.emplace_back(c0, c1, depth_ + 1); //p1
c0[0] = cbeg_[0];
c1[0] = cmid_[0];
c0[1] = cmid_[1];
c1[1] = cend_[1];
partitions_.emplace_back(c0, c1, depth_ + 1); //p2
c0[0] = cmid_[0];
c1[0] = cend_[0];
partitions_.emplace_back(c0, c1, depth_ + 1); //p3
c0[0] = cbeg_[0];
c1[0] = cmid_[0];
c0[1] = cbeg_[1];
c1[1] = cmid_[1];
c0[2] = cmid_[2];
c1[2] = cend_[2];
partitions_.emplace_back(c0, c1, depth_ + 1); //p4
c0[0] = cmid_[0];
c1[0] = cend_[0];
partitions_.emplace_back(c0, c1, depth_ + 1); //p5
c0[0] = cbeg_[0];
c1[0] = cmid_[0];
c0[1] = cmid_[1];
c1[1] = cend_[1];
partitions_.emplace_back(c0, c1, depth_ + 1); //p6
partitions_.emplace_back(cmid_, cend_, depth_ + 1); //p7
// re-insert the stored entries; since partitions_ is now non-empty,
// insert() routes each entry into the matching octant
for(auto it = entries_.begin(), end = entries_.end(); it != end; ++it) {
insert(it->first, it->second);
}
entries_.clear();
}

/// \brief Insert a vertex/value pair.
/// In a leaf the pair is appended (splitting when the threshold is exceeded);
/// otherwise the vertex is routed to exactly one octant by comparing each
/// coordinate against the box center.
void insert (const Vertex& v, const T& data)
{
if (partitions_.empty()) {
entries_.push_back(std::pair<Vertex, T>(v, data));
if (entries_.size() > partition_threshold && depth_ < depth_threshold) split();
} else {
if (v[2] <= cmid_[2]) { //p0, p1, p2, p3
if (v[1] <= cmid_[1]) { //p0, p1
if (v[0] <= cmid_[0]) partitions_[0].insert(v, data);
else partitions_[1].insert(v, data);
} else { //p2, p3
if(v[0] <= cmid_[0]) partitions_[2].insert(v, data);
else partitions_[3].insert(v, data);
}
} else { //p4, p5, p6, p7
if (v[1] <= cmid_[1]) { //p4, p5
if (v[0] <= cmid_[0]) partitions_[4].insert(v, data);
else partitions_[5].insert(v, data);
} else { //p6, p7
if(v[0] <= cmid_[0]) partitions_[6].insert(v, data);
else partitions_[7].insert(v, data);
}
}
}
}

/// \brief Look up a vertex with tolerance `epsilon`; on success store the
/// mapped value in `result` and return true.
/// In an inner node every octant whose boundary lies within epsilon of `v`
/// is searched, so hits near a splitting plane are not missed even when
/// insert() placed the entry on the other side of the plane.
bool find (const Vertex& v, T& result)
{
if (partitions_.empty()) {
for (auto it = entries_.begin(), end = entries_.end(); it != end; ++it) {
Vertex &t = it->first;
// NOTE(review): unqualified fabs relies on a transitively included
// <math.h>; std::fabs with an explicit <cmath> include would be more
// robust — verify the include chain.
if (fabs(t[0] - v[0]) <= epsilon && fabs(t[1] - v[1]) <= epsilon
&& fabs(t[2] - v[2]) <= epsilon) {
result = it->second;
return true;
}
}
} else {
if (v[2] - epsilon <= cmid_[2]) { //p0, p1, p2, p3
if (v[1] - epsilon <= cmid_[1]) { //p0, p1
if (v[0] - epsilon <= cmid_[0]) if (partitions_[0].find(v, result)) return true;
if (v[0] + epsilon >= cmid_[0]) if (partitions_[1].find(v, result)) return true;
}
if (v[1] + epsilon >= cmid_[1]) { //p2, p3
if (v[0] - epsilon <= cmid_[0]) if (partitions_[2].find(v, result)) return true;
if (v[0] + epsilon >= cmid_[0]) if (partitions_[3].find(v, result)) return true;
}
}
if (v[2] + epsilon >= cmid_[2]) { //p4, p5, p6, p7
if (v[1] - epsilon <= cmid_[1]) { //p4, p5
if (v[0] - epsilon <= cmid_[0]) if (partitions_[4].find(v, result)) return true;
if( v[0] + epsilon >= cmid_[0]) if (partitions_[5].find(v, result)) return true;
}
if (v[1] + epsilon >= cmid_[1]) { //p6, p7
if (v[0] - epsilon <= cmid_[0]) if (partitions_[6].find(v, result)) return true;
if (v[0] + epsilon >= cmid_[0]) if (partitions_[7].find(v, result)) return true;
}
}
}
return false;
}

#if VERTEXCACHE_DEBUG_MODE != 0
/// \brief Print aggregate statistics of the tree (node/leaf/entry counts,
/// leaf-occupancy average and standard deviation) to std::cout.
void debug_output_statistics () const
{
std::cout << "\nVertexCache statistics:" << std::endl;
size_t node_count = 0, leaf_count = 0, crowded_leaf_count = 0, entry_count = 0,
max_leaf_entries = 0;
debug_statistics_first_run(node_count, leaf_count, crowded_leaf_count,
entry_count, max_leaf_entries);
double avg = (double)entry_count / leaf_count;
double std_dev = 0.0;
if(entry_count > 0){
debug_statistics_second_run(avg, entry_count, std_dev);
std_dev = sqrt(std_dev);
}
std::cout << " node-count: " << node_count << std::endl;
std::cout << " leaf-count: " << leaf_count
<< " (" << (double)leaf_count / node_count * 100.0 << "%)" << std::endl;
std::cout << " crowded leaf-count: " << crowded_leaf_count
<< " (" << (double)crowded_leaf_count / leaf_count * 100.0 << "%)" << std::endl;
std::cout << " entry-count: " << entry_count << std::endl;
std::cout << " max. leaf-entries: " << max_leaf_entries << std::endl;
std::cout << " avg. leaf-entries: " << avg << std::endl;
std::cout << " std. deviation: " << std_dev << std::endl;
}

/// \brief First recursive pass: accumulate node, leaf and entry counts.
void debug_statistics_first_run (size_t &node_count, size_t &leaf_count,
size_t &crowded_leaf_count, size_t &entry_count, size_t &max_leaf_entries) const
{
++node_count;
if(partitions_.empty()){
++leaf_count;
entry_count += entries_.size();
max_leaf_entries = std::max(max_leaf_entries, entries_.size());
if(entries_.size() > partition_threshold) ++crowded_leaf_count;
}else{
for(int i = 0; i < 8; ++i){
partitions_[i].debug_statistics_first_run(node_count, leaf_count,
crowded_leaf_count, entry_count, max_leaf_entries);
}
}
}

/// \brief Second recursive pass: accumulate the (normalized) squared deviation
/// of each leaf's entry count from the average, for the std-deviation.
void debug_statistics_second_run (double avg, size_t entry_count, double &std_dev_sum) const
{
if (partitions_.empty()) {
double deviation = (double)entries_.size() - avg;
std_dev_sum += (deviation * deviation) / entry_count;
} else {
for (int i = 0; i < 8; ++i) {
partitions_[i].debug_statistics_second_run(avg, entry_count, std_dev_sum);
}
}
}

/// \brief Recursively dump the whole tree (octant structure and entries).
void debug_output (int indentation=0) const
{
std::string id_str = std::string(indentation, ' ');
if (indentation == 0) std::cout << "\nVertexCache:" << std::endl;
if (partitions_.empty()) {
for (auto it = entries_.begin(), end = entries_.end(); it != end; ++it) {
std::cout << id_str << it->first << " -> " << it->second << std::endl;
}
} else {
for (int i = 0; i < 8; ++i) {
std::cout << id_str << "p" << i << "{" << std::endl;
partitions_[i].debug_output(indentation + 2);
std::cout << id_str << "}" << std::endl;
}
}
}
#endif
};
} // end namespace Dune
#endif // DUNE_CURVEDSURFACEGRID_VERTEXCACHE_HH
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment