Generalized mph_map for trade-offs.

commit d5b579fbd6
parent 8ebb9da1ab
@@ -34,7 +34,7 @@ LDFLAGS="$LIBM $LDFLAGS"
CFLAGS="-Wall"

AC_PROG_CXX
CXXFLAGS="-Wall -Wno-unused-function -DNDEBUG -O3 -fomit-frame-pointer $CXXFLAGS"
CXXFLAGS="$CXXFLAGS -Wall -Wno-unused-function -DNDEBUG -O3 -fomit-frame-pointer"
AC_ENABLE_CXXMPH
if test x$cxxmph = xtrue; then
AC_COMPILE_STDCXX_0X
@@ -4,16 +4,13 @@
#include "bm_common.h"
#include "mph_map.h"

using cxxmph::mph_map;
using std::string;
using std::unordered_map;

// Another reference benchmark:
// http://blog.aggregateknowledge.com/tag/bigmemory/

namespace cxxmph {


template <class MapType, class T>
const T* myfind(const MapType& mymap, const T& k) {
auto it = mymap.find(k);
@@ -100,13 +97,24 @@ using namespace cxxmph;

int main(int argc, char** argv) {
srandom(4);
Benchmark::Register(new BM_CreateUrls<dense_hash_map<StringPiece, StringPiece>>("URLS100k"));
Benchmark::Register(new BM_CreateUrls<std::unordered_map<StringPiece, StringPiece>>("URLS100k"));
Benchmark::Register(new BM_CreateUrls<mph_map<StringPiece, StringPiece>>("URLS100k"));
Benchmark::Register(new BM_CreateUrls<unordered_map<StringPiece, StringPiece>>("URLS100k"));
Benchmark::Register(new BM_CreateUrls<sparse_hash_map<StringPiece, StringPiece>>("URLS100k"));

Benchmark::Register(new BM_SearchUrls<dense_hash_map<StringPiece, StringPiece>>("URLS100k", 10*1000 * 1000, 0));
Benchmark::Register(new BM_SearchUrls<std::unordered_map<StringPiece, StringPiece, Murmur3StringPiece>>("URLS100k", 10*1000 * 1000, 0));
Benchmark::Register(new BM_SearchUrls<mph_map<StringPiece, StringPiece>>("URLS100k", 10*1000 * 1000, 0));
Benchmark::Register(new BM_SearchUrls<unordered_map<StringPiece, StringPiece, Murmur3StringPiece>>("URLS100k", 10*1000 * 1000, 0));
Benchmark::Register(new BM_SearchUrls<sparse_hash_map<StringPiece, StringPiece>>("URLS100k", 10*1000 * 1000, 0));

Benchmark::Register(new BM_SearchUrls<dense_hash_map<StringPiece, StringPiece>>("URLS100k", 10*1000 * 1000, 0.9));
Benchmark::Register(new BM_SearchUrls<std::unordered_map<StringPiece, StringPiece, Murmur3StringPiece>>("URLS100k", 10*1000 * 1000, 0.9));
Benchmark::Register(new BM_SearchUrls<mph_map<StringPiece, StringPiece>>("URLS100k", 10*1000 * 1000, 0.9));
Benchmark::Register(new BM_SearchUrls<unordered_map<StringPiece, StringPiece, Murmur3StringPiece>>("URLS100k", 10*1000 * 1000, 0.9));
Benchmark::Register(new BM_SearchUrls<sparse_hash_map<StringPiece, StringPiece>>("URLS100k", 10*1000 * 1000, 0.9));

Benchmark::Register(new BM_SearchUint64<dense_hash_map<uint64_t, uint64_t>>);
Benchmark::Register(new BM_SearchUint64<std::unordered_map<uint64_t, uint64_t>>);
Benchmark::Register(new BM_SearchUint64<mph_map<uint64_t, uint64_t>>);
Benchmark::Register(new BM_SearchUint64<unordered_map<uint64_t, uint64_t>>);
Benchmark::Register(new BM_SearchUint64<sparse_hash_map<uint64_t, uint64_t>>);
Benchmark::RunAll();
}
@@ -48,8 +48,8 @@ namespace cxxmph {

class MPHIndex {
public:
MPHIndex(double c = 1.23, uint8_t b = 7) :
c_(c), b_(b), m_(0), n_(0), k_(0), r_(1),
MPHIndex(bool square = false, double c = 1.23, uint8_t b = 7) :
c_(c), b_(b), m_(0), n_(0), k_(0), square_(square), r_(1),
ranktable_(NULL), ranktable_size_(0) { }
~MPHIndex();
@@ -66,6 +66,8 @@ class MPHIndex {
uint32_t perfect_hash_size() const { return n_; }
template <class SeededHashFcn, class Key> // must agree with Reset
uint32_t perfect_hash(const Key& x) const; // way faster than the minimal
template <class SeededHashFcn, class Key> // must agree with Reset
uint32_t perfect_square(const Key& x) const; // even faster but needs square=true
uint32_t minimal_perfect_hash_size() const { return size(); }
template <class SeededHashFcn, class Key> // must agree with Reset
uint32_t minimal_perfect_hash(const Key& x) const;
@@ -93,6 +95,7 @@ class MPHIndex {
uint32_t m_; // edges count
uint32_t n_; // vertex count
uint32_t k_; // kth index in ranktable, $k = log_2(n=3r)\varepsilon$
bool square_; // make bit vector size a power of 2

// Values used during search

@@ -124,7 +127,7 @@ bool MPHIndex::Reset(
if ((r_ % 2) == 0) r_ += 1;
// This can be used to speed mods, but increases occupation too much.
// Needs to try http://gmplib.org/manual/Integer-Exponentiation.html instead
// r_ = nextpoweroftwo(r_);
if (square_) r_ = nextpoweroftwo(r_);
nest_displacement_[0] = 0;
nest_displacement_[1] = r_;
nest_displacement_[2] = (r_ << 1);
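When square is set, Reset() rounds r_ up to a power of two so that the three hash reductions can use a bit mask instead of an integer modulo (see perfect_square() below). A standalone sketch of that trade, with a hypothetical nextpoweroftwo stand-in for the helper assumed above: more buckets in exchange for a cheaper reduction.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the nextpoweroftwo() helper used by Reset();
// only here to keep the sketch self-contained.
static uint32_t nextpoweroftwo(uint32_t x) {
  uint32_t p = 1;
  while (p < x) p <<= 1;
  return p;
}

int main() {
  uint32_t r = 1000;                  // per-nest vertex range chosen by Reset()
  uint32_t r_sq = nextpoweroftwo(r);  // 1024 when square is requested

  for (uint32_t h = 0; h < 100000; ++h) {
    // For a power-of-two range, the modulo collapses into a single AND.
    assert(h % r_sq == (h & (r_sq - 1)));
  }
  std::printf("range grows from %u to %u buckets (~%.0f%% more memory) "
              "to replace %% with &\n",
              r, r_sq, 100.0 * (r_sq - r) / r);
  return 0;
}
```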
@@ -173,6 +176,21 @@ bool MPHIndex::Mapping(
return false;
}

template <class SeededHashFcn, class Key>
uint32_t MPHIndex::perfect_square(const Key& key) const {
if (!g_.size()) return 0;
h128 h = SeededHashFcn().hash128(key, hash_seed_[0]);
h[0] = (h[0] & (r_-1)) + nest_displacement_[0];
h[1] = (h[1] & (r_-1)) + nest_displacement_[1];
h[2] = (h[2] & (r_-1)) + nest_displacement_[2];
assert((h[0]) < g_.size());
assert((h[1]) < g_.size());
assert((h[2]) < g_.size());
uint8_t nest = threebit_mod3[g_[h[0]] + g_[h[1]] + g_[h[2]]];
uint32_t vertex = h[nest];
return vertex;
}

template <class SeededHashFcn, class Key>
uint32_t MPHIndex::perfect_hash(const Key& key) const {
if (!g_.size()) return 0;
@@ -180,17 +198,14 @@ uint32_t MPHIndex::perfect_hash(const Key& key) const {
h[0] = (h[0] % r_) + nest_displacement_[0];
h[1] = (h[1] % r_) + nest_displacement_[1];
h[2] = (h[2] % r_) + nest_displacement_[2];
// h[0] = (h[0] & (r_-1)) + nest_displacement_[0];
// h[1] = (h[1] & (r_-1)) + nest_displacement_[1];
// h[2] = (h[2] & (r_-1)) + nest_displacement_[2];
assert((h[0]) < g_.size());
assert((h[1]) < g_.size());
assert((h[2]) < g_.size());
uint8_t nest = threebit_mod3[
g_[h[0]] + g_[h[1]] + g_[h[2]]];
uint8_t nest = threebit_mod3[g_[h[0]] + g_[h[1]] + g_[h[2]]];
uint32_t vertex = h[nest];
return vertex;
}

template <class SeededHashFcn, class Key>
uint32_t MPHIndex::minimal_perfect_hash(const Key& key) const {
return Rank(perfect_hash<SeededHashFcn, Key>(key));
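minimal_perfect_hash() pays for its dense [0, size()) output range with a Rank() step on top of perfect_hash(). Rank() itself is outside this diff; the sketch below is only a generic rank-over-a-bit-vector illustration of how the sparse perfect-hash range gets compacted, not the library's ranktable_-based implementation.

```cpp
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <vector>

// Generic rank: number of occupied slots strictly below position pos. The
// real index keeps a precomputed rank table so this is O(1), not a scan.
static uint32_t rank(const std::vector<bool>& used, uint32_t pos) {
  uint32_t r = 0;
  for (uint32_t i = 0; i < pos; ++i) r += used[i];
  return r;
}

int main() {
  // Pretend perfect_hash() sent 4 keys to slots {1, 4, 5, 9} out of a range
  // of 10. rank() compacts those hits into the dense range [0, 4).
  std::vector<bool> used(10, false);
  for (uint32_t slot : {1u, 4u, 5u, 9u}) used[slot] = true;
  for (uint32_t slot : {1u, 4u, 5u, 9u})
    std::printf("perfect_hash=%u -> minimal_perfect_hash=%u\n",
                slot, rank(used, slot));
  return 0;
}
```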
@@ -206,15 +221,48 @@ uint32_t MPHIndex::index(const Key& key) const {
template <class Key, class HashFcn = typename seeded_hash<std::hash<Key>>::hash_function>
class SimpleMPHIndex : public MPHIndex {
public:
SimpleMPHIndex(bool advanced_usage = false) : MPHIndex(advanced_usage) {}
template <class ForwardIterator>
bool Reset(ForwardIterator begin, ForwardIterator end, uint32_t size) {
return MPHIndex::Reset<HashFcn>(begin, end, size);
}
uint32_t index(const Key& key) const { return MPHIndex::index<HashFcn>(key); }
uint32_t perfect_hash(const Key& key) const { return MPHIndex::perfect_hash<HashFcn>(key); }
uint32_t minimal_perfect_hash(const Key& key) const { return MPHIndex::minimal_perfect_hash<HashFcn>(key); }
};

// The parameters minimal and square trade memory usage for evaluation speed.
// Minimal decreases speed and memory usage, and square does the opposite.
// Using minimal=true and square=false is the same as SimpleMPHIndex.
template <bool minimal, bool square, class Key, class HashFcn>
struct FlexibleMPHIndex {};

template <class Key, class HashFcn>
struct FlexibleMPHIndex<true, false, Key, HashFcn>
: public SimpleMPHIndex<Key, HashFcn> {
FlexibleMPHIndex() : SimpleMPHIndex<Key, HashFcn>(false) {}
uint32_t index(const Key& key) const {
return MPHIndex::minimal_perfect_hash<HashFcn>(key); }
uint32_t size() const { return MPHIndex::minimal_perfect_hash_size(); }
};
template <class Key, class HashFcn>
struct FlexibleMPHIndex<false, true, Key, HashFcn>
: public SimpleMPHIndex<Key, HashFcn> {
FlexibleMPHIndex() : SimpleMPHIndex<Key, HashFcn>(true) {}
uint32_t index(const Key& key) const {
return MPHIndex::perfect_square<HashFcn>(key); }
uint32_t size() const { return MPHIndex::perfect_hash_size(); }
};
template <class Key, class HashFcn>
struct FlexibleMPHIndex<false, false, Key, HashFcn>
: public SimpleMPHIndex<Key, HashFcn> {
FlexibleMPHIndex() : SimpleMPHIndex<Key, HashFcn>(false) {}
uint32_t index(const Key& key) const {
return MPHIndex::index<HashFcn>(key); }
uint32_t size() const { return MPHIndex::perfect_hash_size(); }
};
// From a trade-off perspective this case does not make much sense.
// template <class Key, class HashFcn>
// class FlexibleMPHIndex<true, true, Key, HashFcn>

} // namespace cxxmph

#endif // __CXXMPH_MPH_INDEX_H__
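A rough usage sketch of the specializations above, assuming the surrounding cxxmph headers and the seeded_hash adapter referenced elsewhere in this diff; the header name and key setup are illustrative only, not taken from the commit.

```cpp
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

#include "mph_index.h"  // assumed header providing cxxmph::FlexibleMPHIndex

int main() {
  std::vector<std::string> keys{"foo", "bar", "baz", "qux"};
  using Hasher = cxxmph::seeded_hash<std::hash<std::string>>::hash_function;

  // minimal=true, square=false: indices land in the dense range [0, size()),
  // smallest tables, slowest evaluation (extra Rank step per query).
  cxxmph::FlexibleMPHIndex<true, false, std::string, Hasher> small;
  small.Reset(keys.begin(), keys.end(), keys.size());

  // minimal=false, square=true: sparse power-of-two range, biggest tables,
  // fastest evaluation (mask instead of modulo, no Rank).
  cxxmph::FlexibleMPHIndex<false, true, std::string, Hasher> fast;
  fast.Reset(keys.begin(), keys.end(), keys.size());

  for (const auto& k : keys) {
    uint32_t dense_id = small.index(k);  // < small.size() == keys.size()
    uint32_t sparse_id = fast.index(k);  // < fast.size() >= keys.size()
    (void)dense_id;
    (void)sparse_id;
  }
  return 0;
}
```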
@@ -5,12 +5,12 @@
//
// This class not necessarily faster than unordered_map (or ext/hash_map).
// Benchmark your code before using it. If you do not call rehash() before
// starting your reads, it will be definitively slower than unordered_map.
// starting your reads, it will be very likely slower than unordered_map.
//
// For large sets of urls, which are a somewhat expensive to compare, I found
// this class to be about 10% faster than unordered_map.
// For large sets of urls (>100k), which are a somewhat expensive to compare, I
// found this class to be about 10%-30% faster than unordered_map.
//
// The space overhead of this map is 1.93 bits per bucket and it achieves 100%
// The space overhead of this map is 2.6 bits per bucket and it achieves 100%
// occupation with a rehash call.

#include <algorithm>
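In practice the comment above boils down to: batch the inserts, call rehash() once, then read. A hedged usage sketch (header name assumed; the interface mirrors unordered_map, as declared further down in this file):

```cpp
#include <cstdio>
#include <string>
#include <utility>

#include "mph_map.h"  // assumed header providing cxxmph::mph_map

int main() {
  cxxmph::mph_map<std::string, int> m;
  m.insert(std::make_pair(std::string("apples"), 3));
  m.insert(std::make_pair(std::string("oranges"), 5));
  m.insert(std::make_pair(std::string("pears"), 2));

  // Before rehash(), lookups fall back to the internal slack map and the
  // structure is likely slower than std::unordered_map.
  m.rehash(0);  // argument is ignored; this packs the keys into the MPH index

  auto it = m.find(std::string("oranges"));
  if (it != m.end()) std::printf("%s -> %d\n", it->first.c_str(), it->second);
  return 0;
}
```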
@@ -30,17 +30,18 @@ namespace cxxmph {

using std::pair;
using std::make_pair;
using std::unordered_map;
using std::vector;

// Save on repetitive typing.
#define MPH_MAP_TMPL_SPEC template <class Key, class Data, class HashFcn, class EqualKey, class Alloc>
#define MPH_MAP_CLASS_SPEC mph_map<Key, Data, HashFcn, EqualKey, Alloc>
#define MPH_MAP_TMPL_SPEC \
template <bool minimal, bool square, \
class Key, class Data, class HashFcn, class EqualKey, class Alloc>
#define MPH_MAP_CLASS_SPEC mph_map_base<minimal, square, Key, Data, HashFcn, EqualKey, Alloc>
#define MPH_MAP_METHOD_DECL(r, m) MPH_MAP_TMPL_SPEC typename MPH_MAP_CLASS_SPEC::r MPH_MAP_CLASS_SPEC::m
#define MPH_MAP_INLINE_METHOD_DECL(r, m) MPH_MAP_TMPL_SPEC inline typename MPH_MAP_CLASS_SPEC::r MPH_MAP_CLASS_SPEC::m

template <class Key, class Data, class HashFcn = std::hash<Key>, class EqualKey = std::equal_to<Key>, class Alloc = std::allocator<Data> >
class mph_map {
template <bool minimal, bool square, class Key, class Data, class HashFcn = std::hash<Key>, class EqualKey = std::equal_to<Key>, class Alloc = std::allocator<Data> >
class mph_map_base {
public:
typedef Key key_type;
typedef Data data_type;
@@ -63,8 +64,8 @@ class mph_map {
typedef bool bool_type;
typedef pair<iterator, bool> insert_return_type;

mph_map();
~mph_map();
mph_map_base();
~mph_map_base();

iterator begin();
iterator end();
@@ -83,7 +84,7 @@ class mph_map {
data_type& operator[](const key_type &k);
const data_type& operator[](const key_type &k) const;

size_type bucket_count() const { return index_.minimal_perfect_hash_size() + slack_.bucket_count(); }
size_type bucket_count() const { return index_.size() + slack_.bucket_count(); }
void rehash(size_type nbuckets /*ignored*/);

protected: // mimicking STL implementation
@@ -106,9 +107,9 @@ class mph_map {
void pack();
vector<value_type> values_;
vector<bool> present_;
SimpleMPHIndex<Key, typename seeded_hash<HashFcn>::hash_function> index_;
FlexibleMPHIndex<minimal, square, Key, typename seeded_hash<HashFcn>::hash_function> index_;
// TODO(davi) optimize slack to use hash from index rather than calculate its own
typedef unordered_map<h128, uint32_t, h128::hash32> slack_type;
typedef std::unordered_map<h128, uint32_t, h128::hash32> slack_type;
slack_type slack_;
size_type size_;
typename seeded_hash<HashFcn>::hash_function hasher128_;
@@ -119,13 +120,11 @@ bool operator==(const MPH_MAP_CLASS_SPEC& lhs, const MPH_MAP_CLASS_SPEC& rhs) {
return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin());
}

MPH_MAP_TMPL_SPEC MPH_MAP_CLASS_SPEC::mph_map() : size_(0) {
MPH_MAP_TMPL_SPEC MPH_MAP_CLASS_SPEC::mph_map_base() : size_(0) {
clear();
pack();
}

MPH_MAP_TMPL_SPEC MPH_MAP_CLASS_SPEC::~mph_map() {
}
MPH_MAP_TMPL_SPEC MPH_MAP_CLASS_SPEC::~mph_map_base() { }

MPH_MAP_METHOD_DECL(insert_return_type, insert)(const value_type& x) {
auto it = find(x.first);
@@ -154,13 +153,13 @@ MPH_MAP_METHOD_DECL(void_type, pack)() {
make_iterator_first(begin()),
make_iterator_first(end()), size_);
if (!success) { exit(-1); }
vector<value_type> new_values(index_.minimal_perfect_hash_size());
vector<value_type> new_values(index_.size());
new_values.reserve(new_values.size() * 2);
vector<bool> new_present(index_.minimal_perfect_hash_size(), false);
vector<bool> new_present(index_.size(), false);
new_present.reserve(new_present.size() * 2);
for (iterator it = begin(), it_end = end(); it != it_end; ++it) {
size_type id = index_.minimal_perfect_hash(it->first);
assert(id < index_.minimal_perfect_hash_size());
size_type id = index_.index(it->first);
assert(id < index_.size());
assert(id < new_values.size());
new_values[id] = *it;
new_present[id] = true;
@@ -216,10 +215,10 @@ MPH_MAP_INLINE_METHOD_DECL(my_int32_t, index)(const key_type& k) const {
auto sit = slack_.find(hasher128_.hash128(k, 0));
if (sit != slack_.end()) return sit->second;
}
if (__builtin_expect(index_.minimal_perfect_hash_size(), 1)) {
auto minimal_perfect_hash = index_.minimal_perfect_hash(k);
if (__builtin_expect(present_[minimal_perfect_hash], true)) {
return minimal_perfect_hash;
if (__builtin_expect(index_.size(), 1)) {
auto id = index_.index(k);
if (__builtin_expect(present_[id], true)) {
return id;
}
}
return -1;
@@ -235,6 +234,21 @@ MPH_MAP_METHOD_DECL(void_type, rehash)(size_type nbuckets) {
slack_type().swap(slack_);
}

#define MPH_MAP_PREAMBLE template <class Key, class Data,\
class HashFcn = std::hash<Key>, class EqualKey = std::equal_to<Key>,\
class Alloc = std::allocator<Data> >

MPH_MAP_PREAMBLE class mph_map : public mph_map_base<
false, false, Key, Data, HashFcn, EqualKey, Alloc> {};
MPH_MAP_PREAMBLE class unordered_map : public mph_map_base<
false, false, Key, Data, HashFcn, EqualKey, Alloc> {};
MPH_MAP_PREAMBLE class hash_map : public mph_map_base<
false, false, Key, Data, HashFcn, EqualKey, Alloc> {};

MPH_MAP_PREAMBLE class dense_hash_map : public mph_map_base<
false, true, Key, Data, HashFcn, EqualKey, Alloc> {};
MPH_MAP_PREAMBLE class sparse_hash_map : public mph_map_base<
true, false, Key, Data, HashFcn, EqualKey, Alloc> {};

} // namespace cxxmph
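With these aliases, choosing a point on the memory/speed curve is just a matter of naming the type. A sketch of what each drop-in selects, assuming the same header as above; behavior follows from the mph_map_base template parameters wired up in this diff.

```cpp
#include <string>

#include "mph_map.h"  // assumed header for the cxxmph drop-in maps

int main() {
  // mph_map / unordered_map / hash_map: minimal=false, square=false,
  // the middle ground between memory use and lookup speed.
  cxxmph::unordered_map<std::string, int> balanced;

  // dense_hash_map: minimal=false, square=true, fastest lookups at the cost
  // of a power-of-two sized index.
  cxxmph::dense_hash_map<std::string, int> fast;

  // sparse_hash_map: minimal=true, square=false, smallest footprint at the
  // cost of an extra Rank step per lookup.
  cxxmph::sparse_hash_map<std::string, int> small;

  balanced.insert({"key", 1});
  fast.insert({"key", 1});
  small.insert({"key", 1});

  // Same advice as for mph_map: pack once before the read-heavy phase.
  balanced.rehash(0);
  fast.rehash(0);
  small.rehash(0);
  return 0;
}
```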