/src/rocksdb/util/dynamic_bloom.cc
Line | Count | Source |
1 | | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
2 | | // This source code is licensed under both the GPLv2 (found in the |
3 | | // COPYING file in the root directory) and Apache 2.0 License |
4 | | // (found in the LICENSE.Apache file in the root directory). |
5 | | |
6 | | #include "dynamic_bloom.h" |
7 | | |
8 | | #include <algorithm> |
9 | | |
10 | | #include "memory/allocator.h" |
11 | | #include "port/port.h" |
12 | | #include "rocksdb/slice.h" |
13 | | #include "util/hash.h" |
14 | | |
15 | | namespace ROCKSDB_NAMESPACE { |
16 | | |
17 | | namespace { |
18 | | |
19 | 0 | uint32_t roundUpToPow2(uint32_t x) { |
20 | 0 | uint32_t rv = 1; |
21 | 0 | while (rv < x) { |
22 | 0 | rv <<= 1; |
23 | 0 | } |
24 | 0 | return rv; |
25 | 0 | } |
26 | | } // namespace |
27 | | |
28 | | DynamicBloom::DynamicBloom(Allocator* allocator, uint32_t total_bits, |
29 | | uint32_t num_probes, size_t huge_page_tlb_size, |
30 | | Logger* logger) |
31 | | // Round down, except round up with 1 |
32 | 0 | : kNumDoubleProbes((num_probes + (num_probes == 1)) / 2) { |
33 | 0 | assert(num_probes % 2 == 0); // limitation of current implementation |
34 | 0 | assert(num_probes <= 10); // limitation of current implementation |
35 | 0 | assert(kNumDoubleProbes > 0); |
36 | | |
37 | | // Determine how much to round off + align by so that x ^ i (that's xor) is |
38 | | // a valid u64 index if x is a valid u64 index and 0 <= i < kNumDoubleProbes. |
39 | 0 | uint32_t block_bytes = /*bytes/u64*/ 8 * |
40 | 0 | /*u64s*/ std::max(1U, roundUpToPow2(kNumDoubleProbes)); |
41 | 0 | uint32_t block_bits = block_bytes * 8; |
42 | 0 | uint32_t blocks = (total_bits + block_bits - 1) / block_bits; |
43 | 0 | uint32_t sz = blocks * block_bytes; |
44 | 0 | kLen = sz / /*bytes/u64*/ 8; |
45 | 0 | assert(kLen > 0); |
46 | | #ifndef NDEBUG |
47 | | for (uint32_t i = 0; i < kNumDoubleProbes; ++i) { |
48 | | // Ensure probes starting at last word are in range |
49 | | assert(((kLen - 1) ^ i) < kLen); |
50 | | } |
51 | | #endif |
52 | | |
53 | | // Padding to correct for allocation not originally aligned on block_bytes |
54 | | // boundary |
55 | 0 | sz += block_bytes - 1; |
56 | 0 | assert(allocator); |
57 | | |
58 | 0 | char* raw = allocator->AllocateAligned(sz, huge_page_tlb_size, logger); |
59 | 0 | memset(raw, 0, sz); |
60 | 0 | auto block_offset = reinterpret_cast<uintptr_t>(raw) % block_bytes; |
61 | 0 | if (block_offset > 0) { |
62 | | // Align on block_bytes boundary |
63 | 0 | raw += block_bytes - block_offset; |
64 | 0 | } |
65 | 0 | static_assert(sizeof(std::atomic<uint64_t>) == sizeof(uint64_t), |
66 | 0 | "Expecting zero-space-overhead atomic"); |
67 | 0 | data_ = reinterpret_cast<std::atomic<uint64_t>*>(raw); |
68 | 0 | } |
69 | | |
70 | | } // namespace ROCKSDB_NAMESPACE |
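
As a quick sanity check on the constructor's sizing arithmetic above, the following standalone sketch reproduces the math for a concrete input (total_bits = 1000, num_probes = 6). It is not part of the RocksDB file; the main() wrapper and the sample parameters are chosen purely for illustration, and the helper simply mirrors the anonymous-namespace roundUpToPow2 shown in the listing.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

// Mirrors the anonymous-namespace helper in dynamic_bloom.cc.
static uint32_t roundUpToPow2(uint32_t x) {
  uint32_t rv = 1;
  while (rv < x) {
    rv <<= 1;
  }
  return rv;
}

int main() {
  const uint32_t total_bits = 1000;  // requested filter size (illustrative)
  const uint32_t num_probes = 6;     // must be even and <= 10 per the asserts
  const uint32_t kNumDoubleProbes = (num_probes + (num_probes == 1)) / 2;  // 3

  // block_bytes is rounded up to a power-of-two number of u64 words so that
  // x ^ i is a valid u64 index whenever x is and 0 <= i < kNumDoubleProbes.
  const uint32_t block_bytes =
      8 * std::max(1U, roundUpToPow2(kNumDoubleProbes));                 // 32
  const uint32_t block_bits = block_bytes * 8;                           // 256
  const uint32_t blocks = (total_bits + block_bits - 1) / block_bits;    // 4
  const uint32_t sz = blocks * block_bytes;                              // 128
  const uint32_t kLen = sz / 8;                                          // 16 u64 words

  // Same check as the NDEBUG-only loop in the constructor: probes starting
  // at the last word must stay in range.
  for (uint32_t i = 0; i < kNumDoubleProbes; ++i) {
    assert(((kLen - 1) ^ i) < kLen);
  }
  std::printf("kNumDoubleProbes=%u block_bytes=%u kLen=%u\n",
              kNumDoubleProbes, block_bytes, kLen);
  return 0;
}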