/src/rocksdb/table/block_based/partitioned_index_reader.h
Line | Count | Source |
1 | | // Copyright (c) 2011-present, Facebook, Inc. All rights reserved. |
2 | | // This source code is licensed under both the GPLv2 (found in the |
3 | | // COPYING file in the root directory) and Apache 2.0 License |
4 | | // (found in the LICENSE.Apache file in the root directory). |
5 | | // |
6 | | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. |
7 | | // Use of this source code is governed by a BSD-style license that can be |
8 | | // found in the LICENSE file. See the AUTHORS file for names of contributors. |
9 | | #pragma once |
10 | | #include "table/block_based/index_reader_common.h" |
11 | | #include "util/hash_containers.h" |
12 | | |
13 | | namespace ROCKSDB_NAMESPACE { |
14 | | // Index that allows binary search lookup in a two-level index structure. |
15 | | class PartitionIndexReader : public BlockBasedTable::IndexReaderCommon {
16 | | public:
17 | | // Read the partition index from the file and create an instance for
18 | | // `PartitionIndexReader`.
19 | | // On success, index_reader will be populated; otherwise it will remain
20 | | // unmodified.
21 | | static Status Create(const BlockBasedTable* table, const ReadOptions& ro,
22 | | FilePrefetchBuffer* prefetch_buffer, bool use_cache,
23 | | bool prefetch, bool pin,
24 | | BlockCacheLookupContext* lookup_context,
25 | | std::unique_ptr<IndexReader>* index_reader);
26 | |
27 | | // return a two-level iterator: first level is on the partition index
28 | | InternalIteratorBase<IndexValue>* NewIterator(
29 | | const ReadOptions& read_options, bool /* disable_prefix_seek */,
30 | | IndexBlockIter* iter, GetContext* get_context,
31 | | BlockCacheLookupContext* lookup_context) override;
32 | |
33 | | Status CacheDependencies(const ReadOptions& ro, bool pin,
34 | | FilePrefetchBuffer* tail_prefetch_buffer) override;  // NOTE(review): presumably loads the index partitions into cache, saving them in partition_map_ when `pin` — confirm against the .cc
35 | 0 | size_t ApproximateMemoryUsage() const override {  // Heap-usage estimate: top-level index block + this object itself.
36 | 0 | size_t usage = ApproximateIndexBlockMemoryUsage();  // base-class estimate for the top-level index block
37 | 0 | #ifdef ROCKSDB_MALLOC_USABLE_SIZE
38 | 0 | usage += malloc_usable_size(const_cast<PartitionIndexReader*>(this));  // actual allocated size of *this (const_cast needed for the C API)
39 | | #else
40 | | usage += sizeof(*this);  // fallback: declared size only, ignores allocator overhead
41 | | #endif // ROCKSDB_MALLOC_USABLE_SIZE
42 | | // TODO(myabandeh): more accurate estimate of partition_map_ mem usage
43 | 0 | return usage;
44 | 0 | }
45 | | void EraseFromCacheBeforeDestruction(
46 | | uint32_t /*uncache_aggressiveness*/) override;  // declared only; behavior defined in the .cc
47 | |
48 | | private:
49 | | PartitionIndexReader(const BlockBasedTable* t,
50 | | CachableEntry<Block>&& index_block)
51 | 0 | : IndexReaderCommon(t, std::move(index_block)) {}  // private: instances are built via the Create() factory above
52 | |
53 | | // For partition blocks pinned in cache. This is expected to be "all or
54 | | // none" so that !partition_map_.empty() can use an iterator expecting
55 | | // all partitions to be saved here.
56 | | UnorderedMap<uint64_t, CachableEntry<Block>> partition_map_;
57 | | };
58 | | } // namespace ROCKSDB_NAMESPACE |