Coverage Report

Created: 2025-10-26 07:13

/src/rocksdb/cache/lru_cache.cc
Line |  Count | Source
   1 |        | //  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
   2 |        | //  This source code is licensed under both the GPLv2 (found in the
   3 |        | //  COPYING file in the root directory) and Apache 2.0 License
   4 |        | //  (found in the LICENSE.Apache file in the root directory).
   5 |        | //
   6 |        | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
   7 |        | // Use of this source code is governed by a BSD-style license that can be
   8 |        | // found in the LICENSE file. See the AUTHORS file for names of contributors.
   9 |        |
  10 |        | #include "cache/lru_cache.h"
  11 |        |
  12 |        | #include <cassert>
  13 |        | #include <cstdint>
  14 |        | #include <cstdio>
  15 |        | #include <cstdlib>
  16 |        |
  17 |        | #include "cache/secondary_cache_adapter.h"
  18 |        | #include "monitoring/perf_context_imp.h"
  19 |        | #include "monitoring/statistics_impl.h"
  20 |        | #include "port/lang.h"
  21 |        | #include "util/distributed_mutex.h"
  22 |        |
  23 |        | namespace ROCKSDB_NAMESPACE {
  24 |        | namespace lru_cache {
  25 |        |
  26 |        | LRUHandleTable::LRUHandleTable(int max_upper_hash_bits,
  27 |        |                                MemoryAllocator* allocator)
  28 |  3.10M |     : length_bits_(/* historical starting size*/ 4),
  29 |  3.10M |       list_(new LRUHandle* [size_t{1} << length_bits_] {}),
  30 |  3.10M |       elems_(0),
  31 |  3.10M |       max_length_bits_(max_upper_hash_bits),
  32 |  3.10M |       allocator_(allocator) {}
  33 |        |
  34 |  3.10M | LRUHandleTable::~LRUHandleTable() {
  35 |  3.10M |   auto alloc = allocator_;
  36 |  3.10M |   ApplyToEntriesRange(
  37 |  3.10M |       [alloc](LRUHandle* h) {
  38 |      0 |         if (!h->HasRefs()) {
  39 |      0 |           h->Free(alloc);
  40 |      0 |         }
  41 |      0 |       },
  42 |  3.10M |       0, size_t{1} << length_bits_);
  43 |  3.10M | }
  44 |        |
  45 |   219k | LRUHandle* LRUHandleTable::Lookup(const Slice& key, uint32_t hash) {
  46 |   219k |   return *FindPointer(key, hash);
  47 |   219k | }
  48 |        |
  49 |  96.9k | LRUHandle* LRUHandleTable::Insert(LRUHandle* h) {
  50 |  96.9k |   LRUHandle** ptr = FindPointer(h->key(), h->hash);
  51 |  96.9k |   LRUHandle* old = *ptr;
  52 |  96.9k |   h->next_hash = (old == nullptr ? nullptr : old->next_hash);
  53 |  96.9k |   *ptr = h;
  54 |  96.9k |   if (old == nullptr) {
  55 |  96.7k |     ++elems_;
  56 |  96.7k |     if ((elems_ >> length_bits_) > 0) {  // elems_ >= length
  57 |        |       // Since each cache entry is fairly large, we aim for a small
  58 |        |       // average linked list length (<= 1).
  59 |      0 |       Resize();
  60 |      0 |     }
  61 |  96.7k |   }
  62 |  96.9k |   return old;
  63 |  96.9k | }
  64 |        |
  65 |   104k | LRUHandle* LRUHandleTable::Remove(const Slice& key, uint32_t hash) {
  66 |   104k |   LRUHandle** ptr = FindPointer(key, hash);
  67 |   104k |   LRUHandle* result = *ptr;
  68 |   104k |   if (result != nullptr) {
  69 |  96.9k |     *ptr = result->next_hash;
  70 |  96.9k |     --elems_;
  71 |  96.9k |   }
  72 |   104k |   return result;
  73 |   104k | }
  74 |        |
  75 |   420k | LRUHandle** LRUHandleTable::FindPointer(const Slice& key, uint32_t hash) {
  76 |   420k |   LRUHandle** ptr = &list_[hash >> (32 - length_bits_)];
  77 |   421k |   while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
  78 |    391 |     ptr = &(*ptr)->next_hash;
  79 |    391 |   }
  80 |   420k |   return ptr;
  81 |   420k | }
  82 |        |
  83 |      0 | void LRUHandleTable::Resize() {
  84 |      0 |   if (length_bits_ >= max_length_bits_) {
  85 |        |     // We have reached the limit of usable hash bits: a bigger table would
  86 |        |     // allocate more addresses, but only the same number would ever be used.
  87 |      0 |     return;
  88 |      0 |   }
  89 |      0 |   if (length_bits_ >= 31) {
  90 |        |     // Avoid undefined behavior shifting uint32_t by 32.
  91 |      0 |     return;
  92 |      0 |   }
  93 |        |
  94 |      0 |   uint32_t old_length = uint32_t{1} << length_bits_;
  95 |      0 |   int new_length_bits = length_bits_ + 1;
  96 |      0 |   std::unique_ptr<LRUHandle*[]> new_list{
  97 |      0 |       new LRUHandle* [size_t{1} << new_length_bits] {}};
  98 |      0 |   [[maybe_unused]] uint32_t count = 0;
  99 |      0 |   for (uint32_t i = 0; i < old_length; i++) {
 100 |      0 |     LRUHandle* h = list_[i];
 101 |      0 |     while (h != nullptr) {
 102 |      0 |       LRUHandle* next = h->next_hash;
 103 |      0 |       uint32_t hash = h->hash;
 104 |      0 |       LRUHandle** ptr = &new_list[hash >> (32 - new_length_bits)];
 105 |      0 |       h->next_hash = *ptr;
 106 |      0 |       *ptr = h;
 107 |      0 |       h = next;
 108 |      0 |       count++;
 109 |      0 |     }
 110 |      0 |   }
 111 |      0 |   assert(elems_ == count);
 112 |      0 |   list_ = std::move(new_list);
 113 |      0 |   length_bits_ = new_length_bits;
 114 |      0 | }
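
Note: FindPointer() indexes buckets with the upper bits of the 32-bit hash (hash >> (32 - length_bits_)) instead of the usual low-bit mask. Resize() depends on this: doubling the table splits each old bucket into two adjacent new ones, so a bucket index is a stable hash prefix. It is also what lets ApplyToSomeEntries() below persist a scan position across resizes. A minimal standalone sketch of that property (illustrative code, not part of RocksDB):

    #include <cassert>
    #include <cstdint>

    // Bucket index = top `bits` bits of the hash.
    static uint32_t BucketOf(uint32_t hash, int bits) { return hash >> (32 - bits); }

    int main() {
      uint32_t h = 0xDEADBEEF;
      uint32_t before = BucketOf(h, 4);  // index in a 16-bucket table
      uint32_t after = BucketOf(h, 5);   // index after one doubling
      // Doubling maps old bucket i to new buckets 2*i and 2*i + 1, so entries
      // never move far, and iteration by bucket stays sorted by hash prefix.
      assert(after / 2 == before);
      return 0;
    }
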
 115 |        |
 116 |        | LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
 117 |        |                              double high_pri_pool_ratio,
 118 |        |                              double low_pri_pool_ratio, bool use_adaptive_mutex,
 119 |        |                              CacheMetadataChargePolicy metadata_charge_policy,
 120 |        |                              int max_upper_hash_bits,
 121 |        |                              MemoryAllocator* allocator,
 122 |        |                              const Cache::EvictionCallback* eviction_callback)
 123 |  3.10M |     : CacheShardBase(metadata_charge_policy),
 124 |  3.10M |       capacity_(0),
 125 |  3.10M |       high_pri_pool_usage_(0),
 126 |  3.10M |       low_pri_pool_usage_(0),
 127 |  3.10M |       strict_capacity_limit_(strict_capacity_limit),
 128 |  3.10M |       high_pri_pool_ratio_(high_pri_pool_ratio),
 129 |  3.10M |       high_pri_pool_capacity_(0),
 130 |  3.10M |       low_pri_pool_ratio_(low_pri_pool_ratio),
 131 |  3.10M |       low_pri_pool_capacity_(0),
 132 |  3.10M |       table_(max_upper_hash_bits, allocator),
 133 |  3.10M |       usage_(0),
 134 |  3.10M |       lru_usage_(0),
 135 |  3.10M |       mutex_(use_adaptive_mutex),
 136 |  3.10M |       eviction_callback_(*eviction_callback) {
 137 |        |   // Make empty circular linked list.
 138 |  3.10M |   lru_.next = &lru_;
 139 |  3.10M |   lru_.prev = &lru_;
 140 |  3.10M |   lru_low_pri_ = &lru_;
 141 |  3.10M |   lru_bottom_pri_ = &lru_;
 142 |  3.10M |   SetCapacity(capacity);
 143 |  3.10M | }
 144 |        |
 145 |  3.10M | void LRUCacheShard::EraseUnRefEntries() {
 146 |  3.10M |   autovector<LRUHandle*> last_reference_list;
 147 |  3.10M |   {
 148 |  3.10M |     DMutexLock l(mutex_);
 149 |  3.10M |     while (lru_.next != &lru_) {
 150 |      0 |       LRUHandle* old = lru_.next;
 151 |        |       // LRU list contains only elements which can be evicted.
 152 |      0 |       assert(old->InCache() && !old->HasRefs());
 153 |      0 |       LRU_Remove(old);
 154 |      0 |       table_.Remove(old->key(), old->hash);
 155 |      0 |       old->SetInCache(false);
 156 |      0 |       assert(usage_ >= old->total_charge);
 157 |      0 |       usage_ -= old->total_charge;
 158 |      0 |       last_reference_list.push_back(old);
 159 |      0 |     }
 160 |  3.10M |   }
 161 |        |
 162 |  3.10M |   for (auto entry : last_reference_list) {
 163 |      0 |     entry->Free(table_.GetAllocator());
 164 |      0 |   }
 165 |  3.10M | }
 166 |        |
 167 |        | void LRUCacheShard::ApplyToSomeEntries(
 168 |        |     const std::function<void(const Slice& key, Cache::ObjectPtr value,
 169 |        |                              size_t charge,
 170 |        |                              const Cache::CacheItemHelper* helper)>& callback,
 171 |      0 |     size_t average_entries_per_lock, size_t* state) {
 172 |        |   // The state is essentially going to be the starting hash, which works
 173 |        |   // nicely even if we resize between calls because we use upper-most
 174 |        |   // hash bits for table indexes.
 175 |      0 |   DMutexLock l(mutex_);
 176 |      0 |   int length_bits = table_.GetLengthBits();
 177 |      0 |   size_t length = size_t{1} << length_bits;
 178 |        |
 179 |      0 |   assert(average_entries_per_lock > 0);
 180 |        |   // Assuming we are called with same average_entries_per_lock repeatedly,
 181 |        |   // this simplifies some logic (index_end will not overflow).
 182 |      0 |   assert(average_entries_per_lock < length || *state == 0);
 183 |        |
 184 |      0 |   size_t index_begin = *state >> (sizeof(size_t) * 8u - length_bits);
 185 |      0 |   size_t index_end = index_begin + average_entries_per_lock;
 186 |      0 |   if (index_end >= length) {
 187 |        |     // Going to end
 188 |      0 |     index_end = length;
 189 |      0 |     *state = SIZE_MAX;
 190 |      0 |   } else {
 191 |      0 |     *state = index_end << (sizeof(size_t) * 8u - length_bits);
 192 |      0 |   }
 193 |        |
 194 |      0 |   table_.ApplyToEntriesRange(
 195 |      0 |       [callback,
 196 |      0 |        metadata_charge_policy = metadata_charge_policy_](LRUHandle* h) {
 197 |      0 |         callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
 198 |      0 |                  h->helper);
 199 |      0 |       },
 200 |      0 |       index_begin, index_end);
 201 |      0 | }
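
Note: the *state cursor above stores the next bucket index shifted into the top bits of a size_t, so a table resize between calls does not invalidate it: decoding with the new length_bits lands on the same hash-prefix boundary. A small round-trip sketch (kStateBits and the values are illustrative, not RocksDB code):

    #include <cassert>
    #include <cstddef>

    constexpr std::size_t kStateBits = sizeof(std::size_t) * 8u;

    int main() {
      int length_bits = 4;                                       // 16 buckets
      std::size_t index_end = 6;                                 // resume at bucket 6
      std::size_t state = index_end << (kStateBits - length_bits);  // encode

      length_bits = 5;                                           // table doubled between calls
      std::size_t index_begin = state >> (kStateBits - length_bits);  // decode
      // Bucket 6 of 16 corresponds to bucket 12 of 32: the same hash prefix.
      assert(index_begin == 12);
      return 0;
    }
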
 202 |        |
 203 |        | void LRUCacheShard::TEST_GetLRUList(LRUHandle** lru, LRUHandle** lru_low_pri,
 204 |      0 |                                     LRUHandle** lru_bottom_pri) {
 205 |      0 |   DMutexLock l(mutex_);
 206 |      0 |   *lru = &lru_;
 207 |      0 |   *lru_low_pri = lru_low_pri_;
 208 |      0 |   *lru_bottom_pri = lru_bottom_pri_;
 209 |      0 | }
 210 |        |
 211 |      0 | size_t LRUCacheShard::TEST_GetLRUSize() {
 212 |      0 |   DMutexLock l(mutex_);
 213 |      0 |   LRUHandle* lru_handle = lru_.next;
 214 |      0 |   size_t lru_size = 0;
 215 |      0 |   while (lru_handle != &lru_) {
 216 |      0 |     lru_size++;
 217 |      0 |     lru_handle = lru_handle->next;
 218 |      0 |   }
 219 |      0 |   return lru_size;
 220 |      0 | }
 221 |        |
 222 |      0 | double LRUCacheShard::GetHighPriPoolRatio() {
 223 |      0 |   DMutexLock l(mutex_);
 224 |      0 |   return high_pri_pool_ratio_;
 225 |      0 | }
 226 |        |
 227 |      0 | double LRUCacheShard::GetLowPriPoolRatio() {
 228 |      0 |   DMutexLock l(mutex_);
 229 |      0 |   return low_pri_pool_ratio_;
 230 |      0 | }
 231 |        |
 232 |  20.1k | void LRUCacheShard::LRU_Remove(LRUHandle* e) {
 233 |  20.1k |   assert(e->next != nullptr);
 234 |  20.1k |   assert(e->prev != nullptr);
 235 |  20.1k |   if (lru_low_pri_ == e) {
 236 |  20.1k |     lru_low_pri_ = e->prev;
 237 |  20.1k |   }
 238 |  20.1k |   if (lru_bottom_pri_ == e) {
 239 |  20.1k |     lru_bottom_pri_ = e->prev;
 240 |  20.1k |   }
 241 |  20.1k |   e->next->prev = e->prev;
 242 |  20.1k |   e->prev->next = e->next;
 243 |  20.1k |   e->prev = e->next = nullptr;
 244 |  20.1k |   assert(lru_usage_ >= e->total_charge);
 245 |  20.1k |   lru_usage_ -= e->total_charge;
 246 |  20.1k |   assert(!e->InHighPriPool() || !e->InLowPriPool());
 247 |  20.1k |   if (e->InHighPriPool()) {
 248 |      0 |     assert(high_pri_pool_usage_ >= e->total_charge);
 249 |      0 |     high_pri_pool_usage_ -= e->total_charge;
 250 |  20.1k |   } else if (e->InLowPriPool()) {
 251 |      0 |     assert(low_pri_pool_usage_ >= e->total_charge);
 252 |      0 |     low_pri_pool_usage_ -= e->total_charge;
 253 |      0 |   }
 254 |  20.1k | }
 255 |        |
 256 |  20.1k | void LRUCacheShard::LRU_Insert(LRUHandle* e) {
 257 |  20.1k |   assert(e->next == nullptr);
 258 |  20.1k |   assert(e->prev == nullptr);
 259 |  20.1k |   if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
 260 |        |     // Insert "e" to the head of LRU list.
 261 |      0 |     e->next = &lru_;
 262 |      0 |     e->prev = lru_.prev;
 263 |      0 |     e->prev->next = e;
 264 |      0 |     e->next->prev = e;
 265 |      0 |     e->SetInHighPriPool(true);
 266 |      0 |     e->SetInLowPriPool(false);
 267 |      0 |     high_pri_pool_usage_ += e->total_charge;
 268 |      0 |     MaintainPoolSize();
 269 |  20.1k |   } else if (low_pri_pool_ratio_ > 0 &&
 270 |      0 |              (e->IsHighPri() || e->IsLowPri() || e->HasHit())) {
 271 |        |     // Insert "e" to the head of low-pri pool.
 272 |      0 |     e->next = lru_low_pri_->next;
 273 |      0 |     e->prev = lru_low_pri_;
 274 |      0 |     e->prev->next = e;
 275 |      0 |     e->next->prev = e;
 276 |      0 |     e->SetInHighPriPool(false);
 277 |      0 |     e->SetInLowPriPool(true);
 278 |      0 |     low_pri_pool_usage_ += e->total_charge;
 279 |      0 |     lru_low_pri_ = e;
 280 |      0 |     MaintainPoolSize();
 281 |  20.1k |   } else {
 282 |        |     // Insert "e" to the head of bottom-pri pool.
 283 |  20.1k |     e->next = lru_bottom_pri_->next;
 284 |  20.1k |     e->prev = lru_bottom_pri_;
 285 |  20.1k |     e->prev->next = e;
 286 |  20.1k |     e->next->prev = e;
 287 |  20.1k |     e->SetInHighPriPool(false);
 288 |  20.1k |     e->SetInLowPriPool(false);
 289 |        |     // If the low-pri pool is empty, lru_low_pri_ also needs to be updated.
 290 |  20.1k |     if (lru_bottom_pri_ == lru_low_pri_) {
 291 |  20.1k |       lru_low_pri_ = e;
 292 |  20.1k |     }
 293 |  20.1k |     lru_bottom_pri_ = e;
 294 |  20.1k |   }
 295 |  20.1k |   lru_usage_ += e->total_charge;
 296 |  20.1k | }
 297 |        |
 298 |      0 | void LRUCacheShard::MaintainPoolSize() {
 299 |      0 |   while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
 300 |        |     // Overflow last entry in high-pri pool to low-pri pool.
 301 |      0 |     lru_low_pri_ = lru_low_pri_->next;
 302 |      0 |     assert(lru_low_pri_ != &lru_);
 303 |      0 |     assert(lru_low_pri_->InHighPriPool());
 304 |      0 |     lru_low_pri_->SetInHighPriPool(false);
 305 |      0 |     lru_low_pri_->SetInLowPriPool(true);
 306 |      0 |     assert(high_pri_pool_usage_ >= lru_low_pri_->total_charge);
 307 |      0 |     high_pri_pool_usage_ -= lru_low_pri_->total_charge;
 308 |      0 |     low_pri_pool_usage_ += lru_low_pri_->total_charge;
 309 |      0 |   }
 310 |        |
 311 |      0 |   while (low_pri_pool_usage_ > low_pri_pool_capacity_) {
 312 |        |     // Overflow last entry in low-pri pool to bottom-pri pool.
 313 |      0 |     lru_bottom_pri_ = lru_bottom_pri_->next;
 314 |      0 |     assert(lru_bottom_pri_ != &lru_);
 315 |      0 |     assert(lru_bottom_pri_->InLowPriPool());
 316 |      0 |     lru_bottom_pri_->SetInHighPriPool(false);
 317 |      0 |     lru_bottom_pri_->SetInLowPriPool(false);
 318 |      0 |     assert(low_pri_pool_usage_ >= lru_bottom_pri_->total_charge);
 319 |      0 |     low_pri_pool_usage_ -= lru_bottom_pri_->total_charge;
 320 |      0 |   }
 321 |      0 | }
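
Note: LRU_Insert() and MaintainPoolSize() keep every entry on one circular list, split coldest-to-hottest into bottom-pri, low-pri, and high-pri segments by the two markers lru_bottom_pri_ and lru_low_pri_; pool overflow demotes entries by sliding a marker rather than relinking nodes. The pool sizes themselves come from the high_pri_pool_ratio and low_pri_pool_ratio options. A standalone analogy using std::list iterators as the markers (illustrative only; the real list is intrusive and circular):

    #include <cassert>
    #include <iterator>
    #include <list>

    int main() {
      // One list, three segments ordered coldest-to-hottest:
      // [bottom-pri | low-pri | high-pri]. Iterators mark segment starts,
      // mirroring how lru_bottom_pri_ and lru_low_pri_ split one list.
      std::list<char> lru = {'a', 'b', 'c', 'd', 'e', 'f'};
      auto low_begin = std::next(lru.begin(), 2);   // 'c','d' are low-pri
      auto high_begin = std::next(lru.begin(), 4);  // 'e','f' are high-pri

      // Demoting the oldest high-pri entry (what MaintainPoolSize() does when
      // the high-pri pool overflows) moves a boundary, not an element.
      ++high_begin;                                 // 'e' is now low-pri
      assert(*high_begin == 'f');
      (void)low_begin;
      return 0;
    }
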
 322 |        |
 323 |        | void LRUCacheShard::EvictFromLRU(size_t charge,
 324 |  3.19M |                                  autovector<LRUHandle*>* deleted) {
 325 |  3.19M |   while ((usage_ + charge) > capacity_ && lru_.next != &lru_) {
 326 |      0 |     LRUHandle* old = lru_.next;
 327 |        |     // LRU list contains only elements which can be evicted.
 328 |      0 |     assert(old->InCache() && !old->HasRefs());
 329 |      0 |     LRU_Remove(old);
 330 |      0 |     table_.Remove(old->key(), old->hash);
 331 |      0 |     old->SetInCache(false);
 332 |      0 |     assert(usage_ >= old->total_charge);
 333 |      0 |     usage_ -= old->total_charge;
 334 |      0 |     deleted->push_back(old);
 335 |      0 |   }
 336 |  3.19M | }
 337 |        |
 338 |        | void LRUCacheShard::NotifyEvicted(
 339 |  3.19M |     const autovector<LRUHandle*>& evicted_handles) {
 340 |  3.19M |   MemoryAllocator* alloc = table_.GetAllocator();
 341 |  3.19M |   for (LRUHandle* entry : evicted_handles) {
 342 |      0 |     if (eviction_callback_ &&
 343 |      0 |         eviction_callback_(entry->key(), static_cast<Cache::Handle*>(entry),
 344 |      0 |                            entry->HasHit())) {
 345 |        |       // Callback took ownership of obj; just free handle
 346 |      0 |       free(entry);
 347 |      0 |     } else {
 348 |        |       // Free the entries here outside of mutex for performance reasons.
 349 |      0 |       entry->Free(alloc);
 350 |      0 |     }
 351 |      0 |   }
 352 |  3.19M | }
 353 |        |
 354 |  3.10M | void LRUCacheShard::SetCapacity(size_t capacity) {
 355 |  3.10M |   autovector<LRUHandle*> last_reference_list;
 356 |  3.10M |   {
 357 |  3.10M |     DMutexLock l(mutex_);
 358 |  3.10M |     capacity_ = capacity;
 359 |  3.10M |     high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
 360 |  3.10M |     low_pri_pool_capacity_ = capacity_ * low_pri_pool_ratio_;
 361 |  3.10M |     EvictFromLRU(0, &last_reference_list);
 362 |  3.10M |   }
 363 |        |
 364 |  3.10M |   NotifyEvicted(last_reference_list);
 365 |  3.10M | }
 366 |        |
 367 |      0 | void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
 368 |      0 |   DMutexLock l(mutex_);
 369 |      0 |   strict_capacity_limit_ = strict_capacity_limit;
 370 |      0 | }
 371 |        |
 372 |  96.5k | Status LRUCacheShard::InsertItem(LRUHandle* e, LRUHandle** handle) {
 373 |  96.5k |   Status s = Status::OK();
 374 |  96.5k |   autovector<LRUHandle*> last_reference_list;
 375 |        |
 376 |  96.5k |   {
 377 |  96.5k |     DMutexLock l(mutex_);
 378 |        |
 379 |        |     // Free the space following strict LRU policy until enough space
 380 |        |     // is freed or the lru list is empty.
 381 |  96.5k |     EvictFromLRU(e->total_charge, &last_reference_list);
 382 |        |
 383 |  96.5k |     if ((usage_ + e->total_charge) > capacity_ &&
 384 |      0 |         (strict_capacity_limit_ || handle == nullptr)) {
 385 |      0 |       e->SetInCache(false);
 386 |      0 |       if (handle == nullptr) {
 387 |        |         // Don't insert the entry but still return ok, as if the entry were
 388 |        |         // inserted into the cache and evicted immediately.
 389 |      0 |         last_reference_list.push_back(e);
 390 |      0 |       } else {
 391 |      0 |         free(e);
 392 |      0 |         e = nullptr;
 393 |      0 |         *handle = nullptr;
 394 |      0 |         s = Status::MemoryLimit("Insert failed due to LRU cache being full.");
 395 |      0 |       }
 396 |  96.5k |     } else {
 397 |        |       // Insert into the cache. Note that the cache might get larger than its
 398 |        |       // capacity if not enough space was freed up.
 399 |  96.5k |       LRUHandle* old = table_.Insert(e);
 400 |  96.5k |       usage_ += e->total_charge;
 401 |  96.5k |       if (old != nullptr) {
 402 |      0 |         s = Status::OkOverwritten();
 403 |      0 |         assert(old->InCache());
 404 |      0 |         old->SetInCache(false);
 405 |      0 |         if (!old->HasRefs()) {
 406 |        |           // old is on LRU because it's in cache and its reference count is 0.
 407 |      0 |           LRU_Remove(old);
 408 |      0 |           assert(usage_ >= old->total_charge);
 409 |      0 |           usage_ -= old->total_charge;
 410 |      0 |           last_reference_list.push_back(old);
 411 |      0 |         }
 412 |      0 |       }
 413 |  96.5k |       if (handle == nullptr) {
 414 |      0 |         LRU_Insert(e);
 415 |  96.5k |       } else {
 416 |        |         // If caller already holds a ref, no need to take one here.
 417 |  96.7k |         if (!e->HasRefs()) {
 418 |  96.7k |           e->Ref();
 419 |  96.7k |         }
 420 |  96.5k |         *handle = e;
 421 |  96.5k |       }
 422 |  96.5k |     }
 423 |  96.5k |   }
 424 |        |
 425 |  96.5k |   NotifyEvicted(last_reference_list);
 426 |        |
 427 |  96.5k |   return s;
 428 |  96.5k | }
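
Note: when the shard is full, InsertItem() behaves differently depending on whether the caller asked for a handle: under strict_capacity_limit_ with a handle requested, the insert fails with Status::MemoryLimit; with no handle requested, it reports OK and routes the entry straight to the eviction list. A standalone restatement of just that decision (illustrative enum and function, not RocksDB code):

    #include <cstddef>
    #include <cstdio>

    // Mirror of the InsertItem() capacity decision, stripped of locking and lists.
    enum class Outcome { Inserted, DroppedButOk, MemoryLimit };

    Outcome Decide(std::size_t usage, std::size_t charge, std::size_t capacity,
                   bool strict_capacity_limit, bool caller_wants_handle) {
      if (usage + charge > capacity &&
          (strict_capacity_limit || !caller_wants_handle)) {
        return caller_wants_handle ? Outcome::MemoryLimit : Outcome::DroppedButOk;
      }
      return Outcome::Inserted;  // may temporarily overshoot capacity
    }

    int main() {
      // Full cache, strict limit, caller wants a handle: insert is refused.
      std::printf("%d\n", static_cast<int>(Decide(100, 10, 100, true, true)));
      // Full cache, no handle requested: "OK", but the entry is freed at once.
      std::printf("%d\n", static_cast<int>(Decide(100, 10, 100, false, false)));
      return 0;
    }
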
 429 |        |
 430 |        | LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
 431 |        |                                  const Cache::CacheItemHelper* /*helper*/,
 432 |        |                                  Cache::CreateContext* /*create_context*/,
 433 |        |                                  Cache::Priority /*priority*/,
 434 |   219k |                                  Statistics* /*stats*/) {
 435 |   219k |   DMutexLock l(mutex_);
 436 |   219k |   LRUHandle* e = table_.Lookup(key, hash);
 437 |   219k |   if (e != nullptr) {
 438 |  25.8k |     assert(e->InCache());
 439 |  25.8k |     if (!e->HasRefs()) {
 440 |        |       // The entry is in LRU since it's in hash and has no external
 441 |        |       // references.
 442 |  20.1k |       LRU_Remove(e);
 443 |  20.1k |     }
 444 |  25.8k |     e->Ref();
 445 |  25.8k |     e->SetHit();
 446 |  25.8k |   }
 447 |   219k |   return e;
 448 |   219k | }
 449 |        |
 450 |      0 | bool LRUCacheShard::Ref(LRUHandle* e) {
 451 |      0 |   DMutexLock l(mutex_);
 452 |        |   // To create another reference - entry must be already externally referenced.
 453 |      0 |   assert(e->HasRefs());
 454 |      0 |   e->Ref();
 455 |      0 |   return true;
 456 |      0 | }
 457 |        |
 458 |      0 | void LRUCacheShard::SetHighPriorityPoolRatio(double high_pri_pool_ratio) {
 459 |      0 |   DMutexLock l(mutex_);
 460 |      0 |   high_pri_pool_ratio_ = high_pri_pool_ratio;
 461 |      0 |   high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
 462 |      0 |   MaintainPoolSize();
 463 |      0 | }
 464 |        |
 465 |      0 | void LRUCacheShard::SetLowPriorityPoolRatio(double low_pri_pool_ratio) {
 466 |      0 |   DMutexLock l(mutex_);
 467 |      0 |   low_pri_pool_ratio_ = low_pri_pool_ratio;
 468 |      0 |   low_pri_pool_capacity_ = capacity_ * low_pri_pool_ratio_;
 469 |      0 |   MaintainPoolSize();
 470 |      0 | }
 471 |        |
 472 |        | bool LRUCacheShard::Release(LRUHandle* e, bool /*useful*/,
 473 |   122k |                             bool erase_if_last_ref) {
 474 |   122k |   if (e == nullptr) {
 475 |      0 |     return false;
 476 |      0 |   }
 477 |   122k |   bool must_free;
 478 |   122k |   bool was_in_cache;
 479 |   122k |   {
 480 |   122k |     DMutexLock l(mutex_);
 481 |   122k |     must_free = e->Unref();
 482 |   122k |     was_in_cache = e->InCache();
 483 |   122k |     if (must_free && was_in_cache) {
 484 |        |       // The item is still in cache, and nobody else holds a reference to it.
 485 |   117k |       if (usage_ > capacity_ || erase_if_last_ref) {
 486 |        |         // The LRU list must be empty since the cache is full.
 487 |  96.9k |         assert(lru_.next == &lru_ || erase_if_last_ref);
 488 |        |         // Take this opportunity and remove the item.
 489 |  96.9k |         table_.Remove(e->key(), e->hash);
 490 |  96.9k |         e->SetInCache(false);
 491 |  96.9k |       } else {
 492 |        |         // Put the item back on the LRU list, and don't free it.
 493 |  20.1k |         LRU_Insert(e);
 494 |  20.1k |         must_free = false;
 495 |  20.1k |       }
 496 |   117k |     }
 497 |        |     // If about to be freed, then decrement the cache usage.
 498 |   122k |     if (must_free) {
 499 |  96.9k |       assert(usage_ >= e->total_charge);
 500 |  96.9k |       usage_ -= e->total_charge;
 501 |  96.9k |     }
 502 |   122k |   }
 503 |        |
 504 |        |   // Free the entry here outside of mutex for performance reasons.
 505 |   122k |   if (must_free) {
 506 |        |     // Only call eviction callback if we're sure no one requested erasure
 507 |        |     // FIXME: disabled because of test churn
 508 |  96.9k |     if (false && was_in_cache && !erase_if_last_ref && eviction_callback_ &&
 509 |      0 |         eviction_callback_(e->key(), static_cast<Cache::Handle*>(e),
 510 |      0 |                            e->HasHit())) {
 511 |        |       // Callback took ownership of obj; just free handle
 512 |      0 |       free(e);
 513 |  96.9k |     } else {
 514 |  96.9k |       e->Free(table_.GetAllocator());
 515 |  96.9k |     }
 516 |  96.9k |   }
 517 |   122k |   return must_free;
 518 |   122k | }
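
Note: Lookup() pins: a referenced entry is unlinked from the LRU list (see LRU_Remove() above), and Release() either re-links it, making it evictable again, or frees it when the shard is over capacity or erase_if_last_ref is set. Caller-side this is a pin/unpin protocol. A hedged sketch against the public rocksdb::Cache interface (cache construction and insertion elided; cache and key are placeholders):

    #include "rocksdb/cache.h"

    // Pin an entry, use it, then unpin it. While the handle is held the entry
    // cannot be evicted; Release() may re-link it to the LRU list or free it.
    void ReadPinned(rocksdb::Cache& cache, const rocksdb::Slice& key) {
      rocksdb::Cache::Handle* h = cache.Lookup(key);
      if (h != nullptr) {
        void* value = cache.Value(h);  // valid only while the handle is held
        (void)value;                   // ... use the value ...
        cache.Release(h);              // unpin; entry becomes evictable again
      }
    }
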
 519 |        |
 520 |        | LRUHandle* LRUCacheShard::CreateHandle(const Slice& key, uint32_t hash,
 521 |        |                                        Cache::ObjectPtr value,
 522 |        |                                        const Cache::CacheItemHelper* helper,
 523 |  96.6k |                                        size_t charge) {
 524 |  96.6k |   assert(helper);
 525 |        |   // value == nullptr is reserved for indicating failure in SecondaryCache
 526 |  96.6k |   assert(!(helper->IsSecondaryCacheCompatible() && value == nullptr));
 527 |        |
 528 |        |   // Allocate the memory here outside of the mutex.
 529 |        |   // If the cache is full, we'll have to release it.
 530 |        |   // It shouldn't happen very often though.
 531 |  96.6k |   LRUHandle* e =
 532 |  96.6k |       static_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
 533 |        |
 534 |  96.6k |   e->value = value;
 535 |  96.6k |   e->m_flags = 0;
 536 |  96.6k |   e->im_flags = 0;
 537 |  96.6k |   e->helper = helper;
 538 |  96.6k |   e->key_length = key.size();
 539 |  96.6k |   e->hash = hash;
 540 |  96.6k |   e->refs = 0;
 541 |  96.6k |   e->next = e->prev = nullptr;
 542 |  96.6k |   memcpy(e->key_data, key.data(), key.size());
 543 |  96.6k |   e->CalcTotalCharge(charge, metadata_charge_policy_);
 544 |        |
 545 |  96.6k |   return e;
 546 |  96.6k | }
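
Note: CreateHandle() stores the key inline: LRUHandle ends in a one-byte key_data array, and the malloc of sizeof(LRUHandle) - 1 + key.size() sizes a single block holding both header and key, which is why handles are released with free() rather than delete. A standalone sketch of the trailing-array idiom (Node and MakeNode are illustrative, not RocksDB types):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <string>

    // Illustrative struct: a header with the key stored inline after it,
    // in one allocation.
    struct Node {
      std::size_t key_length;
      char key_data[1];  // actually key_length bytes, extending past the struct
    };

    Node* MakeNode(const std::string& key) {
      // One byte of key_data is already counted in sizeof(Node), hence the -1.
      Node* n = static_cast<Node*>(std::malloc(sizeof(Node) - 1 + key.size()));
      n->key_length = key.size();
      std::memcpy(n->key_data, key.data(), key.size());
      return n;  // caller frees with std::free(n)
    }
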
 547 |        |
 548 |        | Status LRUCacheShard::Insert(const Slice& key, uint32_t hash,
 549 |        |                              Cache::ObjectPtr value,
 550 |        |                              const Cache::CacheItemHelper* helper,
 551 |        |                              size_t charge, LRUHandle** handle,
 552 |  96.7k |                              Cache::Priority priority) {
 553 |  96.7k |   LRUHandle* e = CreateHandle(key, hash, value, helper, charge);
 554 |  96.7k |   e->SetPriority(priority);
 555 |  96.7k |   e->SetInCache(true);
 556 |  96.7k |   return InsertItem(e, handle);
 557 |  96.7k | }
 558 |        |
 559 |        | LRUHandle* LRUCacheShard::CreateStandalone(const Slice& key, uint32_t hash,
 560 |        |                                            Cache::ObjectPtr value,
 561 |        |                                            const Cache::CacheItemHelper* helper,
 562 |        |                                            size_t charge,
 563 |      0 |                                            bool allow_uncharged) {
 564 |      0 |   LRUHandle* e = CreateHandle(key, hash, value, helper, charge);
 565 |      0 |   e->SetIsStandalone(true);
 566 |      0 |   e->Ref();
 567 |        |
 568 |      0 |   autovector<LRUHandle*> last_reference_list;
 569 |        |
 570 |      0 |   {
 571 |      0 |     DMutexLock l(mutex_);
 572 |        |
 573 |      0 |     EvictFromLRU(e->total_charge, &last_reference_list);
 574 |        |
 575 |      0 |     if (strict_capacity_limit_ && (usage_ + e->total_charge) > capacity_) {
 576 |      0 |       if (allow_uncharged) {
 577 |      0 |         e->total_charge = 0;
 578 |      0 |       } else {
 579 |      0 |         free(e);
 580 |      0 |         e = nullptr;
 581 |      0 |       }
 582 |      0 |     } else {
 583 |      0 |       usage_ += e->total_charge;
 584 |      0 |     }
 585 |      0 |   }
 586 |        |
 587 |      0 |   NotifyEvicted(last_reference_list);
 588 |      0 |   return e;
 589 |      0 | }
 590 |        |
 591 |  7.20k | void LRUCacheShard::Erase(const Slice& key, uint32_t hash) {
 592 |  7.20k |   LRUHandle* e;
 593 |  7.20k |   bool last_reference = false;
 594 |  7.20k |   {
 595 |  7.20k |     DMutexLock l(mutex_);
 596 |  7.20k |     e = table_.Remove(key, hash);
 597 |  7.20k |     if (e != nullptr) {
 598 |      0 |       assert(e->InCache());
 599 |      0 |       e->SetInCache(false);
 600 |      0 |       if (!e->HasRefs()) {
 601 |        |         // The entry is in LRU since it's in hash and has no external references
 602 |      0 |         LRU_Remove(e);
 603 |      0 |         assert(usage_ >= e->total_charge);
 604 |      0 |         usage_ -= e->total_charge;
 605 |      0 |         last_reference = true;
 606 |      0 |       }
 607 |      0 |     }
 608 |  7.20k |   }
 609 |        |
 610 |        |   // Free the entry here outside of mutex for performance reasons.
 611 |        |   // last_reference will only be true if e != nullptr.
 612 |  7.20k |   if (last_reference) {
 613 |      0 |     e->Free(table_.GetAllocator());
 614 |      0 |   }
 615 |  7.20k | }
 616 |        |
 617 |      0 | size_t LRUCacheShard::GetUsage() const {
 618 |      0 |   DMutexLock l(mutex_);
 619 |      0 |   return usage_;
 620 |      0 | }
 621 |        |
 622 |      0 | size_t LRUCacheShard::GetPinnedUsage() const {
 623 |      0 |   DMutexLock l(mutex_);
 624 |      0 |   assert(usage_ >= lru_usage_);
 625 |      0 |   return usage_ - lru_usage_;
 626 |      0 | }
 627 |        |
 628 |      0 | size_t LRUCacheShard::GetOccupancyCount() const {
 629 |      0 |   DMutexLock l(mutex_);
 630 |      0 |   return table_.GetOccupancyCount();
 631 |      0 | }
 632 |        |
 633 |      0 | size_t LRUCacheShard::GetTableAddressCount() const {
 634 |      0 |   DMutexLock l(mutex_);
 635 |      0 |   return size_t{1} << table_.GetLengthBits();
 636 |      0 | }
 637 |        |
 638 |      0 | void LRUCacheShard::AppendPrintableOptions(std::string& str) const {
 639 |      0 |   const int kBufferSize = 200;
 640 |      0 |   char buffer[kBufferSize];
 641 |      0 |   {
 642 |      0 |     DMutexLock l(mutex_);
 643 |      0 |     snprintf(buffer, kBufferSize, "    high_pri_pool_ratio: %.3lf\n",
 644 |      0 |              high_pri_pool_ratio_);
 645 |      0 |     snprintf(buffer + strlen(buffer), kBufferSize - strlen(buffer),
 646 |      0 |              "    low_pri_pool_ratio: %.3lf\n", low_pri_pool_ratio_);
 647 |      0 |   }
 648 |      0 |   str.append(buffer);
 649 |      0 | }
 650 |        |
 651 |  48.4k | LRUCache::LRUCache(const LRUCacheOptions& opts) : ShardedCache(opts) {
 652 |  48.4k |   size_t per_shard = GetPerShardCapacity();
 653 |  48.4k |   MemoryAllocator* alloc = memory_allocator();
 654 |  3.10M |   InitShards([&](LRUCacheShard* cs) {
 655 |  3.10M |     new (cs) LRUCacheShard(per_shard, opts.strict_capacity_limit,
 656 |  3.10M |                            opts.high_pri_pool_ratio, opts.low_pri_pool_ratio,
 657 |  3.10M |                            opts.use_adaptive_mutex, opts.metadata_charge_policy,
 658 |  3.10M |                            /* max_upper_hash_bits */ 32 - opts.num_shard_bits,
 659 |  3.10M |                            alloc, &eviction_callback_);
 660 |  3.10M |   });
 661 |  48.4k | }
 662 |        |
 663 |   219k | Cache::ObjectPtr LRUCache::Value(Handle* handle) {
 664 |   219k |   auto h = static_cast<const LRUHandle*>(handle);
 665 |   219k |   return h->value;
 666 |   219k | }
 667 |        |
 668 |      0 | size_t LRUCache::GetCharge(Handle* handle) const {
 669 |      0 |   return static_cast<const LRUHandle*>(handle)->GetCharge(
 670 |      0 |       GetShard(0).metadata_charge_policy_);
 671 |      0 | }
 672 |        |
 673 |        | const Cache::CacheItemHelper* LRUCache::GetCacheItemHelper(
 674 |      0 |     Handle* handle) const {
 675 |      0 |   auto h = static_cast<const LRUHandle*>(handle);
 676 |      0 |   return h->helper;
 677 |      0 | }
 678 |        |
 679 |        | void LRUCache::ApplyToHandle(
 680 |        |     Cache* cache, Handle* handle,
 681 |        |     const std::function<void(const Slice& key, ObjectPtr value, size_t charge,
 682 |      0 |                              const CacheItemHelper* helper)>& callback) {
 683 |      0 |   auto cache_ptr = static_cast<LRUCache*>(cache);
 684 |      0 |   auto h = static_cast<const LRUHandle*>(handle);
 685 |      0 |   callback(h->key(), h->value,
 686 |      0 |            h->GetCharge(cache_ptr->GetShard(0).metadata_charge_policy_),
 687 |      0 |            h->helper);
 688 |      0 | }
 689 |        |
 690 |      0 | size_t LRUCache::TEST_GetLRUSize() {
 691 |      0 |   return SumOverShards([](LRUCacheShard& cs) { return cs.TEST_GetLRUSize(); });
 692 |      0 | }
 693 |        |
 694 |      0 | double LRUCache::GetHighPriPoolRatio() {
 695 |      0 |   return GetShard(0).GetHighPriPoolRatio();
 696 |      0 | }
 697 |        |
 698 |        | }  // namespace lru_cache
 699 |        |
 700 |  48.4k | std::shared_ptr<Cache> LRUCacheOptions::MakeSharedCache() const {
 701 |  48.4k |   if (num_shard_bits >= 20) {
 702 |      0 |     return nullptr;  // The cache cannot be sharded into so many fine pieces.
 703 |      0 |   }
 704 |  48.4k |   if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
 705 |        |     // Invalid high_pri_pool_ratio
 706 |      0 |     return nullptr;
 707 |      0 |   }
 708 |  48.4k |   if (low_pri_pool_ratio < 0.0 || low_pri_pool_ratio > 1.0) {
 709 |        |     // Invalid low_pri_pool_ratio
 710 |      0 |     return nullptr;
 711 |      0 |   }
 712 |  48.4k |   if (low_pri_pool_ratio + high_pri_pool_ratio > 1.0) {
 713 |        |     // Invalid high_pri_pool_ratio and low_pri_pool_ratio combination
 714 |      0 |     return nullptr;
 715 |      0 |   }
 716 |        |   // For sanitized options
 717 |  48.4k |   LRUCacheOptions opts = *this;
 718 |  48.4k |   if (opts.num_shard_bits < 0) {
 719 |      0 |     opts.num_shard_bits = GetDefaultCacheShardBits(capacity);
 720 |      0 |   }
 721 |  48.4k |   std::shared_ptr<Cache> cache = std::make_shared<LRUCache>(opts);
 722 |  48.4k |   if (secondary_cache) {
 723 |      0 |     cache = std::make_shared<CacheWithSecondaryAdapter>(cache, secondary_cache);
 724 |      0 |   }
 725 |  48.4k |   return cache;
 726 |  48.4k | }
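
Note: MakeSharedCache() reports invalid options by returning nullptr rather than a Status, so callers must check the result; a negative num_shard_bits is sanitized via GetDefaultCacheShardBits(). A hedged usage example (assumes the public rocksdb/cache.h; the values are chosen to trip the ratio check):

    #include <cassert>

    #include "rocksdb/cache.h"

    int main() {
      rocksdb::LRUCacheOptions opts;
      opts.capacity = 8 << 20;                // 8 MiB

      opts.high_pri_pool_ratio = 0.7;
      opts.low_pri_pool_ratio = 0.5;          // 0.7 + 0.5 > 1.0: invalid combination
      assert(opts.MakeSharedCache() == nullptr);

      opts.low_pri_pool_ratio = 0.2;          // now a valid combination
      opts.num_shard_bits = -1;               // negative: default chosen from capacity
      assert(opts.MakeSharedCache() != nullptr);
      return 0;
    }
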
 727 |        |
 728 |      0 | std::shared_ptr<RowCache> LRUCacheOptions::MakeSharedRowCache() const {
 729 |      0 |   if (secondary_cache) {
 730 |        |     // Not allowed for a RowCache
 731 |      0 |     return nullptr;
 732 |      0 |   }
 733 |        |   // Works while RowCache is an alias for Cache
 734 |      0 |   return MakeSharedCache();
 735 |      0 | }
 736 |        | }  // namespace ROCKSDB_NAMESPACE