Coverage Report

Created: 2026-04-10 07:52

/src/rocksdb/util/mutexlock.h
Line |  Count | Source
   1 |        | //  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
   2 |        | //  This source code is licensed under both the GPLv2 (found in the
   3 |        | //  COPYING file in the root directory) and Apache 2.0 License
   4 |        | //  (found in the LICENSE.Apache file in the root directory).
   5 |        | //
   6 |        | // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
   7 |        | // Use of this source code is governed by a BSD-style license that can be
   8 |        | // found in the LICENSE file. See the AUTHORS file for names of contributors.
   9 |        |
  10 |        | #pragma once
  11 |        | #include <assert.h>
  12 |        |
  13 |        | #include <atomic>
  14 |        | #include <functional>
  15 |        | #include <memory>
  16 |        | #include <mutex>
  17 |        | #include <thread>
  18 |        |
  19 |        | #include "port/port.h"
  20 |        | #include "util/fastrange.h"
  21 |        | #include "util/hash.h"
  22 |        |
  23 |        | namespace ROCKSDB_NAMESPACE {
  24 |        |
  25 |        | // Helper class that locks a mutex on construction and unlocks the mutex when
  26 |        | // the destructor of the MutexLock object is invoked.
  27 |        | //
  28 |        | // Typical usage:
  29 |        | //
  30 |        | //   void MyClass::MyMethod() {
  31 |        | //     MutexLock l(&mu_);       // mu_ is an instance variable
  32 |        | //     ... some complex code, possibly with multiple return paths ...
  33 |        | //   }
  34 |        |
  35 |        | class MutexLock {
  36 |        |  public:
  37 |  2.51M |   explicit MutexLock(port::Mutex* mu) : mu_(mu) { this->mu_->Lock(); }
  38 |        |   // No copying allowed
  39 |        |   MutexLock(const MutexLock&) = delete;
  40 |        |   void operator=(const MutexLock&) = delete;
  41 |        |
  42 |  2.51M |   ~MutexLock() { this->mu_->Unlock(); }
  43 |        |
  44 |        |  private:
  45 |        |   port::Mutex* const mu_;
  46 |        | };
  47 |        |
  48 |        | //
  49 |        | // Acquire a ReadLock on the specified RWMutex.
  50 |        | // The Lock will be automatically released when the
  51 |        | // object goes out of scope.
  52 |        | //
  53 |        | class ReadLock {
  54 |        |  public:
  55 |  83.7k |   explicit ReadLock(port::RWMutex* mu) : mu_(mu) { this->mu_->ReadLock(); }
  56 |        |   // No copying allowed
  57 |        |   ReadLock(const ReadLock&) = delete;
  58 |        |   void operator=(const ReadLock&) = delete;
  59 |        |
  60 |  83.7k |   ~ReadLock() { this->mu_->ReadUnlock(); }
  61 |        |
  62 |        |  private:
  63 |        |   port::RWMutex* const mu_;
  64 |        | };
  65 |        |
  66 |        | //
  67 |        | // Automatically unlock a locked mutex when the object is destroyed
  68 |        | //
  69 |        | class ReadUnlock {
  70 |        |  public:
  71 |      0 |   explicit ReadUnlock(port::RWMutex* mu) : mu_(mu) { mu->AssertHeld(); }
  72 |        |   // No copying allowed
  73 |        |   ReadUnlock(const ReadUnlock&) = delete;
  74 |        |   ReadUnlock& operator=(const ReadUnlock&) = delete;
  75 |        |
  76 |      0 |   ~ReadUnlock() { mu_->ReadUnlock(); }
  77 |        |
  78 |        |  private:
  79 |        |   port::RWMutex* const mu_;
  80 |        | };
  81 |        |
  82 |        | //
  83 |        | // Acquire a WriteLock on the specified RWMutex.
  84 |        | // The Lock will be automatically released when the
  85 |        | // object goes out of scope.
  86 |        | //
  87 |        | class WriteLock {
  88 |        |  public:
  89 |   169k |   explicit WriteLock(port::RWMutex* mu) : mu_(mu) { this->mu_->WriteLock(); }
  90 |        |   // No copying allowed
  91 |        |   WriteLock(const WriteLock&) = delete;
  92 |        |   void operator=(const WriteLock&) = delete;
  93 |        |
  94 |   169k |   ~WriteLock() { this->mu_->WriteUnlock(); }
  95 |        |
  96 |        |  private:
  97 |        |   port::RWMutex* const mu_;
  98 |        | };
  99 |        |
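
All three RAII guards above release their lock in the destructor, so every
return path out of the guarded scope unlocks correctly; note that ReadUnlock
(both of its executable lines at count 0) was never exercised in this run. A
minimal usage sketch of the read/write guards follows. It is illustrative
only, not RocksDB code: SharedCounter and its members are invented names, and
the code is assumed to live inside ROCKSDB_NAMESPACE so the names resolve.

    #include <cstdint>
    #include "util/mutexlock.h"  // the header shown in this report

    // Hypothetical example: many threads may call Get() concurrently under
    // the shared ReadLock, while Increment() takes the exclusive WriteLock.
    class SharedCounter {
     public:
      uint64_t Get() {
        ReadLock l(&rw_mutex_);   // shared lock, released at '}'
        return value_;
      }
      void Increment() {
        WriteLock l(&rw_mutex_);  // exclusive lock, released at '}'
        ++value_;
      }

     private:
      port::RWMutex rw_mutex_;
      uint64_t value_ = 0;
    };
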
 100 |        | //
 101 |        | // SpinMutex has very low overhead for low-contention cases.  Method names
 102 |        | // are chosen so you can use std::unique_lock or std::lock_guard with it.
 103 |        | //
 104 |        | class SpinMutex {
 105 |        |  public:
 106 |  3.30M |   SpinMutex() : locked_(false) {}
 107 |        |
 108 |  1.32M |   bool try_lock() {
 109 |  1.32M |     auto currently_locked = locked_.load(std::memory_order_relaxed);
 110 |  1.32M |     return !currently_locked &&
 111 |  1.32M |            locked_.compare_exchange_weak(currently_locked, true,
 112 |  1.32M |                                          std::memory_order_acquire,
 113 |  1.32M |                                          std::memory_order_relaxed);
 114 |  1.32M |   }
 115 |        |
 116 |  9.40k |   void lock() {
 117 |  9.40k |     for (size_t tries = 0;; ++tries) {
 118 |  9.40k |       if (try_lock()) {
 119 |        |         // success
 120 |  9.40k |         break;
 121 |  9.40k |       }
 122 |      0 |       port::AsmVolatilePause();
 123 |      0 |       if (tries > 100) {
 124 |      0 |         std::this_thread::yield();
 125 |      0 |       }
 126 |      0 |     }
 127 |  9.40k |   }
 128 |        |
 129 |  1.32M |   void unlock() { locked_.store(false, std::memory_order_release); }
 130 |        |
 131 |        |  private:
 132 |        |   std::atomic<bool> locked_;
 133 |        | };
 134 |        |
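
The counts above show lock() taking its fast path every time in this run: all
9.40k calls succeeded on the first try_lock(), so the AsmVolatilePause() and
yield() backoff lines remain uncovered. try_lock() itself has a
test-and-test-and-set shape: the relaxed load lets a contending thread fail
on a plain cache-line read instead of bouncing the line with failed
compare-exchange attempts. Because SpinMutex provides lock(), try_lock(), and
unlock(), it satisfies the standard Lockable requirements, as the header
comment notes; here is a sketch of pairing it with the standard RAII helpers
(illustrative only; spin_mutex, guarded_value, Bump, and TryBump are invented
names, assumed to live inside ROCKSDB_NAMESPACE):

    #include <mutex>  // std::lock_guard, std::unique_lock, std::try_to_lock

    SpinMutex spin_mutex;
    int guarded_value = 0;

    void Bump() {
      std::lock_guard<SpinMutex> guard(spin_mutex);  // spins until acquired
      ++guarded_value;
    }  // guard's destructor calls spin_mutex.unlock()

    bool TryBump() {
      std::unique_lock<SpinMutex> guard(spin_mutex, std::try_to_lock);
      if (!guard.owns_lock()) {
        return false;  // contended: caller can back off instead of spinning
      }
      ++guarded_value;
      return true;
    }
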
 135 |        | // For preventing false sharing, especially for mutexes.
 136 |        | // NOTE: if a mutex is less than half the size of a cache line, it would
 137 |        | // make more sense for Striped structure below to pack more than one mutex
 138 |        | // into each cache line, as this would only reduce contention for the same
 139 |        | // amount of space and cache sharing. However, a mutex is often 40 bytes out
 140 |        | // of a 64 byte cache line.
 141 |        | template <class T>
 142 |        | struct ALIGN_AS(CACHE_LINE_SIZE) CacheAlignedWrapper {
 143 |        |   T obj_;
 144 |        | };
 145 |        | template <class T>
 146 |        | struct Unwrap {
 147 |        |   using type = T;
 148 |        |   static type& Go(T& t) { return t; }
 149 |        | };
 150 |        | template <class T>
 151 |        | struct Unwrap<CacheAlignedWrapper<T>> {
 152 |        |   using type = T;
 153 |   108k |   static type& Go(CacheAlignedWrapper<T>& t) { return t.obj_; }
 154 |        | };
 155 |        |
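
A few compile-time checks (illustrative, not part of the header) pin down
what the wrapper pair guarantees; they assume <mutex> and <type_traits> are
available and that CACHE_LINE_SIZE is at least alignof(std::mutex):

    #include <mutex>
    #include <type_traits>

    // Each wrapped object starts on its own cache line...
    static_assert(alignof(CacheAlignedWrapper<std::mutex>) == CACHE_LINE_SIZE);
    // ...and sizeof is always a multiple of alignof, so neighboring array
    // elements (as in Striped below) never share a cache line.
    static_assert(sizeof(CacheAlignedWrapper<std::mutex>) % CACHE_LINE_SIZE == 0);
    // Unwrap is the identity for plain types and strips the wrapper
    // otherwise, so callers see the payload type either way.
    static_assert(std::is_same_v<Unwrap<CacheAlignedWrapper<std::mutex>>::type,
                                 std::mutex>);
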
 156 |        | //
 157 |        | // Inspired by Guava: https://github.com/google/guava/wiki/StripedExplained
 158 |        | // A striped Lock. This offers the underlying lock striping similar
 159 |        | // to that of ConcurrentHashMap in a reusable form, and extends it for
 160 |        | // semaphores and read-write locks. Conceptually, lock striping is the technique
 161 |        | // of dividing a lock into many <i>stripes</i>, increasing the granularity of a
 162 |        | // single lock and allowing independent operations to lock different stripes and
 163 |        | // proceed concurrently, instead of creating contention for a single lock.
 164 |        | //
 165 |        | template <class T, class Key = Slice, class Hash = SliceNPHasher64>
 166 |        | class Striped {
 167 |        |  public:
 168 |        |   explicit Striped(size_t stripe_count)
 169 |   167k |       : stripe_count_(stripe_count), data_(new T[stripe_count]) {}
 170 |        |
 171 |        |   using Unwrapped = typename Unwrap<T>::type;
 172 |   108k |   Unwrapped& Get(const Key& key, uint64_t seed = 0) {
 173 |   108k |     size_t index = FastRangeGeneric(hash_(key, seed), stripe_count_);
 174 |   108k |     return Unwrap<T>::Go(data_[index]);
 175 |   108k |   }
 176 |        |
 177 |        |   size_t ApproximateMemoryUsage() const {
 178 |        |     // NOTE: could use malloc_usable_size() here, but that could count unmapped
 179 |        |     // pages and could mess up unit test OccLockBucketsTest::CacheAligned
 180 |        |     return sizeof(*this) + stripe_count_ * sizeof(T);
 181 |        |   }
 182 |        |
 183 |        |  private:
 184 |        |   size_t stripe_count_;
 185 |        |   std::unique_ptr<T[]> data_;
 186 |        |   Hash hash_;
 187 |        | };
 188 |        |
 189 |        | }  // namespace ROCKSDB_NAMESPACE
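
Putting the pieces together, a hedged usage sketch of Striped (kStripeCount,
g_key_mutexes, and UpdateEntry are invented for illustration and are not
RocksDB APIs; the code is assumed to live inside ROCKSDB_NAMESPACE):

    // One cache-line-aligned mutex per stripe. Get() hashes the key with
    // the default SliceNPHasher64 and maps the hash onto a stripe with
    // FastRangeGeneric; Unwrap then yields the port::Mutex itself, so the
    // result can be handed straight to MutexLock.
    constexpr size_t kStripeCount = 16;
    Striped<CacheAlignedWrapper<port::Mutex>> g_key_mutexes(kStripeCount);

    void UpdateEntry(const Slice& key) {
      MutexLock l(&g_key_mutexes.Get(key));
      // ... mutate the state associated with `key` ...
    }

Two independent keys still collide on the same stripe with probability
roughly 1/kStripeCount, so striping reduces contention rather than
eliminating it.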