Coverage Report

Created: 2025-11-15 06:18

/src/abseil-cpp/absl/base/internal/spinlock.cc
 Line|  Count|Source
    1|       |// Copyright 2017 The Abseil Authors.
    2|       |//
    3|       |// Licensed under the Apache License, Version 2.0 (the "License");
    4|       |// you may not use this file except in compliance with the License.
    5|       |// You may obtain a copy of the License at
    6|       |//
    7|       |//      https://www.apache.org/licenses/LICENSE-2.0
    8|       |//
    9|       |// Unless required by applicable law or agreed to in writing, software
   10|       |// distributed under the License is distributed on an "AS IS" BASIS,
   11|       |// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   12|       |// See the License for the specific language governing permissions and
   13|       |// limitations under the License.
   14|       |
   15|       |#include "absl/base/internal/spinlock.h"
   16|       |
   17|       |#include <algorithm>
   18|       |#include <atomic>
   19|       |#include <cstdint>
   20|       |#include <limits>
   21|       |
   22|       |#include "absl/base/attributes.h"
   23|       |#include "absl/base/call_once.h"
   24|       |#include "absl/base/config.h"
   25|       |#include "absl/base/internal/atomic_hook.h"
   26|       |#include "absl/base/internal/cycleclock.h"
   27|       |#include "absl/base/internal/scheduling_mode.h"
   28|       |#include "absl/base/internal/spinlock_wait.h"
   29|       |#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
   30|       |#include "absl/base/internal/tsan_mutex_interface.h"
   31|       |
   32|       |// Description of lock-word:
   33|       |//  31..00: [............................3][2][1][0]
   34|       |//
   35|       |//     [0]: kSpinLockHeld
   36|       |//     [1]: kSpinLockCooperative
   37|       |//     [2]: kSpinLockDisabledScheduling
   38|       |// [31..3]: ONLY kSpinLockSleeper OR
   39|       |//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
   40|       |//
   41|       |// Detailed descriptions:
   42|       |//
   43|       |// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
   44|       |//
   45|       |// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
   46|       |//          contended iff kSpinLockCooperative is set.
   47|       |//
   48|       |// Bit [2]: This bit is exclusive from bit [1].  It is used only by a
   49|       |//          non-cooperative lock.  When set, indicates that scheduling was
   50|       |//          successfully disabled when the lock was acquired.  May be unset,
   51|       |//          even if non-cooperative, if a ThreadIdentity did not yet exist at
   52|       |//          time of acquisition.
   53|       |//
   54|       |// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
   55|       |//          acquired without contention, however, at least one waiter exists.
   56|       |//
   57|       |//          Otherwise, bits [31..3] represent the time spent by the current lock
   58|       |//          holder to acquire the lock.  There may be outstanding waiter(s).
   59|       |
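
The lock-word layout described above can be made concrete with a small standalone sketch. The constant values below mirror those declared in absl/base/internal/spinlock.h; they are reproduced here for illustration only and are not part of this file.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kSpinLockHeld = 1;                // bit [0]
constexpr uint32_t kSpinLockCooperative = 2;         // bit [1]
constexpr uint32_t kSpinLockDisabledScheduling = 4;  // bit [2]
constexpr uint32_t kSpinLockSleeper = 8;             // bit [3]
constexpr uint32_t kWaitTimeMask =                   // bits [31..3]
    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);

int main() {
  // A held, cooperative lock whose holder waited 0x40 scaled cycles.
  uint32_t word = kSpinLockHeld | kSpinLockCooperative | 0x40;
  std::printf("held=%u coop=%u wait_bits=0x%x\n", word & kSpinLockHeld,
              (word & kSpinLockCooperative) >> 1, word & kWaitTimeMask);
  // Prints: held=1 coop=1 wait_bits=0x40
}
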
   60|       |namespace absl {
   61|       |ABSL_NAMESPACE_BEGIN
   62|       |namespace base_internal {
   63|       |
   64|       |ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void (*)(
   65|       |    const void *lock, int64_t wait_cycles)>
   66|       |    submit_profile_data;
   67|       |
   68|       |void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
   69|      0|                                         int64_t wait_cycles)) {
   70|      0|  submit_profile_data.Store(fn);
   71|      0|}
   72|       |
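
As a usage sketch (hypothetical, since this is an internal API): a process could install a callback to aggregate contention data, along these lines. MySpinLockProfiler and total_wait_cycles are illustrative names, not part of Abseil.

#include <atomic>
#include <cstdint>

// Hypothetical callback: sums the wait cycles reported for contended
// acquisitions. SlowUnlock() below invokes it via submit_profile_data.
static std::atomic<int64_t> total_wait_cycles{0};

static void MySpinLockProfiler(const void* /*lock*/, int64_t wait_cycles) {
  total_wait_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
}

// During startup:
//   absl::base_internal::RegisterSpinLockProfiler(&MySpinLockProfiler);
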
   73|       |// Monitor the lock to see if its value changes within some time period
   74|       |// (adaptive_spin_count_ loop iterations). The last value read from the lock
   75|       |// is returned from the method.
   76|       |ABSL_CONST_INIT std::atomic<int> SpinLock::adaptive_spin_count_{0};
   77|      0|uint32_t SpinLock::SpinLoop() {
   78|       |  // We are already in the slow path of SpinLock, initialize the
   79|       |  // adaptive_spin_count here.
   80|      0|  if (adaptive_spin_count_.load(std::memory_order_relaxed) == 0) {
   81|      0|    int current_spin_count = 0;
   82|      0|    int new_spin_count = NumCPUs() > 1 ? 1000 : 1;
   83|       |    // If this fails, the value will remain unchanged. We may not spin for the
   84|       |    // intended duration, but that is still safe. We will try again on the next
   85|       |    // call to SpinLoop.
   86|      0|    adaptive_spin_count_.compare_exchange_weak(
   87|      0|        current_spin_count, new_spin_count, std::memory_order_relaxed,
   88|      0|        std::memory_order_relaxed);
   89|      0|  }
   90|      0|  int c = adaptive_spin_count_.load(std::memory_order_relaxed);
   91|      0|  uint32_t lock_value;
   92|      0|  do {
   93|      0|    lock_value = lockword_.load(std::memory_order_relaxed);
   94|      0|  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
   95|      0|  return lock_value;
   96|      0|}
   97|       |
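
The initialization at lines 80-88 is a best-effort, racy-but-safe pattern: any thread may publish the spin count, a failed (or spuriously failed) compare_exchange_weak leaves the value at zero, and the next SpinLoop() call simply retries. A minimal standalone sketch of the same pattern, with num_cpus standing in for NumCPUs():

#include <atomic>

std::atomic<int> adaptive_spin_count{0};

int GetSpinCount(int num_cpus) {
  if (adaptive_spin_count.load(std::memory_order_relaxed) == 0) {
    int expected = 0;
    // Spin ~1000 iterations on multiprocessors; spinning is pointless on a
    // single CPU, so give up after one iteration there.
    int desired = num_cpus > 1 ? 1000 : 1;
    // A failure (including a spurious one) is harmless: the count stays 0
    // and the next call retries.
    adaptive_spin_count.compare_exchange_weak(expected, desired,
                                              std::memory_order_relaxed,
                                              std::memory_order_relaxed);
  }
  return adaptive_spin_count.load(std::memory_order_relaxed);
}
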
   98|      0|void SpinLock::SlowLock() {
   99|      0|  uint32_t lock_value = SpinLoop();
  100|      0|  lock_value = TryLockInternal(lock_value, 0);
  101|      0|  if ((lock_value & kSpinLockHeld) == 0) {
  102|      0|    return;
  103|      0|  }
  104|       |
  105|      0|  SchedulingMode scheduling_mode;
  106|      0|  if ((lock_value & kSpinLockCooperative) != 0) {
  107|      0|    scheduling_mode = SCHEDULE_COOPERATIVE_AND_KERNEL;
  108|      0|  } else {
  109|      0|    scheduling_mode = SCHEDULE_KERNEL_ONLY;
  110|      0|  }
  111|       |
  112|       |  // The lock was not obtained initially, so this thread needs to wait for
  113|       |  // it.  Record the current timestamp in the local variable wait_start_time
  114|       |  // so the total wait time can be stored in the lockword once this thread
  115|       |  // obtains the lock.
  116|      0|  int64_t wait_start_time = CycleClock::Now();
  117|      0|  uint32_t wait_cycles = 0;
  118|      0|  int lock_wait_call_count = 0;
  119|      0|  while ((lock_value & kSpinLockHeld) != 0) {
  120|       |    // If the lock is currently held, but not marked as having a sleeper, mark
  121|       |    // it as having a sleeper.
  122|      0|    if ((lock_value & kWaitTimeMask) == 0) {
  123|       |      // Here, just "mark" that the thread is going to sleep.  Don't store the
  124|       |      // lock wait time in the lock -- the lock word stores the amount of time
  125|       |      // that the current holder waited before acquiring the lock, not the wait
  126|       |      // time of any thread currently waiting to acquire it.
  127|      0|      if (lockword_.compare_exchange_strong(
  128|      0|              lock_value, lock_value | kSpinLockSleeper,
  129|      0|              std::memory_order_relaxed, std::memory_order_relaxed)) {
  130|       |        // Successfully transitioned to kSpinLockSleeper.  Pass
  131|       |        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
  132|       |        // the last lock_value observed.
  133|      0|        lock_value |= kSpinLockSleeper;
  134|      0|      } else if ((lock_value & kSpinLockHeld) == 0) {
  135|       |        // Lock is free again, so try and acquire it before sleeping.  The
  136|       |        // new lock state will be the number of cycles this thread waited if
  137|       |        // this thread obtains the lock.
  138|      0|        lock_value = TryLockInternal(lock_value, wait_cycles);
  139|      0|        continue;  // Skip the delay at the end of the loop.
  140|      0|      } else if ((lock_value & kWaitTimeMask) == 0) {
  141|       |        // The lock is still held, without a waiter being marked, but something
  142|       |        // else about the lock word changed, causing our CAS to fail. For
  143|       |        // example, a new lock holder may have acquired the lock with
  144|       |        // kSpinLockDisabledScheduling set, whereas the previous holder had not
  145|       |        // set that flag. In this case, attempt again to mark ourselves as a
  146|       |        // waiter.
  147|      0|        continue;
  148|      0|      }
  149|      0|    }
  150|       |
  151|       |    // SpinLockDelay() calls into fiber scheduler, we need to see
  152|       |    // synchronization there to avoid false positives.
  153|      0|    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
  154|       |    // Wait for an OS specific delay.
  155|      0|    SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
  156|      0|                  scheduling_mode);
  157|      0|    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
  158|       |    // Spin again after returning from the wait routine to give this thread
  159|       |    // some chance of obtaining the lock.
  160|      0|    lock_value = SpinLoop();
  161|      0|    wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
  162|      0|    lock_value = TryLockInternal(lock_value, wait_cycles);
  163|      0|  }
  164|      0|}
  165|       |
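
SlowLock() alternates SpinLoop() probes with SpinLockDelay() waits. The sketch below is not Abseil's implementation (the real one, declared in absl/base/internal/spinlock_wait.h, is OS-specific); it only illustrates, under that caveat, the yield-then-backoff role the call plays in the loop above.

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

// Illustrative stand-in for SpinLockDelay(): early iterations just yield,
// later iterations sleep with a capped exponential backoff.
void SketchDelay(std::atomic<uint32_t>* /*w*/, uint32_t /*value*/, int loop) {
  if (loop < 2) {
    std::this_thread::yield();
  } else {
    int64_t us = int64_t{1} << std::min(loop, 12);  // cap at ~4 ms
    std::this_thread::sleep_for(std::chrono::microseconds(us));
  }
}
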
  166|      0|void SpinLock::SlowUnlock(uint32_t lock_value) {
  167|      0|  SpinLockWake(&lockword_,
  168|      0|               false);  // wake waiter if necessary
  169|       |
  170|       |  // If our acquisition was contended, collect contentionz profile info.  We
  171|       |  // reserve a unitary wait time to represent that a waiter exists without our
  172|       |  // own acquisition having been contended.
  173|      0|  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
  174|      0|    const int64_t wait_cycles = DecodeWaitCycles(lock_value);
  175|      0|    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
  176|      0|    submit_profile_data(this, wait_cycles);
  177|      0|    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
  178|      0|  }
  179|      0|}
  180|       |
  181|       |// We use the upper 29 bits of the lock word to store the time spent waiting to
  182|       |// acquire this lock.  This is reported by contentionz profiling.  Since the
  183|       |// lower bits of the cycle counter wrap very quickly on high-frequency
  184|       |// processors we divide to reduce the granularity to 2^kProfileTimestampShift
  185|       |// sized units.  On a 4Ghz machine this will lose track of wait times greater
  186|       |// than (2^29/4 Ghz)*128 =~ 17.2 seconds.  Such waits should be extremely rare.
  187|       |static constexpr int kProfileTimestampShift = 7;
  188|       |
  189|       |// We currently reserve the lower 3 bits.
  190|       |static constexpr int kLockwordReservedShift = 3;
  191|       |
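
The 17.2-second figure in the comment above follows directly from the two constants: 29 payload bits at a granularity of 2^7 cycles per unit gives 2^36 trackable cycles, which a 4 GHz clock (the comment's assumed rate) exhausts in about 17.2 s. A quick standalone check:

#include <cstdint>
#include <cstdio>

int main() {
  constexpr int kProfileTimestampShift = 7;  // 128-cycle granularity
  constexpr int kLockwordReservedShift = 3;  // 32 - 3 = 29 payload bits
  constexpr uint64_t max_scaled = uint64_t{1} << (32 - kLockwordReservedShift);
  constexpr uint64_t max_cycles = max_scaled << kProfileTimestampShift;  // 2^36
  std::printf("%.1f s\n", static_cast<double>(max_cycles) / 4e9);  // 17.2 s
}
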
  192|       |uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
  193|      0|                                    int64_t wait_end_time) {
  194|      0|  static const int64_t kMaxWaitTime =
  195|      0|      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  196|      0|  int64_t scaled_wait_time =
  197|      0|      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
  198|       |
  199|       |  // Return a representation of the time spent waiting that can be stored in
  200|       |  // the lock word's upper bits.
  201|      0|  uint32_t clamped = static_cast<uint32_t>(
  202|      0|      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
  203|       |
  204|      0|  if (clamped == 0) {
  205|      0|    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
  206|      0|  }
  207|       |  // Bump up value if necessary to avoid returning kSpinLockSleeper.
  208|      0|  const uint32_t kMinWaitTime =
  209|      0|      kSpinLockSleeper + (1 << kLockwordReservedShift);
  210|      0|  if (clamped == kSpinLockSleeper) {
  211|      0|    return kMinWaitTime;
  212|      0|  }
  213|      0|  return clamped;
  214|      0|}
  215|       |
  216|      0|int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
  217|       |  // Cast to uint32_t first to ensure bits [63:32] are cleared.
  218|      0|  const int64_t scaled_wait_time =
  219|      0|      static_cast<uint32_t>(lock_value & kWaitTimeMask);
  220|      0|  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
  221|      0|}
  222|       |
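
To see the two special cases in EncodeWaitCycles() concretely, here is a standalone copy of its logic with the constants inlined (kSpinLockSleeper = 8, mirroring spinlock.h). A wait that scales to exactly kSpinLockSleeper is bumped to the next representable value, 16, so that "waiter present, no contention recorded" stays distinguishable from a real measurement:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

constexpr uint32_t kSpinLockSleeper = 8;
constexpr int kProfileTimestampShift = 7;
constexpr int kLockwordReservedShift = 3;

uint32_t Encode(int64_t start, int64_t end) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  int64_t scaled = (end - start) >> kProfileTimestampShift;
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled, kMaxWaitTime) << kLockwordReservedShift);
  if (clamped == 0) return kSpinLockSleeper;  // wake waiters, no profile
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  return clamped == kSpinLockSleeper ? kMinWaitTime : clamped;
}

int main() {
  std::printf("%u\n", Encode(0, 64));    // < 128 cycles: 8 (sleeper only)
  std::printf("%u\n", Encode(0, 128));   // scales to 8, bumped to 16
  std::printf("%u\n", Encode(0, 1024));  // scales to 8 units -> encoded 64
}
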
  222|       |
  223|       |}  // namespace base_internal
  224|       |ABSL_NAMESPACE_END
  225|       |}  // namespace absl