Coverage Report

Created: 2023-09-25 06:27

/src/abseil-cpp/absl/base/internal/spinlock.cc

// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/base/internal/spinlock.h"

#include <algorithm>
#include <atomic>
#include <limits>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/spinlock_wait.h"
#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
#include "absl/base/call_once.h"

// Description of lock-word:
//  31..00: [............................3][2][1][0]
//
//     [0]: kSpinLockHeld
//     [1]: kSpinLockCooperative
//     [2]: kSpinLockDisabledScheduling
// [31..3]: ONLY kSpinLockSleeper OR
//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
//
// Detailed descriptions:
//
// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
//
// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
//          contended iff kSpinLockCooperative is set.
//
// Bit [2]: This bit is exclusive from bit [1].  It is used only by a
//          non-cooperative lock.  When set, indicates that scheduling was
//          successfully disabled when the lock was acquired.  May be unset,
//          even if non-cooperative, if a ThreadIdentity did not yet exist at
//          time of acquisition.
//
// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
//          acquired without contention; however, at least one waiter exists.
//
//          Otherwise, bits [31..3] represent the time spent by the current
//          lock holder to acquire the lock.  There may be outstanding
//          waiter(s).

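// Illustrative sketch (not part of the original file): a standalone model of
// the lock-word layout described above.  The bit values are assumed to mirror
// the constants declared in spinlock.h; the names below are local to this
// sketch.
#include <cstdint>

namespace lockword_sketch {
constexpr uint32_t kHeld = 1u << 0;                // [0] kSpinLockHeld
constexpr uint32_t kCooperative = 1u << 1;         // [1] kSpinLockCooperative
constexpr uint32_t kDisabledScheduling = 1u << 2;  // [2] kSpinLockDisabledScheduling
constexpr uint32_t kSleeper = 1u << 3;             // [3] kSpinLockSleeper
constexpr uint32_t kWaitTimeMask =
    ~(kHeld | kCooperative | kDisabledScheduling);

// Held, uncontended, but with at least one waiter: only bit [3] of the
// upper field is set.
static_assert((kHeld | kSleeper) >> 3 == 1,
              "only kSpinLockSleeper is set above bit 2");
// The three flag bits never alias the wait-time field.
static_assert((kWaitTimeMask & (kHeld | kCooperative | kDisabledScheduling)) == 0,
              "the wait-time field excludes the low flag bits");
}  // namespace lockword_sketch
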
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
    const void *lock, int64_t wait_cycles)>
    submit_profile_data;

void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
                                         int64_t wait_cycles)) {
  submit_profile_data.Store(fn);
}

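// Illustrative sketch (not part of the original file): how a client might
// hook contention profiling.  The callback name and its body are
// hypothetical; only the signature comes from RegisterSpinLockProfiler above.
namespace spinlock_profiler_sketch {
std::atomic<int64_t> total_wait_cycles{0};

void ExampleProfiler(const void * /*lock*/, int64_t wait_cycles) {
  // Aggregate the contention reported for all locks.
  total_wait_cycles.fetch_add(wait_cycles, std::memory_order_relaxed);
}
// Register once at startup, e.g. from main():
//   absl::base_internal::RegisterSpinLockProfiler(&ExampleProfiler);
}  // namespace spinlock_profiler_sketch
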
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
// Static member variable definitions.
constexpr uint32_t SpinLock::kSpinLockHeld;
constexpr uint32_t SpinLock::kSpinLockCooperative;
constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
constexpr uint32_t SpinLock::kSpinLockSleeper;
constexpr uint32_t SpinLock::kWaitTimeMask;
#endif

// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}

// Monitor the lock to see if its value changes within some time period
// (adaptive_spin_count loop iterations).  The last value read from the lock
// is returned from the method.
uint32_t SpinLock::SpinLoop() {
  // We are already in the slow path of SpinLock; initialize the
  // adaptive_spin_count here.
  ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
  ABSL_CONST_INIT static int adaptive_spin_count = 0;
  base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
    adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
  });

  int c = adaptive_spin_count;
  uint32_t lock_value;
  do {
    lock_value = lockword_.load(std::memory_order_relaxed);
  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
  return lock_value;
}

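// Illustrative sketch (not part of the original file): the bounded-spin
// pattern above, isolated on a plain std::atomic<uint32_t>.  All names here
// are hypothetical.
namespace bounded_spin_sketch {
inline uint32_t BoundedSpin(std::atomic<uint32_t> &word, uint32_t held_bit,
                            int max_iters) {
  uint32_t v;
  do {
    // Relaxed loads suffice while merely monitoring the word; acquire
    // semantics are only needed on the CAS that actually takes the lock.
    v = word.load(std::memory_order_relaxed);
  } while ((v & held_bit) != 0 && --max_iters > 0);
  return v;  // Last observed value; the caller decides what to do next.
}
}  // namespace bounded_spin_sketch
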
void SpinLock::SlowLock() {
  uint32_t lock_value = SpinLoop();
  lock_value = TryLockInternal(lock_value, 0);
  if ((lock_value & kSpinLockHeld) == 0) {
    return;
  }

  base_internal::SchedulingMode scheduling_mode;
  if ((lock_value & kSpinLockCooperative) != 0) {
    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  } else {
    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
  }

  // The lock was not obtained initially, so this thread needs to wait for
  // it.  Record the current timestamp in the local variable wait_start_time
  // so the total wait time can be stored in the lockword once this thread
  // obtains the lock.
  int64_t wait_start_time = CycleClock::Now();
  uint32_t wait_cycles = 0;
  int lock_wait_call_count = 0;
  while ((lock_value & kSpinLockHeld) != 0) {
    // If the lock is currently held, but not marked as having a sleeper, mark
    // it as having a sleeper.
    if ((lock_value & kWaitTimeMask) == 0) {
      // Here, just "mark" that the thread is going to sleep.  Don't store the
      // lock wait time in the lock -- the lock word stores the amount of time
      // that the current holder waited before acquiring the lock, not the wait
      // time of any thread currently waiting to acquire it.
      if (lockword_.compare_exchange_strong(
              lock_value, lock_value | kSpinLockSleeper,
              std::memory_order_relaxed, std::memory_order_relaxed)) {
        // Successfully transitioned to kSpinLockSleeper.  Pass
        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
        // the last lock_value observed.
        lock_value |= kSpinLockSleeper;
      } else if ((lock_value & kSpinLockHeld) == 0) {
        // Lock is free again, so try and acquire it before sleeping.  The
        // new lock state will be the number of cycles this thread waited if
        // this thread obtains the lock.
        lock_value = TryLockInternal(lock_value, wait_cycles);
        continue;   // Skip the delay at the end of the loop.
      } else if ((lock_value & kWaitTimeMask) == 0) {
        // The lock is still held, without a waiter being marked, but something
        // else about the lock word changed, causing our CAS to fail.  For
        // example, a new lock holder may have acquired the lock with
        // kSpinLockDisabledScheduling set, whereas the previous holder had not
        // set that flag.  In this case, attempt again to mark ourselves as a
        // waiter.
        continue;
      }
    }

    // SpinLockDelay() calls into the fiber scheduler; we need to see
    // synchronization there to avoid false positives.
    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
    // Wait for an OS specific delay.
    base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
                                 scheduling_mode);
    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
    // Spin again after returning from the wait routine to give this thread
    // some chance of obtaining the lock.
    lock_value = SpinLoop();
    wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
    lock_value = TryLockInternal(lock_value, wait_cycles);
  }
}

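// Illustrative sketch (not part of the original file): the "mark a sleeper
// before sleeping" step above, isolated.  compare_exchange_strong refreshes
// `observed` in place on failure, which is why SlowLock can simply
// re-evaluate the new word and `continue` without another explicit load.
// Names here are hypothetical.
namespace mark_sleeper_sketch {
inline bool MarkSleeper(std::atomic<uint32_t> &word, uint32_t &observed,
                        uint32_t sleeper_bit) {
  return word.compare_exchange_strong(observed, observed | sleeper_bit,
                                      std::memory_order_relaxed,
                                      std::memory_order_relaxed);
}
}  // namespace mark_sleeper_sketch
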
void SpinLock::SlowUnlock(uint32_t lock_value) {
  base_internal::SpinLockWake(&lockword_,
                              false);  // wake waiter if necessary

  // If our acquisition was contended, collect contentionz profile info.  We
  // reserve a unitary wait time to represent that a waiter exists without our
  // own acquisition having been contended.
  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
    const int64_t wait_cycles = DecodeWaitCycles(lock_value);
    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
    submit_profile_data(this, wait_cycles);
    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
  }
}

// We use the upper 29 bits of the lock word to store the time spent waiting
// to acquire this lock.  This is reported by contentionz profiling.  Since
// the lower bits of the cycle counter wrap very quickly on high-frequency
// processors, we divide to reduce the granularity to
// 2^kProfileTimestampShift-sized units.  On a 4 GHz machine this will lose
// track of wait times greater than (2^29 / 4 GHz) * 128 ≈ 17.2 seconds.
// Such waits should be extremely rare.
static constexpr int kProfileTimestampShift = 7;

// We currently reserve the lower 3 bits.
static constexpr int kLockwordReservedShift = 3;

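// Illustrative sketch (not part of the original file): the saturation
// arithmetic from the comment above, checked at compile time.  The 4 GHz
// clock rate is an assumption used only for illustration.
static_assert(((int64_t{1} << 29) * 128) / (4LL * 1000 * 1000 * 1000) == 17,
              "at 4 GHz the encodable wait time saturates after ~17 seconds");
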
uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
                                    int64_t wait_end_time) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  int64_t scaled_wait_time =
      (wait_end_time - wait_start_time) >> kProfileTimestampShift;

  // Return a representation of the time spent waiting that can be stored in
  // the lock word's upper bits.
  uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);

  if (clamped == 0) {
    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
  }
  // Bump up value if necessary to avoid returning kSpinLockSleeper.
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  if (clamped == kSpinLockSleeper) {
    return kMinWaitTime;
  }
  return clamped;
}

int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
  // Cast to uint32_t first to ensure bits [63:32] are cleared.
  const int64_t scaled_wait_time =
      static_cast<uint32_t>(lock_value & kWaitTimeMask);
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}

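// Illustrative sketch (not part of the original file): a standalone model of
// the encode/decode pair above, without the clamping, showing the deliberate
// precision loss in the low kProfileTimestampShift bits.  Constants mirror
// those in this file; names are local to the sketch.
namespace wait_cycles_sketch {
constexpr int kShift = 7;     // kProfileTimestampShift
constexpr int kReserved = 3;  // kLockwordReservedShift

constexpr uint32_t Encode(int64_t cycles) {
  return static_cast<uint32_t>((cycles >> kShift) << kReserved);
}
constexpr int64_t Decode(uint32_t word) {
  // ~7u plays the role of kWaitTimeMask here.
  return int64_t{word & ~uint32_t{7}} << (kShift - kReserved);
}

// 1000 cycles round-trips to 896: the low 7 bits are dropped by design.
static_assert(Decode(Encode(1000)) == (1000 >> kShift) << kShift,
              "round-trip keeps only the scaled wait time");
}  // namespace wait_cycles_sketch
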
}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl