Coverage Report

Created: 2026-04-12 06:05

/src/abseil-cpp/absl/base/internal/low_level_scheduling.h
  Line|  Count|Source
     1|       |// Copyright 2017 The Abseil Authors.
     2|       |//
     3|       |// Licensed under the Apache License, Version 2.0 (the "License");
     4|       |// you may not use this file except in compliance with the License.
     5|       |// You may obtain a copy of the License at
     6|       |//
     7|       |//      https://www.apache.org/licenses/LICENSE-2.0
     8|       |//
     9|       |// Unless required by applicable law or agreed to in writing, software
    10|       |// distributed under the License is distributed on an "AS IS" BASIS,
    11|       |// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12|       |// See the License for the specific language governing permissions and
    13|       |// limitations under the License.
    14|       |//
    15|       |// Core interfaces and definitions used by low-level interfaces such as
    16|       |// SpinLock.
    17|       |
    18|       |#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
    19|       |#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
    20|       |
    21|       |#include <atomic>
    22|       |
    23|       |#include "absl/base/internal/raw_logging.h"
    24|       |#include "absl/base/internal/scheduling_mode.h"
    25|       |#include "absl/base/internal/thread_identity.h"
    26|       |#include "absl/base/macros.h"
    27|       |
    28|       |// The following two declarations exist so SchedulingGuard may friend them with
    29|       |// the appropriate language linkage.  These callbacks allow libc internals, such
    30|       |// as function level statics, to schedule cooperatively when locking.
    31|       |extern "C" bool __google_disable_rescheduling(void);
    32|       |extern "C" void __google_enable_rescheduling(bool disable_result);
    33|       |
    34|       |namespace absl {
    35|       |ABSL_NAMESPACE_BEGIN
    36|       |class CondVar;
    37|       |class Mutex;
    38|       |
    39|       |namespace synchronization_internal {
    40|       |int MutexDelay(int32_t c, int mode);
    41|       |}  // namespace synchronization_internal
    42|       |
    43|       |namespace base_internal {
    44|       |
    45|       |class SchedulingHelper;  // To allow use of SchedulingGuard.
    46|       |class SpinLock;          // To allow use of SchedulingGuard.
    47|       |
    48|       |// SchedulingGuard
    49|       |// Provides guard semantics that may be used to disable cooperative rescheduling
    50|       |// of the calling thread within specific program blocks.  This is used to
    51|       |// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
    52|       |// scheduling depends on.
    53|       |//
    54|       |// Domain implementations capable of rescheduling in reaction to involuntary
    55|       |// kernel thread actions (e.g. blocking due to a page fault or syscall) must
    56|       |// guarantee that an annotated thread is not allowed to (cooperatively)
    57|       |// reschedule until the annotated region is complete.
    58|       |//
    59|       |// It is an error to attempt to use a cooperatively scheduled resource (e.g.
    60|       |// Mutex) within a rescheduling-disabled region.
    61|       |//
    62|       |// All methods are async-signal safe.
    63|       |class SchedulingGuard {
    64|       | public:
    65|       |  // Returns true iff the calling thread may be cooperatively rescheduled.
    66|       |  static bool ReschedulingIsAllowed();
    67|       |  SchedulingGuard(const SchedulingGuard&) = delete;
    68|       |  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
    69|       |
    70|       |  // Disable cooperative rescheduling of the calling thread.  It may still
    71|       |  // initiate scheduling operations (e.g. wake-ups); however, it may not itself
    72|       |  // reschedule.  Nestable.  The returned result is opaque; clients should not
    73|       |  // attempt to interpret it.
    74|       |  // REQUIRES: Result must be passed to a pairing EnableRescheduling().
    75|       |  static bool DisableRescheduling();
    76|       |
    77|       |  // Marks the end of a rescheduling-disabled region, previously started by
    78|       |  // DisableRescheduling().
    79|       |  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
    80|       |  static void EnableRescheduling(bool disable_result);
    81|       |
    82|       |  // A scoped helper for {Disable, Enable}Rescheduling().
    83|       |  // REQUIRES: destructor must run in same thread as constructor.
    84|       |  struct ScopedDisable {
    85|      0|    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
    86|      0|    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
    87|       |
    88|       |    bool disabled;
    89|       |  };
    90|       |
    91|       |  // A scoped helper to enable rescheduling temporarily.
    92|       |  // REQUIRES: destructor must run in same thread as constructor.
    93|       |  class ScopedEnable {
    94|       |   public:
    95|       |    ScopedEnable();
    96|       |    ~ScopedEnable();
    97|       |
    98|       |   private:
    99|       |    int scheduling_disabled_depth_;
   100|       |  };
   101|       |};
   102|       |
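For orientation, a minimal usage sketch of the guard interface above. This is illustrative only and not part of the measured file: both functions are hypothetical callers, and this internal API is reserved for Abseil internals such as SpinLock.

    #include "absl/base/internal/low_level_scheduling.h"

    using absl::base_internal::SchedulingGuard;

    // Hypothetical critical region that must not cooperatively reschedule.
    void MutateLowLevelState() {
      // RAII form: the constructor calls DisableRescheduling() and the
      // destructor hands its opaque result back to EnableRescheduling().
      SchedulingGuard::ScopedDisable disable;
      // ... touch the protected state; using a cooperatively scheduled
      // resource such as absl::Mutex here would be an error ...
    }

    // Manual form, for a region that does not fit one lexical scope.
    void ManualRegion() {
      bool token = SchedulingGuard::DisableRescheduling();
      // ... rescheduling-disabled work ...
      SchedulingGuard::EnableRescheduling(token);
    }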
103
//------------------------------------------------------------------------------
104
// End of public interfaces.
105
//------------------------------------------------------------------------------
106
107
0
inline bool SchedulingGuard::ReschedulingIsAllowed() {
108
0
  ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
109
0
  if (identity != nullptr) {
110
0
    ThreadIdentity::SchedulerState* state = &identity->scheduler_state;
111
0
    // For a thread to be eligible for re-scheduling it must have a bound
112
0
    // schedulable (otherwise it's not cooperative) and not be within a
113
0
    // SchedulerGuard region.
114
0
    return state->bound_schedulable.load(std::memory_order_relaxed) !=
115
0
               nullptr &&
116
0
           state->scheduling_disabled_depth.load(std::memory_order_relaxed) ==
117
0
               0;
118
0
  } else {
119
0
    // Cooperative threads always have a ThreadIdentity.
120
0
    return false;
121
0
  }
122
0
}
123
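Every line of ReschedulingIsAllowed() above is uncovered in this run (count 0). A caller would use it as a predicate; a hypothetical sketch, using the raw logging header this file already includes:

    // Hypothetical guard in cooperative-scheduling code.
    if (!absl::base_internal::SchedulingGuard::ReschedulingIsAllowed()) {
      ABSL_RAW_LOG(FATAL, "cannot use a cooperative resource here");
    }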
   124|       |// We don't use [[nodiscard]] here as some clients (e.g.
   125|       |// FinishPotentiallyBlockingRegion()) cannot yet properly consume it.
   126|   678k|inline bool SchedulingGuard::DisableRescheduling() {
   127|   678k|  ThreadIdentity* identity;
   128|   678k|  identity = CurrentThreadIdentityIfPresent();
   129|   678k|  if (identity != nullptr) {
   130|       |    // The depth is accessed concurrently from other threads, so it must be
   131|       |    // atomic, but it's only mutated from this thread, so we don't need an
   132|       |    // atomic increment.
   133|   678k|    int old_val = identity->scheduler_state.scheduling_disabled_depth.load(
   134|   678k|        std::memory_order_relaxed);
   135|   678k|    identity->scheduler_state.scheduling_disabled_depth.store(
   136|   678k|        old_val + 1, std::memory_order_relaxed);
   137|   678k|    return true;
   138|   678k|  } else {
   139|      4|    return false;
   140|      4|  }
   141|   678k|}
   142|       |
   143|   678k|inline void SchedulingGuard::EnableRescheduling(bool disable_result) {
   144|   678k|  if (!disable_result) {
   145|       |    // There was no installed thread identity at the time that scheduling was
   146|       |    // disabled, so we have nothing to do.  This is an implementation detail
   147|       |    // that may change in the future; clients may not depend on it.
   148|       |    // EnableRescheduling() must always be called.
   149|      1|    return;
   150|      1|  }
   151|       |
   152|   678k|  ThreadIdentity* identity;
   153|       |  // A thread identity exists; see above.
   154|   678k|  identity = CurrentThreadIdentityIfPresent();
   155|       |  // The depth is accessed concurrently from other threads, so it must be
   156|       |  // atomic, but it's only mutated from this thread, so we don't need an atomic
   157|       |  // decrement.
   158|   678k|  int old_val = identity->scheduler_state.scheduling_disabled_depth.load(
   159|   678k|      std::memory_order_relaxed);
   160|   678k|  identity->scheduler_state.scheduling_disabled_depth.store(
   161|   678k|      old_val - 1, std::memory_order_relaxed);
   162|   678k|}
   163|       |
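The hot path above (roughly 678k hits) relies on strict nesting: each opaque result from DisableRescheduling() must be handed back to the innermost pairing EnableRescheduling(). A sketch of the required pairing, assuming a thread with an attached ThreadIdentity; the depth transitions in the comments follow from the load/store logic above:

    bool outer = SchedulingGuard::DisableRescheduling();  // depth 0 -> 1
    bool inner = SchedulingGuard::DisableRescheduling();  // depth 1 -> 2
    // ... work that must not cooperatively reschedule ...
    SchedulingGuard::EnableRescheduling(inner);           // depth 2 -> 1
    SchedulingGuard::EnableRescheduling(outer);           // depth 1 -> 0

Because scheduling_disabled_depth is mutated only by its owning thread, the relaxed load followed by a relaxed store suffices; no atomic read-modify-write is needed.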
   164|      0|inline SchedulingGuard::ScopedEnable::ScopedEnable() {
   165|      0|  ThreadIdentity* identity;
   166|      0|  identity = CurrentThreadIdentityIfPresent();
   167|      0|  if (identity != nullptr) {
   168|      0|    scheduling_disabled_depth_ =
   169|      0|        identity->scheduler_state.scheduling_disabled_depth.load(
   170|      0|            std::memory_order_relaxed);
   171|      0|    if (scheduling_disabled_depth_ != 0) {
   172|       |      // The store below does not need to be compare_exchange because
   173|       |      // the value is never modified concurrently (only accessed).
   174|      0|      identity->scheduler_state.scheduling_disabled_depth.store(
   175|      0|          0, std::memory_order_relaxed);
   176|      0|    }
   177|      0|  } else {
   178|      0|    scheduling_disabled_depth_ = 0;
   179|      0|  }
   180|      0|}
   181|       |
   182|      0|inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
   183|      0|  if (scheduling_disabled_depth_ == 0) {
   184|      0|    return;
   185|      0|  }
   186|      0|  ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
   187|       |  // identity is guaranteed to exist; see the constructor above.
   188|      0|  identity->scheduler_state.scheduling_disabled_depth.store(
   189|      0|      scheduling_disabled_depth_, std::memory_order_relaxed);
   190|      0|}
   191|       |
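Both ScopedEnable methods are uncovered in this run (count 0). Per the implementation above, the class temporarily restores cooperative scheduling inside a disabled region; a hypothetical sketch:

    void BrieflyAllowRescheduling() {
      SchedulingGuard::ScopedDisable disable;  // depth becomes nonzero
      {
        SchedulingGuard::ScopedEnable enable;  // saves the depth, resets it to 0
        // ... cooperative operations are permitted again here ...
      }  // the destructor restores the saved depth
      // rescheduling is disabled again until `disable` is destroyed
    }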
   192|       |}  // namespace base_internal
   193|       |ABSL_NAMESPACE_END
   194|       |}  // namespace absl
   195|       |
   196|       |#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_