Coverage Report

Created: 2026-04-01 06:29

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/abseil-cpp/absl/base/internal/low_level_scheduling.h
Line
Count
Source
1
// Copyright 2017 The Abseil Authors.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//      https://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
//
15
// Core interfaces and definitions used by low-level interfaces such as
16
// SpinLock.
17
18
#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
19
#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
20
21
#include "absl/base/internal/raw_logging.h"
22
#include "absl/base/internal/scheduling_mode.h"
23
#include "absl/base/internal/thread_identity.h"
24
#include "absl/base/macros.h"
25
26
// The following two declarations exist so SchedulingGuard may friend them with
27
// the appropriate language linkage.  These callbacks allow libc internals, such
28
// as function level statics, to schedule cooperatively when locking.
29
extern "C" bool __google_disable_rescheduling(void);
30
extern "C" void __google_enable_rescheduling(bool disable_result);
31
32
namespace absl {
33
ABSL_NAMESPACE_BEGIN
34
class CondVar;
35
class Mutex;
36
37
namespace synchronization_internal {
38
int MutexDelay(int32_t c, int mode);
39
}  // namespace synchronization_internal
40
41
namespace base_internal {
42
43
class SchedulingHelper;  // To allow use of SchedulingGuard.
44
class SpinLock;          // To allow use of SchedulingGuard.
45
46
// SchedulingGuard
47
// Provides guard semantics that may be used to disable cooperative rescheduling
48
// of the calling thread within specific program blocks.  This is used to
49
// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
50
// scheduling depends on.
51
//
52
// Domain implementations capable of rescheduling in reaction to involuntary
53
// kernel thread actions (e.g. blocking due to a pagefault or syscall) must
54
// guarantee that an annotated thread is not allowed to (cooperatively)
55
// reschedule until the annotated region is complete.
56
//
57
// It is an error to attempt to use a cooperatively scheduled resource (e.g.
58
// Mutex) within a rescheduling-disabled region.
59
//
60
// All methods are async-signal safe.
61
class SchedulingGuard {
62
 public:
63
  // Returns true iff the calling thread may be cooperatively rescheduled.
64
  static bool ReschedulingIsAllowed();
65
  SchedulingGuard(const SchedulingGuard&) = delete;
66
  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
67
68
  // Disable cooperative rescheduling of the calling thread.  It may still
69
  // initiate scheduling operations (e.g. wake-ups), however, it may not itself
70
  // reschedule.  Nestable.  The returned result is opaque, clients should not
71
  // attempt to interpret it.
72
  // REQUIRES: Result must be passed to a pairing EnableScheduling().
73
  static bool DisableRescheduling();
74
75
  // Marks the end of a rescheduling disabled region, previously started by
76
  // DisableRescheduling().
77
  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
78
  static void EnableRescheduling(bool disable_result);
79
80
  // A scoped helper for {Disable, Enable}Rescheduling().
81
  // REQUIRES: destructor must run in same thread as constructor.
82
  struct ScopedDisable {
83
0
    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
84
0
    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
85
86
    bool disabled;
87
  };
88
89
  // A scoped helper to enable rescheduling temporarily.
90
  // REQUIRES: destructor must run in same thread as constructor.
91
  class ScopedEnable {
92
   public:
93
    ScopedEnable();
94
    ~ScopedEnable();
95
96
   private:
97
    int scheduling_disabled_depth_;
98
  };
99
};
100
101
//------------------------------------------------------------------------------
102
// End of public interfaces.
103
//------------------------------------------------------------------------------
104
105
0
inline bool SchedulingGuard::ReschedulingIsAllowed() {
106
0
  ThreadIdentity* identity;
107
0
  identity = CurrentThreadIdentityIfPresent();
108
0
  if (identity != nullptr) {
109
0
    ThreadIdentity::SchedulerState* state;
110
0
    state = &identity->scheduler_state;
111
0
    // For a thread to be eligible for re-scheduling it must have a bound
112
0
    // schedulable (otherwise it's not cooperative) and not be within a
113
0
    // SchedulerGuard region.
114
0
    return state->get_bound_schedulable() != nullptr &&
115
0
           state->scheduling_disabled_depth.load(std::memory_order_relaxed) ==
116
0
               0;
117
0
  } else {
118
0
    // Cooperative threads always have a ThreadIdentity.
119
0
    return false;
120
0
  }
121
0
}
122
123
// We don't use [[nodiscard]] here as some clients (e.g.
124
// FinishPotentiallyBlockingRegion()) cannot yet properly consume it.
125
678k
inline bool SchedulingGuard::DisableRescheduling() {
126
678k
  ThreadIdentity* identity;
127
678k
  identity = CurrentThreadIdentityIfPresent();
128
678k
  if (identity != nullptr) {
129
    // The depth is accessed concurrently from other threads, so it must be
130
    // atomic, but it's only mutated from this thread, so we don't need an
131
    // atomic increment.
132
678k
    int old_val = identity->scheduler_state.scheduling_disabled_depth.load(
133
678k
        std::memory_order_relaxed);
134
678k
    identity->scheduler_state.scheduling_disabled_depth.store(
135
678k
        old_val + 1, std::memory_order_relaxed);
136
678k
    return true;
137
678k
  } else {
138
4
    return false;
139
4
  }
140
678k
}
141
142
678k
inline void SchedulingGuard::EnableRescheduling(bool disable_result) {
143
678k
  if (!disable_result) {
144
    // There was no installed thread identity at the time that scheduling was
145
    // disabled, so we have nothing to do.  This is an implementation detail
146
    // that may change in the future, clients may not depend on it.
147
    // EnableRescheduling() must always be called.
148
1
    return;
149
1
  }
150
151
678k
  ThreadIdentity* identity;
152
  // A thread identity exists, see above
153
678k
  identity = CurrentThreadIdentityIfPresent();
154
  // The depth is accessed concurrently from other threads, so it must be
155
  // atomic, but it's only mutated from this thread, so we don't need an atomic
156
  // decrement.
157
678k
  int old_val = identity->scheduler_state.scheduling_disabled_depth.load(
158
678k
      std::memory_order_relaxed);
159
678k
  identity->scheduler_state.scheduling_disabled_depth.store(
160
678k
      old_val - 1, std::memory_order_relaxed);
161
678k
}
162
163
0
inline SchedulingGuard::ScopedEnable::ScopedEnable() {
164
0
  ThreadIdentity* identity;
165
0
  identity = CurrentThreadIdentityIfPresent();
166
0
  if (identity != nullptr) {
167
0
    scheduling_disabled_depth_ =
168
0
        identity->scheduler_state.scheduling_disabled_depth.load(
169
0
            std::memory_order_relaxed);
170
0
    if (scheduling_disabled_depth_ != 0) {
171
      // The store below does not need to be compare_exchange because
172
      // the value is never modified concurrently (only accessed).
173
0
      identity->scheduler_state.scheduling_disabled_depth.store(
174
0
          0, std::memory_order_relaxed);
175
0
    }
176
0
  } else {
177
0
    scheduling_disabled_depth_ = 0;
178
0
  }
179
0
}
180
181
0
inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
182
0
  if (scheduling_disabled_depth_ == 0) {
183
0
    return;
184
0
  }
185
0
  ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
186
// identity is guaranteed to exist, see the constructor above.
187
0
  identity->scheduler_state.scheduling_disabled_depth.store(
188
0
      scheduling_disabled_depth_, std::memory_order_relaxed);
189
0
}
190
191
}  // namespace base_internal
192
ABSL_NAMESPACE_END
193
}  // namespace absl
194
195
#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_