Coverage Report

Created: 2025-03-11 06:06

/src/brpc/src/bthread/mutex.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
// bthread - An M:N threading library to make applications more concurrent.
19
20
// Date: Sun Aug  3 12:46:15 CST 2014
21
22
#include <sys/cdefs.h>
23
#include <pthread.h>
24
#include <dlfcn.h>                               // dlsym
25
#include <fcntl.h>                               // O_RDONLY
26
#include "butil/atomicops.h"
27
#include "bvar/bvar.h"
28
#include "bvar/collector.h"
29
#include "butil/macros.h"                         // BAIDU_CASSERT
30
#include "butil/containers/flat_map.h"
31
#include "butil/iobuf.h"
32
#include "butil/fd_guard.h"
33
#include "butil/files/file.h"
34
#include "butil/files/file_path.h"
35
#include "butil/file_util.h"
36
#include "butil/unique_ptr.h"
37
#include "butil/memory/scope_guard.h"
38
#include "butil/third_party/murmurhash3/murmurhash3.h"
39
#include "butil/third_party/symbolize/symbolize.h"
40
#include "butil/logging.h"
41
#include "butil/object_pool.h"
42
#include "butil/debug/stack_trace.h"
43
#include "butil/thread_local.h"
44
#include "bthread/butex.h"                       // butex_*
45
#include "bthread/mutex.h"                       // bthread_mutex_t
46
#include "bthread/sys_futex.h"
47
#include "bthread/log.h"
48
#include "bthread/processor.h"
49
#include "bthread/task_group.h"
50
51
__BEGIN_DECLS
52
extern void* BAIDU_WEAK _dl_sym(void* handle, const char* symbol, void* caller);
53
__END_DECLS
54
55
namespace bthread {
56
57
EXTERN_BAIDU_VOLATILE_THREAD_LOCAL(TaskGroup*, tls_task_group);
58
59
// Warm up backtrace before main().
60
const butil::debug::StackTrace ALLOW_UNUSED dummy_bt;
61
62
// For controlling contentions collected per second.
63
bvar::CollectorSpeedLimit g_cp_sl = BVAR_COLLECTOR_SPEED_LIMIT_INITIALIZER;
64
65
const size_t MAX_CACHED_CONTENTIONS = 512;
66
// Skip frames which are always the same: the unlock function and submit_contention()
67
const int SKIPPED_STACK_FRAMES = 2;
68
69
struct SampledContention : public bvar::Collected {
70
    // time taken by lock and unlock, normalized according to sampling_range
71
    int64_t duration_ns;
72
    // number of samples, normalized according to sampling_range
73
    double count;
74
    void* stack[26];      // backtrace.
75
    int nframes;          // #elements in stack
76
77
    // Implement bvar::Collected
78
    void dump_and_destroy(size_t round) override;
79
    void destroy() override;
80
0
    bvar::CollectorSpeedLimit* speed_limit() override { return &g_cp_sl; }
81
82
0
    size_t hash_code() const {
83
0
        if (nframes == 0) {
84
0
            return 0;
85
0
        }
86
0
        if (_hash_code == 0) {
87
0
            _hash_code = 1;
88
0
            uint32_t seed = nframes;
89
0
            butil::MurmurHash3_x86_32(stack, sizeof(void*) * nframes, seed, &_hash_code);
90
0
        }
91
0
        return _hash_code;
92
0
    }
93
private:
94
friend butil::ObjectPool<SampledContention>;
95
    SampledContention()
96
0
        : duration_ns(0), count(0), stack{NULL}, nframes(0), _hash_code(0) {}
97
    ~SampledContention() override = default;
98
99
    mutable uint32_t _hash_code; // For combining samples with hashmap.
100
};
101
102
BAIDU_CASSERT(sizeof(SampledContention) == 256, be_friendly_to_allocator);
103
104
// Functor to compare contentions.
105
struct ContentionEqual {
106
    bool operator()(const SampledContention* c1,
107
0
                    const SampledContention* c2) const {
108
0
        return c1->hash_code() == c2->hash_code() &&
109
0
            c1->nframes == c2->nframes &&
110
0
            memcmp(c1->stack, c2->stack, sizeof(void*) * c1->nframes) == 0;
111
0
    }
112
};
113
114
// Functor to hash contentions.
115
struct ContentionHash {
116
0
    size_t operator()(const SampledContention* c) const {
117
0
        return c->hash_code();
118
0
    }
119
};
120
121
// The global context for contention profiler.
122
class ContentionProfiler {
123
public:
124
    typedef butil::FlatMap<SampledContention*, SampledContention*,
125
                          ContentionHash, ContentionEqual> ContentionMap;
126
127
    explicit ContentionProfiler(const char* name);
128
    ~ContentionProfiler();
129
    
130
    void dump_and_destroy(SampledContention* c);
131
132
    // Write buffered data into the resulting file. If `ending' is true, append
133
    // content of /proc/self/maps and retry writing until buffer is empty.
134
    void flush_to_disk(bool ending);
135
136
    void init_if_needed();
137
private:
138
    bool _init;  // false before first dump_and_destroy is called
139
    bool _first_write;      // true if buffer was not written to file yet.
140
    std::string _filename;  // the file storing profiling result.
141
    butil::IOBuf _disk_buf;  // temp buf before saving the file.
142
    ContentionMap _dedup_map; // combining same samples to make result smaller.
143
};
144
145
ContentionProfiler::ContentionProfiler(const char* name)
146
0
    : _init(false)
147
0
    , _first_write(true)
148
0
    , _filename(name) {
149
0
}
150
151
0
ContentionProfiler::~ContentionProfiler() {
152
0
    if (!_init) {
153
        // Don't write file if dump_and_destroy was never called. We may create
154
        // such instances in ContentionProfilerStart.
155
0
        return;
156
0
    }
157
0
    flush_to_disk(true);
158
0
}
159
160
0
void ContentionProfiler::init_if_needed() {
161
0
    if (!_init) {
162
        // We already output nanoseconds, so always set cycles/second to 1000000000.
163
0
        _disk_buf.append("--- contention\ncycles/second=1000000000\n");
164
0
        if (_dedup_map.init(1024, 60) != 0) {
165
0
            LOG(WARNING) << "Fail to initialize dedup_map";
166
0
        }
167
0
        _init = true;
168
0
    }
169
0
}
170
    
171
0
void ContentionProfiler::dump_and_destroy(SampledContention* c) {
172
0
    init_if_needed();
173
    // Categorize the contention.
174
0
    SampledContention** p_c2 = _dedup_map.seek(c);
175
0
    if (p_c2) {
176
        // Most contentions are caused by several hotspots, this should be
177
        // the common branch.
178
0
        SampledContention* c2 = *p_c2;
179
0
        c2->duration_ns += c->duration_ns;
180
0
        c2->count += c->count;
181
0
        c->destroy();
182
0
    } else {
183
0
        _dedup_map.insert(c, c);
184
0
    }
185
0
    if (_dedup_map.size() > MAX_CACHED_CONTENTIONS) {
186
0
        flush_to_disk(false);
187
0
    }
188
0
}
189
190
0
void ContentionProfiler::flush_to_disk(bool ending) {
191
0
    BT_VLOG << "flush_to_disk(ending=" << ending << ")";
192
    
193
    // Serialize contentions in _dedup_map into _disk_buf.
194
0
    if (!_dedup_map.empty()) {
195
0
        BT_VLOG << "dedup_map=" << _dedup_map.size();
196
0
        butil::IOBufBuilder os;
197
0
        for (ContentionMap::const_iterator
198
0
                 it = _dedup_map.begin(); it != _dedup_map.end(); ++it) {
199
0
            SampledContention* c = it->second;
200
0
            os << c->duration_ns << ' ' << (size_t)ceil(c->count) << " @";
201
0
            for (int i = SKIPPED_STACK_FRAMES; i < c->nframes; ++i) {
202
0
                os << ' ' << (void*)c->stack[i];
203
0
            }
204
0
            os << '\n';
205
0
            c->destroy();
206
0
        }
207
0
        _dedup_map.clear();
208
0
        _disk_buf.append(os.buf());
209
0
    }
210
211
    // Append /proc/self/maps to the end of the contention file, required by
212
    // pprof.pl; otherwise the functions in system libraries are not symbolized.
213
0
    if (ending) {
214
0
        BT_VLOG << "Append /proc/self/maps";
215
        // Failures are not critical, don't return directly.
216
0
        butil::IOPortal mem_maps;
217
0
        const butil::fd_guard fd(open("/proc/self/maps", O_RDONLY));
218
0
        if (fd >= 0) {
219
0
            while (true) {
220
0
                ssize_t nr = mem_maps.append_from_file_descriptor(fd, 8192);
221
0
                if (nr < 0) {
222
0
                    if (errno == EINTR) {
223
0
                        continue;
224
0
                    }
225
0
                    PLOG(ERROR) << "Fail to read /proc/self/maps";
226
0
                    break;
227
0
                }
228
0
                if (nr == 0) {
229
0
                    _disk_buf.append(mem_maps);
230
0
                    break;
231
0
                }
232
0
            }
233
0
        } else {
234
0
            PLOG(ERROR) << "Fail to open /proc/self/maps";
235
0
        }
236
0
    }
237
    // Write _disk_buf into _filename
238
0
    butil::File::Error error;
239
0
    butil::FilePath path(_filename);
240
0
    butil::FilePath dir = path.DirName();
241
0
    if (!butil::CreateDirectoryAndGetError(dir, &error)) {
242
0
        LOG(ERROR) << "Fail to create directory=`" << dir.value()
243
0
                   << "', " << error;
244
0
        return;
245
0
    }
246
    // Truncate on first write, append on later writes.
247
0
    int flag = O_APPEND;
248
0
    if (_first_write) {
249
0
        _first_write = false;
250
0
        flag = O_TRUNC;
251
0
    }
252
0
    butil::fd_guard fd(open(_filename.c_str(), O_WRONLY|O_CREAT|flag, 0666));
253
0
    if (fd < 0) {
254
0
        PLOG(ERROR) << "Fail to open " << _filename;
255
0
        return;
256
0
    }
257
    // Write once normally; keep writing until the buffer is empty at the end.
258
0
    do {
259
0
        ssize_t nw = _disk_buf.cut_into_file_descriptor(fd);
260
0
        if (nw < 0) {
261
0
            if (errno == EINTR) {
262
0
                continue;
263
0
            }
264
0
            PLOG(ERROR) << "Fail to write into " << _filename;
265
0
            return;
266
0
        }
267
0
        BT_VLOG << "Write " << nw << " bytes into " << _filename;
268
0
    } while (!_disk_buf.empty() && ending);
269
0
}
270
271
// If contention profiler is on, this variable will be set with a valid
272
// instance. NULL otherwise.
273
BAIDU_CACHELINE_ALIGNMENT ContentionProfiler* g_cp = NULL;
274
// This version is needed so that non-empty entries left by
275
// previous contention profilers can be detected and overwritten.
276
static uint64_t g_cp_version = 0;
277
// Protecting accesses to g_cp.
278
static pthread_mutex_t g_cp_mutex = PTHREAD_MUTEX_INITIALIZER;
279
280
// The map storing information for profiling pthread_mutex. Different from
281
// bthread_mutex, we can't save stuff into pthread_mutex, nor can we
282
// save the info in TLS reliably, since a mutex can be unlocked in a different
283
// thread from the one that locked it (although rare, undefined behavior).
284
// This map must be very fast, since it's accessed inside the lock.
285
// Layout of the map:
286
//  * Align each entry by cacheline so that different threads do not collide.
287
//  * Hash the mutex into the map by its address. If the entry is occupied,
288
//    cancel sampling.
289
// The canceling rate should be small provided that programs are unlikely to
290
// lock a lot of mutexes simultaneously.
291
const size_t MUTEX_MAP_SIZE = 1024;
292
BAIDU_CASSERT((MUTEX_MAP_SIZE & (MUTEX_MAP_SIZE - 1)) == 0, must_be_power_of_2);
293
struct BAIDU_CACHELINE_ALIGNMENT MutexMapEntry {
294
    butil::static_atomic<uint64_t> versioned_mutex;
295
    bthread_contention_site_t csite;
296
};
297
static MutexMapEntry g_mutex_map[MUTEX_MAP_SIZE] = {}; // zero-initialize
298
299
0
void SampledContention::dump_and_destroy(size_t /*round*/) {
300
0
    if (g_cp) {
301
        // Must be protected with mutex to avoid race with deletion of ctx.
302
        // dump_and_destroy is called from the dumping thread only, so this mutex
303
        // is not contended most of the time.
304
0
        BAIDU_SCOPED_LOCK(g_cp_mutex);
305
0
        if (g_cp) {
306
0
            g_cp->dump_and_destroy(this);
307
0
            return;
308
0
        }
309
0
    }
310
0
    destroy();
311
0
}
312
313
0
void SampledContention::destroy() {
314
0
    _hash_code = 0;
315
0
    butil::return_object(this);
316
0
}
317
318
// Remember the conflict hashes for troubleshooting; should be 0 most of the time.
319
static butil::static_atomic<int64_t> g_nconflicthash = BUTIL_STATIC_ATOMIC_INIT(0);
320
0
static int64_t get_nconflicthash(void*) {
321
0
    return g_nconflicthash.load(butil::memory_order_relaxed);
322
0
}
323
324
// Start profiling contention.
325
0
bool ContentionProfilerStart(const char* filename) {
326
0
    if (filename == NULL) {
327
0
        LOG(ERROR) << "Parameter [filename] is NULL";
328
0
        return false;
329
0
    }
330
    // g_cp is also the flag marking start/stop.
331
0
    if (g_cp) {
332
0
        return false;
333
0
    }
334
335
    // Create related global bvar lazily.
336
0
    static bvar::PassiveStatus<int64_t> g_nconflicthash_var
337
0
        ("contention_profiler_conflict_hash", get_nconflicthash, NULL);
338
0
    static bvar::DisplaySamplingRatio g_sampling_ratio_var(
339
0
        "contention_profiler_sampling_ratio", &g_cp_sl);
340
    
341
    // Optimistic locking. An unused ContentionProfiler does not write a file.
342
0
    std::unique_ptr<ContentionProfiler> ctx(new ContentionProfiler(filename));
343
0
    {
344
0
        BAIDU_SCOPED_LOCK(g_cp_mutex);
345
0
        if (g_cp) {
346
0
            return false;
347
0
        }
348
0
        g_cp = ctx.release();
349
0
        ++g_cp_version;  // invalidate non-empty entries that may exist.
350
0
    }
351
0
    return true;
352
0
}
353
354
// Stop contention profiler.
355
0
void ContentionProfilerStop() {
356
0
    ContentionProfiler* ctx = NULL;
357
0
    if (g_cp) {
358
0
        std::unique_lock<pthread_mutex_t> mu(g_cp_mutex);
359
0
        if (g_cp) {
360
0
            ctx = g_cp;
361
0
            g_cp = NULL;
362
0
            mu.unlock();
363
364
            // make sure it's initialized in case no sample was gathered,
365
            // otherwise nothing will be written and the subsequent pprof run will fail.
366
0
            ctx->init_if_needed();
367
            // Deletion is safe because usages of g_cp are inside g_cp_mutex.
368
0
            delete ctx;
369
0
            return;
370
0
        }
371
0
    }
372
0
    LOG(ERROR) << "Contention profiler is not started!";
373
0
}
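For orientation, a minimal usage sketch of the two functions above; the output path and the workload function are illustrative assumptions, not part of this file:

    // Hedged sketch: profile lock contention around a workload.
    if (bthread::ContentionProfilerStart("/tmp/contention.prof")) { // path is an assumption
        run_workload();                    // hypothetical user function
        bthread::ContentionProfilerStop(); // flushes samples and /proc/self/maps
    }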
374
375
0
bool is_contention_site_valid(const bthread_contention_site_t& cs) {
376
0
    return bvar::is_sampling_range_valid(cs.sampling_range);
377
0
}
378
379
0
void make_contention_site_invalid(bthread_contention_site_t* cs) {
380
0
    cs->sampling_range = 0;
381
0
}
382
383
#ifndef NO_PTHREAD_MUTEX_HOOK
384
// Replace pthread_mutex_lock and pthread_mutex_unlock:
385
// First call to sys_pthread_mutex_lock sets sys_pthread_mutex_lock to the
386
// real function so that subsequent calls go to the real function directly. This
387
// technique avoids calling pthread_once each time.
388
typedef int (*MutexInitOp)(pthread_mutex_t*, const pthread_mutexattr_t*);
389
typedef int (*MutexOp)(pthread_mutex_t*);
390
int first_sys_pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* mutexattr);
391
int first_sys_pthread_mutex_destroy(pthread_mutex_t* mutex);
392
int first_sys_pthread_mutex_lock(pthread_mutex_t* mutex);
393
int first_sys_pthread_mutex_trylock(pthread_mutex_t* mutex);
394
int first_sys_pthread_mutex_unlock(pthread_mutex_t* mutex);
395
static MutexInitOp sys_pthread_mutex_init = first_sys_pthread_mutex_init;
396
static MutexOp sys_pthread_mutex_destroy = first_sys_pthread_mutex_destroy;
397
static MutexOp sys_pthread_mutex_lock = first_sys_pthread_mutex_lock;
398
static MutexOp sys_pthread_mutex_trylock = first_sys_pthread_mutex_trylock;
399
static MutexOp sys_pthread_mutex_unlock = first_sys_pthread_mutex_unlock;
400
#if HAS_PTHREAD_MUTEX_TIMEDLOCK
401
typedef int (*TimedMutexOp)(pthread_mutex_t*, const struct timespec*);
402
int first_sys_pthread_mutex_timedlock(pthread_mutex_t* mutex,
403
                                      const struct timespec* __abstime);
404
static TimedMutexOp sys_pthread_mutex_timedlock = first_sys_pthread_mutex_timedlock;
405
#endif // HAS_PTHREAD_MUTEX_TIMEDLOCK
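A minimal, self-contained sketch of the first-call trampoline described above, with hypothetical names (real_op stands in for the dlsym-resolved symbol):

    #include <pthread.h>
    typedef int (*Op)(void);
    static int first_op(void);                  // trampoline, defined below
    static Op sys_op = first_op;                // starts at the trampoline
    static pthread_once_t op_once = PTHREAD_ONCE_INIT;
    static int real_op(void) { return 0; }      // stands in for the real function
    static void resolve_op(void) { sys_op = real_op; }
    static int first_op(void) {
        pthread_once(&op_once, resolve_op);     // resolve exactly once, thread-safely
        return sys_op();                        // later callers reach real_op directly
    }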
406
407
static pthread_once_t init_sys_mutex_lock_once = PTHREAD_ONCE_INIT;
408
409
// dlsym may call malloc to allocate space for dlerror and cause the contention
410
// profiler to deadlock at bootstrapping when the program is linked with
411
// libunwind. The deadlock bt:
412
//   #0  0x00007effddc99b80 in __nanosleep_nocancel () at ../sysdeps/unix/syscall-template.S:81
413
//   #1  0x00000000004b4df7 in butil::internal::SpinLockDelay(int volatile*, int, int) ()
414
//   #2  0x00000000004b4d57 in SpinLock::SlowLock() ()
415
//   #3  0x00000000004b4a63 in tcmalloc::ThreadCache::InitModule() ()
416
//   #4  0x00000000004aa2b5 in tcmalloc::ThreadCache::GetCache() ()
417
//   #5  0x000000000040c6c5 in (anonymous namespace)::do_malloc_no_errno(unsigned long) [clone.part.16] ()
418
//   #6  0x00000000006fc125 in tc_calloc ()
419
//   #7  0x00007effdd245690 in _dlerror_run (operate=operate@entry=0x7effdd245130 <dlsym_doit>, args=args@entry=0x7fff483dedf0) at dlerror.c:141
420
//   #8  0x00007effdd245198 in __dlsym (handle=<optimized out>, name=<optimized out>) at dlsym.c:70
421
//   #9  0x0000000000666517 in bthread::init_sys_mutex_lock () at bthread/mutex.cpp:358
422
//   #10 0x00007effddc97a90 in pthread_once () at ../nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S:103
423
//   #11 0x000000000066649f in bthread::first_sys_pthread_mutex_lock (mutex=0xbaf880 <_ULx86_64_lock>) at bthread/mutex.cpp:366
424
//   #12 0x00000000006678bc in pthread_mutex_lock_impl (mutex=0xbaf880 <_ULx86_64_lock>) at bthread/mutex.cpp:489
425
//   #13 pthread_mutex_lock (__mutex=__mutex@entry=0xbaf880 <_ULx86_64_lock>) at bthread/mutex.cpp:751
426
//   #14 0x00000000004c6ea1 in _ULx86_64_init () at x86_64/Gglobal.c:83
427
//   #15 0x00000000004c44fb in _ULx86_64_init_local (cursor=0x7fff483df340, uc=0x7fff483def90) at x86_64/Ginit_local.c:47
428
//   #16 0x00000000004b5012 in GetStackTrace(void**, int, int) ()
429
//   #17 0x00000000004b2095 in tcmalloc::PageHeap::GrowHeap(unsigned long) ()
430
//   #18 0x00000000004b23a3 in tcmalloc::PageHeap::New(unsigned long) ()
431
//   #19 0x00000000004ad457 in tcmalloc::CentralFreeList::Populate() ()
432
//   #20 0x00000000004ad628 in tcmalloc::CentralFreeList::FetchFromSpansSafe() ()
433
//   #21 0x00000000004ad6a3 in tcmalloc::CentralFreeList::RemoveRange(void**, void**, int) ()
434
//   #22 0x00000000004b3ed3 in tcmalloc::ThreadCache::FetchFromCentralCache(unsigned long, unsigned long) ()
435
//   #23 0x00000000006fbb9a in tc_malloc ()
436
// Call _dl_sym, a private function in glibc, to work around the malloc-
437
// caused deadlock temporarily. This fix is hardly portable.
438
439
20
static void init_sys_mutex_lock() {
440
// When bRPC is built as a shared library, make sure the bRPC shared
441
// library is loaded before the pthread shared library. Otherwise, it
442
// may cause a runtime error: undefined symbol: pthread_mutex_xxx.
443
// Alternatively, static linking can also avoid this problem.
444
20
#if defined(OS_LINUX)
445
    // TODO: may need dlvsym when GLIBC has multiple versions of the same symbol.
446
    // http://blog.fesnel.com/blog/2009/08/25/preloading-with-multiple-symbol-versions
447
20
    if (_dl_sym) {
448
20
        sys_pthread_mutex_init = (MutexInitOp)_dl_sym(
449
20
            RTLD_NEXT, "pthread_mutex_init", (void*)init_sys_mutex_lock);
450
20
        sys_pthread_mutex_destroy = (MutexOp)_dl_sym(
451
20
            RTLD_NEXT, "pthread_mutex_destroy", (void*)init_sys_mutex_lock);
452
20
        sys_pthread_mutex_lock = (MutexOp)_dl_sym(
453
20
            RTLD_NEXT, "pthread_mutex_lock", (void*)init_sys_mutex_lock);
454
20
        sys_pthread_mutex_unlock = (MutexOp)_dl_sym(
455
20
            RTLD_NEXT, "pthread_mutex_unlock", (void*)init_sys_mutex_lock);
456
20
        sys_pthread_mutex_trylock = (MutexOp)_dl_sym(
457
20
            RTLD_NEXT, "pthread_mutex_trylock", (void*)init_sys_mutex_lock);
458
20
#if HAS_PTHREAD_MUTEX_TIMEDLOCK
459
20
        sys_pthread_mutex_timedlock = (TimedMutexOp)_dl_sym(
460
20
            RTLD_NEXT, "pthread_mutex_timedlock", (void*)init_sys_mutex_lock);
461
20
#endif // HAS_PTHREAD_MUTEX_TIMEDLOCK
462
20
    } else {
463
        // _dl_sym may be an undefined reference on some systems; fall back to dlsym
464
0
        sys_pthread_mutex_init = (MutexInitOp)dlsym(RTLD_NEXT, "pthread_mutex_init");
465
0
        sys_pthread_mutex_destroy = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_destroy");
466
0
        sys_pthread_mutex_lock = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_lock");
467
0
        sys_pthread_mutex_unlock = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_unlock");
468
0
        sys_pthread_mutex_trylock = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_trylock");
469
0
#if HAS_PTHREAD_MUTEX_TIMEDLOCK
470
0
        sys_pthread_mutex_timedlock = (TimedMutexOp)dlsym(RTLD_NEXT, "pthread_mutex_timedlock");
471
0
#endif // HAS_PTHREAD_MUTEX_TIMEDLOCK
472
0
    }
473
#elif defined(OS_MACOSX)
474
    // TODO: look for a workaround for dlsym on macOS
475
    sys_pthread_mutex_init = (MutexInitOp)dlsym(RTLD_NEXT, "pthread_mutex_init");
476
    sys_pthread_mutex_destroy = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_destroy");
477
    sys_pthread_mutex_lock = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_lock");
478
    sys_pthread_mutex_trylock = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_trylock");
479
    sys_pthread_mutex_unlock = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_unlock");
480
#endif
481
20
}
482
483
// Make sure pthread functions are ready before main().
484
const int ALLOW_UNUSED dummy = pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
485
486
0
int first_sys_pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* mutexattr) {
487
0
    pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
488
0
    return sys_pthread_mutex_init(mutex, mutexattr);
489
0
}
490
491
0
int first_sys_pthread_mutex_destroy(pthread_mutex_t* mutex) {
492
0
    pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
493
0
    return sys_pthread_mutex_destroy(mutex);
494
0
}
495
496
20
int first_sys_pthread_mutex_lock(pthread_mutex_t* mutex) {
497
20
    pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
498
20
    return sys_pthread_mutex_lock(mutex);
499
20
}
500
501
0
int first_sys_pthread_mutex_trylock(pthread_mutex_t* mutex) {
502
0
    pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
503
0
    return sys_pthread_mutex_trylock(mutex);
504
0
}
505
506
#if HAS_PTHREAD_MUTEX_TIMEDLOCK
507
int first_sys_pthread_mutex_timedlock(pthread_mutex_t* mutex,
508
0
                                      const struct timespec* abstime) {
509
0
    pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
510
0
    return sys_pthread_mutex_timedlock(mutex, abstime);
511
0
}
512
#endif // HAS_PTHREAD_MUTEX_TIMEDLOCK
513
514
0
int first_sys_pthread_mutex_unlock(pthread_mutex_t* mutex) {
515
0
    pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
516
0
    return sys_pthread_mutex_unlock(mutex);
517
0
}
518
#endif
519
520
template <typename Mutex>
521
0
inline uint64_t hash_mutex_ptr(const Mutex* m) {
522
0
    return butil::fmix64((uint64_t)m);
523
0
}
Unexecuted instantiation: unsigned long bthread::hash_mutex_ptr<pthread_mutex_t>(pthread_mutex_t const*)
Unexecuted instantiation: unsigned long bthread::hash_mutex_ptr<bthread::internal::FastPthreadMutex>(bthread::internal::FastPthreadMutex const*)
524
525
// Mark that we are inside locking so that pthread_mutex calls inside collecting
526
// code are never sampled, otherwise deadlock may occur.
527
static __thread bool tls_inside_lock = false;
528
529
// Warm up some singleton objects used in the contention profiler
530
// to avoid deadlock in the malloc call stack.
531
static __thread bool tls_warn_up = false;
532
533
#if BRPC_DEBUG_BTHREAD_SCHE_SAFETY
534
// ++tls_pthread_lock_count when pthread locking,
535
// --tls_pthread_lock_count when pthread unlocking.
536
// Only when it equals 0 is it safe for the bthread to be scheduled.
537
// Note: If a mutex is locked/unlocked in different threads,
538
// `tls_pthread_lock_count' is inaccurate, so this feature cannot be used.
539
static __thread int tls_pthread_lock_count = 0;
540
541
#define ADD_TLS_PTHREAD_LOCK_COUNT ++tls_pthread_lock_count
542
#define SUB_TLS_PTHREAD_LOCK_COUNT --tls_pthread_lock_count
543
544
void CheckBthreadScheSafety() {
545
    if (BAIDU_LIKELY(0 == tls_pthread_lock_count)) {
546
        return;
547
    }
548
549
    // It can only be reported once because the counter is already messed up.
550
    LOG_BACKTRACE_ONCE(ERROR) << "bthread is suspended while holding "
551
                              << tls_pthread_lock_count << " pthread locks.";
552
}
553
#else
554
32.5k
#define ADD_TLS_PTHREAD_LOCK_COUNT ((void)0)
555
32.5k
#define SUB_TLS_PTHREAD_LOCK_COUNT ((void)0)
556
0
void CheckBthreadScheSafety() {}
557
#endif // BRPC_DEBUG_BTHREAD_SCHE_SAFETY
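A hypothetical scenario this counter is meant to catch, assuming the check runs when a bthread is suspended (bthread_usleep is the public sleep API; requires <pthread.h> and bthread/bthread.h):

    pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
    void* risky_bthread(void*) {
        pthread_mutex_lock(&mu);  // tls_pthread_lock_count becomes 1
        bthread_usleep(1000);     // suspends the bthread; CheckBthreadScheSafety
                                  // would log "suspended while holding 1 pthread locks"
        pthread_mutex_unlock(&mu);
        return NULL;
    }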
558
559
// Speed up with TLS:
560
//   Most pthread_mutex are locked and unlocked in the same thread. Putting
561
//   contention information in TLS avoids collisions that may occur in
562
//   g_mutex_map. However when user unlocks in another thread, the info cached
563
//   in the locking thread is not removed, making the space bloated. We use a
564
//   simple strategy to solve the issue: If a thread has enough thread-local
565
//   space to store the info, save it, otherwise save it in g_mutex_map. For
566
//   a program that locks and unlocks in the same thread and does not lock a
567
//   lot of mutexes simultaneously, this strategy always uses the TLS.
568
#ifndef DONT_SPEEDUP_PTHREAD_CONTENTION_PROFILER_WITH_TLS
569
const int TLS_MAX_COUNT = 3;
570
struct MutexAndContentionSite {
571
    void* mutex;
572
    bthread_contention_site_t csite;
573
};
574
struct TLSPthreadContentionSites {
575
    int count;
576
    uint64_t cp_version;
577
    MutexAndContentionSite list[TLS_MAX_COUNT];
578
};
579
static __thread TLSPthreadContentionSites tls_csites = {0,0,{}};
580
#endif  // DONT_SPEEDUP_PTHREAD_CONTENTION_PROFILER_WITH_TLS
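To summarize the strategy as pseudocode-style comments (the real logic lives in pthread_mutex_lock_impl/unlock_impl further down this file):

    // lock (sampled): if tls_csites has a free slot -> record {mutex, csite} in TLS
    //                 else -> add_pthread_contention_site() stores it in g_mutex_map
    // unlock:         scan tls_csites for this mutex first; on a miss, fall back to
    //                 remove_pthread_contention_site() on g_mutex_map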
581
582
// 48 significant pointer bits are guaranteed on Linux/Windows.
583
const int PTR_BITS = 48;
584
585
template <typename Mutex>
586
inline bthread_contention_site_t*
587
0
add_pthread_contention_site(const Mutex* mutex) {
588
0
    MutexMapEntry& entry = g_mutex_map[hash_mutex_ptr(mutex) & (MUTEX_MAP_SIZE - 1)];
589
0
    butil::static_atomic<uint64_t>& m = entry.versioned_mutex;
590
0
    uint64_t expected = m.load(butil::memory_order_relaxed);
591
    // If the entry is not used or used by previous profiler, try to CAS it.
592
0
    if (expected == 0 ||
593
0
        (expected >> PTR_BITS) != (g_cp_version & ((1 << (64 - PTR_BITS)) - 1))) {
594
0
        uint64_t desired = (g_cp_version << PTR_BITS) | (uint64_t)mutex;
595
0
        if (m.compare_exchange_strong(
596
0
                expected, desired, butil::memory_order_acquire)) {
597
0
            return &entry.csite;
598
0
        }
599
0
    }
600
0
    g_nconflicthash.fetch_add(1, butil::memory_order_relaxed);
601
0
    return NULL;
602
0
}
Unexecuted instantiation: bthread_contention_site_t* bthread::add_pthread_contention_site<pthread_mutex_t>(pthread_mutex_t const*)
Unexecuted instantiation: bthread_contention_site_t* bthread::add_pthread_contention_site<bthread::internal::FastPthreadMutex>(bthread::internal::FastPthreadMutex const*)
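A minimal sketch of the versioned-pointer encoding used above: the low 48 bits hold the mutex address and the high 16 bits hold the profiler version. The concrete values are illustrative assumptions:

    #include <cstdint>
    #include <cassert>
    int main() {
        const int PTR_BITS = 48;
        uint64_t version = 3;                   // low 16 bits of g_cp_version
        uint64_t addr = 0x00007f1234567890ULL;  // a 48-bit user-space address
        uint64_t packed = (version << PTR_BITS) | addr;
        // Recover both fields, exactly as the CAS/compare code above does.
        assert((packed >> PTR_BITS) == (version & ((1ULL << (64 - PTR_BITS)) - 1)));
        assert((packed & ((1ULL << PTR_BITS) - 1)) == addr);
        return 0;
    }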
603
604
template <typename Mutex>
605
inline bool remove_pthread_contention_site(const Mutex* mutex,
606
0
                                           bthread_contention_site_t* saved_csite) {
607
0
    MutexMapEntry& entry = g_mutex_map[hash_mutex_ptr(mutex) & (MUTEX_MAP_SIZE - 1)];
608
0
    butil::static_atomic<uint64_t>& m = entry.versioned_mutex;
609
0
    if ((m.load(butil::memory_order_relaxed) & ((((uint64_t)1) << PTR_BITS) - 1))
610
0
        != (uint64_t)mutex) {
611
        // This branch should be the most common case since most locks are
612
        // neither contended nor sampled. We have one memory indirection and
613
        // several bitwise operations here; the cost should be ~5-50ns.
614
0
        return false;
615
0
    }
616
    // Although this branch is inside a contended lock, we should also make it
617
    // as simple as possible because altering the critical section too much
618
    // may have an unpredictable impact on thread interleaving, which
619
    // makes the profiling result less accurate.
620
0
    *saved_csite = entry.csite;
621
0
    make_contention_site_invalid(&entry.csite);
622
0
    m.store(0, butil::memory_order_release);
623
0
    return true;
624
0
}
Unexecuted instantiation: bool bthread::remove_pthread_contention_site<pthread_mutex_t>(pthread_mutex_t const*, bthread_contention_site_t*)
Unexecuted instantiation: bool bthread::remove_pthread_contention_site<bthread::internal::FastPthreadMutex>(bthread::internal::FastPthreadMutex const*, bthread_contention_site_t*)
625
626
// Submit the contention along with the callsite('s stacktrace)
627
0
void submit_contention(const bthread_contention_site_t& csite, int64_t now_ns) {
628
0
    tls_inside_lock = true;
629
0
    BRPC_SCOPE_EXIT {
630
0
        tls_inside_lock = false;
631
0
    };
632
633
0
    butil::debug::StackTrace stack(true); // May lock.
634
0
    if (0 == stack.FrameCount()) {
635
0
        return;
636
0
    }
637
    // There are two situations where we need to check whether we are in the
638
    // malloc call stack:
639
    // 1. Warm up some singleton objects used in `submit_contention'
640
    // to avoid deadlock in the malloc call stack.
641
    // 2. LocalPool is empty, GlobalPool may allocate memory by malloc.
642
0
    if (!tls_warn_up || butil::local_pool_free_empty<SampledContention>()) {
643
        // In the malloc call stack, we cannot submit the contention.
644
0
        if (stack.FindSymbol((void*)malloc)) {
645
0
            return;
646
0
        }
647
0
    }
648
649
0
    auto sc = butil::get_object<SampledContention>();
650
    // Normalize duration_ns and count so that they're addable in later
651
    // processings. Notice that sampling_range is adjusted periodically by
652
    // collecting thread.
653
0
    sc->duration_ns = csite.duration_ns * bvar::COLLECTOR_SAMPLING_BASE
654
0
        / csite.sampling_range;
655
0
    sc->count = bvar::COLLECTOR_SAMPLING_BASE / (double)csite.sampling_range;
656
0
    sc->nframes = stack.CopyAddressTo(sc->stack, arraysize(sc->stack));
657
0
    sc->submit(now_ns / 1000);  // may lock
658
    // Once a contention has been submitted, warm-up is complete.
659
0
    tls_warn_up = true;
660
0
}
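A worked example of the normalization above, assuming (for illustration only) bvar::COLLECTOR_SAMPLING_BASE == 16384:

    // sampling_range = 4096   -> each sample stands for 16384 / 4096 = 4 contentions
    // csite.duration_ns = 1000 -> sc->duration_ns = 1000 * 16384 / 4096 = 4000
    // sc->count = 16384 / 4096.0 = 4.0, so summed counts approximate real event counts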
661
662
#if BRPC_DEBUG_LOCK
663
#define MUTEX_RESET_OWNER_COMMON(owner)                                              \
664
    ((butil::atomic<bool>*)&(owner).hold)                                            \
665
        ->store(false, butil::memory_order_relaxed)
666
667
#define PTHREAD_MUTEX_SET_OWNER(owner)                                               \
668
    owner.id = pthread_numeric_id();                                                 \
669
    ((butil::atomic<bool>*)&(owner).hold)                                            \
670
        ->store(true, butil::memory_order_release)
671
672
// Check if the mutex has been locked by the current thread.
673
// Double lock on the same thread will cause deadlock.
674
#define PTHREAD_MUTEX_CHECK_OWNER(owner)                                             \
675
    bool hold = ((butil::atomic<bool>*)&(owner).hold)                                \
676
        ->load(butil::memory_order_acquire);                                         \
677
    if (hold && (owner).id == pthread_numeric_id()) {                                \
678
        butil::debug::StackTrace trace(true);                                        \
679
        LOG(ERROR) << "Detected deadlock caused by double lock of FastPthreadMutex:" \
680
                   << std::endl << trace.ToString();                                 \
681
    }
682
#else
683
0
#define MUTEX_RESET_OWNER_COMMON(owner) ((void)owner)
684
0
#define PTHREAD_MUTEX_SET_OWNER(owner) ((void)owner)
685
0
#define PTHREAD_MUTEX_CHECK_OWNER(owner) ((void)owner)
686
#endif // BRPC_DEBUG_LOCK
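To illustrate the bug class these owner checks catch, a hypothetical double lock on a non-recursive mutex (bthread::FastPthreadMutex is declared in bthread/mutex.h):

    bthread::FastPthreadMutex mu;
    void buggy() {
        mu.lock();
        mu.lock();   // double lock on the same thread: deadlock; with BRPC_DEBUG_LOCK
                     // the checks above log "Detected deadlock caused by double lock"
                     // with a stack trace before the thread hangs
        mu.unlock();
    }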
687
688
namespace internal {
689
690
#ifndef NO_PTHREAD_MUTEX_HOOK
691
692
#if BRPC_DEBUG_LOCK
693
struct BAIDU_CACHELINE_ALIGNMENT MutexOwnerMapEntry {
694
    butil::static_atomic<bool> valid;
695
    pthread_mutex_t* mutex;
696
    mutex_owner_t owner;
697
};
698
699
// The map storing owner information for pthread_mutex. Different from
700
// bthread_mutex, we can't save stuff into pthread_mutex, nor can we
701
// save the info in TLS reliably, since a mutex can be unlocked in a different
702
// thread from the one that locked it (although rare).
703
static MutexOwnerMapEntry g_mutex_owner_map[MUTEX_MAP_SIZE] = {}; // zero-initialize
704
705
static void InitMutexOwnerMapEntry(pthread_mutex_t* mutex,
706
                                   const pthread_mutexattr_t* mutexattr) {
707
    int type = PTHREAD_MUTEX_DEFAULT;
708
    if (NULL != mutexattr) {
709
        pthread_mutexattr_gettype(mutexattr, &type);
710
    }
711
    // Only normal mutexes are tracked.
712
    if (type != PTHREAD_MUTEX_NORMAL) {
713
        return;
714
    }
715
716
    // Fast path: If the hash entry is not used, use it.
717
    MutexOwnerMapEntry& hash_entry =
718
        g_mutex_owner_map[hash_mutex_ptr(mutex) & (MUTEX_MAP_SIZE - 1)];
719
    if (!hash_entry.valid.exchange(true, butil::memory_order_relaxed)) {
720
        MUTEX_RESET_OWNER_COMMON(hash_entry.owner);
721
        return;
722
    }
723
724
    // Slow path: Find an unused entry.
725
    for (auto& entry : g_mutex_owner_map) {
726
        if (!entry.valid.exchange(true, butil::memory_order_relaxed)) {
727
            MUTEX_RESET_OWNER_COMMON(entry.owner);
728
            return;
729
        }
730
    }
731
}
732
733
static BUTIL_FORCE_INLINE
734
MutexOwnerMapEntry* FindMutexOwnerMapEntry(pthread_mutex_t* mutex) {
735
    if (NULL == mutex) {
736
        return NULL;
737
    }
738
739
    // Fast path.
740
    MutexOwnerMapEntry* hash_entry =
741
        &g_mutex_owner_map[hash_mutex_ptr(mutex) & (MUTEX_MAP_SIZE - 1)];
742
    if (hash_entry->valid.load(butil::memory_order_relaxed) && hash_entry->mutex == mutex) {
743
        return hash_entry;
744
    }
745
    // Slow path.
746
    for (auto& entry : g_mutex_owner_map) {
747
        if (entry.valid.load(butil::memory_order_relaxed) && entry.mutex == mutex) {
748
            return &entry;
749
        }
750
    }
751
    return NULL;
752
}
753
754
static void DestroyMutexOwnerMapEntry(pthread_mutex_t* mutex) {
755
    MutexOwnerMapEntry* entry = FindMutexOwnerMapEntry(mutex);
756
    if (NULL != entry) {
757
        entry->valid.store(false, butil::memory_order_relaxed);
758
    }
759
}
760
761
#define INIT_MUTEX_OWNER_MAP_ENTRY(mutex, mutexattr) \
762
    ::bthread::internal::InitMutexOwnerMapEntry(mutex, mutexattr)
763
764
#define DESTROY_MUTEX_OWNER_MAP_ENTRY(mutex) \
765
    ::bthread::internal::DestroyMutexOwnerMapEntry(mutex)
766
767
#define FIND_SYS_PTHREAD_MUTEX_OWNER_MAP_ENTRY(mutex) \
768
    MutexOwnerMapEntry* entry = ::bthread::internal::FindMutexOwnerMapEntry(mutex)
769
770
#define SYS_PTHREAD_MUTEX_CHECK_OWNER              \
771
    if (NULL != entry) {                           \
772
        PTHREAD_MUTEX_CHECK_OWNER(entry->owner);   \
773
    }
774
775
#define SYS_PTHREAD_MUTEX_SET_OWNER                \
776
    if (NULL != entry) {                           \
777
        PTHREAD_MUTEX_SET_OWNER(entry->owner);     \
778
    }
779
780
#define SYS_PTHREAD_MUTEX_RESET_OWNER(mutex)       \
781
    FIND_SYS_PTHREAD_MUTEX_OWNER_MAP_ENTRY(mutex); \
782
    if (NULL != entry) {                           \
783
        MUTEX_RESET_OWNER_COMMON(entry->owner);           \
784
    }
785
786
#else
787
11.6k
#define INIT_MUTEX_OWNER_MAP_ENTRY(mutex, mutexattr) ((void)0)
788
9.49k
#define DESTROY_MUTEX_OWNER_MAP_ENTRY(mutex) ((void)0)
789
32.5k
#define FIND_SYS_PTHREAD_MUTEX_OWNER_MAP_ENTRY(mutex) ((void)0)
790
32.5k
#define SYS_PTHREAD_MUTEX_CHECK_OWNER ((void)0)
791
32.5k
#define SYS_PTHREAD_MUTEX_SET_OWNER ((void)0)
792
32.5k
#define SYS_PTHREAD_MUTEX_RESET_OWNER(mutex) ((void)0)
793
#endif // BRPC_DEBUG_LOCK
794
795
796
#if HAS_PTHREAD_MUTEX_TIMEDLOCK
797
BUTIL_FORCE_INLINE int pthread_mutex_lock_internal(pthread_mutex_t* mutex,
798
32.5k
                                                   const struct timespec* abstime) {
799
32.5k
    int rc = 0;
800
32.5k
    if (NULL == abstime) {
801
32.5k
        FIND_SYS_PTHREAD_MUTEX_OWNER_MAP_ENTRY(mutex);
802
32.5k
        SYS_PTHREAD_MUTEX_CHECK_OWNER;
803
32.5k
        rc = sys_pthread_mutex_lock(mutex);
804
32.5k
        if (0 == rc) {
805
32.5k
            SYS_PTHREAD_MUTEX_SET_OWNER;
806
32.5k
        }
807
32.5k
    } else {
808
0
        rc = sys_pthread_mutex_timedlock(mutex, abstime);
809
0
        if (0 == rc) {
810
0
            FIND_SYS_PTHREAD_MUTEX_OWNER_MAP_ENTRY(mutex);
811
0
            SYS_PTHREAD_MUTEX_SET_OWNER;
812
0
        }
813
0
    }
814
32.5k
    if (0 == rc) {
815
32.5k
        ADD_TLS_PTHREAD_LOCK_COUNT;
816
32.5k
    }
817
32.5k
    return rc;
818
32.5k
}
819
#else
820
BUTIL_FORCE_INLINE int pthread_mutex_lock_internal(pthread_mutex_t* mutex,
821
                                                   const struct timespec*/* Not supported */) {
822
    FIND_SYS_PTHREAD_MUTEX_OWNER_MAP_ENTRY(mutex);
823
    SYS_PTHREAD_MUTEX_CHECK_OWNER;
824
    int rc = sys_pthread_mutex_lock(mutex);
825
    if (0 == rc) {
826
        SYS_PTHREAD_MUTEX_SET_OWNER;
827
        ADD_TLS_PTHREAD_LOCK_COUNT;
828
    }
829
    return rc;
830
}
831
#endif // HAS_PTHREAD_MUTEX_TIMEDLOCK
832
833
0
BUTIL_FORCE_INLINE int pthread_mutex_trylock_internal(pthread_mutex_t* mutex) {
834
0
    int rc = sys_pthread_mutex_trylock(mutex);
835
0
    if (0 == rc) {
836
0
        FIND_SYS_PTHREAD_MUTEX_OWNER_MAP_ENTRY(mutex);
837
0
        SYS_PTHREAD_MUTEX_SET_OWNER;
838
0
        ADD_TLS_PTHREAD_LOCK_COUNT;
839
0
    }
840
0
    return rc;
841
0
}
842
843
32.5k
BUTIL_FORCE_INLINE int pthread_mutex_unlock_internal(pthread_mutex_t* mutex) {
844
32.5k
    SYS_PTHREAD_MUTEX_RESET_OWNER(mutex);
845
32.5k
    SUB_TLS_PTHREAD_LOCK_COUNT;
846
32.5k
    return sys_pthread_mutex_unlock(mutex);
847
32.5k
}
848
#endif // NO_PTHREAD_MUTEX_HOOK
849
850
BUTIL_FORCE_INLINE int pthread_mutex_lock_internal(FastPthreadMutex* mutex,
851
0
                                                   const struct timespec* abstime) {
852
0
    if (NULL == abstime) {
853
0
        mutex->lock();
854
0
        return 0;
855
0
    } else {
856
0
        return mutex->timed_lock(abstime) ? 0 : errno;
857
0
    }
858
0
}
859
860
0
BUTIL_FORCE_INLINE int pthread_mutex_trylock_internal(FastPthreadMutex* mutex) {
861
0
    return mutex->try_lock() ? 0 : EBUSY;
862
0
}
863
864
0
BUTIL_FORCE_INLINE int pthread_mutex_unlock_internal(FastPthreadMutex* mutex) {
865
0
    mutex->unlock();
866
0
    return 0;
867
0
}
868
869
template <typename Mutex>
870
32.5k
BUTIL_FORCE_INLINE int pthread_mutex_lock_impl(Mutex* mutex, const struct timespec* abstime) {
871
    // Don't change behavior of lock when profiler is off.
872
32.5k
    if (!g_cp ||
873
        // collecting code including backtrace() and submit() may call
874
        // pthread_mutex_lock and cause deadlock. Don't sample.
875
32.5k
        tls_inside_lock) {
876
32.5k
        return pthread_mutex_lock_internal(mutex, abstime);
877
32.5k
    }
878
    // Don't slow down non-contended locks.
879
0
    int rc = pthread_mutex_trylock_internal(mutex);
880
0
    if (rc != EBUSY) {
881
0
        return rc;
882
0
    }
883
    // Ask bvar::Collector if this (contended) locking should be sampled
884
0
    const size_t sampling_range = bvar::is_collectable(&g_cp_sl);
885
886
0
    bthread_contention_site_t* csite = NULL;
887
0
#ifndef DONT_SPEEDUP_PTHREAD_CONTENTION_PROFILER_WITH_TLS
888
0
    TLSPthreadContentionSites& fast_alt = tls_csites;
889
0
    if (fast_alt.cp_version != g_cp_version) {
890
0
        fast_alt.cp_version = g_cp_version;
891
0
        fast_alt.count = 0;
892
0
    }
893
0
    if (fast_alt.count < TLS_MAX_COUNT) {
894
0
        MutexAndContentionSite& entry = fast_alt.list[fast_alt.count++];
895
0
        entry.mutex = mutex;
896
0
        csite = &entry.csite;
897
0
        if (!bvar::is_sampling_range_valid(sampling_range)) {
898
0
            make_contention_site_invalid(&entry.csite);
899
0
            return pthread_mutex_lock_internal(mutex, abstime);
900
0
        }
901
0
    }
902
0
#endif
903
0
    if (!bvar::is_sampling_range_valid(sampling_range)) {  // don't sample
904
0
        return pthread_mutex_lock_internal(mutex, abstime);
905
0
    }
906
    // Lock and monitor the waiting time.
907
0
    const int64_t start_ns = butil::cpuwide_time_ns();
908
0
    rc = pthread_mutex_lock_internal(mutex, abstime);
909
0
    if (!rc) { // Inside lock
910
0
        if (!csite) {
911
0
            csite = add_pthread_contention_site(mutex);
912
0
            if (csite == NULL) {
913
0
                return rc;
914
0
            }
915
0
        }
916
0
        csite->duration_ns = butil::cpuwide_time_ns() - start_ns;
917
0
        csite->sampling_range = sampling_range;
918
0
    } // else rare
919
0
    return rc;
920
0
}
Instantiation: int bthread::internal::pthread_mutex_lock_impl<pthread_mutex_t>(pthread_mutex_t*, timespec const*); per-instantiation counts are identical to the merged listing above.
Unexecuted instantiation: int bthread::internal::pthread_mutex_lock_impl<bthread::internal::FastPthreadMutex>(bthread::internal::FastPthreadMutex*, timespec const*)
921
922
template <typename Mutex>
923
0
BUTIL_FORCE_INLINE int pthread_mutex_trylock_impl(Mutex* mutex) {
924
0
    return pthread_mutex_trylock_internal(mutex);
925
0
}
926
927
template <typename Mutex>
928
32.5k
BUTIL_FORCE_INLINE int pthread_mutex_unlock_impl(Mutex* mutex) {
929
    // Don't change behavior of unlock when profiler is off.
930
32.5k
    if (!g_cp || tls_inside_lock) {
931
        // This branch has an issue: an entry created by
932
        // add_pthread_contention_site may not be cleared. Thus we add a
933
        // 16-bit rolling version in the entry to detect such entries.
934
32.5k
        return pthread_mutex_unlock_internal(mutex);
935
32.5k
    }
936
0
    int64_t unlock_start_ns = 0;
937
0
    bool miss_in_tls = true;
938
0
    bthread_contention_site_t saved_csite = {0,0};
939
0
#ifndef DONT_SPEEDUP_PTHREAD_CONTENTION_PROFILER_WITH_TLS
940
0
    TLSPthreadContentionSites& fast_alt = tls_csites;
941
0
    for (int i = fast_alt.count - 1; i >= 0; --i) {
942
0
        if (fast_alt.list[i].mutex == mutex) {
943
0
            if (is_contention_site_valid(fast_alt.list[i].csite)) {
944
0
                saved_csite = fast_alt.list[i].csite;
945
0
                unlock_start_ns = butil::cpuwide_time_ns();
946
0
            }
947
0
            fast_alt.list[i] = fast_alt.list[--fast_alt.count];
948
0
            miss_in_tls = false;
949
0
            break;
950
0
        }
951
0
    }
952
0
#endif
953
    // Check the map to see if the lock is sampled. Notice that we're still
954
    // inside the critical section.
955
0
    if (miss_in_tls) {
956
0
        if (remove_pthread_contention_site(mutex, &saved_csite)) {
957
0
            unlock_start_ns = butil::cpuwide_time_ns();
958
0
        }
959
0
    }
960
0
    const int rc = pthread_mutex_unlock_internal(mutex);
961
    // [Outside lock]
962
0
    if (unlock_start_ns) {
963
0
        const int64_t unlock_end_ns = butil::cpuwide_time_ns();
964
0
        saved_csite.duration_ns += unlock_end_ns - unlock_start_ns;
965
0
        submit_contention(saved_csite, unlock_end_ns);
966
0
    }
967
0
    return rc;
968
32.5k
}
Instantiation: int bthread::internal::pthread_mutex_unlock_impl<pthread_mutex_t>(pthread_mutex_t*); per-instantiation counts are identical to the merged listing above.
Unexecuted instantiation: int bthread::internal::pthread_mutex_unlock_impl<bthread::internal::FastPthreadMutex>(bthread::internal::FastPthreadMutex*)
969
970
}
971
972
#ifndef NO_PTHREAD_MUTEX_HOOK
973
32.5k
BUTIL_FORCE_INLINE int pthread_mutex_lock_impl(pthread_mutex_t* mutex) {
974
32.5k
    return internal::pthread_mutex_lock_impl(mutex, NULL);
975
32.5k
}
976
977
0
BUTIL_FORCE_INLINE int pthread_mutex_trylock_impl(pthread_mutex_t* mutex) {
978
0
    return internal::pthread_mutex_trylock_impl(mutex);
979
0
}
980
981
#if HAS_PTHREAD_MUTEX_TIMEDLOCK
982
BUTIL_FORCE_INLINE int pthread_mutex_timedlock_impl(pthread_mutex_t* mutex,
983
0
                                                    const struct timespec* abstime) {
984
0
    return internal::pthread_mutex_lock_impl(mutex, abstime);
985
0
}
986
#endif // HAS_PTHREAD_MUTEX_TIMEDLOCK
987
988
32.5k
BUTIL_FORCE_INLINE int pthread_mutex_unlock_impl(pthread_mutex_t* mutex) {
989
32.5k
    return internal::pthread_mutex_unlock_impl(mutex);
990
32.5k
}
991
#endif
992
993
// Implement bthread_mutex_t related functions
994
struct MutexInternal {
995
    butil::static_atomic<unsigned char> locked;
996
    butil::static_atomic<unsigned char> contended;
997
    unsigned short padding;
998
};
999
1000
const MutexInternal MUTEX_CONTENDED_RAW = {{1},{1},0};
1001
const MutexInternal MUTEX_LOCKED_RAW = {{1},{0},0};
1002
// Define as macros rather than constants, which couldn't be put in a read-only
1003
// section and would be affected by the initialization-order fiasco.
1004
0
#define BTHREAD_MUTEX_CONTENDED (*(const unsigned*)&bthread::MUTEX_CONTENDED_RAW)
1005
0
#define BTHREAD_MUTEX_LOCKED (*(const unsigned*)&bthread::MUTEX_LOCKED_RAW)
1006
1007
BAIDU_CASSERT(sizeof(unsigned) == sizeof(MutexInternal),
1008
              sizeof_mutex_internal_must_equal_unsigned);
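For reference, the resulting 4-byte lock word values, assuming a little-endian layout (locked is byte 0, contended is byte 1):

    // unlocked:            0x00000000
    // MUTEX_LOCKED_RAW:    {locked=1, contended=0} -> word 0x00000001
    // MUTEX_CONTENDED_RAW: {locked=1, contended=1} -> word 0x00000101
    // try_lock() exchanges only the `locked' byte; the contended slow path
    // exchanges the whole word and sleeps while the old value had `locked' set.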
1009
1010
#if BRPC_DEBUG_LOCK
1011
1012
#define BTHREAD_MUTEX_SET_OWNER                                                             \
1013
    do {                                                                                    \
1014
        TaskGroup* task_group = BAIDU_GET_VOLATILE_THREAD_LOCAL(tls_task_group);            \
1015
        if (NULL != task_group && !task_group->is_current_main_task()) {                    \
1016
            m->owner.id = bthread_self();                                                   \
1017
        } else {                                                                            \
1018
            m->owner.id = pthread_numeric_id();                                             \
1019
        }                                                                                   \
1020
        ((butil::atomic<bool>*)&m->owner.hold)                                              \
1021
            ->store(true, butil::memory_order_release);                                     \
1022
    } while(false)
1023
1024
// Check if the mutex has been locked by the current thread.
1025
// Double lock on the same thread will cause deadlock.
1026
#define BTHREAD_MUTEX_CHECK_OWNER                                                            \
1027
        bool hold = ((butil::atomic<bool>*)&m->owner.hold)                                   \
1028
            ->load(butil::memory_order_acquire);                                             \
1029
        bool double_lock =                                                                   \
1030
            hold && (m->owner.id == bthread_self() || m->owner.id == pthread_numeric_id());  \
1031
        if (double_lock) {                                                                   \
1032
            butil::debug::StackTrace trace(true);                                            \
1033
            LOG(ERROR) << "Detected deadlock caused by double lock of bthread_mutex_t:"      \
1034
                       << std::endl << trace.ToString();                                     \
1035
       }
1036
#else
1037
0
#define BTHREAD_MUTEX_SET_OWNER ((void)0)
1038
0
#define BTHREAD_MUTEX_CHECK_OWNER ((void)0)
1039
#endif // BRPC_DEBUG_LOCK
1040
1041
0
inline int mutex_trylock_impl(bthread_mutex_t* m) {
1042
0
    MutexInternal* split = (MutexInternal*)m->butex;
1043
0
    if (!split->locked.exchange(1, butil::memory_order_acquire)) {
1044
0
        BTHREAD_MUTEX_SET_OWNER;
1045
0
        return 0;
1046
0
    }
1047
0
    return EBUSY;
1048
0
}
1049
1050
const int MAX_SPIN_ITER = 4;
1051
1052
inline int mutex_lock_contended_impl(bthread_mutex_t* __restrict m,
1053
0
                                     const struct timespec* __restrict abstime) {
1054
0
    BTHREAD_MUTEX_CHECK_OWNER;
1055
    // When a bthread first contends for a lock, active spinning makes sense.
1056
    // Spin only a few times, and only if the local `rq' is empty.
1057
0
    TaskGroup* g = BAIDU_GET_VOLATILE_THREAD_LOCAL(tls_task_group);
1058
0
    if (BAIDU_UNLIKELY(NULL == g || g->rq_size() == 0)) {
1059
0
        for (int i = 0; i < MAX_SPIN_ITER; ++i) {
1060
0
            cpu_relax();
1061
0
        }
1062
0
    }
1063
1064
0
    bool queue_lifo = false;
1065
0
    bool first_wait = true;
1066
0
    auto whole = (butil::atomic<unsigned>*)m->butex;
1067
0
    while (whole->exchange(BTHREAD_MUTEX_CONTENDED) & BTHREAD_MUTEX_LOCKED) {
1068
0
        if (bthread::butex_wait(whole, BTHREAD_MUTEX_CONTENDED, abstime, queue_lifo) < 0 &&
1069
0
            errno != EWOULDBLOCK && errno != EINTR/*note*/) {
1070
            // A mutex lock should ignore interruptions in general since
1071
            // user code is unlikely to check the return value.
1072
0
            return errno;
1073
0
        }
1074
        // Ignore EWOULDBLOCK and EINTR.
1075
0
        if (first_wait && 0 == errno) {
1076
0
            first_wait = false;
1077
0
        }
1078
0
        if (!first_wait) {
1079
        // Normally, bthreads are queued in FIFO order. But when competing with newly
1080
        // arriving bthreads over ownership of the mutex, a woken-up bthread has a
1081
        // good chance of losing, because newly arriving bthreads are already
1082
        // running on a CPU and there can be lots of them. In such cases, for fairness
1083
        // and to avoid starvation, it is queued at the head of the waiter queue.
1084
0
            queue_lifo = true;
1085
0
        }
1086
0
    }
1087
0
    BTHREAD_MUTEX_SET_OWNER;
1088
0
    return 0;
1089
0
}
1090
1091
#ifdef BTHREAD_USE_FAST_PTHREAD_MUTEX
1092
namespace internal {
1093
1094
0
FastPthreadMutex::FastPthreadMutex() : _futex(0) {
1095
0
    MUTEX_RESET_OWNER_COMMON(_owner);
1096
0
}
1097
1098
0
int FastPthreadMutex::lock_contended(const struct timespec* abstime) {
1099
0
    int64_t abstime_us = 0;
1100
0
    if (NULL != abstime) {
1101
0
        abstime_us = butil::timespec_to_microseconds(*abstime);
1102
0
    }
1103
0
    auto whole = (butil::atomic<unsigned>*)&_futex;
1104
0
    while (whole->exchange(BTHREAD_MUTEX_CONTENDED) & BTHREAD_MUTEX_LOCKED) {
1105
0
        timespec* ptimeout = NULL;
1106
0
        timespec timeout{};
1107
0
        if (NULL != abstime) {
1108
0
            timeout = butil::microseconds_to_timespec(
1109
0
                abstime_us - butil::gettimeofday_us());
1110
0
            ptimeout = &timeout;
1111
0
        }
1112
0
        if (NULL == abstime  || abstime_us > MIN_SLEEP_US) {
1113
0
            if (futex_wait_private(whole, BTHREAD_MUTEX_CONTENDED, ptimeout) < 0
1114
0
                && errno != EWOULDBLOCK && errno != EINTR/*note*/) {
1115
                // A mutex lock should ignore interruptions in general since
1116
                // user code is unlikely to check the return value.
1117
0
                return errno;
1118
0
            }
1119
0
        } else {
1120
0
            errno = ETIMEDOUT;
1121
0
            return errno;
1122
0
        }
1123
0
    }
1124
0
    PTHREAD_MUTEX_SET_OWNER(_owner);
1125
0
    ADD_TLS_PTHREAD_LOCK_COUNT;
1126
0
    return 0;
1127
0
}
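A note on the timeout handling above: `abstime' is an absolute deadline, while the futex wait takes a relative timeout, so the remaining time is recomputed on every loop iteration. Sketch of the arithmetic, using the helper names that appear above:

    // abstime_us = butil::timespec_to_microseconds(*abstime)         // deadline
    // timeout    = butil::microseconds_to_timespec(
    //                  abstime_us - butil::gettimeofday_us())         // time left
    // futex_wait_private(whole, BTHREAD_MUTEX_CONTENDED, &timeout)    // relative wait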

void FastPthreadMutex::lock() {
    if (try_lock()) {
        return;
    }

    PTHREAD_MUTEX_CHECK_OWNER(_owner);
    (void)lock_contended(NULL);
}

bool FastPthreadMutex::try_lock() {
    auto split = (bthread::MutexInternal*)&_futex;
    bool lock = !split->locked.exchange(1, butil::memory_order_acquire);
    if (lock) {
        PTHREAD_MUTEX_SET_OWNER(_owner);
        ADD_TLS_PTHREAD_LOCK_COUNT;
    }
    return lock;
}
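
// NOTE: try_lock touches only the `locked' byte through the MutexInternal
// byte-wise view of the same 32-bit word, so a failed attempt leaves the
// contended flag untouched and the owner's unlock can still skip the futex
// wakeup.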

bool FastPthreadMutex::timed_lock(const struct timespec* abstime) {
    if (try_lock()) {
        return true;
    }
    return 0 == lock_contended(abstime);
}

void FastPthreadMutex::unlock() {
    SUB_TLS_PTHREAD_LOCK_COUNT;
    MUTEX_RESET_OWNER_COMMON(_owner);
    auto whole = (butil::atomic<unsigned>*)&_futex;
    const unsigned prev = whole->exchange(0, butil::memory_order_release);
    // CAUTION: the mutex may be destroyed, check comments before butex_create
    if (prev != BTHREAD_MUTEX_LOCKED) {
        futex_wake_private(whole, 1);
    }
}

} // namespace internal
#endif // BTHREAD_USE_FAST_PTHREAD_MUTEX
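
// NOTE: in unlock() above, `prev == BTHREAD_MUTEX_LOCKED' means no waiter
// ever marked the word contended, so the futex syscall is skipped; any other
// non-zero previous value may have a thread parked on the futex, and exactly
// one waiter is woken.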

void FastPthreadMutex::lock() {
    internal::pthread_mutex_lock_impl(&_mutex, NULL);
}

void FastPthreadMutex::unlock() {
    internal::pthread_mutex_unlock_impl(&_mutex);
}

#if defined(BTHREAD_USE_FAST_PTHREAD_MUTEX) || HAS_PTHREAD_MUTEX_TIMEDLOCK
bool FastPthreadMutex::timed_lock(const struct timespec* abstime) {
    return internal::pthread_mutex_lock_impl(&_mutex, abstime) == 0;
}
#endif // BTHREAD_USE_FAST_PTHREAD_MUTEX || HAS_PTHREAD_MUTEX_TIMEDLOCK

} // namespace bthread

__BEGIN_DECLS

int bthread_mutex_init(bthread_mutex_t* __restrict m,
                       const bthread_mutexattr_t* __restrict attr) {
    bthread::make_contention_site_invalid(&m->csite);
    MUTEX_RESET_OWNER_COMMON(m->owner);
    m->butex = bthread::butex_create_checked<unsigned>();
    if (!m->butex) {
        return ENOMEM;
    }
    *m->butex = 0;
    m->enable_csite = NULL == attr ? true : attr->enable_csite;
    return 0;
}
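
// A minimal usage sketch of the C API defined here (illustrative only; the
// declarations live in bthread/mutex.h):
//
//   bthread_mutex_t m;
//   bthread_mutex_init(&m, NULL);  // 0 on success, ENOMEM if butex allocation fails
//   bthread_mutex_lock(&m);
//   /* critical section */
//   bthread_mutex_unlock(&m);
//   bthread_mutex_destroy(&m);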

int bthread_mutex_destroy(bthread_mutex_t* m) {
    bthread::butex_destroy(m->butex);
    return 0;
}

int bthread_mutex_trylock(bthread_mutex_t* m) {
    return bthread::mutex_trylock_impl(m);
}

int bthread_mutex_lock_contended(bthread_mutex_t* m) {
    return bthread::mutex_lock_contended_impl(m, NULL);
}

static int bthread_mutex_lock_impl(bthread_mutex_t* __restrict m,
                                   const struct timespec* __restrict abstime) {
    if (0 == bthread::mutex_trylock_impl(m)) {
        return 0;
    }
    // Don't sample when the contention profiler is off.
    if (!bthread::g_cp) {
        return bthread::mutex_lock_contended_impl(m, abstime);
    }
    // Ask the Collector whether this (contended) locking should be sampled.
    const size_t sampling_range =
        m->enable_csite ? bvar::is_collectable(&bthread::g_cp_sl) : bvar::INVALID_SAMPLING_RANGE;
    if (!bvar::is_sampling_range_valid(sampling_range)) { // Don't sample
        return bthread::mutex_lock_contended_impl(m, abstime);
    }
    // Start sampling.
    const int64_t start_ns = butil::cpuwide_time_ns();
    // NOTE: Don't modify m->csite outside the lock since multiple threads
    // are still contending with each other.
    const int rc = bthread::mutex_lock_contended_impl(m, abstime);
    if (!rc) { // Inside lock
        m->csite.duration_ns = butil::cpuwide_time_ns() - start_ns;
        m->csite.sampling_range = sampling_range;
    } else if (rc == ETIMEDOUT) {
        // Failed to lock due to ETIMEDOUT; submit the elapsed time directly.
        const int64_t end_ns = butil::cpuwide_time_ns();
        const bthread_contention_site_t csite = {end_ns - start_ns, sampling_range};
        bthread::submit_contention(csite, end_ns);
    }
    return rc;
}
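
// NOTE: the sampling flow above in brief: a lock that was sampled and then
// acquired stores its wait time in m->csite and defers submission to the
// matching bthread_mutex_unlock(), which adds the wakeup cost; a timed-out
// lock never reaches unlock, so its contention record is submitted here
// directly.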

int bthread_mutex_lock(bthread_mutex_t* m) {
    return bthread_mutex_lock_impl(m, NULL);
}

int bthread_mutex_timedlock(bthread_mutex_t* __restrict m,
                            const struct timespec* __restrict abstime) {
    return bthread_mutex_lock_impl(m, abstime);
}

int bthread_mutex_unlock(bthread_mutex_t* m) {
    auto whole = (butil::atomic<unsigned>*)m->butex;
    bthread_contention_site_t saved_csite = {0, 0};
    bool is_valid = bthread::is_contention_site_valid(m->csite);
    if (is_valid) {
        saved_csite = m->csite;
        bthread::make_contention_site_invalid(&m->csite);
    }
    MUTEX_RESET_OWNER_COMMON(m->owner);
    const unsigned prev = whole->exchange(0, butil::memory_order_release);
    // CAUTION: the mutex may be destroyed, check comments before butex_create
    if (prev == BTHREAD_MUTEX_LOCKED) {
        return 0;
    }
    // Wake up one waiter.
    if (!is_valid) {
        bthread::butex_wake(whole);
        return 0;
    }
    const int64_t unlock_start_ns = butil::cpuwide_time_ns();
    bthread::butex_wake(whole);
    const int64_t unlock_end_ns = butil::cpuwide_time_ns();
    saved_csite.duration_ns += unlock_end_ns - unlock_start_ns;
    bthread::submit_contention(saved_csite, unlock_end_ns);
    return 0;
}
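
// NOTE: only the butex_wake() call is timed in the sampled branch above, so
// the submitted duration covers lock-wait plus wakeup cost while the critical
// section itself stays out of the measurement.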

int bthread_mutexattr_init(bthread_mutexattr_t* attr) {
    attr->enable_csite = true;
    return 0;
}

int bthread_mutexattr_disable_csite(bthread_mutexattr_t* attr) {
    attr->enable_csite = false;
    return 0;
}

int bthread_mutexattr_destroy(bthread_mutexattr_t* attr) {
    attr->enable_csite = true;
    return 0;
}
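
// To opt a single mutex out of contention profiling, disable its contention
// site through the attribute before init (illustrative sketch):
//
//   bthread_mutexattr_t attr;
//   bthread_mutexattr_init(&attr);
//   bthread_mutexattr_disable_csite(&attr);  // enable_csite = false
//   bthread_mutex_t m;
//   bthread_mutex_init(&m, &attr);           // contention on m is never sampled
//   bthread_mutexattr_destroy(&attr);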

#ifndef NO_PTHREAD_MUTEX_HOOK

int pthread_mutex_init(pthread_mutex_t* __restrict mutex,
                       const pthread_mutexattr_t* __restrict mutexattr) {
    INIT_MUTEX_OWNER_MAP_ENTRY(mutex, mutexattr);
    return bthread::sys_pthread_mutex_init(mutex, mutexattr);
}

int pthread_mutex_destroy(pthread_mutex_t* mutex) {
    DESTROY_MUTEX_OWNER_MAP_ENTRY(mutex);
    return bthread::sys_pthread_mutex_destroy(mutex);
}

int pthread_mutex_lock(pthread_mutex_t* mutex) {
    return bthread::pthread_mutex_lock_impl(mutex);
}
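
// NOTE: within this #ifndef block the library defines symbols with the same
// names as the libc pthread_mutex_* functions, so mutex operations in the
// whole process are intercepted; the original implementations are reached
// through the bthread::sys_pthread_mutex_* wrappers resolved earlier in this
// file.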

#if defined(OS_LINUX) && defined(OS_POSIX) && defined(__USE_XOPEN2K)
int pthread_mutex_timedlock(pthread_mutex_t* __restrict __mutex,
                            const struct timespec* __restrict __abstime) {
    return bthread::pthread_mutex_timedlock_impl(__mutex, __abstime);
}
#endif // OS_LINUX && OS_POSIX && __USE_XOPEN2K

int pthread_mutex_trylock(pthread_mutex_t* mutex) {
    return bthread::pthread_mutex_trylock_impl(mutex);
}

int pthread_mutex_unlock(pthread_mutex_t* mutex) {
    return bthread::pthread_mutex_unlock_impl(mutex);
}
#endif // NO_PTHREAD_MUTEX_HOOK

__END_DECLS