/src/brpc/src/bthread/key.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | // bthread - An M:N threading library to make applications more concurrent. |
19 | | |
20 | | // Date: Sun Aug 3 12:46:15 CST 2014 |
21 | | |
22 | | #include <pthread.h> |
23 | | #include <gflags/gflags.h> |
24 | | |
25 | | #include "bthread/errno.h" // EAGAIN |
26 | | #include "bthread/task_group.h" // TaskGroup |
27 | | #include "butil/atomicops.h" |
28 | | #include "butil/macros.h" |
29 | | #include "butil/thread_key.h" |
30 | | #include "butil/thread_local.h" |
31 | | #include "bvar/passive_status.h" |
32 | | |
33 | | // Implement bthread_key_t related functions |
34 | | |
35 | | namespace bthread { |
36 | | |
DEFINE_uint32(key_table_list_size, 4000,
              "The maximum length of the KeyTableList. Once this value is "
              "exceeded, a portion of the KeyTables will be moved to the "
              "global free_keytables list.");

// NOTE(review): "globle" is a typo, but the flag name is user-visible and
// kept for compatibility.
DEFINE_uint32(borrow_from_globle_size, 200,
              "The maximum number of KeyTables retrieved in a single operation "
              "from the global free_keytables when no KeyTable exists in the "
              "current thread's keytable_list.");
46 | | |
EXTERN_BAIDU_VOLATILE_THREAD_LOCAL(TaskGroup*, tls_task_group);

class KeyTable;

// defined in task_group.cpp
extern __thread LocalStorage tls_bls;
// Whether this pthread already registered cleanup_pthread via thread_atexit,
// so the cleanup hook is installed at most once per thread.
static __thread bool tls_ever_created_keytable = false;

// We keep thread specific data in a two-level array. The top-level array
// contains at most KEY_1STLEVEL_SIZE pointers to dynamically allocated
// arrays of at most KEY_2NDLEVEL_SIZE data pointers. Many applications
// may just occupy one or two second level array, thus this mechanism keeps
// memory footprint smaller and we can change KEY_1STLEVEL_SIZE to a
// bigger number more freely. The tradeoff is an additional memory indirection:
// negligible at most time.
static const uint32_t KEY_2NDLEVEL_SIZE = 32;

// Notice that we're trying to make the memory of second level and first
// level both 256 bytes to make memory allocator happier.
static const uint32_t KEY_1STLEVEL_SIZE = 31;

// Max tls in one thread, currently the value is 992 which should be enough
// for most projects throughout baidu.
static const uint32_t KEYS_MAX = KEY_2NDLEVEL_SIZE * KEY_1STLEVEL_SIZE;

// destructors/version of TLS.
struct KeyInfo {
    uint32_t version;                  // bumped on every key_delete; 0 means "never used"
    void (*dtor)(void*, const void*);  // runs on stored values when a table is destroyed
    const void* dtor_args;             // opaque second argument forwarded to dtor
};
static KeyInfo s_key_info[KEYS_MAX] = {};

// For allocating keys. All three fields below are guarded by s_key_mutex.
static pthread_mutex_t s_key_mutex = PTHREAD_MUTEX_INITIALIZER;
static size_t nfreekey = 0;             // number of reusable indexes in s_free_keys
static size_t nkey = 0;                 // high-water mark of allocated key indexes
static uint32_t s_free_keys[KEYS_MAX];  // stack of freed key indexes

// Stats.
static butil::static_atomic<size_t> nkeytable = BUTIL_STATIC_ATOMIC_INIT(0);
static butil::static_atomic<size_t> nsubkeytable = BUTIL_STATIC_ATOMIC_INIT(0);
89 | | |
// The second-level array: a fixed window of KEY_2NDLEVEL_SIZE
// (version, value) slots.
// Align with cacheline to avoid false sharing.
class BAIDU_CACHELINE_ALIGNMENT SubKeyTable {
public:
    SubKeyTable() {
        // Zero-fill: version 0 marks an empty slot (valid keys never have
        // version 0, see bthread_key_create2/bthread_key_delete).
        memset(_data, 0, sizeof(_data));
        nsubkeytable.fetch_add(1, butil::memory_order_relaxed);
    }

    // NOTE: Call clear first.
    ~SubKeyTable() {
        nsubkeytable.fetch_sub(1, butil::memory_order_relaxed);
    }

    // Runs key destructors on all live slots. `offset' is the absolute key
    // index of slot 0 of this sub-table, used to address s_key_info.
    void clear(uint32_t offset) {
        for (uint32_t i = 0; i < KEY_2NDLEVEL_SIZE; ++i) {
            void* p = _data[i].ptr;
            if (p) {
                // Set the position to NULL before calling dtor which may set
                // the position again.
                _data[i].ptr = NULL;

                // Snapshot the KeyInfo by value; only run the dtor when the
                // slot's version still matches (i.e. the key was not deleted
                // after the value was stored).
                KeyInfo info = bthread::s_key_info[offset + i];
                if (info.dtor && _data[i].version == info.version) {
                    info.dtor(p, info.dtor_args);
                }
            }
        }
    }

    // True iff every slot is empty (no dtor re-installed a value).
    bool cleared() const {
        // We need to iterate again to check if every slot is empty. An
        // alternative is remember if set_data() was called during clear.
        for (uint32_t i = 0; i < KEY_2NDLEVEL_SIZE; ++i) {
            if (_data[i].ptr) {
                return false;
            }
        }
        return true;
    }

    // Returns the stored pointer iff the slot was written under `version';
    // otherwise NULL (slot empty or written under a deleted key's version).
    inline void* get_data(uint32_t index, uint32_t version) const {
        if (_data[index].version == version) {
            return _data[index].ptr;
        }
        return NULL;
    }
    inline void set_data(uint32_t index, uint32_t version, void* data) {
        _data[index].version = version;
        _data[index].ptr = data;
    }

private:
    struct Data {
        uint32_t version;  // key version at the time the value was stored
        void* ptr;         // user data, destroyed via the key's dtor
    };
    Data _data[KEY_2NDLEVEL_SIZE];
};
149 | | |
// The first-level array: up to KEY_1STLEVEL_SIZE lazily-allocated
// SubKeyTables covering KEYS_MAX keys in total.
// Align with cacheline to avoid false sharing.
class BAIDU_CACHELINE_ALIGNMENT KeyTable {
public:
    KeyTable() : next(NULL) {
        memset(_subs, 0, sizeof(_subs));
        nkeytable.fetch_add(1, butil::memory_order_relaxed);
    }

    // Runs destructors of all stored values. A dtor may store TLS values
    // again, so retry up to PTHREAD_DESTRUCTOR_ITERATIONS rounds (mirroring
    // pthread TLS destruction) before giving up with an error log.
    ~KeyTable() {
        nkeytable.fetch_sub(1, butil::memory_order_relaxed);
        for (int ntry = 0; ntry < PTHREAD_DESTRUCTOR_ITERATIONS; ++ntry) {
            for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                if (_subs[i]) {
                    // Pass the absolute key index of the sub-table's slot 0.
                    _subs[i]->clear(i * KEY_2NDLEVEL_SIZE);
                }
            }
            bool all_cleared = true;
            for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                if (_subs[i] != NULL && !_subs[i]->cleared()) {
                    all_cleared = false;
                    break;
                }
            }
            if (all_cleared) {
                for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                    delete _subs[i];
                }
                return;
            }
        }
        LOG(ERROR) << "Fail to destroy all objects in KeyTable[" << this << ']';
    }

    // Returns the value of `key', or NULL when the key is out of range,
    // unset, or stale (stored under an older version).
    inline void* get_data(bthread_key_t key) const {
        const uint32_t subidx = key.index / KEY_2NDLEVEL_SIZE;
        if (subidx < KEY_1STLEVEL_SIZE) {
            const SubKeyTable* sub_kt = _subs[subidx];
            if (sub_kt) {
                return sub_kt->get_data(
                    key.index - subidx * KEY_2NDLEVEL_SIZE, key.version);
            }
        }
        return NULL;
    }

    // Stores `data' under `key'. Returns 0 on success, ENOMEM when the
    // sub-table cannot be allocated, EINVAL for an invalid/deleted key.
    inline int set_data(bthread_key_t key, void* data) {
        const uint32_t subidx = key.index / KEY_2NDLEVEL_SIZE;
        if (subidx < KEY_1STLEVEL_SIZE &&
            key.version == s_key_info[key.index].version) {
            SubKeyTable* sub_kt = _subs[subidx];
            if (sub_kt == NULL) {
                // Lazily allocate the second level on first store.
                sub_kt = new (std::nothrow) SubKeyTable;
                if (NULL == sub_kt) {
                    return ENOMEM;
                }
                _subs[subidx] = sub_kt;
            }
            sub_kt->set_data(key.index - subidx * KEY_2NDLEVEL_SIZE,
                             key.version, data);
            return 0;
        }
        CHECK(false) << "bthread_setspecific is called on invalid " << key;
        return EINVAL;
    }

public:
    KeyTable* next;  // intrusive link used by pool free-lists
private:
    SubKeyTable* _subs[KEY_1STLEVEL_SIZE];
};
221 | | |
// Per-thread free list of KeyTables belonging to one keytable_pool, linked
// through KeyTable::next.
class BAIDU_CACHELINE_ALIGNMENT KeyTableList {
public:
    KeyTableList() :
        _head(NULL), _tail(NULL), _length(0) {}

    // Destroys every remaining KeyTable. User dtors run inside ~KeyTable and
    // may call bthread_get/setspecific, so the TLS keytable (and the current
    // task's local storage) is pointed at the table being destroyed, then
    // restored afterwards.
    ~KeyTableList() {
        TaskGroup* g = BAIDU_GET_VOLATILE_THREAD_LOCAL(tls_task_group);
        KeyTable* old_kt = tls_bls.keytable;
        KeyTable* keytable = _head;
        while (keytable) {
            KeyTable* kt = keytable;
            keytable = kt->next;
            tls_bls.keytable = kt;
            if (g) {
                g->current_task()->local_storage.keytable = kt;
            }
            delete kt;
            if (old_kt == kt) {
                // The previously-installed table was just freed; never
                // restore a dangling pointer.
                old_kt = NULL;
            }
            // Reload: the task group may have changed while dtors ran.
            g = BAIDU_GET_VOLATILE_THREAD_LOCAL(tls_task_group);
        }
        tls_bls.keytable = old_kt;
        if (g) {
            g->current_task()->local_storage.keytable = old_kt;
        }
    }

    // Appends `keytable' at the tail; no-op when NULL.
    void append(KeyTable* keytable) {
        if (keytable == NULL) {
            return;
        }
        if (_head == NULL) {
            _head = _tail = keytable;
        } else {
            _tail->next = keytable;
            _tail = keytable;
        }
        keytable->next = NULL;
        _length++;
    }

    // Detaches and returns the head, or NULL when empty.
    KeyTable* remove_front() {
        if (_head == NULL) {
            return NULL;
        }
        KeyTable* temp = _head;
        _head = _head->next;
        _length--;
        if (_head == NULL) {
            _tail = NULL;
        }
        return temp;
    }

    // Moves the first `size' KeyTables to the front of `*target' (another
    // KeyTable::next-linked list). Moves nothing unless this list holds at
    // least `size' elements. Returns the number of tables moved.
    int move_first_n_to_target(KeyTable** target, uint32_t size) {
        if (size > _length || _head == NULL) {
            return 0;
        }

        KeyTable* current = _head;
        KeyTable* prev = NULL;
        uint32_t count = 0;
        while (current != NULL && count < size) {
            prev = current;
            current = current->next;
            count++;
        }
        if (prev != NULL) {
            if (*target == NULL) {
                *target = _head;
                prev->next = NULL;
            } else {
                // Splice the moved segment in front of the existing target.
                prev->next = *target;
                *target = _head;
            }
            _head = current;
            _length -= count;
            if (_head == NULL) {
                _tail = NULL;
            }
        }
        return count;
    }

    inline uint32_t get_length() const {
        return _length;
    }

    // Only for test: verifies the cached _length against the actual chain.
    inline bool check_length() {
        KeyTable* current = _head;
        uint32_t count = 0;
        while (current != NULL) {
            current = current->next;
            count++;
        }
        return count == _length;
    }

private:
    KeyTable* _head;   // oldest table, removed first
    KeyTable* _tail;   // newest table, appended here
    uint32_t _length;  // number of tables in the chain
};
327 | | |
328 | 0 | KeyTable* borrow_keytable(bthread_keytable_pool_t* pool) { |
329 | 0 | if (pool != NULL && (pool->list || pool->free_keytables)) { |
330 | 0 | KeyTable* p; |
331 | 0 | pthread_rwlock_rdlock(&pool->rwlock); |
332 | 0 | auto list = (butil::ThreadLocal<bthread::KeyTableList>*)pool->list; |
333 | 0 | if (list) { |
334 | 0 | p = list->get()->remove_front(); |
335 | 0 | if (p) { |
336 | 0 | pthread_rwlock_unlock(&pool->rwlock); |
337 | 0 | return p; |
338 | 0 | } |
339 | 0 | } |
340 | 0 | pthread_rwlock_unlock(&pool->rwlock); |
341 | 0 | if (pool->free_keytables) { |
342 | 0 | pthread_rwlock_wrlock(&pool->rwlock); |
343 | 0 | p = (KeyTable*)pool->free_keytables; |
344 | 0 | if (list) { |
345 | 0 | for (uint32_t i = 0; i < FLAGS_borrow_from_globle_size; ++i) { |
346 | 0 | if (p) { |
347 | 0 | pool->free_keytables = p->next; |
348 | 0 | list->get()->append(p); |
349 | 0 | p = (KeyTable*)pool->free_keytables; |
350 | 0 | --pool->size; |
351 | 0 | } else { |
352 | 0 | break; |
353 | 0 | } |
354 | 0 | } |
355 | 0 | KeyTable* result = list->get()->remove_front(); |
356 | 0 | pthread_rwlock_unlock(&pool->rwlock); |
357 | 0 | return result; |
358 | 0 | } else { |
359 | 0 | if (p) { |
360 | 0 | pool->free_keytables = p->next; |
361 | 0 | pthread_rwlock_unlock(&pool->rwlock); |
362 | 0 | return p; |
363 | 0 | } |
364 | 0 | } |
365 | 0 | pthread_rwlock_unlock(&pool->rwlock); |
366 | 0 | } |
367 | 0 | } |
368 | 0 | return NULL; |
369 | 0 | } |
370 | | |
// Referenced in task_group.cpp, must be extern.
// Caller of this function must hold the KeyTable.
// Gives `kt' back to `pool'. When the pool is NULL or already destroyed the
// table is deleted immediately (running its dtors).
void return_keytable(bthread_keytable_pool_t* pool, KeyTable* kt) {
    if (NULL == kt) {
        return;
    }
    if (pool == NULL) {
        delete kt;
        return;
    }
    pthread_rwlock_rdlock(&pool->rwlock);
    if (pool->destroyed) {
        pthread_rwlock_unlock(&pool->rwlock);
        delete kt;
        return;
    }
    // The read lock suffices here: each thread appends to its own
    // thread-local list.
    auto list = (butil::ThreadLocal<bthread::KeyTableList>*)pool->list;
    list->get()->append(kt);
    if (list->get()->get_length() > FLAGS_key_table_list_size) {
        // Local list too long: upgrade to the write lock and spill half of
        // it into the shared free_keytables list.
        pthread_rwlock_unlock(&pool->rwlock);
        pthread_rwlock_wrlock(&pool->rwlock);
        // Re-check: the pool may have been destroyed between the two locks.
        if (!pool->destroyed) {
            int out = list->get()->move_first_n_to_target(
                (KeyTable**)(&pool->free_keytables),
                FLAGS_key_table_list_size / 2);
            pool->size += out;
        }
    }
    pthread_rwlock_unlock(&pool->rwlock);
}
401 | | |
402 | 0 | static void cleanup_pthread(void* arg) { |
403 | 0 | KeyTable* kt = static_cast<KeyTable*>(arg); |
404 | 0 | if (kt) { |
405 | 0 | delete kt; |
406 | | // After deletion: tls may be set during deletion. |
407 | 0 | tls_bls.keytable = NULL; |
408 | 0 | } |
409 | 0 | } |
410 | | |
// Adapter: `arg' actually stores a pthread-style destructor void(*)(void*);
// recover it and invoke it on `data'.
static void arg_as_dtor(void* data, const void* arg) {
    typedef void (*KeyDtor)(void*);
    KeyDtor dtor = (KeyDtor)arg;
    dtor(data);
}
415 | | |
// bvar callback: number of currently allocated (not yet deleted) keys.
static int get_key_count(void*) {
    BAIDU_SCOPED_LOCK(bthread::s_key_mutex);
    return (int)nkey - (int)nfreekey;
}
// bvar callback: number of live KeyTables.
static size_t get_keytable_count(void*) {
    return nkeytable.load(butil::memory_order_relaxed);
}
// bvar callback: approximate bytes held by all KeyTables and SubKeyTables
// (object footprints only, not the user data they point to).
static size_t get_keytable_memory(void*) {
    const size_t n = nkeytable.load(butil::memory_order_relaxed);
    const size_t nsub = nsubkeytable.load(butil::memory_order_relaxed);
    return n * sizeof(KeyTable) + nsub * sizeof(SubKeyTable);
}
428 | | |
// Expose the stats above as bvars: bthread_key_count,
// bthread_keytable_count and bthread_keytable_memory.
static bvar::PassiveStatus<int> s_bthread_key_count(
    "bthread_key_count", get_key_count, NULL);
static bvar::PassiveStatus<size_t> s_bthread_keytable_count(
    "bthread_keytable_count", get_keytable_count, NULL);
static bvar::PassiveStatus<size_t> s_bthread_keytable_memory(
    "bthread_keytable_memory", get_keytable_memory, NULL);
435 | | |
436 | | } // namespace bthread |
437 | | |
438 | | extern "C" { |
439 | | |
// Initializes `pool' with an empty thread-local KeyTableList and an empty
// global free list. Returns 0, or EINVAL when pool is NULL.
int bthread_keytable_pool_init(bthread_keytable_pool_t* pool) {
    if (pool == NULL) {
        LOG(ERROR) << "Param[pool] is NULL";
        return EINVAL;
    }
    pthread_rwlock_init(&pool->rwlock, NULL);
    pool->list = new butil::ThreadLocal<bthread::KeyTableList>();
    pool->free_keytables = NULL;
    pool->size = 0;
    pool->destroyed = 0;
    return 0;
}
452 | | |
// Destroys `pool': marks it destroyed, frees every thread's KeyTableList and
// all KeyTables on the global free list. Returns 0, or EINVAL when pool is
// NULL.
int bthread_keytable_pool_destroy(bthread_keytable_pool_t* pool) {
    if (pool == NULL) {
        LOG(ERROR) << "Param[pool] is NULL";
        return EINVAL;
    }
    bthread::KeyTable* saved_free_keytables = NULL;
    pthread_rwlock_wrlock(&pool->rwlock);
    pool->destroyed = 1;
    pool->size = 0;
    // Deleting the ThreadLocal container runs ~KeyTableList for every
    // thread's list, which frees the KeyTables held there.
    delete (butil::ThreadLocal<bthread::KeyTableList>*)pool->list;
    // Detach the global free list; its tables are deleted below, outside
    // the lock.
    saved_free_keytables = (bthread::KeyTable*)pool->free_keytables;
    pool->list = NULL;
    pool->free_keytables = NULL;
    pthread_rwlock_unlock(&pool->rwlock);

    // Cheat get/setspecific and destroy the keytables: point the TLS at the
    // table being deleted so user dtors that call get/setspecific operate on
    // a consistent KeyTable, then restore the original table.
    bthread::TaskGroup* g =
        bthread::BAIDU_GET_VOLATILE_THREAD_LOCAL(tls_task_group);
    bthread::KeyTable* old_kt = bthread::tls_bls.keytable;
    while (saved_free_keytables) {
        bthread::KeyTable* kt = saved_free_keytables;
        saved_free_keytables = kt->next;
        bthread::tls_bls.keytable = kt;
        if (g) {
            g->current_task()->local_storage.keytable = kt;
        }
        delete kt;
        // Reload: the task group may have changed while dtors ran.
        g = bthread::BAIDU_GET_VOLATILE_THREAD_LOCAL(tls_task_group);
    }
    bthread::tls_bls.keytable = old_kt;
    if (g) {
        g->current_task()->local_storage.keytable = old_kt;
    }
    // TODO: return_keytable may race with this function, we don't destroy
    // the mutex right now.
    // pthread_mutex_destroy(&pool->mutex);
    return 0;
}
491 | | |
// Fills `stat->nfree' with the number of KeyTables on the pool's global
// free list (thread-local lists are not counted). Returns 0 or EINVAL.
int bthread_keytable_pool_getstat(bthread_keytable_pool_t* pool,
                                  bthread_keytable_pool_stat_t* stat) {
    if (pool == NULL || stat == NULL) {
        LOG(ERROR) << "Param[pool] or Param[stat] is NULL";
        return EINVAL;
    }
    pthread_rwlock_wrlock(&pool->rwlock);
    stat->nfree = pool->size;
    pthread_rwlock_unlock(&pool->rwlock);
    return 0;
}
503 | | |
// Test helper: returns the length of the calling thread's KeyTableList in
// `pool', 0 when the pool is destroyed, EINVAL when pool is NULL.
int get_thread_local_keytable_list_length(bthread_keytable_pool_t* pool) {
    if (pool == NULL) {
        LOG(ERROR) << "Param[pool] is NULL";
        return EINVAL;
    }
    int length = 0;
    pthread_rwlock_rdlock(&pool->rwlock);
    if (pool->destroyed) {
        pthread_rwlock_unlock(&pool->rwlock);
        return length;
    }
    auto list = (butil::ThreadLocal<bthread::KeyTableList>*)pool->list;
    if (list) {
        length = (int)(list->get()->get_length());
        // Sanity check: the cached length must match the actual chain.
        if (!list->get()->check_length()) {
            LOG(ERROR) << "Length is not equal";
        }
    }
    pthread_rwlock_unlock(&pool->rwlock);
    return length;
}
525 | | |
// TODO: this is not strict `reserve' because we only check #free.
// Currently there's no way to track KeyTables that may be returned
// to the pool in future.
//
// Pre-populates `pool' until its global free list holds `nfree' KeyTables,
// each with `key' set to ctor(ctor_args). Stops early on OOM, on pool
// destruction, or after the first NULL from ctor.
void bthread_keytable_pool_reserve(bthread_keytable_pool_t* pool,
                                   size_t nfree,
                                   bthread_key_t key,
                                   void* ctor(const void*),
                                   const void* ctor_args) {
    if (pool == NULL) {
        LOG(ERROR) << "Param[pool] is NULL";
        return;
    }
    bthread_keytable_pool_stat_t stat;
    if (bthread_keytable_pool_getstat(pool, &stat) != 0) {
        LOG(ERROR) << "Fail to getstat of pool=" << pool;
        return;
    }
    for (size_t i = stat.nfree; i < nfree; ++i) {
        bthread::KeyTable* kt = new (std::nothrow) bthread::KeyTable;
        if (kt == NULL) {
            break;
        }
        void* data = ctor(ctor_args);
        if (data) {
            kt->set_data(key, data);
        } // else append kt w/o data.

        pthread_rwlock_wrlock(&pool->rwlock);
        if (pool->destroyed) {
            pthread_rwlock_unlock(&pool->rwlock);
            delete kt;
            break;
        }
        // Push onto the global free list.
        kt->next = (bthread::KeyTable*)pool->free_keytables;
        pool->free_keytables = kt;
        ++pool->size;
        pthread_rwlock_unlock(&pool->rwlock);
        if (data == NULL) {
            // ctor produced no data; the (empty) table is still pooled but
            // stop reserving more.
            break;
        }
    }
}
568 | | |
// Allocates a key, reusing a freed index when available. `dtor(value,
// dtor_args)' runs on stored non-NULL values when a KeyTable is destroyed.
// Returns 0, or EAGAIN when all KEYS_MAX keys are in use.
int bthread_key_create2(bthread_key_t* key,
                        void (*dtor)(void*, const void*),
                        const void* dtor_args) {
    uint32_t index = 0;
    {
        BAIDU_SCOPED_LOCK(bthread::s_key_mutex);
        if (bthread::nfreekey > 0) {
            index = bthread::s_free_keys[--bthread::nfreekey];
        } else if (bthread::nkey < bthread::KEYS_MAX) {
            index = bthread::nkey++;
        } else {
            return EAGAIN; // what pthread_key_create returns in this case.
        }
    }
    bthread::s_key_info[index].dtor = dtor;
    bthread::s_key_info[index].dtor_args = dtor_args;
    key->index = index;
    key->version = bthread::s_key_info[index].version;
    // Version 0 marks empty slots in SubKeyTable (zeroed by memset), so a
    // valid key never carries version 0.
    if (key->version == 0) {
        ++bthread::s_key_info[index].version;
        ++key->version;
    }
    return 0;
}
593 | | |
594 | 0 | int bthread_key_create(bthread_key_t* key, void (*dtor)(void*)) { |
595 | 0 | if (dtor == NULL) { |
596 | 0 | return bthread_key_create2(key, NULL, NULL); |
597 | 0 | } else { |
598 | 0 | return bthread_key_create2(key, bthread::arg_as_dtor, (const void*)dtor); |
599 | 0 | } |
600 | 0 | } |
601 | | |
// Invalidates `key'. Bumping the version makes values stored under the old
// version unreachable; their dtors will NOT run (SubKeyTable::clear skips
// slots whose version mismatches). Returns 0, or EINVAL for a stale key.
int bthread_key_delete(bthread_key_t key) {
    if (key.index < bthread::KEYS_MAX &&
        key.version == bthread::s_key_info[key.index].version) {
        BAIDU_SCOPED_LOCK(bthread::s_key_mutex);
        // Re-check under the lock: another thread may have deleted the key
        // between the unlocked check above and here.
        if (key.version == bthread::s_key_info[key.index].version) {
            // Skip version 0, which marks empty slots.
            if (++bthread::s_key_info[key.index].version == 0) {
                ++bthread::s_key_info[key.index].version;
            }
            bthread::s_key_info[key.index].dtor = NULL;
            bthread::s_key_info[key.index].dtor_args = NULL;
            bthread::s_free_keys[bthread::nfreekey++] = key.index;
            return 0;
        }
    }
    CHECK(false) << "bthread_key_delete is called on invalid " << key;
    return EINVAL;
}
619 | | |
// NOTE: Can't borrow_keytable in bthread_setspecific, otherwise following
// memory leak may occur:
//  -> bthread_getspecific fails to borrow_keytable and returns NULL.
//  -> bthread_setspecific succeeds to borrow_keytable and overwrites old data
//     at the position with newly created data, the old data is leaked.
//
// Stores `data' under `key' for the calling (b)thread, lazily creating its
// KeyTable. Returns 0, ENOMEM, or EINVAL for an invalid key.
int bthread_setspecific(bthread_key_t key, void* data) {
    bthread::KeyTable* kt = bthread::tls_bls.keytable;
    if (NULL == kt) {
        kt = new (std::nothrow) bthread::KeyTable;
        if (NULL == kt) {
            return ENOMEM;
        }
        bthread::tls_bls.keytable = kt;
        bthread::TaskGroup* const g = bthread::BAIDU_GET_VOLATILE_THREAD_LOCAL(tls_task_group);
        if (g) {
            // Running inside a bthread: also record the table in the task.
            g->current_task()->local_storage.keytable = kt;
        } else {
            // Only cleanup keytable created by pthread.
            // keytable created by bthread will be deleted
            // in `return_keytable' or `bthread_keytable_pool_destroy'.
            if (!bthread::tls_ever_created_keytable) {
                bthread::tls_ever_created_keytable = true;
                CHECK_EQ(0, butil::thread_atexit(bthread::cleanup_pthread, kt));
            }
        }
    }
    return kt->set_data(key, data);
}
648 | | |
// Returns the value stored under `key' for the calling (b)thread, or NULL.
// For a bthread without a KeyTable yet, tries to adopt one from its
// keytable_pool (see the NOTE above bthread_setspecific).
void* bthread_getspecific(bthread_key_t key) {
    bthread::KeyTable* kt = bthread::tls_bls.keytable;
    if (kt) {
        return kt->get_data(key);
    }
    bthread::TaskGroup* const g = bthread::BAIDU_GET_VOLATILE_THREAD_LOCAL(tls_task_group);
    if (g) {
        // Running inside a bthread: reuse a pooled KeyTable if possible.
        bthread::TaskMeta* const task = g->current_task();
        kt = bthread::borrow_keytable(task->attr.keytable_pool);
        if (kt) {
            g->current_task()->local_storage.keytable = kt;
            bthread::tls_bls.keytable = kt;
            return kt->get_data(key);
        }
    }
    return NULL;
}
666 | | |
// Attaches an arbitrary pointer to the current (b)thread's local storage.
void bthread_assign_data(void* data) {
    bthread::tls_bls.assigned_data = data;
}
670 | | |
// Returns the pointer previously set by bthread_assign_data.
void* bthread_get_assigned_data() {
    return bthread::tls_bls.assigned_data;
}
674 | | |
675 | | } // extern "C" |