/src/brpc/src/bthread/task_meta.h
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | // bthread - An M:N threading library to make applications more concurrent. |
19 | | |
20 | | // Date: Tue Jul 10 17:40:58 CST 2012 |
21 | | |
22 | | #ifndef BTHREAD_TASK_META_H |
23 | | #define BTHREAD_TASK_META_H |
24 | | |
25 | | #include <pthread.h> // pthread_spin_init |
26 | | #include "bthread/butex.h" // butex_construct/destruct |
27 | | #include "butil/atomicops.h" // butil::atomic |
28 | | #include "bthread/types.h" // bthread_attr_t |
29 | | #include "bthread/stack.h" // ContextualStack |
30 | | #include "bthread/timer_thread.h" |
31 | | #include "butil/thread_local.h" |
32 | | |
33 | | namespace bthread { |
34 | | |
 | | // Per-bthread runtime counters. Snapshotted/accumulated by the scheduler;
 | | // exact accounting lives outside this header (task_group.cpp) -- see hedges below.
35 | | struct TaskStatistics {
36 | | int64_t cputime_ns;     // presumably cpu-wide time consumed, in ns -- confirm in task_group.cpp
37 | | int64_t nswitch;        // presumably number of context switches of this task -- confirm
38 | | int64_t cpu_usage_ns;   // NOTE(review): relation to cputime_ns not visible here -- confirm
39 | | };
40 | | |
41 | | class KeyTable; |
42 | | struct ButexWaiter; |
43 | | |
 | | // The bthread-local storage bundle carried by each task and mirrored into
 | | // the worker's thread-local `tls_bls` while the task runs.
44 | | struct LocalStorage {
45 | | KeyTable* keytable;       // bthread-specific key/value table (see KeyTable fwd decl above); NULL until used
46 | | void* assigned_data;      // opaque user data -- semantics owned by callers, not visible here
47 | | void* rpcz_parent_span; // Points to std::weak_ptr<brpc::Span>* (managed by brpc)
48 | | };
49 | | |
 | | // All-NULL initializer: no keytable, no assigned data, no parent span.
50 | | #define BTHREAD_LOCAL_STORAGE_INITIALIZER { NULL, NULL, NULL }
51 | |
 | | // Canonical "empty" LocalStorage value; presumably used to (re)set tls_bls
 | | // when a bthread starts/ends -- confirm against task_group.cpp.
 | | // NOTE(review): `const static` in a header gives each TU its own copy;
 | | // harmless for this small POD, but `inline constexpr` would be tighter if C++17 is allowed.
52 | | const static LocalStorage LOCAL_STORAGE_INIT = BTHREAD_LOCAL_STORAGE_INITIALIZER;
53 | |
 | | // Declares the per-worker thread-local LocalStorage (definition elsewhere;
 | | // TaskMeta::local_storage below says it is synced with this variable).
54 | | EXTERN_BAIDU_VOLATILE_THREAD_LOCAL(LocalStorage, tls_bls);
55 | | |
 | | // Lifecycle states of a bthread, recorded in TaskMeta::status (which is only
 | | // used when TaskTracer is enabled -- see TaskMeta). Exact transitions are
 | | // driven outside this header; the names below suggest the intended order,
 | | // but confirm against the scheduler before relying on them.
56 | | enum TaskStatus {
57 | | TASK_STATUS_UNKNOWN,      // default value (TaskMeta::status initializer)
58 | | TASK_STATUS_CREATED,      // presumably set right after creation -- confirm
59 | | TASK_STATUS_FIRST_READY,  // presumably first time entering a run queue -- confirm
60 | | TASK_STATUS_READY,        // runnable, waiting to be scheduled
61 | | TASK_STATUS_JUMPING,      // in the middle of a context switch
62 | | TASK_STATUS_RUNNING,      // currently executing on a worker
63 | | TASK_STATUS_SUSPENDED,    // blocked (e.g. on a butex or sleep)
64 | | TASK_STATUS_END,          // finished
65 | | };
66 | | |
 | | // Per-bthread control block. Instances are recycled: fields tagged
 | | // "[Not Reset]" are initialized once in the constructor and survive reuse;
 | | // every other field is re-initialized by the bthread_start* path (see the
 | | // comment above the constructor).
67 | | struct TaskMeta {
68 | | // [Not Reset]
69 | | butil::atomic<ButexWaiter*> current_waiter{NULL};
70 | | uint64_t current_sleep{TimerThread::INVALID_TASK_ID};
71 | | 
72 | | // A flag to mark if the Timer scheduling failed.
73 | | bool sleep_failed{false};
74 | | 
75 | | // A builtin flag to mark if the thread is stopping.
76 | | bool stop{false};
77 | | 
78 | | // The thread is interrupted and should wake up from some blocking ops.
79 | | bool interrupted{false};
80 | | 
81 | | // Scheduling of the thread can be delayed.
82 | | bool about_to_quit{false};
83 | | 
84 | | // [Not Reset] guarantee visibility of version_butex.
85 | | pthread_spinlock_t version_lock{};
86 | | 
87 | | // [Not Reset] only modified by one bthread at any time, no need to be atomic
88 | | uint32_t* version_butex{NULL};
89 | | 
90 | | // The identifier. It does not have to be here, however many code is
91 | | // simplified if they can get tid from TaskMeta.
92 | | bthread_t tid{INVALID_BTHREAD};
93 | | 
94 | | // User function and argument
95 | | void* (*fn)(void*){NULL};
96 | | void* arg{NULL};
97 | | 
98 | | // Stack of this task.
99 | | ContextualStack* stack{NULL};
100 | | 
101 | | // Attributes creating this task
102 | | bthread_attr_t attr{BTHREAD_ATTR_NORMAL};
103 | | 
104 | | // Statistics
105 | | int64_t cpuwide_start_ns{0};
106 | | TaskStatistics stat{};
107 | | 
108 | | // bthread local storage, sync with tls_bls (defined in task_group.cpp)
109 | | // when the bthread is created or destroyed.
110 | | // DO NOT use this field directly, use tls_bls instead.
111 | | LocalStorage local_storage{};
112 | | 
113 | | // Only used when TaskTracer is enabled.
114 | | // Bthread status.
115 | | TaskStatus status{TASK_STATUS_UNKNOWN};
116 | | // Whether bthread is traced?
117 | | bool traced{false};
118 | | // [Not Reset] guarantee tracing completion before jumping.
119 | | pthread_mutex_t trace_lock{};
120 | | // Worker thread id.
121 | | pthread_t worker_tid{};
122 | | 
123 | | public:
124 | | // Only initialize [Not Reset] fields, other fields will be reset in
125 | | // bthread_start* functions
126 | 0 | TaskMeta() {
127 | 0 | pthread_spin_init(&version_lock, 0);
128 | 0 | version_butex = butex_create_checked<uint32_t>();
129 | 0 | *version_butex = 1; // first version of a recycled slot is 1
130 | 0 | pthread_mutex_init(&trace_lock, NULL);
131 | 0 | }
132 | | 
 | | // Releases [Not Reset] resources in the reverse order of construction.
 | | // NOTE(review): the destructor frees version_butex, yet copy/move operations
 | | // are not deleted -- copying a TaskMeta would double-destroy the butex.
 | | // Presumably instances live only in a resource pool and are never copied;
 | | // deleting the copy operations would make that explicit. Confirm with owners.
133 | 0 | ~TaskMeta() {
134 | 0 | pthread_mutex_destroy(&trace_lock);
135 | 0 | butex_destroy(version_butex);
136 | 0 | version_butex = NULL;
137 | 0 | pthread_spin_destroy(&version_lock);
138 | 0 | }
139 | | 
 | | // Attaches a stack. Does NOT free a previously attached stack; callers are
 | | // expected to have released it (see release_stack) beforehand.
140 | 0 | void set_stack(ContextualStack* s) {
141 | 0 | stack = s;
142 | 0 | }
143 | | 
 | | // Transfers ownership of the stack to the caller and leaves `stack` NULL.
144 | 0 | ContextualStack* release_stack() {
145 | 0 | ContextualStack* tmp = stack;
146 | 0 | stack = NULL;
147 | 0 | return tmp;
148 | 0 | }
149 | | 
 | | // Stack type requested by the creation-time attributes.
150 | 0 | StackType stack_type() const {
151 | 0 | return static_cast<StackType>(attr.stack_type);
152 | 0 | }
153 | | };
154 | | |
 | | // ---- Hooks installed by the brpc layer (plain function pointers; presumably
 | | // NULL until set and NULL-checked by callers -- confirm at call sites). ----
 | |
155 | | // Global callback for creating a new bthread span when creating a new bthread.
156 | | // This is set by brpc layer. When a bthread is created with BTHREAD_INHERIT_SPAN,
157 | | // this callback is invoked to create a new span for the bthread.
158 | | // The returned void* points to a heap-allocated weak_ptr<Span>* managed by brpc layer.
159 | | // Returns NULL if span creation is disabled or fails.
160 | | extern void* (*g_create_bthread_span)();
161 | | 
162 | | // Global destructor callback for rpcz_parent_span.
163 | | // This is set by brpc layer to clean up the heap-allocated weak_ptr.
164 | | // bthread layer doesn't know the concrete type, it just calls this function
165 | | // with the void* pointer when cleaning up LocalStorage.
166 | | extern void (*g_rpcz_parent_span_dtor)(void*);
167 | | 
168 | | // Global callback invoked when a bthread ends (used by higher layers to
169 | | // observe and react to bthread end events, e.g., to finish spans). This
170 | | // pointer is set by the upper layer during initialization.
171 | | extern void (*g_end_bthread_span)();
172 | | |
173 | | } // namespace bthread |
174 | | |
175 | | #endif // BTHREAD_TASK_META_H |