/src/ffmpeg/libavutil/refstruct.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#include "refstruct.h"

#include "avassert.h"
#include "error.h"
#include "macros.h"
#include "mem.h"
#include "mem_internal.h"
#include "thread.h"

#ifndef REFSTRUCT_CHECKED
#ifndef ASSERT_LEVEL
#define ASSERT_LEVEL 0
#endif
#define REFSTRUCT_CHECKED (ASSERT_LEVEL >= 1)
#endif

#if REFSTRUCT_CHECKED
#define ff_assert(cond) av_assert0(cond)
#else
#define ff_assert(cond) ((void)0)
#endif

#define REFSTRUCT_COOKIE AV_NE((uint64_t)MKBETAG('R', 'e', 'f', 'S') << 32 | MKBETAG('t', 'r', 'u', 'c'), \
                               MKTAG('R', 'e', 'f', 'S') | (uint64_t)MKTAG('t', 'r', 'u', 'c') << 32)

#ifndef _MSC_VER
#define REFCOUNT_OFFSET FFALIGN(sizeof(RefCount), FFMAX(ALIGN_64, _Alignof(max_align_t)))
#else
#define REFCOUNT_OFFSET FFALIGN(sizeof(RefCount), ALIGN_64)
#endif

typedef struct RefCount {
    /**
     * A uintptr_t is big enough to hold the address of every reference,
     * so no overflow can happen when incrementing the refcount as long as
     * the user does not throw away references.
     */
    atomic_uintptr_t refcount;
    AVRefStructOpaque opaque;
    void (*free_cb)(AVRefStructOpaque opaque, void *obj);
    void (*free)(void *ref);

#if REFSTRUCT_CHECKED
    uint64_t cookie;
#endif
} RefCount;
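
/*
 * Memory layout (a descriptive sketch, not normative): every allocation
 * consists of a RefCount header, padding up to REFCOUNT_OFFSET, and then
 * the user's object. The user only ever sees the pointer to the object;
 * get_refcount() recovers the header by subtracting REFCOUNT_OFFSET.
 *
 *   buf -> [ RefCount | padding ][ user data ... ]
 *                                 ^-- pointer handed to the user
 */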

static RefCount *get_refcount(void *obj)
{
    RefCount *ref = (RefCount*)((char*)obj - REFCOUNT_OFFSET);
    ff_assert(ref->cookie == REFSTRUCT_COOKIE);
    return ref;
}

static const RefCount *cget_refcount(const void *obj)
{
    const RefCount *ref = (const RefCount*)((const char*)obj - REFCOUNT_OFFSET);
    ff_assert(ref->cookie == REFSTRUCT_COOKIE);
    return ref;
}

static void *get_userdata(void *buf)
{
    return (char*)buf + REFCOUNT_OFFSET;
}

static void refcount_init(RefCount *ref, AVRefStructOpaque opaque,
                          void (*free_cb)(AVRefStructOpaque opaque, void *obj))
{
    atomic_init(&ref->refcount, 1);
    ref->opaque = opaque;
    ref->free_cb = free_cb;
    ref->free = av_free;

#if REFSTRUCT_CHECKED
    ref->cookie = REFSTRUCT_COOKIE;
#endif
}

void *av_refstruct_alloc_ext_c(size_t size, unsigned flags, AVRefStructOpaque opaque,
                               void (*free_cb)(AVRefStructOpaque opaque, void *obj))
{
    void *buf, *obj;

    if (size > SIZE_MAX - REFCOUNT_OFFSET)
        return NULL;
    buf = av_malloc(size + REFCOUNT_OFFSET);
    if (!buf)
        return NULL;
    refcount_init(buf, opaque, free_cb);
    obj = get_userdata(buf);
    if (!(flags & AV_REFSTRUCT_FLAG_NO_ZEROING))
        memset(obj, 0, size);

    return obj;
}
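
/*
 * Usage sketch (illustrative only; MyObject and my_free_cb are hypothetical
 * names, not part of this API):
 * @code
 * typedef struct MyObject { int fd; } MyObject;
 *
 * static void my_free_cb(AVRefStructOpaque opaque, void *obj)
 * {
 *     MyObject *o = obj;
 *     // release resources owned by *o here; the memory itself is
 *     // freed by refstruct after this callback returns
 * }
 *
 * MyObject *obj = av_refstruct_alloc_ext(sizeof(*obj), 0, NULL, my_free_cb);
 * if (!obj)
 *     return AVERROR(ENOMEM);
 * MyObject *ref2 = av_refstruct_ref(obj);  // refcount is now 2
 * av_refstruct_unref(&obj);                // refcount 1, obj set to NULL
 * av_refstruct_unref(&ref2);               // refcount 0, my_free_cb runs
 * @endcode
 */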
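/* Note: objp really points to a pointer of some arbitrary type; loading it
 * and storing the NULL back via memcpy() keeps this legal for any object
 * pointer type without violating strict aliasing. */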
void av_refstruct_unref(void *objp)
{
    void *obj;
    RefCount *ref;

    memcpy(&obj, objp, sizeof(obj));
    if (!obj)
        return;
    memcpy(objp, &(void *){ NULL }, sizeof(obj));

    ref = get_refcount(obj);
    if (atomic_fetch_sub_explicit(&ref->refcount, 1, memory_order_acq_rel) == 1) {
        if (ref->free_cb)
            ref->free_cb(ref->opaque, obj);
        ref->free(ref);
    }

    return;
}

void *av_refstruct_ref(void *obj)
{
    RefCount *ref = get_refcount(obj);

    atomic_fetch_add_explicit(&ref->refcount, 1, memory_order_relaxed);

    return obj;
}

const void *av_refstruct_ref_c(const void *obj)
{
    /* Casting const away here is fine, as it is only supposed
     * to apply to the user's data and not our bookkeeping data. */
    RefCount *ref = get_refcount((void*)obj);

    atomic_fetch_add_explicit(&ref->refcount, 1, memory_order_relaxed);

    return obj;
}

void av_refstruct_replace(void *dstp, const void *src)
{
    const void *dst;
    memcpy(&dst, dstp, sizeof(dst));

    if (src == dst)
        return;
    av_refstruct_unref(dstp);
    if (src) {
        dst = av_refstruct_ref_c(src);
        memcpy(dstp, &dst, sizeof(dst));
    }
}
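
/*
 * Usage sketch (illustrative; dst and src are hypothetical):
 * @code
 * MyObject *dst = ...;             // may be NULL
 * const MyObject *src = ...;       // may be NULL
 * av_refstruct_replace(&dst, src); // unrefs the old dst, refs src
 * // dst now holds a new reference to src (or is NULL if src was NULL)
 * @endcode
 */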

int av_refstruct_exclusive(const void *obj)
{
    const RefCount *ref = cget_refcount(obj);
    /* Casting const away here is safe, because it is a load.
     * It is necessary because atomic_load_explicit() does not
     * accept const atomics in C11 (see also N1807). */
    return atomic_load_explicit((atomic_uintptr_t*)&ref->refcount, memory_order_acquire) == 1;
}
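
/*
 * A common copy-on-write pattern built on av_refstruct_exclusive()
 * (a sketch assuming MyObject is plain data without owned resources
 * or a free_cb):
 * @code
 * if (!av_refstruct_exclusive(obj)) {
 *     MyObject *copy = av_refstruct_alloc_ext(sizeof(*copy), 0, NULL, NULL);
 *     if (!copy)
 *         return AVERROR(ENOMEM);
 *     memcpy(copy, obj, sizeof(*copy));
 *     av_refstruct_unref(&obj);
 *     obj = copy;
 * }
 * // obj is now exclusively owned and safe to modify
 * @endcode
 */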

struct AVRefStructPool {
    size_t size;
    AVRefStructOpaque opaque;
    int  (*init_cb)(AVRefStructOpaque opaque, void *obj);
    void (*reset_cb)(AVRefStructOpaque opaque, void *obj);
    void (*free_entry_cb)(AVRefStructOpaque opaque, void *obj);
    void (*free_cb)(AVRefStructOpaque opaque);

    int uninited;
    unsigned entry_flags;
    unsigned pool_flags;

    /** The number of outstanding entries not in available_entries,
     *  plus one as long as the user still holds a reference to the pool itself. */
    atomic_uintptr_t refcount;
    /**
     * This is a linked list of available entries;
     * the RefCount's opaque pointer is used as next pointer
     * for available entries.
     * While the entries are in use, the opaque is a pointer
     * to the corresponding AVRefStructPool.
     */
    RefCount *available_entries;
    AVMutex mutex;
};

static void pool_free(AVRefStructPool *pool)
{
    ff_mutex_destroy(&pool->mutex);
    if (pool->free_cb)
        pool->free_cb(pool->opaque);
    av_free(get_refcount(pool));
}

static void pool_free_entry(AVRefStructPool *pool, RefCount *ref)
{
    if (pool->free_entry_cb)
        pool->free_entry_cb(pool->opaque, get_userdata(ref));
    av_free(ref);
}

static void pool_return_entry(void *ref_)
{
    RefCount *ref = ref_;
    AVRefStructPool *pool = ref->opaque.nc;

    ff_mutex_lock(&pool->mutex);
    if (!pool->uninited) {
        ref->opaque.nc = pool->available_entries;
        pool->available_entries = ref;
        ref = NULL;
    }
    ff_mutex_unlock(&pool->mutex);

    if (ref)
        pool_free_entry(pool, ref);

    if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
        pool_free(pool);
}

static void pool_reset_entry(AVRefStructOpaque opaque, void *entry)
{
    AVRefStructPool *pool = opaque.nc;

    pool->reset_cb(pool->opaque, entry);
}

static int refstruct_pool_get_ext(void *datap, AVRefStructPool *pool)
{
    void *ret = NULL;

    memcpy(datap, &(void *){ NULL }, sizeof(void*));

    ff_mutex_lock(&pool->mutex);
    ff_assert(!pool->uninited);
    if (pool->available_entries) {
        RefCount *ref = pool->available_entries;
        ret = get_userdata(ref);
        pool->available_entries = ref->opaque.nc;
        ref->opaque.nc = pool;
        atomic_init(&ref->refcount, 1);
    }
    ff_mutex_unlock(&pool->mutex);

    if (!ret) {
        RefCount *ref;
        ret = av_refstruct_alloc_ext(pool->size, pool->entry_flags, pool,
                                     pool->reset_cb ? pool_reset_entry : NULL);
        if (!ret)
            return AVERROR(ENOMEM);
        ref = get_refcount(ret);
        ref->free = pool_return_entry;
        if (pool->init_cb) {
            int err = pool->init_cb(pool->opaque, ret);
            if (err < 0) {
                if (pool->pool_flags & AV_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR)
                    pool->reset_cb(pool->opaque, ret);
                if (pool->pool_flags & AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR)
                    pool->free_entry_cb(pool->opaque, ret);
                av_free(ref);
                return err;
            }
        }
    }
    atomic_fetch_add_explicit(&pool->refcount, 1, memory_order_relaxed);

    if (pool->pool_flags & AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME)
        memset(ret, 0, pool->size);

    memcpy(datap, &ret, sizeof(ret));

    return 0;
}

void *av_refstruct_pool_get(AVRefStructPool *pool)
{
    void *ret;
    refstruct_pool_get_ext(&ret, pool);
    return ret;
}
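
/*
 * Usage sketch (illustrative; MyObject is a hypothetical entry type):
 * @code
 * AVRefStructPool *pool = av_refstruct_pool_alloc(sizeof(MyObject), 0);
 * if (!pool)
 *     return AVERROR(ENOMEM);
 * MyObject *entry = av_refstruct_pool_get(pool);
 * if (!entry)
 *     return AVERROR(ENOMEM);
 * // ... use entry; it behaves like any other refstruct object ...
 * av_refstruct_unref(&entry); // returns the entry to the pool
 * av_refstruct_unref(&pool);  // pool memory is freed once all
 *                             // outstanding entries have been returned
 * @endcode
 */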

/**
 * Hint: The content of pool_unref() and refstruct_pool_uninit()
 * could currently be merged; they are only separate functions
 * in case we would ever introduce weak references.
 */
static void pool_unref(void *ref)
{
    AVRefStructPool *pool = get_userdata(ref);
    if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
        pool_free(pool);
}

static void refstruct_pool_uninit(AVRefStructOpaque unused, void *obj)
{
    AVRefStructPool *pool = obj;
    RefCount *entry;

    ff_mutex_lock(&pool->mutex);
    ff_assert(!pool->uninited);
    pool->uninited = 1;
    entry = pool->available_entries;
    pool->available_entries = NULL;
    ff_mutex_unlock(&pool->mutex);

    while (entry) {
        void *next = entry->opaque.nc;
        pool_free_entry(pool, entry);
        entry = next;
    }
}

AVRefStructPool *av_refstruct_pool_alloc(size_t size, unsigned flags)
{
    return av_refstruct_pool_alloc_ext(size, flags, NULL, NULL, NULL, NULL, NULL);
}

AVRefStructPool *av_refstruct_pool_alloc_ext_c(size_t size, unsigned flags,
                                               AVRefStructOpaque opaque,
                                               int  (*init_cb)(AVRefStructOpaque opaque, void *obj),
                                               void (*reset_cb)(AVRefStructOpaque opaque, void *obj),
                                               void (*free_entry_cb)(AVRefStructOpaque opaque, void *obj),
                                               void (*free_cb)(AVRefStructOpaque opaque))
{
    AVRefStructPool *pool = av_refstruct_alloc_ext(sizeof(*pool), 0, NULL,
                                                   refstruct_pool_uninit);
    int err;

    if (!pool)
        return NULL;
    get_refcount(pool)->free = pool_unref;

    pool->size = size;
    pool->opaque = opaque;
    pool->init_cb = init_cb;
    pool->reset_cb = reset_cb;
    pool->free_entry_cb = free_entry_cb;
    pool->free_cb = free_cb;
#define COMMON_FLAGS AV_REFSTRUCT_POOL_FLAG_NO_ZEROING
    pool->entry_flags = flags & COMMON_FLAGS;
    // Filter out nonsense combinations to avoid checks later.
    if (!pool->reset_cb)
        flags &= ~AV_REFSTRUCT_POOL_FLAG_RESET_ON_INIT_ERROR;
    if (!pool->free_entry_cb)
        flags &= ~AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR;
    pool->pool_flags = flags;

    if (flags & AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME) {
        // We will zero the buffer before every use, so zeroing
        // upon allocating the buffer is unnecessary.
        pool->entry_flags |= AV_REFSTRUCT_FLAG_NO_ZEROING;
    }

    atomic_init(&pool->refcount, 1);

    err = ff_mutex_init(&pool->mutex, NULL);
    if (err) {
        // Don't call av_refstruct_unref() on pool, as it hasn't been properly
        // set up and is just a POD right now.
        av_free(get_refcount(pool));
        return NULL;
    }
    return pool;
}
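
/*
 * Sketch of a pool with callbacks (illustrative; MyEntry, my_init and
 * my_free_entry are hypothetical names):
 * @code
 * static int my_init(AVRefStructOpaque opaque, void *obj)
 * {
 *     MyEntry *e = obj;
 *     e->buf = av_malloc(1024);
 *     return e->buf ? 0 : AVERROR(ENOMEM);
 * }
 *
 * static void my_free_entry(AVRefStructOpaque opaque, void *obj)
 * {
 *     av_freep(&((MyEntry*)obj)->buf);
 * }
 *
 * AVRefStructPool *pool =
 *     av_refstruct_pool_alloc_ext(sizeof(MyEntry), 0, NULL,
 *                                 my_init, NULL, my_free_entry, NULL);
 * @endcode
 * Note that my_free_entry only runs when an entry is finally freed (at pool
 * teardown), not each time an entry is returned to the pool.
 */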