/src/mpv/video/mp_image_pool.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * This file is part of mpv. |
3 | | * |
4 | | * mpv is free software; you can redistribute it and/or |
5 | | * modify it under the terms of the GNU Lesser General Public |
6 | | * License as published by the Free Software Foundation; either |
7 | | * version 2.1 of the License, or (at your option) any later version. |
8 | | * |
9 | | * mpv is distributed in the hope that it will be useful, |
10 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | | * GNU Lesser General Public License for more details. |
13 | | * |
14 | | * You should have received a copy of the GNU Lesser General Public |
15 | | * License along with mpv. If not, see <http://www.gnu.org/licenses/>. |
16 | | */ |
17 | | |
18 | | #include "config.h" |
19 | | |
20 | | #include <stddef.h> |
21 | | #include <stdbool.h> |
22 | | #include <assert.h> |
23 | | |
24 | | #include <libavutil/buffer.h> |
25 | | #include <libavutil/hwcontext.h> |
26 | | #if HAVE_VULKAN |
27 | | #include <libavutil/hwcontext_vulkan.h> |
28 | | #endif |
29 | | #include <libavutil/mem.h> |
30 | | #include <libavutil/pixdesc.h> |
31 | | |
32 | | #include "mpv_talloc.h" |
33 | | |
34 | | #include "common/common.h" |
35 | | |
36 | | #include "fmt-conversion.h" |
37 | | #include "mp_image_pool.h" |
38 | | #include "mp_image.h" |
39 | | #include "osdep/threads.h" |
40 | | |
41 | | static mp_static_mutex pool_mutex = MP_STATIC_MUTEX_INITIALIZER; |
42 | 48.8k | #define pool_lock() mp_mutex_lock(&pool_mutex) |
43 | 48.8k | #define pool_unlock() mp_mutex_unlock(&pool_mutex) |
44 | | |
45 | | // Thread-safety: the pool itself is not thread-safe, but pool-allocated images |
46 | | // can be referenced and unreferenced from other threads. (As long as the image |
47 | | // destructors are thread-safe.) |
48 | | |
49 | | struct mp_image_pool { |
50 | | struct mp_image **images; |
51 | | int num_images; |
52 | | |
53 | | int fmt, w, h; |
54 | | |
55 | | mp_image_allocator allocator; |
56 | | void *allocator_ctx; |
57 | | |
58 | | bool use_lru; |
59 | | unsigned int lru_counter; |
60 | | }; |
61 | | |
62 | | // Used to gracefully handle the case when the pool is freed while image |
63 | | // references allocated from the image pool are still held by someone. |
64 | | struct image_flags { |
65 | | // If both of these are false, the image must be freed. |
66 | | bool referenced; // outside mp_image reference exists |
67 | | bool pool_alive; // the mp_image_pool references this |
68 | | unsigned int order; // for LRU allocation (basically a timestamp) |
69 | | }; |
70 | | |
71 | | static void image_pool_destructor(void *ptr) |
72 | 168k | { |
73 | 168k | struct mp_image_pool *pool = ptr; |
74 | 168k | mp_image_pool_clear(pool); |
75 | 168k | } |
76 | | |
77 | | // If tparent!=NULL, set it as talloc parent for the pool. |
78 | | struct mp_image_pool *mp_image_pool_new(void *tparent) |
79 | 168k | { |
80 | 168k | struct mp_image_pool *pool = talloc_ptrtype(tparent, pool); |
81 | 168k | talloc_set_destructor(pool, image_pool_destructor); |
82 | 168k | *pool = (struct mp_image_pool) {0}; |
83 | 168k | return pool; |
84 | 168k | } |
85 | | |
86 | | void mp_image_pool_clear(struct mp_image_pool *pool) |
87 | 198k | { |
88 | 201k | for (int n = 0; n < pool->num_images; n++) { |
89 | 3.22k | struct mp_image *img = pool->images[n]; |
90 | 3.22k | struct image_flags *it = img->priv; |
91 | 3.22k | bool referenced; |
92 | 3.22k | pool_lock(); |
93 | 3.22k | mp_assert(it->pool_alive); |
94 | 3.22k | it->pool_alive = false; |
95 | 3.22k | referenced = it->referenced; |
96 | 3.22k | pool_unlock(); |
97 | 3.22k | if (!referenced) |
98 | 2.68k | talloc_free(img); |
99 | 3.22k | } |
100 | 198k | pool->num_images = 0; |
101 | 198k | } |
102 | | |
103 | | // This is the only function that is allowed to run in a different thread. |
104 | | // (Consider passing an image to another thread, which frees it.) |
105 | | static void unref_image(void *opaque, uint8_t *data) |
106 | 7.81k | { |
107 | 7.81k | struct mp_image *img = opaque; |
108 | 7.81k | struct image_flags *it = img->priv; |
109 | 7.81k | bool alive; |
110 | 7.81k | pool_lock(); |
111 | 7.81k | mp_assert(it->referenced); |
112 | 7.81k | it->referenced = false; |
113 | 7.81k | alive = it->pool_alive; |
114 | 7.81k | pool_unlock(); |
115 | 7.81k | if (!alive) |
116 | 549 | talloc_free(img); |
117 | 7.81k | } |
118 | | |
119 | | // Return a new image of given format/size. Unlike mp_image_pool_get(), this |
120 | | // returns NULL if there is no free image of this format/size. |
121 | | struct mp_image *mp_image_pool_get_no_alloc(struct mp_image_pool *pool, int fmt, |
122 | | int w, int h) |
123 | 37.7k | { |
124 | 37.7k | struct mp_image *new = NULL; |
125 | 37.7k | pool_lock(); |
126 | 38.9k | for (int n = 0; n < pool->num_images; n++) { |
127 | 8.99k | struct mp_image *img = pool->images[n]; |
128 | 8.99k | struct image_flags *img_it = img->priv; |
129 | 8.99k | mp_assert(img_it->pool_alive); |
130 | 8.99k | if (!img_it->referenced) { |
131 | 7.81k | if (img->imgfmt == fmt && img->w == w && img->h == h) { |
132 | 7.81k | if (pool->use_lru) { |
133 | 0 | struct image_flags *new_it = new ? new->priv : NULL; |
134 | 0 | if (!new_it || new_it->order > img_it->order) |
135 | 0 | new = img; |
136 | 7.81k | } else { |
137 | 7.81k | new = img; |
138 | 7.81k | break; |
139 | 7.81k | } |
140 | 7.81k | } |
141 | 7.81k | } |
142 | 8.99k | } |
143 | 37.7k | pool_unlock(); |
144 | 37.7k | if (!new) |
145 | 29.9k | return NULL; |
146 | | |
147 | | // Reference the new image. Since mp_image_pool is not declared thread-safe, |
148 | | // and unreffing images from other threads does not allocate new images, |
149 | | // no synchronization is required here. |
150 | 39.0k | for (int p = 0; p < MP_MAX_PLANES; p++) |
151 | 31.2k | mp_assert(!!new->bufs[p] == !p); // only 1 AVBufferRef |
152 | | |
153 | 7.81k | struct mp_image *ref = mp_image_new_dummy_ref(new); |
154 | | |
155 | | // This assumes the buffer is at this point exclusively owned by us: we |
156 | | // can't track whether the buffer is unique otherwise. |
157 | | // (av_buffer_is_writable() checks the refcount of the new buffer only.) |
158 | 7.81k | int flags = av_buffer_is_writable(new->bufs[0]) ? 0 : AV_BUFFER_FLAG_READONLY; |
159 | 7.81k | ref->bufs[0] = av_buffer_create(new->bufs[0]->data, new->bufs[0]->size, |
160 | 7.81k | unref_image, new, flags); |
161 | 7.81k | if (!ref->bufs[0]) { |
162 | 0 | talloc_free(ref); |
163 | 0 | return NULL; |
164 | 0 | } |
165 | | |
166 | 7.81k | struct image_flags *it = new->priv; |
167 | 7.81k | mp_assert(!it->referenced && it->pool_alive); |
168 | 7.81k | it->referenced = true; |
169 | 7.81k | it->order = ++pool->lru_counter; |
170 | 7.81k | return ref; |
171 | 7.81k | } |
172 | | |
173 | | void mp_image_pool_add(struct mp_image_pool *pool, struct mp_image *new) |
174 | 3.22k | { |
175 | 3.22k | struct image_flags *it = talloc_ptrtype(new, it); |
176 | 3.22k | *it = (struct image_flags) { .pool_alive = true }; |
177 | 3.22k | new->priv = it; |
178 | 3.22k | MP_TARRAY_APPEND(pool, pool->images, pool->num_images, new); |
179 | 3.22k | } |
180 | | |
181 | | // Return a new image of given format/size. The only difference to |
182 | | // mp_image_alloc() is that there is a transparent mechanism to recycle image |
183 | | // data allocations through this pool. |
184 | | // If pool==NULL, mp_image_alloc() is called (for convenience). |
185 | | // The image can be free'd with talloc_free(). |
186 | | // Returns NULL on OOM. |
187 | | struct mp_image *mp_image_pool_get(struct mp_image_pool *pool, int fmt, |
188 | | int w, int h) |
189 | 7.81k | { |
190 | 7.81k | if (!pool) |
191 | 0 | return mp_image_alloc(fmt, w, h); |
192 | 7.81k | struct mp_image *new = mp_image_pool_get_no_alloc(pool, fmt, w, h); |
193 | 7.81k | if (!new) { |
194 | 3.22k | if (fmt != pool->fmt || w != pool->w || h != pool->h) |
195 | 2.66k | mp_image_pool_clear(pool); |
196 | 3.22k | pool->fmt = fmt; |
197 | 3.22k | pool->w = w; |
198 | 3.22k | pool->h = h; |
199 | 3.22k | if (pool->allocator) { |
200 | 0 | new = pool->allocator(pool->allocator_ctx, fmt, w, h); |
201 | 3.22k | } else { |
202 | 3.22k | new = mp_image_alloc(fmt, w, h); |
203 | 3.22k | } |
204 | 3.22k | if (!new) |
205 | 0 | return NULL; |
206 | 3.22k | mp_image_pool_add(pool, new); |
207 | 3.22k | new = mp_image_pool_get_no_alloc(pool, fmt, w, h); |
208 | 3.22k | } |
209 | 7.81k | return new; |
210 | 7.81k | } |
211 | | |
212 | | // Like mp_image_new_copy(), but allocate the image out of the pool. |
213 | | // If pool==NULL, a plain copy is made (for convenience). |
214 | | // Returns NULL on OOM. |
215 | | struct mp_image *mp_image_pool_new_copy(struct mp_image_pool *pool, |
216 | | struct mp_image *img) |
217 | 0 | { |
218 | 0 | struct mp_image *new = mp_image_pool_get(pool, img->imgfmt, img->w, img->h); |
219 | 0 | if (new) { |
220 | 0 | mp_image_copy(new, img); |
221 | 0 | mp_image_copy_attributes(new, img); |
222 | 0 | } |
223 | 0 | return new; |
224 | 0 | } |
225 | | |
226 | | // Like mp_image_make_writeable(), but if a copy has to be made, allocate it |
227 | | // out of the pool. |
228 | | // If pool==NULL, mp_image_make_writeable() is called (for convenience). |
229 | | // Returns false on failure (see mp_image_make_writeable()). |
230 | | bool mp_image_pool_make_writeable(struct mp_image_pool *pool, |
231 | | struct mp_image *img) |
232 | 60 | { |
233 | 60 | if (mp_image_is_writeable(img)) |
234 | 60 | return true; |
235 | 0 | struct mp_image *new = mp_image_pool_new_copy(pool, img); |
236 | 0 | if (!new) |
237 | 0 | return false; |
238 | 0 | mp_image_steal_data(img, new); |
239 | 0 | mp_assert(mp_image_is_writeable(img)); |
240 | 0 | return true; |
241 | 0 | } |
242 | | |
243 | | // Call cb(cb_data, fmt, w, h) to allocate an image. Note that the resulting |
244 | | // image must use only 1 AVBufferRef. The returned image must also be owned |
245 | | // exclusively by the image pool, otherwise mp_image_is_writeable() will not |
246 | | // work due to FFmpeg restrictions. |
247 | | void mp_image_pool_set_allocator(struct mp_image_pool *pool, |
248 | | mp_image_allocator cb, void *cb_data) |
249 | 0 | { |
250 | 0 | pool->allocator = cb; |
251 | 0 | pool->allocator_ctx = cb_data; |
252 | 0 | } |
253 | | |
254 | | // Put into LRU mode. (Likely better for hwaccel surfaces, but worse for memory.) |
255 | | void mp_image_pool_set_lru(struct mp_image_pool *pool) |
256 | 0 | { |
257 | 0 | pool->use_lru = true; |
258 | 0 | } |
259 | | |
260 | | // Return the sw image format mp_image_hw_download() would use. This can be |
261 | | // different from src->params.hw_subfmt in obscure cases. |
262 | | int mp_image_hw_download_get_sw_format(struct mp_image *src) |
263 | 0 | { |
264 | 0 | if (!src->hwctx) |
265 | 0 | return 0; |
266 | | |
267 | | // Try to find the first format which we can apparently use. |
268 | 0 | int imgfmt = 0; |
269 | 0 | enum AVPixelFormat *fmts; |
270 | 0 | if (av_hwframe_transfer_get_formats(src->hwctx, |
271 | 0 | AV_HWFRAME_TRANSFER_DIRECTION_FROM, &fmts, 0) < 0) |
272 | 0 | return 0; |
273 | 0 | for (int n = 0; fmts[n] != AV_PIX_FMT_NONE; n++) { |
274 | 0 | imgfmt = pixfmt2imgfmt(fmts[n]); |
275 | 0 | if (imgfmt) |
276 | 0 | break; |
277 | 0 | } |
278 | 0 | av_free(fmts); |
279 | |
|
280 | 0 | return imgfmt; |
281 | 0 | } |
282 | | |
283 | | // Copies the contents of the HW surface src to system memory and returns it. |
284 | | // If swpool is not NULL, it's used to allocate the target image. |
285 | | // src must be a hw surface with a AVHWFramesContext attached. |
286 | | // The returned image is cropped as needed. |
287 | | // Returns NULL on failure. |
288 | | struct mp_image *mp_image_hw_download(struct mp_image *src, |
289 | | struct mp_image_pool *swpool) |
290 | 0 | { |
291 | 0 | int imgfmt = mp_image_hw_download_get_sw_format(src); |
292 | 0 | if (!imgfmt) |
293 | 0 | return NULL; |
294 | | |
295 | 0 | mp_assert(src->hwctx); |
296 | 0 | AVHWFramesContext *fctx = (void *)src->hwctx->data; |
297 | |
|
298 | 0 | struct mp_image *dst = |
299 | 0 | mp_image_pool_get(swpool, imgfmt, fctx->width, fctx->height); |
300 | 0 | if (!dst) |
301 | 0 | return NULL; |
302 | | |
303 | | // Target image must be writable, so unref it. |
304 | 0 | AVFrame *dstav = mp_image_to_av_frame_and_unref(dst); |
305 | 0 | if (!dstav) |
306 | 0 | return NULL; |
307 | | |
308 | 0 | AVFrame *srcav = mp_image_to_av_frame(src); |
309 | 0 | if (!srcav) { |
310 | 0 | av_frame_unref(dstav); |
311 | 0 | return NULL; |
312 | 0 | } |
313 | | |
314 | 0 | int res = av_hwframe_transfer_data(dstav, srcav, 0); |
315 | 0 | av_frame_free(&srcav); |
316 | 0 | dst = mp_image_from_av_frame(dstav); |
317 | 0 | av_frame_free(&dstav); |
318 | 0 | if (res >= 0 && dst) { |
319 | 0 | mp_image_set_size(dst, src->w, src->h); |
320 | 0 | mp_image_copy_attributes(dst, src); |
321 | 0 | } else { |
322 | 0 | mp_image_unrefp(&dst); |
323 | 0 | } |
324 | 0 | return dst; |
325 | 0 | } |
326 | | |
327 | | bool mp_image_hw_upload(struct mp_image *hw_img, struct mp_image *src) |
328 | 0 | { |
329 | 0 | if (hw_img->w != src->w || hw_img->h != src->h) |
330 | 0 | return false; |
331 | | |
332 | 0 | if (!hw_img->hwctx) |
333 | 0 | return false; |
334 | | |
335 | 0 | bool ok = false; |
336 | 0 | AVFrame *dstav = NULL; |
337 | 0 | AVFrame *srcav = NULL; |
338 | | |
339 | | // This means the destination image will not be "writable", which would be |
340 | | // a pain if FFmpeg enforced this - fortunately it doesn't care. We can |
341 | | // transfer data to it even if there are multiple refs. |
342 | 0 | dstav = mp_image_to_av_frame(hw_img); |
343 | 0 | if (!dstav) |
344 | 0 | goto done; |
345 | | |
346 | 0 | srcav = mp_image_to_av_frame(src); |
347 | 0 | if (!srcav) |
348 | 0 | goto done; |
349 | | |
350 | 0 | ok = av_hwframe_transfer_data(dstav, srcav, 0) >= 0; |
351 | |
|
352 | 0 | done: |
353 | 0 | av_frame_free(&srcav); |
354 | 0 | av_frame_free(&dstav); |
355 | |
|
356 | 0 | if (ok) |
357 | 0 | mp_image_copy_attributes(hw_img, src); |
358 | 0 | return ok; |
359 | 0 | } |
360 | | |
361 | | bool mp_update_av_hw_frames_pool(struct AVBufferRef **hw_frames_ctx, |
362 | | struct AVBufferRef *hw_device_ctx, |
363 | | int imgfmt, int sw_imgfmt, int w, int h, |
364 | | bool disable_multiplane) |
365 | 0 | { |
366 | 0 | enum AVPixelFormat format = imgfmt2pixfmt(imgfmt); |
367 | 0 | enum AVPixelFormat sw_format = imgfmt2pixfmt(sw_imgfmt); |
368 | |
|
369 | 0 | if (format == AV_PIX_FMT_NONE || sw_format == AV_PIX_FMT_NONE || |
370 | 0 | !hw_device_ctx || w < 1 || h < 1) |
371 | 0 | { |
372 | 0 | av_buffer_unref(hw_frames_ctx); |
373 | 0 | return false; |
374 | 0 | } |
375 | | |
376 | 0 | if (*hw_frames_ctx) { |
377 | 0 | AVHWFramesContext *hw_frames = (void *)(*hw_frames_ctx)->data; |
378 | |
|
379 | 0 | if (hw_frames->device_ref->data != hw_device_ctx->data || |
380 | 0 | hw_frames->format != format || hw_frames->sw_format != sw_format || |
381 | 0 | hw_frames->width != w || hw_frames->height != h) |
382 | 0 | av_buffer_unref(hw_frames_ctx); |
383 | 0 | } |
384 | |
|
385 | 0 | if (!*hw_frames_ctx) { |
386 | 0 | *hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx); |
387 | 0 | if (!*hw_frames_ctx) |
388 | 0 | return false; |
389 | | |
390 | 0 | AVHWFramesContext *hw_frames = (void *)(*hw_frames_ctx)->data; |
391 | 0 | hw_frames->format = format; |
392 | 0 | hw_frames->sw_format = sw_format; |
393 | 0 | hw_frames->width = w; |
394 | 0 | hw_frames->height = h; |
395 | |
|
396 | | #if HAVE_VULKAN |
397 | | if (format == AV_PIX_FMT_VULKAN && disable_multiplane) { |
398 | | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(sw_format); |
399 | | if ((desc->flags & AV_PIX_FMT_FLAG_PLANAR) && |
400 | | !(desc->flags & AV_PIX_FMT_FLAG_RGB)) { |
401 | | AVVulkanFramesContext *vk_frames = hw_frames->hwctx; |
402 | | vk_frames->flags = AV_VK_FRAME_FLAG_DISABLE_MULTIPLANE; |
403 | | } |
404 | | } |
405 | | #endif |
406 | |
|
407 | 0 | if (av_hwframe_ctx_init(*hw_frames_ctx) < 0) { |
408 | 0 | av_buffer_unref(hw_frames_ctx); |
409 | 0 | return false; |
410 | 0 | } |
411 | 0 | } |
412 | | |
413 | 0 | return true; |
414 | 0 | } |
415 | | |
416 | | struct mp_image *mp_av_pool_image_hw_upload(struct AVBufferRef *hw_frames_ctx, |
417 | | struct mp_image *src) |
418 | 0 | { |
419 | 0 | AVFrame *av_frame = av_frame_alloc(); |
420 | 0 | if (!av_frame) |
421 | 0 | return NULL; |
422 | 0 | if (av_hwframe_get_buffer(hw_frames_ctx, av_frame, 0) < 0) { |
423 | 0 | av_frame_free(&av_frame); |
424 | 0 | return NULL; |
425 | 0 | } |
426 | 0 | struct mp_image *dst = mp_image_from_av_frame(av_frame); |
427 | 0 | av_frame_free(&av_frame); |
428 | 0 | if (!dst) |
429 | 0 | return NULL; |
430 | | |
431 | 0 | if (dst->w < src->w || dst->h < src->h) { |
432 | 0 | talloc_free(dst); |
433 | 0 | return NULL; |
434 | 0 | } |
435 | | |
436 | 0 | mp_image_set_size(dst, src->w, src->h); |
437 | |
|
438 | 0 | if (!mp_image_hw_upload(dst, src)) { |
439 | 0 | talloc_free(dst); |
440 | 0 | return NULL; |
441 | 0 | } |
442 | | |
443 | 0 | mp_image_copy_attributes(dst, src); |
444 | 0 | return dst; |
445 | 0 | } |
446 | | |
447 | | struct mp_image *mp_av_pool_image_hw_map(struct AVBufferRef *hw_frames_ctx, |
448 | | struct mp_image *src) |
449 | 0 | { |
450 | 0 | AVFrame *dst_frame = av_frame_alloc(); |
451 | 0 | if (!dst_frame) |
452 | 0 | return NULL; |
453 | | |
454 | 0 | dst_frame->format = ((AVHWFramesContext*)hw_frames_ctx->data)->format; |
455 | 0 | dst_frame->hw_frames_ctx = av_buffer_ref(hw_frames_ctx); |
456 | |
|
457 | 0 | AVFrame *src_frame = mp_image_to_av_frame(src); |
458 | 0 | if (av_hwframe_map(dst_frame, src_frame, 0) < 0) { |
459 | 0 | av_frame_free(&src_frame); |
460 | 0 | av_frame_free(&dst_frame); |
461 | 0 | return NULL; |
462 | 0 | } |
463 | 0 | av_frame_free(&src_frame); |
464 | |
|
465 | 0 | struct mp_image *dst = mp_image_from_av_frame(dst_frame); |
466 | 0 | av_frame_free(&dst_frame); |
467 | 0 | if (!dst) |
468 | 0 | return NULL; |
469 | | |
470 | 0 | mp_image_copy_attributes(dst, src); |
471 | 0 | return dst; |
472 | 0 | } |