/src/ffmpeg/libavcodec/utvideodec.c
Line | Count | Source |
1 | | /* |
2 | | * Ut Video decoder |
3 | | * Copyright (c) 2011 Konstantin Shishkov |
4 | | * |
5 | | * This file is part of FFmpeg. |
6 | | * |
7 | | * FFmpeg is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Lesser General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2.1 of the License, or (at your option) any later version. |
11 | | * |
12 | | * FFmpeg is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Lesser General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Lesser General Public |
18 | | * License along with FFmpeg; if not, write to the Free Software |
19 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 | | */ |
21 | | |
22 | | /** |
23 | | * @file |
24 | | * Ut Video decoder |
25 | | */ |
26 | | |
27 | | #include <inttypes.h> |
28 | | #include <stdlib.h> |
29 | | |
30 | 1.33M | #define CACHED_BITSTREAM_READER !ARCH_X86_32 |
31 | | #define UNCHECKED_BITSTREAM_READER 1 |
32 | | |
33 | | #include "libavutil/intreadwrite.h" |
34 | | #include "libavutil/mem.h" |
35 | | #include "libavutil/pixdesc.h" |
36 | | #include "avcodec.h" |
37 | | #include "bswapdsp.h" |
38 | | #include "bytestream.h" |
39 | | #include "codec_internal.h" |
40 | | #include "get_bits.h" |
41 | | #include "lossless_videodsp.h" |
42 | | #include "thread.h" |
43 | | #include "utvideo.h" |
44 | | #include "utvideodsp.h" |
45 | | |
46 | | typedef struct UtvideoContext { |
47 | | AVCodecContext *avctx; |
48 | | UTVideoDSPContext utdsp; |
49 | | BswapDSPContext bdsp; |
50 | | LLVidDSPContext llviddsp; |
51 | | |
52 | | uint32_t frame_info_size, flags, frame_info, offset; |
53 | | int planes; |
54 | | int slices; |
55 | | int compression; |
56 | | int interlaced; |
57 | | int frame_pred; |
58 | | int pro; |
59 | | int pack; |
60 | | |
61 | | uint8_t *slice_bits; |
62 | | int slice_bits_size; |
63 | | void *buffer; |
64 | | |
65 | | const uint8_t *packed_stream[4][256]; |
66 | | size_t packed_stream_size[4][256]; |
67 | | const uint8_t *control_stream[4][256]; |
68 | | size_t control_stream_size[4][256]; |
69 | | } UtvideoContext; |
70 | | |
71 | | typedef struct HuffEntry { |
72 | | uint8_t len; |
73 | | uint16_t sym; |
74 | | } HuffEntry; |
75 | | |
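| | /* Build the VLC tables from a table of per-symbol code lengths: 255 marks
| |  * a symbol that does not occur at all, while a length of 0 means the whole
| |  * plane consists of that single symbol, which is reported through *fsym
| |  * instead of building a tree. */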
76 | | static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc, |
77 | | VLC_MULTI *multi, int *fsym, unsigned nb_elems) |
78 | 32.3k | { |
79 | 32.3k | int i; |
80 | 32.3k | HuffEntry he[1024]; |
81 | 32.3k | uint8_t bits[1024]; |
82 | 32.3k | uint16_t codes_count[33] = { 0 }; |
83 | | |
84 | 32.3k | *fsym = -1; |
85 | 1.35M | for (i = 0; i < nb_elems; i++) { |
86 | 1.34M | if (src[i] == 0) { |
87 | 26.2k | *fsym = i; |
88 | 26.2k | return 0; |
89 | 1.32M | } else if (src[i] == 255) { |
90 | 1.17M | bits[i] = 0; |
91 | 1.17M | } else if (src[i] <= 32) { |
92 | 141k | bits[i] = src[i]; |
93 | 141k | } else |
94 | 1.43k | return AVERROR_INVALIDDATA; |
95 | | |
96 | 1.32M | codes_count[bits[i]]++; |
97 | 1.32M | } |
98 | 4.66k | if (codes_count[0] == nb_elems) |
99 | 69 | return AVERROR_INVALIDDATA; |
100 | | |
101 | | /* For Ut Video, longer codes sit to the left of the tree, and |
102 | | * among codes of the same length the symbol value descends from |
103 | | * left to right. So after the next loop --codes_count[i] will |
104 | | * be the index of the first (lowest) symbol of length i when |
105 | | * indexed by the position in the tree with left nodes being first. */ |
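| | /* For example, lengths {2, 3, 3, 2} for symbols 0-3 give
| |  * codes_count = {4, 4, 4, 2} after the loop below, and the placement
| |  * loop then fills he[] with symbols {2, 1, 3, 0} and lengths
| |  * {3, 3, 2, 2}: longest codes first, equal-length symbols descending. */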
106 | 151k | for (int i = 31; i >= 0; i--) |
107 | 147k | codes_count[i] += codes_count[i + 1]; |
108 | | |
109 | 1.27M | for (unsigned i = 0; i < nb_elems; i++) |
110 | 1.26M | he[--codes_count[bits[i]]] = (HuffEntry) { bits[i], i }; |
111 | | |
112 | 325k | #define VLC_BITS 11 |
113 | 4.59k | return ff_vlc_init_multi_from_lengths(vlc, multi, VLC_BITS, nb_elems, codes_count[0], |
114 | 4.59k | &he[0].len, sizeof(*he), |
115 | 4.59k | &he[0].sym, sizeof(*he), 2, 0, 0, c->avctx); |
116 | 4.66k | } |
117 | | |
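| | /* Decode one line of width samples of b bytes each into buf (the
| |  * destination row itself, or the scratch buffer when left prediction is
| |  * used): multi-symbol VLC reads where the cached bitstream reader is
| |  * available, a single-symbol fallback for the tail, then left prediction
| |  * into dest. */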
118 | 573k | #define READ_PLANE(b, end) \ |
119 | 573k | { \ |
120 | 573k | buf = !use_pred ? dest : c->buffer; \ |
121 | 573k | i = 0; \ |
122 | 666k | for (; CACHED_BITSTREAM_READER && i < width-end && get_bits_left(&gb) > 0;) {\ |
123 | 95.1k | ret = get_vlc_multi(&gb, (uint8_t *)buf + i * b, multi.table, \ |
124 | 95.1k | vlc.table, VLC_BITS, 3, b); \ |
125 | 95.1k | if (ret > 0) \ |
126 | 95.1k | i += ret; \ |
127 | 95.1k | if (ret <= 0) \ |
128 | 95.1k | goto fail; \ |
129 | 95.1k | } \ |
130 | 796k | for (; i < width && get_bits_left(&gb) > 0; i++) \ |
131 | 571k | buf[i] = get_vlc2(&gb, vlc.table, VLC_BITS, 3); \ |
132 | 571k | if (use_pred) { \ |
133 | 6.65k | if (b == 2) \ |
134 | 6.65k | c->llviddsp.add_left_pred_int16((uint16_t *)dest, (const uint16_t *)buf, 0x3ff, width, prev); \ |
135 | 6.65k | else \ |
136 | 6.65k | c->llviddsp.add_left_pred((uint8_t *)dest, (const uint8_t *)buf, width, prev); \ |
137 | 6.65k | } \ |
138 | 571k | prev = dest[width-1]; \ |
139 | 571k | dest += stride; \ |
140 | 571k | } |
141 | | |
142 | | static int decode_plane10(UtvideoContext *c, int plane_no, |
143 | | uint16_t *dst, ptrdiff_t stride, |
144 | | int width, int height, |
145 | | const uint8_t *src, const uint8_t *huff, |
146 | | int use_pred) |
147 | 1.87k | { |
148 | 1.87k | int i, j, slice, pix, ret; |
149 | 1.87k | int sstart, send; |
150 | 1.87k | VLC_MULTI multi; |
151 | 1.87k | VLC vlc; |
152 | 1.87k | GetBitContext gb; |
153 | 1.87k | int prev, fsym; |
154 | | |
155 | 1.87k | if ((ret = build_huff(c, huff, &vlc, &multi, &fsym, 1024)) < 0) { |
156 | 272 | av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n"); |
157 | 272 | return ret; |
158 | 272 | } |
159 | 1.60k | if (fsym >= 0) { // build_huff reported a symbol to fill slices with |
160 | 1.48k | send = 0; |
161 | 3.78k | for (slice = 0; slice < c->slices; slice++) { |
162 | 2.29k | uint16_t *dest; |
163 | | |
164 | 2.29k | sstart = send; |
165 | 2.29k | send = (height * (slice + 1) / c->slices); |
166 | 2.29k | dest = dst + sstart * stride; |
167 | | |
168 | 2.29k | prev = 0x200; |
169 | 3.52M | for (j = sstart; j < send; j++) { |
170 | 262M | for (i = 0; i < width; i++) { |
171 | 259M | pix = fsym; |
172 | 259M | if (use_pred) { |
173 | 9.25M | prev += pix; |
174 | 9.25M | prev &= 0x3FF; |
175 | 9.25M | pix = prev; |
176 | 9.25M | } |
177 | 259M | dest[i] = pix; |
178 | 259M | } |
179 | 3.52M | dest += stride; |
180 | 3.52M | } |
181 | 2.29k | } |
182 | 1.48k | return 0; |
183 | 1.48k | } |
184 | | |
185 | 117 | send = 0; |
186 | 194 | for (slice = 0; slice < c->slices; slice++) { |
187 | 117 | uint16_t *dest, *buf; |
188 | 117 | int slice_data_start, slice_data_end, slice_size; |
189 | | |
190 | 117 | sstart = send; |
191 | 117 | send = (height * (slice + 1) / c->slices); |
192 | 117 | dest = dst + sstart * stride; |
193 | | |
194 | | // slice offset and size validation was done earlier |
195 | 117 | slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0; |
196 | 117 | slice_data_end = AV_RL32(src + slice * 4); |
197 | 117 | slice_size = slice_data_end - slice_data_start; |
198 | | |
199 | 117 | if (!slice_size) { |
200 | 5 | av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol " |
201 | 5 | "yet a slice has a length of zero.\n"); |
202 | 5 | goto fail; |
203 | 5 | } |
204 | | |
205 | 112 | memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); |
206 | 112 | c->bdsp.bswap_buf((uint32_t *) c->slice_bits, |
207 | 112 | (uint32_t *)(src + slice_data_start + c->slices * 4), |
208 | 112 | (slice_data_end - slice_data_start + 3) >> 2); |
209 | 112 | init_get_bits(&gb, c->slice_bits, slice_size * 8); |
210 | | |
211 | 112 | prev = 0x200; |
212 | 98.0k | for (j = sstart; j < send; j++) |
213 | 97.9k | READ_PLANE(2, 3) |
214 | 77 | if (get_bits_left(&gb) > 32) |
215 | 75 | av_log(c->avctx, AV_LOG_WARNING, |
216 | 75 | "%d bits left after decoding slice\n", get_bits_left(&gb)); |
217 | 77 | } |
218 | | |
219 | 77 | ff_vlc_free(&vlc); |
220 | 77 | ff_vlc_free_multi(&multi); |
221 | | |
222 | 77 | return 0; |
223 | 40 | fail: |
224 | 40 | ff_vlc_free(&vlc); |
225 | 40 | ff_vlc_free_multi(&multi); |
226 | 40 | return AVERROR_INVALIDDATA; |
227 | 117 | } |
228 | | |
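| | /* Row mask for slice boundaries: the luma plane of 4:2:0 content is cut on
| |  * even lines (multiples of four when interlaced) so that the half-height
| |  * chroma slices stay aligned, and every plane is cut on even lines when
| |  * the frame is interlaced. */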
229 | | static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt) |
230 | 32.6k | { |
231 | 32.6k | const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no; |
232 | | |
233 | 32.6k | if (interlaced) |
234 | 10.1k | return ~(1 + 2 * is_luma); |
235 | | |
236 | 22.4k | return ~is_luma; |
237 | 32.6k | } |
238 | | |
239 | | static int decode_plane(UtvideoContext *c, int plane_no, |
240 | | uint8_t *dst, ptrdiff_t stride, |
241 | | int width, int height, |
242 | | const uint8_t *src, int use_pred) |
243 | 32.6k | { |
244 | 32.6k | int i, j, slice, pix; |
245 | 32.6k | int sstart, send; |
246 | 32.6k | VLC_MULTI multi; |
247 | 32.6k | VLC vlc; |
248 | 32.6k | GetBitContext gb; |
249 | 32.6k | int ret, prev, fsym; |
250 | 32.6k | const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt); |
251 | | |
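| | /* Packed (UMxx) planes bypass the Huffman path: each group of eight output
| |  * bytes is described by a 3-bit code from the control stream, where 0
| |  * selects an all-zero group and any other value n means eight (n+1)-bit
| |  * samples follow in the packed stream, expanded back to full 8-bit values
| |  * by the sub/add arithmetic below. */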
252 | 32.6k | if (c->pack) { |
253 | 2.19k | send = 0; |
254 | 3.69k | for (slice = 0; slice < c->slices; slice++) { |
255 | 3.45k | GetBitContext cbit, pbit; |
256 | 3.45k | uint8_t *dest, *p; |
257 | | |
258 | 3.45k | ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]); |
259 | 3.45k | if (ret < 0) |
260 | 0 | return ret; |
261 | | |
262 | 3.45k | ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]); |
263 | 3.45k | if (ret < 0) |
264 | 0 | return ret; |
265 | | |
266 | 3.45k | sstart = send; |
267 | 3.45k | send = (height * (slice + 1) / c->slices) & cmask; |
268 | 3.45k | dest = dst + sstart * stride; |
269 | | |
270 | 3.45k | if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit)) |
271 | 1.68k | return AVERROR_INVALIDDATA; |
272 | | |
273 | 23.4k | for (p = dest; p < dst + send * stride; p += 8) { |
274 | 21.9k | int bits = get_bits_le(&cbit, 3); |
275 | | |
276 | 21.9k | if (bits == 0) { |
277 | 20.9k | *(uint64_t *) p = 0; |
278 | 20.9k | } else { |
279 | 965 | uint32_t sub = 0x80 >> (8 - (bits + 1)), add; |
280 | 965 | int k; |
281 | | |
282 | 965 | if ((bits + 1) * 8 > get_bits_left(&pbit)) |
283 | 269 | return AVERROR_INVALIDDATA; |
284 | | |
285 | 6.26k | for (k = 0; k < 8; k++) { |
286 | | |
287 | 5.56k | p[k] = get_bits_le(&pbit, bits + 1); |
288 | 5.56k | add = (~p[k] & sub) << (8 - bits); |
289 | 5.56k | p[k] -= sub; |
290 | 5.56k | p[k] += add; |
291 | 5.56k | } |
292 | 696 | } |
293 | 21.9k | } |
294 | 1.76k | } |
295 | | |
296 | 238 | return 0; |
297 | 2.19k | } |
298 | | |
299 | 30.4k | if (build_huff(c, src, &vlc, &multi, &fsym, 256)) { |
300 | 1.62k | av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n"); |
301 | 1.62k | return AVERROR_INVALIDDATA; |
302 | 1.62k | } |
303 | 28.8k | if (fsym >= 0) { // build_huff reported a symbol to fill slices with |
304 | 24.7k | send = 0; |
305 | 61.4k | for (slice = 0; slice < c->slices; slice++) { |
306 | 36.6k | uint8_t *dest; |
307 | | |
308 | 36.6k | sstart = send; |
309 | 36.6k | send = (height * (slice + 1) / c->slices) & cmask; |
310 | 36.6k | dest = dst + sstart * stride; |
311 | | |
312 | 36.6k | prev = 0x80; |
313 | 55.5M | for (j = sstart; j < send; j++) { |
314 | 4.81G | for (i = 0; i < width; i++) { |
315 | 4.75G | pix = fsym; |
316 | 4.75G | if (use_pred) { |
317 | 9.47M | prev += (unsigned)pix; |
318 | 9.47M | pix = prev; |
319 | 9.47M | } |
320 | 4.75G | dest[i] = pix; |
321 | 4.75G | } |
322 | 55.5M | dest += stride; |
323 | 55.5M | } |
324 | 36.6k | } |
325 | 24.7k | return 0; |
326 | 24.7k | } |
327 | | |
328 | 4.08k | src += 256; |
329 | | |
330 | 4.08k | send = 0; |
331 | 5.23k | for (slice = 0; slice < c->slices; slice++) { |
332 | 4.08k | uint8_t *dest, *buf; |
333 | 4.08k | int slice_data_start, slice_data_end, slice_size; |
334 | | |
335 | 4.08k | sstart = send; |
336 | 4.08k | send = (height * (slice + 1) / c->slices) & cmask; |
337 | 4.08k | dest = dst + sstart * stride; |
338 | | |
339 | | // slice offset and size validation was done earlier |
340 | 4.08k | slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0; |
341 | 4.08k | slice_data_end = AV_RL32(src + slice * 4); |
342 | 4.08k | slice_size = slice_data_end - slice_data_start; |
343 | | |
344 | 4.08k | if (!slice_size) { |
345 | 2.32k | av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol " |
346 | 2.32k | "yet a slice has a length of zero.\n"); |
347 | 2.32k | goto fail; |
348 | 2.32k | } |
349 | | |
350 | 1.76k | memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); |
351 | 1.76k | c->bdsp.bswap_buf((uint32_t *) c->slice_bits, |
352 | 1.76k | (uint32_t *)(src + slice_data_start + c->slices * 4), |
353 | 1.76k | (slice_data_end - slice_data_start + 3) >> 2); |
354 | 1.76k | init_get_bits(&gb, c->slice_bits, slice_size * 8); |
355 | | |
356 | 1.76k | prev = 0x80; |
357 | 475k | for (j = sstart; j < send; j++) |
358 | 474k | READ_PLANE(1, 5) |
359 | 1.15k | if (get_bits_left(&gb) > 32) |
360 | 448 | av_log(c->avctx, AV_LOG_WARNING, |
361 | 448 | "%d bits left after decoding slice\n", get_bits_left(&gb)); |
362 | 1.15k | } |
363 | | |
364 | 1.15k | ff_vlc_free(&vlc); |
365 | 1.15k | ff_vlc_free_multi(&multi); |
366 | | |
367 | 1.15k | return 0; |
368 | 2.93k | fail: |
369 | 2.93k | ff_vlc_free(&vlc); |
370 | 2.93k | ff_vlc_free_multi(&multi); |
371 | 2.93k | return AVERROR_INVALIDDATA; |
372 | 4.08k | } |
373 | | |
374 | | #undef A |
375 | | #undef B |
376 | | #undef C |
377 | | |
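| | /* Median restore: every pixel after the first line holds the difference
| |  * from mid_pred(A, B, A + B - C), where A is the left, B the top and C the
| |  * top-left neighbour; the first line of each slice uses plain left
| |  * prediction and the first pixel of the second line uses the top pixel. */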
378 | | static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, |
379 | | int width, int height, int slices, int rmode) |
380 | 1.51k | { |
381 | 1.51k | int i, j, slice; |
382 | 1.51k | int A, B, C; |
383 | 1.51k | uint8_t *bsrc; |
384 | 1.51k | int slice_start, slice_height; |
385 | 1.51k | const int cmask = ~rmode; |
386 | | |
387 | 5.31k | for (slice = 0; slice < slices; slice++) { |
388 | 3.80k | slice_start = ((slice * height) / slices) & cmask; |
389 | 3.80k | slice_height = ((((slice + 1) * height) / slices) & cmask) - |
390 | 3.80k | slice_start; |
391 | | |
392 | 3.80k | if (!slice_height) |
393 | 1.13k | continue; |
394 | 2.66k | bsrc = src + slice_start * stride; |
395 | | |
396 | | // first line - left neighbour prediction |
397 | 2.66k | bsrc[0] += 0x80; |
398 | 2.66k | c->llviddsp.add_left_pred(bsrc, bsrc, width, 0); |
399 | 2.66k | bsrc += stride; |
400 | 2.66k | if (slice_height <= 1) |
401 | 446 | continue; |
402 | | // second line - first element has top prediction, the rest uses median |
403 | 2.22k | C = bsrc[-stride]; |
404 | 2.22k | bsrc[0] += C; |
405 | 2.22k | A = B = bsrc[0]; |
406 | 23.1k | for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (the DSP call needs 16-byte alignment) */ |
407 | 20.8k | B = bsrc[i - stride]; |
408 | 20.8k | bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); |
409 | 20.8k | C = B; |
410 | 20.8k | A = bsrc[i]; |
411 | 20.8k | } |
412 | 2.22k | if (width > 16) |
413 | 1.24k | c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16, |
414 | 1.24k | bsrc + 16, width - 16, &A, &B); |
415 | | |
416 | 2.22k | bsrc += stride; |
417 | | // the remaining lines use continuous median prediction |
418 | 3.30M | for (j = 2; j < slice_height; j++) { |
419 | 3.30M | c->llviddsp.add_median_pred(bsrc, bsrc - stride, |
420 | 3.30M | bsrc, width, &A, &B); |
421 | 3.30M | bsrc += stride; |
422 | 3.30M | } |
423 | 2.22k | } |
424 | 1.51k | } |
425 | | |
426 | | /* UtVideo interlaced mode treats every two lines as a single one, |
427 | | * so the restoring function has to take care of possible padding |
428 | | * between the two parts of the same "line". |
429 | | */ |
430 | | static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, |
431 | | int width, int height, int slices, int rmode) |
432 | 1.46k | { |
433 | 1.46k | int i, j, slice; |
434 | 1.46k | int A, B, C; |
435 | 1.46k | uint8_t *bsrc; |
436 | 1.46k | int slice_start, slice_height; |
437 | 1.46k | const int cmask = ~(rmode ? 3 : 1); |
438 | 1.46k | const ptrdiff_t stride2 = stride << 1; |
439 | | |
440 | 4.21k | for (slice = 0; slice < slices; slice++) { |
441 | 2.74k | slice_start = ((slice * height) / slices) & cmask; |
442 | 2.74k | slice_height = ((((slice + 1) * height) / slices) & cmask) - |
443 | 2.74k | slice_start; |
444 | 2.74k | slice_height >>= 1; |
445 | 2.74k | if (!slice_height) |
446 | 987 | continue; |
447 | | |
448 | 1.75k | bsrc = src + slice_start * stride; |
449 | | |
450 | | // first line - left neighbour prediction |
451 | 1.75k | bsrc[0] += 0x80; |
452 | 1.75k | A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0); |
453 | 1.75k | c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A); |
454 | 1.75k | bsrc += stride2; |
455 | 1.75k | if (slice_height <= 1) |
456 | 319 | continue; |
457 | | // second line - first element has top prediction, the rest uses median |
458 | 1.44k | C = bsrc[-stride2]; |
459 | 1.44k | bsrc[0] += C; |
460 | 1.44k | A = bsrc[0]; |
461 | 15.5k | for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (the DSP call needs 16-byte alignment) */ |
462 | 14.1k | B = bsrc[i - stride2]; |
463 | 14.1k | bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); |
464 | 14.1k | C = B; |
465 | 14.1k | A = bsrc[i]; |
466 | 14.1k | } |
467 | 1.44k | if (width > 16) |
468 | 879 | c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16, |
469 | 879 | bsrc + 16, width - 16, &A, &B); |
470 | | |
471 | 1.44k | c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride, |
472 | 1.44k | bsrc + stride, width, &A, &B); |
473 | 1.44k | bsrc += stride2; |
474 | | // the remaining lines use continuous median prediction |
475 | 1.31M | for (j = 2; j < slice_height; j++) { |
476 | 1.31M | c->llviddsp.add_median_pred(bsrc, bsrc - stride2, |
477 | 1.31M | bsrc, width, &A, &B); |
478 | 1.31M | c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride, |
479 | 1.31M | bsrc + stride, width, &A, &B); |
480 | 1.31M | bsrc += stride2; |
481 | 1.31M | } |
482 | 1.44k | } |
483 | 1.46k | } |
484 | | |
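| | /* Gradient restore: the predictor is left + top - top-left (A - B + C in
| |  * the scalar loop below, with A the top, B the top-left and C the left
| |  * neighbour); the first line of each slice falls back to left prediction
| |  * and the first pixel of every following line to top prediction. */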
485 | | static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, |
486 | | int width, int height, int slices, int rmode) |
487 | 4.03k | { |
488 | 4.03k | int i, j, slice; |
489 | 4.03k | int A, B, C; |
490 | 4.03k | uint8_t *bsrc; |
491 | 4.03k | int slice_start, slice_height; |
492 | 4.03k | const int cmask = ~rmode; |
493 | 4.03k | int min_width = FFMIN(width, 32); |
494 | | |
495 | 14.4k | for (slice = 0; slice < slices; slice++) { |
496 | 10.3k | slice_start = ((slice * height) / slices) & cmask; |
497 | 10.3k | slice_height = ((((slice + 1) * height) / slices) & cmask) - |
498 | 10.3k | slice_start; |
499 | | |
500 | 10.3k | if (!slice_height) |
501 | 2.10k | continue; |
502 | 8.29k | bsrc = src + slice_start * stride; |
503 | | |
504 | | // first line - left neighbour prediction |
505 | 8.29k | bsrc[0] += 0x80; |
506 | 8.29k | c->llviddsp.add_left_pred(bsrc, bsrc, width, 0); |
507 | 8.29k | bsrc += stride; |
508 | 8.29k | if (slice_height <= 1) |
509 | 3.30k | continue; |
510 | 14.4M | for (j = 1; j < slice_height; j++) { |
511 | | // second line - first element has top prediction, the rest uses gradient |
512 | 14.4M | bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF; |
513 | 179M | for (i = 1; i < min_width; i++) { /* scalar loop (the DSP call needs 32-byte alignment) */ |
514 | 164M | A = bsrc[i - stride]; |
515 | 164M | B = bsrc[i - (stride + 1)]; |
516 | 164M | C = bsrc[i - 1]; |
517 | 164M | bsrc[i] = (A - B + C + bsrc[i]) & 0xFF; |
518 | 164M | } |
519 | 14.4M | if (width > 32) |
520 | 3.41M | c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32); |
521 | 14.4M | bsrc += stride; |
522 | 14.4M | } |
523 | 4.99k | } |
524 | 4.03k | } |
525 | | |
526 | | static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, |
527 | | int width, int height, int slices, int rmode) |
528 | 1.29k | { |
529 | 1.29k | int i, j, slice; |
530 | 1.29k | int A, B, C; |
531 | 1.29k | uint8_t *bsrc; |
532 | 1.29k | int slice_start, slice_height; |
533 | 1.29k | const int cmask = ~(rmode ? 3 : 1); |
534 | 1.29k | const ptrdiff_t stride2 = stride << 1; |
535 | 1.29k | int min_width = FFMIN(width, 32); |
536 | | |
537 | 3.77k | for (slice = 0; slice < slices; slice++) { |
538 | 2.48k | slice_start = ((slice * height) / slices) & cmask; |
539 | 2.48k | slice_height = ((((slice + 1) * height) / slices) & cmask) - |
540 | 2.48k | slice_start; |
541 | 2.48k | slice_height >>= 1; |
542 | 2.48k | if (!slice_height) |
543 | 930 | continue; |
544 | | |
545 | 1.55k | bsrc = src + slice_start * stride; |
546 | | |
547 | | // first line - left neighbour prediction |
548 | 1.55k | bsrc[0] += 0x80; |
549 | 1.55k | A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0); |
550 | 1.55k | c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A); |
551 | 1.55k | bsrc += stride2; |
552 | 1.55k | if (slice_height <= 1) |
553 | 465 | continue; |
554 | 1.07M | for (j = 1; j < slice_height; j++) { |
555 | | // second line - first element has top prediction, the rest uses gradient |
556 | 1.07M | bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF; |
557 | 17.0M | for (i = 1; i < min_width; i++) { /* scalar loop (the DSP call needs 32-byte alignment) */ |
558 | 15.9M | A = bsrc[i - stride2]; |
559 | 15.9M | B = bsrc[i - (stride2 + 1)]; |
560 | 15.9M | C = bsrc[i - 1]; |
561 | 15.9M | bsrc[i] = (A - B + C + bsrc[i]) & 0xFF; |
562 | 15.9M | } |
563 | 1.07M | if (width > 32) |
564 | 395k | c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32); |
565 | | |
566 | 1.07M | A = bsrc[-stride]; |
567 | 1.07M | B = bsrc[-(1 + stride + stride - width)]; |
568 | 1.07M | C = bsrc[width - 1]; |
569 | 1.07M | bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF; |
570 | 107M | for (i = 1; i < width; i++) { |
571 | 106M | A = bsrc[i - stride]; |
572 | 106M | B = bsrc[i - (1 + stride)]; |
573 | 106M | C = bsrc[i - 1 + stride]; |
574 | 106M | bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF; |
575 | 106M | } |
576 | 1.07M | bsrc += stride2; |
577 | 1.07M | } |
578 | 1.08k | } |
579 | 1.29k | } |
580 | | |
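| | /* Frame layout, as parsed below: in the original (ULxx) variants each plane
| |  * carries a 256-byte code-length table, a table of 32-bit little-endian
| |  * slice end offsets and the slice data, with a frame-info dword after the
| |  * last plane. The pro (UQxx) variants put the frame-info dword first and
| |  * append a 1024-byte length table to each plane, while the packed (UMxx)
| |  * variants instead store an offset to per-slice packed/control stream size
| |  * tables. */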
581 | | static int decode_frame(AVCodecContext *avctx, AVFrame *frame, |
582 | | int *got_frame, AVPacket *avpkt) |
583 | 70.8k | { |
584 | 70.8k | const uint8_t *buf = avpkt->data; |
585 | 70.8k | int buf_size = avpkt->size; |
586 | 70.8k | UtvideoContext *c = avctx->priv_data; |
587 | 70.8k | int i, j; |
588 | 70.8k | const uint8_t *plane_start[5] = {NULL}; |
589 | 70.8k | int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size; |
590 | 70.8k | int ret; |
591 | 70.8k | GetByteContext gb; |
592 | | |
593 | 70.8k | if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0) |
594 | 2.28k | return ret; |
595 | | |
596 | | /* parse plane structure to get frame flags and validate slice offsets */ |
597 | 68.5k | bytestream2_init(&gb, buf, buf_size); |
598 | | |
599 | 68.5k | if (c->pack) { |
600 | 16.8k | const uint8_t *packed_stream; |
601 | 16.8k | const uint8_t *control_stream; |
602 | 16.8k | GetByteContext pb; |
603 | 16.8k | uint32_t nb_cbs; |
604 | 16.8k | int left; |
605 | | |
606 | 16.8k | c->frame_info = PRED_GRADIENT << 8; |
607 | | |
608 | 16.8k | if (bytestream2_get_byte(&gb) != 1) |
609 | 11.8k | return AVERROR_INVALIDDATA; |
610 | 5.06k | bytestream2_skip(&gb, 3); |
611 | 5.06k | c->offset = bytestream2_get_le32(&gb); |
612 | | |
613 | 5.06k | if (buf_size <= c->offset + 8LL) |
614 | 1.44k | return AVERROR_INVALIDDATA; |
615 | | |
616 | 3.61k | bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset); |
617 | | |
618 | 3.61k | nb_cbs = bytestream2_get_le32(&pb); |
619 | 3.61k | if (nb_cbs > c->offset) |
620 | 645 | return AVERROR_INVALIDDATA; |
621 | | |
622 | 2.97k | packed_stream = buf + 8; |
623 | 2.97k | control_stream = packed_stream + (c->offset - nb_cbs); |
624 | 2.97k | left = control_stream - packed_stream; |
625 | | |
626 | 10.5k | for (i = 0; i < c->planes; i++) { |
627 | 124k | for (j = 0; j < c->slices; j++) { |
628 | 117k | c->packed_stream[i][j] = packed_stream; |
629 | 117k | c->packed_stream_size[i][j] = bytestream2_get_le32(&pb); |
630 | 117k | if (c->packed_stream_size[i][j] > left) |
631 | 500 | return AVERROR_INVALIDDATA; |
632 | 116k | left -= c->packed_stream_size[i][j]; |
633 | 116k | packed_stream += c->packed_stream_size[i][j]; |
634 | 116k | } |
635 | 8.07k | } |
636 | | |
637 | 2.47k | left = buf + buf_size - control_stream; |
638 | | |
639 | 8.94k | for (i = 0; i < c->planes; i++) { |
640 | 119k | for (j = 0; j < c->slices; j++) { |
641 | 113k | c->control_stream[i][j] = control_stream; |
642 | 113k | c->control_stream_size[i][j] = bytestream2_get_le32(&pb); |
643 | 113k | if (c->control_stream_size[i][j] > left) |
644 | 513 | return AVERROR_INVALIDDATA; |
645 | 112k | left -= c->control_stream_size[i][j]; |
646 | 112k | control_stream += c->control_stream_size[i][j]; |
647 | 112k | } |
648 | 6.98k | } |
649 | 51.7k | } else if (c->pro) { |
650 | 7.59k | if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) { |
651 | 3.87k | av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n"); |
652 | 3.87k | return AVERROR_INVALIDDATA; |
653 | 3.87k | } |
654 | 3.71k | c->frame_info = bytestream2_get_le32u(&gb); |
655 | 3.71k | c->slices = ((c->frame_info >> 16) & 0xff) + 1; |
656 | 6.71k | for (i = 0; i < c->planes; i++) { |
657 | 6.03k | plane_start[i] = gb.buffer; |
658 | 6.03k | if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) { |
659 | 2.15k | av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n"); |
660 | 2.15k | return AVERROR_INVALIDDATA; |
661 | 2.15k | } |
662 | 3.87k | slice_start = 0; |
663 | 3.87k | slice_end = 0; |
664 | 9.22k | for (j = 0; j < c->slices; j++) { |
665 | 6.22k | slice_end = bytestream2_get_le32u(&gb); |
666 | 6.22k | if (slice_end < 0 || slice_end < slice_start || |
667 | 6.22k | bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) { |
668 | 875 | av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n"); |
669 | 875 | return AVERROR_INVALIDDATA; |
670 | 875 | } |
671 | 5.34k | slice_size = slice_end - slice_start; |
672 | 5.34k | slice_start = slice_end; |
673 | 5.34k | max_slice_size = FFMAX(max_slice_size, slice_size); |
674 | 5.34k | } |
675 | 3.00k | plane_size = slice_end; |
676 | 3.00k | bytestream2_skipu(&gb, plane_size); |
677 | 3.00k | bytestream2_skipu(&gb, 1024); |
678 | 3.00k | } |
679 | 682 | plane_start[c->planes] = gb.buffer; |
680 | 44.1k | } else { |
681 | 85.0k | for (i = 0; i < c->planes; i++) { |
682 | 73.1k | plane_start[i] = gb.buffer; |
683 | 73.1k | if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) { |
684 | 29.8k | av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n"); |
685 | 29.8k | return AVERROR_INVALIDDATA; |
686 | 29.8k | } |
687 | 43.3k | bytestream2_skipu(&gb, 256); |
688 | 43.3k | slice_start = 0; |
689 | 43.3k | slice_end = 0; |
690 | 100k | for (j = 0; j < c->slices; j++) { |
691 | 60.0k | slice_end = bytestream2_get_le32u(&gb); |
692 | 60.0k | if (slice_end < 0 || slice_end < slice_start || |
693 | 60.0k | bytestream2_get_bytes_left(&gb) < slice_end) { |
694 | 2.43k | av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n"); |
695 | 2.43k | return AVERROR_INVALIDDATA; |
696 | 2.43k | } |
697 | 57.5k | slice_size = slice_end - slice_start; |
698 | 57.5k | slice_start = slice_end; |
699 | 57.5k | max_slice_size = FFMAX(max_slice_size, slice_size); |
700 | 57.5k | } |
701 | 40.9k | plane_size = slice_end; |
702 | 40.9k | bytestream2_skipu(&gb, plane_size); |
703 | 40.9k | } |
704 | 11.8k | plane_start[c->planes] = gb.buffer; |
705 | 11.8k | if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) { |
706 | 259 | av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n"); |
707 | 259 | return AVERROR_INVALIDDATA; |
708 | 259 | } |
709 | 11.6k | c->frame_info = bytestream2_get_le32u(&gb); |
710 | 11.6k | } |
711 | 14.2k | av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n", |
712 | 14.2k | c->frame_info); |
713 | | |
714 | 14.2k | c->frame_pred = (c->frame_info >> 8) & 3; |
715 | | |
716 | 14.2k | max_slice_size += 4*avctx->width; |
717 | | |
718 | 14.2k | if (!c->pack) { |
719 | 12.2k | av_fast_malloc(&c->slice_bits, &c->slice_bits_size, |
720 | 12.2k | max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE); |
721 | | |
722 | 12.2k | if (!c->slice_bits) { |
723 | 0 | av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n"); |
724 | 0 | return AVERROR(ENOMEM); |
725 | 0 | } |
726 | 12.2k | } |
727 | | |
728 | 14.2k | switch (c->avctx->pix_fmt) { |
729 | 1.76k | case AV_PIX_FMT_GBRP: |
730 | 2.04k | case AV_PIX_FMT_GBRAP: |
731 | 5.78k | for (i = 0; i < c->planes; i++) { |
732 | 4.76k | ret = decode_plane(c, i, frame->data[i], |
733 | 4.76k | frame->linesize[i], avctx->width, |
734 | 4.76k | avctx->height, plane_start[i], |
735 | 4.76k | c->frame_pred == PRED_LEFT); |
736 | 4.76k | if (ret) |
737 | 1.02k | return ret; |
738 | 3.74k | if (c->frame_pred == PRED_MEDIAN) { |
739 | 551 | if (!c->interlaced) { |
740 | 337 | restore_median_planar(c, frame->data[i], |
741 | 337 | frame->linesize[i], avctx->width, |
742 | 337 | avctx->height, c->slices, 0); |
743 | 337 | } else { |
744 | 214 | restore_median_planar_il(c, frame->data[i], |
745 | 214 | frame->linesize[i], |
746 | 214 | avctx->width, avctx->height, c->slices, |
747 | 214 | 0); |
748 | 214 | } |
749 | 3.18k | } else if (c->frame_pred == PRED_GRADIENT) { |
750 | 1.12k | if (!c->interlaced) { |
751 | 912 | restore_gradient_planar(c, frame->data[i], |
752 | 912 | frame->linesize[i], avctx->width, |
753 | 912 | avctx->height, c->slices, 0); |
754 | 912 | } else { |
755 | 213 | restore_gradient_planar_il(c, frame->data[i], |
756 | 213 | frame->linesize[i], |
757 | 213 | avctx->width, avctx->height, c->slices, |
758 | 213 | 0); |
759 | 213 | } |
760 | 1.12k | } |
761 | 3.74k | } |
762 | 1.01k | c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1], |
763 | 1.01k | frame->linesize[2], frame->linesize[0], frame->linesize[1], |
764 | 1.01k | avctx->width, avctx->height); |
765 | 1.01k | break; |
766 | 86 | case AV_PIX_FMT_GBRAP10: |
767 | 682 | case AV_PIX_FMT_GBRP10: |
768 | 2.24k | for (i = 0; i < c->planes; i++) { |
769 | 1.87k | ret = decode_plane10(c, i, (uint16_t *)frame->data[i], |
770 | 1.87k | frame->linesize[i] / 2, avctx->width, |
771 | 1.87k | avctx->height, plane_start[i], |
772 | 1.87k | plane_start[i + 1] - 1024, |
773 | 1.87k | c->frame_pred == PRED_LEFT); |
774 | 1.87k | if (ret) |
775 | 312 | return ret; |
776 | 1.87k | } |
777 | 370 | c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1], |
778 | 370 | frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2, |
779 | 370 | avctx->width, avctx->height); |
780 | 370 | break; |
781 | 4.50k | case AV_PIX_FMT_YUV420P: |
782 | 14.0k | for (i = 0; i < 3; i++) { |
783 | 11.6k | ret = decode_plane(c, i, frame->data[i], frame->linesize[i], |
784 | 11.6k | avctx->width >> !!i, avctx->height >> !!i, |
785 | 11.6k | plane_start[i], c->frame_pred == PRED_LEFT); |
786 | 11.6k | if (ret) |
787 | 2.11k | return ret; |
788 | 9.54k | if (c->frame_pred == PRED_MEDIAN) { |
789 | 1.00k | if (!c->interlaced) { |
790 | 595 | restore_median_planar(c, frame->data[i], frame->linesize[i], |
791 | 595 | avctx->width >> !!i, avctx->height >> !!i, |
792 | 595 | c->slices, !i); |
793 | 595 | } else { |
794 | 409 | restore_median_planar_il(c, frame->data[i], frame->linesize[i], |
795 | 409 | avctx->width >> !!i, |
796 | 409 | avctx->height >> !!i, |
797 | 409 | c->slices, !i); |
798 | 409 | } |
799 | 8.54k | } else if (c->frame_pred == PRED_GRADIENT) { |
800 | 1.23k | if (!c->interlaced) { |
801 | 837 | restore_gradient_planar(c, frame->data[i], frame->linesize[i], |
802 | 837 | avctx->width >> !!i, avctx->height >> !!i, |
803 | 837 | c->slices, !i); |
804 | 837 | } else { |
805 | 398 | restore_gradient_planar_il(c, frame->data[i], frame->linesize[i], |
806 | 398 | avctx->width >> !!i, |
807 | 398 | avctx->height >> !!i, |
808 | 398 | c->slices, !i); |
809 | 398 | } |
810 | 1.23k | } |
811 | 9.54k | } |
812 | 2.38k | break; |
813 | 3.05k | case AV_PIX_FMT_YUV422P: |
814 | 9.83k | for (i = 0; i < 3; i++) { |
815 | 7.75k | ret = decode_plane(c, i, frame->data[i], frame->linesize[i], |
816 | 7.75k | avctx->width >> !!i, avctx->height, |
817 | 7.75k | plane_start[i], c->frame_pred == PRED_LEFT); |
818 | 7.75k | if (ret) |
819 | 980 | return ret; |
820 | 6.77k | if (c->frame_pred == PRED_MEDIAN) { |
821 | 662 | if (!c->interlaced) { |
822 | 315 | restore_median_planar(c, frame->data[i], frame->linesize[i], |
823 | 315 | avctx->width >> !!i, avctx->height, |
824 | 315 | c->slices, 0); |
825 | 347 | } else { |
826 | 347 | restore_median_planar_il(c, frame->data[i], frame->linesize[i], |
827 | 347 | avctx->width >> !!i, avctx->height, |
828 | 347 | c->slices, 0); |
829 | 347 | } |
830 | 6.11k | } else if (c->frame_pred == PRED_GRADIENT) { |
831 | 1.30k | if (!c->interlaced) { |
832 | 945 | restore_gradient_planar(c, frame->data[i], frame->linesize[i], |
833 | 945 | avctx->width >> !!i, avctx->height, |
834 | 945 | c->slices, 0); |
835 | 945 | } else { |
836 | 359 | restore_gradient_planar_il(c, frame->data[i], frame->linesize[i], |
837 | 359 | avctx->width >> !!i, avctx->height, |
838 | 359 | c->slices, 0); |
839 | 359 | } |
840 | 1.30k | } |
841 | 6.77k | } |
842 | 2.07k | break; |
843 | 3.96k | case AV_PIX_FMT_YUV444P: |
844 | 10.0k | for (i = 0; i < 3; i++) { |
845 | 8.48k | ret = decode_plane(c, i, frame->data[i], frame->linesize[i], |
846 | 8.48k | avctx->width, avctx->height, |
847 | 8.48k | plane_start[i], c->frame_pred == PRED_LEFT); |
848 | 8.48k | if (ret) |
849 | 2.39k | return ret; |
850 | 6.09k | if (c->frame_pred == PRED_MEDIAN) { |
851 | 766 | if (!c->interlaced) { |
852 | 270 | restore_median_planar(c, frame->data[i], frame->linesize[i], |
853 | 270 | avctx->width, avctx->height, |
854 | 270 | c->slices, 0); |
855 | 496 | } else { |
856 | 496 | restore_median_planar_il(c, frame->data[i], frame->linesize[i], |
857 | 496 | avctx->width, avctx->height, |
858 | 496 | c->slices, 0); |
859 | 496 | } |
860 | 5.32k | } else if (c->frame_pred == PRED_GRADIENT) { |
861 | 1.66k | if (!c->interlaced) { |
862 | 1.33k | restore_gradient_planar(c, frame->data[i], frame->linesize[i], |
863 | 1.33k | avctx->width, avctx->height, |
864 | 1.33k | c->slices, 0); |
865 | 1.33k | } else { |
866 | 325 | restore_gradient_planar_il(c, frame->data[i], frame->linesize[i], |
867 | 325 | avctx->width, avctx->height, |
868 | 325 | c->slices, 0); |
869 | 325 | } |
870 | 1.66k | } |
871 | 6.09k | } |
872 | 1.57k | break; |
873 | 1.57k | case AV_PIX_FMT_YUV420P10: |
874 | 0 | for (i = 0; i < 3; i++) { |
875 | 0 | ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2, |
876 | 0 | avctx->width >> !!i, avctx->height >> !!i, |
877 | 0 | plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT); |
878 | 0 | if (ret) |
879 | 0 | return ret; |
880 | 0 | } |
881 | 0 | break; |
882 | 0 | case AV_PIX_FMT_YUV422P10: |
883 | 0 | for (i = 0; i < 3; i++) { |
884 | 0 | ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2, |
885 | 0 | avctx->width >> !!i, avctx->height, |
886 | 0 | plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT); |
887 | 0 | if (ret) |
888 | 0 | return ret; |
889 | 0 | } |
890 | 0 | break; |
891 | 14.2k | } |
892 | | |
893 | 7.41k | if (c->interlaced) |
894 | 2.07k | frame->flags |= AV_FRAME_FLAG_INTERLACED; |
895 | | |
896 | 7.41k | *got_frame = 1; |
897 | | |
898 | | /* always report that the buffer was completely consumed */ |
899 | 7.41k | return buf_size; |
900 | 14.2k | } |
901 | | |
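| | /* Map the container FOURCC to plane count, pixel format and the pro/pack
| |  * decoder variants, then pull the slice count, compression type and (for
| |  * the original variant) the interlacing flag out of the codec extradata. */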
902 | | static av_cold int decode_init(AVCodecContext *avctx) |
903 | 2.97k | { |
904 | 2.97k | UtvideoContext * const c = avctx->priv_data; |
905 | 2.97k | int h_shift, v_shift; |
906 | | |
907 | 2.97k | c->avctx = avctx; |
908 | | |
909 | 2.97k | ff_utvideodsp_init(&c->utdsp); |
910 | 2.97k | ff_bswapdsp_init(&c->bdsp); |
911 | 2.97k | ff_llviddsp_init(&c->llviddsp); |
912 | | |
913 | 2.97k | c->slice_bits_size = 0; |
914 | | |
915 | 2.97k | switch (avctx->codec_tag) { |
916 | 212 | case MKTAG('U', 'L', 'R', 'G'): |
917 | 212 | c->planes = 3; |
918 | 212 | avctx->pix_fmt = AV_PIX_FMT_GBRP; |
919 | 212 | break; |
920 | 37 | case MKTAG('U', 'L', 'R', 'A'): |
921 | 37 | c->planes = 4; |
922 | 37 | avctx->pix_fmt = AV_PIX_FMT_GBRAP; |
923 | 37 | break; |
924 | 15 | case MKTAG('U', 'L', 'Y', '0'): |
925 | 15 | c->planes = 3; |
926 | 15 | avctx->pix_fmt = AV_PIX_FMT_YUV420P; |
927 | 15 | avctx->colorspace = AVCOL_SPC_BT470BG; |
928 | 15 | break; |
929 | 3 | case MKTAG('U', 'L', 'Y', '2'): |
930 | 3 | c->planes = 3; |
931 | 3 | avctx->pix_fmt = AV_PIX_FMT_YUV422P; |
932 | 3 | avctx->colorspace = AVCOL_SPC_BT470BG; |
933 | 3 | break; |
934 | 2 | case MKTAG('U', 'L', 'Y', '4'): |
935 | 2 | c->planes = 3; |
936 | 2 | avctx->pix_fmt = AV_PIX_FMT_YUV444P; |
937 | 2 | avctx->colorspace = AVCOL_SPC_BT470BG; |
938 | 2 | break; |
939 | 1 | case MKTAG('U', 'Q', 'Y', '0'): |
940 | 1 | c->planes = 3; |
941 | 1 | c->pro = 1; |
942 | 1 | avctx->pix_fmt = AV_PIX_FMT_YUV420P10; |
943 | 1 | break; |
944 | 1 | case MKTAG('U', 'Q', 'Y', '2'): |
945 | 1 | c->planes = 3; |
946 | 1 | c->pro = 1; |
947 | 1 | avctx->pix_fmt = AV_PIX_FMT_YUV422P10; |
948 | 1 | break; |
949 | 285 | case MKTAG('U', 'Q', 'R', 'G'): |
950 | 285 | c->planes = 3; |
951 | 285 | c->pro = 1; |
952 | 285 | avctx->pix_fmt = AV_PIX_FMT_GBRP10; |
953 | 285 | break; |
954 | 162 | case MKTAG('U', 'Q', 'R', 'A'): |
955 | 162 | c->planes = 4; |
956 | 162 | c->pro = 1; |
957 | 162 | avctx->pix_fmt = AV_PIX_FMT_GBRAP10; |
958 | 162 | break; |
959 | 531 | case MKTAG('U', 'L', 'H', '0'): |
960 | 531 | c->planes = 3; |
961 | 531 | avctx->pix_fmt = AV_PIX_FMT_YUV420P; |
962 | 531 | avctx->colorspace = AVCOL_SPC_BT709; |
963 | 531 | break; |
964 | 331 | case MKTAG('U', 'L', 'H', '2'): |
965 | 331 | c->planes = 3; |
966 | 331 | avctx->pix_fmt = AV_PIX_FMT_YUV422P; |
967 | 331 | avctx->colorspace = AVCOL_SPC_BT709; |
968 | 331 | break; |
969 | 332 | case MKTAG('U', 'L', 'H', '4'): |
970 | 332 | c->planes = 3; |
971 | 332 | avctx->pix_fmt = AV_PIX_FMT_YUV444P; |
972 | 332 | avctx->colorspace = AVCOL_SPC_BT709; |
973 | 332 | break; |
974 | 4 | case MKTAG('U', 'M', 'Y', '2'): |
975 | 4 | c->planes = 3; |
976 | 4 | c->pack = 1; |
977 | 4 | avctx->pix_fmt = AV_PIX_FMT_YUV422P; |
978 | 4 | avctx->colorspace = AVCOL_SPC_BT470BG; |
979 | 4 | break; |
980 | 100 | case MKTAG('U', 'M', 'H', '2'): |
981 | 100 | c->planes = 3; |
982 | 100 | c->pack = 1; |
983 | 100 | avctx->pix_fmt = AV_PIX_FMT_YUV422P; |
984 | 100 | avctx->colorspace = AVCOL_SPC_BT709; |
985 | 100 | break; |
986 | 5 | case MKTAG('U', 'M', 'Y', '4'): |
987 | 5 | c->planes = 3; |
988 | 5 | c->pack = 1; |
989 | 5 | avctx->pix_fmt = AV_PIX_FMT_YUV444P; |
990 | 5 | avctx->colorspace = AVCOL_SPC_BT470BG; |
991 | 5 | break; |
992 | 195 | case MKTAG('U', 'M', 'H', '4'): |
993 | 195 | c->planes = 3; |
994 | 195 | c->pack = 1; |
995 | 195 | avctx->pix_fmt = AV_PIX_FMT_YUV444P; |
996 | 195 | avctx->colorspace = AVCOL_SPC_BT709; |
997 | 195 | break; |
998 | 105 | case MKTAG('U', 'M', 'R', 'G'): |
999 | 105 | c->planes = 3; |
1000 | 105 | c->pack = 1; |
1001 | 105 | avctx->pix_fmt = AV_PIX_FMT_GBRP; |
1002 | 105 | break; |
1003 | 15 | case MKTAG('U', 'M', 'R', 'A'): |
1004 | 15 | c->planes = 4; |
1005 | 15 | c->pack = 1; |
1006 | 15 | avctx->pix_fmt = AV_PIX_FMT_GBRAP; |
1007 | 15 | break; |
1008 | 636 | default: |
1009 | 636 | av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n", |
1010 | 636 | avctx->codec_tag); |
1011 | 636 | return AVERROR_INVALIDDATA; |
1012 | 2.97k | } |
1013 | | |
1014 | 2.33k | av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift); |
1015 | 2.33k | if ((avctx->width & ((1<<h_shift)-1)) || |
1016 | 2.33k | (avctx->height & ((1<<v_shift)-1))) { |
1017 | 2 | avpriv_request_sample(avctx, "Odd dimensions"); |
1018 | 2 | return AVERROR_PATCHWELCOME; |
1019 | 2 | } |
1020 | | |
1021 | 2.33k | if (c->pack && avctx->extradata_size >= 16) { |
1022 | 416 | av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n", |
1023 | 416 | avctx->extradata[3], avctx->extradata[2], |
1024 | 416 | avctx->extradata[1], avctx->extradata[0]); |
1025 | 416 | av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n", |
1026 | 416 | AV_RB32(avctx->extradata + 4)); |
1027 | 416 | c->compression = avctx->extradata[8]; |
1028 | 416 | if (c->compression != 2) |
1029 | 413 | avpriv_request_sample(avctx, "Unknown compression type"); |
1030 | 416 | c->slices = avctx->extradata[9] + 1; |
1031 | 1.91k | } else if (!c->pro && avctx->extradata_size >= 16) { |
1032 | 1.45k | av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n", |
1033 | 1.45k | avctx->extradata[3], avctx->extradata[2], |
1034 | 1.45k | avctx->extradata[1], avctx->extradata[0]); |
1035 | 1.45k | av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n", |
1036 | 1.45k | AV_RB32(avctx->extradata + 4)); |
1037 | 1.45k | c->frame_info_size = AV_RL32(avctx->extradata + 8); |
1038 | 1.45k | c->flags = AV_RL32(avctx->extradata + 12); |
1039 | | |
1040 | 1.45k | if (c->frame_info_size != 4) |
1041 | 1.45k | avpriv_request_sample(avctx, "Frame info not 4 bytes"); |
1042 | 1.45k | av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags); |
1043 | 1.45k | c->slices = (c->flags >> 24) + 1; |
1044 | 1.45k | c->compression = c->flags & 1; |
1045 | 1.45k | c->interlaced = c->flags & 0x800; |
1046 | 1.45k | } else if (c->pro && avctx->extradata_size == 8) { |
1047 | 434 | av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n", |
1048 | 434 | avctx->extradata[3], avctx->extradata[2], |
1049 | 434 | avctx->extradata[1], avctx->extradata[0]); |
1050 | 434 | av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n", |
1051 | 434 | AV_RB32(avctx->extradata + 4)); |
1052 | 434 | c->interlaced = 0; |
1053 | 434 | c->frame_info_size = 4; |
1054 | 434 | } else { |
1055 | 30 | av_log(avctx, AV_LOG_ERROR, |
1056 | 30 | "Insufficient extradata size %d, should be at least 16\n", |
1057 | 30 | avctx->extradata_size); |
1058 | 30 | return AVERROR_INVALIDDATA; |
1059 | 30 | } |
1060 | | |
1061 | 2.30k | c->buffer = av_calloc(avctx->width + 8, c->pro?2:1); |
1062 | 2.30k | if (!c->buffer) |
1063 | 0 | return AVERROR(ENOMEM); |
1064 | | |
1065 | 2.30k | return 0; |
1066 | 2.30k | } |
1067 | | |
1068 | | static av_cold int decode_end(AVCodecContext *avctx) |
1069 | 2.30k | { |
1070 | 2.30k | UtvideoContext * const c = avctx->priv_data; |
1071 | | |
1072 | 2.30k | av_freep(&c->slice_bits); |
1073 | 2.30k | av_freep(&c->buffer); |
1074 | | |
1075 | 2.30k | return 0; |
1076 | 2.30k | } |
1077 | | |
1078 | | const FFCodec ff_utvideo_decoder = { |
1079 | | .p.name = "utvideo", |
1080 | | CODEC_LONG_NAME("Ut Video"), |
1081 | | .p.type = AVMEDIA_TYPE_VIDEO, |
1082 | | .p.id = AV_CODEC_ID_UTVIDEO, |
1083 | | .priv_data_size = sizeof(UtvideoContext), |
1084 | | .init = decode_init, |
1085 | | .close = decode_end, |
1086 | | FF_CODEC_DECODE_CB(decode_frame), |
1087 | | .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS, |
1088 | | }; |
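| | /* An illustrative sketch, not part of utvideodec.c, of how this decoder is
| |  * typically reached through the public libavcodec API. The codec_tag and
| |  * extradata checked in decode_init() must be supplied by the caller, which
| |  * avcodec_parameters_to_context() does when the parameters come from a
| |  * demuxer; error handling is trimmed to keep the sketch short. */
| | #include <libavcodec/avcodec.h>
| |
| | static int decode_one_utvideo_packet(const AVCodecParameters *par,
| |                                      const AVPacket *pkt, AVFrame *out)
| | {
| |     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_UTVIDEO);
| |     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
| |     int ret;
| |
| |     if (!ctx)
| |         return AVERROR(ENOMEM);
| |     avcodec_parameters_to_context(ctx, par); /* codec_tag + extradata */
| |     ret = avcodec_open2(ctx, codec, NULL);   /* runs decode_init() */
| |     if (ret >= 0)
| |         ret = avcodec_send_packet(ctx, pkt); /* runs decode_frame() */
| |     if (ret >= 0)
| |         ret = avcodec_receive_frame(ctx, out);
| |     avcodec_free_context(&ctx);
| |     return ret;
| | }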