/src/ffmpeg/libavcodec/mjpegenc.c
Line | Count | Source |
1 | | /* |
2 | | * MJPEG encoder |
3 | | * Copyright (c) 2000, 2001 Fabrice Bellard |
4 | | * Copyright (c) 2003 Alex Beregszaszi |
5 | | * Copyright (c) 2003-2004 Michael Niedermayer |
6 | | * |
7 | | * Support for external huffman table, various fixes (AVID workaround), |
8 | | * aspecting, new decode_frame mechanism and apple mjpeg-b support |
9 | | * by Alex Beregszaszi |
10 | | * |
11 | | * This file is part of FFmpeg. |
12 | | * |
13 | | * FFmpeg is free software; you can redistribute it and/or |
14 | | * modify it under the terms of the GNU Lesser General Public |
15 | | * License as published by the Free Software Foundation; either |
16 | | * version 2.1 of the License, or (at your option) any later version. |
17 | | * |
18 | | * FFmpeg is distributed in the hope that it will be useful, |
19 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
21 | | * Lesser General Public License for more details. |
22 | | * |
23 | | * You should have received a copy of the GNU Lesser General Public |
24 | | * License along with FFmpeg; if not, write to the Free Software |
25 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
26 | | */ |
27 | | |
28 | | /** |
29 | | * @file |
30 | | * MJPEG encoder. |
31 | | */ |
32 | | |
33 | | #include "config_components.h" |
34 | | |
35 | | #include "libavutil/mem.h" |
36 | | |
37 | | #include "avcodec.h" |
38 | | #include "codec_internal.h" |
39 | | #include "jpegtables.h" |
40 | | #include "mjpegenc_common.h" |
41 | | #include "mjpegenc_huffman.h" |
42 | | #include "mpegvideo.h" |
43 | | #include "mjpeg.h" |
44 | | #include "mjpegenc.h" |
45 | | #include "mpegvideoenc.h" |
46 | | #include "profiles.h" |
47 | | |
48 | | /** |
49 | | * Buffer of JPEG frame data. |
50 | | * |
51 | | * Optimal Huffman table generation requires the frame data to be loaded into |
52 | | * a buffer so that the tables can be computed. |
53 | | * There are at most mb_width*mb_height*12*64 of these per frame. |
54 | | */ |
55 | | typedef struct MJpegHuffmanCode { |
56 | | // 0=DC lum, 1=DC chrom, 2=AC lum, 3=AC chrom |
57 | | uint8_t table_id; ///< The Huffman table id associated with the data. |
58 | | uint8_t code; ///< The exponent. |
59 | | uint16_t mant; ///< The mantissa. |
60 | | } MJpegHuffmanCode; |
61 | | |
62 | | /* The following is the private context of MJPEG/AMV decoder. |
63 | | * Note that when using slice threading only the main thread's |
64 | | * MPVEncContext is followed by a MjpegContext; the other threads |
65 | | * can access this shared context via MPVEncContext.mjpeg. */ |
66 | | typedef struct MJPEGEncContext { |
67 | | MPVMainEncContext mpeg; |
68 | | MJpegContext mjpeg; |
69 | | } MJPEGEncContext; |
70 | | |
71 | | static av_cold void init_uni_ac_vlc(const uint8_t huff_size_ac[256], |
72 | | uint8_t *uni_ac_vlc_len) |
73 | 3.38k | { |
74 | 436k | for (int i = 0; i < 128; i++) { |
75 | 433k | int level = i - 64; |
76 | 433k | if (!level) |
77 | 3.38k | continue; |
78 | 27.9M | for (int run = 0; run < 64; run++) { |
79 | 27.5M | int len, code, nbits; |
80 | 27.5M | int alevel = FFABS(level); |
81 | | |
82 | 27.5M | len = (run >> 4) * huff_size_ac[0xf0]; |
83 | | |
84 | 27.5M | nbits= av_log2_16bit(alevel) + 1; |
85 | 27.5M | code = ((15&run) << 4) | nbits; |
86 | | |
87 | 27.5M | len += huff_size_ac[code] + nbits; |
88 | | |
89 | 27.5M | uni_ac_vlc_len[UNI_AC_ENC_INDEX(run, i)] = len; |
90 | | // We ignore EOB as its just a constant which does not change generally |
91 | 27.5M | } |
92 | 429k | } |
93 | 3.38k | } |
94 | | |
95 | | static void mjpeg_encode_picture_header(MPVEncContext *const s) |
96 | 936 | { |
97 | 936 | ff_mjpeg_encode_picture_header(s->c.avctx, &s->pb, s->c.cur_pic.ptr->f, s->mjpeg_ctx, |
98 | 936 | s->c.intra_scantable.permutated, 0, |
99 | 936 | s->c.intra_matrix, s->c.chroma_intra_matrix, |
100 | 936 | s->c.slice_context_count > 1); |
101 | | |
102 | 936 | s->esc_pos = put_bytes_count(&s->pb, 0); |
103 | 936 | for (int i = 1; i < s->c.slice_context_count; i++) |
104 | 0 | s->c.enc_contexts[i]->esc_pos = 0; |
105 | 936 | } |
106 | | |
107 | | static int mjpeg_amv_encode_picture_header(MPVMainEncContext *const m) |
108 | 936 | { |
109 | 936 | MJPEGEncContext *const m2 = (MJPEGEncContext*)m; |
110 | 936 | MPVEncContext *const s = &m->s; |
111 | 936 | av_assert2(s->mjpeg_ctx == &m2->mjpeg); |
112 | | /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */ |
113 | 936 | if (!CONFIG_MJPEG_ENCODER || m2->mjpeg.huffman != HUFFMAN_TABLE_OPTIMAL) |
114 | 338 | mjpeg_encode_picture_header(s); |
115 | | |
116 | 936 | return 0; |
117 | 936 | } |
118 | | |
119 | | #if CONFIG_MJPEG_ENCODER |
120 | | /** |
121 | | * Encodes and outputs the entire frame in the JPEG format. |
122 | | * |
123 | | * @param main The MPVMainEncContext. |
124 | | */ |
125 | | static void mjpeg_encode_picture_frame(MPVMainEncContext *const main) |
126 | 598 | { |
127 | 598 | MPVEncContext *const s = &main->s; |
128 | 598 | int nbits, code, table_id; |
129 | 598 | MJpegContext *m = s->mjpeg_ctx; |
130 | 598 | uint8_t *huff_size[4] = { m->huff_size_dc_luminance, |
131 | 598 | m->huff_size_dc_chrominance, |
132 | 598 | m->huff_size_ac_luminance, |
133 | 598 | m->huff_size_ac_chrominance }; |
134 | 598 | uint16_t *huff_code[4] = { m->huff_code_dc_luminance, |
135 | 598 | m->huff_code_dc_chrominance, |
136 | 598 | m->huff_code_ac_luminance, |
137 | 598 | m->huff_code_ac_chrominance }; |
138 | 598 | size_t total_bits = 0; |
139 | 598 | size_t bytes_needed; |
140 | | |
141 | 598 | main->header_bits = get_bits_diff(s); |
142 | | // Estimate the total size first |
143 | 29.5M | for (int i = 0; i < m->huff_ncode; i++) { |
144 | 29.5M | table_id = m->huff_buffer[i].table_id; |
145 | 29.5M | code = m->huff_buffer[i].code; |
146 | 29.5M | nbits = code & 0xf; |
147 | | |
148 | 29.5M | total_bits += huff_size[table_id][code] + nbits; |
149 | 29.5M | } |
150 | | |
151 | 598 | bytes_needed = (total_bits + 7) / 8; |
152 | 598 | ff_mpv_reallocate_putbitbuffer(s, bytes_needed, bytes_needed); |
153 | | |
154 | 29.5M | for (int i = 0; i < m->huff_ncode; i++) { |
155 | 29.5M | table_id = m->huff_buffer[i].table_id; |
156 | 29.5M | code = m->huff_buffer[i].code; |
157 | 29.5M | nbits = code & 0xf; |
158 | | |
159 | 29.5M | put_bits(&s->pb, huff_size[table_id][code], huff_code[table_id][code]); |
160 | 29.5M | if (nbits != 0) { |
161 | 21.6M | put_sbits(&s->pb, nbits, m->huff_buffer[i].mant); |
162 | 21.6M | } |
163 | 29.5M | } |
164 | | |
165 | 598 | m->huff_ncode = 0; |
166 | 598 | s->i_tex_bits = get_bits_diff(s); |
167 | 598 | } |
168 | | |
169 | | /** |
170 | | * Builds all 4 optimal Huffman tables. |
171 | | * |
172 | | * Uses the data stored in the JPEG buffer to compute the tables. |
173 | | * Stores the Huffman tables in the bits_* and val_* arrays in the MJpegContext. |
174 | | * |
175 | | * @param m MJpegContext containing the JPEG buffer. |
176 | | */ |
177 | | static void mjpeg_build_optimal_huffman(MJpegContext *m) |
178 | 598 | { |
179 | 598 | MJpegEncHuffmanContext dc_luminance_ctx; |
180 | 598 | MJpegEncHuffmanContext dc_chrominance_ctx; |
181 | 598 | MJpegEncHuffmanContext ac_luminance_ctx; |
182 | 598 | MJpegEncHuffmanContext ac_chrominance_ctx; |
183 | 598 | MJpegEncHuffmanContext *ctx[4] = { &dc_luminance_ctx, |
184 | 598 | &dc_chrominance_ctx, |
185 | 598 | &ac_luminance_ctx, |
186 | 598 | &ac_chrominance_ctx }; |
187 | 2.99k | for (int i = 0; i < 4; i++) |
188 | 2.39k | ff_mjpeg_encode_huffman_init(ctx[i]); |
189 | | |
190 | 29.5M | for (int i = 0; i < m->huff_ncode; i++) { |
191 | 29.5M | int table_id = m->huff_buffer[i].table_id; |
192 | 29.5M | int code = m->huff_buffer[i].code; |
193 | | |
194 | 29.5M | ff_mjpeg_encode_huffman_increment(ctx[table_id], code); |
195 | 29.5M | } |
196 | | |
197 | 598 | ff_mjpeg_encode_huffman_close(&dc_luminance_ctx, |
198 | 598 | m->bits_dc_luminance, |
199 | 598 | m->val_dc_luminance, 12); |
200 | 598 | ff_mjpeg_encode_huffman_close(&dc_chrominance_ctx, |
201 | 598 | m->bits_dc_chrominance, |
202 | 598 | m->val_dc_chrominance, 12); |
203 | 598 | ff_mjpeg_encode_huffman_close(&ac_luminance_ctx, |
204 | 598 | m->bits_ac_luminance, |
205 | 598 | m->val_ac_luminance, 256); |
206 | 598 | ff_mjpeg_encode_huffman_close(&ac_chrominance_ctx, |
207 | 598 | m->bits_ac_chrominance, |
208 | 598 | m->val_ac_chrominance, 256); |
209 | | |
210 | 598 | ff_mjpeg_build_huffman_codes(m->huff_size_dc_luminance, |
211 | 598 | m->huff_code_dc_luminance, |
212 | 598 | m->bits_dc_luminance, |
213 | 598 | m->val_dc_luminance); |
214 | 598 | ff_mjpeg_build_huffman_codes(m->huff_size_dc_chrominance, |
215 | 598 | m->huff_code_dc_chrominance, |
216 | 598 | m->bits_dc_chrominance, |
217 | 598 | m->val_dc_chrominance); |
218 | 598 | ff_mjpeg_build_huffman_codes(m->huff_size_ac_luminance, |
219 | 598 | m->huff_code_ac_luminance, |
220 | 598 | m->bits_ac_luminance, |
221 | 598 | m->val_ac_luminance); |
222 | 598 | ff_mjpeg_build_huffman_codes(m->huff_size_ac_chrominance, |
223 | 598 | m->huff_code_ac_chrominance, |
224 | 598 | m->bits_ac_chrominance, |
225 | 598 | m->val_ac_chrominance); |
226 | 598 | } |
227 | | #endif |
228 | | |
229 | | /** |
230 | | * Writes the complete JPEG frame when optimal huffman tables are enabled, |
231 | | * otherwise writes the stuffing. |
232 | | * |
233 | | * Header + values + stuffing. |
234 | | * |
235 | | * @param s The MPVEncContext. |
236 | | * @return int Error code, 0 if successful. |
237 | | */ |
238 | | int ff_mjpeg_encode_stuffing(MPVEncContext *const s) |
239 | 936 | { |
240 | 936 | MJpegContext *const m = s->mjpeg_ctx; |
241 | 936 | PutBitContext *pbc = &s->pb; |
242 | 936 | int mb_y = s->c.mb_y - !s->c.mb_x; |
243 | 936 | int ret; |
244 | | |
245 | 936 | #if CONFIG_MJPEG_ENCODER |
246 | 936 | if (m->huffman == HUFFMAN_TABLE_OPTIMAL) { |
247 | | /* HUFFMAN_TABLE_OPTIMAL is incompatible with slice threading, |
248 | | * therefore the following cast is allowed. */ |
249 | 598 | MPVMainEncContext *const main = (MPVMainEncContext*)s; |
250 | | |
251 | 598 | mjpeg_build_optimal_huffman(m); |
252 | | |
253 | | // Replace the VLCs with the optimal ones. |
254 | | // The default ones may be used for trellis during quantization. |
255 | 598 | init_uni_ac_vlc(m->huff_size_ac_luminance, m->uni_ac_vlc_len); |
256 | 598 | init_uni_ac_vlc(m->huff_size_ac_chrominance, m->uni_chroma_ac_vlc_len); |
257 | 598 | s->intra_ac_vlc_length = |
258 | 598 | s->intra_ac_vlc_last_length = m->uni_ac_vlc_len; |
259 | 598 | s->intra_chroma_ac_vlc_length = |
260 | 598 | s->intra_chroma_ac_vlc_last_length = m->uni_chroma_ac_vlc_len; |
261 | | |
262 | 598 | mjpeg_encode_picture_header(s); |
263 | 598 | mjpeg_encode_picture_frame(main); |
264 | 598 | } |
265 | 936 | #endif |
266 | | |
267 | 936 | ret = ff_mpv_reallocate_putbitbuffer(s, put_bits_count(&s->pb) / 8 + 100, |
268 | 936 | put_bits_count(&s->pb) / 4 + 1000); |
269 | 936 | if (ret < 0) { |
270 | 0 | av_log(s->c.avctx, AV_LOG_ERROR, "Buffer reallocation failed\n"); |
271 | 0 | goto fail; |
272 | 0 | } |
273 | | |
274 | 936 | ff_mjpeg_escape_FF(pbc, s->esc_pos); |
275 | | |
276 | 936 | if (s->c.slice_context_count > 1 && mb_y < s->c.mb_height - 1) |
277 | 0 | put_marker(pbc, RST0 + (mb_y&7)); |
278 | 936 | s->esc_pos = put_bytes_count(pbc, 0); |
279 | | |
280 | 936 | fail: |
281 | 3.74k | for (int i = 0; i < 3; i++) |
282 | 2.80k | s->last_dc[i] = 128 << s->c.intra_dc_precision; |
283 | | |
284 | 936 | return ret; |
285 | 936 | } |
286 | | |
287 | | static int alloc_huffman(MJPEGEncContext *const m2) |
288 | 627 | { |
289 | 627 | MJpegContext *const m = &m2->mjpeg; |
290 | 627 | MPVEncContext *const s = &m2->mpeg.s; |
291 | 627 | static const char blocks_per_mb[] = { |
292 | 627 | [CHROMA_420] = 6, [CHROMA_422] = 8, [CHROMA_444] = 12 |
293 | 627 | }; |
294 | 627 | size_t num_blocks; |
295 | | |
296 | | // Make sure we have enough space to hold this frame. |
297 | 627 | num_blocks = s->c.mb_num * blocks_per_mb[s->c.chroma_format]; |
298 | | |
299 | 627 | m->huff_buffer = av_malloc_array(num_blocks, |
300 | 627 | 64 /* codes per MB */ * sizeof(MJpegHuffmanCode)); |
301 | 627 | if (!m->huff_buffer) |
302 | 0 | return AVERROR(ENOMEM); |
303 | 627 | return 0; |
304 | 627 | } |
305 | | |
306 | | static av_cold int mjpeg_encode_close(AVCodecContext *avctx) |
307 | 1.10k | { |
308 | 1.10k | MJPEGEncContext *const mjpeg = avctx->priv_data; |
309 | 1.10k | av_freep(&mjpeg->mjpeg.huff_buffer); |
310 | 1.10k | ff_mpv_encode_end(avctx); |
311 | 1.10k | return 0; |
312 | 1.10k | } |
313 | | |
314 | | /** |
315 | | * Add code and table_id to the JPEG buffer. |
316 | | * |
317 | | * @param s The MJpegContext which contains the JPEG buffer. |
318 | | * @param table_id Which Huffman table the code belongs to. |
319 | | * @param code The encoded exponent of the coefficients and the run-bits. |
320 | | */ |
321 | | static inline void mjpeg_encode_code(MJpegContext *s, uint8_t table_id, int code) |
322 | 29.5M | { |
323 | 29.5M | MJpegHuffmanCode *c = &s->huff_buffer[s->huff_ncode++]; |
324 | 29.5M | c->table_id = table_id; |
325 | 29.5M | c->code = code; |
326 | 29.5M | } |
327 | | |
328 | | /** |
329 | | * Add the coefficient's data to the JPEG buffer. |
330 | | * |
331 | | * @param s The MJpegContext which contains the JPEG buffer. |
332 | | * @param table_id Which Huffman table the code belongs to. |
333 | | * @param val The coefficient. |
334 | | * @param run The run-bits. |
335 | | */ |
336 | | static void mjpeg_encode_coef(MJpegContext *s, uint8_t table_id, int val, int run) |
337 | 25.4M | { |
338 | 25.4M | int mant, code; |
339 | | |
340 | 25.4M | if (val == 0) { |
341 | 3.81M | av_assert0(run == 0); |
342 | 3.81M | mjpeg_encode_code(s, table_id, 0); |
343 | 21.6M | } else { |
344 | 21.6M | mant = val; |
345 | 21.6M | if (val < 0) { |
346 | 10.7M | val = -val; |
347 | 10.7M | mant--; |
348 | 10.7M | } |
349 | | |
350 | 21.6M | code = (run << 4) | (av_log2_16bit(val) + 1); |
351 | | |
352 | 21.6M | s->huff_buffer[s->huff_ncode].mant = mant; |
353 | 21.6M | mjpeg_encode_code(s, table_id, code); |
354 | 21.6M | } |
355 | 25.4M | } |
356 | | |
357 | | /** |
358 | | * Add the block's data into the JPEG buffer. |
359 | | * |
360 | | * @param s The MPVEncContext that contains the JPEG buffer. |
361 | | * @param block The block. |
362 | | * @param n The block's index or number. |
363 | | */ |
364 | | static void record_block(MPVEncContext *const s, int16_t block[], int n) |
365 | 4.28M | { |
366 | 4.28M | int i, j, table_id; |
367 | 4.28M | int component, dc, last_index, val, run; |
368 | 4.28M | MJpegContext *m = s->mjpeg_ctx; |
369 | | |
370 | | /* DC coef */ |
371 | 4.28M | component = (n <= 3 ? 0 : (n&1) + 1); |
372 | 4.28M | table_id = (n <= 3 ? 0 : 1); |
373 | 4.28M | dc = block[0]; /* overflow is impossible */ |
374 | 4.28M | val = dc - s->last_dc[component]; |
375 | | |
376 | 4.28M | mjpeg_encode_coef(m, table_id, val, 0); |
377 | | |
378 | 4.28M | s->last_dc[component] = dc; |
379 | | |
380 | | /* AC coefs */ |
381 | | |
382 | 4.28M | run = 0; |
383 | 4.28M | last_index = s->c.block_last_index[n]; |
384 | 4.28M | table_id |= 2; |
385 | | |
386 | 32.3M | for(i=1;i<=last_index;i++) { |
387 | 28.0M | j = s->c.intra_scantable.permutated[i]; |
388 | 28.0M | val = block[j]; |
389 | | |
390 | 28.0M | if (val == 0) { |
391 | 6.88M | run++; |
392 | 21.1M | } else { |
393 | 21.1M | while (run >= 16) { |
394 | 1.87k | mjpeg_encode_code(m, table_id, 0xf0); |
395 | 1.87k | run -= 16; |
396 | 1.87k | } |
397 | 21.1M | mjpeg_encode_coef(m, table_id, val, run); |
398 | 21.1M | run = 0; |
399 | 21.1M | } |
400 | 28.0M | } |
401 | | |
402 | | /* output EOB only if not already 64 values */ |
403 | 4.28M | if (last_index < 63 || run != 0) |
404 | 4.13M | mjpeg_encode_code(m, table_id, 0); |
405 | 4.28M | } |
406 | | |
407 | | static void encode_block(MPVEncContext *const s, int16_t block[], int n) |
408 | 2.14M | { |
409 | 2.14M | int mant, nbits, code, i, j; |
410 | 2.14M | int component, dc, run, last_index, val; |
411 | 2.14M | const MJpegContext *const m = s->mjpeg_ctx; |
412 | 2.14M | const uint16_t *huff_code_ac; |
413 | 2.14M | const uint8_t *huff_size_ac; |
414 | | |
415 | | /* DC coef */ |
416 | 2.14M | component = (n <= 3 ? 0 : (n&1) + 1); |
417 | 2.14M | dc = block[0]; /* overflow is impossible */ |
418 | 2.14M | val = dc - s->last_dc[component]; |
419 | 2.14M | if (n < 4) { |
420 | 1.42M | ff_mjpeg_encode_dc(&s->pb, val, m->huff_size_dc_luminance, m->huff_code_dc_luminance); |
421 | 1.42M | huff_size_ac = m->huff_size_ac_luminance; |
422 | 1.42M | huff_code_ac = m->huff_code_ac_luminance; |
423 | 1.42M | } else { |
424 | 713k | ff_mjpeg_encode_dc(&s->pb, val, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance); |
425 | 713k | huff_size_ac = m->huff_size_ac_chrominance; |
426 | 713k | huff_code_ac = m->huff_code_ac_chrominance; |
427 | 713k | } |
428 | 2.14M | s->last_dc[component] = dc; |
429 | | |
430 | | /* AC coefs */ |
431 | | |
432 | 2.14M | run = 0; |
433 | 2.14M | last_index = s->c.block_last_index[n]; |
434 | 12.4M | for(i=1;i<=last_index;i++) { |
435 | 10.3M | j = s->c.intra_scantable.permutated[i]; |
436 | 10.3M | val = block[j]; |
437 | 10.3M | if (val == 0) { |
438 | 2.75M | run++; |
439 | 7.56M | } else { |
440 | 7.56M | while (run >= 16) { |
441 | 1.44k | put_bits(&s->pb, huff_size_ac[0xf0], huff_code_ac[0xf0]); |
442 | 1.44k | run -= 16; |
443 | 1.44k | } |
444 | 7.56M | mant = val; |
445 | 7.56M | if (val < 0) { |
446 | 3.78M | val = -val; |
447 | 3.78M | mant--; |
448 | 3.78M | } |
449 | | |
450 | 7.56M | nbits= av_log2_16bit(val) + 1; |
451 | 7.56M | code = (run << 4) | nbits; |
452 | | |
453 | 7.56M | put_bits(&s->pb, huff_size_ac[code], huff_code_ac[code]); |
454 | | |
455 | 7.56M | put_sbits(&s->pb, nbits, mant); |
456 | 7.56M | run = 0; |
457 | 7.56M | } |
458 | 10.3M | } |
459 | | |
460 | | /* output EOB only if not already 64 values */ |
461 | 2.14M | if (last_index < 63 || run != 0) |
462 | 2.07M | put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]); |
463 | 2.14M | } |
464 | | |
465 | | static void mjpeg_record_mb(MPVEncContext *const s, int16_t block[][64], |
466 | | int unused_x, int unused_y) |
467 | 555k | { |
468 | 555k | if (s->c.chroma_format == CHROMA_444) { |
469 | 190k | record_block(s, block[0], 0); |
470 | 190k | record_block(s, block[2], 2); |
471 | 190k | record_block(s, block[4], 4); |
472 | 190k | record_block(s, block[8], 8); |
473 | 190k | record_block(s, block[5], 5); |
474 | 190k | record_block(s, block[9], 9); |
475 | | |
476 | 190k | if (16*s->c.mb_x+8 < s->c.width) { |
477 | 90.7k | record_block(s, block[1], 1); |
478 | 90.7k | record_block(s, block[3], 3); |
479 | 90.7k | record_block(s, block[6], 6); |
480 | 90.7k | record_block(s, block[10], 10); |
481 | 90.7k | record_block(s, block[7], 7); |
482 | 90.7k | record_block(s, block[11], 11); |
483 | 90.7k | } |
484 | 365k | } else { |
485 | 2.19M | for (int i = 0; i < 5; i++) |
486 | 1.82M | record_block(s, block[i], i); |
487 | 365k | if (s->c.chroma_format == CHROMA_420) { |
488 | 161k | record_block(s, block[5], 5); |
489 | 203k | } else { |
490 | 203k | record_block(s, block[6], 6); |
491 | 203k | record_block(s, block[5], 5); |
492 | 203k | record_block(s, block[7], 7); |
493 | 203k | } |
494 | 365k | } |
495 | 555k | } |
496 | | |
497 | | static void mjpeg_encode_mb(MPVEncContext *const s, int16_t block[][64], |
498 | | int unused_x, int unused_y) |
499 | 356k | { |
500 | 356k | if (s->c.chroma_format == CHROMA_444) { |
501 | 0 | encode_block(s, block[0], 0); |
502 | 0 | encode_block(s, block[2], 2); |
503 | 0 | encode_block(s, block[4], 4); |
504 | 0 | encode_block(s, block[8], 8); |
505 | 0 | encode_block(s, block[5], 5); |
506 | 0 | encode_block(s, block[9], 9); |
507 | |
|
508 | 0 | if (16 * s->c.mb_x + 8 < s->c.width) { |
509 | 0 | encode_block(s, block[1], 1); |
510 | 0 | encode_block(s, block[3], 3); |
511 | 0 | encode_block(s, block[6], 6); |
512 | 0 | encode_block(s, block[10], 10); |
513 | 0 | encode_block(s, block[7], 7); |
514 | 0 | encode_block(s, block[11], 11); |
515 | 0 | } |
516 | 356k | } else { |
517 | 2.14M | for (int i = 0; i < 5; i++) |
518 | 1.78M | encode_block(s, block[i], i); |
519 | 356k | if (s->c.chroma_format == CHROMA_420) { |
520 | 356k | encode_block(s, block[5], 5); |
521 | 356k | } else { |
522 | 0 | encode_block(s, block[6], 6); |
523 | 0 | encode_block(s, block[5], 5); |
524 | 0 | encode_block(s, block[7], 7); |
525 | 0 | } |
526 | 356k | } |
527 | | |
528 | 356k | s->i_tex_bits += get_bits_diff(s); |
529 | 356k | } |
530 | | |
531 | | static av_cold int mjpeg_encode_init(AVCodecContext *avctx) |
532 | 1.10k | { |
533 | 1.10k | MJPEGEncContext *const m2 = avctx->priv_data; |
534 | 1.10k | MJpegContext *const m = &m2->mjpeg; |
535 | 1.10k | MPVEncContext *const s = &m2->mpeg.s; |
536 | 1.10k | int ret; |
537 | | |
538 | 1.10k | s->mjpeg_ctx = m; |
539 | 1.10k | m2->mpeg.encode_picture_header = mjpeg_amv_encode_picture_header; |
540 | | // May be overridden below |
541 | 1.10k | s->encode_mb = mjpeg_encode_mb; |
542 | | |
543 | 1.10k | if (s->mpv_flags & FF_MPV_FLAG_QP_RD) { |
544 | | // Used to produce garbage with MJPEG. |
545 | 0 | av_log(avctx, AV_LOG_ERROR, |
546 | 0 | "QP RD is no longer compatible with MJPEG or AMV\n"); |
547 | 0 | return AVERROR(EINVAL); |
548 | 0 | } |
549 | | |
550 | | /* The following check is automatically true for AMV, |
551 | | * but it doesn't hurt either. */ |
552 | 1.10k | ret = ff_mjpeg_encode_check_pix_fmt(avctx); |
553 | 1.10k | if (ret < 0) |
554 | 6 | return ret; |
555 | | |
556 | 1.10k | if (avctx->width > 65500 || avctx->height > 65500) { |
557 | 6 | av_log(avctx, AV_LOG_ERROR, "JPEG does not support resolutions above 65500x65500\n"); |
558 | 6 | return AVERROR(EINVAL); |
559 | 6 | } |
560 | | |
561 | | // Build default Huffman tables. |
562 | | // These may be overwritten later with more optimal Huffman tables, but |
563 | | // they are needed at least right now for some processes like trellis. |
564 | 1.09k | ff_mjpeg_build_huffman_codes(m->huff_size_dc_luminance, |
565 | 1.09k | m->huff_code_dc_luminance, |
566 | 1.09k | ff_mjpeg_bits_dc_luminance, |
567 | 1.09k | ff_mjpeg_val_dc); |
568 | 1.09k | ff_mjpeg_build_huffman_codes(m->huff_size_dc_chrominance, |
569 | 1.09k | m->huff_code_dc_chrominance, |
570 | 1.09k | ff_mjpeg_bits_dc_chrominance, |
571 | 1.09k | ff_mjpeg_val_dc); |
572 | 1.09k | ff_mjpeg_build_huffman_codes(m->huff_size_ac_luminance, |
573 | 1.09k | m->huff_code_ac_luminance, |
574 | 1.09k | ff_mjpeg_bits_ac_luminance, |
575 | 1.09k | ff_mjpeg_val_ac_luminance); |
576 | 1.09k | ff_mjpeg_build_huffman_codes(m->huff_size_ac_chrominance, |
577 | 1.09k | m->huff_code_ac_chrominance, |
578 | 1.09k | ff_mjpeg_bits_ac_chrominance, |
579 | 1.09k | ff_mjpeg_val_ac_chrominance); |
580 | | |
581 | 1.09k | init_uni_ac_vlc(m->huff_size_ac_luminance, m->uni_ac_vlc_len); |
582 | 1.09k | init_uni_ac_vlc(m->huff_size_ac_chrominance, m->uni_chroma_ac_vlc_len); |
583 | | |
584 | 1.09k | s->min_qcoeff = -1023; |
585 | 1.09k | s->max_qcoeff = 1023; |
586 | | |
587 | 1.09k | s->intra_ac_vlc_length = |
588 | 1.09k | s->intra_ac_vlc_last_length = m->uni_ac_vlc_len; |
589 | 1.09k | s->intra_chroma_ac_vlc_length = |
590 | 1.09k | s->intra_chroma_ac_vlc_last_length = m->uni_chroma_ac_vlc_len; |
591 | | |
592 | 1.09k | ret = ff_mpv_encode_init(avctx); |
593 | 1.09k | if (ret < 0) |
594 | 90 | return ret; |
595 | | |
596 | | // Buffers start out empty. |
597 | 1.00k | m->huff_ncode = 0; |
598 | | |
599 | 1.00k | if (s->c.slice_context_count > 1) |
600 | 0 | m->huffman = HUFFMAN_TABLE_DEFAULT; |
601 | | |
602 | 1.00k | if (m->huffman == HUFFMAN_TABLE_OPTIMAL) { |
603 | | // If we are here, we have only one slice_context. So no loop necessary. |
604 | 627 | s->encode_mb = mjpeg_record_mb; |
605 | 627 | return alloc_huffman(m2); |
606 | 627 | } |
607 | | |
608 | 377 | return 0; |
609 | 1.00k | } |
610 | | |
611 | | #if CONFIG_AMV_ENCODER |
612 | | // maximum over s->mjpeg_vsample[i] |
613 | 1.15k | #define V_MAX 2 |
614 | | static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, |
615 | | const AVFrame *pic_arg, int *got_packet) |
616 | 396 | { |
617 | 396 | MPVEncContext *const s = avctx->priv_data; |
618 | 396 | AVFrame *pic; |
619 | 396 | int i, ret; |
620 | 396 | int chroma_v_shift = 1; /* AMV is 420-only */ |
621 | | |
622 | 396 | if ((avctx->height & 15) && avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) { |
623 | 12 | av_log(avctx, AV_LOG_ERROR, |
624 | 12 | "Heights which are not a multiple of 16 might fail with some decoders, " |
625 | 12 | "use vstrict=-1 / -strict -1 to use %d anyway.\n", avctx->height); |
626 | 12 | av_log(avctx, AV_LOG_WARNING, "If you have a device that plays AMV videos, please test if videos " |
627 | 12 | "with such heights work with it and report your findings to ffmpeg-devel@ffmpeg.org\n"); |
628 | 12 | return AVERROR_EXPERIMENTAL; |
629 | 12 | } |
630 | | |
631 | 384 | pic = av_frame_clone(pic_arg); |
632 | 384 | if (!pic) |
633 | 0 | return AVERROR(ENOMEM); |
634 | | //picture should be flipped upside-down |
635 | 1.53k | for(i=0; i < 3; i++) { |
636 | 1.15k | int vsample = i ? 2 >> chroma_v_shift : 2; |
637 | 1.15k | pic->data[i] += pic->linesize[i] * (vsample * s->c.height / V_MAX - 1); |
638 | 1.15k | pic->linesize[i] *= -1; |
639 | 1.15k | } |
640 | 384 | ret = ff_mpv_encode_picture(avctx, pkt, pic, got_packet); |
641 | 384 | av_frame_free(&pic); |
642 | 384 | return ret; |
643 | 384 | } |
644 | | #endif |
645 | | |
646 | | #define OFFSET(x) offsetof(MJPEGEncContext, mjpeg.x) |
647 | | #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM |
648 | | static const AVOption options[] = { |
649 | | #define AMV_OPTIONS_OFFSET 4 |
650 | | { "huffman", "Huffman table strategy", OFFSET(huffman), AV_OPT_TYPE_INT, { .i64 = HUFFMAN_TABLE_OPTIMAL }, 0, NB_HUFFMAN_TABLE_OPTION - 1, VE, .unit = "huffman" }, |
651 | | { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = HUFFMAN_TABLE_DEFAULT }, INT_MIN, INT_MAX, VE, .unit = "huffman" }, |
652 | | { "optimal", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = HUFFMAN_TABLE_OPTIMAL }, INT_MIN, INT_MAX, VE, .unit = "huffman" }, |
653 | | { "force_duplicated_matrix", "Always write luma and chroma matrix for mjpeg, useful for rtp streaming.", OFFSET(force_duplicated_matrix), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, VE }, |
654 | | FF_MPV_COMMON_OPTS |
655 | | { NULL}, |
656 | | }; |
657 | | |
658 | | #if CONFIG_MJPEG_ENCODER |
659 | | static const AVClass mjpeg_class = { |
660 | | .class_name = "mjpeg encoder", |
661 | | .item_name = av_default_item_name, |
662 | | .option = options, |
663 | | .version = LIBAVUTIL_VERSION_INT, |
664 | | }; |
665 | | |
666 | | static int mjpeg_get_supported_config(const AVCodecContext *avctx, |
667 | | const AVCodec *codec, |
668 | | enum AVCodecConfig config, |
669 | | unsigned flags, const void **out, |
670 | | int *out_num) |
671 | 680 | { |
672 | 680 | if (config == AV_CODEC_CONFIG_COLOR_RANGE) { |
673 | 0 | static const enum AVColorRange mjpeg_ranges[] = { |
674 | 0 | AVCOL_RANGE_MPEG, AVCOL_RANGE_JPEG, AVCOL_RANGE_UNSPECIFIED, |
675 | 0 | }; |
676 | 0 | int strict = avctx ? avctx->strict_std_compliance : 0; |
677 | 0 | int index = strict > FF_COMPLIANCE_UNOFFICIAL ? 1 : 0; |
678 | 0 | *out = &mjpeg_ranges[index]; |
679 | 0 | *out_num = FF_ARRAY_ELEMS(mjpeg_ranges) - index - 1; |
680 | 0 | return 0; |
681 | 0 | } |
682 | | |
683 | 680 | return ff_default_get_supported_config(avctx, codec, config, flags, out, out_num); |
684 | 680 | } |
685 | | |
686 | | const FFCodec ff_mjpeg_encoder = { |
687 | | .p.name = "mjpeg", |
688 | | CODEC_LONG_NAME("MJPEG (Motion JPEG)"), |
689 | | .p.type = AVMEDIA_TYPE_VIDEO, |
690 | | .p.id = AV_CODEC_ID_MJPEG, |
691 | | .priv_data_size = sizeof(MJPEGEncContext), |
692 | | .init = mjpeg_encode_init, |
693 | | FF_CODEC_ENCODE_CB(ff_mpv_encode_picture), |
694 | | .close = mjpeg_encode_close, |
695 | | .p.capabilities = AV_CODEC_CAP_DR1 | |
696 | | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS | |
697 | | AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, |
698 | | .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_ICC_PROFILES, |
699 | | CODEC_PIXFMTS(AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, |
700 | | AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P), |
701 | | .p.priv_class = &mjpeg_class, |
702 | | .p.profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles), |
703 | | .get_supported_config = mjpeg_get_supported_config, |
704 | | }; |
705 | | #endif |
706 | | |
707 | | #if CONFIG_AMV_ENCODER |
708 | | static const AVClass amv_class = { |
709 | | .class_name = "amv encoder", |
710 | | .item_name = av_default_item_name, |
711 | | .option = options + AMV_OPTIONS_OFFSET, |
712 | | .version = LIBAVUTIL_VERSION_INT, |
713 | | }; |
714 | | |
715 | | const FFCodec ff_amv_encoder = { |
716 | | .p.name = "amv", |
717 | | CODEC_LONG_NAME("AMV Video"), |
718 | | .p.type = AVMEDIA_TYPE_VIDEO, |
719 | | .p.id = AV_CODEC_ID_AMV, |
720 | | .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, |
721 | | .priv_data_size = sizeof(MJPEGEncContext), |
722 | | .init = mjpeg_encode_init, |
723 | | FF_CODEC_ENCODE_CB(amv_encode_picture), |
724 | | .close = mjpeg_encode_close, |
725 | | .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, |
726 | | CODEC_PIXFMTS(AV_PIX_FMT_YUVJ420P), |
727 | | .color_ranges = AVCOL_RANGE_JPEG, |
728 | | .p.priv_class = &amv_class, |
729 | | }; |
730 | | #endif |