/src/ffmpeg/libavcodec/mpegvideo.c
Line | Count | Source |
1 | | /* |
2 | | * The simplest mpeg encoder (well, it was the simplest!) |
3 | | * Copyright (c) 2000,2001 Fabrice Bellard |
4 | | * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> |
5 | | * |
6 | | * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at> |
7 | | * |
8 | | * This file is part of FFmpeg. |
9 | | * |
10 | | * FFmpeg is free software; you can redistribute it and/or |
11 | | * modify it under the terms of the GNU Lesser General Public |
12 | | * License as published by the Free Software Foundation; either |
13 | | * version 2.1 of the License, or (at your option) any later version. |
14 | | * |
15 | | * FFmpeg is distributed in the hope that it will be useful, |
16 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
18 | | * Lesser General Public License for more details. |
19 | | * |
20 | | * You should have received a copy of the GNU Lesser General Public |
21 | | * License along with FFmpeg; if not, write to the Free Software |
22 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
23 | | */ |
24 | | |
25 | | /** |
26 | | * @file |
27 | | * The simplest mpeg encoder (well, it was the simplest!). |
28 | | */ |
29 | | |
30 | | #include "libavutil/attributes.h" |
31 | | #include "libavutil/avassert.h" |
32 | | #include "libavutil/imgutils.h" |
33 | | #include "libavutil/internal.h" |
34 | | #include "libavutil/intreadwrite.h" |
35 | | #include "libavutil/mem.h" |
36 | | |
37 | | #include "avcodec.h" |
38 | | #include "blockdsp.h" |
39 | | #include "idctdsp.h" |
40 | | #include "mathops.h" |
41 | | #include "mpeg_er.h" |
42 | | #include "mpegutils.h" |
43 | | #include "mpegvideo.h" |
44 | | #include "mpegvideodata.h" |
45 | | #include "libavutil/refstruct.h" |
46 | | |
47 | | |
48 | | static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h) |
49 | 0 | { |
50 | 0 | while(h--) |
51 | 0 | memset(dst + h*linesize, 128, 16); |
52 | 0 | } |
53 | | |
54 | | static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h) |
55 | 0 | { |
56 | 0 | while(h--) |
57 | 0 | memset(dst + h*linesize, 128, 8); |
58 | 0 | } |
59 | | |
60 | | /* init common dct for both encoder and decoder */ |
61 | | static av_cold void dsp_init(MpegEncContext *s) |
62 | 259k | { |
63 | 259k | ff_blockdsp_init(&s->bdsp); |
64 | 259k | ff_hpeldsp_init(&s->hdsp, s->avctx->flags); |
65 | 259k | ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample); |
66 | | |
67 | 259k | if (s->avctx->debug & FF_DEBUG_NOMC) { |
68 | 0 | int i; |
69 | 0 | for (i=0; i<4; i++) { |
70 | 0 | s->hdsp.avg_pixels_tab[0][i] = gray16; |
71 | 0 | s->hdsp.put_pixels_tab[0][i] = gray16; |
72 | 0 | s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16; |
  73 |      |
74 | 0 | s->hdsp.avg_pixels_tab[1][i] = gray8; |
75 | 0 | s->hdsp.put_pixels_tab[1][i] = gray8; |
76 | 0 | s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8; |
77 | 0 | } |
78 | 0 | } |
79 | 259k | } |
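As a hedged illustration of how the gray16()/gray8() substitution above gets triggered (the setup calls below are an assumption, not taken from this file): setting FF_DEBUG_NOMC on the codec context before opening it makes dsp_init() route every hpel put/avg call to the gray functions, so motion-compensated blocks render as flat mid-gray (128) and only intra/residual data stays visible.

    AVCodecContext *avctx = avcodec_alloc_context3(codec); /* codec: any mpegvideo-based decoder */
    avctx->debug |= FF_DEBUG_NOMC;                          /* flag defined in libavcodec/avcodec.h */
    avcodec_open2(avctx, codec, NULL);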
80 | | |
81 | | av_cold void ff_mpv_idct_init(MpegEncContext *s) |
82 | 182k | { |
83 | 182k | if (s->codec_id == AV_CODEC_ID_MPEG4) |
84 | 37.9k | s->idsp.mpeg4_studio_profile = s->studio_profile; |
85 | 182k | ff_idctdsp_init(&s->idsp, s->avctx); |
86 | | |
87 | | /* load & permutate scantables |
88 | | * note: only wmv uses different ones |
89 | | */ |
90 | 182k | if (s->alternate_scan) { |
91 | 5.10k | ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); |
92 | 5.10k | ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); |
93 | 177k | } else { |
94 | 177k | ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct); |
95 | 177k | ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); |
96 | 177k | } |
97 | 182k | } |
98 | | |
99 | | av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s) |
100 | 415k | { |
101 | 415k | const int nb_slices = s->slice_context_count; |
102 | 415k | const size_t slice_size = s->slice_ctx_size; |
103 | | |
104 | 415k | for (int i = 1; i < nb_slices; i++) { |
105 | 0 | s->thread_context[i] = av_memdup(s, slice_size); |
106 | 0 | if (!s->thread_context[i]) |
107 | 0 | return AVERROR(ENOMEM); |
108 | 0 | s->thread_context[i]->start_mb_y = |
109 | 0 | (s->mb_height * (i ) + nb_slices / 2) / nb_slices; |
110 | 0 | s->thread_context[i]->end_mb_y = |
111 | 0 | (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices; |
112 | 0 | } |
113 | 415k | s->start_mb_y = 0; |
114 | 415k | s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices |
115 | 415k | : s->mb_height; |
116 | 415k | return 0; |
117 | 415k | } |
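A worked example of the macroblock-row split above (the numbers are illustrative, not taken from this report): with mb_height = 30 and nb_slices = 4, the rounding expression yields the boundaries 0, 8, 15, 23, 30, i.e. slices of 8, 7, 8 and 7 rows. A minimal standalone sketch:

    #include <stdio.h>

    int main(void)
    {
        int mb_height = 30, nb_slices = 4;                  /* made-up values */
        for (int i = 0; i <= nb_slices; i++)                /* same rounding as above */
            printf("%d ", (mb_height * i + nb_slices / 2) / nb_slices);
        return 0;                                           /* prints: 0 8 15 23 30 */
    }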
118 | | |
119 | | static av_cold void free_duplicate_context(MpegEncContext *s) |
120 | 464k | { |
121 | 464k | if (!s) |
122 | 0 | return; |
123 | | |
124 | 464k | av_freep(&s->sc.edge_emu_buffer); |
125 | 464k | av_freep(&s->sc.scratchpad_buf); |
126 | 464k | s->sc.obmc_scratchpad = NULL; |
127 | 464k | s->sc.linesize = 0; |
128 | 464k | } |
129 | | |
130 | | static av_cold void free_duplicate_contexts(MpegEncContext *s) |
131 | 464k | { |
132 | 464k | for (int i = 1; i < s->slice_context_count; i++) { |
133 | 0 | free_duplicate_context(s->thread_context[i]); |
134 | 0 | av_freep(&s->thread_context[i]); |
135 | 0 | } |
136 | 464k | free_duplicate_context(s); |
137 | 464k | } |
138 | | |
139 | | int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src) |
140 | 0 | { |
141 | 0 | #define COPY(M) \ |
142 | 0 | M(ScratchpadContext, sc) \ |
143 | 0 | M(int, start_mb_y) \ |
144 | 0 | M(int, end_mb_y) \ |
145 | 0 | M(int16_t*, dc_val) \ |
146 | 0 | M(void*, ac_val) |
 147 |      |
148 | 0 | int ret; |
149 | | // FIXME copy only needed parts |
150 | 0 | #define BACKUP(T, member) T member = dst->member; |
151 | 0 | COPY(BACKUP) |
152 | 0 | memcpy(dst, src, sizeof(MpegEncContext)); |
153 | 0 | #define RESTORE(T, member) dst->member = member; |
154 | 0 | COPY(RESTORE) |
 155 |      |
156 | 0 | ret = ff_mpv_framesize_alloc(dst->avctx, &dst->sc, dst->linesize); |
157 | 0 | if (ret < 0) { |
158 | 0 | av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context " |
159 | 0 | "scratch buffers.\n"); |
160 | 0 | return ret; |
161 | 0 | } |
162 | 0 | return 0; |
163 | 0 | } |
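The function above backs up the per-slice members named in COPY(), bulk-copies the whole source context, then restores the saved members. A hedged, self-contained sketch of the same backup/copy/restore pattern, using an invented struct rather than MpegEncContext:

    typedef struct Ctx { int start_mb_y, end_mb_y; int shared_state; } Ctx;

    static void update_duplicate(Ctx *dst, const Ctx *src)
    {
        int start = dst->start_mb_y, end = dst->end_mb_y;   /* back up per-slice fields   */
        *dst = *src;                                        /* bulk-copy the shared state */
        dst->start_mb_y = start;                            /* restore per-slice fields   */
        dst->end_mb_y   = end;
    }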
164 | | |
165 | | /** |
166 | | * Set the given MpegEncContext to common defaults |
167 | | * (same for encoding and decoding). |
168 | | * The changed fields will not depend upon the |
169 | | * prior state of the MpegEncContext. |
170 | | */ |
171 | | av_cold void ff_mpv_common_defaults(MpegEncContext *s) |
172 | 157k | { |
173 | 157k | s->chroma_qscale_table = ff_default_chroma_qscale_table; |
174 | 157k | s->progressive_frame = 1; |
175 | 157k | s->progressive_sequence = 1; |
176 | 157k | s->picture_structure = PICT_FRAME; |
177 | | |
178 | 157k | s->slice_context_count = 1; |
179 | 157k | } |
180 | | |
181 | | static av_cold void free_buffer_pools(BufferPoolContext *pools) |
182 | 464k | { |
183 | 464k | av_refstruct_pool_uninit(&pools->mbskip_table_pool); |
184 | 464k | av_refstruct_pool_uninit(&pools->qscale_table_pool); |
185 | 464k | av_refstruct_pool_uninit(&pools->mb_type_pool); |
186 | 464k | av_refstruct_pool_uninit(&pools->motion_val_pool); |
187 | 464k | av_refstruct_pool_uninit(&pools->ref_index_pool); |
188 | 464k | pools->alloc_mb_height = pools->alloc_mb_width = pools->alloc_mb_stride = 0; |
189 | 464k | } |
190 | | |
191 | | av_cold int ff_mpv_init_context_frame(MpegEncContext *s) |
192 | 415k | { |
193 | 415k | int nb_slices = (HAVE_THREADS && |
194 | 415k | s->avctx->active_thread_type & FF_THREAD_SLICE) ? |
195 | 415k | s->avctx->thread_count : 1; |
196 | 415k | BufferPoolContext *const pools = &s->buffer_pools; |
197 | 415k | int y_size, c_size, yc_size, mb_array_size, mv_table_size, x, y; |
198 | 415k | int mb_height; |
199 | | |
200 | 415k | if (s->encoding && s->avctx->slices) |
201 | 0 | nb_slices = s->avctx->slices; |
202 | | |
203 | 415k | if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence) |
204 | 41.8k | s->mb_height = (s->height + 31) / 32 * 2; |
205 | 373k | else |
206 | 373k | s->mb_height = (s->height + 15) / 16; |
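                /* Worked example (not part of the original source): for s->height = 144,
                 * the interlaced MPEG-2 branch gives (144 + 31) / 32 * 2 = 10 macroblock
                 * rows (the height is rounded up to a multiple of 32, one full row per
                 * field), while the progressive branch gives (144 + 15) / 16 = 9 rows. */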
207 | | |
208 | 415k | if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) { |
209 | 0 | int max_slices; |
210 | 0 | if (s->mb_height) |
211 | 0 | max_slices = FFMIN(MAX_THREADS, s->mb_height); |
212 | 0 | else |
213 | 0 | max_slices = MAX_THREADS; |
214 | 0 | av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d)," |
215 | 0 | " reducing to %d\n", nb_slices, max_slices); |
216 | 0 | nb_slices = max_slices; |
217 | 0 | } |
218 | | |
219 | 415k | s->slice_context_count = nb_slices; |
220 | | |
221 | | /* VC-1 can change from being progressive to interlaced on a per-frame |
222 | | * basis. We therefore allocate certain buffers so big that they work |
223 | | * in both instances. */ |
224 | 415k | mb_height = s->msmpeg4_version == MSMP4_VC1 ? |
225 | 354k | FFALIGN(s->mb_height, 2) : s->mb_height; |
226 | | |
227 | 415k | s->mb_width = (s->width + 15) / 16; |
228 | 415k | s->mb_stride = s->mb_width + 1; |
229 | 415k | s->b8_stride = s->mb_width * 2 + 1; |
230 | 415k | mb_array_size = mb_height * s->mb_stride; |
231 | 415k | mv_table_size = (mb_height + 2) * s->mb_stride + 1; |
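                /* Worked example (illustrative values, not from this report): for a
                 * 176x144 (QCIF) progressive frame, mb_width = 11, mb_height = 9,
                 * mb_stride = 12, b8_stride = 23, mb_array_size = 9 * 12 = 108 and
                 * mv_table_size = (9 + 2) * 12 + 1 = 133. */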
232 | | |
233 | | /* set default edge pos, will be overridden |
234 | | * in decode_header if needed */ |
235 | 415k | s->h_edge_pos = s->mb_width * 16; |
236 | 415k | s->v_edge_pos = s->mb_height * 16; |
237 | | |
238 | 415k | s->mb_num = s->mb_width * s->mb_height; |
239 | | |
240 | 415k | s->block_wrap[0] = |
241 | 415k | s->block_wrap[1] = |
242 | 415k | s->block_wrap[2] = |
243 | 415k | s->block_wrap[3] = s->b8_stride; |
244 | 415k | s->block_wrap[4] = |
245 | 415k | s->block_wrap[5] = s->mb_stride; |
246 | | |
247 | 415k | y_size = s->b8_stride * (2 * mb_height + 1); |
248 | 415k | c_size = s->mb_stride * (mb_height + 1); |
249 | 415k | yc_size = y_size + 2 * c_size; |
250 | | |
251 | 415k | if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1)) |
252 | 0 | return AVERROR(ENOMEM); |
253 | 87.4M | for (y = 0; y < s->mb_height; y++) |
254 | 1.14G | for (x = 0; x < s->mb_width; x++) |
255 | 1.05G | s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride; |
256 | | |
257 | 415k | s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed? |
258 | | |
259 | 1.51M | #define ALLOC_POOL(name, size, flags) do { \ |
260 | 1.51M | pools->name ##_pool = av_refstruct_pool_alloc((size), (flags)); \ |
261 | 1.51M | if (!pools->name ##_pool) \ |
262 | 1.51M | return AVERROR(ENOMEM); \ |
263 | 1.51M | } while (0) |
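                /* The do { ... } while (0) wrapper is the usual trick for making a
                 * multi-statement macro such as ALLOC_POOL behave like a single
                 * statement, so it remains safe inside an unbraced if/else. */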
264 | | |
265 | 415k | if (s->codec_id == AV_CODEC_ID_MPEG4 || |
266 | 387k | (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) { |
267 | | /* interlaced direct mode decoding tables */ |
268 | 28.0k | int16_t (*tmp)[2] = av_calloc(mv_table_size, 4 * sizeof(*tmp)); |
269 | 28.0k | if (!tmp) |
270 | 0 | return AVERROR(ENOMEM); |
271 | 28.0k | s->p_field_mv_table_base = tmp; |
272 | 28.0k | tmp += s->mb_stride + 1; |
273 | 84.1k | for (int i = 0; i < 2; i++) { |
274 | 168k | for (int j = 0; j < 2; j++) { |
275 | 112k | s->p_field_mv_table[i][j] = tmp; |
276 | 112k | tmp += mv_table_size; |
277 | 112k | } |
278 | 56.1k | } |
279 | 28.0k | if (s->codec_id == AV_CODEC_ID_MPEG4) { |
280 | 28.0k | ALLOC_POOL(mbskip_table, mb_array_size + 2, |
281 | 28.0k | !s->encoding ? AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME : 0); |
282 | 28.0k | if (!s->encoding) { |
283 | | /* cbp, pred_dir */ |
284 | 27.6k | if (!(s->cbp_table = av_mallocz(mb_array_size)) || |
285 | 27.6k | !(s->pred_dir_table = av_mallocz(mb_array_size))) |
286 | 0 | return AVERROR(ENOMEM); |
287 | 27.6k | } |
288 | 28.0k | } |
289 | 28.0k | } |
290 | | |
291 | 415k | if (s->msmpeg4_version >= MSMP4_V3) { |
292 | 69.9k | s->coded_block_base = av_mallocz(y_size); |
293 | 69.9k | if (!s->coded_block_base) |
294 | 0 | return AVERROR(ENOMEM); |
295 | 69.9k | s->coded_block = s->coded_block_base + s->b8_stride + 1; |
296 | 69.9k | } |
297 | | |
298 | 415k | if (s->h263_pred || s->h263_aic || !s->encoding) { |
299 | | // When encoding, each slice (and therefore each thread) |
300 | | // gets its own ac_val and dc_val buffers in order to avoid |
301 | | // races. |
302 | 411k | size_t allslice_yc_size = yc_size * (s->encoding ? nb_slices : 1); |
303 | 411k | if (s->out_format == FMT_H263) { |
304 | | /* ac values */ |
305 | 301k | if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, allslice_yc_size)) |
306 | 0 | return AVERROR(ENOMEM); |
307 | 301k | s->ac_val = s->ac_val_base + s->b8_stride + 1; |
308 | 301k | } |
309 | | |
310 | | /* dc values */ |
311 | | // MN: we need these for error resilience of intra-frames |
312 | | // Allocating them unconditionally for decoders also means |
313 | | // that we don't need to reinitialize when e.g. h263_aic changes. |
314 | | |
315 | | // y_size and therefore yc_size is always odd; allocate one element |
316 | | // more for each encoder slice in order to be able to align each slice's |
317 | | // dc_val to four in order to use aligned stores when cleaning dc_val. |
318 | 411k | allslice_yc_size += s->encoding * nb_slices; |
319 | 411k | if (!FF_ALLOC_TYPED_ARRAY(s->dc_val_base, allslice_yc_size)) |
320 | 0 | return AVERROR(ENOMEM); |
321 | 411k | s->dc_val = s->dc_val_base + s->b8_stride + 1; |
322 | 7.29G | for (size_t i = 0; i < allslice_yc_size; ++i) |
323 | 7.29G | s->dc_val_base[i] = 1024; |
324 | 411k | } |
325 | | |
326 | | // Note the + 1 is for a quicker MPEG-4 slice_end detection |
327 | 415k | if (!(s->mbskip_table = av_mallocz(mb_array_size + 2)) || |
328 | | /* which mb is an intra block, init macroblock skip table */ |
329 | 415k | !(s->mbintra_table = av_mallocz(mb_array_size))) |
330 | 0 | return AVERROR(ENOMEM); |
331 | | |
332 | 415k | ALLOC_POOL(qscale_table, mv_table_size, 0); |
333 | 415k | ALLOC_POOL(mb_type, mv_table_size * sizeof(uint32_t), 0); |
334 | | |
335 | 415k | if (s->out_format == FMT_H263 || s->encoding || |
336 | 327k | (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS)) { |
337 | 327k | const int b8_array_size = s->b8_stride * mb_height * 2; |
338 | 327k | int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t); |
339 | 327k | int ref_index_size = 4 * mb_array_size; |
340 | | |
341 | | /* FIXME: The output of H.263 with OBMC depends upon |
342 | | * the earlier content of the buffer; therefore we set |
343 | | * the flags to always reset returned buffers here. */ |
344 | 327k | ALLOC_POOL(motion_val, mv_size, AV_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME); |
345 | 327k | ALLOC_POOL(ref_index, ref_index_size, 0); |
346 | 327k | } |
347 | 415k | #undef ALLOC_POOL |
348 | 415k | pools->alloc_mb_width = s->mb_width; |
349 | 415k | pools->alloc_mb_height = mb_height; |
350 | 415k | pools->alloc_mb_stride = s->mb_stride; |
351 | | |
352 | 415k | return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s); |
353 | 415k | } |
354 | | |
355 | | /** |
356 | | * init common structure for both encoder and decoder. |
357 | | * this assumes that some variables like width/height are already set |
358 | | */ |
359 | | av_cold int ff_mpv_common_init(MpegEncContext *s) |
360 | 264k | { |
361 | 264k | int ret; |
362 | | |
363 | 264k | if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) { |
364 | 0 | av_log(s->avctx, AV_LOG_ERROR, |
365 | 0 | "decoding to AV_PIX_FMT_NONE is not supported.\n"); |
366 | 0 | return AVERROR(EINVAL); |
367 | 0 | } |
368 | | |
369 | 264k | if ((s->width || s->height) && |
370 | 242k | av_image_check_size(s->width, s->height, 0, s->avctx)) |
371 | 5.10k | return AVERROR(EINVAL); |
372 | | |
373 | 259k | dsp_init(s); |
374 | | |
375 | | /* set chroma shifts */ |
376 | 259k | ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, |
377 | 259k | &s->chroma_x_shift, |
378 | 259k | &s->chroma_y_shift); |
379 | 259k | if (ret) |
380 | 0 | return ret; |
381 | | |
382 | 259k | if ((ret = ff_mpv_init_context_frame(s))) |
383 | 0 | goto fail; |
384 | | |
385 | 259k | s->context_initialized = 1; |
386 | 259k | s->thread_context[0] = s; |
387 | | |
388 | | // if (s->width && s->height) { |
389 | 259k | if (!s->encoding) { |
390 | 252k | ret = ff_mpv_init_duplicate_contexts(s); |
391 | 252k | if (ret < 0) |
392 | 0 | goto fail; |
393 | 252k | } |
394 | | // } |
395 | | |
396 | 259k | return 0; |
397 | 0 | fail: |
398 | 0 | ff_mpv_common_end(s); |
399 | 0 | return ret; |
400 | 259k | } |
401 | | |
402 | | av_cold void ff_mpv_free_context_frame(MpegEncContext *s) |
403 | 464k | { |
404 | 464k | free_duplicate_contexts(s); |
405 | | |
406 | 464k | free_buffer_pools(&s->buffer_pools); |
407 | 464k | av_freep(&s->p_field_mv_table_base); |
408 | 1.39M | for (int i = 0; i < 2; i++) |
409 | 2.78M | for (int j = 0; j < 2; j++) |
410 | 1.85M | s->p_field_mv_table[i][j] = NULL; |
411 | | |
412 | 464k | av_freep(&s->ac_val_base); |
413 | 464k | av_freep(&s->dc_val_base); |
414 | 464k | av_freep(&s->coded_block_base); |
415 | 464k | av_freep(&s->mbintra_table); |
416 | 464k | av_freep(&s->cbp_table); |
417 | 464k | av_freep(&s->pred_dir_table); |
418 | | |
419 | 464k | av_freep(&s->mbskip_table); |
420 | | |
421 | 464k | av_freep(&s->er.error_status_table); |
422 | 464k | av_freep(&s->er.er_temp_buffer); |
423 | 464k | av_freep(&s->mb_index2xy); |
424 | | |
425 | 464k | s->linesize = s->uvlinesize = 0; |
426 | 464k | } |
427 | | |
428 | | av_cold void ff_mpv_common_end(MpegEncContext *s) |
429 | 308k | { |
430 | 308k | ff_mpv_free_context_frame(s); |
431 | 308k | if (s->slice_context_count > 1) |
432 | 0 | s->slice_context_count = 1; |
433 | | |
434 | 308k | ff_mpv_unref_picture(&s->last_pic); |
435 | 308k | ff_mpv_unref_picture(&s->cur_pic); |
436 | 308k | ff_mpv_unref_picture(&s->next_pic); |
437 | | |
438 | 308k | s->context_initialized = 0; |
439 | 308k | s->context_reinit = 0; |
440 | 308k | s->linesize = s->uvlinesize = 0; |
441 | 308k | } |
442 | | |
443 | | |
444 | | /** |
445 | | * Clean dc, ac for the current non-intra MB. |
446 | | */ |
447 | | void ff_clean_intra_table_entries(MpegEncContext *s) |
448 | 3.55M | { |
449 | 3.55M | int wrap = s->b8_stride; |
450 | 3.55M | int xy = s->block_index[0]; |
451 | | /* chroma */ |
452 | 3.55M | unsigned uxy = s->block_index[4]; |
453 | 3.55M | unsigned vxy = s->block_index[5]; |
454 | 3.55M | int16_t *dc_val = s->dc_val; |
455 | | |
456 | 3.55M | AV_WN32A(dc_val + xy, 1024 << 16 | 1024); |
457 | 3.55M | AV_WN32 (dc_val + xy + wrap, 1024 << 16 | 1024); |
458 | 3.55M | dc_val[uxy] = |
459 | 3.55M | dc_val[vxy] = 1024; |
460 | | /* ac pred */ |
461 | 3.55M | int16_t (*ac_val)[16] = s->ac_val; |
462 | 3.55M | av_assume(!((uintptr_t)ac_val & 0xF)); |
463 | | // Don't reset the upper-left luma block, as it will only ever be |
464 | | // referenced by blocks from the same macroblock. |
465 | 3.55M | memset(ac_val[xy + 1], 0, sizeof(*ac_val)); |
466 | 3.55M | memset(ac_val[xy + wrap], 0, 2 * sizeof(*ac_val)); |
467 | | /* ac pred */ |
468 | 3.55M | memset(ac_val[uxy], 0, sizeof(*ac_val)); |
469 | 3.55M | memset(ac_val[vxy], 0, sizeof(*ac_val)); |
470 | 3.55M | } |
471 | | |
472 | 19.0M | void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename |
473 | 19.0M | const int linesize = s->cur_pic.linesize[0]; //not s->linesize as this would be wrong for field pics |
474 | 19.0M | const int uvlinesize = s->cur_pic.linesize[1]; |
475 | 19.0M | const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres; |
476 | 19.0M | const int height_of_mb = 4 - s->avctx->lowres; |
477 | | |
478 | 19.0M | s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2; |
479 | 19.0M | s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2; |
480 | 19.0M | s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2; |
481 | 19.0M | s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2; |
482 | 19.0M | s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1; |
483 | 19.0M | s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1; |
484 | | //block_index is not used by mpeg2, so it is not affected by chroma_format |
485 | | |
486 | 19.0M | s->dest[0] = s->cur_pic.data[0] + (int)((s->mb_x - 1U) << width_of_mb); |
487 | 19.0M | s->dest[1] = s->cur_pic.data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift)); |
488 | 19.0M | s->dest[2] = s->cur_pic.data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift)); |
489 | | |
490 | 19.0M | if (s->picture_structure == PICT_FRAME) { |
491 | 18.9M | s->dest[0] += s->mb_y * linesize << height_of_mb; |
492 | 18.9M | s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift); |
493 | 18.9M | s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift); |
494 | 18.9M | } else { |
495 | 78.7k | s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb; |
496 | 78.7k | s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift); |
497 | 78.7k | s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift); |
498 | 78.7k | av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD)); |
499 | 78.7k | } |
500 | 19.0M | } |
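    /* Worked example of the shifts above (assumes 8-bit 4:2:0 video without
     * lowres; values not taken from this report): width_of_mb = height_of_mb = 4,
     * so dest[0] advances by 16 pixels per mb_x and 16 * linesize per mb_y, while
     * dest[1]/dest[2] advance by 8 pixels and 8 * uvlinesize
     * (chroma_x_shift = chroma_y_shift = 1). */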
501 | | |
502 | | /** |
503 | | * set qscale and update qscale dependent variables. |
504 | | */ |
505 | | void ff_set_qscale(MpegEncContext * s, int qscale) |
506 | 5.37M | { |
507 | 5.37M | if (qscale < 1) |
508 | 269k | qscale = 1; |
509 | 5.10M | else if (qscale > 31) |
510 | 59.9k | qscale = 31; |
511 | | |
512 | 5.37M | s->qscale = qscale; |
513 | 5.37M | s->chroma_qscale= s->chroma_qscale_table[qscale]; |
514 | | |
515 | 5.37M | s->y_dc_scale= s->y_dc_scale_table[ qscale ]; |
516 | 5.37M | s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ]; |
517 | 5.37M | } |