/src/ffmpeg/libavcodec/vc1_pred.c
Line | Count | Source |
1 | | /*
2 | | * VC-1 and WMV3 decoder
3 | | * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 | | * Copyright (c) 2006-2007 Konstantin Shishkov
5 | | * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 | | *
7 | | * This file is part of FFmpeg.
8 | | *
9 | | * FFmpeg is free software; you can redistribute it and/or
10 | | * modify it under the terms of the GNU Lesser General Public
11 | | * License as published by the Free Software Foundation; either
12 | | * version 2.1 of the License, or (at your option) any later version.
13 | | *
14 | | * FFmpeg is distributed in the hope that it will be useful,
15 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 | | * Lesser General Public License for more details.
18 | | *
19 | | * You should have received a copy of the GNU Lesser General Public
20 | | * License along with FFmpeg; if not, write to the Free Software
21 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 | | */
23 | |
24 | | /**
25 | | * @file
26 | | * VC-1 and WMV3 block decoding routines
27 | | */
28 | |
29 | | #include "mathops.h"
30 | | #include "mpegutils.h"
31 | | #include "mpegvideo.h"
32 | | #include "vc1.h"
33 | | #include "vc1_pred.h"
34 | | #include "vc1data.h"
35 | | |
36 | | static av_always_inline int scaleforsame_x(const VC1Context *v, int n /* MV */, int dir) |
37 | 1.47M | { |
38 | 1.47M | int scaledvalue, refdist; |
39 | 1.47M | int scalesame1, scalesame2; |
40 | 1.47M | int scalezone1_x, zone1offset_x; |
41 | 1.47M | int table_index = dir ^ v->second_field; |
42 | | |
43 | 1.47M | if (v->s.pict_type != AV_PICTURE_TYPE_B) |
44 | 862k | refdist = v->refdist; |
45 | 609k | else |
46 | 609k | refdist = dir ? v->brfd : v->frfd; |
47 | 1.47M | if (refdist > 3) |
48 | 2.43k | refdist = 3; |
49 | 1.47M | scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist]; |
50 | 1.47M | scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist]; |
51 | 1.47M | scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist]; |
52 | 1.47M | zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist]; |
53 | | |
54 | 1.47M | if (FFABS(n) > 255) |
55 | 15.4k | scaledvalue = n; |
56 | 1.45M | else { |
57 | 1.45M | if (FFABS(n) < scalezone1_x) |
58 | 1.14M | scaledvalue = (n * scalesame1) >> 8; |
59 | 312k | else { |
60 | 312k | if (n < 0) |
61 | 127k | scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x; |
62 | 184k | else |
63 | 184k | scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x; |
64 | 312k | } |
65 | 1.45M | } |
66 | 1.47M | return av_clip(scaledvalue, -v->range_x, v->range_x - 1); |
67 | 1.47M | } |
68 | | |
69 | | static av_always_inline int scaleforsame_y(const VC1Context *v, int n /* MV */, int dir) |
70 | 1.47M | { |
71 | 1.47M | int scaledvalue, refdist; |
72 | 1.47M | int scalesame1, scalesame2; |
73 | 1.47M | int scalezone1_y, zone1offset_y; |
74 | 1.47M | int table_index = dir ^ v->second_field; |
75 | | |
76 | 1.47M | if (v->s.pict_type != AV_PICTURE_TYPE_B) |
77 | 862k | refdist = v->refdist; |
78 | 609k | else |
79 | 609k | refdist = dir ? v->brfd : v->frfd; |
80 | 1.47M | if (refdist > 3) |
81 | 2.43k | refdist = 3; |
82 | 1.47M | scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist]; |
83 | 1.47M | scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist]; |
84 | 1.47M | scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist]; |
85 | 1.47M | zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist]; |
86 | | |
87 | 1.47M | if (FFABS(n) > 63) |
88 | 46.1k | scaledvalue = n; |
89 | 1.42M | else { |
90 | 1.42M | if (FFABS(n) < scalezone1_y) |
91 | 995k | scaledvalue = (n * scalesame1) >> 8; |
92 | 429k | else { |
93 | 429k | if (n < 0) |
94 | 145k | scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y; |
95 | 283k | else |
96 | 283k | scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y; |
97 | 429k | } |
98 | 1.42M | } |
99 | | |
100 | 1.47M | if (v->cur_field_type && !v->ref_field_type[dir]) |
101 | 0 | return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2); |
102 | 1.47M | else |
103 | 1.47M | return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1); |
104 | 1.47M | } |
105 | | |
106 | | static av_always_inline int scaleforopp_x(const VC1Context *v, int n /* MV */) |
107 | 64.4k | { |
108 | 64.4k | int scalezone1_x, zone1offset_x; |
109 | 64.4k | int scaleopp1, scaleopp2, brfd; |
110 | 64.4k | int scaledvalue; |
111 | | |
112 | 64.4k | brfd = FFMIN(v->brfd, 3); |
113 | 64.4k | scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd]; |
114 | 64.4k | zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd]; |
115 | 64.4k | scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd]; |
116 | 64.4k | scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd]; |
117 | | |
118 | 64.4k | if (FFABS(n) > 255) |
119 | 1.71k | scaledvalue = n; |
120 | 62.7k | else { |
121 | 62.7k | if (FFABS(n) < scalezone1_x) |
122 | 53.3k | scaledvalue = (n * scaleopp1) >> 8; |
123 | 9.35k | else { |
124 | 9.35k | if (n < 0) |
125 | 5.83k | scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x; |
126 | 3.51k | else |
127 | 3.51k | scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x; |
128 | 9.35k | } |
129 | 62.7k | } |
130 | 64.4k | return av_clip(scaledvalue, -v->range_x, v->range_x - 1); |
131 | 64.4k | } |
132 | | |
133 | | static av_always_inline int scaleforopp_y(const VC1Context *v, int n /* MV */, int dir) |
134 | 64.4k | { |
135 | 64.4k | int scalezone1_y, zone1offset_y; |
136 | 64.4k | int scaleopp1, scaleopp2, brfd; |
137 | 64.4k | int scaledvalue; |
138 | | |
139 | 64.4k | brfd = FFMIN(v->brfd, 3); |
140 | 64.4k | scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd]; |
141 | 64.4k | zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd]; |
142 | 64.4k | scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd]; |
143 | 64.4k | scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd]; |
144 | | |
145 | 64.4k | if (FFABS(n) > 63) |
146 | 2.64k | scaledvalue = n; |
147 | 61.7k | else { |
148 | 61.7k | if (FFABS(n) < scalezone1_y) |
149 | 43.1k | scaledvalue = (n * scaleopp1) >> 8; |
150 | 18.6k | else { |
151 | 18.6k | if (n < 0) |
152 | 6.83k | scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y; |
153 | 11.8k | else |
154 | 11.8k | scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y; |
155 | 18.6k | } |
156 | 61.7k | } |
157 | 64.4k | if (v->cur_field_type && !v->ref_field_type[dir]) { |
158 | 15.4k | return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2); |
159 | 48.9k | } else { |
160 | 48.9k | return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1); |
161 | 48.9k | } |
162 | 64.4k | } |
163 | | |
164 | | static av_always_inline int scaleforsame(const VC1Context *v, int n /* MV */, |
165 | | int dim, int dir) |
166 | 3.06M | { |
167 | 3.06M | int brfd, scalesame; |
168 | 3.06M | int hpel = 1 - v->s.quarter_sample; |
169 | | |
170 | 3.06M | n >>= hpel; |
171 | 3.06M | if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) { |
172 | 2.94M | if (dim) |
173 | 1.47M | n = scaleforsame_y(v, n, dir) * (1 << hpel); |
174 | 1.47M | else |
175 | 1.47M | n = scaleforsame_x(v, n, dir) * (1 << hpel); |
176 | 2.94M | return n; |
177 | 2.94M | } |
178 | 118k | brfd = FFMIN(v->brfd, 3); |
179 | 118k | scalesame = ff_vc1_b_field_mvpred_scales[0][brfd]; |
180 | | |
181 | 118k | n = (n * scalesame >> 8) * (1 << hpel); |
182 | 118k | return n; |
183 | 3.06M | } |
184 | | |
185 | | static av_always_inline int scaleforopp(const VC1Context *v, int n /* MV */, |
186 | | int dim, int dir) |
187 | 3.21M | { |
188 | 3.21M | int refdist, scaleopp; |
189 | 3.21M | int hpel = 1 - v->s.quarter_sample; |
190 | | |
191 | 3.21M | n >>= hpel; |
192 | 3.21M | if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) { |
193 | 128k | if (dim) |
194 | 64.4k | n = scaleforopp_y(v, n, dir) * (1 << hpel); |
195 | 64.4k | else |
196 | 64.4k | n = scaleforopp_x(v, n) * (1 << hpel); |
197 | 128k | return n; |
198 | 128k | } |
199 | 3.08M | if (v->s.pict_type != AV_PICTURE_TYPE_B) |
200 | 1.66M | refdist = v->refdist; |
201 | 1.41M | else |
202 | 1.41M | refdist = dir ? v->brfd : v->frfd; |
203 | 3.08M | refdist = FFMIN(refdist, 3); |
204 | 3.08M | scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist]; |
205 | | |
206 | 3.08M | n = (n * scaleopp >> 8) * (1 << hpel); |
207 | 3.08M | return n; |
208 | 3.21M | } |
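
The four scalefor* helpers above all follow the same two-zone pattern: a predictor small enough to fall in zone 1 is scaled by one factor, a larger one is scaled by a second factor plus a zone offset, and the result is clipped to the MV range. A minimal standalone sketch of that pattern, using invented scale/offset constants rather than the real ff_vc1_field_mvpred_scales entries:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical parameters standing in for one row of the real scale tables. */
#define ZONE1_THRESH  96   /* |mv| below this uses SCALE1                  */
#define SCALE1       220   /* zone-1 scale, in 1/256 units                 */
#define SCALE2       256   /* zone-2 scale, in 1/256 units                 */
#define ZONE1_OFFSET  12   /* added/subtracted outside zone 1              */
#define RANGE_X      256   /* result is clipped to [-RANGE_X, RANGE_X - 1] */

static int clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

/* Two-zone scaling of a predictor, mirroring the shape of scaleforsame_x(). */
static int scale_two_zone(int n)
{
    int scaled;

    if (abs(n) > 255)               /* very large MVs pass through unscaled */
        scaled = n;
    else if (abs(n) < ZONE1_THRESH) /* zone 1: plain fixed-point scale      */
        scaled = (n * SCALE1) >> 8;
    else                            /* zone 2: second scale plus offset     */
        scaled = ((n * SCALE2) >> 8) + (n < 0 ? -ZONE1_OFFSET : ZONE1_OFFSET);

    return clip(scaled, -RANGE_X, RANGE_X - 1);
}

int main(void)
{
    int samples[] = { -300, -120, -40, 0, 40, 120, 300 };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%4d -> %4d\n", samples[i], scale_two_zone(samples[i]));
    return 0;
}
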
209 | | |
210 | | /** Predict and set motion vector |
211 | | */ |
212 | | void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, |
213 | | int mv1, int r_x, int r_y, uint8_t* is_intra, |
214 | | int pred_flag, int dir) |
215 | 36.7M | { |
216 | 36.7M | MpegEncContext *s = &v->s; |
217 | 36.7M | int xy, wrap, off = 0; |
218 | 36.7M | int px, py; |
219 | 36.7M | int sum; |
220 | 36.7M | int mixedmv_pic, num_samefield = 0, num_oppfield = 0; |
221 | 36.7M | int opposite, a_f, b_f, c_f; |
222 | 36.7M | int16_t field_predA[2]; |
223 | 36.7M | int16_t field_predB[2]; |
224 | 36.7M | int16_t field_predC[2]; |
225 | 36.7M | int a_valid, b_valid, c_valid; |
226 | 36.7M | int hybridmv_thresh, y_bias = 0; |
227 | | |
228 | 36.7M | if (v->mv_mode == MV_PMODE_MIXED_MV || |
229 | 13.9M | ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV))) |
230 | 30.7M | mixedmv_pic = 1; |
231 | 5.99M | else |
232 | 5.99M | mixedmv_pic = 0; |
233 | | /* scale MV difference to be quad-pel */ |
234 | 36.7M | if (!s->quarter_sample) { |
235 | 4.56M | dmv_x *= 2; |
236 | 4.56M | dmv_y *= 2; |
237 | 4.56M | } |
238 | | |
239 | 36.7M | wrap = s->b8_stride; |
240 | 36.7M | xy = s->block_index[n]; |
241 | | |
242 | 36.7M | if (s->mb_intra) { |
243 | 1.14M | s->mv[0][n][0] = s->cur_pic.motion_val[0][xy + v->blocks_off][0] = 0; |
244 | 1.14M | s->mv[0][n][1] = s->cur_pic.motion_val[0][xy + v->blocks_off][1] = 0; |
245 | 1.14M | s->cur_pic.motion_val[1][xy + v->blocks_off][0] = 0; |
246 | 1.14M | s->cur_pic.motion_val[1][xy + v->blocks_off][1] = 0; |
247 | 1.14M | if (mv1) { /* duplicate motion data for 1-MV block */ |
248 | 433k | s->cur_pic.motion_val[0][xy + 1 + v->blocks_off][0] = 0; |
249 | 433k | s->cur_pic.motion_val[0][xy + 1 + v->blocks_off][1] = 0; |
250 | 433k | s->cur_pic.motion_val[0][xy + wrap + v->blocks_off][0] = 0; |
251 | 433k | s->cur_pic.motion_val[0][xy + wrap + v->blocks_off][1] = 0; |
252 | 433k | s->cur_pic.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0; |
253 | 433k | s->cur_pic.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0; |
254 | 433k | v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0; |
255 | 433k | s->cur_pic.motion_val[1][xy + 1 + v->blocks_off][0] = 0; |
256 | 433k | s->cur_pic.motion_val[1][xy + 1 + v->blocks_off][1] = 0; |
257 | 433k | s->cur_pic.motion_val[1][xy + wrap + v->blocks_off][0] = 0; |
258 | 433k | s->cur_pic.motion_val[1][xy + wrap + v->blocks_off][1] = 0; |
259 | 433k | s->cur_pic.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0; |
260 | 433k | s->cur_pic.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0; |
261 | 433k | } |
262 | 1.14M | return; |
263 | 1.14M | } |
264 | | |
265 | 35.5M | a_valid = !s->first_slice_line || (n == 2 || n == 3); |
266 | 35.5M | b_valid = a_valid; |
267 | 35.5M | c_valid = s->mb_x || (n == 1 || n == 3); |
268 | 35.5M | if (mv1) { |
269 | 12.5M | if (v->field_mode && mixedmv_pic) |
270 | 1.85M | off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2; |
271 | 10.7M | else |
272 | 10.7M | off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2; |
273 | 12.5M | b_valid = b_valid && s->mb_width > 1; |
274 | 22.9M | } else { |
275 | | // in 4-MV mode, different blocks have different B predictor positions
276 | 22.9M | switch (n) { |
277 | 5.76M | case 0: |
278 | 5.76M | if (v->res_rtm_flag) |
279 | 3.39M | off = s->mb_x ? -1 : 1; |
280 | 2.36M | else |
281 | 2.36M | off = s->mb_x ? -1 : 2 * s->mb_width - wrap - 1; |
282 | 5.76M | break; |
283 | 5.78M | case 1: |
284 | 5.78M | off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1; |
285 | 5.78M | break; |
286 | 5.78M | case 2: |
287 | 5.78M | off = 1; |
288 | 5.78M | break; |
289 | 5.64M | case 3: |
290 | 5.64M | off = -1; |
291 | 22.9M | } |
292 | 22.9M | if (v->field_mode && s->mb_width == 1) |
293 | 32.8k | b_valid = b_valid && c_valid; |
294 | 22.9M | } |
295 | | |
296 | 35.5M | if (v->field_mode) { |
297 | 5.02M | a_valid = a_valid && !is_intra[xy - wrap]; |
298 | 5.02M | b_valid = b_valid && !is_intra[xy - wrap + off]; |
299 | 5.02M | c_valid = c_valid && !is_intra[xy - 1]; |
300 | 5.02M | } |
301 | | |
302 | 35.5M | if (a_valid) { |
303 | 32.0M | const int16_t *A = s->cur_pic.motion_val[dir][xy - wrap + v->blocks_off]; |
304 | 32.0M | a_f = v->mv_f[dir][xy - wrap + v->blocks_off]; |
305 | 32.0M | num_oppfield += a_f; |
306 | 32.0M | num_samefield += 1 - a_f; |
307 | 32.0M | field_predA[0] = A[0]; |
308 | 32.0M | field_predA[1] = A[1]; |
309 | 32.0M | } else { |
310 | 3.55M | field_predA[0] = field_predA[1] = 0; |
311 | 3.55M | a_f = 0; |
312 | 3.55M | } |
313 | 35.5M | if (b_valid) { |
314 | 31.5M | const int16_t *B = s->cur_pic.motion_val[dir][xy - wrap + off + v->blocks_off]; |
315 | 31.5M | b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off]; |
316 | 31.5M | num_oppfield += b_f; |
317 | 31.5M | num_samefield += 1 - b_f; |
318 | 31.5M | field_predB[0] = B[0]; |
319 | 31.5M | field_predB[1] = B[1]; |
320 | 31.5M | } else { |
321 | 3.96M | field_predB[0] = field_predB[1] = 0; |
322 | 3.96M | b_f = 0; |
323 | 3.96M | } |
324 | 35.5M | if (c_valid) { |
325 | 33.7M | const int16_t *C = s->cur_pic.motion_val[dir][xy - 1 + v->blocks_off]; |
326 | 33.7M | c_f = v->mv_f[dir][xy - 1 + v->blocks_off]; |
327 | 33.7M | num_oppfield += c_f; |
328 | 33.7M | num_samefield += 1 - c_f; |
329 | 33.7M | field_predC[0] = C[0]; |
330 | 33.7M | field_predC[1] = C[1]; |
331 | 33.7M | } else { |
332 | 1.78M | field_predC[0] = field_predC[1] = 0; |
333 | 1.78M | c_f = 0; |
334 | 1.78M | } |
335 | | |
336 | 35.5M | if (v->field_mode) { |
337 | 5.02M | if (!v->numref) |
338 | | // REFFIELD determines if the last field or the second-last field is |
339 | | // to be used as reference |
340 | 367k | opposite = 1 - v->reffield; |
341 | 4.66M | else { |
342 | 4.66M | if (num_samefield <= num_oppfield) |
343 | 2.52M | opposite = 1 - pred_flag; |
344 | 2.13M | else |
345 | 2.13M | opposite = pred_flag; |
346 | 4.66M | } |
347 | 5.02M | } else |
348 | 30.5M | opposite = 0; |
349 | 35.5M | if (opposite) { |
350 | 2.62M | v->mv_f[dir][xy + v->blocks_off] = 1; |
351 | 2.62M | v->ref_field_type[dir] = !v->cur_field_type; |
352 | 2.62M | if (a_valid && !a_f) { |
353 | 462k | field_predA[0] = scaleforopp(v, field_predA[0], 0, dir); |
354 | 462k | field_predA[1] = scaleforopp(v, field_predA[1], 1, dir); |
355 | 462k | } |
356 | 2.62M | if (b_valid && !b_f) { |
357 | 563k | field_predB[0] = scaleforopp(v, field_predB[0], 0, dir); |
358 | 563k | field_predB[1] = scaleforopp(v, field_predB[1], 1, dir); |
359 | 563k | } |
360 | 2.62M | if (c_valid && !c_f) { |
361 | 579k | field_predC[0] = scaleforopp(v, field_predC[0], 0, dir); |
362 | 579k | field_predC[1] = scaleforopp(v, field_predC[1], 1, dir); |
363 | 579k | } |
364 | 32.9M | } else { |
365 | 32.9M | v->mv_f[dir][xy + v->blocks_off] = 0; |
366 | 32.9M | v->ref_field_type[dir] = v->cur_field_type; |
367 | 32.9M | if (a_valid && a_f) { |
368 | 446k | field_predA[0] = scaleforsame(v, field_predA[0], 0, dir); |
369 | 446k | field_predA[1] = scaleforsame(v, field_predA[1], 1, dir); |
370 | 446k | } |
371 | 32.9M | if (b_valid && b_f) { |
372 | 525k | field_predB[0] = scaleforsame(v, field_predB[0], 0, dir); |
373 | 525k | field_predB[1] = scaleforsame(v, field_predB[1], 1, dir); |
374 | 525k | } |
375 | 32.9M | if (c_valid && c_f) { |
376 | 559k | field_predC[0] = scaleforsame(v, field_predC[0], 0, dir); |
377 | 559k | field_predC[1] = scaleforsame(v, field_predC[1], 1, dir); |
378 | 559k | } |
379 | 32.9M | } |
380 | | |
381 | 35.5M | if (a_valid) { |
382 | 32.0M | px = field_predA[0]; |
383 | 32.0M | py = field_predA[1]; |
384 | 32.0M | } else if (c_valid) { |
385 | 3.30M | px = field_predC[0]; |
386 | 3.30M | py = field_predC[1]; |
387 | 3.30M | } else if (b_valid) { |
388 | 22.7k | px = field_predB[0]; |
389 | 22.7k | py = field_predB[1]; |
390 | 229k | } else { |
391 | 229k | px = 0; |
392 | 229k | py = 0; |
393 | 229k | } |
394 | | |
395 | 35.5M | if (num_samefield + num_oppfield > 1) { |
396 | 31.7M | px = mid_pred(field_predA[0], field_predB[0], field_predC[0]); |
397 | 31.7M | py = mid_pred(field_predA[1], field_predB[1], field_predC[1]); |
398 | 31.7M | } |
399 | | |
400 | | /* Pullback MV as specified in 8.3.5.3.4 */ |
401 | 35.5M | if (!v->field_mode) { |
402 | 30.5M | int qx, qy, X, Y; |
403 | 30.5M | int MV = mv1 ? -60 : -28; |
404 | 30.5M | qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0); |
405 | 30.5M | qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0); |
406 | 30.5M | X = (s->mb_width << 6) - 4; |
407 | 30.5M | Y = (s->mb_height << 6) - 4; |
408 | 30.5M | if (qx + px < MV) px = MV - qx; |
409 | 30.5M | if (qy + py < MV) py = MV - qy; |
410 | 30.5M | if (qx + px > X) px = X - qx; |
411 | 30.5M | if (qy + py > Y) py = Y - qy; |
412 | 30.5M | } |
413 | | |
414 | 35.5M | if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) { |
415 | | /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */ |
416 | 32.9M | hybridmv_thresh = 32; |
417 | 32.9M | if (a_valid && c_valid) { |
418 | 28.4M | if (is_intra[xy - wrap]) |
419 | 920k | sum = FFABS(px) + FFABS(py); |
420 | 27.5M | else |
421 | 27.5M | sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]); |
422 | 28.4M | if (sum > hybridmv_thresh) { |
423 | 1.88M | if (get_bits1(&v->gb)) { // read HYBRIDPRED bit |
424 | 1.02M | px = field_predA[0]; |
425 | 1.02M | py = field_predA[1]; |
426 | 1.02M | } else { |
427 | 860k | px = field_predC[0]; |
428 | 860k | py = field_predC[1]; |
429 | 860k | } |
430 | 26.6M | } else { |
431 | 26.6M | if (is_intra[xy - 1]) |
432 | 842k | sum = FFABS(px) + FFABS(py); |
433 | 25.7M | else |
434 | 25.7M | sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]); |
435 | 26.6M | if (sum > hybridmv_thresh) { |
436 | 3.52M | if (get_bits1(&v->gb)) { |
437 | 1.66M | px = field_predA[0]; |
438 | 1.66M | py = field_predA[1]; |
439 | 1.86M | } else { |
440 | 1.86M | px = field_predC[0]; |
441 | 1.86M | py = field_predC[1]; |
442 | 1.86M | } |
443 | 3.52M | } |
444 | 26.6M | } |
445 | 28.4M | } |
446 | 32.9M | } |
447 | | |
448 | 35.5M | if (v->field_mode && v->numref) |
449 | 4.66M | r_y >>= 1; |
450 | 35.5M | if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0) |
451 | 2.13M | y_bias = 1; |
452 | | /* store MV using signed modulus of MV range defined in 4.11 */ |
453 | 35.5M | s->mv[dir][n][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x; |
454 | 35.5M | s->mv[dir][n][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias; |
455 | 35.5M | if (mv1) { /* duplicate motion data for 1-MV block */ |
456 | 12.5M | s->cur_pic.motion_val[dir][xy + 1 + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0]; |
457 | 12.5M | s->cur_pic.motion_val[dir][xy + 1 + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1]; |
458 | 12.5M | s->cur_pic.motion_val[dir][xy + wrap + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0]; |
459 | 12.5M | s->cur_pic.motion_val[dir][xy + wrap + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1]; |
460 | 12.5M | s->cur_pic.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0]; |
461 | 12.5M | s->cur_pic.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1]; |
462 | 12.5M | v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off]; |
463 | 12.5M | v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off]; |
464 | 12.5M | } |
465 | 35.5M | } |
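
The stores at the end of ff_vc1_pred_mv() use the "signed modulus of MV range" arithmetic noted in the comment: adding the range, masking with 2*range - 1 and subtracting the range folds the predictor-plus-differential into [-range, range). This relies on the range being a power of two, as range_x/range_y are in this decoder. A small sketch of just that step (the range value is arbitrary):

#include <stdio.h>

/* Fold pred + diff into [-r, r) by signed modulus; r must be a power of two. */
static int wrap_mv(int pred, int diff, int r)
{
    return ((pred + diff + r) & ((r << 1) - 1)) - r;
}

int main(void)
{
    int r = 256;                           /* e.g. v->range_x */
    printf("%d\n", wrap_mv(250, 10, r));   /* 260 wraps to -252 */
    printf("%d\n", wrap_mv(-250, -10, r)); /* -260 wraps to 252 */
    printf("%d\n", wrap_mv(10, 5, r));     /* 15 stays 15       */
    return 0;
}
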
466 | | |
467 | | /** Predict and set motion vector for interlaced frame picture MBs |
468 | | */ |
469 | | void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, |
470 | | int mvn, int r_x, int r_y, int dir) |
471 | 37.9M | { |
472 | 37.9M | MpegEncContext *s = &v->s; |
473 | 37.9M | int xy, wrap, off = 0; |
474 | 37.9M | int A[2], B[2], C[2]; |
475 | 37.9M | int px = 0, py = 0; |
476 | 37.9M | int a_valid = 0, b_valid = 0, c_valid = 0; |
477 | 37.9M | int field_a, field_b, field_c; // 0: same, 1: opposite |
478 | 37.9M | int total_valid, num_samefield, num_oppfield; |
479 | 37.9M | int pos_c, pos_b, n_adj; |
480 | | |
481 | 37.9M | wrap = s->b8_stride; |
482 | 37.9M | xy = s->block_index[n]; |
483 | | |
484 | 37.9M | if (s->mb_intra) { |
485 | 0 | s->mv[0][n][0] = s->cur_pic.motion_val[0][xy][0] = 0; |
486 | 0 | s->mv[0][n][1] = s->cur_pic.motion_val[0][xy][1] = 0; |
487 | 0 | s->cur_pic.motion_val[1][xy][0] = 0; |
488 | 0 | s->cur_pic.motion_val[1][xy][1] = 0; |
489 | 0 | if (mvn == 1) { /* duplicate motion data for 1-MV block */ |
490 | 0 | s->cur_pic.motion_val[0][xy + 1][0] = 0; |
491 | 0 | s->cur_pic.motion_val[0][xy + 1][1] = 0; |
492 | 0 | s->cur_pic.motion_val[0][xy + wrap][0] = 0; |
493 | 0 | s->cur_pic.motion_val[0][xy + wrap][1] = 0; |
494 | 0 | s->cur_pic.motion_val[0][xy + wrap + 1][0] = 0; |
495 | 0 | s->cur_pic.motion_val[0][xy + wrap + 1][1] = 0; |
496 | 0 | v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0; |
497 | 0 | s->cur_pic.motion_val[1][xy + 1][0] = 0; |
498 | 0 | s->cur_pic.motion_val[1][xy + 1][1] = 0; |
499 | 0 | s->cur_pic.motion_val[1][xy + wrap][0] = 0; |
500 | 0 | s->cur_pic.motion_val[1][xy + wrap][1] = 0; |
501 | 0 | s->cur_pic.motion_val[1][xy + wrap + 1][0] = 0; |
502 | 0 | s->cur_pic.motion_val[1][xy + wrap + 1][1] = 0; |
503 | 0 | } |
504 | 0 | return; |
505 | 0 | } |
506 | | |
507 | 37.9M | off = ((n == 0) || (n == 1)) ? 1 : -1; |
508 | | /* predict A */ |
509 | 37.9M | if (s->mb_x || (n == 1) || (n == 3)) { |
510 | 35.4M | if ((v->blk_mv_type[xy]) // current block (MB) has a field MV |
511 | 28.7M | || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV |
512 | 28.7M | A[0] = s->cur_pic.motion_val[dir][xy - 1][0]; |
513 | 28.7M | A[1] = s->cur_pic.motion_val[dir][xy - 1][1]; |
514 | 28.7M | a_valid = 1; |
515 | 6.67M | } else { // current block has a frame MV and the candidate has a field MV (so average)
516 | 6.67M | A[0] = (s->cur_pic.motion_val[dir][xy - 1][0] |
517 | 6.67M | + s->cur_pic.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1; |
518 | 6.67M | A[1] = (s->cur_pic.motion_val[dir][xy - 1][1] |
519 | 6.67M | + s->cur_pic.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1; |
520 | 6.67M | a_valid = 1; |
521 | 6.67M | } |
522 | 35.4M | if (!(n & 1) && v->is_intra[s->mb_x - 1]) { |
523 | 438k | a_valid = 0; |
524 | 438k | A[0] = A[1] = 0; |
525 | 438k | } |
526 | 35.4M | } else |
527 | 2.49M | A[0] = A[1] = 0; |
528 | | /* Predict B and C */ |
529 | 37.9M | B[0] = B[1] = C[0] = C[1] = 0; |
530 | 37.9M | if (n == 0 || n == 1 || v->blk_mv_type[xy]) { |
531 | 37.7M | if (!s->first_slice_line) { |
532 | 33.7M | if (!v->is_intra[s->mb_x - s->mb_stride]) { |
533 | 33.3M | b_valid = 1; |
534 | 33.3M | n_adj = n | 2; |
535 | 33.3M | pos_b = s->block_index[n_adj] - 2 * wrap; |
536 | 33.3M | if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) { |
537 | 10.4M | n_adj = (n & 2) | (n & 1); |
538 | 10.4M | } |
539 | 33.3M | B[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0]; |
540 | 33.3M | B[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1]; |
541 | 33.3M | if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) { |
542 | 6.35M | B[0] = (B[0] + s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1; |
543 | 6.35M | B[1] = (B[1] + s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1; |
544 | 6.35M | } |
545 | 33.3M | } |
546 | 33.7M | if (s->mb_width > 1) { |
547 | 33.3M | if (!v->is_intra[s->mb_x - s->mb_stride + 1]) { |
548 | 32.9M | c_valid = 1; |
549 | 32.9M | n_adj = 2; |
550 | 32.9M | pos_c = s->block_index[2] - 2 * wrap + 2; |
551 | 32.9M | if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) { |
552 | 9.76M | n_adj = n & 2; |
553 | 9.76M | } |
554 | 32.9M | C[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0]; |
555 | 32.9M | C[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1]; |
556 | 32.9M | if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) { |
557 | 6.02M | C[0] = (1 + C[0] + (s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1; |
558 | 6.02M | C[1] = (1 + C[1] + (s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1; |
559 | 6.02M | } |
560 | 32.9M | if (s->mb_x == s->mb_width - 1) { |
561 | 1.73M | if (!v->is_intra[s->mb_x - s->mb_stride - 1]) { |
562 | 1.72M | c_valid = 1; |
563 | 1.72M | n_adj = 3; |
564 | 1.72M | pos_c = s->block_index[3] - 2 * wrap - 2; |
565 | 1.72M | if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) { |
566 | 437k | n_adj = n | 1; |
567 | 437k | } |
568 | 1.72M | C[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0]; |
569 | 1.72M | C[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1]; |
570 | 1.72M | if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) { |
571 | 268k | C[0] = (1 + C[0] + s->cur_pic.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1; |
572 | 268k | C[1] = (1 + C[1] + s->cur_pic.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1; |
573 | 268k | } |
574 | 1.72M | } else |
575 | 12.9k | c_valid = 0; |
576 | 1.73M | } |
577 | 32.9M | } |
578 | 33.3M | } |
579 | 33.7M | } |
580 | 37.7M | } else { |
581 | 248k | pos_b = s->block_index[1]; |
582 | 248k | b_valid = 1; |
583 | 248k | B[0] = s->cur_pic.motion_val[dir][pos_b][0]; |
584 | 248k | B[1] = s->cur_pic.motion_val[dir][pos_b][1]; |
585 | 248k | pos_c = s->block_index[0]; |
586 | 248k | c_valid = 1; |
587 | 248k | C[0] = s->cur_pic.motion_val[dir][pos_c][0]; |
588 | 248k | C[1] = s->cur_pic.motion_val[dir][pos_c][1]; |
589 | 248k | } |
590 | | |
591 | 37.9M | total_valid = a_valid + b_valid + c_valid; |
592 | | // check if predictor A is out of bounds |
593 | 37.9M | if (!s->mb_x && !(n == 1 || n == 3)) { |
594 | 2.49M | A[0] = A[1] = 0; |
595 | 2.49M | } |
596 | | // check if predictor B is out of bounds |
597 | 37.9M | if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) { |
598 | 3.91M | B[0] = B[1] = C[0] = C[1] = 0; |
599 | 3.91M | } |
600 | 37.9M | if (!v->blk_mv_type[xy]) { |
601 | 17.7M | if (s->mb_width == 1) { |
602 | 284k | px = B[0]; |
603 | 284k | py = B[1]; |
604 | 17.4M | } else { |
605 | 17.4M | if (total_valid >= 2) { |
606 | 15.6M | px = mid_pred(A[0], B[0], C[0]); |
607 | 15.6M | py = mid_pred(A[1], B[1], C[1]); |
608 | 15.6M | } else if (total_valid) { |
609 | 1.81M | if (a_valid) { px = A[0]; py = A[1]; } |
610 | 26.7k | else if (b_valid) { px = B[0]; py = B[1]; } |
611 | 14.5k | else { px = C[0]; py = C[1]; } |
612 | 1.81M | } |
613 | 17.4M | } |
614 | 20.1M | } else { |
615 | 20.1M | if (a_valid) |
616 | 18.8M | field_a = (A[1] & 4) ? 1 : 0; |
617 | 1.35M | else |
618 | 1.35M | field_a = 0; |
619 | 20.1M | if (b_valid) |
620 | 17.8M | field_b = (B[1] & 4) ? 1 : 0; |
621 | 2.30M | else |
622 | 2.30M | field_b = 0; |
623 | 20.1M | if (c_valid) |
624 | 17.6M | field_c = (C[1] & 4) ? 1 : 0; |
625 | 2.48M | else |
626 | 2.48M | field_c = 0; |
627 | | |
628 | 20.1M | num_oppfield = field_a + field_b + field_c; |
629 | 20.1M | num_samefield = total_valid - num_oppfield; |
630 | 20.1M | if (total_valid == 3) { |
631 | 16.3M | if ((num_samefield == 3) || (num_oppfield == 3)) { |
632 | 10.7M | px = mid_pred(A[0], B[0], C[0]); |
633 | 10.7M | py = mid_pred(A[1], B[1], C[1]); |
634 | 10.7M | } else if (num_samefield >= num_oppfield) { |
635 | | /* take one MV from the same-field set depending on priority;
636 | | the check for B may not be necessary */
637 | 3.19M | px = !field_a ? A[0] : B[0]; |
638 | 3.19M | py = !field_a ? A[1] : B[1]; |
639 | 3.19M | } else { |
640 | 2.41M | px = field_a ? A[0] : B[0]; |
641 | 2.41M | py = field_a ? A[1] : B[1]; |
642 | 2.41M | } |
643 | 16.3M | } else if (total_valid == 2) { |
644 | 1.53M | if (num_samefield >= num_oppfield) { |
645 | 1.29M | if (!field_a && a_valid) { |
646 | 336k | px = A[0]; |
647 | 336k | py = A[1]; |
648 | 963k | } else if (!field_b && b_valid) { |
649 | 829k | px = B[0]; |
650 | 829k | py = B[1]; |
651 | 829k | } else /*if (c_valid)*/ { |
652 | 133k | av_assert1(c_valid); |
653 | 133k | px = C[0]; |
654 | 133k | py = C[1]; |
655 | 133k | } |
656 | 1.29M | } else { |
657 | 233k | if (field_a && a_valid) { |
658 | 85.5k | px = A[0]; |
659 | 85.5k | py = A[1]; |
660 | 148k | } else /*if (field_b && b_valid)*/ { |
661 | 148k | av_assert1(field_b && b_valid); |
662 | 148k | px = B[0]; |
663 | 148k | py = B[1]; |
664 | 148k | } |
665 | 233k | } |
666 | 2.25M | } else if (total_valid == 1) { |
667 | 2.15M | px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]); |
668 | 2.15M | py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]); |
669 | 2.15M | } |
670 | 20.1M | } |
671 | | |
672 | | /* store MV using signed modulus of MV range defined in 4.11 */ |
673 | 37.9M | s->mv[dir][n][0] = s->cur_pic.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x; |
674 | 37.9M | s->mv[dir][n][1] = s->cur_pic.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y; |
675 | 37.9M | if (mvn == 1) { /* duplicate motion data for 1-MV block */ |
676 | 17.2M | s->cur_pic.motion_val[dir][xy + 1 ][0] = s->cur_pic.motion_val[dir][xy][0]; |
677 | 17.2M | s->cur_pic.motion_val[dir][xy + 1 ][1] = s->cur_pic.motion_val[dir][xy][1]; |
678 | 17.2M | s->cur_pic.motion_val[dir][xy + wrap ][0] = s->cur_pic.motion_val[dir][xy][0]; |
679 | 17.2M | s->cur_pic.motion_val[dir][xy + wrap ][1] = s->cur_pic.motion_val[dir][xy][1]; |
680 | 17.2M | s->cur_pic.motion_val[dir][xy + wrap + 1][0] = s->cur_pic.motion_val[dir][xy][0]; |
681 | 17.2M | s->cur_pic.motion_val[dir][xy + wrap + 1][1] = s->cur_pic.motion_val[dir][xy][1]; |
682 | 20.6M | } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */ |
683 | 17.7M | s->cur_pic.motion_val[dir][xy + 1][0] = s->cur_pic.motion_val[dir][xy][0]; |
684 | 17.7M | s->cur_pic.motion_val[dir][xy + 1][1] = s->cur_pic.motion_val[dir][xy][1]; |
685 | 17.7M | s->mv[dir][n + 1][0] = s->mv[dir][n][0]; |
686 | 17.7M | s->mv[dir][n + 1][1] = s->mv[dir][n][1]; |
687 | 17.7M | } |
688 | 37.9M | } |
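
When enough candidates are valid, both prediction functions above take the component-wise median of A, B and C via mid_pred(). A self-contained illustration of that median selection, with invented candidate vectors:

#include <stdio.h>

/* Median of three integers, matching what FFmpeg's mid_pred() computes. */
static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; } /* ensure a <= b        */
    if (b > c)
        b = c;                              /* b = min(max(a,b), c) */
    return a > b ? a : b;                   /* median of the three  */
}

int main(void)
{
    /* Candidate MVs from the left, top and top-right blocks (invented values). */
    int A[2] = { 12, -3 }, B[2] = { 40, 1 }, C[2] = { 8, -7 };

    int px = median3(A[0], B[0], C[0]);
    int py = median3(A[1], B[1], C[1]);

    printf("pred = (%d, %d)\n", px, py);    /* (12, -3): the outlier B is rejected */
    return 0;
}
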
689 | | |
690 | | void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], |
691 | | int direct, int mvtype) |
692 | 10.3M | { |
693 | 10.3M | MpegEncContext *s = &v->s; |
694 | 10.3M | int xy, wrap; |
695 | 10.3M | int px, py; |
696 | 10.3M | int sum; |
697 | 10.3M | int r_x, r_y; |
698 | 10.3M | const uint8_t *is_intra = v->mb_type; |
699 | | |
700 | 10.3M | av_assert0(!v->field_mode); |
701 | | |
702 | 10.3M | r_x = v->range_x; |
703 | 10.3M | r_y = v->range_y; |
704 | | /* scale MV difference to be quad-pel */ |
705 | 10.3M | if (!s->quarter_sample) { |
706 | 5.31M | dmv_x[0] *= 2; |
707 | 5.31M | dmv_y[0] *= 2; |
708 | 5.31M | dmv_x[1] *= 2; |
709 | 5.31M | dmv_y[1] *= 2; |
710 | 5.31M | } |
711 | | |
712 | 10.3M | wrap = s->b8_stride; |
713 | 10.3M | xy = s->block_index[0]; |
714 | | |
715 | 10.3M | if (s->mb_intra) { |
716 | 561k | s->cur_pic.motion_val[0][xy][0] = |
717 | 561k | s->cur_pic.motion_val[0][xy][1] = |
718 | 561k | s->cur_pic.motion_val[1][xy][0] = |
719 | 561k | s->cur_pic.motion_val[1][xy][1] = 0; |
720 | 561k | return; |
721 | 561k | } |
722 | 9.81M | if (direct && s->next_pic.ptr->field_picture) |
723 | 7.39k | av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n"); |
724 | | |
725 | 9.81M | s->mv[0][0][0] = scale_mv(s->next_pic.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample); |
726 | 9.81M | s->mv[0][0][1] = scale_mv(s->next_pic.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample); |
727 | 9.81M | s->mv[1][0][0] = scale_mv(s->next_pic.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample); |
728 | 9.81M | s->mv[1][0][1] = scale_mv(s->next_pic.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample); |
729 | | |
730 | | /* Pullback predicted motion vectors as specified in 8.4.5.4 */ |
731 | 9.81M | s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6)); |
732 | 9.81M | s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6)); |
733 | 9.81M | s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6)); |
734 | 9.81M | s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6)); |
735 | 9.81M | if (direct) { |
736 | 3.27M | s->cur_pic.motion_val[0][xy][0] = s->mv[0][0][0]; |
737 | 3.27M | s->cur_pic.motion_val[0][xy][1] = s->mv[0][0][1]; |
738 | 3.27M | s->cur_pic.motion_val[1][xy][0] = s->mv[1][0][0]; |
739 | 3.27M | s->cur_pic.motion_val[1][xy][1] = s->mv[1][0][1]; |
740 | 3.27M | return; |
741 | 3.27M | } |
742 | | |
743 | 6.54M | if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) { |
744 | 3.55M | int16_t *C = s->cur_pic.motion_val[0][xy - 2]; |
745 | 3.55M | const int16_t *A = s->cur_pic.motion_val[0][xy - wrap * 2]; |
746 | 3.55M | int off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2; |
747 | 3.55M | const int16_t *B = s->cur_pic.motion_val[0][xy - wrap * 2 + off]; |
748 | | |
749 | 3.55M | if (!s->mb_x) C[0] = C[1] = 0; |
750 | 3.55M | if (!s->first_slice_line) { // predictor A is not out of bounds |
751 | 3.24M | if (s->mb_width == 1) { |
752 | 33.7k | px = A[0]; |
753 | 33.7k | py = A[1]; |
754 | 3.21M | } else { |
755 | 3.21M | px = mid_pred(A[0], B[0], C[0]); |
756 | 3.21M | py = mid_pred(A[1], B[1], C[1]); |
757 | 3.21M | } |
758 | 3.24M | } else if (s->mb_x) { // predictor C is not out of bounds |
759 | 296k | px = C[0]; |
760 | 296k | py = C[1]; |
761 | 296k | } else { |
762 | 13.6k | px = py = 0; |
763 | 13.6k | } |
764 | | /* Pullback MV as specified in 8.3.5.3.4 */ |
765 | 3.55M | { |
766 | 3.55M | int qx, qy, X, Y; |
767 | 3.55M | int sh = v->profile < PROFILE_ADVANCED ? 5 : 6; |
768 | 3.55M | int MV = 4 - (1 << sh); |
769 | 3.55M | qx = (s->mb_x << sh); |
770 | 3.55M | qy = (s->mb_y << sh); |
771 | 3.55M | X = (s->mb_width << sh) - 4; |
772 | 3.55M | Y = (s->mb_height << sh) - 4; |
773 | 3.55M | if (qx + px < MV) px = MV - qx; |
774 | 3.55M | if (qy + py < MV) py = MV - qy; |
775 | 3.55M | if (qx + px > X) px = X - qx; |
776 | 3.55M | if (qy + py > Y) py = Y - qy; |
777 | 3.55M | } |
778 | | /* Calculate hybrid prediction as specified in 8.3.5.3.5 */ |
779 | 3.55M | if (0 && !s->first_slice_line && s->mb_x) { |
780 | 0 | if (is_intra[xy - wrap]) |
781 | 0 | sum = FFABS(px) + FFABS(py); |
782 | 0 | else |
783 | 0 | sum = FFABS(px - A[0]) + FFABS(py - A[1]); |
784 | 0 | if (sum > 32) { |
785 | 0 | if (get_bits1(&v->gb)) { |
786 | 0 | px = A[0]; |
787 | 0 | py = A[1]; |
788 | 0 | } else { |
789 | 0 | px = C[0]; |
790 | 0 | py = C[1]; |
791 | 0 | } |
792 | 0 | } else { |
793 | 0 | if (is_intra[xy - 2]) |
794 | 0 | sum = FFABS(px) + FFABS(py); |
795 | 0 | else |
796 | 0 | sum = FFABS(px - C[0]) + FFABS(py - C[1]); |
797 | 0 | if (sum > 32) { |
798 | 0 | if (get_bits1(&v->gb)) { |
799 | 0 | px = A[0]; |
800 | 0 | py = A[1]; |
801 | 0 | } else { |
802 | 0 | px = C[0]; |
803 | 0 | py = C[1]; |
804 | 0 | } |
805 | 0 | } |
806 | 0 | } |
807 | 0 | } |
808 | | /* store MV using signed modulus of MV range defined in 4.11 */ |
809 | 3.55M | s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x; |
810 | 3.55M | s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y; |
811 | 3.55M | } |
812 | 6.54M | if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) { |
813 | 4.54M | int16_t *C = s->cur_pic.motion_val[1][xy - 2]; |
814 | 4.54M | const int16_t *A = s->cur_pic.motion_val[1][xy - wrap * 2]; |
815 | 4.54M | int off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2; |
816 | 4.54M | const int16_t *B = s->cur_pic.motion_val[1][xy - wrap * 2 + off]; |
817 | | |
818 | 4.54M | if (!s->mb_x) |
819 | 128k | C[0] = C[1] = 0; |
820 | 4.54M | if (!s->first_slice_line) { // predictor A is not out of bounds |
821 | 4.30M | if (s->mb_width == 1) { |
822 | 44.6k | px = A[0]; |
823 | 44.6k | py = A[1]; |
824 | 4.25M | } else { |
825 | 4.25M | px = mid_pred(A[0], B[0], C[0]); |
826 | 4.25M | py = mid_pred(A[1], B[1], C[1]); |
827 | 4.25M | } |
828 | 4.30M | } else if (s->mb_x) { // predictor C is not out of bounds |
829 | 227k | px = C[0]; |
830 | 227k | py = C[1]; |
831 | 227k | } else { |
832 | 13.7k | px = py = 0; |
833 | 13.7k | } |
834 | | /* Pullback MV as specified in 8.3.5.3.4 */ |
835 | 4.54M | { |
836 | 4.54M | int qx, qy, X, Y; |
837 | 4.54M | int sh = v->profile < PROFILE_ADVANCED ? 5 : 6; |
838 | 4.54M | int MV = 4 - (1 << sh); |
839 | 4.54M | qx = (s->mb_x << sh); |
840 | 4.54M | qy = (s->mb_y << sh); |
841 | 4.54M | X = (s->mb_width << sh) - 4; |
842 | 4.54M | Y = (s->mb_height << sh) - 4; |
843 | 4.54M | if (qx + px < MV) px = MV - qx; |
844 | 4.54M | if (qy + py < MV) py = MV - qy; |
845 | 4.54M | if (qx + px > X) px = X - qx; |
846 | 4.54M | if (qy + py > Y) py = Y - qy; |
847 | 4.54M | } |
848 | | /* Calculate hybrid prediction as specified in 8.3.5.3.5 */ |
849 | 4.54M | if (0 && !s->first_slice_line && s->mb_x) { |
850 | 0 | if (is_intra[xy - wrap]) |
851 | 0 | sum = FFABS(px) + FFABS(py); |
852 | 0 | else |
853 | 0 | sum = FFABS(px - A[0]) + FFABS(py - A[1]); |
854 | 0 | if (sum > 32) { |
855 | 0 | if (get_bits1(&v->gb)) { |
856 | 0 | px = A[0]; |
857 | 0 | py = A[1]; |
858 | 0 | } else { |
859 | 0 | px = C[0]; |
860 | 0 | py = C[1]; |
861 | 0 | } |
862 | 0 | } else { |
863 | 0 | if (is_intra[xy - 2]) |
864 | 0 | sum = FFABS(px) + FFABS(py); |
865 | 0 | else |
866 | 0 | sum = FFABS(px - C[0]) + FFABS(py - C[1]); |
867 | 0 | if (sum > 32) { |
868 | 0 | if (get_bits1(&v->gb)) { |
869 | 0 | px = A[0]; |
870 | 0 | py = A[1]; |
871 | 0 | } else { |
872 | 0 | px = C[0]; |
873 | 0 | py = C[1]; |
874 | 0 | } |
875 | 0 | } |
876 | 0 | } |
877 | 0 | } |
878 | | /* store MV using signed modulus of MV range defined in 4.11 */ |
879 | | |
880 | 4.54M | s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x; |
881 | 4.54M | s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y; |
882 | 4.54M | } |
883 | 6.54M | s->cur_pic.motion_val[0][xy][0] = s->mv[0][0][0]; |
884 | 6.54M | s->cur_pic.motion_val[0][xy][1] = s->mv[0][0][1]; |
885 | 6.54M | s->cur_pic.motion_val[1][xy][0] = s->mv[1][0][0]; |
886 | 6.54M | s->cur_pic.motion_val[1][xy][1] = s->mv[1][0][1]; |
887 | 6.54M | } |
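
The direct-mode pullback in ff_vc1_pred_b_mv() clips each scaled predictor so the position it addresses stays roughly within one macroblock of the frame, relative to the current macroblock column/row. A sketch of the horizontal clamp with an arbitrarily chosen frame width:

#include <stdio.h>

static int clip(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

/* Horizontal pullback of a predictor for the macroblock at column mb_x,
 * using the same bounds as the av_clip() calls above (64 units per MB). */
static int pullback_x(int mv, int mb_x, int mb_width)
{
    int lo = -60 - (mb_x << 6);                 /* leftmost allowed offset  */
    int hi = (mb_width << 6) - 4 - (mb_x << 6); /* rightmost allowed offset */
    return clip(mv, lo, hi);
}

int main(void)
{
    int mb_width = 45; /* e.g. a 720-pixel-wide frame; illustrative only */

    printf("%d\n", pullback_x(-500, 0, mb_width));  /* clamped to -60 */
    printf("%d\n", pullback_x(9000, 44, mb_width)); /* clamped to 60  */
    printf("%d\n", pullback_x(100, 10, mb_width));  /* unchanged: 100 */
    return 0;
}
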
888 | | |
889 | | void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, |
890 | | int mv1, int *pred_flag) |
891 | 1.60M | { |
892 | 1.60M | int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0; |
893 | 1.60M | MpegEncContext *s = &v->s; |
894 | 1.60M | int mb_pos = s->mb_x + s->mb_y * s->mb_stride; |
895 | | |
896 | 1.60M | if (v->bmvtype == BMV_TYPE_DIRECT) { |
897 | 11.9k | int total_opp, k, f; |
898 | 11.9k | if (s->next_pic.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) { |
899 | 11.6k | s->mv[0][0][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][0], |
900 | 11.6k | v->bfraction, 0, s->quarter_sample); |
901 | 11.6k | s->mv[0][0][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][1], |
902 | 11.6k | v->bfraction, 0, s->quarter_sample); |
903 | 11.6k | s->mv[1][0][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][0], |
904 | 11.6k | v->bfraction, 1, s->quarter_sample); |
905 | 11.6k | s->mv[1][0][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][1], |
906 | 11.6k | v->bfraction, 1, s->quarter_sample); |
907 | | |
908 | 11.6k | total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off] |
909 | 11.6k | + v->mv_f_next[0][s->block_index[1] + v->blocks_off] |
910 | 11.6k | + v->mv_f_next[0][s->block_index[2] + v->blocks_off] |
911 | 11.6k | + v->mv_f_next[0][s->block_index[3] + v->blocks_off]; |
912 | 11.6k | f = (total_opp > 2) ? 1 : 0; |
913 | 11.6k | } else { |
914 | 348 | s->mv[0][0][0] = s->mv[0][0][1] = 0; |
915 | 348 | s->mv[1][0][0] = s->mv[1][0][1] = 0; |
916 | 348 | f = 0; |
917 | 348 | } |
918 | 11.9k | v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f; |
919 | 59.9k | for (k = 0; k < 4; k++) { |
920 | 47.9k | s->cur_pic.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0]; |
921 | 47.9k | s->cur_pic.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1]; |
922 | 47.9k | s->cur_pic.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0]; |
923 | 47.9k | s->cur_pic.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1]; |
924 | 47.9k | v->mv_f[0][s->block_index[k] + v->blocks_off] = f; |
925 | 47.9k | v->mv_f[1][s->block_index[k] + v->blocks_off] = f; |
926 | 47.9k | } |
927 | 11.9k | return; |
928 | 11.9k | } |
929 | 1.59M | if (v->bmvtype == BMV_TYPE_INTERPOLATED) { |
930 | 120k | ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type, pred_flag[0], 0); |
931 | 120k | ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type, pred_flag[1], 1); |
932 | 120k | return; |
933 | 120k | } |
934 | 1.47M | if (dir) { // backward |
935 | 797k | ff_vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type, pred_flag[1], 1); |
936 | 797k | if (n == 3 || mv1) { |
937 | 438k | ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type, 0, 0); |
938 | 438k | } |
939 | 797k | } else { // forward |
940 | 672k | ff_vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type, pred_flag[0], 0); |
941 | 672k | if (n == 3 || mv1) { |
942 | 478k | ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type, 0, 1); |
943 | 478k | } |
944 | 672k | } |
945 | 1.47M | } |