/src/aom/aom_dsp/flow_estimation/disflow.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2016, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | // Dense Inverse Search flow algorithm |
13 | | // Paper: https://arxiv.org/abs/1603.03590 |
14 | | |
15 | | #include <assert.h> |
16 | | #include <math.h> |
17 | | |
18 | | #include "aom_dsp/aom_dsp_common.h" |
19 | | #include "aom_dsp/flow_estimation/corner_detect.h" |
20 | | #include "aom_dsp/flow_estimation/disflow.h" |
21 | | #include "aom_dsp/flow_estimation/ransac.h" |
22 | | #include "aom_dsp/pyramid.h" |
23 | | #include "aom_mem/aom_mem.h" |
24 | | |
25 | | #include "config/aom_dsp_rtcd.h" |
26 | | |
27 | | // Amount to downsample the flow field by. |
28 | | // e.g., DOWNSAMPLE_SHIFT = 2 (DOWNSAMPLE_FACTOR == 4) means we calculate |
29 | | // one flow point for each 4x4 pixel region of the frame |
30 | | // Must be a power of 2 |
31 | 0 | #define DOWNSAMPLE_SHIFT 3 |
32 | 0 | #define DOWNSAMPLE_FACTOR (1 << DOWNSAMPLE_SHIFT) |
33 | | |
34 | | // Filters used when upscaling the flow field from one pyramid level |
35 | | // to another. See upscale_flow_component for details on kernel selection |
36 | 0 | #define FLOW_UPSCALE_TAPS 4 |
37 | | |
38 | | // Number of outermost flow field entries (on each edge) which can't be |
39 | | // computed, because the patch they correspond to extends outside of the |
40 | | // frame |
41 | | // The border is (DISFLOW_PATCH_SIZE >> 1) pixels wide, which corresponds to |
42 | | // ((DISFLOW_PATCH_SIZE >> 1) >> DOWNSAMPLE_SHIFT) flow field entries |
43 | 0 | #define FLOW_BORDER_INNER ((DISFLOW_PATCH_SIZE >> 1) >> DOWNSAMPLE_SHIFT) |
44 | | |
45 | | // Number of extra padding entries on each side of the flow field. |
46 | | // These samples are added so that we do not need to apply clamping when |
47 | | // interpolating or upsampling the flow field |
48 | 0 | #define FLOW_BORDER_OUTER (FLOW_UPSCALE_TAPS / 2) |
49 | | |
50 | | // When downsampling the flow field, each flow field entry covers a square |
51 | | // region of pixels in the image pyramid. This value is equal to the position |
52 | | // of the center of that region, as an offset from the top/left edge. |
53 | | // |
54 | | // Note: Using ((DOWNSAMPLE_FACTOR - 1) / 2) is equivalent to the more |
55 | | // natural expression ((DOWNSAMPLE_FACTOR / 2) - 1), |
56 | | // unless DOWNSAMPLE_FACTOR == 1 (ie, no downsampling), in which case |
57 | | // this gives the correct offset of 0 instead of -1. |
58 | 0 | #define UPSAMPLE_CENTER_OFFSET ((DOWNSAMPLE_FACTOR - 1) / 2) |
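With the values above (DOWNSAMPLE_SHIFT == 3, so DOWNSAMPLE_FACTOR == 8 and UPSAMPLE_CENTER_OFFSET == 3), each flow field entry covers an 8x8 block of pixels and its nominal center sits 3 pixels in from that block's top-left corner. A minimal sketch of the index-to-pixel mapping, mirroring the patch_center_x/y computation in compute_flow_field() below (the helper name is ours, not part of the source):

// Map flow field entry (i, j) to the pixel it is nominally centered on.
// Illustrative only; relies on the macros defined above.
static inline void flow_entry_center(int i, int j, int *cx, int *cy) {
  *cx = (j << DOWNSAMPLE_SHIFT) + UPSAMPLE_CENTER_OFFSET;  // e.g. j == 5 -> 43
  *cy = (i << DOWNSAMPLE_SHIFT) + UPSAMPLE_CENTER_OFFSET;  // e.g. i == 2 -> 19
}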
59 | | |
60 | | static double flow_upscale_filter[2][FLOW_UPSCALE_TAPS] = { |
61 | | // Cubic interpolation kernels for phase=0.75 and phase=0.25, respectively |
62 | | { -3 / 128., 29 / 128., 111 / 128., -9 / 128. }, |
63 | | { -9 / 128., 111 / 128., 29 / 128., -3 / 128. } |
64 | | }; |
65 | | |
66 | 0 | static inline void get_cubic_kernel_dbl(double x, double kernel[4]) { |
67 | | // Check that the fractional position is in range. |
68 | | // |
69 | | // Note: x is calculated from, e.g., `u_frac = u - floor(u)`. |
70 | | // Mathematically, this implies that 0 <= x < 1. However, in practice it is |
71 | | // possible to have x == 1 due to floating point rounding. This is fine, |
72 | | // and we still interpolate correctly if we allow x = 1. |
73 | 0 | assert(0 <= x && x <= 1); |
74 | |
75 | 0 | double x2 = x * x; |
76 | 0 | double x3 = x2 * x; |
77 | 0 | kernel[0] = -0.5 * x + x2 - 0.5 * x3; |
78 | 0 | kernel[1] = 1.0 - 2.5 * x2 + 1.5 * x3; |
79 | 0 | kernel[2] = 0.5 * x + 2.0 * x2 - 1.5 * x3; |
80 | 0 | kernel[3] = -0.5 * x2 + 0.5 * x3; |
81 | 0 | } |
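These are the Catmull-Rom cubic weights, and the flow_upscale_filter table above is just this kernel evaluated at the two phases it lists. A small illustrative check (ours, not part of the source; it uses only the assert.h/math.h headers already included) of that relationship, and of the fact that each kernel sums to 1 so a constant signal is preserved:

// Sanity check: cubic kernel at phase 0.75 reproduces flow_upscale_filter[0]
// (i.e. { -3/128, 29/128, 111/128, -9/128 }) and sums to 1.
static void check_upscale_filter_matches_cubic(void) {
  double k[4];
  get_cubic_kernel_dbl(0.75, k);
  for (int i = 0; i < 4; ++i) {
    assert(fabs(k[i] - flow_upscale_filter[0][i]) < 1e-12);
  }
  assert(fabs(k[0] + k[1] + k[2] + k[3] - 1.0) < 1e-12);
}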
82 | | |
83 | 0 | static inline void get_cubic_kernel_int(double x, int kernel[4]) { |
84 | 0 | double kernel_dbl[4]; |
85 | 0 | get_cubic_kernel_dbl(x, kernel_dbl); |
86 | |
87 | 0 | kernel[0] = (int)rint(kernel_dbl[0] * (1 << DISFLOW_INTERP_BITS)); |
88 | 0 | kernel[1] = (int)rint(kernel_dbl[1] * (1 << DISFLOW_INTERP_BITS)); |
89 | 0 | kernel[2] = (int)rint(kernel_dbl[2] * (1 << DISFLOW_INTERP_BITS)); |
90 | 0 | kernel[3] = (int)rint(kernel_dbl[3] * (1 << DISFLOW_INTERP_BITS)); |
91 | 0 | } |
92 | | |
93 | | static inline double get_cubic_value_dbl(const double *p, |
94 | 0 | const double kernel[4]) { |
95 | 0 | return kernel[0] * p[0] + kernel[1] * p[1] + kernel[2] * p[2] + |
96 | 0 | kernel[3] * p[3]; |
97 | 0 | } |
98 | | |
99 | 0 | static inline int get_cubic_value_int(const int *p, const int kernel[4]) { |
100 | 0 | return kernel[0] * p[0] + kernel[1] * p[1] + kernel[2] * p[2] + |
101 | 0 | kernel[3] * p[3]; |
102 | 0 | } |
103 | | |
104 | | static inline double bicubic_interp_one(const double *arr, int stride, |
105 | | const double h_kernel[4], |
106 | 0 | const double v_kernel[4]) { |
107 | 0 | double tmp[1 * 4]; |
108 | | |
109 | | // Horizontal convolution |
110 | 0 | for (int i = -1; i < 3; ++i) { |
111 | 0 | tmp[i + 1] = get_cubic_value_dbl(&arr[i * stride - 1], h_kernel); |
112 | 0 | } |
113 | | |
114 | | // Vertical convolution |
115 | 0 | return get_cubic_value_dbl(tmp, v_kernel); |
116 | 0 | } |
117 | | |
118 | | static int determine_disflow_correspondence(const ImagePyramid *src_pyr, |
119 | | const ImagePyramid *ref_pyr, |
120 | | CornerList *corners, |
121 | | const FlowField *flow, |
122 | 0 | Correspondence *correspondences) { |
123 | 0 | const int width = flow->width; |
124 | 0 | const int height = flow->height; |
125 | 0 | const int stride = flow->stride; |
126 | | |
127 | 0 | int num_correspondences = 0; |
128 | 0 | for (int i = 0; i < corners->num_corners; ++i) { |
129 | 0 | const int x0 = corners->corners[2 * i]; |
130 | 0 | const int y0 = corners->corners[2 * i + 1]; |
131 | | |
132 | | // Offset points, to compensate for the fact that (say) a flow field entry |
133 | | // at horizontal index i is nominally associated with the pixel at |
134 | | // horizontal coordinate (i << DOWNSAMPLE_SHIFT) + UPSAMPLE_CENTER_OFFSET. |
135 | | // This offset must be applied before we split the coordinate into integer |
136 | | // and fractional parts, in order for the interpolation to be correct. |
137 | 0 | const int x = x0 - UPSAMPLE_CENTER_OFFSET; |
138 | 0 | const int y = y0 - UPSAMPLE_CENTER_OFFSET; |
139 | | |
140 | | // Split the pixel coordinates into integer flow field coordinates and |
141 | | // an offset for interpolation |
142 | 0 | const int flow_x = x >> DOWNSAMPLE_SHIFT; |
143 | 0 | const double flow_sub_x = |
144 | 0 | (x & (DOWNSAMPLE_FACTOR - 1)) / (double)DOWNSAMPLE_FACTOR; |
145 | 0 | const int flow_y = y >> DOWNSAMPLE_SHIFT; |
146 | 0 | const double flow_sub_y = |
147 | 0 | (y & (DOWNSAMPLE_FACTOR - 1)) / (double)DOWNSAMPLE_FACTOR; |
148 | | |
149 | | // Exclude points which would sample from the outer border of the flow |
150 | | // field, as this would give lower-quality results. |
151 | | // |
152 | | // Note: As we never read from the border region at pyramid level 0, we |
153 | | // can skip filling it in. If the conditions here are removed, or any |
154 | | // other logic is added which reads from this border region, then |
155 | | // compute_flow_field() will need to be modified to call |
156 | | // fill_flow_field_borders() at pyramid level 0 to set up the correct |
157 | | // border data. |
158 | 0 | if (flow_x < 1 || (flow_x + 2) >= width) continue; |
159 | 0 | if (flow_y < 1 || (flow_y + 2) >= height) continue; |
160 | | |
161 | 0 | double h_kernel[4]; |
162 | 0 | double v_kernel[4]; |
163 | 0 | get_cubic_kernel_dbl(flow_sub_x, h_kernel); |
164 | 0 | get_cubic_kernel_dbl(flow_sub_y, v_kernel); |
165 | |
166 | 0 | double flow_u = bicubic_interp_one(&flow->u[flow_y * stride + flow_x], |
167 | 0 | stride, h_kernel, v_kernel); |
168 | 0 | double flow_v = bicubic_interp_one(&flow->v[flow_y * stride + flow_x], |
169 | 0 | stride, h_kernel, v_kernel); |
170 | | |
171 | | // Refine the interpolated flow vector one last time |
172 | 0 | const int patch_tl_x = x0 - DISFLOW_PATCH_CENTER; |
173 | 0 | const int patch_tl_y = y0 - DISFLOW_PATCH_CENTER; |
174 | 0 | aom_compute_flow_at_point( |
175 | 0 | src_pyr->layers[0].buffer, ref_pyr->layers[0].buffer, patch_tl_x, |
176 | 0 | patch_tl_y, src_pyr->layers[0].width, src_pyr->layers[0].height, |
177 | 0 | src_pyr->layers[0].stride, &flow_u, &flow_v); |
178 | | |
179 | | // Use original points (without offsets) when filling in correspondence |
180 | | // array |
181 | 0 | correspondences[num_correspondences].x = x0; |
182 | 0 | correspondences[num_correspondences].y = y0; |
183 | 0 | correspondences[num_correspondences].rx = x0 + flow_u; |
184 | 0 | correspondences[num_correspondences].ry = y0 + flow_v; |
185 | 0 | num_correspondences++; |
186 | 0 | } |
187 | 0 | return num_correspondences; |
188 | 0 | } |
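As a worked example of the coordinate split above (with DOWNSAMPLE_SHIFT == 3): a corner at x0 == 100 is offset to x == 97, giving flow_x == 97 >> 3 == 12 and flow_sub_x == (97 & 7) / 8 == 0.125. The bicubic interpolation then reads flow field entries 11 through 14 in that row, which is why corners with flow_x < 1 or flow_x + 2 >= width are skipped.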
189 | | |
190 | | // Compare the patch rooted at (x, y) in src against the warped patch at |
191 | | // (x + u, y + v) in ref, and accumulate the right-hand side of the flow |
192 | | // equations: b[0] = sum(dx * dt), b[1] = sum(dy * dt), where dt is the |
193 | | // per-pixel difference between the warped ref patch and the src patch. |
194 | | static inline void compute_flow_vector(const uint8_t *src, const uint8_t *ref, |
195 | | int width, int height, int stride, int x, |
196 | | int y, double u, double v, |
197 | | const int16_t *dx, const int16_t *dy, |
198 | 0 | int *b) { |
199 | 0 | memset(b, 0, 2 * sizeof(*b)); |
200 | | |
201 | | // Split offset into integer and fractional parts, and compute cubic |
202 | | // interpolation kernels |
203 | 0 | const int u_int = (int)floor(u); |
204 | 0 | const int v_int = (int)floor(v); |
205 | 0 | const double u_frac = u - floor(u); |
206 | 0 | const double v_frac = v - floor(v); |
207 | |
208 | 0 | int h_kernel[4]; |
209 | 0 | int v_kernel[4]; |
210 | 0 | get_cubic_kernel_int(u_frac, h_kernel); |
211 | 0 | get_cubic_kernel_int(v_frac, v_kernel); |
212 | | |
213 | | // Storage for intermediate values between the two convolution directions |
214 | 0 | int tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)]; |
215 | 0 | int *tmp = tmp_ + DISFLOW_PATCH_SIZE; // Offset by one row |
216 | | |
217 | | // Clamp coordinates so that all pixels we fetch will remain within the |
218 | | // allocated border region, but allow them to go far enough out that |
219 | | // the border pixels' values do not change. |
220 | | // Since we are calculating an 8x8 block, the bottom-right pixel |
221 | | // in the block has coordinates (x0 + 7, y0 + 7). Then, the cubic |
222 | | // interpolation has 4 taps, meaning that the output of pixel |
223 | | // (x_w, y_w) depends on the pixels in the range |
224 | | // ([x_w - 1, x_w + 2], [y_w - 1, y_w + 2]). |
225 | | // |
226 | | // Thus the most extreme coordinates which will be fetched are |
227 | | // (x0 - 1, y0 - 1) and (x0 + 9, y0 + 9). |
228 | 0 | const int x0 = clamp(x + u_int, -9, width); |
229 | 0 | const int y0 = clamp(y + v_int, -9, height); |
230 | | |
231 | | // Horizontal convolution |
232 | 0 | for (int i = -1; i < DISFLOW_PATCH_SIZE + 2; ++i) { |
233 | 0 | const int y_w = y0 + i; |
234 | 0 | for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) { |
235 | 0 | const int x_w = x0 + j; |
236 | 0 | int arr[4]; |
237 | |
238 | 0 | arr[0] = (int)ref[y_w * stride + (x_w - 1)]; |
239 | 0 | arr[1] = (int)ref[y_w * stride + (x_w + 0)]; |
240 | 0 | arr[2] = (int)ref[y_w * stride + (x_w + 1)]; |
241 | 0 | arr[3] = (int)ref[y_w * stride + (x_w + 2)]; |
242 | | |
243 | | // Apply kernel and round, keeping 6 extra bits of precision. |
244 | | // |
245 | | // 6 is the maximum allowable number of extra bits which will avoid |
246 | | // the intermediate values overflowing an int16_t. The most extreme |
247 | | // intermediate value occurs when: |
248 | | // * The input pixels are [0, 255, 255, 0] |
249 | | // * u_frac = 0.5 |
250 | | // In this case, the un-scaled output is 255 * 1.125 = 286.875. |
251 | | // As an integer with 6 fractional bits, that is 18360, which fits |
252 | | // in an int16_t. But with 7 fractional bits it would be 36720, |
253 | | // which is too large. |
254 | 0 | tmp[i * DISFLOW_PATCH_SIZE + j] = ROUND_POWER_OF_TWO( |
255 | 0 | get_cubic_value_int(arr, h_kernel), DISFLOW_INTERP_BITS - 6); |
256 | 0 | } |
257 | 0 | } |
258 | | |
259 | | // Vertical convolution |
260 | 0 | for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) { |
261 | 0 | for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) { |
262 | 0 | const int *p = &tmp[i * DISFLOW_PATCH_SIZE + j]; |
263 | 0 | const int arr[4] = { p[-DISFLOW_PATCH_SIZE], p[0], p[DISFLOW_PATCH_SIZE], |
264 | 0 | p[2 * DISFLOW_PATCH_SIZE] }; |
265 | 0 | const int result = get_cubic_value_int(arr, v_kernel); |
266 | | |
267 | | // Apply kernel and round. |
268 | | // This time, we have to round off the 6 extra bits which were kept |
269 | | // earlier, but we also want to keep DISFLOW_DERIV_SCALE_LOG2 extra bits |
270 | | // of precision to match the scale of the dx and dy arrays. |
271 | 0 | const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2; |
272 | 0 | const int warped = ROUND_POWER_OF_TWO(result, round_bits); |
273 | 0 | const int src_px = src[(x + j) + (y + i) * stride] << 3; |
274 | 0 | const int dt = warped - src_px; |
275 | 0 | b[0] += dx[i * DISFLOW_PATCH_SIZE + j] * dt; |
276 | 0 | b[1] += dy[i * DISFLOW_PATCH_SIZE + j] * dt; |
277 | 0 | } |
278 | 0 | } |
279 | 0 | } |
280 | | |
281 | | static inline void sobel_filter(const uint8_t *src, int src_stride, |
282 | 0 | int16_t *dst, int dst_stride, int dir) { |
283 | 0 | int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)]; |
284 | 0 | int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE; |
285 | | |
286 | | // Sobel filter kernel |
287 | | // This must have an overall scale factor equal to DISFLOW_DERIV_SCALE, |
288 | | // in order to produce correctly scaled outputs. |
289 | | // To work out the scale factor, we multiply two factors: |
290 | | // |
291 | | // * For the derivative filter (sobel_a), comparing our filter |
292 | | // image[x - 1] - image[x + 1] |
293 | | // to the standard form |
294 | | // d/dx image[x] ~= (image[x + 1] - image[x - 1]) / 2 |
295 | | // tells us that we're actually calculating -2 * d/dx image[x] |
296 | | // |
297 | | // * For the smoothing filter (sobel_b), all coefficients are positive |
298 | | // so the scale factor is just the sum of the coefficients |
299 | | // |
300 | | // Thus we need to make sure that DISFLOW_DERIV_SCALE = 2 * sum(sobel_b) |
301 | | // (and take care of the - sign from sobel_a elsewhere) |
302 | 0 | static const int16_t sobel_a[3] = { 1, 0, -1 }; |
303 | 0 | static const int16_t sobel_b[3] = { 1, 2, 1 }; |
304 | 0 | const int taps = 3; |
305 | | |
306 | | // horizontal filter |
307 | 0 | const int16_t *h_kernel = dir ? sobel_a : sobel_b; |
308 | |
309 | 0 | for (int y = -1; y < DISFLOW_PATCH_SIZE + 1; ++y) { |
310 | 0 | for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) { |
311 | 0 | int sum = 0; |
312 | 0 | for (int k = 0; k < taps; ++k) { |
313 | 0 | sum += h_kernel[k] * src[y * src_stride + (x + k - 1)]; |
314 | 0 | } |
315 | 0 | tmp[y * DISFLOW_PATCH_SIZE + x] = sum; |
316 | 0 | } |
317 | 0 | } |
318 | | |
319 | | // vertical filter |
320 | 0 | const int16_t *v_kernel = dir ? sobel_b : sobel_a; |
321 | |
322 | 0 | for (int y = 0; y < DISFLOW_PATCH_SIZE; ++y) { |
323 | 0 | for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) { |
324 | 0 | int sum = 0; |
325 | 0 | for (int k = 0; k < taps; ++k) { |
326 | 0 | sum += v_kernel[k] * tmp[(y + k - 1) * DISFLOW_PATCH_SIZE + x]; |
327 | 0 | } |
328 | 0 | dst[y * dst_stride + x] = sum; |
329 | 0 | } |
330 | 0 | } |
331 | 0 | } |
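For reference, the two separable passes above with dir == 1 are equivalent to a single 3x3 convolution whose kernel is the outer product of sobel_b (rows) and sobel_a (columns). The sketch below (ours, not part of the source) writes that kernel out; its absolute coefficients sum to 8 == 2 * sum(sobel_b), the DISFLOW_DERIV_SCALE factor discussed in the comment above, and it is the negation of the conventional Sobel x-derivative kernel, matching the sign note there.

// Equivalent non-separable form of the dir == 1 (d/dx) filter,
// kernel[r][c] = sobel_b[r] * sobel_a[c]. Illustrative only.
static const int16_t sobel_x_equivalent[3][3] = {
  { 1, 0, -1 },
  { 2, 0, -2 },
  { 1, 0, -1 },
};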
332 | | |
333 | | // Computes the components of the system of equations used to solve for |
334 | | // a flow vector. |
335 | | // |
336 | | // The flow equations are a least-squares system, derived as follows: |
337 | | // |
338 | | // For each pixel in the patch, we calculate the current error `dt`, |
339 | | // and the x and y gradients `dx` and `dy` of the source patch. |
340 | | // This means that, to first order, the squared error for this pixel is |
341 | | // |
342 | | // (dt + u * dx + v * dy)^2 |
343 | | // |
344 | | // where (u, v) are the incremental changes to the flow vector. |
345 | | // |
346 | | // We then want to find the values of u and v which minimize the sum |
347 | | // of the squared error across all pixels. Conveniently, this fits exactly |
348 | | // into the form of a least squares problem, with one equation |
349 | | // |
350 | | // u * dx + v * dy = -dt |
351 | | // |
352 | | // for each pixel. |
353 | | // |
354 | | // Summing across all pixels in a square window of size DISFLOW_PATCH_SIZE, |
355 | | // and absorbing the - sign elsewhere, this results in the least squares system |
356 | | // |
357 | | // M = |sum(dx * dx) sum(dx * dy)| |
358 | | // |sum(dx * dy) sum(dy * dy)| |
359 | | // |
360 | | // b = |sum(dx * dt)| |
361 | | // |sum(dy * dt)| |
362 | | static inline void compute_flow_matrix(const int16_t *dx, int dx_stride, |
363 | | const int16_t *dy, int dy_stride, |
364 | 0 | double *M) { |
365 | 0 | int tmp[4] = { 0 }; |
366 | |
367 | 0 | for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) { |
368 | 0 | for (int j = 0; j < DISFLOW_PATCH_SIZE; j++) { |
369 | 0 | tmp[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j]; |
370 | 0 | tmp[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j]; |
371 | | // Don't compute tmp[2], as it should be equal to tmp[1] |
372 | 0 | tmp[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j]; |
373 | 0 | } |
374 | 0 | } |
375 | | |
376 | | // Apply regularization |
377 | | // We follow the standard regularization method of adding `k * I` before |
378 | | // inverting. This ensures that the matrix will be invertible. |
379 | | // |
380 | | // Setting the regularization strength k to 1 seems to work well here, as |
381 | | // typical values coming from the other equations are very large (1e5 to |
382 | | // 1e6, with an upper limit of around 6e7, at the time of writing). |
383 | | // It also preserves the property that all matrix values are whole numbers, |
384 | | // which is convenient for integerized SIMD implementation. |
385 | 0 | tmp[0] += 1; |
386 | 0 | tmp[3] += 1; |
387 | |
388 | 0 | tmp[2] = tmp[1]; |
389 | |
390 | 0 | M[0] = (double)tmp[0]; |
391 | 0 | M[1] = (double)tmp[1]; |
392 | 0 | M[2] = (double)tmp[2]; |
393 | 0 | M[3] = (double)tmp[3]; |
394 | 0 | } |
395 | | |
396 | | // Try to invert the matrix M |
397 | | // Note: Due to the nature of how a least-squares matrix is constructed, all of |
398 | | // the eigenvalues will be >= 0, and therefore det M >= 0 as well. |
399 | | // The regularization term `+ k * I` further ensures that det M >= k^2. |
400 | | // As mentioned in compute_flow_matrix(), here we use k = 1, so det M >= 1. |
401 | | // So we don't have to worry about non-invertible matrices here. |
402 | 0 | static inline void invert_2x2(const double *M, double *M_inv) { |
403 | 0 | double det = (M[0] * M[3]) - (M[1] * M[2]); |
404 | 0 | assert(det >= 1); |
405 | 0 | const double det_inv = 1 / det; |
406 | |
407 | 0 | M_inv[0] = M[3] * det_inv; |
408 | 0 | M_inv[1] = -M[1] * det_inv; |
409 | 0 | M_inv[2] = -M[2] * det_inv; |
410 | 0 | M_inv[3] = M[0] * det_inv; |
411 | 0 | } |
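A quick worked example of the two functions above, with hypothetical gradient values chosen for round numbers: if every pixel of the 8x8 patch had dx == 1 and dy == 1, then sum(dx*dx) == sum(dx*dy) == sum(dy*dy) == 64, so after the +1 regularization M == { 65, 64, 64, 65 }. Then det M == 65 * 65 - 64 * 64 == 129, comfortably >= 1, and invert_2x2() yields M_inv == (1 / 129) * { 65, -64, -64, 65 }.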
412 | | |
413 | | void aom_compute_flow_at_point_c(const uint8_t *src, const uint8_t *ref, int x, |
414 | | int y, int width, int height, int stride, |
415 | 0 | double *u, double *v) { |
416 | 0 | double M[4]; |
417 | 0 | double M_inv[4]; |
418 | 0 | int b[2]; |
419 | 0 | int16_t dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]; |
420 | 0 | int16_t dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]; |
421 | | |
422 | | // Compute gradients within this patch |
423 | 0 | const uint8_t *src_patch = &src[y * stride + x]; |
424 | 0 | sobel_filter(src_patch, stride, dx, DISFLOW_PATCH_SIZE, 1); |
425 | 0 | sobel_filter(src_patch, stride, dy, DISFLOW_PATCH_SIZE, 0); |
426 | |
427 | 0 | compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M); |
428 | 0 | invert_2x2(M, M_inv); |
429 | |
430 | 0 | for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) { |
431 | 0 | compute_flow_vector(src, ref, width, height, stride, x, y, *u, *v, dx, dy, |
432 | 0 | b); |
433 | | |
434 | | // Solve flow equations to find a better estimate for the flow vector |
435 | | // at this point |
436 | 0 | const double step_u = M_inv[0] * b[0] + M_inv[1] * b[1]; |
437 | 0 | const double step_v = M_inv[2] * b[0] + M_inv[3] * b[1]; |
438 | 0 | *u += fclamp(step_u * DISFLOW_STEP_SIZE, -2, 2); |
439 | 0 | *v += fclamp(step_v * DISFLOW_STEP_SIZE, -2, 2); |
440 | |
441 | 0 | if (fabs(step_u) + fabs(step_v) < DISFLOW_STEP_SIZE_THRESOLD) { |
442 | | // Stop iteration when we're close to convergence |
443 | 0 | break; |
444 | 0 | } |
445 | 0 | } |
446 | 0 | } |
447 | | |
448 | | static void fill_flow_field_borders(double *flow, int width, int height, |
449 | 0 | int stride) { |
450 | | // Calculate the bounds of the rectangle which was filled in by |
451 | | // compute_flow_field() before calling this function. |
452 | | // These indices are inclusive on both ends. |
453 | 0 | const int left_index = FLOW_BORDER_INNER; |
454 | 0 | const int right_index = (width - FLOW_BORDER_INNER - 1); |
455 | 0 | const int top_index = FLOW_BORDER_INNER; |
456 | 0 | const int bottom_index = (height - FLOW_BORDER_INNER - 1); |
457 | | |
458 | | // Left area |
459 | 0 | for (int i = top_index; i <= bottom_index; i += 1) { |
460 | 0 | double *row = flow + i * stride; |
461 | 0 | const double left = row[left_index]; |
462 | 0 | for (int j = -FLOW_BORDER_OUTER; j < left_index; j++) { |
463 | 0 | row[j] = left; |
464 | 0 | } |
465 | 0 | } |
466 | | |
467 | | // Right area |
468 | 0 | for (int i = top_index; i <= bottom_index; i += 1) { |
469 | 0 | double *row = flow + i * stride; |
470 | 0 | const double right = row[right_index]; |
471 | 0 | for (int j = right_index + 1; j < width + FLOW_BORDER_OUTER; j++) { |
472 | 0 | row[j] = right; |
473 | 0 | } |
474 | 0 | } |
475 | | |
476 | | // Top area |
477 | 0 | const double *top_row = flow + top_index * stride - FLOW_BORDER_OUTER; |
478 | 0 | for (int i = -FLOW_BORDER_OUTER; i < top_index; i++) { |
479 | 0 | double *row = flow + i * stride - FLOW_BORDER_OUTER; |
480 | 0 | size_t length = width + 2 * FLOW_BORDER_OUTER; |
481 | 0 | memcpy(row, top_row, length * sizeof(*row)); |
482 | 0 | } |
483 | | |
484 | | // Bottom area |
485 | 0 | const double *bottom_row = flow + bottom_index * stride - FLOW_BORDER_OUTER; |
486 | 0 | for (int i = bottom_index + 1; i < height + FLOW_BORDER_OUTER; i++) { |
487 | 0 | double *row = flow + i * stride - FLOW_BORDER_OUTER; |
488 | 0 | size_t length = width + 2 * FLOW_BORDER_OUTER; |
489 | 0 | memcpy(row, bottom_row, length * sizeof(*row)); |
490 | 0 | } |
491 | 0 | } |
492 | | |
493 | | // Upscale one component of the flow field, from a size of |
494 | | // cur_width x cur_height to a size of (2*cur_width) x (2*cur_height), storing |
495 | | // the result back into the same buffer. This function also scales the flow |
496 | | // vector by 2, so that when we move to the next pyramid level down, the implied |
497 | | // motion vector is the same. |
498 | | // |
499 | | // The temporary buffer tmpbuf must be large enough to hold an intermediate |
500 | | // array of size stride * cur_height, *plus* FLOW_BORDER_OUTER rows above and |
501 | | // below. In other words, indices from -FLOW_BORDER_OUTER * stride to |
502 | | // (cur_height + FLOW_BORDER_OUTER) * stride - 1 must be valid. |
503 | | // |
504 | | // Note that the same stride is used for u before and after upscaling |
505 | | // and for the temporary buffer, for simplicity. |
506 | | // |
507 | | // A note on phasing: |
508 | | // |
509 | | // The flow fields at two adjacent pyramid levels are offset from each other, |
510 | | // and we need to account for this in the construction of the interpolation |
511 | | // kernels. |
512 | | // |
513 | | // Consider an 8x8 pixel patch at pyramid level n. This is split into four |
514 | | // patches at pyramid level n-1. Bringing these patches back up to pyramid level |
515 | | // n, each sub-patch covers 4x4 pixels, and between them they cover the same |
516 | | // 8x8 region. |
517 | | // |
518 | | // Therefore, at pyramid level n, two adjacent patches look like this: |
519 | | // |
520 | | // + - - - - - - - + - - - - - - - + |
521 | | // | | | |
522 | | // | x x | x x | |
523 | | // | | | |
524 | | // | # | # | |
525 | | // | | | |
526 | | // | x x | x x | |
527 | | // | | | |
528 | | // + - - - - - - - + - - - - - - - + |
529 | | // |
530 | | // where # marks the center of a patch at pyramid level n (the input to this |
531 | | // function), and x marks the center of a patch at pyramid level n-1 (the output |
532 | | // of this function). |
533 | | // |
534 | | // By counting pixels (marked by +, -, and |), we can see that the flow vectors |
535 | | // at pyramid level n-1 are offset relative to the flow vectors at pyramid |
536 | | // level n, by 1/4 of the larger (input) patch size. Therefore, our |
537 | | // interpolation kernels need to have phases of 0.25 and 0.75. |
538 | | // |
539 | | // In addition, in order to handle the frame edges correctly, we need to |
540 | | // generate one output vector to the left and one to the right of each input |
541 | | // vector, even though these must be interpolated using different source points. |
542 | | static void upscale_flow_component(double *flow, int cur_width, int cur_height, |
543 | 0 | int stride, double *tmpbuf) { |
544 | 0 | const int half_len = FLOW_UPSCALE_TAPS / 2; |
545 | | |
546 | | // Check that the outer border is large enough to avoid needing to clamp |
547 | | // the source locations |
548 | 0 | assert(half_len <= FLOW_BORDER_OUTER); |
549 | | |
550 | | // Horizontal upscale and multiply by 2 |
551 | 0 | for (int i = 0; i < cur_height; i++) { |
552 | 0 | for (int j = 0; j < cur_width; j++) { |
553 | 0 | double left = 0; |
554 | 0 | for (int k = -half_len; k < half_len; k++) { |
555 | 0 | left += |
556 | 0 | flow[i * stride + (j + k)] * flow_upscale_filter[0][k + half_len]; |
557 | 0 | } |
558 | 0 | tmpbuf[i * stride + (2 * j + 0)] = 2.0 * left; |
559 | | |
560 | | // Right output pixel is 0.25 units to the right of the input pixel |
561 | 0 | double right = 0; |
562 | 0 | for (int k = -(half_len - 1); k < (half_len + 1); k++) { |
563 | 0 | right += flow[i * stride + (j + k)] * |
564 | 0 | flow_upscale_filter[1][k + (half_len - 1)]; |
565 | 0 | } |
566 | 0 | tmpbuf[i * stride + (2 * j + 1)] = 2.0 * right; |
567 | 0 | } |
568 | 0 | } |
569 | | |
570 | | // Fill in top and bottom borders of tmpbuf |
571 | 0 | const double *top_row = &tmpbuf[0]; |
572 | 0 | for (int i = -FLOW_BORDER_OUTER; i < 0; i++) { |
573 | 0 | double *row = &tmpbuf[i * stride]; |
574 | 0 | memcpy(row, top_row, 2 * cur_width * sizeof(*row)); |
575 | 0 | } |
576 | |
577 | 0 | const double *bottom_row = &tmpbuf[(cur_height - 1) * stride]; |
578 | 0 | for (int i = cur_height; i < cur_height + FLOW_BORDER_OUTER; i++) { |
579 | 0 | double *row = &tmpbuf[i * stride]; |
580 | 0 | memcpy(row, bottom_row, 2 * cur_width * sizeof(*row)); |
581 | 0 | } |
582 | | |
583 | | // Vertical upscale |
584 | 0 | int upscaled_width = cur_width * 2; |
585 | 0 | for (int i = 0; i < cur_height; i++) { |
586 | 0 | for (int j = 0; j < upscaled_width; j++) { |
587 | 0 | double top = 0; |
588 | 0 | for (int k = -half_len; k < half_len; k++) { |
589 | 0 | top += |
590 | 0 | tmpbuf[(i + k) * stride + j] * flow_upscale_filter[0][k + half_len]; |
591 | 0 | } |
592 | 0 | flow[(2 * i) * stride + j] = top; |
593 | |
594 | 0 | double bottom = 0; |
595 | 0 | for (int k = -(half_len - 1); k < (half_len + 1); k++) { |
596 | 0 | bottom += tmpbuf[(i + k) * stride + j] * |
597 | 0 | flow_upscale_filter[1][k + (half_len - 1)]; |
598 | 0 | } |
599 | 0 | flow[(2 * i + 1) * stride + j] = bottom; |
600 | 0 | } |
601 | 0 | } |
602 | 0 | } |
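Because each row of flow_upscale_filter sums to 128/128 == 1, upscaling a constant flow field (every entry equal to c) produces the constant 2 * c, the factor of 2 coming from the explicit scaling in the horizontal pass; that is, the same motion re-expressed in the finer pyramid level's pixel units.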
603 | | |
604 | | // Note: flow->u and flow->v must be zero-initialized before this is called |
605 | | static bool compute_flow_field(const ImagePyramid *src_pyr, |
606 | | const ImagePyramid *ref_pyr, int n_levels, |
607 | 0 | FlowField *flow) { |
608 | 0 | bool mem_status = true; |
609 | |
610 | 0 | double *flow_u = flow->u; |
611 | 0 | double *flow_v = flow->v; |
612 | |
613 | 0 | double *tmpbuf0; |
614 | 0 | double *tmpbuf; |
615 | |
616 | 0 | if (n_levels < 2) { |
617 | | // tmpbuf not needed |
618 | 0 | tmpbuf0 = NULL; |
619 | 0 | tmpbuf = NULL; |
620 | 0 | } else { |
621 | | // This line must match the calculation of cur_flow_height below |
622 | 0 | const int layer1_height = src_pyr->layers[1].height >> DOWNSAMPLE_SHIFT; |
623 | |
624 | 0 | const size_t tmpbuf_size = |
625 | 0 | (layer1_height + 2 * FLOW_BORDER_OUTER) * flow->stride; |
626 | 0 | tmpbuf0 = aom_malloc(tmpbuf_size * sizeof(*tmpbuf0)); |
627 | 0 | if (!tmpbuf0) { |
628 | 0 | mem_status = false; |
629 | 0 | goto free_tmpbuf; |
630 | 0 | } |
631 | 0 | tmpbuf = tmpbuf0 + FLOW_BORDER_OUTER * flow->stride; |
632 | 0 | } |
633 | | |
634 | | // Compute flow field from coarsest to finest level of the pyramid |
635 | | // |
636 | | // Note: We stop after refining pyramid level 1 and interpolating it to |
637 | | // generate an initial flow field at level 0. We do *not* refine the dense |
638 | | // flow field at level 0. Instead, we wait until we have generated |
639 | | // correspondences by interpolating this flow field, and then refine the |
640 | | // correspondences themselves. This is both faster and gives better output |
641 | | // compared to refining the flow field at level 0 and then interpolating. |
642 | 0 | for (int level = n_levels - 1; level >= 1; --level) { |
643 | 0 | const PyramidLayer *cur_layer = &src_pyr->layers[level]; |
644 | 0 | const int cur_width = cur_layer->width; |
645 | 0 | const int cur_height = cur_layer->height; |
646 | 0 | const int cur_stride = cur_layer->stride; |
647 | |
648 | 0 | const uint8_t *src_buffer = cur_layer->buffer; |
649 | 0 | const uint8_t *ref_buffer = ref_pyr->layers[level].buffer; |
650 | |
651 | 0 | const int cur_flow_width = cur_width >> DOWNSAMPLE_SHIFT; |
652 | 0 | const int cur_flow_height = cur_height >> DOWNSAMPLE_SHIFT; |
653 | 0 | const int cur_flow_stride = flow->stride; |
654 | |
655 | 0 | for (int i = FLOW_BORDER_INNER; i < cur_flow_height - FLOW_BORDER_INNER; |
656 | 0 | i += 1) { |
657 | 0 | for (int j = FLOW_BORDER_INNER; j < cur_flow_width - FLOW_BORDER_INNER; |
658 | 0 | j += 1) { |
659 | 0 | const int flow_field_idx = i * cur_flow_stride + j; |
660 | | |
661 | | // Calculate the position of a patch of size DISFLOW_PATCH_SIZE pixels, |
662 | | // which is centered on the region covered by this flow field entry |
663 | 0 | const int patch_center_x = |
664 | 0 | (j << DOWNSAMPLE_SHIFT) + UPSAMPLE_CENTER_OFFSET; // In pixels |
665 | 0 | const int patch_center_y = |
666 | 0 | (i << DOWNSAMPLE_SHIFT) + UPSAMPLE_CENTER_OFFSET; // In pixels |
667 | 0 | const int patch_tl_x = patch_center_x - DISFLOW_PATCH_CENTER; |
668 | 0 | const int patch_tl_y = patch_center_y - DISFLOW_PATCH_CENTER; |
669 | 0 | assert(patch_tl_x >= 0); |
670 | 0 | assert(patch_tl_y >= 0); |
671 | |
672 | 0 | aom_compute_flow_at_point(src_buffer, ref_buffer, patch_tl_x, |
673 | 0 | patch_tl_y, cur_width, cur_height, cur_stride, |
674 | 0 | &flow_u[flow_field_idx], |
675 | 0 | &flow_v[flow_field_idx]); |
676 | 0 | } |
677 | 0 | } |
678 | | |
679 | | // Fill in the areas which we haven't explicitly computed, with copies |
680 | | // of the outermost values which we did compute |
681 | 0 | fill_flow_field_borders(flow_u, cur_flow_width, cur_flow_height, |
682 | 0 | cur_flow_stride); |
683 | 0 | fill_flow_field_borders(flow_v, cur_flow_width, cur_flow_height, |
684 | 0 | cur_flow_stride); |
685 | |
686 | 0 | if (level > 0) { |
687 | 0 | const int upscale_flow_width = cur_flow_width << 1; |
688 | 0 | const int upscale_flow_height = cur_flow_height << 1; |
689 | 0 | const int upscale_stride = flow->stride; |
690 | |
691 | 0 | upscale_flow_component(flow_u, cur_flow_width, cur_flow_height, |
692 | 0 | cur_flow_stride, tmpbuf); |
693 | 0 | upscale_flow_component(flow_v, cur_flow_width, cur_flow_height, |
694 | 0 | cur_flow_stride, tmpbuf); |
695 | | |
696 | | // If we didn't fill in the rightmost column or bottommost row during |
697 | | // upsampling (in order to keep the ratio to exactly 2), fill them |
698 | | // in here by copying the next closest column/row |
699 | 0 | const PyramidLayer *next_layer = &src_pyr->layers[level - 1]; |
700 | 0 | const int next_flow_width = next_layer->width >> DOWNSAMPLE_SHIFT; |
701 | 0 | const int next_flow_height = next_layer->height >> DOWNSAMPLE_SHIFT; |
702 | | |
703 | | // Rightmost column |
704 | 0 | if (next_flow_width > upscale_flow_width) { |
705 | 0 | assert(next_flow_width == upscale_flow_width + 1); |
706 | 0 | for (int i = 0; i < upscale_flow_height; i++) { |
707 | 0 | const int index = i * upscale_stride + upscale_flow_width; |
708 | 0 | flow_u[index] = flow_u[index - 1]; |
709 | 0 | flow_v[index] = flow_v[index - 1]; |
710 | 0 | } |
711 | 0 | } |
712 | | |
713 | | // Bottommost row |
714 | 0 | if (next_flow_height > upscale_flow_height) { |
715 | 0 | assert(next_flow_height == upscale_flow_height + 1); |
716 | 0 | for (int j = 0; j < next_flow_width; j++) { |
717 | 0 | const int index = upscale_flow_height * upscale_stride + j; |
718 | 0 | flow_u[index] = flow_u[index - upscale_stride]; |
719 | 0 | flow_v[index] = flow_v[index - upscale_stride]; |
720 | 0 | } |
721 | 0 | } |
722 | 0 | } |
723 | 0 | } |
724 | |
725 | 0 | free_tmpbuf: |
726 | 0 | aom_free(tmpbuf0); |
727 | 0 | return mem_status; |
728 | 0 | } |
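As a concrete sizing example (assuming the image pyramid halves each dimension per level, which the exact factor-of-2 upscaling above relies on): for a 640x480 source with 3 pyramid levels, the loop refines level 2 (160x120 pixels, i.e. a 20x15 flow field), upscales it to 40x30, refines level 1 (320x240 pixels, a 40x30 field), and upscales once more to the 80x60 field that determine_disflow_correspondence() samples at level 0.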
729 | | |
730 | 0 | static FlowField *alloc_flow_field(int frame_width, int frame_height) { |
731 | 0 | FlowField *flow = (FlowField *)aom_malloc(sizeof(FlowField)); |
732 | 0 | if (flow == NULL) return NULL; |
733 | | |
734 | | // Calculate the size of the bottom (largest) layer of the flow pyramid |
735 | 0 | flow->width = frame_width >> DOWNSAMPLE_SHIFT; |
736 | 0 | flow->height = frame_height >> DOWNSAMPLE_SHIFT; |
737 | 0 | flow->stride = flow->width + 2 * FLOW_BORDER_OUTER; |
738 | |
739 | 0 | const size_t flow_size = |
740 | 0 | flow->stride * (size_t)(flow->height + 2 * FLOW_BORDER_OUTER); |
741 | |
742 | 0 | flow->buf0 = aom_calloc(2 * flow_size, sizeof(*flow->buf0)); |
743 | 0 | if (!flow->buf0) { |
744 | 0 | aom_free(flow); |
745 | 0 | return NULL; |
746 | 0 | } |
747 | | |
748 | 0 | flow->u = flow->buf0 + FLOW_BORDER_OUTER * flow->stride + FLOW_BORDER_OUTER; |
749 | 0 | flow->v = flow->u + flow_size; |
750 | |
751 | 0 | return flow; |
752 | 0 | } |
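For a 1920x1080 frame this works out to flow->width == 240, flow->height == 135, and, with FLOW_BORDER_OUTER == 2, flow->stride == 244 and flow_size == 244 * 139 == 33916 entries. buf0 therefore holds 2 * 33916 doubles, the u plane followed by the v plane, with u and v each pointing FLOW_BORDER_OUTER rows and columns into its plane so that interpolation and upsampling can read the padded border without clamping.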
753 | | |
754 | 0 | static void free_flow_field(FlowField *flow) { |
755 | 0 | aom_free(flow->buf0); |
756 | 0 | aom_free(flow); |
757 | 0 | } |
758 | | |
759 | | // Compute flow field between `src` and `ref`, and then use that flow to |
760 | | // compute a global motion model relating the two frames. |
761 | | // |
762 | | // Following the convention in flow_estimation.h, the flow vectors are computed |
763 | | // at fixed points in `src` and point to the corresponding locations in `ref`, |
764 | | // regardless of the temporal ordering of the frames. |
765 | | bool av1_compute_global_motion_disflow( |
766 | | TransformationType type, YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *ref, |
767 | | int bit_depth, int downsample_level, MotionModel *motion_models, |
768 | 0 | int num_motion_models, bool *mem_alloc_failed) { |
769 | | // Precompute information we will need about each frame |
770 | 0 | ImagePyramid *src_pyramid = src->y_pyramid; |
771 | 0 | CornerList *src_corners = src->corners; |
772 | 0 | ImagePyramid *ref_pyramid = ref->y_pyramid; |
773 | |
774 | 0 | const int src_layers = |
775 | 0 | aom_compute_pyramid(src, bit_depth, DISFLOW_PYRAMID_LEVELS, src_pyramid); |
776 | 0 | const int ref_layers = |
777 | 0 | aom_compute_pyramid(ref, bit_depth, DISFLOW_PYRAMID_LEVELS, ref_pyramid); |
778 | |
779 | 0 | if (src_layers < 0 || ref_layers < 0) { |
780 | 0 | *mem_alloc_failed = true; |
781 | 0 | return false; |
782 | 0 | } |
783 | 0 | if (!av1_compute_corner_list(src, bit_depth, downsample_level, src_corners)) { |
784 | 0 | *mem_alloc_failed = true; |
785 | 0 | return false; |
786 | 0 | } |
787 | | |
788 | 0 | assert(src_layers == ref_layers); |
789 | |
790 | 0 | const int src_width = src_pyramid->layers[0].width; |
791 | 0 | const int src_height = src_pyramid->layers[0].height; |
792 | 0 | assert(ref_pyramid->layers[0].width == src_width); |
793 | 0 | assert(ref_pyramid->layers[0].height == src_height); |
794 | |
795 | 0 | FlowField *flow = alloc_flow_field(src_width, src_height); |
796 | 0 | if (!flow) { |
797 | 0 | *mem_alloc_failed = true; |
798 | 0 | return false; |
799 | 0 | } |
800 | | |
801 | 0 | if (!compute_flow_field(src_pyramid, ref_pyramid, src_layers, flow)) { |
802 | 0 | *mem_alloc_failed = true; |
803 | 0 | free_flow_field(flow); |
804 | 0 | return false; |
805 | 0 | } |
806 | | |
807 | | // find correspondences between the two images using the flow field |
808 | 0 | Correspondence *correspondences = |
809 | 0 | aom_malloc(src_corners->num_corners * sizeof(*correspondences)); |
810 | 0 | if (!correspondences) { |
811 | 0 | *mem_alloc_failed = true; |
812 | 0 | free_flow_field(flow); |
813 | 0 | return false; |
814 | 0 | } |
815 | | |
816 | 0 | const int num_correspondences = determine_disflow_correspondence( |
817 | 0 | src_pyramid, ref_pyramid, src_corners, flow, correspondences); |
818 | |
819 | 0 | bool result = ransac(correspondences, num_correspondences, type, |
820 | 0 | motion_models, num_motion_models, mem_alloc_failed); |
821 | |
822 | 0 | aom_free(correspondences); |
823 | 0 | free_flow_field(flow); |
824 | 0 | return result; |
825 | 0 | } |