/src/libvpx/vpx_dsp/avg.c
/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdlib.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"

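// Returns the rounded average of an 8x8 block of pixels; p is the row stride.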
unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) {
  int i, j;
  int sum = 0;
  for (i = 0; i < 8; ++i, s += p)
    for (j = 0; j < 8; sum += s[j], ++j) {
    }

  return (sum + 32) >> 6;
}

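// Returns the rounded average of a 4x4 block of pixels; p is the row stride.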
unsigned int vpx_avg_4x4_c(const uint8_t *s, int p) {
  int i, j;
  int sum = 0;
  for (i = 0; i < 4; ++i, s += p)
    for (j = 0; j < 4; sum += s[j], ++j) {
    }

  return (sum + 8) >> 4;
}

#if CONFIG_VP9_HIGHBITDEPTH
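// First-pass 8-point Hadamard butterfly over one strided column of src_diff;
// the outputs are stored in a permuted order.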
// src_diff: 13 bit, dynamic range [-4095, 4095]
// coeff: 16 bit
static void hadamard_highbd_col8_first_pass(const int16_t *src_diff,
                                            ptrdiff_t src_stride,
                                            int16_t *coeff) {
  int16_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
  int16_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
  int16_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
  int16_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
  int16_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
  int16_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
  int16_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
  int16_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];

  int16_t c0 = b0 + b2;
  int16_t c1 = b1 + b3;
  int16_t c2 = b0 - b2;
  int16_t c3 = b1 - b3;
  int16_t c4 = b4 + b6;
  int16_t c5 = b5 + b7;
  int16_t c6 = b4 - b6;
  int16_t c7 = b5 - b7;

  coeff[0] = c0 + c4;
  coeff[7] = c1 + c5;
  coeff[3] = c2 + c6;
  coeff[4] = c3 + c7;
  coeff[2] = c0 - c4;
  coeff[6] = c1 - c5;
  coeff[1] = c2 - c6;
  coeff[5] = c3 - c7;
}

// src_diff: 16 bit, dynamic range [-32760, 32760]
// coeff: 19 bit
static void hadamard_highbd_col8_second_pass(const int16_t *src_diff,
                                             ptrdiff_t src_stride,
                                             int32_t *coeff) {
  int32_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
  int32_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
  int32_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
  int32_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
  int32_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
  int32_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
  int32_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
  int32_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];

  int32_t c0 = b0 + b2;
  int32_t c1 = b1 + b3;
  int32_t c2 = b0 - b2;
  int32_t c3 = b1 - b3;
  int32_t c4 = b4 + b6;
  int32_t c5 = b5 + b7;
  int32_t c6 = b4 - b6;
  int32_t c7 = b5 - b7;

  coeff[0] = c0 + c4;
  coeff[7] = c1 + c5;
  coeff[3] = c2 + c6;
  coeff[4] = c3 + c7;
  coeff[2] = c0 - c4;
  coeff[6] = c1 - c5;
  coeff[1] = c2 - c6;
  coeff[5] = c3 - c7;
}

// The order of the output coefficients of the Hadamard transform is not
// important. For optimization purposes the final transpose may be skipped.
void vpx_highbd_hadamard_8x8_c(const int16_t *src_diff, ptrdiff_t src_stride,
                               tran_low_t *coeff) {
  int idx;
  int16_t buffer[64];
  int32_t buffer2[64];
  int16_t *tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    // src_diff: 13 bit
    // buffer: 16 bit, dynamic range [-32760, 32760]
    hadamard_highbd_col8_first_pass(src_diff, src_stride, tmp_buf);
    tmp_buf += 8;
    ++src_diff;
  }

  tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    // buffer: 16 bit
    // buffer2: 19 bit, dynamic range [-262080, 262080]
    hadamard_highbd_col8_second_pass(tmp_buf, 8, buffer2 + 8 * idx);
    ++tmp_buf;
  }

  for (idx = 0; idx < 64; ++idx) coeff[idx] = (tran_low_t)buffer2[idx];
}

// In place 16x16 2D Hadamard transform
void vpx_highbd_hadamard_16x16_c(const int16_t *src_diff, ptrdiff_t src_stride,
                                 tran_low_t *coeff) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    // src_diff: 13 bit, dynamic range [-4095, 4095]
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
    vpx_highbd_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
  }

  // coeff: 19 bit, dynamic range [-262080, 262080]
  for (idx = 0; idx < 64; ++idx) {
    tran_low_t a0 = coeff[0];
    tran_low_t a1 = coeff[64];
    tran_low_t a2 = coeff[128];
    tran_low_t a3 = coeff[192];

    tran_low_t b0 = (a0 + a1) >> 1;
    tran_low_t b1 = (a0 - a1) >> 1;
    tran_low_t b2 = (a2 + a3) >> 1;
    tran_low_t b3 = (a2 - a3) >> 1;

    // new coeff dynamic range: 20 bit
    coeff[0] = b0 + b2;
    coeff[64] = b1 + b3;
    coeff[128] = b0 - b2;
    coeff[192] = b1 - b3;

    ++coeff;
  }
}

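// In place 32x32 2D Hadamard transform, built from four 16x16 transforms
// followed by a cross-block butterfly.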
void vpx_highbd_hadamard_32x32_c(const int16_t *src_diff, ptrdiff_t src_stride,
                                 tran_low_t *coeff) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    // src_diff: 13 bit, dynamic range [-4095, 4095]
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 16 * src_stride + (idx & 0x01) * 16;
    vpx_highbd_hadamard_16x16_c(src_ptr, src_stride, coeff + idx * 256);
  }

  // coeff: 20 bit
  for (idx = 0; idx < 256; ++idx) {
    tran_low_t a0 = coeff[0];
    tran_low_t a1 = coeff[256];
    tran_low_t a2 = coeff[512];
    tran_low_t a3 = coeff[768];

    tran_low_t b0 = (a0 + a1) >> 2;
    tran_low_t b1 = (a0 - a1) >> 2;
    tran_low_t b2 = (a2 + a3) >> 2;
    tran_low_t b3 = (a2 - a3) >> 2;

    // new coeff dynamic range: 20 bit
    coeff[0] = b0 + b2;
    coeff[256] = b1 + b3;
    coeff[512] = b0 - b2;
    coeff[768] = b1 - b3;

    ++coeff;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

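// 8-point Hadamard butterfly, used for both passes of the 8x8 transform
// below; the outputs are stored in a permuted order.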
// src_diff: first pass, 9 bit, dynamic range [-255, 255]
//           second pass, 12 bit, dynamic range [-2040, 2040]
static void hadamard_col8(const int16_t *src_diff, ptrdiff_t src_stride,
                          int16_t *coeff) {
  int16_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
  int16_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
  int16_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
  int16_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
  int16_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
  int16_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
  int16_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
  int16_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];

  int16_t c0 = b0 + b2;
  int16_t c1 = b1 + b3;
  int16_t c2 = b0 - b2;
  int16_t c3 = b1 - b3;
  int16_t c4 = b4 + b6;
  int16_t c5 = b5 + b7;
  int16_t c6 = b4 - b6;
  int16_t c7 = b5 - b7;

  coeff[0] = c0 + c4;
  coeff[7] = c1 + c5;
  coeff[3] = c2 + c6;
  coeff[4] = c3 + c7;
  coeff[2] = c0 - c4;
  coeff[6] = c1 - c5;
  coeff[1] = c2 - c6;
  coeff[5] = c3 - c7;
}

// The order of the output coefficients of the Hadamard transform is not
// important. For optimization purposes the final transpose may be skipped.
void vpx_hadamard_8x8_c(const int16_t *src_diff, ptrdiff_t src_stride,
                        tran_low_t *coeff) {
  int idx;
  int16_t buffer[64];
  int16_t buffer2[64];
  int16_t *tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    hadamard_col8(src_diff, src_stride, tmp_buf);  // src_diff: 9 bit
                                                   // dynamic range [-255, 255]
    tmp_buf += 8;
    ++src_diff;
  }

  tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    hadamard_col8(tmp_buf, 8, buffer2 + 8 * idx);  // tmp_buf: 12 bit
                                                   // dynamic range [-2040, 2040]
                                                   // buffer2: 15 bit
                                                   // dynamic range [-16320, 16320]
    ++tmp_buf;
  }

  for (idx = 0; idx < 64; ++idx) coeff[idx] = (tran_low_t)buffer2[idx];
}

// In place 16x16 2D Hadamard transform
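// Built from four 8x8 Hadamard transforms followed by a cross-block
// butterfly; the >> 1 normalization keeps the combined coefficients within
// 16 bits.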
void vpx_hadamard_16x16_c(const int16_t *src_diff, ptrdiff_t src_stride,
                          tran_low_t *coeff) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    // src_diff: 9 bit, dynamic range [-255, 255]
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
    vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
  }

  // coeff: 15 bit, dynamic range [-16320, 16320]
  for (idx = 0; idx < 64; ++idx) {
    tran_low_t a0 = coeff[0];
    tran_low_t a1 = coeff[64];
    tran_low_t a2 = coeff[128];
    tran_low_t a3 = coeff[192];

    tran_low_t b0 = (a0 + a1) >> 1;  // (a0 + a1): 16 bit, [-32640, 32640]
    tran_low_t b1 = (a0 - a1) >> 1;  // b0-b3: 15 bit, dynamic range
    tran_low_t b2 = (a2 + a3) >> 1;  // [-16320, 16320]
    tran_low_t b3 = (a2 - a3) >> 1;

    coeff[0] = b0 + b2;  // 16 bit, [-32640, 32640]
    coeff[64] = b1 + b3;
    coeff[128] = b0 - b2;
    coeff[192] = b1 - b3;

    ++coeff;
  }
}

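// In place 32x32 2D Hadamard transform, built from four 16x16 transforms
// followed by a cross-block butterfly; the >> 2 normalization keeps the
// combined coefficients within 16 bits.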
void vpx_hadamard_32x32_c(const int16_t *src_diff, ptrdiff_t src_stride,
                          tran_low_t *coeff) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    // src_diff: 9 bit, dynamic range [-255, 255]
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 16 * src_stride + (idx & 0x01) * 16;
    vpx_hadamard_16x16_c(src_ptr, src_stride, coeff + idx * 256);
  }

  // coeff: 16 bit, dynamic range [-32768, 32767]
  for (idx = 0; idx < 256; ++idx) {
    tran_low_t a0 = coeff[0];
    tran_low_t a1 = coeff[256];
    tran_low_t a2 = coeff[512];
    tran_low_t a3 = coeff[768];

    tran_low_t b0 = (a0 + a1) >> 2;  // (a0 + a1): 17 bit, [-65536, 65535]
    tran_low_t b1 = (a0 - a1) >> 2;  // b0-b3: 15 bit, dynamic range
    tran_low_t b2 = (a2 + a3) >> 2;  // [-16384, 16383]
    tran_low_t b3 = (a2 - a3) >> 2;

    coeff[0] = b0 + b2;  // 16 bit, [-32768, 32767]
    coeff[256] = b1 + b3;
    coeff[512] = b0 - b2;
    coeff[768] = b1 - b3;

    ++coeff;
  }
}

#if CONFIG_VP9_HIGHBITDEPTH
// coeff: dynamic range 20 bit.
// length: value range {16, 64, 256, 1024}.
int vpx_highbd_satd_c(const tran_low_t *coeff, int length) {
  int i;
  int satd = 0;
  for (i = 0; i < length; ++i) satd += abs(coeff[i]);

  // satd: 30 bits
  return satd;
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
int vpx_satd_c(const tran_low_t *coeff, int length) {
  int i;
  int satd = 0;
  for (i = 0; i < length; ++i) satd += abs(coeff[i]);

  // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024]
  return satd;
}

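// Illustrative sketch (not part of the library): the SATD of one 8x8 residual
// block would typically be obtained by pairing the Hadamard transform above
// with vpx_satd_c; src_diff and stride here are assumed caller-provided:
//
//   tran_low_t coeff[64];
//   vpx_hadamard_8x8_c(src_diff, stride, coeff);
//   int satd = vpx_satd_c(coeff, 64);
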
// Integer projection onto row vectors.
// height: value range {16, 32, 64}.
void vpx_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref,
                       const int ref_stride, const int height) {
  int idx;
  const int norm_factor = height >> 1;
  assert(height >= 2);
  for (idx = 0; idx < 16; ++idx) {
    int i;
    hbuf[idx] = 0;
    // hbuf[idx]: 14 bit, dynamic range [0, 16320].
    for (i = 0; i < height; ++i) hbuf[idx] += ref[i * ref_stride];
    // hbuf[idx]: 9 bit, dynamic range [0, 510].
    hbuf[idx] /= norm_factor;
    ++ref;
  }
}

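// Returns the sum of one row of width reference pixels.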
// width: value range {16, 32, 64}.
int16_t vpx_int_pro_col_c(const uint8_t *ref, const int width) {
  int idx;
  int16_t sum = 0;
  // sum: 14 bit, dynamic range [0, 16320]
  for (idx = 0; idx < width; ++idx) sum += ref[idx];
  return sum;
}

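// Returns sum((ref - src)^2) - (sum(ref - src))^2 / width for two vectors of
// length (4 << bwl), i.e. width times the variance of their difference.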
// ref: [0 - 510]
// src: [0 - 510]
// bwl: {2, 3, 4}
int vpx_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl) {
  int i;
  int width = 4 << bwl;
  int sse = 0, mean = 0, var;

  for (i = 0; i < width; ++i) {
    int diff = ref[i] - src[i];  // diff: dynamic range [-510, 510], 10 bits.
    mean += diff;                // mean: dynamic range 16 bits.
    sse += diff * diff;          // sse: dynamic range 26 bits.
  }

  // (mean * mean): dynamic range 31 bits.
  var = sse - ((mean * mean) >> (bwl + 2));
  return var;
}

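// Writes the minimum and maximum absolute difference between co-located
// pixels of two 8x8 blocks (strides p and dp) to *min and *max.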
void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp,
                      int *min, int *max) {
  int i, j;
  *min = 255;
  *max = 0;
  for (i = 0; i < 8; ++i, s += p, d += dp) {
    for (j = 0; j < 8; ++j) {
      int diff = abs(s[j] - d[j]);
      *min = diff < *min ? diff : *min;
      *max = diff > *max ? diff : *max;
    }
  }
}

#if CONFIG_VP9_HIGHBITDEPTH
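// High bit depth versions of the functions above; s8 and d8 carry 16-bit
// samples and are unpacked with CONVERT_TO_SHORTPTR().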
unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) {
  int i, j;
  int sum = 0;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  for (i = 0; i < 8; ++i, s += p)
    for (j = 0; j < 8; sum += s[j], ++j) {
    }

  return (sum + 32) >> 6;
}

unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) {
  int i, j;
  int sum = 0;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  for (i = 0; i < 4; ++i, s += p)
    for (j = 0; j < 4; sum += s[j], ++j) {
    }

  return (sum + 8) >> 4;
}

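// Writes the minimum and maximum absolute difference between co-located
// high bit depth pixels of two 8x8 blocks to *min and *max.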
void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
                             int dp, int *min, int *max) {
  int i, j;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  const uint16_t *d = CONVERT_TO_SHORTPTR(d8);
  *min = 65535;
  *max = 0;
  for (i = 0; i < 8; ++i, s += p, d += dp) {
    for (j = 0; j < 8; ++j) {
      int diff = abs(s[j] - d[j]);
      *min = diff < *min ? diff : *min;
      *max = diff > *max ? diff : *max;
    }
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH