/src/aom/av1/common/x86/warp_plane_sse2.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2019, Alliance for Open Media. All rights reserved |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <emmintrin.h> |
13 | | |
14 | | #include "aom_dsp/x86/synonyms.h" |
15 | | #include "av1/common/warped_motion.h" |
16 | | #include "config/av1_rtcd.h" |
17 | | |
18 | | int64_t av1_calc_frame_error_sse2(const uint8_t *const ref, int ref_stride, |
19 | | const uint8_t *const dst, int p_width, |
20 | 0 | int p_height, int dst_stride) { |
21 | 0 | int64_t sum_error = 0; |
22 | 0 | int i, j; |
23 | 0 | __m128i row_error, col_error; |
24 | 0 | __m128i zero = _mm_setzero_si128(); |
25 | 0 | __m128i dup_255 = _mm_set1_epi16(255); |
26 | 0 | col_error = zero; |
27 | 0 | for (i = 0; i < (p_height); i++) { |
28 | 0 | row_error = zero; |
29 | 0 | for (j = 0; j < (p_width / 16); j++) { |
30 | 0 | __m128i ref_8 = |
31 | 0 | _mm_load_si128((__m128i *)(ref + (j * 16) + (i * ref_stride))); |
32 | 0 | __m128i dst_8 = |
33 | 0 | _mm_load_si128((__m128i *)(dst + (j * 16) + (i * dst_stride))); |
34 | 0 | __m128i ref_16_lo = _mm_unpacklo_epi8(ref_8, zero); |
35 | 0 | __m128i ref_16_hi = _mm_unpackhi_epi8(ref_8, zero); |
36 | 0 | __m128i dst_16_lo = _mm_unpacklo_epi8(dst_8, zero); |
37 | 0 | __m128i dst_16_hi = _mm_unpackhi_epi8(dst_8, zero); |
38 | |
39 | 0 | __m128i diff_1 = |
40 | 0 | _mm_add_epi16(_mm_sub_epi16(dst_16_lo, ref_16_lo), dup_255); |
41 | 0 | __m128i diff_2 = |
42 | 0 | _mm_add_epi16(_mm_sub_epi16(dst_16_hi, ref_16_hi), dup_255); |
43 | |
44 | 0 | __m128i error_1_lo = |
45 | 0 | _mm_set_epi32(error_measure_lut[_mm_extract_epi16(diff_1, 3)], |
46 | 0 | error_measure_lut[_mm_extract_epi16(diff_1, 2)], |
47 | 0 | error_measure_lut[_mm_extract_epi16(diff_1, 1)], |
48 | 0 | error_measure_lut[_mm_extract_epi16(diff_1, 0)]); |
49 | 0 | __m128i error_1_hi = |
50 | 0 | _mm_set_epi32(error_measure_lut[_mm_extract_epi16(diff_1, 7)], |
51 | 0 | error_measure_lut[_mm_extract_epi16(diff_1, 6)], |
52 | 0 | error_measure_lut[_mm_extract_epi16(diff_1, 5)], |
53 | 0 | error_measure_lut[_mm_extract_epi16(diff_1, 4)]); |
54 | 0 | __m128i error_2_lo = |
55 | 0 | _mm_set_epi32(error_measure_lut[_mm_extract_epi16(diff_2, 3)], |
56 | 0 | error_measure_lut[_mm_extract_epi16(diff_2, 2)], |
57 | 0 | error_measure_lut[_mm_extract_epi16(diff_2, 1)], |
58 | 0 | error_measure_lut[_mm_extract_epi16(diff_2, 0)]); |
59 | 0 | __m128i error_2_hi = |
60 | 0 | _mm_set_epi32(error_measure_lut[_mm_extract_epi16(diff_2, 7)], |
61 | 0 | error_measure_lut[_mm_extract_epi16(diff_2, 6)], |
62 | 0 | error_measure_lut[_mm_extract_epi16(diff_2, 5)], |
63 | 0 | error_measure_lut[_mm_extract_epi16(diff_2, 4)]); |
64 | |
65 | 0 | __m128i error_1 = _mm_add_epi32(error_1_lo, error_1_hi); |
66 | 0 | __m128i error_2 = _mm_add_epi32(error_2_lo, error_2_hi); |
67 | 0 | __m128i error_1_2 = _mm_add_epi32(error_1, error_2); |
68 | |
69 | 0 | row_error = _mm_add_epi32(row_error, error_1_2); |
70 | 0 | } |
71 | 0 | __m128i col_error_lo = _mm_unpacklo_epi32(row_error, zero); |
72 | 0 | __m128i col_error_hi = _mm_unpackhi_epi32(row_error, zero); |
73 | 0 | __m128i col_error_temp = _mm_add_epi64(col_error_lo, col_error_hi); |
74 | 0 | col_error = _mm_add_epi64(col_error, col_error_temp); |
75 | | // Error summation for the remaining width, which is not a multiple of 16 |
76 | 0 | if (p_width & 0xf) { |
77 | 0 | for (int l = j * 16; l < p_width; ++l) { |
78 | 0 | sum_error += (int64_t)error_measure(dst[l + i * dst_stride] - |
79 | 0 | ref[l + i * ref_stride]); |
80 | 0 | } |
81 | 0 | } |
82 | 0 | } |
83 | 0 | int64_t sum_error_d_0, sum_error_d_1; |
84 | 0 | xx_storel_64(&sum_error_d_0, col_error); |
85 | 0 | xx_storel_64(&sum_error_d_1, _mm_srli_si128(col_error, 8)); |
86 | 0 | sum_error = (sum_error + sum_error_d_0 + sum_error_d_1); |
87 | 0 | return sum_error; |
88 | 0 | } |
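
For reference, the uncovered SSE2 kernel above vectorizes a straightforward scalar accumulation: each dst-ref difference is biased by 255 (the dup_255 vector) so it can index error_measure_lut, the looked-up values are summed per row, and any width remainder falls back to the scalar tail loop shown at source lines 76-81. Below is a minimal scalar sketch of that same computation, not part of the file above: the function name calc_frame_error_scalar_sketch is hypothetical, and it assumes error_measure() and error_measure_lut[] are available from av1/common/warped_motion.h, exactly as the tail loop already uses them.

/*
 * Hypothetical scalar reference for av1_calc_frame_error_sse2(); assumes
 * error_measure() from av1/common/warped_motion.h, as used by the tail loop.
 */
#include <stdint.h>

#include "av1/common/warped_motion.h"

static int64_t calc_frame_error_scalar_sketch(const uint8_t *ref,
                                              int ref_stride,
                                              const uint8_t *dst, int p_width,
                                              int p_height, int dst_stride) {
  int64_t sum_error = 0;
  for (int i = 0; i < p_height; ++i) {
    for (int j = 0; j < p_width; ++j) {
      // error_measure() applies the same +255 bias that the SSE2 path adds
      // via dup_255 before indexing error_measure_lut.
      sum_error += (int64_t)error_measure(dst[j + i * dst_stride] -
                                          ref[j + i * ref_stride]);
    }
  }
  return sum_error;
}

The SIMD path processes 16 pixels per inner iteration (widening bytes to 16-bit lanes, then gathering LUT entries via _mm_extract_epi16), accumulates 32-bit partial sums per row, widens them to 64 bits per picture row, and uses the scalar form only for the width remainder.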