/src/libvpx/vpx_dsp/x86/quantize_avx.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2017 The WebM project authors. All Rights Reserved. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license |
5 | | * that can be found in the LICENSE file in the root of the source |
6 | | * tree. An additional intellectual property rights grant can be found |
7 | | * in the file PATENTS. All contributing project authors may |
8 | | * be found in the AUTHORS file in the root of the source tree. |
9 | | */ |
10 | | |
11 | | #include <assert.h> |
12 | | #if defined(_MSC_VER) |
13 | | #include <intrin.h> |
14 | | #endif |
15 | | #include <immintrin.h> |
16 | | |
17 | | #include "./vpx_dsp_rtcd.h" |
18 | | #include "vpx/vpx_integer.h" |
19 | | #include "vpx_dsp/x86/bitdepth_conversion_sse2.h" |
20 | | #include "vpx_dsp/x86/quantize_sse2.h" |
21 | | #include "vpx_dsp/x86/quantize_ssse3.h" |
22 | | #include "vp9/common/vp9_scan.h" |
23 | | #include "vp9/encoder/vp9_block.h" |
24 | | |
// Quantizes n_coeffs coefficients (n_coeffs is a multiple of 16) using the
// zbin/round/quant/shift parameters from mb_plane, writing the quantized
// values to qcoeff_ptr, the dequantized reconstruction to dqcoeff_ptr, and
// the end-of-block index (1 past the last nonzero coeff in scan order, 0 if
// all zero) to *eob_ptr.  AVX variant: uses SSE4.1 ptest to skip whole
// 16-coefficient groups that fall entirely inside the zbin dead zone.
void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                        const struct macroblock_plane *const mb_plane,
                        tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                        const int16_t *dequant_ptr, uint16_t *eob_ptr,
                        const struct ScanOrder *const scan_order) {
  const __m128i zero = _mm_setzero_si128();
  const __m256i big_zero = _mm256_setzero_si256();
  int index;
  const int16_t *iscan = scan_order->iscan;

  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1;
  __m128i qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1;
  __m128i all_zero;
  // Running per-lane maximum of (iscan + 1) over nonzero outputs.
  __m128i eob = zero, eob0;

  // Pre-set so the early return on the all-zero 16-coeff case is correct.
  *eob_ptr = 0;

  load_b_values(mb_plane, &zbin, &round, &quant, dequant_ptr, &dequant, &shift);

  // Do DC and first 15 AC.
  coeff0 = load_tran_low(coeff_ptr);
  coeff1 = load_tran_low(coeff_ptr + 8);

  // Work on magnitudes; signs are reapplied after quantization.
  qcoeff0 = _mm_abs_epi16(coeff0);
  qcoeff1 = _mm_abs_epi16(coeff1);

  // The low half of zbin holds the DC threshold in lane 0 and the AC
  // threshold in lanes 1-7; duplicate the AC half before the second compare.
  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

  all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
  if (_mm_test_all_zeros(all_zero, all_zero)) {
    // No coefficient exceeded zbin: zero this whole 16-coeff group.
    _mm256_store_si256((__m256i *)(qcoeff_ptr), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
    // tran_low_t is wider in the high-bitdepth build, so 16 coefficients
    // span two 256-bit stores.
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    if (n_coeffs == 16) return;

    // Drop the DC values so the AC-only loop uses pure AC parameters.
    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
  } else {
    // qcoeff0 still needs the DC parameters in lane 0; switch to AC-only
    // parameters before processing qcoeff1.
    calculate_qcoeff(&qcoeff0, round, quant, shift);
    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    // Reinsert signs
    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    // Mask out zbin threshold coeffs
    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr);
    store_tran_low(qcoeff1, qcoeff_ptr + 8);

    // Dequant lane 0 is the DC step for the first store only; switch to the
    // AC step for the remaining coefficients.
    calculate_dqcoeff_and_store(qcoeff0, dequant, dqcoeff_ptr);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
    calculate_dqcoeff_and_store(qcoeff1, dequant, dqcoeff_ptr + 8);

    eob = scan_for_eob(&qcoeff0, &qcoeff1, iscan, 0, zero);
  }

  // AC only loop.
  for (index = 16; index < n_coeffs; index += 16) {
    coeff0 = load_tran_low(coeff_ptr + index);
    coeff1 = load_tran_low(coeff_ptr + index + 8);

    qcoeff0 = _mm_abs_epi16(coeff0);
    qcoeff1 = _mm_abs_epi16(coeff1);

    // zbin now holds the AC threshold in every lane.
    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

    all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
    if (_mm_test_all_zeros(all_zero, all_zero)) {
      // Entire group below zbin: store zeros and skip the arithmetic.
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index + 8), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      continue;
    }

    calculate_qcoeff(&qcoeff0, round, quant, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr + index);
    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);

    calculate_dqcoeff_and_store(qcoeff0, dequant, dqcoeff_ptr + index);
    calculate_dqcoeff_and_store(qcoeff1, dequant, dqcoeff_ptr + index + 8);

    eob0 = scan_for_eob(&qcoeff0, &qcoeff1, iscan, index, zero);
    eob = _mm_max_epi16(eob, eob0);
  }

  // Reduce the per-lane eob maxima to a single scalar.
  *eob_ptr = accumulate_eob(eob);
}
140 | | |
// 32x32 variant of vpx_quantize_b_avx: quantizes a fixed 1024-coefficient
// block using the _32x32 parameter loading and dequant helpers (which apply
// the extra rounding/shift semantics for 32x32 transforms).  Writes
// quantized values to qcoeff_ptr, dequantized values to dqcoeff_ptr, and the
// end-of-block index to *eob_ptr.  Unlike the variable-length version there
// is no early return, so *eob_ptr is only written once at the end.
void vpx_quantize_b_32x32_avx(const tran_low_t *coeff_ptr,
                              const struct macroblock_plane *const mb_plane,
                              tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                              const int16_t *dequant_ptr, uint16_t *eob_ptr,
                              const struct ScanOrder *const scan_order) {
  const __m128i zero = _mm_setzero_si128();
  const __m256i big_zero = _mm256_setzero_si256();
  int index;
  const int16_t *iscan = scan_order->iscan;

  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1;
  __m128i qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1;
  __m128i all_zero;
  // Running per-lane maximum of (iscan + 1) over nonzero outputs.
  __m128i eob = zero, eob0;

  load_b_values32x32(mb_plane, &zbin, &round, &quant, dequant_ptr, &dequant,
                     &shift);

  // Do DC and first 15 AC.
  coeff0 = load_tran_low(coeff_ptr);
  coeff1 = load_tran_low(coeff_ptr + 8);

  // Work on magnitudes; signs are reapplied after quantization.
  qcoeff0 = _mm_abs_epi16(coeff0);
  qcoeff1 = _mm_abs_epi16(coeff1);

  // The low half of zbin holds the DC threshold in lane 0 and the AC
  // threshold in lanes 1-7; duplicate the AC half before the second compare.
  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC.
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

  all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
  if (_mm_test_all_zeros(all_zero, all_zero)) {
    // No coefficient exceeded zbin: zero this whole 16-coeff group.
    _mm256_store_si256((__m256i *)(qcoeff_ptr), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
    // tran_low_t is wider in the high-bitdepth build, so 16 coefficients
    // span two 256-bit stores.
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    // Drop the DC values so the AC-only loop uses pure AC parameters.
    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
  } else {
    // qcoeff0 still needs the DC parameters in lane 0; switch to AC-only
    // parameters before processing qcoeff1.
    calculate_qcoeff(&qcoeff0, round, quant, shift);
    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    // Reinsert signs.
    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    // Mask out zbin threshold coeffs.
    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr);
    store_tran_low(qcoeff1, qcoeff_ptr + 8);

    // Dequant lane 0 is the DC step for the first store only; switch to the
    // AC step for the remaining coefficients.
    calculate_dqcoeff_and_store_32x32(qcoeff0, dequant, zero, dqcoeff_ptr);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
    calculate_dqcoeff_and_store_32x32(qcoeff1, dequant, zero, dqcoeff_ptr + 8);

    eob = scan_for_eob(&qcoeff0, &qcoeff1, iscan, 0, zero);
  }

  // AC only loop.
  for (index = 16; index < 32 * 32; index += 16) {
    coeff0 = load_tran_low(coeff_ptr + index);
    coeff1 = load_tran_low(coeff_ptr + index + 8);

    qcoeff0 = _mm_abs_epi16(coeff0);
    qcoeff1 = _mm_abs_epi16(coeff1);

    // zbin now holds the AC threshold in every lane.
    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

    all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
    if (_mm_test_all_zeros(all_zero, all_zero)) {
      // Entire group below zbin: store zeros and skip the arithmetic.
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index + 8), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      continue;
    }

    calculate_qcoeff(&qcoeff0, round, quant, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr + index);
    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);

    calculate_dqcoeff_and_store_32x32(qcoeff0, dequant, zero,
                                      dqcoeff_ptr + index);
    calculate_dqcoeff_and_store_32x32(qcoeff1, dequant, zero,
                                      dqcoeff_ptr + index + 8);

    eob0 = scan_for_eob(&qcoeff0, &qcoeff1, iscan, index, zero);
    eob = _mm_max_epi16(eob, eob0);
  }

  // Reduce the per-lane eob maxima to a single scalar.
  *eob_ptr = accumulate_eob(eob);
}