/src/libvpx/vpx_dsp/x86/quantize_ssse3.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2017 The WebM project authors. All Rights Reserved. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license |
5 | | * that can be found in the LICENSE file in the root of the source |
6 | | * tree. An additional intellectual property rights grant can be found |
7 | | * in the file PATENTS. All contributing project authors may |
8 | | * be found in the AUTHORS file in the root of the source tree. |
9 | | */ |
10 | | |
11 | | #include <assert.h> |
12 | | #include <tmmintrin.h> |
13 | | |
14 | | #include "./vpx_dsp_rtcd.h" |
15 | | #include "vpx/vpx_integer.h" |
16 | | #include "vpx_dsp/x86/bitdepth_conversion_sse2.h" |
17 | | #include "vpx_dsp/x86/quantize_sse2.h" |
18 | | #include "vpx_dsp/x86/quantize_ssse3.h" |
19 | | #include "vp9/common/vp9_scan.h" |
20 | | #include "vp9/encoder/vp9_block.h" |
21 | | |
22 | | void vpx_quantize_b_ssse3(const tran_low_t *coeff_ptr, intptr_t n_coeffs, |
23 | | const struct macroblock_plane *const mb_plane, |
24 | | tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, |
25 | | const int16_t *dequant_ptr, uint16_t *eob_ptr, |
26 | 0 | const struct ScanOrder *const scan_order) { |
27 | 0 | const __m128i zero = _mm_setzero_si128(); |
28 | 0 | int index = 16; |
29 | 0 | const int16_t *iscan = scan_order->iscan; |
30 | |
|
31 | 0 | __m128i zbin, round, quant, dequant, shift; |
32 | 0 | __m128i coeff0, coeff1; |
33 | 0 | __m128i qcoeff0, qcoeff1; |
34 | 0 | __m128i cmp_mask0, cmp_mask1; |
35 | 0 | __m128i eob, eob0; |
36 | |
|
37 | 0 | load_b_values(mb_plane, &zbin, &round, &quant, dequant_ptr, &dequant, &shift); |
38 | | |
39 | | // Do DC and first 15 AC. |
40 | 0 | coeff0 = load_tran_low(coeff_ptr); |
41 | 0 | coeff1 = load_tran_low(coeff_ptr + 8); |
42 | |
|
43 | 0 | qcoeff0 = _mm_abs_epi16(coeff0); |
44 | 0 | qcoeff1 = _mm_abs_epi16(coeff1); |
45 | |
|
46 | 0 | cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin); |
47 | 0 | zbin = _mm_unpackhi_epi64(zbin, zbin); // Switch DC to AC |
48 | 0 | cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin); |
49 | |
|
50 | 0 | calculate_qcoeff(&qcoeff0, round, quant, shift); |
51 | 0 | round = _mm_unpackhi_epi64(round, round); |
52 | 0 | quant = _mm_unpackhi_epi64(quant, quant); |
53 | 0 | shift = _mm_unpackhi_epi64(shift, shift); |
54 | 0 | calculate_qcoeff(&qcoeff1, round, quant, shift); |
55 | | |
56 | | // Reinsert signs |
57 | 0 | qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0); |
58 | 0 | qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1); |
59 | | |
60 | | // Mask out zbin threshold coeffs |
61 | 0 | qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0); |
62 | 0 | qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1); |
63 | |
|
64 | 0 | store_tran_low(qcoeff0, qcoeff_ptr); |
65 | 0 | store_tran_low(qcoeff1, qcoeff_ptr + 8); |
66 | |
|
67 | 0 | calculate_dqcoeff_and_store(qcoeff0, dequant, dqcoeff_ptr); |
68 | 0 | dequant = _mm_unpackhi_epi64(dequant, dequant); |
69 | 0 | calculate_dqcoeff_and_store(qcoeff1, dequant, dqcoeff_ptr + 8); |
70 | |
|
71 | 0 | eob = scan_for_eob(&qcoeff0, &qcoeff1, iscan, 0, zero); |
72 | | |
73 | | // AC only loop. |
74 | 0 | while (index < n_coeffs) { |
75 | 0 | coeff0 = load_tran_low(coeff_ptr + index); |
76 | 0 | coeff1 = load_tran_low(coeff_ptr + index + 8); |
77 | |
|
78 | 0 | qcoeff0 = _mm_abs_epi16(coeff0); |
79 | 0 | qcoeff1 = _mm_abs_epi16(coeff1); |
80 | |
|
81 | 0 | cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin); |
82 | 0 | cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin); |
83 | |
|
84 | 0 | calculate_qcoeff(&qcoeff0, round, quant, shift); |
85 | 0 | calculate_qcoeff(&qcoeff1, round, quant, shift); |
86 | |
|
87 | 0 | qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0); |
88 | 0 | qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1); |
89 | |
|
90 | 0 | qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0); |
91 | 0 | qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1); |
92 | |
|
93 | 0 | store_tran_low(qcoeff0, qcoeff_ptr + index); |
94 | 0 | store_tran_low(qcoeff1, qcoeff_ptr + index + 8); |
95 | |
|
96 | 0 | calculate_dqcoeff_and_store(qcoeff0, dequant, dqcoeff_ptr + index); |
97 | 0 | calculate_dqcoeff_and_store(qcoeff1, dequant, dqcoeff_ptr + index + 8); |
98 | |
|
99 | 0 | eob0 = scan_for_eob(&qcoeff0, &qcoeff1, iscan, index, zero); |
100 | 0 | eob = _mm_max_epi16(eob, eob0); |
101 | |
|
102 | 0 | index += 16; |
103 | 0 | } |
104 | |
|
105 | 0 | *eob_ptr = accumulate_eob(eob); |
106 | 0 | } |
107 | | |
/*
 * 32x32 variant of vpx_quantize_b_ssse3(). Same quantization pipeline
 * (abs -> zbin compare -> round/quant/shift -> re-sign -> mask), but the
 * quantizer constants come from load_b_values32x32() and each group of 16
 * coefficients gets a fast path that skips all arithmetic when no lane
 * exceeds the zbin threshold. The block size is fixed, so there is no
 * n_coeffs parameter. Writes qcoeff_ptr/dqcoeff_ptr and the end-of-block
 * index to *eob_ptr.
 */
void vpx_quantize_b_32x32_ssse3(const tran_low_t *coeff_ptr,
                                const struct macroblock_plane *const mb_plane,
                                tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                                const int16_t *dequant_ptr, uint16_t *eob_ptr,
                                const struct ScanOrder *const scan_order) {
  const __m128i zero = _mm_setzero_si128();
  int index;
  const int16_t *iscan = scan_order->iscan;

  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1;
  __m128i qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1;
  __m128i all_zero;
  // eob starts at zero so the skip paths below contribute nothing to it.
  __m128i eob = zero, eob0;

  load_b_values32x32(mb_plane, &zbin, &round, &quant, dequant_ptr, &dequant,
                     &shift);

  // Do DC and first 15 AC.
  coeff0 = load_tran_low(coeff_ptr);
  coeff1 = load_tran_low(coeff_ptr + 8);

  // Quantize magnitudes; signs are reinserted after quantization.
  qcoeff0 = _mm_abs_epi16(coeff0);
  qcoeff1 = _mm_abs_epi16(coeff1);

  // Lane 0 of zbin is the DC threshold; broadcast the AC half afterwards.
  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC.
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

  // Fast path: if none of the first 16 coefficients clears the zbin
  // threshold, just zero the outputs.
  all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
  if (_mm_movemask_epi8(all_zero) == 0) {
    _mm_store_si128((__m128i *)(qcoeff_ptr), zero);
    _mm_store_si128((__m128i *)(qcoeff_ptr + 8), zero);
    _mm_store_si128((__m128i *)(dqcoeff_ptr), zero);
    _mm_store_si128((__m128i *)(dqcoeff_ptr + 8), zero);
#if CONFIG_VP9_HIGHBITDEPTH
    // tran_low_t is wider here, so 16 coefficients span four 128-bit
    // vectors; zero the other two as well.
    _mm_store_si128((__m128i *)(qcoeff_ptr + 4), zero);
    _mm_store_si128((__m128i *)(qcoeff_ptr + 12), zero);
    _mm_store_si128((__m128i *)(dqcoeff_ptr + 4), zero);
    _mm_store_si128((__m128i *)(dqcoeff_ptr + 12), zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    // Even when the first group is skipped, the quantizer vectors must
    // still be promoted from their DC to their AC halves for the loop.
    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
  } else {
    calculate_qcoeff(&qcoeff0, round, quant, shift);
    // Promote round/quant/shift to their AC halves for every later lane.
    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    // Reinsert signs.
    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    // Mask out zbin threshold coeffs.
    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr);
    store_tran_low(qcoeff1, qcoeff_ptr + 8);

    calculate_dqcoeff_and_store_32x32(qcoeff0, dequant, zero, dqcoeff_ptr);
    dequant = _mm_unpackhi_epi64(dequant, dequant);  // Switch DC to AC.
    calculate_dqcoeff_and_store_32x32(qcoeff1, dequant, zero, dqcoeff_ptr + 8);

    eob = scan_for_eob(&qcoeff0, &qcoeff1, iscan, 0, zero);
  }

  // AC only loop, 16 coefficients per iteration over the fixed 32x32 block.
  for (index = 16; index < 32 * 32; index += 16) {
    coeff0 = load_tran_low(coeff_ptr + index);
    coeff1 = load_tran_low(coeff_ptr + index + 8);

    qcoeff0 = _mm_abs_epi16(coeff0);
    qcoeff1 = _mm_abs_epi16(coeff1);

    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

    // Same fast path as above: zero the outputs and skip the arithmetic
    // when this whole group fails the zbin test.
    all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
    if (_mm_movemask_epi8(all_zero) == 0) {
      _mm_store_si128((__m128i *)(qcoeff_ptr + index), zero);
      _mm_store_si128((__m128i *)(qcoeff_ptr + index + 8), zero);
      _mm_store_si128((__m128i *)(dqcoeff_ptr + index), zero);
      _mm_store_si128((__m128i *)(dqcoeff_ptr + index + 8), zero);
#if CONFIG_VP9_HIGHBITDEPTH
      _mm_store_si128((__m128i *)(qcoeff_ptr + index + 4), zero);
      _mm_store_si128((__m128i *)(qcoeff_ptr + index + 12), zero);
      _mm_store_si128((__m128i *)(dqcoeff_ptr + index + 4), zero);
      _mm_store_si128((__m128i *)(dqcoeff_ptr + index + 12), zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      continue;
    }

    calculate_qcoeff(&qcoeff0, round, quant, shift);
    calculate_qcoeff(&qcoeff1, round, quant, shift);

    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr + index);
    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);

    calculate_dqcoeff_and_store_32x32(qcoeff0, dequant, zero,
                                      dqcoeff_ptr + index);
    calculate_dqcoeff_and_store_32x32(qcoeff1, dequant, zero,
                                      dqcoeff_ptr + 8 + index);

    eob0 = scan_for_eob(&qcoeff0, &qcoeff1, iscan, index, zero);
    eob = _mm_max_epi16(eob, eob0);
  }

  *eob_ptr = accumulate_eob(eob);
}