/src/libvpx/vpx_dsp/x86/inv_txfm_sse2.c
/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"

static INLINE void transpose_16bit_4(__m128i *res) {
  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);

  res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1);
  res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
}
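
// Note: the two unpack stages above realize a full 4x4 transpose of 16-bit
// elements. With rows 0-1 packed in res[0] and rows 2-3 in res[1], the first
// unpack pair interleaves rows and the second gathers columns. An
// illustrative scalar sketch of the same permutation (not part of the
// library):
//   for (r = 0; r < 4; ++r)
//     for (c = 0; c < 4; ++c) dst[c][r] = src[r][c];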

void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int stride) {
  const __m128i eight = _mm_set1_epi16(8);
  __m128i in[2];

  // Rows
  in[0] = load_input_data8(input);
  in[1] = load_input_data8(input + 8);
  idct4_sse2(in);

  // Columns
  idct4_sse2(in);

  // Final round and shift
  in[0] = _mm_add_epi16(in[0], eight);
  in[1] = _mm_add_epi16(in[1], eight);
  in[0] = _mm_srai_epi16(in[0], 4);
  in[1] = _mm_srai_epi16(in[1], 4);

  recon_and_store4x4_sse2(in, dest, stride);
}
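
// The add-8/shift-4 pair above is the vector form of the scalar final-stage
// rounding ROUND_POWER_OF_TWO(x, 4) used by the 4x4 inverse transform: each
// reconstructed residual r becomes
//   r = (r + 8) >> 4;
// before recon_and_store4x4_sse2() adds it to the prediction.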

void vpx_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                            int stride) {
  const __m128i zero = _mm_setzero_si128();
  int a;
  __m128i dc_value, d[2];

  a = (int)dct_const_round_shift((int16_t)input[0] * cospi_16_64);
  a = (int)dct_const_round_shift(a * cospi_16_64);
  a = ROUND_POWER_OF_TWO(a, 4);

  dc_value = _mm_set1_epi16(a);

  // Reconstruction and Store
  d[0] = _mm_cvtsi32_si128(*(const int *)(dest));
  d[1] = _mm_cvtsi32_si128(*(const int *)(dest + stride * 3));
  d[0] = _mm_unpacklo_epi32(d[0],
                            _mm_cvtsi32_si128(*(const int *)(dest + stride)));
  d[1] = _mm_unpacklo_epi32(
      _mm_cvtsi32_si128(*(const int *)(dest + stride * 2)), d[1]);
  d[0] = _mm_unpacklo_epi8(d[0], zero);
  d[1] = _mm_unpacklo_epi8(d[1], zero);
  d[0] = _mm_add_epi16(d[0], dc_value);
  d[1] = _mm_add_epi16(d[1], dc_value);
  d[0] = _mm_packus_epi16(d[0], d[1]);

  *(int *)dest = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride) = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d[0]);
}
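
// In this DC-only path every output residual is identical, so the two 1-D
// passes collapse to two scalar multiplies by cospi_16_64 (each followed by
// dct_const_round_shift()) plus the final ROUND_POWER_OF_TWO(a, 4); the
// resulting dc_value then only has to be added to all 16 pixels at once.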

void idct4_sse2(__m128i *const in) {
  const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  __m128i u[2];

  transpose_16bit_4(in);
  // stage 1
  u[0] = _mm_unpacklo_epi16(in[0], in[1]);
  u[1] = _mm_unpackhi_epi16(in[0], in[1]);
  u[0] = idct_calc_wraplow_sse2(k__cospi_p16_p16, k__cospi_p16_m16, u[0]);
  u[1] = idct_calc_wraplow_sse2(k__cospi_p08_p24, k__cospi_p24_m08, u[1]);

  // stage 2
  in[0] = _mm_add_epi16(u[0], u[1]);
  in[1] = _mm_sub_epi16(u[0], u[1]);
  in[1] = _mm_shuffle_epi32(in[1], 0x4E);
}
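
// idct4_sse2() keeps two output rows per register, so each
// idct_calc_wraplow_sse2() call evaluates a whole butterfly stage with one
// pair of _mm_madd_epi16() products. After the add/sub, in[1] holds rows 3
// and 2 in that order; _mm_shuffle_epi32(in[1], 0x4E) swaps the two 64-bit
// halves to put them back in natural row order.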

void iadst4_sse2(__m128i *const in) {
  const __m128i k__sinpi_1_3 = pair_set_epi16(sinpi_1_9, sinpi_3_9);
  const __m128i k__sinpi_4_2 = pair_set_epi16(sinpi_4_9, sinpi_2_9);
  const __m128i k__sinpi_2_3 = pair_set_epi16(sinpi_2_9, sinpi_3_9);
  const __m128i k__sinpi_1_4 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
  const __m128i k__sinpi_12_n3 =
      pair_set_epi16(sinpi_1_9 + sinpi_2_9, -sinpi_3_9);
  __m128i u[4], v[5];

  // 00 01 20 21  02 03 22 23
  // 10 11 30 31  12 13 32 33
  const __m128i tr0_0 = _mm_unpacklo_epi32(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpackhi_epi32(in[0], in[1]);

  // 00 01 10 11  20 21 30 31
  // 02 03 12 13  22 23 32 33
  in[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
  in[1] = _mm_unpackhi_epi32(tr0_0, tr0_1);

  v[0] = _mm_madd_epi16(in[0], k__sinpi_1_3);    // s_1 * x0 + s_3 * x1
  v[1] = _mm_madd_epi16(in[1], k__sinpi_4_2);    // s_4 * x2 + s_2 * x3
  v[2] = _mm_madd_epi16(in[0], k__sinpi_2_3);    // s_2 * x0 + s_3 * x1
  v[3] = _mm_madd_epi16(in[1], k__sinpi_1_4);    // s_1 * x2 + s_4 * x3
  v[4] = _mm_madd_epi16(in[0], k__sinpi_12_n3);  // (s_1 + s_2) * x0 - s_3 * x1
  in[0] = _mm_sub_epi16(in[0], in[1]);           // x0 - x2
  in[1] = _mm_srli_epi32(in[1], 16);
  in[0] = _mm_add_epi16(in[0], in[1]);
  in[0] = _mm_slli_epi32(in[0], 16);  // x0 - x2 + x3

  u[0] = _mm_add_epi32(v[0], v[1]);
  u[1] = _mm_sub_epi32(v[2], v[3]);
  u[2] = _mm_madd_epi16(in[0], k__sinpi_1_3);
  u[3] = _mm_sub_epi32(v[1], v[3]);
  u[3] = _mm_add_epi32(u[3], v[4]);

  u[0] = dct_const_round_shift_sse2(u[0]);
  u[1] = dct_const_round_shift_sse2(u[1]);
  u[2] = dct_const_round_shift_sse2(u[2]);
  u[3] = dct_const_round_shift_sse2(u[3]);

  in[0] = _mm_packs_epi32(u[0], u[1]);
  in[1] = _mm_packs_epi32(u[2], u[3]);
}
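
// iadst4 note: the sub/srli/add/slli sequence above packs (x0 - x2 + x3)
// into the upper 16 bits of each 32-bit lane while zeroing the lower half,
// so the following _mm_madd_epi16(in[0], k__sinpi_1_3) contributes
// 0 * sinpi_1_9 from the low half and yields sinpi_3_9 * (x0 - x2 + x3),
// the third 4-point ADST output, without a dedicated multiply.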

static INLINE void load_buffer_8x8(const tran_low_t *const input,
                                   __m128i *const in) {
  in[0] = load_input_data8(input + 0 * 8);
  in[1] = load_input_data8(input + 1 * 8);
  in[2] = load_input_data8(input + 2 * 8);
  in[3] = load_input_data8(input + 3 * 8);
  in[4] = load_input_data8(input + 4 * 8);
  in[5] = load_input_data8(input + 5 * 8);
  in[6] = load_input_data8(input + 6 * 8);
  in[7] = load_input_data8(input + 7 * 8);
}

void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int stride) {
  __m128i in[8];
  int i;

  // Load input data.
  load_buffer_8x8(input, in);

  // 2-D
  for (i = 0; i < 2; i++) {
    vpx_idct8_sse2(in);
  }

  write_buffer_8x8(in, dest, stride);
}
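
// Each vpx_idct8_sse2() call transposes and then runs the 1-D idct8, so the
// two loop iterations apply the row transform followed by the column
// transform; write_buffer_8x8() then performs the final
// ROUND_POWER_OF_TWO(x, 5) rounding and adds the result to the prediction.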

void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int stride) {
  __m128i io[8];

  io[0] = load_input_data4(input + 0 * 8);
  io[1] = load_input_data4(input + 1 * 8);
  io[2] = load_input_data4(input + 2 * 8);
  io[3] = load_input_data4(input + 3 * 8);

  idct8x8_12_add_kernel_sse2(io);
  write_buffer_8x8(io, dest, stride);
}
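
// Only the top-left 4x4 corner of the 8x8 coefficient block is loaded here;
// the _12 suffix refers to the end-of-block count this path assumes, i.e.
// all nonzero coefficients are known to fall in that corner, which lets
// idct8x8_12_add_kernel_sse2() skip the arithmetic on known-zero inputs.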

static INLINE void recon_and_store_8_dual(uint8_t *const dest,
                                          const __m128i in_x,
                                          const int stride) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d0, d1;

  d0 = _mm_loadl_epi64((__m128i *)(dest + 0 * stride));
  d1 = _mm_loadl_epi64((__m128i *)(dest + 1 * stride));
  d0 = _mm_unpacklo_epi8(d0, zero);
  d1 = _mm_unpacklo_epi8(d1, zero);
  d0 = _mm_add_epi16(in_x, d0);
  d1 = _mm_add_epi16(in_x, d1);
  d0 = _mm_packus_epi16(d0, d1);
  _mm_storel_epi64((__m128i *)(dest + 0 * stride), d0);
  _mm_storeh_pi((__m64 *)(dest + 1 * stride), _mm_castsi128_ps(d0));
}
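
// _mm_packus_epi16() saturates the 16-bit sums to [0, 255], the vector
// equivalent of the scalar clip_pixel() applied after adding a residual to
// each predicted pixel; the _mm_storel_epi64()/_mm_storeh_pi() pair then
// splits the 16 packed bytes across the two destination rows.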

void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                            int stride) {
  __m128i dc_value;
  tran_high_t a1;
  tran_low_t out =
      WRAPLOW(dct_const_round_shift((int16_t)input[0] * cospi_16_64));

  out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));
  a1 = ROUND_POWER_OF_TWO(out, 5);
  dc_value = _mm_set1_epi16((int16_t)a1);

  recon_and_store_8_dual(dest, dc_value, stride);
  dest += 2 * stride;
  recon_and_store_8_dual(dest, dc_value, stride);
  dest += 2 * stride;
  recon_and_store_8_dual(dest, dc_value, stride);
  dest += 2 * stride;
  recon_and_store_8_dual(dest, dc_value, stride);
}

void vpx_idct8_sse2(__m128i *const in) {
  // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
  transpose_16bit_8x8(in, in);

  // 4-stage 1D idct8x8
  idct8(in, in);
}

void iadst8_sse2(__m128i *const in) {
  const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
  const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
  const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
  const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64);
  const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64);
  const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
  const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
  const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i kZero = _mm_setzero_si128();
  __m128i s[8], u[16], v[8], w[16];

  // transpose
  transpose_16bit_8x8(in, in);

  // column transformation
  // stage 1
  // interleave and multiply/add into 32-bit integer
  s[0] = _mm_unpacklo_epi16(in[7], in[0]);
  s[1] = _mm_unpackhi_epi16(in[7], in[0]);
  s[2] = _mm_unpacklo_epi16(in[5], in[2]);
  s[3] = _mm_unpackhi_epi16(in[5], in[2]);
  s[4] = _mm_unpacklo_epi16(in[3], in[4]);
  s[5] = _mm_unpackhi_epi16(in[3], in[4]);
  s[6] = _mm_unpacklo_epi16(in[1], in[6]);
  s[7] = _mm_unpackhi_epi16(in[1], in[6]);

  u[0] = _mm_madd_epi16(s[0], k__cospi_p02_p30);
  u[1] = _mm_madd_epi16(s[1], k__cospi_p02_p30);
  u[2] = _mm_madd_epi16(s[0], k__cospi_p30_m02);
  u[3] = _mm_madd_epi16(s[1], k__cospi_p30_m02);
  u[4] = _mm_madd_epi16(s[2], k__cospi_p10_p22);
  u[5] = _mm_madd_epi16(s[3], k__cospi_p10_p22);
  u[6] = _mm_madd_epi16(s[2], k__cospi_p22_m10);
  u[7] = _mm_madd_epi16(s[3], k__cospi_p22_m10);
  u[8] = _mm_madd_epi16(s[4], k__cospi_p18_p14);
  u[9] = _mm_madd_epi16(s[5], k__cospi_p18_p14);
  u[10] = _mm_madd_epi16(s[4], k__cospi_p14_m18);
  u[11] = _mm_madd_epi16(s[5], k__cospi_p14_m18);
  u[12] = _mm_madd_epi16(s[6], k__cospi_p26_p06);
  u[13] = _mm_madd_epi16(s[7], k__cospi_p26_p06);
  u[14] = _mm_madd_epi16(s[6], k__cospi_p06_m26);
  u[15] = _mm_madd_epi16(s[7], k__cospi_p06_m26);

  // addition
  w[0] = _mm_add_epi32(u[0], u[8]);
  w[1] = _mm_add_epi32(u[1], u[9]);
  w[2] = _mm_add_epi32(u[2], u[10]);
  w[3] = _mm_add_epi32(u[3], u[11]);
  w[4] = _mm_add_epi32(u[4], u[12]);
  w[5] = _mm_add_epi32(u[5], u[13]);
  w[6] = _mm_add_epi32(u[6], u[14]);
  w[7] = _mm_add_epi32(u[7], u[15]);
  w[8] = _mm_sub_epi32(u[0], u[8]);
  w[9] = _mm_sub_epi32(u[1], u[9]);
  w[10] = _mm_sub_epi32(u[2], u[10]);
  w[11] = _mm_sub_epi32(u[3], u[11]);
  w[12] = _mm_sub_epi32(u[4], u[12]);
  w[13] = _mm_sub_epi32(u[5], u[13]);
  w[14] = _mm_sub_epi32(u[6], u[14]);
  w[15] = _mm_sub_epi32(u[7], u[15]);

  // shift and rounding
  u[0] = dct_const_round_shift_sse2(w[0]);
  u[1] = dct_const_round_shift_sse2(w[1]);
  u[2] = dct_const_round_shift_sse2(w[2]);
  u[3] = dct_const_round_shift_sse2(w[3]);
  u[4] = dct_const_round_shift_sse2(w[4]);
  u[5] = dct_const_round_shift_sse2(w[5]);
  u[6] = dct_const_round_shift_sse2(w[6]);
  u[7] = dct_const_round_shift_sse2(w[7]);
  u[8] = dct_const_round_shift_sse2(w[8]);
  u[9] = dct_const_round_shift_sse2(w[9]);
  u[10] = dct_const_round_shift_sse2(w[10]);
  u[11] = dct_const_round_shift_sse2(w[11]);
  u[12] = dct_const_round_shift_sse2(w[12]);
  u[13] = dct_const_round_shift_sse2(w[13]);
  u[14] = dct_const_round_shift_sse2(w[14]);
  u[15] = dct_const_round_shift_sse2(w[15]);

  // back to 16-bit and pack 8 integers into __m128i
  in[0] = _mm_packs_epi32(u[0], u[1]);
  in[1] = _mm_packs_epi32(u[2], u[3]);
  in[2] = _mm_packs_epi32(u[4], u[5]);
  in[3] = _mm_packs_epi32(u[6], u[7]);
  in[4] = _mm_packs_epi32(u[8], u[9]);
  in[5] = _mm_packs_epi32(u[10], u[11]);
  in[6] = _mm_packs_epi32(u[12], u[13]);
  in[7] = _mm_packs_epi32(u[14], u[15]);

  // stage 2
  s[0] = _mm_add_epi16(in[0], in[2]);
  s[1] = _mm_add_epi16(in[1], in[3]);
  s[2] = _mm_sub_epi16(in[0], in[2]);
  s[3] = _mm_sub_epi16(in[1], in[3]);
  u[0] = _mm_unpacklo_epi16(in[4], in[5]);
  u[1] = _mm_unpackhi_epi16(in[4], in[5]);
  u[2] = _mm_unpacklo_epi16(in[6], in[7]);
  u[3] = _mm_unpackhi_epi16(in[6], in[7]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
  v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
  v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);

  w[0] = _mm_add_epi32(v[0], v[4]);
  w[1] = _mm_add_epi32(v[1], v[5]);
  w[2] = _mm_add_epi32(v[2], v[6]);
  w[3] = _mm_add_epi32(v[3], v[7]);
  w[4] = _mm_sub_epi32(v[0], v[4]);
  w[5] = _mm_sub_epi32(v[1], v[5]);
  w[6] = _mm_sub_epi32(v[2], v[6]);
  w[7] = _mm_sub_epi32(v[3], v[7]);

  u[0] = dct_const_round_shift_sse2(w[0]);
  u[1] = dct_const_round_shift_sse2(w[1]);
  u[2] = dct_const_round_shift_sse2(w[2]);
  u[3] = dct_const_round_shift_sse2(w[3]);
  u[4] = dct_const_round_shift_sse2(w[4]);
  u[5] = dct_const_round_shift_sse2(w[5]);
  u[6] = dct_const_round_shift_sse2(w[6]);
  u[7] = dct_const_round_shift_sse2(w[7]);
  // back to 16-bit integers
  s[4] = _mm_packs_epi32(u[0], u[1]);
  s[5] = _mm_packs_epi32(u[2], u[3]);
  s[6] = _mm_packs_epi32(u[4], u[5]);
  s[7] = _mm_packs_epi32(u[6], u[7]);

  // stage 3
  u[0] = _mm_unpacklo_epi16(s[2], s[3]);
  u[1] = _mm_unpackhi_epi16(s[2], s[3]);
  u[2] = _mm_unpacklo_epi16(s[6], s[7]);
  u[3] = _mm_unpackhi_epi16(s[6], s[7]);

  s[2] = idct_calc_wraplow_sse2(u[0], u[1], k__cospi_p16_p16);
  s[3] = idct_calc_wraplow_sse2(u[0], u[1], k__cospi_p16_m16);
  s[6] = idct_calc_wraplow_sse2(u[2], u[3], k__cospi_p16_p16);
  s[7] = idct_calc_wraplow_sse2(u[2], u[3], k__cospi_p16_m16);

  in[0] = s[0];
  in[1] = _mm_sub_epi16(kZero, s[4]);
  in[2] = s[6];
  in[3] = _mm_sub_epi16(kZero, s[2]);
  in[4] = s[3];
  in[5] = _mm_sub_epi16(kZero, s[7]);
  in[6] = s[5];
  in[7] = _mm_sub_epi16(kZero, s[1]);
}
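
// Throughout this file dct_const_round_shift_sse2() is the vector form of
// the scalar dct_const_round_shift(x) = ROUND_POWER_OF_TWO(x, 14): the
// cospi_*/sinpi_* constants are 14-bit fixed-point values, so every
// multiply-accumulate stage is followed by an add of 2^13 and an arithmetic
// right shift by 14 to drop the scaling, i.e.
//   y = (x + (1 << 13)) >> 14;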

static INLINE void idct16_load8x8(const tran_low_t *const input,
                                  __m128i *const in) {
  in[0] = load_input_data8(input + 0 * 16);
  in[1] = load_input_data8(input + 1 * 16);
  in[2] = load_input_data8(input + 2 * 16);
  in[3] = load_input_data8(input + 3 * 16);
  in[4] = load_input_data8(input + 4 * 16);
  in[5] = load_input_data8(input + 5 * 16);
  in[6] = load_input_data8(input + 6 * 16);
  in[7] = load_input_data8(input + 7 * 16);
}

void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  __m128i l[16], r[16], out[16], *in;
  int i;

  in = l;
  for (i = 0; i < 2; i++) {
    idct16_load8x8(input, in);
    transpose_16bit_8x8(in, in);
    idct16_load8x8(input + 8, in + 8);
    transpose_16bit_8x8(in + 8, in + 8);
    idct16_8col(in, in);
    in = r;
    input += 128;
  }

  for (i = 0; i < 16; i += 8) {
    int j;
    transpose_16bit_8x8(l + i, out);
    transpose_16bit_8x8(r + i, out + 8);
    idct16_8col(out, out);

    for (j = 0; j < 16; ++j) {
      write_buffer_8x1(dest + j * stride, out[j]);
    }

    dest += 8;
  }
}

void vpx_idct16x16_38_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride) {
  __m128i in[16], temp[16], out[16];
  int i;

  idct16_load8x8(input, in);
  transpose_16bit_8x8(in, in);

  for (i = 8; i < 16; i++) {
    in[i] = _mm_setzero_si128();
  }
  idct16_8col(in, temp);

  for (i = 0; i < 16; i += 8) {
    int j;
    transpose_16bit_8x8(temp + i, in);
    idct16_8col(in, out);

    for (j = 0; j < 16; ++j) {
      write_buffer_8x1(dest + j * stride, out[j]);
    }

    dest += 8;
  }
}
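
// The _38 variant loads only the top-left 8x8 coefficients and zeroes rows
// 8-15 before the first pass; it is selected when the end-of-block position
// guarantees the remaining coefficients are zero, halving the load and
// transpose work of the first 1-D pass.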

void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride) {
  __m128i in[16], l[16];
  int i;

  // First 1-D inverse DCT
  // Load input data.
  in[0] = load_input_data4(input + 0 * 16);
  in[1] = load_input_data4(input + 1 * 16);
  in[2] = load_input_data4(input + 2 * 16);
  in[3] = load_input_data4(input + 3 * 16);

  idct16x16_10_pass1(in, l);

  // Second 1-D inverse transform, performed per 8x16 block
  for (i = 0; i < 16; i += 8) {
    int j;
    idct16x16_10_pass2(l + i, in);

    for (j = 0; j < 16; ++j) {
      write_buffer_8x1(dest + j * stride, in[j]);
    }

    dest += 8;
  }
}

static INLINE void recon_and_store_16(uint8_t *const dest,
                                      const __m128i in_x) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d0, d1;

  d0 = _mm_load_si128((__m128i *)(dest));
  d1 = _mm_unpackhi_epi8(d0, zero);
  d0 = _mm_unpacklo_epi8(d0, zero);
  d0 = _mm_add_epi16(in_x, d0);
  d1 = _mm_add_epi16(in_x, d1);
  d0 = _mm_packus_epi16(d0, d1);
  _mm_store_si128((__m128i *)(dest), d0);
}

void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest,
                              int stride) {
  __m128i dc_value;
  int i;
  tran_high_t a1;
  tran_low_t out =
      WRAPLOW(dct_const_round_shift((int16_t)input[0] * cospi_16_64));

  out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));
  a1 = ROUND_POWER_OF_TWO(out, 6);
  dc_value = _mm_set1_epi16((int16_t)a1);

  for (i = 0; i < 16; ++i) {
    recon_and_store_16(dest, dc_value);
    dest += stride;
  }
}
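
// For the 16x16 (and 32x32) transforms the final rounding shift is 6,
// rather than the 4 and 5 used by the 4x4 and 8x8 paths, matching the
// ROUND_POWER_OF_TWO(out, 6) in the scalar DC-only code.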

void vpx_iadst16_8col_sse2(__m128i *const in) {
  // perform 16x16 1-D ADST for 8 columns
  __m128i s[16], x[16], u[32], v[32];
  const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
  const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64);
  const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64);
  const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64);
  const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64);
  const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64);
  const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64);
  const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64);
  const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64);
  const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64);
  const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64);
  const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64);
  const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64);
  const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64);
  const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64);
  const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64);
  const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
  const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);
  const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);
  const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64);
  const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
  const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
  const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64);
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
  const __m128i kZero = _mm_setzero_si128();

  u[0] = _mm_unpacklo_epi16(in[15], in[0]);
  u[1] = _mm_unpackhi_epi16(in[15], in[0]);
  u[2] = _mm_unpacklo_epi16(in[13], in[2]);
  u[3] = _mm_unpackhi_epi16(in[13], in[2]);
  u[4] = _mm_unpacklo_epi16(in[11], in[4]);
  u[5] = _mm_unpackhi_epi16(in[11], in[4]);
  u[6] = _mm_unpacklo_epi16(in[9], in[6]);
  u[7] = _mm_unpackhi_epi16(in[9], in[6]);
  u[8] = _mm_unpacklo_epi16(in[7], in[8]);
  u[9] = _mm_unpackhi_epi16(in[7], in[8]);
  u[10] = _mm_unpacklo_epi16(in[5], in[10]);
  u[11] = _mm_unpackhi_epi16(in[5], in[10]);
  u[12] = _mm_unpacklo_epi16(in[3], in[12]);
  u[13] = _mm_unpackhi_epi16(in[3], in[12]);
  u[14] = _mm_unpacklo_epi16(in[1], in[14]);
  u[15] = _mm_unpackhi_epi16(in[1], in[14]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01);
  v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27);
  v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05);
  v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23);
  v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09);
  v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19);
  v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13);
  v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15);
  v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15);
  v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17);
  v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17);
  v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11);
  v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11);
  v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21);
  v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21);
  v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07);
  v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07);
  v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25);
  v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25);
  v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03);
  v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03);
  v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29);
  v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29);

  u[0] = _mm_add_epi32(v[0], v[16]);
  u[1] = _mm_add_epi32(v[1], v[17]);
  u[2] = _mm_add_epi32(v[2], v[18]);
  u[3] = _mm_add_epi32(v[3], v[19]);
  u[4] = _mm_add_epi32(v[4], v[20]);
  u[5] = _mm_add_epi32(v[5], v[21]);
  u[6] = _mm_add_epi32(v[6], v[22]);
  u[7] = _mm_add_epi32(v[7], v[23]);
  u[8] = _mm_add_epi32(v[8], v[24]);
  u[9] = _mm_add_epi32(v[9], v[25]);
  u[10] = _mm_add_epi32(v[10], v[26]);
  u[11] = _mm_add_epi32(v[11], v[27]);
  u[12] = _mm_add_epi32(v[12], v[28]);
  u[13] = _mm_add_epi32(v[13], v[29]);
  u[14] = _mm_add_epi32(v[14], v[30]);
  u[15] = _mm_add_epi32(v[15], v[31]);
  u[16] = _mm_sub_epi32(v[0], v[16]);
  u[17] = _mm_sub_epi32(v[1], v[17]);
  u[18] = _mm_sub_epi32(v[2], v[18]);
  u[19] = _mm_sub_epi32(v[3], v[19]);
  u[20] = _mm_sub_epi32(v[4], v[20]);
  u[21] = _mm_sub_epi32(v[5], v[21]);
  u[22] = _mm_sub_epi32(v[6], v[22]);
  u[23] = _mm_sub_epi32(v[7], v[23]);
  u[24] = _mm_sub_epi32(v[8], v[24]);
  u[25] = _mm_sub_epi32(v[9], v[25]);
  u[26] = _mm_sub_epi32(v[10], v[26]);
  u[27] = _mm_sub_epi32(v[11], v[27]);
  u[28] = _mm_sub_epi32(v[12], v[28]);
  u[29] = _mm_sub_epi32(v[13], v[29]);
  u[30] = _mm_sub_epi32(v[14], v[30]);
  u[31] = _mm_sub_epi32(v[15], v[31]);

  u[0] = dct_const_round_shift_sse2(u[0]);
  u[1] = dct_const_round_shift_sse2(u[1]);
  u[2] = dct_const_round_shift_sse2(u[2]);
  u[3] = dct_const_round_shift_sse2(u[3]);
  u[4] = dct_const_round_shift_sse2(u[4]);
  u[5] = dct_const_round_shift_sse2(u[5]);
  u[6] = dct_const_round_shift_sse2(u[6]);
  u[7] = dct_const_round_shift_sse2(u[7]);
  u[8] = dct_const_round_shift_sse2(u[8]);
  u[9] = dct_const_round_shift_sse2(u[9]);
  u[10] = dct_const_round_shift_sse2(u[10]);
  u[11] = dct_const_round_shift_sse2(u[11]);
  u[12] = dct_const_round_shift_sse2(u[12]);
  u[13] = dct_const_round_shift_sse2(u[13]);
  u[14] = dct_const_round_shift_sse2(u[14]);
  u[15] = dct_const_round_shift_sse2(u[15]);
  u[16] = dct_const_round_shift_sse2(u[16]);
  u[17] = dct_const_round_shift_sse2(u[17]);
  u[18] = dct_const_round_shift_sse2(u[18]);
  u[19] = dct_const_round_shift_sse2(u[19]);
  u[20] = dct_const_round_shift_sse2(u[20]);
  u[21] = dct_const_round_shift_sse2(u[21]);
  u[22] = dct_const_round_shift_sse2(u[22]);
  u[23] = dct_const_round_shift_sse2(u[23]);
  u[24] = dct_const_round_shift_sse2(u[24]);
  u[25] = dct_const_round_shift_sse2(u[25]);
  u[26] = dct_const_round_shift_sse2(u[26]);
  u[27] = dct_const_round_shift_sse2(u[27]);
  u[28] = dct_const_round_shift_sse2(u[28]);
  u[29] = dct_const_round_shift_sse2(u[29]);
  u[30] = dct_const_round_shift_sse2(u[30]);
  u[31] = dct_const_round_shift_sse2(u[31]);

  s[0] = _mm_packs_epi32(u[0], u[1]);
  s[1] = _mm_packs_epi32(u[2], u[3]);
  s[2] = _mm_packs_epi32(u[4], u[5]);
  s[3] = _mm_packs_epi32(u[6], u[7]);
  s[4] = _mm_packs_epi32(u[8], u[9]);
  s[5] = _mm_packs_epi32(u[10], u[11]);
  s[6] = _mm_packs_epi32(u[12], u[13]);
  s[7] = _mm_packs_epi32(u[14], u[15]);
  s[8] = _mm_packs_epi32(u[16], u[17]);
  s[9] = _mm_packs_epi32(u[18], u[19]);
  s[10] = _mm_packs_epi32(u[20], u[21]);
  s[11] = _mm_packs_epi32(u[22], u[23]);
  s[12] = _mm_packs_epi32(u[24], u[25]);
  s[13] = _mm_packs_epi32(u[26], u[27]);
  s[14] = _mm_packs_epi32(u[28], u[29]);
  s[15] = _mm_packs_epi32(u[30], u[31]);

  // stage 2
  u[0] = _mm_unpacklo_epi16(s[8], s[9]);
  u[1] = _mm_unpackhi_epi16(s[8], s[9]);
  u[2] = _mm_unpacklo_epi16(s[10], s[11]);
  u[3] = _mm_unpackhi_epi16(s[10], s[11]);
  u[4] = _mm_unpacklo_epi16(s[12], s[13]);
  u[5] = _mm_unpackhi_epi16(s[12], s[13]);
  u[6] = _mm_unpacklo_epi16(s[14], s[15]);
  u[7] = _mm_unpackhi_epi16(s[14], s[15]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04);
  v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12);
  v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20);
  v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04);
  v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28);
  v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20);
  v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12);

  u[0] = _mm_add_epi32(v[0], v[8]);
  u[1] = _mm_add_epi32(v[1], v[9]);
  u[2] = _mm_add_epi32(v[2], v[10]);
  u[3] = _mm_add_epi32(v[3], v[11]);
  u[4] = _mm_add_epi32(v[4], v[12]);
  u[5] = _mm_add_epi32(v[5], v[13]);
  u[6] = _mm_add_epi32(v[6], v[14]);
  u[7] = _mm_add_epi32(v[7], v[15]);
  u[8] = _mm_sub_epi32(v[0], v[8]);
  u[9] = _mm_sub_epi32(v[1], v[9]);
  u[10] = _mm_sub_epi32(v[2], v[10]);
  u[11] = _mm_sub_epi32(v[3], v[11]);
  u[12] = _mm_sub_epi32(v[4], v[12]);
  u[13] = _mm_sub_epi32(v[5], v[13]);
  u[14] = _mm_sub_epi32(v[6], v[14]);
  u[15] = _mm_sub_epi32(v[7], v[15]);

  u[0] = dct_const_round_shift_sse2(u[0]);
  u[1] = dct_const_round_shift_sse2(u[1]);
  u[2] = dct_const_round_shift_sse2(u[2]);
  u[3] = dct_const_round_shift_sse2(u[3]);
  u[4] = dct_const_round_shift_sse2(u[4]);
  u[5] = dct_const_round_shift_sse2(u[5]);
  u[6] = dct_const_round_shift_sse2(u[6]);
  u[7] = dct_const_round_shift_sse2(u[7]);
  u[8] = dct_const_round_shift_sse2(u[8]);
  u[9] = dct_const_round_shift_sse2(u[9]);
  u[10] = dct_const_round_shift_sse2(u[10]);
  u[11] = dct_const_round_shift_sse2(u[11]);
  u[12] = dct_const_round_shift_sse2(u[12]);
  u[13] = dct_const_round_shift_sse2(u[13]);
  u[14] = dct_const_round_shift_sse2(u[14]);
  u[15] = dct_const_round_shift_sse2(u[15]);

  x[0] = _mm_add_epi16(s[0], s[4]);
  x[1] = _mm_add_epi16(s[1], s[5]);
  x[2] = _mm_add_epi16(s[2], s[6]);
  x[3] = _mm_add_epi16(s[3], s[7]);
  x[4] = _mm_sub_epi16(s[0], s[4]);
  x[5] = _mm_sub_epi16(s[1], s[5]);
  x[6] = _mm_sub_epi16(s[2], s[6]);
  x[7] = _mm_sub_epi16(s[3], s[7]);
  x[8] = _mm_packs_epi32(u[0], u[1]);
  x[9] = _mm_packs_epi32(u[2], u[3]);
  x[10] = _mm_packs_epi32(u[4], u[5]);
  x[11] = _mm_packs_epi32(u[6], u[7]);
  x[12] = _mm_packs_epi32(u[8], u[9]);
  x[13] = _mm_packs_epi32(u[10], u[11]);
  x[14] = _mm_packs_epi32(u[12], u[13]);
  x[15] = _mm_packs_epi32(u[14], u[15]);

  // stage 3
  u[0] = _mm_unpacklo_epi16(x[4], x[5]);
  u[1] = _mm_unpackhi_epi16(x[4], x[5]);
  u[2] = _mm_unpacklo_epi16(x[6], x[7]);
  u[3] = _mm_unpackhi_epi16(x[6], x[7]);
  u[4] = _mm_unpacklo_epi16(x[12], x[13]);
  u[5] = _mm_unpackhi_epi16(x[12], x[13]);
  u[6] = _mm_unpacklo_epi16(x[14], x[15]);
  u[7] = _mm_unpackhi_epi16(x[14], x[15]);

  v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24);
  v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24);
  v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08);
  v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08);
  v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08);
  v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08);
  v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24);
  v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24);
  v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24);
  v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24);
  v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08);
  v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08);
  v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08);
  v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08);
  v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24);
  v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24);

  u[0] = _mm_add_epi32(v[0], v[4]);
  u[1] = _mm_add_epi32(v[1], v[5]);
  u[2] = _mm_add_epi32(v[2], v[6]);
  u[3] = _mm_add_epi32(v[3], v[7]);
  u[4] = _mm_sub_epi32(v[0], v[4]);
  u[5] = _mm_sub_epi32(v[1], v[5]);
  u[6] = _mm_sub_epi32(v[2], v[6]);
  u[7] = _mm_sub_epi32(v[3], v[7]);
  u[8] = _mm_add_epi32(v[8], v[12]);
  u[9] = _mm_add_epi32(v[9], v[13]);
  u[10] = _mm_add_epi32(v[10], v[14]);
  u[11] = _mm_add_epi32(v[11], v[15]);
  u[12] = _mm_sub_epi32(v[8], v[12]);
  u[13] = _mm_sub_epi32(v[9], v[13]);
  u[14] = _mm_sub_epi32(v[10], v[14]);
  u[15] = _mm_sub_epi32(v[11], v[15]);

  v[0] = dct_const_round_shift_sse2(u[0]);
  v[1] = dct_const_round_shift_sse2(u[1]);
  v[2] = dct_const_round_shift_sse2(u[2]);
  v[3] = dct_const_round_shift_sse2(u[3]);
  v[4] = dct_const_round_shift_sse2(u[4]);
  v[5] = dct_const_round_shift_sse2(u[5]);
  v[6] = dct_const_round_shift_sse2(u[6]);
  v[7] = dct_const_round_shift_sse2(u[7]);
  v[8] = dct_const_round_shift_sse2(u[8]);
  v[9] = dct_const_round_shift_sse2(u[9]);
  v[10] = dct_const_round_shift_sse2(u[10]);
  v[11] = dct_const_round_shift_sse2(u[11]);
  v[12] = dct_const_round_shift_sse2(u[12]);
  v[13] = dct_const_round_shift_sse2(u[13]);
  v[14] = dct_const_round_shift_sse2(u[14]);
  v[15] = dct_const_round_shift_sse2(u[15]);

  s[0] = _mm_add_epi16(x[0], x[2]);
  s[1] = _mm_add_epi16(x[1], x[3]);
  s[2] = _mm_sub_epi16(x[0], x[2]);
  s[3] = _mm_sub_epi16(x[1], x[3]);
  s[4] = _mm_packs_epi32(v[0], v[1]);
  s[5] = _mm_packs_epi32(v[2], v[3]);
  s[6] = _mm_packs_epi32(v[4], v[5]);
  s[7] = _mm_packs_epi32(v[6], v[7]);
  s[8] = _mm_add_epi16(x[8], x[10]);
  s[9] = _mm_add_epi16(x[9], x[11]);
  s[10] = _mm_sub_epi16(x[8], x[10]);
  s[11] = _mm_sub_epi16(x[9], x[11]);
  s[12] = _mm_packs_epi32(v[8], v[9]);
  s[13] = _mm_packs_epi32(v[10], v[11]);
  s[14] = _mm_packs_epi32(v[12], v[13]);
  s[15] = _mm_packs_epi32(v[14], v[15]);

  // stage 4
  u[0] = _mm_unpacklo_epi16(s[2], s[3]);
  u[1] = _mm_unpackhi_epi16(s[2], s[3]);
  u[2] = _mm_unpacklo_epi16(s[6], s[7]);
  u[3] = _mm_unpackhi_epi16(s[6], s[7]);
  u[4] = _mm_unpacklo_epi16(s[10], s[11]);
  u[5] = _mm_unpackhi_epi16(s[10], s[11]);
  u[6] = _mm_unpacklo_epi16(s[14], s[15]);
  u[7] = _mm_unpackhi_epi16(s[14], s[15]);

  in[7] = idct_calc_wraplow_sse2(u[0], u[1], k__cospi_m16_m16);
  in[8] = idct_calc_wraplow_sse2(u[0], u[1], k__cospi_p16_m16);
  in[4] = idct_calc_wraplow_sse2(u[2], u[3], k__cospi_p16_p16);
  in[11] = idct_calc_wraplow_sse2(u[2], u[3], k__cospi_m16_p16);
  in[6] = idct_calc_wraplow_sse2(u[4], u[5], k__cospi_p16_p16);
  in[9] = idct_calc_wraplow_sse2(u[4], u[5], k__cospi_m16_p16);
  in[5] = idct_calc_wraplow_sse2(u[6], u[7], k__cospi_m16_m16);
  in[10] = idct_calc_wraplow_sse2(u[6], u[7], k__cospi_p16_m16);

  in[0] = s[0];
  in[1] = _mm_sub_epi16(kZero, s[8]);
  in[2] = s[12];
  in[3] = _mm_sub_epi16(kZero, s[4]);
  in[12] = s[5];
  in[13] = _mm_sub_epi16(kZero, s[13]);
  in[14] = s[9];
  in[15] = _mm_sub_epi16(kZero, s[1]);
}

void idct16_sse2(__m128i *const in0, __m128i *const in1) {
  transpose_16bit_16x16(in0, in1);
  idct16_8col(in0, in0);
  idct16_8col(in1, in1);
}

void iadst16_sse2(__m128i *const in0, __m128i *const in1) {
  transpose_16bit_16x16(in0, in1);
  vpx_iadst16_8col_sse2(in0);
  vpx_iadst16_8col_sse2(in1);
}

// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31

// For each 8x32 block __m128i in[32],
// Input with index, 0, 4
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[8]*/) {
  const __m128i zero = _mm_setzero_si128();
  __m128i step1[8], step2[8];

  // stage 3
  butterfly(in[4], zero, cospi_28_64, cospi_4_64, &step1[4], &step1[7]);

  // stage 4
  step2[0] = butterfly_cospi16(in[0]);
  step2[4] = step1[4];
  step2[5] = step1[4];
  step2[6] = step1[7];
  step2[7] = step1[7];

  // stage 5
  step1[0] = step2[0];
  step1[1] = step2[0];
  step1[2] = step2[0];
  step1[3] = step2[0];
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}
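
// Because this 34-coefficient path knows most inputs are zero (only indices
// 0 and 4 contribute here), the butterflies above take an explicit zero
// operand and several steps collapse to plain register copies (e.g.
// step2[5] = step1[4]), avoiding the multiplies the full 1024-coefficient
// version below needs.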

// For each 8x32 block __m128i in[32],
// Input with index, 2, 6
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[16]*/) {
  const __m128i zero = _mm_setzero_si128();
  __m128i step1[16], step2[16];

  // stage 2
  butterfly(in[2], zero, cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
  butterfly(zero, in[6], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);

  // stage 3
  step1[8] = step2[8];
  step1[9] = step2[8];
  step1[14] = step2[15];
  step1[15] = step2[15];
  step1[10] = step2[11];
  step1[11] = step2[11];
  step1[12] = step2[12];
  step1[13] = step2[12];

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_34_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_34_8x32_quarter_1(in, temp);
  idct32_34_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32],
// Input with odd index, 1, 3, 5, 7
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  const __m128i zero = _mm_setzero_si128();
  __m128i step1[32];

  // stage 1
  butterfly(in[1], zero, cospi_31_64, cospi_1_64, &step1[16], &step1[31]);
  butterfly(zero, in[7], cospi_7_64, cospi_25_64, &step1[19], &step1[28]);
  butterfly(in[5], zero, cospi_27_64, cospi_5_64, &step1[20], &step1[27]);
  butterfly(zero, in[3], cospi_3_64, cospi_29_64, &step1[23], &step1[24]);

  // stage 3
  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  butterfly(step1[27], step1[20], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step1[24], step1[23], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_34_8x32_sse2(const __m128i *const in /*in[32]*/,
                         __m128i *const out /*out[32]*/) {
  __m128i temp[32];

  idct32_34_8x32_quarter_1_2(in, temp);
  idct32_34_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}

// Only upper-left 8x8 has non-zero coeff
void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
                               int stride) {
  __m128i io[32], col[32];
  int i;

  // Load input data. Only need to load the top left 8x8 block.
  load_transpose_16bit_8x8(input, 32, io);
  idct32_34_8x32_sse2(io, col);

  for (i = 0; i < 32; i += 8) {
    int j;
    transpose_16bit_8x8(col + i, io);
    idct32_34_8x32_sse2(io, io);

    for (j = 0; j < 32; ++j) {
      write_buffer_8x1(dest + j * stride, io[j]);
    }

    dest += 8;
  }
}
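
// Two-pass structure, mirroring the smaller transforms: the first
// idct32_34_8x32_sse2() call works on the transposed top-left 8x8 block and
// leaves 32 rows of intermediate values in col[]; each loop iteration then
// re-transposes an 8x32 slice, runs the second 1-D pass, and lets
// write_buffer_8x1() add the rounded result to the destination row.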
1018  |  |  | 
1019  |  | // For each 8x32 block __m128i in[32],  | 
1020  |  | // Input with index, 0, 4, 8, 12, 16, 20, 24, 28  | 
1021  |  | // output pixels: 0-7 in __m128i out[32]  | 
1022  |  | static INLINE void idct32_1024_8x32_quarter_1(  | 
1023  | 0  |     const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) { | 
1024  | 0  |   __m128i step1[8], step2[8];  | 
1025  |  |  | 
1026  |  |   // stage 3  | 
1027  | 0  |   butterfly(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);  | 
1028  | 0  |   butterfly(in[20], in[12], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);  | 
1029  |  |  | 
1030  |  |   // stage 4  | 
1031  | 0  |   butterfly(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);  | 
1032  | 0  |   butterfly(in[8], in[24], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);  | 
1033  | 0  |   step2[4] = _mm_add_epi16(step1[4], step1[5]);  | 
1034  | 0  |   step2[5] = _mm_sub_epi16(step1[4], step1[5]);  | 
1035  | 0  |   step2[6] = _mm_sub_epi16(step1[7], step1[6]);  | 
1036  | 0  |   step2[7] = _mm_add_epi16(step1[7], step1[6]);  | 
1037  |  |  | 
1038  |  |   // stage 5  | 
1039  | 0  |   step1[0] = _mm_add_epi16(step2[0], step2[3]);  | 
1040  | 0  |   step1[1] = _mm_add_epi16(step2[1], step2[2]);  | 
1041  | 0  |   step1[2] = _mm_sub_epi16(step2[1], step2[2]);  | 
1042  | 0  |   step1[3] = _mm_sub_epi16(step2[0], step2[3]);  | 
1043  | 0  |   step1[4] = step2[4];  | 
1044  | 0  |   butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);  | 
1045  | 0  |   step1[7] = step2[7];  | 
1046  |  |  | 
1047  |  |   // stage 6  | 
1048  | 0  |   out[0] = _mm_add_epi16(step1[0], step1[7]);  | 
1049  | 0  |   out[1] = _mm_add_epi16(step1[1], step1[6]);  | 
1050  | 0  |   out[2] = _mm_add_epi16(step1[2], step1[5]);  | 
1051  | 0  |   out[3] = _mm_add_epi16(step1[3], step1[4]);  | 
1052  | 0  |   out[4] = _mm_sub_epi16(step1[3], step1[4]);  | 
1053  | 0  |   out[5] = _mm_sub_epi16(step1[2], step1[5]);  | 
1054  | 0  |   out[6] = _mm_sub_epi16(step1[1], step1[6]);  | 
1055  | 0  |   out[7] = _mm_sub_epi16(step1[0], step1[7]);  | 
1056  | 0  | }  | 
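// In stage 4 above, butterfly(in[0], in[16], cospi_16_64, cospi_16_64, ...)
// uses the same constant on both inputs. Since cospi_16_64 is roughly
// 2^14 / sqrt(2), that pair reduces to the classic sum/difference butterfly
// scaled by 1/sqrt(2). A scalar sketch, reusing the hypothetical
// round_shift_14() from the earlier sketch:
static INLINE void sum_diff_butterfly_model(int16_t a, int16_t b,
                                            int16_t *sum, int16_t *diff) {
  *sum = round_shift_14((a + b) * cospi_16_64);   // lands in step2[0]
  *diff = round_shift_14((a - b) * cospi_16_64);  // lands in step2[1]
}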
1057  |  |  | 
1058  |  | // For each 8x32 block __m128i in[32],  | 
1059  |  | // inputs with indices 2, 6, 10, 14, 18, 22, 26, 30  | 
1060  |  | // produce output pixels 8-15 in __m128i out[32].  | 
1061  |  | static INLINE void idct32_1024_8x32_quarter_2(  | 
1062  | 0  |     const __m128i *const in /*in[32]*/, __m128i *const out /*out[16]*/) { | 
1063  | 0  |   __m128i step1[16], step2[16];  | 
1064  |  |  | 
1065  |  |   // stage 2  | 
1066  | 0  |   butterfly(in[2], in[30], cospi_30_64, cospi_2_64, &step2[8], &step2[15]);  | 
1067  | 0  |   butterfly(in[18], in[14], cospi_14_64, cospi_18_64, &step2[9], &step2[14]);  | 
1068  | 0  |   butterfly(in[10], in[22], cospi_22_64, cospi_10_64, &step2[10], &step2[13]);  | 
1069  | 0  |   butterfly(in[26], in[6], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);  | 
1070  |  |  | 
1071  |  |   // stage 3  | 
1072  | 0  |   step1[8] = _mm_add_epi16(step2[8], step2[9]);  | 
1073  | 0  |   step1[9] = _mm_sub_epi16(step2[8], step2[9]);  | 
1074  | 0  |   step1[10] = _mm_sub_epi16(step2[11], step2[10]);  | 
1075  | 0  |   step1[11] = _mm_add_epi16(step2[11], step2[10]);  | 
1076  | 0  |   step1[12] = _mm_add_epi16(step2[12], step2[13]);  | 
1077  | 0  |   step1[13] = _mm_sub_epi16(step2[12], step2[13]);  | 
1078  | 0  |   step1[14] = _mm_sub_epi16(step2[15], step2[14]);  | 
1079  | 0  |   step1[15] = _mm_add_epi16(step2[15], step2[14]);  | 
1080  |  |  | 
1081  | 0  |   idct32_8x32_quarter_2_stage_4_to_6(step1, out);  | 
1082  | 0  | }  | 
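// Quarters 1 and 2 together consume every even input index: quarter 1 takes
// indices divisible by 4 and yields output rows 0-7, quarter 2 takes the
// remaining even indices and yields rows 8-15; the odd indices all go to
// quarters 3/4. A small bookkeeping sketch (illustrative only):
static INLINE int quarter_of_input_model(int idx) {
  if (idx % 4 == 0) return 1;  // quarter 1   -> out[0..7]
  if (idx % 2 == 0) return 2;  // quarter 2   -> out[8..15]
  return 3;                    // quarters 3/4 -> out[16..31]
}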
1083  |  |  | 
1084  |  | static INLINE void idct32_1024_8x32_quarter_1_2(  | 
1085  | 0  |     const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) { | 
1086  | 0  |   __m128i temp[16];  | 
1087  | 0  |   idct32_1024_8x32_quarter_1(in, temp);  | 
1088  | 0  |   idct32_1024_8x32_quarter_2(in, temp);  | 
1089  |  |   // stage 7  | 
1090  | 0  |   add_sub_butterfly(temp, out, 16);  | 
1091  | 0  | }  | 
1092  |  |  | 
1093  |  | // For each 8x32 block __m128i in[32],  | 
1094  |  | // inputs with odd indices  | 
1095  |  | // 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31  | 
1096  |  | // produce output pixels 16-23 and 24-31 in __m128i out[32].  | 
1097  |  | static INLINE void idct32_1024_8x32_quarter_3_4(  | 
1098  | 0  |     const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) { | 
1099  | 0  |   __m128i step1[32], step2[32];  | 
1100  |  |  | 
1101  |  |   // stage 1  | 
1102  | 0  |   butterfly(in[1], in[31], cospi_31_64, cospi_1_64, &step1[16], &step1[31]);  | 
1103  | 0  |   butterfly(in[17], in[15], cospi_15_64, cospi_17_64, &step1[17], &step1[30]);  | 
1104  | 0  |   butterfly(in[9], in[23], cospi_23_64, cospi_9_64, &step1[18], &step1[29]);  | 
1105  | 0  |   butterfly(in[25], in[7], cospi_7_64, cospi_25_64, &step1[19], &step1[28]);  | 
1106  |  |  | 
1107  | 0  |   butterfly(in[5], in[27], cospi_27_64, cospi_5_64, &step1[20], &step1[27]);  | 
1108  | 0  |   butterfly(in[21], in[11], cospi_11_64, cospi_21_64, &step1[21], &step1[26]);  | 
1109  |  |  | 
1110  | 0  |   butterfly(in[13], in[19], cospi_19_64, cospi_13_64, &step1[22], &step1[25]);  | 
1111  | 0  |   butterfly(in[29], in[3], cospi_3_64, cospi_29_64, &step1[23], &step1[24]);  | 
1112  |  |  | 
1113  |  |   // stage 2  | 
1114  | 0  |   step2[16] = _mm_add_epi16(step1[16], step1[17]);  | 
1115  | 0  |   step2[17] = _mm_sub_epi16(step1[16], step1[17]);  | 
1116  | 0  |   step2[18] = _mm_sub_epi16(step1[19], step1[18]);  | 
1117  | 0  |   step2[19] = _mm_add_epi16(step1[19], step1[18]);  | 
1118  | 0  |   step2[20] = _mm_add_epi16(step1[20], step1[21]);  | 
1119  | 0  |   step2[21] = _mm_sub_epi16(step1[20], step1[21]);  | 
1120  | 0  |   step2[22] = _mm_sub_epi16(step1[23], step1[22]);  | 
1121  | 0  |   step2[23] = _mm_add_epi16(step1[23], step1[22]);  | 
1122  |  |  | 
1123  | 0  |   step2[24] = _mm_add_epi16(step1[24], step1[25]);  | 
1124  | 0  |   step2[25] = _mm_sub_epi16(step1[24], step1[25]);  | 
1125  | 0  |   step2[26] = _mm_sub_epi16(step1[27], step1[26]);  | 
1126  | 0  |   step2[27] = _mm_add_epi16(step1[27], step1[26]);  | 
1127  | 0  |   step2[28] = _mm_add_epi16(step1[28], step1[29]);  | 
1128  | 0  |   step2[29] = _mm_sub_epi16(step1[28], step1[29]);  | 
1129  | 0  |   step2[30] = _mm_sub_epi16(step1[31], step1[30]);  | 
1130  | 0  |   step2[31] = _mm_add_epi16(step1[31], step1[30]);  | 
1131  |  |  | 
1132  |  |   // stage 3  | 
1133  | 0  |   step1[16] = step2[16];  | 
1134  | 0  |   step1[31] = step2[31];  | 
1135  | 0  |   butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],  | 
1136  | 0  |             &step1[30]);  | 
1137  | 0  |   butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],  | 
1138  | 0  |             &step1[29]);  | 
1139  | 0  |   step1[19] = step2[19];  | 
1140  | 0  |   step1[20] = step2[20];  | 
1141  | 0  |   butterfly(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],  | 
1142  | 0  |             &step1[26]);  | 
1143  | 0  |   butterfly(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],  | 
1144  | 0  |             &step1[25]);  | 
1145  | 0  |   step1[23] = step2[23];  | 
1146  | 0  |   step1[24] = step2[24];  | 
1147  | 0  |   step1[27] = step2[27];  | 
1148  | 0  |   step1[28] = step2[28];  | 
1149  |  |  | 
1150  | 0  |   idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);  | 
1151  | 0  | }  | 
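// The scalar reference (idct32_c in vpx_dsp/inv_txfm.c) wraps every sum to
// int16_t via WRAPLOW. The plain _mm_add_epi16/_mm_sub_epi16 calls in
// stage 2 above can match it bit-exactly because 16-bit SIMD lanes wrap
// modulo 2^16, which is precisely the behavior WRAPLOW emulates. A scalar
// model of one lane (the name is my own):
static INLINE int16_t wraplow_add_model(int16_t a, int16_t b) {
  return (int16_t)((uint16_t)a + (uint16_t)b);  // same wraparound as a lane
}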
1152  |  |  | 
1153  |  | void idct32_1024_8x32(const __m128i *const in /*in[32]*/,  | 
1154  | 0  |                       __m128i *const out /*out[32]*/) { | 
1155  | 0  |   __m128i temp[32];  | 
1156  |  |  | 
1157  | 0  |   idct32_1024_8x32_quarter_1_2(in, temp);  | 
1158  | 0  |   idct32_1024_8x32_quarter_3_4(in, temp);  | 
1159  |  |   // final stage  | 
1160  | 0  |   add_sub_butterfly(temp, out, 32);  | 
1161  | 0  | }  | 
1162  |  |  | 
1163  |  | void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,  | 
1164  | 0  |                                  int stride) { | 
1165  | 0  |   __m128i col[4][32], io[32];  | 
1166  | 0  |   int i;  | 
1167  |  |  | 
1168  |  |   // rows  | 
1169  | 0  |   for (i = 0; i < 4; i++) { | 
1170  | 0  |     load_transpose_16bit_8x8(&input[0], 32, &io[0]);  | 
1171  | 0  |     load_transpose_16bit_8x8(&input[8], 32, &io[8]);  | 
1172  | 0  |     load_transpose_16bit_8x8(&input[16], 32, &io[16]);  | 
1173  | 0  |     load_transpose_16bit_8x8(&input[24], 32, &io[24]);  | 
1174  | 0  |     idct32_1024_8x32(io, col[i]);  | 
1175  | 0  |     input += 32 << 3;  | 
1176  | 0  |   }  | 
1177  |  |  | 
1178  |  |   // columns  | 
1179  | 0  |   for (i = 0; i < 32; i += 8) { | 
1180  |  |     // Transpose 32x8 block to 8x32 block  | 
1181  | 0  |     transpose_16bit_8x8(col[0] + i, io);  | 
1182  | 0  |     transpose_16bit_8x8(col[1] + i, io + 8);  | 
1183  | 0  |     transpose_16bit_8x8(col[2] + i, io + 16);  | 
1184  | 0  |     transpose_16bit_8x8(col[3] + i, io + 24);  | 
1185  |  |  | 
1186  | 0  |     idct32_1024_8x32(io, io);  | 
1187  | 0  |     store_buffer_8x32(io, dest, stride);  | 
1188  | 0  |     dest += 8;  | 
1189  | 0  |   }  | 
1190  | 0  | }  | 
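// The full-coefficient path works in 8x32 slices: each row-pass iteration
// consumes 8 rows of 32 coefficients (hence input += 32 << 3) and buffers
// the result transposed in col[i]; the column pass then re-transposes 8
// columns at a time. The equivalent separable structure, sketched with a
// hypothetical scalar idct32_1d() kernel that stands in for idct32_1024_8x32:
static void idct32_1d(const int16_t in[32], int16_t out[32]);  // hypothetical

static void idct32x32_model(const int16_t in[32][32], int16_t out[32][32]) {
  int16_t tmp[32][32], colv[32], res[32];
  int r, c;
  for (r = 0; r < 32; r++) idct32_1d(in[r], tmp[r]);  // row pass
  for (c = 0; c < 32; c++) {                          // column pass
    for (r = 0; r < 32; r++) colv[r] = tmp[r][c];
    idct32_1d(colv, res);
    for (r = 0; r < 32; r++) out[r][c] = res[r];
  }
}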
1191  |  |  | 
1192  |  | void vpx_idct32x32_135_add_sse2(const tran_low_t *input, uint8_t *dest,  | 
1193  | 0  |                                 int stride) { | 
1194  | 0  |   __m128i col[2][32], in[32], out[32];  | 
1195  | 0  |   int i;  | 
1196  |  |  | 
1197  | 0  |   for (i = 16; i < 32; i++) { | 
1198  | 0  |     in[i] = _mm_setzero_si128();  | 
1199  | 0  |   }  | 
1200  |  |  | 
1201  |  |   // rows  | 
1202  | 0  |   for (i = 0; i < 2; i++) { | 
1203  | 0  |     load_transpose_16bit_8x8(&input[0], 32, &in[0]);  | 
1204  | 0  |     load_transpose_16bit_8x8(&input[8], 32, &in[8]);  | 
1205  | 0  |     idct32_1024_8x32(in, col[i]);  | 
1206  | 0  |     input += 32 << 3;  | 
1207  | 0  |   }  | 
1208  |  |  | 
1209  |  |   // columns  | 
1210  | 0  |   for (i = 0; i < 32; i += 8) { | 
1211  | 0  |     transpose_16bit_8x8(col[0] + i, in);  | 
1212  | 0  |     transpose_16bit_8x8(col[1] + i, in + 8);  | 
1213  | 0  |     idct32_1024_8x32(in, out);  | 
1214  | 0  |     store_buffer_8x32(out, dest, stride);  | 
1215  | 0  |     dest += 8;  | 
1216  | 0  |   }  | 
1217  | 0  | }  | 
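// The _1/_34/_135/_1024 variants exist because quantization usually leaves
// non-zero coefficients only in the top-left corner of the block; this _135
// path zeroes in[16..31] and runs just two row-pass groups. In the VP9
// decoder the variant is typically chosen from the end-of-block (eob) count;
// the thresholds in this sketch are an assumption, not part of this file:
static void idct32x32_add_dispatch_model(const tran_low_t *input,
                                         uint8_t *dest, int stride, int eob) {
  if (eob == 1)
    vpx_idct32x32_1_add_sse2(input, dest, stride);  // DC only
  else if (eob <= 34)
    vpx_idct32x32_34_add_sse2(input, dest, stride);  // top-left 8x8
  else if (eob <= 135)
    vpx_idct32x32_135_add_sse2(input, dest, stride);  // top-left 16x16
  else
    vpx_idct32x32_1024_add_sse2(input, dest, stride);  // all coefficients
}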
1218  |  |  | 
1219  |  | void vpx_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,  | 
1220  | 145k  |                               int stride) { | 
1221  | 145k  |   __m128i dc_value;  | 
1222  | 145k  |   int j;  | 
1223  | 145k  |   tran_high_t a1;  | 
1224  | 145k  |   tran_low_t out =  | 
1225  | 145k  |       WRAPLOW(dct_const_round_shift((int16_t)input[0] * cospi_16_64));  | 
1226  |  |  | 
1227  | 145k  |   out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));  | 
1228  | 145k  |   a1 = ROUND_POWER_OF_TWO(out, 6);  | 
1229  | 145k  |   dc_value = _mm_set1_epi16((int16_t)a1);  | 
1230  |  |  | 
1231  | 4.81M  |   for (j = 0; j < 32; ++j) { | 
1232  | 4.67M  |     recon_and_store_16(dest + j * stride + 0, dc_value);  | 
1233  | 4.67M  |     recon_and_store_16(dest + j * stride + 16, dc_value);  | 
1234  | 4.67M  |   }  | 
1235  | 145k  | }  |
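// The DC-only path above boils down to three scalar steps before the value
// is broadcast and added to all 1024 pixels: two Q14 multiplications by
// cospi_16_64 (an overall scaling of about 1/2) and a final rounding by
// 6 bits. A scalar equivalent of the value computed into dc_value, with the
// int16_t casts standing in for WRAPLOW:
static INLINE int16_t idct32x32_dc_model(int16_t dc_coeff) {
  int32_t a = (int16_t)(((int32_t)dc_coeff * cospi_16_64 + (1 << 13)) >> 14);
  a = (int16_t)(((int32_t)a * cospi_16_64 + (1 << 13)) >> 14);
  return (int16_t)((a + (1 << 5)) >> 6);  // ROUND_POWER_OF_TWO(a, 6)
}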