/src/mozilla-central/media/libyuv/libyuv/source/rotate_gcc.cc
/*
 *  Copyright 2015 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/rotate_row.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// This module is for GCC x86 and x64.
#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(__x86_64__) || (defined(__i386__) && !defined(_MSC_VER)))

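// How these kernels transpose: an 8x8 byte transpose falls out of three
// rounds of interleaves. punpcklbw interleaves the bytes of two rows
// ([a0 a1 ...] with [b0 b1 ...] gives [a0 b0 a1 b1 ...]), punpcklwd then
// interleaves 16-bit pairs, and punpckldq interleaves 32-bit quads; after
// the byte, word, and dword rounds, byte j of output row i is byte i of
// input row j. palignr $0x8 with the same register as source and
// destination rotates the high 8 bytes into the low half, exposing them
// for handling as another row.
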
// Transpose 8x8. 32 or 64 bit, but not NaCl for 64 bit.
#if defined(HAS_TRANSPOSEWX8_SSSE3)
void TransposeWx8_SSSE3(const uint8_t* src,
                        int src_stride,
                        uint8_t* dst,
                        int dst_stride,
                        int width) {
  // Operands: %0 = src, %1 = dst, %2 = width (8 columns consumed per
  // iteration), %3 = src_stride, %4 = dst_stride.
  asm volatile(
      // Read in the data from the source pointer.
      // First round of bit swap.
      LABELALIGN
      "1:                                        \n"
      "movq       (%0),%%xmm0                    \n"
      "movq       (%0,%3),%%xmm1                 \n"
      "lea        (%0,%3,2),%0                   \n"
      "punpcklbw  %%xmm1,%%xmm0                  \n"
      "movq       (%0),%%xmm2                    \n"
      "movdqa     %%xmm0,%%xmm1                  \n"
      "palignr    $0x8,%%xmm1,%%xmm1             \n"
      "movq       (%0,%3),%%xmm3                 \n"
      "lea        (%0,%3,2),%0                   \n"
      "punpcklbw  %%xmm3,%%xmm2                  \n"
      "movdqa     %%xmm2,%%xmm3                  \n"
      "movq       (%0),%%xmm4                    \n"
      "palignr    $0x8,%%xmm3,%%xmm3             \n"
      "movq       (%0,%3),%%xmm5                 \n"
      "lea        (%0,%3,2),%0                   \n"
      "punpcklbw  %%xmm5,%%xmm4                  \n"
      "movdqa     %%xmm4,%%xmm5                  \n"
      "movq       (%0),%%xmm6                    \n"
      "palignr    $0x8,%%xmm5,%%xmm5             \n"
      "movq       (%0,%3),%%xmm7                 \n"
      "lea        (%0,%3,2),%0                   \n"
      "punpcklbw  %%xmm7,%%xmm6                  \n"
      "neg        %3                             \n"
      "movdqa     %%xmm6,%%xmm7                  \n"
      "lea        0x8(%0,%3,8),%0                \n"
      "palignr    $0x8,%%xmm7,%%xmm7             \n"
      "neg        %3                             \n"
      // Second round of bit swap.
      "punpcklwd  %%xmm2,%%xmm0                  \n"
      "punpcklwd  %%xmm3,%%xmm1                  \n"
      "movdqa     %%xmm0,%%xmm2                  \n"
      "movdqa     %%xmm1,%%xmm3                  \n"
      "palignr    $0x8,%%xmm2,%%xmm2             \n"
      "palignr    $0x8,%%xmm3,%%xmm3             \n"
      "punpcklwd  %%xmm6,%%xmm4                  \n"
      "punpcklwd  %%xmm7,%%xmm5                  \n"
      "movdqa     %%xmm4,%%xmm6                  \n"
      "movdqa     %%xmm5,%%xmm7                  \n"
      "palignr    $0x8,%%xmm6,%%xmm6             \n"
      "palignr    $0x8,%%xmm7,%%xmm7             \n"
      // Third round of bit swap.
      // Write to the destination pointer.
      "punpckldq  %%xmm4,%%xmm0                  \n"
      "movq       %%xmm0,(%1)                    \n"
      "movdqa     %%xmm0,%%xmm4                  \n"
      "palignr    $0x8,%%xmm4,%%xmm4             \n"
      "movq       %%xmm4,(%1,%4)                 \n"
      "lea        (%1,%4,2),%1                   \n"
      "punpckldq  %%xmm6,%%xmm2                  \n"
      "movdqa     %%xmm2,%%xmm6                  \n"
      "movq       %%xmm2,(%1)                    \n"
      "palignr    $0x8,%%xmm6,%%xmm6             \n"
      "punpckldq  %%xmm5,%%xmm1                  \n"
      "movq       %%xmm6,(%1,%4)                 \n"
      "lea        (%1,%4,2),%1                   \n"
      "movdqa     %%xmm1,%%xmm5                  \n"
      "movq       %%xmm1,(%1)                    \n"
      "palignr    $0x8,%%xmm5,%%xmm5             \n"
      "movq       %%xmm5,(%1,%4)                 \n"
      "lea        (%1,%4,2),%1                   \n"
      "punpckldq  %%xmm7,%%xmm3                  \n"
      "movq       %%xmm3,(%1)                    \n"
      "movdqa     %%xmm3,%%xmm7                  \n"
      "palignr    $0x8,%%xmm7,%%xmm7             \n"
      "sub        $0x8,%2                        \n"
      "movq       %%xmm7,(%1,%4)                 \n"
      "lea        (%1,%4,2),%1                   \n"
      "jg         1b                             \n"
      : "+r"(src),                    // %0
        "+r"(dst),                    // %1
        "+r"(width)                   // %2
      : "r"((intptr_t)(src_stride)),  // %3
        "r"((intptr_t)(dst_stride))   // %4
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5",
        "xmm6", "xmm7");
}
#endif  // defined(HAS_TRANSPOSEWX8_SSSE3)
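
// For reference, the effect of a TransposeWx8 kernel can be written in
// portable C. This is an illustrative sketch only: the name
// TransposeWx8_Sketch_C is hypothetical and not part of libyuv, whose real
// scalar fallback is TransposeWx8_C in rotate_common.cc.
static void TransposeWx8_Sketch_C(const uint8_t* src,
                                  int src_stride,
                                  uint8_t* dst,
                                  int dst_stride,
                                  int width) {
  // Destination row i collects byte i of each of the 8 source rows.
  for (int i = 0; i < width; ++i) {
    for (int j = 0; j < 8; ++j) {
      dst[i * dst_stride + j] = src[j * src_stride + i];
    }
  }
}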

// Transpose 16x8. 64 bit.
#if defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
void TransposeWx8_Fast_SSSE3(const uint8_t* src,
                             int src_stride,
                             uint8_t* dst,
                             int dst_stride,
                             int width) {
  // Operands: %0 = src, %1 = dst, %2 = width (16 columns consumed per
  // iteration), %3 = src_stride, %4 = dst_stride.
  asm volatile(
      // Read in the data from the source pointer.
      // First round of bit swap.
      LABELALIGN
      "1:                                        \n"
      "movdqu     (%0),%%xmm0                    \n"
      "movdqu     (%0,%3),%%xmm1                 \n"
      "lea        (%0,%3,2),%0                   \n"
      "movdqa     %%xmm0,%%xmm8                  \n"
      "punpcklbw  %%xmm1,%%xmm0                  \n"
      "punpckhbw  %%xmm1,%%xmm8                  \n"
      "movdqu     (%0),%%xmm2                    \n"
      "movdqa     %%xmm0,%%xmm1                  \n"
      "movdqa     %%xmm8,%%xmm9                  \n"
      "palignr    $0x8,%%xmm1,%%xmm1             \n"
      "palignr    $0x8,%%xmm9,%%xmm9             \n"
      "movdqu     (%0,%3),%%xmm3                 \n"
      "lea        (%0,%3,2),%0                   \n"
      "movdqa     %%xmm2,%%xmm10                 \n"
      "punpcklbw  %%xmm3,%%xmm2                  \n"
      "punpckhbw  %%xmm3,%%xmm10                 \n"
      "movdqa     %%xmm2,%%xmm3                  \n"
      "movdqa     %%xmm10,%%xmm11                \n"
      "movdqu     (%0),%%xmm4                    \n"
      "palignr    $0x8,%%xmm3,%%xmm3             \n"
      "palignr    $0x8,%%xmm11,%%xmm11           \n"
      "movdqu     (%0,%3),%%xmm5                 \n"
      "lea        (%0,%3,2),%0                   \n"
      "movdqa     %%xmm4,%%xmm12                 \n"
      "punpcklbw  %%xmm5,%%xmm4                  \n"
      "punpckhbw  %%xmm5,%%xmm12                 \n"
      "movdqa     %%xmm4,%%xmm5                  \n"
      "movdqa     %%xmm12,%%xmm13                \n"
      "movdqu     (%0),%%xmm6                    \n"
      "palignr    $0x8,%%xmm5,%%xmm5             \n"
      "palignr    $0x8,%%xmm13,%%xmm13           \n"
      "movdqu     (%0,%3),%%xmm7                 \n"
      "lea        (%0,%3,2),%0                   \n"
      "movdqa     %%xmm6,%%xmm14                 \n"
      "punpcklbw  %%xmm7,%%xmm6                  \n"
      "punpckhbw  %%xmm7,%%xmm14                 \n"
      "neg        %3                             \n"
      "movdqa     %%xmm6,%%xmm7                  \n"
      "movdqa     %%xmm14,%%xmm15                \n"
      "lea        0x10(%0,%3,8),%0               \n"
      "palignr    $0x8,%%xmm7,%%xmm7             \n"
      "palignr    $0x8,%%xmm15,%%xmm15           \n"
      "neg        %3                             \n"
      // Second round of bit swap.
      "punpcklwd  %%xmm2,%%xmm0                  \n"
      "punpcklwd  %%xmm3,%%xmm1                  \n"
      "movdqa     %%xmm0,%%xmm2                  \n"
      "movdqa     %%xmm1,%%xmm3                  \n"
      "palignr    $0x8,%%xmm2,%%xmm2             \n"
      "palignr    $0x8,%%xmm3,%%xmm3             \n"
      "punpcklwd  %%xmm6,%%xmm4                  \n"
      "punpcklwd  %%xmm7,%%xmm5                  \n"
      "movdqa     %%xmm4,%%xmm6                  \n"
      "movdqa     %%xmm5,%%xmm7                  \n"
      "palignr    $0x8,%%xmm6,%%xmm6             \n"
      "palignr    $0x8,%%xmm7,%%xmm7             \n"
      "punpcklwd  %%xmm10,%%xmm8                 \n"
      "punpcklwd  %%xmm11,%%xmm9                 \n"
      "movdqa     %%xmm8,%%xmm10                 \n"
      "movdqa     %%xmm9,%%xmm11                 \n"
      "palignr    $0x8,%%xmm10,%%xmm10           \n"
      "palignr    $0x8,%%xmm11,%%xmm11           \n"
      "punpcklwd  %%xmm14,%%xmm12                \n"
      "punpcklwd  %%xmm15,%%xmm13                \n"
      "movdqa     %%xmm12,%%xmm14                \n"
      "movdqa     %%xmm13,%%xmm15                \n"
      "palignr    $0x8,%%xmm14,%%xmm14           \n"
      "palignr    $0x8,%%xmm15,%%xmm15           \n"
      // Third round of bit swap.
      // Write to the destination pointer.
      "punpckldq  %%xmm4,%%xmm0                  \n"
      "movq       %%xmm0,(%1)                    \n"
      "movdqa     %%xmm0,%%xmm4                  \n"
      "palignr    $0x8,%%xmm4,%%xmm4             \n"
      "movq       %%xmm4,(%1,%4)                 \n"
      "lea        (%1,%4,2),%1                   \n"
      "punpckldq  %%xmm6,%%xmm2                  \n"
      "movdqa     %%xmm2,%%xmm6                  \n"
      "movq       %%xmm2,(%1)                    \n"
      "palignr    $0x8,%%xmm6,%%xmm6             \n"
      "punpckldq  %%xmm5,%%xmm1                  \n"
      "movq       %%xmm6,(%1,%4)                 \n"
      "lea        (%1,%4,2),%1                   \n"
      "movdqa     %%xmm1,%%xmm5                  \n"
      "movq       %%xmm1,(%1)                    \n"
      "palignr    $0x8,%%xmm5,%%xmm5             \n"
      "movq       %%xmm5,(%1,%4)                 \n"
      "lea        (%1,%4,2),%1                   \n"
      "punpckldq  %%xmm7,%%xmm3                  \n"
      "movq       %%xmm3,(%1)                    \n"
      "movdqa     %%xmm3,%%xmm7                  \n"
      "palignr    $0x8,%%xmm7,%%xmm7             \n"
      "movq       %%xmm7,(%1,%4)                 \n"
      "lea        (%1,%4,2),%1                   \n"
      "punpckldq  %%xmm12,%%xmm8                 \n"
      "movq       %%xmm8,(%1)                    \n"
      "movdqa     %%xmm8,%%xmm12                 \n"
      "palignr    $0x8,%%xmm12,%%xmm12           \n"
      "movq       %%xmm12,(%1,%4)                \n"
      "lea        (%1,%4,2),%1                   \n"
      "punpckldq  %%xmm14,%%xmm10                \n"
      "movdqa     %%xmm10,%%xmm14                \n"
      "movq       %%xmm10,(%1)                   \n"
      "palignr    $0x8,%%xmm14,%%xmm14           \n"
      "punpckldq  %%xmm13,%%xmm9                 \n"
      "movq       %%xmm14,(%1,%4)                \n"
      "lea        (%1,%4,2),%1                   \n"
      "movdqa     %%xmm9,%%xmm13                 \n"
      "movq       %%xmm9,(%1)                    \n"
      "palignr    $0x8,%%xmm13,%%xmm13           \n"
      "movq       %%xmm13,(%1,%4)                \n"
      "lea        (%1,%4,2),%1                   \n"
      "punpckldq  %%xmm15,%%xmm11                \n"
      "movq       %%xmm11,(%1)                   \n"
      "movdqa     %%xmm11,%%xmm15                \n"
      "palignr    $0x8,%%xmm15,%%xmm15           \n"
      "sub        $0x10,%2                       \n"
      "movq       %%xmm15,(%1,%4)                \n"
      "lea        (%1,%4,2),%1                   \n"
      "jg         1b                             \n"
      : "+r"(src),                    // %0
        "+r"(dst),                    // %1
        "+r"(width)                   // %2
      : "r"((intptr_t)(src_stride)),  // %3
        "r"((intptr_t)(dst_stride))   // %4
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5",
        "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13",
        "xmm14", "xmm15");
}
#endif  // defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
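
// Design note: the _Fast variant above transposes a 16x8 block per loop trip.
// Its unaligned 16-byte movdqu loads pull two 8-column tiles from each of 8
// rows (punpckhbw routes the second tile into xmm8-xmm15), and those extra
// registers, available only in 64-bit mode, keep both tiles live through the
// same three interleave rounds, halving the iteration count of
// TransposeWx8_SSSE3 at the cost of being x86-64 only.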

// Transpose UV 8x8. 64 bit.
#if defined(HAS_TRANSPOSEUVWX8_SSE2)
void TransposeUVWx8_SSE2(const uint8_t* src,
                         int src_stride,
                         uint8_t* dst_a,
                         int dst_stride_a,
                         uint8_t* dst_b,
                         int dst_stride_b,
                         int width) {
  // Operands: %0 = src (interleaved UV), %1 = dst_a (U plane), %2 = dst_b
  // (V plane), %3 = width in UV pairs, %4 = src_stride, %5 = dst_stride_a,
  // %6 = dst_stride_b.
  asm volatile(
      // Read in the data from the source pointer.
      // First round of bit swap.
      LABELALIGN
      "1:                                        \n"
      "movdqu     (%0),%%xmm0                    \n"
      "movdqu     (%0,%4),%%xmm1                 \n"
      "lea        (%0,%4,2),%0                   \n"
      "movdqa     %%xmm0,%%xmm8                  \n"
      "punpcklbw  %%xmm1,%%xmm0                  \n"
      "punpckhbw  %%xmm1,%%xmm8                  \n"
      "movdqa     %%xmm8,%%xmm1                  \n"
      "movdqu     (%0),%%xmm2                    \n"
      "movdqu     (%0,%4),%%xmm3                 \n"
      "lea        (%0,%4,2),%0                   \n"
      "movdqa     %%xmm2,%%xmm8                  \n"
      "punpcklbw  %%xmm3,%%xmm2                  \n"
      "punpckhbw  %%xmm3,%%xmm8                  \n"
      "movdqa     %%xmm8,%%xmm3                  \n"
      "movdqu     (%0),%%xmm4                    \n"
      "movdqu     (%0,%4),%%xmm5                 \n"
      "lea        (%0,%4,2),%0                   \n"
      "movdqa     %%xmm4,%%xmm8                  \n"
      "punpcklbw  %%xmm5,%%xmm4                  \n"
      "punpckhbw  %%xmm5,%%xmm8                  \n"
      "movdqa     %%xmm8,%%xmm5                  \n"
      "movdqu     (%0),%%xmm6                    \n"
      "movdqu     (%0,%4),%%xmm7                 \n"
      "lea        (%0,%4,2),%0                   \n"
      "movdqa     %%xmm6,%%xmm8                  \n"
      "punpcklbw  %%xmm7,%%xmm6                  \n"
      "neg        %4                             \n"
      "lea        0x10(%0,%4,8),%0               \n"
      "punpckhbw  %%xmm7,%%xmm8                  \n"
      "movdqa     %%xmm8,%%xmm7                  \n"
      "neg        %4                             \n"
      // Second round of bit swap.
      "movdqa     %%xmm0,%%xmm8                  \n"
      "movdqa     %%xmm1,%%xmm9                  \n"
      "punpckhwd  %%xmm2,%%xmm8                  \n"
      "punpckhwd  %%xmm3,%%xmm9                  \n"
      "punpcklwd  %%xmm2,%%xmm0                  \n"
      "punpcklwd  %%xmm3,%%xmm1                  \n"
      "movdqa     %%xmm8,%%xmm2                  \n"
      "movdqa     %%xmm9,%%xmm3                  \n"
      "movdqa     %%xmm4,%%xmm8                  \n"
      "movdqa     %%xmm5,%%xmm9                  \n"
      "punpckhwd  %%xmm6,%%xmm8                  \n"
      "punpckhwd  %%xmm7,%%xmm9                  \n"
      "punpcklwd  %%xmm6,%%xmm4                  \n"
      "punpcklwd  %%xmm7,%%xmm5                  \n"
      "movdqa     %%xmm8,%%xmm6                  \n"
      "movdqa     %%xmm9,%%xmm7                  \n"
      // Third round of bit swap.
      // Write to the destination pointer.
      "movdqa     %%xmm0,%%xmm8                  \n"
      "punpckldq  %%xmm4,%%xmm0                  \n"
      "movlpd     %%xmm0,(%1)                    \n"  // Write back U channel
      "movhpd     %%xmm0,(%2)                    \n"  // Write back V channel
      "punpckhdq  %%xmm4,%%xmm8                  \n"
      "movlpd     %%xmm8,(%1,%5)                 \n"
      "lea        (%1,%5,2),%1                   \n"
      "movhpd     %%xmm8,(%2,%6)                 \n"
      "lea        (%2,%6,2),%2                   \n"
      "movdqa     %%xmm2,%%xmm8                  \n"
      "punpckldq  %%xmm6,%%xmm2                  \n"
      "movlpd     %%xmm2,(%1)                    \n"
      "movhpd     %%xmm2,(%2)                    \n"
      "punpckhdq  %%xmm6,%%xmm8                  \n"
      "movlpd     %%xmm8,(%1,%5)                 \n"
      "lea        (%1,%5,2),%1                   \n"
      "movhpd     %%xmm8,(%2,%6)                 \n"
      "lea        (%2,%6,2),%2                   \n"
      "movdqa     %%xmm1,%%xmm8                  \n"
      "punpckldq  %%xmm5,%%xmm1                  \n"
      "movlpd     %%xmm1,(%1)                    \n"
      "movhpd     %%xmm1,(%2)                    \n"
      "punpckhdq  %%xmm5,%%xmm8                  \n"
      "movlpd     %%xmm8,(%1,%5)                 \n"
      "lea        (%1,%5,2),%1                   \n"
      "movhpd     %%xmm8,(%2,%6)                 \n"
      "lea        (%2,%6,2),%2                   \n"
      "movdqa     %%xmm3,%%xmm8                  \n"
      "punpckldq  %%xmm7,%%xmm3                  \n"
      "movlpd     %%xmm3,(%1)                    \n"
      "movhpd     %%xmm3,(%2)                    \n"
      "punpckhdq  %%xmm7,%%xmm8                  \n"
      "sub        $0x8,%3                        \n"
      "movlpd     %%xmm8,(%1,%5)                 \n"
      "lea        (%1,%5,2),%1                   \n"
      "movhpd     %%xmm8,(%2,%6)                 \n"
      "lea        (%2,%6,2),%2                   \n"
      "jg         1b                             \n"
      : "+r"(src),                      // %0
        "+r"(dst_a),                    // %1
        "+r"(dst_b),                    // %2
        "+r"(width)                     // %3
      : "r"((intptr_t)(src_stride)),    // %4
        "r"((intptr_t)(dst_stride_a)),  // %5
        "r"((intptr_t)(dst_stride_b))   // %6
      : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5",
        "xmm6", "xmm7", "xmm8", "xmm9");
}
#endif  // defined(HAS_TRANSPOSEUVWX8_SSE2)
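
// For reference, the UV transpose can likewise be sketched in portable C.
// Illustrative only: TransposeUVWx8_Sketch_C is a hypothetical name, not
// libyuv's actual scalar fallback (TransposeUVWx8_C in rotate_common.cc).
static void TransposeUVWx8_Sketch_C(const uint8_t* src,
                                    int src_stride,
                                    uint8_t* dst_a,
                                    int dst_stride_a,
                                    uint8_t* dst_b,
                                    int dst_stride_b,
                                    int width) {
  // Source rows hold interleaved UV pairs; pair i of each of the 8 rows
  // splits into row i of dst_a (U bytes) and row i of dst_b (V bytes).
  for (int i = 0; i < width; ++i) {
    for (int j = 0; j < 8; ++j) {
      dst_a[i * dst_stride_a + j] = src[j * src_stride + 2 * i + 0];
      dst_b[i * dst_stride_b + j] = src[j * src_stride + 2 * i + 1];
    }
  }
}
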
#endif  // defined(__x86_64__) || defined(__i386__)

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif