/src/mozilla-central/media/libopus/celt/vq.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (c) 2007-2008 CSIRO |
2 | | Copyright (c) 2007-2009 Xiph.Org Foundation |
3 | | Written by Jean-Marc Valin */ |
4 | | /* |
5 | | Redistribution and use in source and binary forms, with or without |
6 | | modification, are permitted provided that the following conditions |
7 | | are met: |
8 | | |
9 | | - Redistributions of source code must retain the above copyright |
10 | | notice, this list of conditions and the following disclaimer. |
11 | | |
12 | | - Redistributions in binary form must reproduce the above copyright |
13 | | notice, this list of conditions and the following disclaimer in the |
14 | | documentation and/or other materials provided with the distribution. |
15 | | |
16 | | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 | | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 | | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 | | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER |
20 | | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
21 | | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
22 | | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
23 | | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
24 | | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
25 | | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
26 | | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | | */ |
28 | | |
29 | | #ifdef HAVE_CONFIG_H |
30 | | #include "config.h" |
31 | | #endif |
32 | | |
33 | | #include "mathops.h" |
34 | | #include "cwrs.h" |
35 | | #include "vq.h" |
36 | | #include "arch.h" |
37 | | #include "os_support.h" |
38 | | #include "bands.h" |
39 | | #include "rate.h" |
40 | | #include "pitch.h" |
41 | | |
#ifndef OVERRIDE_vq_exp_rotation1
/* Apply one plane-rotation stage across the vector: each pair
   (Xptr[0], Xptr[stride]) is rotated using the cosine/sine pair (c, s).
   A left-to-right pass is followed by a right-to-left pass over the first
   len-2*stride positions; exp_rotation() calls this again with the sign
   of s flipped to undo the transform. */
static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
{
   int i;
   opus_val16 ms;   /* -s, so the second output row uses the inverse sine term */
   celt_norm *Xptr;
   Xptr = X;
   ms = NEG16(s);
   /* Forward pass: rotate pairs left to right. */
   for (i=0;i<len-stride;i++)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      /* [x1'; x2'] = [c -s; s c] * [x1; x2], with Q15 rounding. */
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
      *Xptr++ = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
   /* Backward pass: same rotation applied right to left. */
   Xptr = &X[len-2*stride-1];
   for (i=len-2*stride-1;i>=0;i--)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
      *Xptr-- = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
}
#endif /* OVERRIDE_vq_exp_rotation1 */
69 | | |
/* Apply (dir=+1) or undo (dir=-1) the spreading rotation on X.  The rotation
   angle depends on the spread setting and on the pulse density: the fewer
   pulses K relative to len, the stronger the rotation. */
void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread)
{
   static const int SPREAD_FACTOR[3]={15,10,5};
   int i;
   opus_val16 c, s;
   opus_val16 gain, theta;
   int stride2=0;
   int factor;

   /* No rotation when the pulse density is high enough or spreading is off. */
   if (2*K>=len || spread==SPREAD_NONE)
      return;
   factor = SPREAD_FACTOR[spread-1];

   /* gain = len/(len+factor*K) in Q15; theta = gain^2/2. */
   gain = celt_div((opus_val32)MULT16_16(Q15_ONE,len),(opus_val32)(len+factor*K));
   theta = HALF16(MULT16_16_Q15(gain,gain));

   c = celt_cos_norm(EXTEND32(theta));
   s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /*  sin(theta) */

   /* For long blocks, add a second rotation stage at a coarser stride. */
   if (len>=8*stride)
   {
      stride2 = 1;
      /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
         It's basically incrementing as long as (stride2+0.5)^2 < len/stride. */
      while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
         stride2++;
   }
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
     extract_collapse_mask().*/
   len = celt_udiv(len, stride);
   for (i=0;i<stride;i++)
   {
      if (dir < 0)
      {
         /* Inverse: undo the two stages in the opposite order, with the
            sine sign flipped relative to the forward direction. */
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, c);
         exp_rotation1(X+i*len, len, 1, c, s);
      } else {
         exp_rotation1(X+i*len, len, 1, c, -s);
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, -c);
      }
   }
}
114 | | |
/** Takes the pitch vector and the decoded residual vector, computes the gain
    that will give ||p+g*y||=1 and mixes the residual with the pitch.
    iy: integer pulse vector of length N; Ryy: its energy term as returned
    by the PVQ search/decode; result is written to X. */
static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X,
      int N, opus_val32 Ryy, opus_val16 gain)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 t;
   opus_val16 g;

#ifdef FIXED_POINT
   /* Normalise Ryy so celt_rsqrt_norm() gets an input in its valid range. */
   k = celt_ilog2(Ryy)>>1;
#endif
   /* NOTE(review): k is declared only under FIXED_POINT; in float builds
      VSHR32()/PSHR32() are expected to discard their shift argument during
      macro expansion, so the name never reaches the compiler — confirm
      against arch.h. */
   t = VSHR32(Ryy, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   /* Scale each pulse by g = gain/sqrt(Ryy); assumes N >= 1 (do-while). */
   i=0;
   do
      X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
   while (++i < N);
}
138 | | |
/* Build a B-bit mask with bit b set iff block b of iy (N/B consecutive
   entries) contains at least one nonzero pulse.  With a single block the
   band can never collapse, so the mask is always 1. */
static unsigned extract_collapse_mask(int *iy, int N, int B)
{
   unsigned mask = 0;
   int per_block;
   int block = 0;
   if (B<=1)
      return 1;
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
     exp_rotation().*/
   per_block = celt_udiv(N, B);
   do {
      unsigned seen = 0;
      int k = 0;
      do {
         seen |= (unsigned)iy[block*per_block+k];
      } while (++k<per_block);
      mask |= (unsigned)(seen!=0)<<block;
   } while (++block<B);
   return mask;
}
160 | | |
/* PVQ pulse search: find the vector iy of K integer pulses in N dimensions
   that (approximately) maximises the correlation with X, by greedy pulse
   placement preceded by an optional projection-based pre-search.
   On return X holds |X| (signs stripped) and the returned value is the
   accumulated energy term for iy, passed to normalise_residual() as Ryy. */
opus_val16 op_pvq_search_c(celt_norm *X, int *iy, int K, int N, int arch)
{
   VARDECL(celt_norm, y);
   VARDECL(int, signx);
   int i, j;
   int pulsesLeft;
   opus_val32 sum;
   opus_val32 xy;   /* running correlation <X, y> */
   opus_val16 yy;   /* running energy term for y */
   SAVE_STACK;

   (void)arch;
   ALLOC(y, N, celt_norm);
   ALLOC(signx, N, int);

   /* Get rid of the sign */
   sum = 0;
   j=0; do {
      signx[j] = X[j]<0;
      /* OPT: Make sure the compiler doesn't use a branch on ABS16(). */
      X[j] = ABS16(X[j]);
      iy[j] = 0;
      y[j] = 0;
   } while (++j<N);

   xy = yy = 0;

   pulsesLeft = K;

   /* Do a pre-search by projecting on the pyramid */
   if (K > (N>>1))
   {
      opus_val16 rcp;
      j=0; do {
         sum += X[j];
      }  while (++j<N);

      /* If X is too small, just replace it with a pulse at 0 */
#ifdef FIXED_POINT
      if (sum <= K)
#else
      /* Prevents infinities and NaNs from causing too many pulses
         to be allocated. 64 is an approximation of infinity here. */
      if (!(sum > EPSILON && sum < 64))
#endif
      {
         X[0] = QCONST16(1.f,14);
         j=1; do
            X[j]=0;
         while (++j<N);
         sum = QCONST16(1.f,14);
      }
#ifdef FIXED_POINT
      rcp = EXTRACT16(MULT16_32_Q16(K, celt_rcp(sum)));
#else
      /* Using K+e with e < 1 guarantees we cannot get more than K pulses. */
      rcp = EXTRACT16(MULT16_32_Q16(K+0.8f, celt_rcp(sum)));
#endif
      j=0; do {
#ifdef FIXED_POINT
         /* It's really important to round *towards zero* here */
         iy[j] = MULT16_16_Q15(X[j],rcp);
#else
         iy[j] = (int)floor(rcp*X[j]);
#endif
         y[j] = (celt_norm)iy[j];
         yy = MAC16_16(yy, y[j],y[j]);
         xy = MAC16_16(xy, X[j],y[j]);
         /* y is kept doubled so the greedy loop below can skip the *2. */
         y[j] *= 2;
         pulsesLeft -= iy[j];
      }  while (++j<N);
   }
   celt_sig_assert(pulsesLeft>=0);

   /* This should never happen, but just in case it does (e.g. on silence)
      we fill the first bin with pulses. */
#ifdef FIXED_POINT_DEBUG
   celt_sig_assert(pulsesLeft<=N+3);
#endif
   if (pulsesLeft > N+3)
   {
      opus_val16 tmp = (opus_val16)pulsesLeft;
      yy = MAC16_16(yy, tmp, tmp);
      yy = MAC16_16(yy, tmp, y[0]);
      iy[0] += pulsesLeft;
      pulsesLeft=0;
   }

   /* Greedy search: place the remaining pulses one at a time, each at the
      position that maximises the approximate score Rxy^2/Ryy. */
   for (i=0;i<pulsesLeft;i++)
   {
      opus_val16 Rxy, Ryy;
      int best_id;
      opus_val32 best_num;
      opus_val16 best_den;
#ifdef FIXED_POINT
      int rshift;
#endif
#ifdef FIXED_POINT
      rshift = 1+celt_ilog2(K-pulsesLeft+i+1);
#endif
      /* NOTE(review): rshift is declared only under FIXED_POINT; in float
         builds SHR32() is expected to discard its shift argument during
         macro expansion, so the name never reaches the compiler — confirm
         against arch.h. */
      best_id = 0;
      /* The squared magnitude term gets added anyway, so we might as well
         add it outside the loop */
      yy = ADD16(yy, 1);

      /* Calculations for position 0 are out of the loop, in part to reduce
         mispredicted branches (since the if condition is usually false)
         in the loop. */
      /* Temporary sums of the new pulse(s) */
      Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[0])),rshift));
      /* We're multiplying y[j] by two so we don't have to do it here */
      Ryy = ADD16(yy, y[0]);

      /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
         Rxy is positive because the sign is pre-computed) */
      Rxy = MULT16_16_Q15(Rxy,Rxy);
      best_den = Ryy;
      best_num = Rxy;
      j=1;
      do {
         /* Temporary sums of the new pulse(s) */
         Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[j])),rshift));
         /* We're multiplying y[j] by two so we don't have to do it here */
         Ryy = ADD16(yy, y[j]);

         /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
            Rxy is positive because the sign is pre-computed) */
         Rxy = MULT16_16_Q15(Rxy,Rxy);
         /* The idea is to check for num/den >= best_num/best_den, but that way
            we can do it without any division */
         /* OPT: It's not clear whether a cmov is faster than a branch here
            since the condition is more often false than true and using
            a cmov introduces data dependencies across iterations. The optimal
            choice may be architecture-dependent. */
         if (opus_unlikely(MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num)))
         {
            best_den = Ryy;
            best_num = Rxy;
            best_id = j;
         }
      } while (++j<N);

      /* Updating the sums of the new pulse(s) */
      xy = ADD32(xy, EXTEND32(X[best_id]));
      /* We're multiplying y[j] by two so we don't have to do it here */
      yy = ADD16(yy, y[best_id]);

      /* Only now that we've made the final choice, update y/iy */
      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
      y[best_id] += 2;
      iy[best_id]++;
   }

   /* Put the original sign back */
   j=0;
   do {
      /*iy[j] = signx[j] ? -iy[j] : iy[j];*/
      /* OPT: This is more likely to be compiled without a branch than the code above
         but has the same performance otherwise. */
      iy[j] = (iy[j]^-signx[j]) + signx[j];
   } while (++j<N);
   RESTORE_STACK;
   return yy;
}
325 | | |
326 | | unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc, |
327 | | opus_val16 gain, int resynth, int arch) |
328 | 0 | { |
329 | 0 | VARDECL(int, iy); |
330 | 0 | opus_val16 yy; |
331 | 0 | unsigned collapse_mask; |
332 | 0 | SAVE_STACK; |
333 | 0 |
|
334 | 0 | celt_assert2(K>0, "alg_quant() needs at least one pulse"); |
335 | 0 | celt_assert2(N>1, "alg_quant() needs at least two dimensions"); |
336 | 0 |
|
337 | 0 | /* Covers vectorization by up to 4. */ |
338 | 0 | ALLOC(iy, N+3, int); |
339 | 0 |
|
340 | 0 | exp_rotation(X, N, 1, B, K, spread); |
341 | 0 |
|
342 | 0 | yy = op_pvq_search(X, iy, K, N, arch); |
343 | 0 |
|
344 | 0 | encode_pulses(iy, N, K, enc); |
345 | 0 |
|
346 | 0 | if (resynth) |
347 | 0 | { |
348 | 0 | normalise_residual(iy, X, N, yy, gain); |
349 | 0 | exp_rotation(X, N, -1, B, K, spread); |
350 | 0 | } |
351 | 0 |
|
352 | 0 | collapse_mask = extract_collapse_mask(iy, N, B); |
353 | 0 | RESTORE_STACK; |
354 | 0 | return collapse_mask; |
355 | 0 | } |
356 | | |
357 | | /** Decode pulse vector and combine the result with the pitch vector to produce |
358 | | the final normalised signal in the current band. */ |
359 | | unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B, |
360 | | ec_dec *dec, opus_val16 gain) |
361 | 0 | { |
362 | 0 | opus_val32 Ryy; |
363 | 0 | unsigned collapse_mask; |
364 | 0 | VARDECL(int, iy); |
365 | 0 | SAVE_STACK; |
366 | 0 |
|
367 | 0 | celt_assert2(K>0, "alg_unquant() needs at least one pulse"); |
368 | 0 | celt_assert2(N>1, "alg_unquant() needs at least two dimensions"); |
369 | 0 | ALLOC(iy, N, int); |
370 | 0 | Ryy = decode_pulses(iy, N, K, dec); |
371 | 0 | normalise_residual(iy, X, N, Ryy, gain); |
372 | 0 | exp_rotation(X, N, -1, B, K, spread); |
373 | 0 | collapse_mask = extract_collapse_mask(iy, N, B); |
374 | 0 | RESTORE_STACK; |
375 | 0 | return collapse_mask; |
376 | 0 | } |
377 | | |
#ifndef OVERRIDE_renormalise_vector
/* Rescale X so its L2 norm becomes `gain` (X <- gain * X/||X||).
   EPSILON keeps the energy strictly positive for an all-zero input. */
void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 E;
   opus_val16 g;
   opus_val32 t;
   celt_norm *xptr;
   E = EPSILON + celt_inner_prod(X, X, N, arch);
#ifdef FIXED_POINT
   /* Normalise E so celt_rsqrt_norm() gets an input in its valid range. */
   k = celt_ilog2(E)>>1;
#endif
   /* NOTE(review): k is declared only under FIXED_POINT; in float builds
      VSHR32()/PSHR32() are expected to discard their shift argument during
      macro expansion, so the name never reaches the compiler — confirm
      against arch.h. */
   t = VSHR32(E, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   /* Apply g = gain/sqrt(E) to every sample. */
   xptr = X;
   for (i=0;i<N;i++)
   {
      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
      xptr++;
   }
   /*return celt_sqrt(E);*/
}
#endif /* OVERRIDE_renormalise_vector */
405 | | |
/* Compute the stereo angle parameter itheta from the mid/side energy ratio
   of X and Y.  With stereo!=0 the mid/side energies are accumulated from
   half-sums and half-differences of the two channels; otherwise X and Y are
   treated as already-split mid and side signals. */
int stereo_itheta(const celt_norm *X, const celt_norm *Y, int stereo, int N, int arch)
{
   int itheta;
   opus_val16 mid, side;
   opus_val32 e_mid, e_side;

   e_mid = e_side = EPSILON;
   if (stereo)
   {
      int i;
      for (i=0;i<N;i++)
      {
         celt_norm half_sum, half_diff;
         half_sum = ADD16(SHR16(X[i],1),SHR16(Y[i],1));
         half_diff = SUB16(SHR16(X[i],1),SHR16(Y[i],1));
         e_mid = MAC16_16(e_mid, half_sum, half_sum);
         e_side = MAC16_16(e_side, half_diff, half_diff);
      }
   } else {
      e_mid += celt_inner_prod(X, X, N, arch);
      e_side += celt_inner_prod(Y, Y, N, arch);
   }
   mid = celt_sqrt(e_mid);
   side = celt_sqrt(e_side);
#ifdef FIXED_POINT
   /* 0.63662 = 2/pi */
   itheta = MULT16_16_Q15(QCONST16(0.63662f,15),celt_atan2p(side, mid));
#else
   itheta = (int)floor(.5f+16384*0.63662f*fast_atan2f(side,mid));
#endif

   return itheta;
}