Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (c) 2007-2008 CSIRO |
2 | | Copyright (c) 2007-2009 Xiph.Org Foundation |
3 | | Written by Jean-Marc Valin */ |
4 | | /* |
5 | | Redistribution and use in source and binary forms, with or without |
6 | | modification, are permitted provided that the following conditions |
7 | | are met: |
8 | | |
9 | | - Redistributions of source code must retain the above copyright |
10 | | notice, this list of conditions and the following disclaimer. |
11 | | |
12 | | - Redistributions in binary form must reproduce the above copyright |
13 | | notice, this list of conditions and the following disclaimer in the |
14 | | documentation and/or other materials provided with the distribution. |
15 | | |
16 | | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 | | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 | | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 | | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER |
20 | | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
21 | | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
22 | | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
23 | | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
24 | | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
25 | | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
26 | | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | | */ |
28 | | |
29 | | #ifdef HAVE_CONFIG_H |
30 | | #include "config.h" |
31 | | #endif |
32 | | |
33 | | #include "mathops.h" |
34 | | #include "cwrs.h" |
35 | | #include "vq.h" |
36 | | #include "arch.h" |
37 | | #include "os_support.h" |
38 | | #include "bands.h" |
39 | | #include "rate.h" |
40 | | #include "pitch.h" |
41 | | |
42 | | #if defined(MIPSr1_ASM) |
43 | | #include "mips/vq_mipsr1.h" |
44 | | #endif |
45 | | |
#ifndef OVERRIDE_vq_exp_rotation1
/** One stage of the spreading rotation: applies a 2x2 Givens-style
    rotation with coefficients (c, s) in Q15 to every pair of samples
    that are 'stride' apart, in a forward pass and then a backward pass.
    Running it again with c and s swapped (see exp_rotation()) undoes it. */
static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
{
   int i;
   opus_val16 ms;
   celt_norm *Xptr;
   Xptr = X;
   /* ms = -s so both butterfly outputs can be written as a single
      multiply-accumulate (MAC16_16) each. */
   ms = NEG16(s);
   /* Forward pass over all (x1, x2) pairs spaced 'stride' apart. */
   for (i=0;i<len-stride;i++)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      /* [x1'; x2'] = [c -s; s c]*[x1; x2], products kept in 32 bits and
         rounded back to 16 bits with a 15-bit shift (Q15 coefficients). */
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
      *Xptr++ = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
   /* Backward pass starting from the last pair, skipping the final
      'stride' samples already finalized by the forward pass. */
   Xptr = &X[len-2*stride-1];
   for (i=len-2*stride-1;i>=0;i--)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2), s, x1), 15));
      *Xptr-- = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
}
#endif /* OVERRIDE_vq_exp_rotation1 */
73 | | |
/** Spreading rotation applied to a band before PVQ quantization (dir=1)
    and inverted after decoding (dir=-1).  The rotation angle shrinks as
    the pulse count K grows relative to the band size, so sparse bands get
    more spreading.  'stride' is the interleaving factor B; 'spread'
    selects the aggressiveness (SPREAD_NONE disables it). */
void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread)
{
   /* Indexed by (spread-1); larger factor -> smaller angle -> less spreading. */
   static const int SPREAD_FACTOR[3]={15,10,5};
   int i;
   opus_val16 c, s;
   opus_val16 gain, theta;
   int stride2=0;
   int factor;

   /* With K pulses covering at least half the band, spreading is pointless. */
   if (2*K>=len || spread==SPREAD_NONE)
      return;
   factor = SPREAD_FACTOR[spread-1];

   /* gain = len/(len+factor*K) in Q15; theta = gain^2/2. */
   gain = celt_div((opus_val32)MULT16_16(Q15_ONE,len),(opus_val32)(len+factor*K));
   theta = HALF16(MULT16_16_Q15(gain,gain));

   c = celt_cos_norm(EXTEND32(theta));
   s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /* sin(theta) */

   /* For long bands, add a second rotation stage with a larger stride
      (approximately sqrt(len/stride)) to spread energy further. */
   if (len>=8*stride)
   {
      stride2 = 1;
      /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
         It's basically incrementing long as (stride2+0.5)^2 < len/stride. */
      while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
         stride2++;
   }
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
     extract_collapse_mask().*/
   len = celt_udiv(len, stride);
   /* Process each of the 'stride' interleaved sub-vectors independently.
      The inverse (dir<0) applies the stages in reverse order with the
      rotation transposed, undoing the forward (dir>=0) transform. */
   for (i=0;i<stride;i++)
   {
      if (dir < 0)
      {
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, c);
         exp_rotation1(X+i*len, len, 1, c, s);
      } else {
         exp_rotation1(X+i*len, len, 1, c, -s);
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, -c);
      }
   }
}
118 | | |
/** Takes the pitch vector and the decoded residual vector, computes the gain
    that will give ||p+g*y||=1 and mixes the residual with the pitch.
    Concretely: scales the integer pulse vector iy (whose squared norm is
    Ryy) by gain/sqrt(Ryy) and writes the result into X. */
static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X,
      int N, opus_val32 Ryy, opus_val16 gain)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 t;
   opus_val16 g;

#ifdef FIXED_POINT
   /* k = half the magnitude exponent of Ryy; used to pre-normalise the
      input of celt_rsqrt_norm and to post-shift the samples below. */
   k = celt_ilog2(Ryy)>>1;
#endif
   /* NOTE(review): k appears here outside the #ifdef, but the float
      versions of VSHR32/PSHR32 are presumably macros that discard the
      shift argument, so k only expands in FIXED_POINT builds — confirm
      against arch.h. */
   t = VSHR32(Ryy, 2*(k-7));
   /* g approx. gain/sqrt(Ryy) (normalised reciprocal square root). */
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   /* N >= 1 is required: callers assert N > 1 before reaching here. */
   i=0;
   do
      X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
   while (++i < N);
}
142 | | |
/** Build a bitmask with one bit per sub-band (B sub-bands of N/B entries
    each): bit b is set iff sub-band b received at least one pulse in iy.
    With a single sub-band the band trivially counts as live. */
static unsigned extract_collapse_mask(int *iy, int N, int B)
{
   unsigned mask;
   int sub_len;
   int band;
   if (B<=1)
      return 1;
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
     exp_rotation().*/
   sub_len = celt_udiv(N, B);
   mask = 0;
   band = 0;
   do {
      const int *sub = iy + band*sub_len;
      unsigned any = 0;
      int k = 0;
      do {
         any |= sub[k];
      } while (++k<sub_len);
      if (any != 0)
         mask |= 1u<<band;
   } while (++band<B);
   return mask;
}
164 | | |
/** Pyramid Vector Quantisation search (reference C implementation).
    Finds the integer vector iy with sum(|iy[j]|)==K whose direction best
    matches X, by maximising <X,iy>^2/||iy||^2.  X is modified in place
    (its signs are stripped; it may be replaced by a unit pulse if it is
    degenerate).  Returns the squared norm of iy accumulated as 'yy'.
    'arch' is unused here (architecture-specific overrides exist). */
opus_val16 op_pvq_search_c(celt_norm *X, int *iy, int K, int N, int arch)
{
   VARDECL(celt_norm, y);
   VARDECL(int, signx);
   int i, j;
   int pulsesLeft;
   opus_val32 sum;
   opus_val32 xy;  /* running correlation <X, iy> */
   opus_val16 yy;  /* running energy ||iy||^2 */
   SAVE_STACK;

   (void)arch;
   ALLOC(y, N, celt_norm);
   ALLOC(signx, N, int);

   /* Get rid of the sign */
   sum = 0;
   j=0; do {
      signx[j] = X[j]<0;
      /* OPT: Make sure the compiler doesn't use a branch on ABS16(). */
      X[j] = ABS16(X[j]);
      iy[j] = 0;
      y[j] = 0;
   } while (++j<N);

   xy = yy = 0;

   pulsesLeft = K;

   /* Do a pre-search by projecting on the pyramid */
   if (K > (N>>1))
   {
      opus_val16 rcp;
      j=0; do {
         sum += X[j];
      }  while (++j<N);

      /* If X is too small, just replace it with a pulse at 0 */
#ifdef FIXED_POINT
      if (sum <= K)
#else
      /* Prevents infinities and NaNs from causing too many pulses
         to be allocated. 64 is an approximation of infinity here. */
      if (!(sum > EPSILON && sum < 64))
#endif
      {
         X[0] = QCONST16(1.f,14);
         j=1; do
            X[j]=0;
         while (++j<N);
         sum = QCONST16(1.f,14);
      }
#ifdef FIXED_POINT
      rcp = EXTRACT16(MULT16_32_Q16(K, celt_rcp(sum)));
#else
      /* Using K+e with e < 1 guarantees we cannot get more than K pulses. */
      rcp = EXTRACT16(MULT16_32_Q16(K+0.8f, celt_rcp(sum)));
#endif
      /* Project: give each position floor(K*X[j]/sum) pulses, rounding
         towards zero so we can never exceed K in total. */
      j=0; do {
#ifdef FIXED_POINT
         /* It's really important to round *towards zero* here */
         iy[j] = MULT16_16_Q15(X[j],rcp);
#else
         iy[j] = (int)floor(rcp*X[j]);
#endif
         y[j] = (celt_norm)iy[j];
         yy = MAC16_16(yy, y[j],y[j]);
         xy = MAC16_16(xy, X[j],y[j]);
         /* y holds 2*iy from here on, so the greedy update below can add
            (2*iy[j]+1) in one step when testing a new pulse at j. */
         y[j] *= 2;
         pulsesLeft -= iy[j];
      }  while (++j<N);
   }
   celt_sig_assert(pulsesLeft>=0);

   /* This should never happen, but just in case it does (e.g. on silence)
      we fill the first bin with pulses. */
#ifdef FIXED_POINT_DEBUG
   celt_sig_assert(pulsesLeft<=N+3);
#endif
   if (pulsesLeft > N+3)
   {
      opus_val16 tmp = (opus_val16)pulsesLeft;
      yy = MAC16_16(yy, tmp, tmp);
      yy = MAC16_16(yy, tmp, y[0]);
      iy[0] += pulsesLeft;
      pulsesLeft=0;
   }

   /* Greedy search: place the remaining pulses one at a time, each at the
      position that maximises the (squared) normalised correlation. */
   for (i=0;i<pulsesLeft;i++)
   {
      opus_val16 Rxy, Ryy;
      int best_id;
      opus_val32 best_num;
      opus_val16 best_den;
#ifdef FIXED_POINT
      int rshift;
#endif
#ifdef FIXED_POINT
      rshift = 1+celt_ilog2(K-pulsesLeft+i+1);
#endif
      /* NOTE(review): rshift is only set under FIXED_POINT; the float
         SHR32 macro presumably ignores its shift operand so rshift never
         expands in float builds — confirm against arch.h. */
      best_id = 0;
      /* The squared magnitude term gets added anyway, so we might as well
         add it outside the loop */
      yy = ADD16(yy, 1);

      /* Calculations for position 0 are out of the loop, in part to reduce
         mispredicted branches (since the if condition is usually false)
         in the loop. */
      /* Temporary sums of the new pulse(s) */
      Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[0])),rshift));
      /* We're multiplying y[j] by two so we don't have to do it here */
      Ryy = ADD16(yy, y[0]);

      /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
         Rxy is positive because the sign is pre-computed) */
      Rxy = MULT16_16_Q15(Rxy,Rxy);
      best_den = Ryy;
      best_num = Rxy;
      j=1;
      do {
         /* Temporary sums of the new pulse(s) */
         Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[j])),rshift));
         /* We're multiplying y[j] by two so we don't have to do it here */
         Ryy = ADD16(yy, y[j]);

         /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
            Rxy is positive because the sign is pre-computed) */
         Rxy = MULT16_16_Q15(Rxy,Rxy);
         /* The idea is to check for num/den >= best_num/best_den, but that way
            we can do it without any division */
         /* OPT: It's not clear whether a cmov is faster than a branch here
            since the condition is more often false than true and using
            a cmov introduces data dependencies across iterations. The optimal
            choice may be architecture-dependent. */
         if (opus_unlikely(MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num)))
         {
            best_den = Ryy;
            best_num = Rxy;
            best_id = j;
         }
      } while (++j<N);

      /* Updating the sums of the new pulse(s) */
      xy = ADD32(xy, EXTEND32(X[best_id]));
      /* We're multiplying y[j] by two so we don't have to do it here */
      yy = ADD16(yy, y[best_id]);

      /* Only now that we've made the final choice, update y/iy */
      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
      y[best_id] += 2;
      iy[best_id]++;
   }

   /* Put the original sign back */
   j=0;
   do {
      /*iy[j] = signx[j] ? -iy[j] : iy[j];*/
      /* OPT: The is more likely to be compiled without a branch than the code above
         but has the same performance otherwise. */
      /* Branchless conditional negate: signx[j] is 0 or 1, so this is
         either (iy^0)+0 or (iy^-1)+1 == -iy (two's complement). */
      iy[j] = (iy[j]^-signx[j]) + signx[j];
   } while (++j<N);
   RESTORE_STACK;
   return yy;
}
329 | | |
330 | | unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc, |
331 | | opus_val16 gain, int resynth, int arch) |
332 | 0 | { |
333 | 0 | VARDECL(int, iy); |
334 | 0 | opus_val16 yy; |
335 | 0 | unsigned collapse_mask; |
336 | 0 | SAVE_STACK; |
337 | |
|
338 | 0 | celt_assert2(K>0, "alg_quant() needs at least one pulse"); |
339 | 0 | celt_assert2(N>1, "alg_quant() needs at least two dimensions"); |
340 | | |
341 | | /* Covers vectorization by up to 4. */ |
342 | 0 | ALLOC(iy, N+3, int); |
343 | |
|
344 | 0 | exp_rotation(X, N, 1, B, K, spread); |
345 | |
|
346 | 0 | yy = op_pvq_search(X, iy, K, N, arch); |
347 | |
|
348 | 0 | encode_pulses(iy, N, K, enc); |
349 | |
|
350 | 0 | if (resynth) |
351 | 0 | { |
352 | 0 | normalise_residual(iy, X, N, yy, gain); |
353 | 0 | exp_rotation(X, N, -1, B, K, spread); |
354 | 0 | } |
355 | |
|
356 | 0 | collapse_mask = extract_collapse_mask(iy, N, B); |
357 | 0 | RESTORE_STACK; |
358 | 0 | return collapse_mask; |
359 | 0 | } |
360 | | |
361 | | /** Decode pulse vector and combine the result with the pitch vector to produce |
362 | | the final normalised signal in the current band. */ |
363 | | unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B, |
364 | | ec_dec *dec, opus_val16 gain) |
365 | 0 | { |
366 | 0 | opus_val32 Ryy; |
367 | 0 | unsigned collapse_mask; |
368 | 0 | VARDECL(int, iy); |
369 | 0 | SAVE_STACK; |
370 | |
|
371 | 0 | celt_assert2(K>0, "alg_unquant() needs at least one pulse"); |
372 | 0 | celt_assert2(N>1, "alg_unquant() needs at least two dimensions"); |
373 | 0 | ALLOC(iy, N, int); |
374 | 0 | Ryy = decode_pulses(iy, N, K, dec); |
375 | 0 | normalise_residual(iy, X, N, Ryy, gain); |
376 | 0 | exp_rotation(X, N, -1, B, K, spread); |
377 | 0 | collapse_mask = extract_collapse_mask(iy, N, B); |
378 | 0 | RESTORE_STACK; |
379 | 0 | return collapse_mask; |
380 | 0 | } |
381 | | |
#ifndef OVERRIDE_renormalise_vector
/** Rescale X in place so that its L2 norm equals 'gain', i.e. multiply
    every sample by approximately gain/sqrt(sum(X[i]^2)). */
void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 E;
   opus_val16 g;
   opus_val32 t;
   celt_norm *xptr;
   /* EPSILON keeps E strictly positive for an all-zero input. */
   E = EPSILON + celt_inner_prod(X, X, N, arch);
#ifdef FIXED_POINT
   /* Half the magnitude exponent of E: normalises the rsqrt input and
      sets the output shift below. */
   k = celt_ilog2(E)>>1;
#endif
   /* NOTE(review): k appears outside the #ifdef, but the float versions
      of VSHR32/PSHR32 are presumably macros that drop the shift operand,
      so k only expands in FIXED_POINT builds — confirm against arch.h. */
   t = VSHR32(E, 2*(k-7));
   /* g approx. gain/sqrt(E). */
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   xptr = X;
   for (i=0;i<N;i++)
   {
      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
      xptr++;
   }
   /*return celt_sqrt(E);*/
}
#endif /* OVERRIDE_renormalise_vector */
409 | | |
/** Compute the stereo angle between the mid and side components.
    In the float path this is round(16384*(2/pi)*atan2(side, mid)), so the
    result lies in [0, 16384] with 16384 corresponding to pi/2.
    When 'stereo' is non-zero, X and Y are combined into mid/side here
    (each scaled by 1/2); otherwise X and Y are presumably already the
    mid and side signals — their energies are used directly. */
int stereo_itheta(const celt_norm *X, const celt_norm *Y, int stereo, int N, int arch)
{
   int i;
   int itheta;
   opus_val16 mid, side;
   opus_val32 Emid, Eside;

   /* EPSILON avoids atan2(0, 0) on silent input. */
   Emid = Eside = EPSILON;
   if (stereo)
   {
      for (i=0;i<N;i++)
      {
         celt_norm m, s;
         /* mid = (X+Y)/2, side = (X-Y)/2, halved to avoid overflow. */
         m = ADD16(SHR16(X[i],1),SHR16(Y[i],1));
         s = SUB16(SHR16(X[i],1),SHR16(Y[i],1));
         Emid = MAC16_16(Emid, m, m);
         Eside = MAC16_16(Eside, s, s);
      }
   } else {
      Emid += celt_inner_prod(X, X, N, arch);
      Eside += celt_inner_prod(Y, Y, N, arch);
   }
   mid = celt_sqrt(Emid);
   side = celt_sqrt(Eside);
#ifdef FIXED_POINT
   /* 0.63662 = 2/pi */
   itheta = MULT16_16_Q15(QCONST16(0.63662f,15),celt_atan2p(side, mid));
#else
   itheta = (int)floor(.5f+16384*0.63662f*fast_atan2f(side,mid));
#endif

   return itheta;
}