/src/ffmpeg/libavcodec/atrac3.c
Line | Count | Source |
1 | | /* |
2 | | * ATRAC3 compatible decoder |
3 | | * Copyright (c) 2006-2008 Maxim Poliakovski |
4 | | * Copyright (c) 2006-2008 Benjamin Larsson |
5 | | * |
6 | | * This file is part of FFmpeg. |
7 | | * |
8 | | * FFmpeg is free software; you can redistribute it and/or |
9 | | * modify it under the terms of the GNU Lesser General Public |
10 | | * License as published by the Free Software Foundation; either |
11 | | * version 2.1 of the License, or (at your option) any later version. |
12 | | * |
13 | | * FFmpeg is distributed in the hope that it will be useful, |
14 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | | * Lesser General Public License for more details. |
17 | | * |
18 | | * You should have received a copy of the GNU Lesser General Public |
19 | | * License along with FFmpeg; if not, write to the Free Software |
20 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
21 | | */ |
22 | | |
23 | | /** |
24 | | * @file |
25 | | * ATRAC3 compatible decoder. |
26 | | * This decoder handles Sony's ATRAC3 data. |
27 | | * |
28 | | * Container formats used to store ATRAC3 data: |
29 | | * RealMedia (.rm), RIFF WAV (.wav, .at3), Sony OpenMG (.oma, .aa3). |
30 | | * |
31 | | * To use this decoder, a calling application must supply the extradata |
32 | | * bytes provided in the containers above. |
33 | | */ |
34 | | |
35 | | #include <math.h> |
36 | | #include <stddef.h> |
37 | | |
38 | | #include "libavutil/attributes.h" |
39 | | #include "libavutil/float_dsp.h" |
40 | | #include "libavutil/libm.h" |
41 | | #include "libavutil/mem.h" |
42 | | #include "libavutil/mem_internal.h" |
43 | | #include "libavutil/thread.h" |
44 | | #include "libavutil/tx.h" |
45 | | |
46 | | #include "avcodec.h" |
47 | | #include "bytestream.h" |
48 | | #include "codec_internal.h" |
49 | | #include "decode.h" |
50 | | #include "get_bits.h" |
51 | | |
52 | | #include "atrac.h" |
53 | | #include "atrac3data.h" |
54 | | |
55 | 3.46k | #define MIN_CHANNELS 1 |
56 | 1.73k | #define MAX_CHANNELS 8 |
57 | 6.79k | #define MAX_JS_PAIRS 8 / 2 |
58 | | |
59 | 2.14M | #define JOINT_STEREO 0x12 |
60 | 2.30k | #define SINGLE 0x2 |
61 | | |
62 | 2.06M | #define SAMPLES_PER_FRAME 1024 |
63 | 952k | #define MDCT_SIZE 512 |
64 | | |
65 | 5.09M | #define ATRAC3_VLC_BITS 8 |
66 | | |
67 | | typedef struct GainBlock { |
68 | | AtracGainInfo g_block[4]; |
69 | | } GainBlock; |
70 | | |
71 | | typedef struct TonalComponent { |
72 | | int pos; |
73 | | int num_coefs; |
74 | | float coef[8]; |
75 | | } TonalComponent; |
76 | | |
77 | | typedef struct ChannelUnit { |
78 | | int bands_coded; |
79 | | int num_components; |
80 | | float prev_frame[SAMPLES_PER_FRAME]; |
81 | | int gc_blk_switch; |
82 | | TonalComponent components[64]; |
83 | | GainBlock gain_block[2]; |
84 | | |
85 | | DECLARE_ALIGNED(32, float, spectrum)[SAMPLES_PER_FRAME]; |
86 | | DECLARE_ALIGNED(32, float, imdct_buf)[SAMPLES_PER_FRAME]; |
87 | | |
88 | | float delay_buf1[46]; ///<qmf delay buffers |
89 | | float delay_buf2[46]; |
90 | | float delay_buf3[46]; |
91 | | } ChannelUnit; |
92 | | |
93 | | typedef struct ATRAC3Context { |
94 | | GetBitContext gb; |
95 | | //@{ |
96 | | /** stream data */ |
97 | | int coding_mode; |
98 | | |
99 | | ChannelUnit *units; |
100 | | //@} |
101 | | //@{ |
102 | | /** joint-stereo related variables */ |
103 | | int matrix_coeff_index_prev[MAX_JS_PAIRS][4]; |
104 | | int matrix_coeff_index_now[MAX_JS_PAIRS][4]; |
105 | | int matrix_coeff_index_next[MAX_JS_PAIRS][4]; |
106 | | int weighting_delay[MAX_JS_PAIRS][6]; |
107 | | //@} |
108 | | //@{ |
109 | | /** data buffers */ |
110 | | uint8_t *decoded_bytes_buffer; |
111 | | float temp_buf[1070]; |
112 | | //@} |
113 | | //@{ |
114 | | /** extradata */ |
115 | | int scrambled_stream; |
116 | | //@} |
117 | | |
118 | | AtracGCContext gainc_ctx; |
119 | | AVTXContext *mdct_ctx; |
120 | | av_tx_fn mdct_fn; |
121 | | void (*vector_fmul)(float *dst, const float *src0, const float *src1, |
122 | | int len); |
123 | | } ATRAC3Context; |
124 | | |
125 | | static DECLARE_ALIGNED(32, float, mdct_window)[MDCT_SIZE]; |
126 | | static VLCElem atrac3_vlc_table[7 * 1 << ATRAC3_VLC_BITS]; |
127 | | static VLC spectral_coeff_tab[7]; |
128 | | |
129 | | /** |
130 | | * Regular 512 points IMDCT without overlapping, with the exception of the |
131 | | * swapping of odd bands caused by the reverse spectra of the QMF. |
132 | | * |
133 | | * @param odd_band 1 if the band is an odd band |
134 | | */ |
135 | | static void imlt(ATRAC3Context *q, float *input, float *output, int odd_band) |
136 | 952k | { |
137 | 952k | int i; |
138 | | |
139 | 952k | if (odd_band) { |
140 | | /** |
141 | | * Reverse the odd bands before IMDCT, this is an effect of the QMF |
142 | | * transform or it gives better compression to do it this way. |
143 | | * FIXME: It should be possible to handle this in imdct_calc |
144 | | * for that to happen a modification of the prerotation step of |
145 | | * all SIMD code and C code is needed. |
146 | | * Or fix the functions before so they generate a pre reversed spectrum. |
147 | | */ |
148 | 15.4M | for (i = 0; i < 128; i++) |
149 | 15.2M | FFSWAP(float, input[i], input[255 - i]); |
150 | 119k | } |
151 | | |
152 | 952k | q->mdct_fn(q->mdct_ctx, output, input, sizeof(float)); |
153 | | |
154 | | /* Perform windowing on the output. */ |
155 | 952k | q->vector_fmul(output, output, mdct_window, MDCT_SIZE); |
156 | 952k | } |
157 | | |
158 | | /* |
159 | | * indata descrambling, only used for data coming from the rm container |
160 | | */ |
161 | | static int decode_bytes(const uint8_t *input, uint8_t *out, int bytes) |
162 | 217k | { |
163 | 217k | int i, off; |
164 | 217k | uint32_t c; |
165 | 217k | const uint32_t *buf; |
166 | 217k | uint32_t *output = (uint32_t *)out; |
167 | | |
168 | 217k | off = (intptr_t)input & 3; |
169 | 217k | buf = (const uint32_t *)(input - off); |
170 | 217k | if (off) |
171 | 7.59k | c = av_be2ne32((0x537F6103U >> (off * 8)) | (0x537F6103U << (32 - (off * 8)))); |
172 | 209k | else |
173 | 209k | c = av_be2ne32(0x537F6103U); |
174 | 217k | bytes += 3 + off; |
175 | 981k | for (i = 0; i < bytes / 4; i++) |
176 | 764k | output[i] = c ^ buf[i]; |
177 | | |
178 | 217k | if (off) |
179 | 7.59k | avpriv_request_sample(NULL, "Offset of %d", off); |
180 | | |
181 | 217k | return off; |
182 | 217k | } |
183 | | |
184 | | static av_cold void init_imdct_window(void) |
185 | 2 | { |
186 | 2 | int i, j; |
187 | | |
188 | | /* generate the mdct window, for details see |
189 | | * http://wiki.multimedia.cx/index.php?title=RealAudio_atrc#Windows */ |
190 | 258 | for (i = 0, j = 255; i < 128; i++, j--) { |
191 | 256 | float wi = sin(((i + 0.5) / 256.0 - 0.5) * M_PI) + 1.0; |
192 | 256 | float wj = sin(((j + 0.5) / 256.0 - 0.5) * M_PI) + 1.0; |
193 | 256 | float w = 0.5 * (wi * wi + wj * wj); |
194 | 256 | mdct_window[i] = mdct_window[511 - i] = wi / w; |
195 | 256 | mdct_window[j] = mdct_window[511 - j] = wj / w; |
196 | 256 | } |
197 | 2 | } |
198 | | |
199 | | static av_cold int atrac3_decode_close(AVCodecContext *avctx) |
200 | 1.73k | { |
201 | 1.73k | ATRAC3Context *q = avctx->priv_data; |
202 | | |
203 | 1.73k | av_freep(&q->units); |
204 | 1.73k | av_freep(&q->decoded_bytes_buffer); |
205 | | |
206 | 1.73k | av_tx_uninit(&q->mdct_ctx); |
207 | | |
208 | 1.73k | return 0; |
209 | 1.73k | } |
210 | | |
211 | | /** |
212 | | * Mantissa decoding |
213 | | * |
214 | | * @param selector which table the output values are coded with |
215 | | * @param coding_flag constant length coding or variable length coding |
216 | | * @param mantissas mantissa output table |
217 | | * @param num_codes number of values to get |
218 | | */ |
219 | | static void read_quant_spectral_coeffs(GetBitContext *gb, int selector, |
220 | | int coding_flag, int *mantissas, |
221 | | int num_codes) |
222 | 1.23M | { |
223 | 1.23M | int i, code, huff_symb; |
224 | | |
225 | 1.23M | if (selector == 1) |
226 | 8.49k | num_codes /= 2; |
227 | | |
228 | 1.23M | if (coding_flag != 0) { |
229 | | /* constant length coding (CLC) */ |
230 | 760k | int num_bits = clc_length_tab[selector]; |
231 | | |
232 | 760k | if (selector > 1) { |
233 | 3.49M | for (i = 0; i < num_codes; i++) { |
234 | 2.73M | if (num_bits) |
235 | 2.73M | code = get_sbits(gb, num_bits); |
236 | 0 | else |
237 | 0 | code = 0; |
238 | 2.73M | mantissas[i] = code; |
239 | 2.73M | } |
240 | 758k | } else { |
241 | 32.1k | for (i = 0; i < num_codes; i++) { |
242 | 29.7k | if (num_bits) |
243 | 29.7k | code = get_bits(gb, num_bits); // num_bits is always 4 in this case |
244 | 0 | else |
245 | 0 | code = 0; |
246 | 29.7k | mantissas[i * 2 ] = mantissa_clc_tab[code >> 2]; |
247 | 29.7k | mantissas[i * 2 + 1] = mantissa_clc_tab[code & 3]; |
248 | 29.7k | } |
249 | 2.37k | } |
250 | 760k | } else { |
251 | | /* variable length coding (VLC) */ |
252 | 477k | if (selector != 1) { |
253 | 5.52M | for (i = 0; i < num_codes; i++) { |
254 | 5.05M | mantissas[i] = get_vlc2(gb, spectral_coeff_tab[selector-1].table, |
255 | 5.05M | ATRAC3_VLC_BITS, 1); |
256 | 5.05M | } |
257 | 471k | } else { |
258 | 45.8k | for (i = 0; i < num_codes; i++) { |
259 | 39.6k | huff_symb = get_vlc2(gb, spectral_coeff_tab[selector - 1].table, |
260 | 39.6k | ATRAC3_VLC_BITS, 1); |
261 | 39.6k | mantissas[i * 2 ] = mantissa_vlc_tab[huff_symb * 2 ]; |
262 | 39.6k | mantissas[i * 2 + 1] = mantissa_vlc_tab[huff_symb * 2 + 1]; |
263 | 39.6k | } |
264 | 6.12k | } |
265 | 477k | } |
266 | 1.23M | } |
267 | | |
268 | | /** |
269 | | * Restore the quantized band spectrum coefficients |
270 | | * |
271 | | * @return subband count, fix for broken specification/files |
272 | | */ |
273 | | static int decode_spectrum(GetBitContext *gb, float *output) |
274 | 828k | { |
275 | 828k | int num_subbands, coding_mode, i, j, first, last, subband_size; |
276 | 828k | int subband_vlc_index[32], sf_index[32]; |
277 | 828k | int mantissas[128]; |
278 | 828k | float scale_factor; |
279 | | |
280 | 828k | num_subbands = get_bits(gb, 5); // number of coded subbands |
281 | 828k | coding_mode = get_bits1(gb); // coding Mode: 0 - VLC/ 1-CLC |
282 | | |
283 | | /* get the VLC selector table for the subbands, 0 means not coded */ |
284 | 2.15M | for (i = 0; i <= num_subbands; i++) |
285 | 1.32M | subband_vlc_index[i] = get_bits(gb, 3); |
286 | | |
287 | | /* read the scale factor indexes from the stream */ |
288 | 2.15M | for (i = 0; i <= num_subbands; i++) { |
289 | 1.32M | if (subband_vlc_index[i] != 0) |
290 | 457k | sf_index[i] = get_bits(gb, 6); |
291 | 1.32M | } |
292 | | |
293 | 2.15M | for (i = 0; i <= num_subbands; i++) { |
294 | 1.32M | first = subband_tab[i ]; |
295 | 1.32M | last = subband_tab[i + 1]; |
296 | | |
297 | 1.32M | subband_size = last - first; |
298 | | |
299 | 1.32M | if (subband_vlc_index[i] != 0) { |
300 | | /* decode spectral coefficients for this subband */ |
301 | | /* TODO: This can be done faster is several blocks share the |
302 | | * same VLC selector (subband_vlc_index) */ |
303 | 457k | read_quant_spectral_coeffs(gb, subband_vlc_index[i], coding_mode, |
304 | 457k | mantissas, subband_size); |
305 | | |
306 | | /* decode the scale factor for this subband */ |
307 | 457k | scale_factor = ff_atrac_sf_table[sf_index[i]] * |
308 | 457k | inv_max_quant[subband_vlc_index[i]]; |
309 | | |
310 | | /* inverse quantize the coefficients */ |
311 | 6.08M | for (j = 0; first < last; first++, j++) |
312 | 5.62M | output[first] = mantissas[j] * scale_factor; |
313 | 866k | } else { |
314 | | /* this subband was not coded, so zero the entire subband */ |
315 | 866k | memset(output + first, 0, subband_size * sizeof(*output)); |
316 | 866k | } |
317 | 1.32M | } |
318 | | |
319 | | /* clear the subbands that were not coded */ |
320 | 828k | first = subband_tab[i]; |
321 | 828k | memset(output + first, 0, (SAMPLES_PER_FRAME - first) * sizeof(*output)); |
322 | 828k | return num_subbands; |
323 | 828k | } |
324 | | |
325 | | /** |
326 | | * Restore the quantized tonal components |
327 | | * |
328 | | * @param components tonal components |
329 | | * @param num_bands number of coded bands |
330 | | */ |
331 | | static int decode_tonal_components(GetBitContext *gb, |
332 | | TonalComponent *components, int num_bands) |
333 | 835k | { |
334 | 835k | int i, b, c, m; |
335 | 835k | int nb_components, coding_mode_selector, coding_mode; |
336 | 835k | int band_flags[4], mantissa[8]; |
337 | 835k | int component_count = 0; |
338 | | |
339 | 835k | nb_components = get_bits(gb, 5); |
340 | | |
341 | | /* no tonal components */ |
342 | 835k | if (nb_components == 0) |
343 | 691k | return 0; |
344 | | |
345 | 143k | coding_mode_selector = get_bits(gb, 2); |
346 | 143k | if (coding_mode_selector == 2) |
347 | 1.37k | return AVERROR_INVALIDDATA; |
348 | | |
349 | 142k | coding_mode = coding_mode_selector & 1; |
350 | | |
351 | 704k | for (i = 0; i < nb_components; i++) { |
352 | 567k | int coded_values_per_component, quant_step_index; |
353 | | |
354 | 1.31M | for (b = 0; b <= num_bands; b++) |
355 | 747k | band_flags[b] = get_bits1(gb); |
356 | | |
357 | 567k | coded_values_per_component = get_bits(gb, 3); |
358 | | |
359 | 567k | quant_step_index = get_bits(gb, 3); |
360 | 567k | if (quant_step_index <= 1) |
361 | 4.88k | return AVERROR_INVALIDDATA; |
362 | | |
363 | 562k | if (coding_mode_selector == 3) |
364 | 97.6k | coding_mode = get_bits1(gb); |
365 | | |
366 | 3.52M | for (b = 0; b < (num_bands + 1) * 4; b++) { |
367 | 2.96M | int coded_components; |
368 | | |
369 | 2.96M | if (band_flags[b >> 2] == 0) |
370 | 2.48M | continue; |
371 | | |
372 | 480k | coded_components = get_bits(gb, 3); |
373 | | |
374 | 1.26M | for (c = 0; c < coded_components; c++) { |
375 | 781k | TonalComponent *cmp = &components[component_count]; |
376 | 781k | int sf_index, coded_values, max_coded_values; |
377 | 781k | float scale_factor; |
378 | | |
379 | 781k | sf_index = get_bits(gb, 6); |
380 | 781k | if (component_count >= 64) |
381 | 565 | return AVERROR_INVALIDDATA; |
382 | | |
383 | 780k | cmp->pos = b * 64 + get_bits(gb, 6); |
384 | | |
385 | 780k | max_coded_values = SAMPLES_PER_FRAME - cmp->pos; |
386 | 780k | coded_values = coded_values_per_component + 1; |
387 | 780k | coded_values = FFMIN(max_coded_values, coded_values); |
388 | | |
389 | 780k | scale_factor = ff_atrac_sf_table[sf_index] * |
390 | 780k | inv_max_quant[quant_step_index]; |
391 | | |
392 | 780k | read_quant_spectral_coeffs(gb, quant_step_index, coding_mode, |
393 | 780k | mantissa, coded_values); |
394 | | |
395 | 780k | cmp->num_coefs = coded_values; |
396 | | |
397 | | /* inverse quant */ |
398 | 3.08M | for (m = 0; m < coded_values; m++) |
399 | 2.30M | cmp->coef[m] = mantissa[m] * scale_factor; |
400 | | |
401 | 780k | component_count++; |
402 | 780k | } |
403 | 480k | } |
404 | 562k | } |
405 | | |
406 | 136k | return component_count; |
407 | 142k | } |
408 | | |
409 | | /** |
410 | | * Decode gain parameters for the coded bands |
411 | | * |
412 | | * @param block the gainblock for the current band |
413 | | * @param num_bands amount of coded bands |
414 | | */ |
415 | | static int decode_gain_control(GetBitContext *gb, GainBlock *block, |
416 | | int num_bands) |
417 | 841k | { |
418 | 841k | int b, j; |
419 | 841k | int *level, *loc; |
420 | | |
421 | 841k | AtracGainInfo *gain = block->g_block; |
422 | | |
423 | 2.76M | for (b = 0; b <= num_bands; b++) { |
424 | 1.93M | gain[b].num_points = get_bits(gb, 3); |
425 | 1.93M | level = gain[b].lev_code; |
426 | 1.93M | loc = gain[b].loc_code; |
427 | | |
428 | 2.13M | for (j = 0; j < gain[b].num_points; j++) { |
429 | 203k | level[j] = get_bits(gb, 4); |
430 | 203k | loc[j] = get_bits(gb, 5); |
431 | 203k | if (j && loc[j] <= loc[j - 1]) |
432 | 6.03k | return AVERROR_INVALIDDATA; |
433 | 203k | } |
434 | 1.93M | } |
435 | | |
436 | | /* Clear the unused blocks. */ |
437 | 2.25M | for (; b < 4 ; b++) |
438 | 1.41M | gain[b].num_points = 0; |
439 | | |
440 | 835k | return 0; |
441 | 841k | } |
442 | | |
443 | | /** |
444 | | * Combine the tonal band spectrum and regular band spectrum |
445 | | * |
446 | | * @param spectrum output spectrum buffer |
447 | | * @param num_components number of tonal components |
448 | | * @param components tonal components for this band |
449 | | * @return position of the last tonal coefficient |
450 | | */ |
451 | | static int add_tonal_components(float *spectrum, int num_components, |
452 | | TonalComponent *components) |
453 | 828k | { |
454 | 828k | int i, j, last_pos = -1; |
455 | 828k | float *input, *output; |
456 | | |
457 | 1.55M | for (i = 0; i < num_components; i++) { |
458 | 729k | last_pos = FFMAX(components[i].pos + components[i].num_coefs, last_pos); |
459 | 729k | input = components[i].coef; |
460 | 729k | output = &spectrum[components[i].pos]; |
461 | | |
462 | 2.77M | for (j = 0; j < components[i].num_coefs; j++) |
463 | 2.04M | output[j] += input[j]; |
464 | 729k | } |
465 | | |
466 | 828k | return last_pos; |
467 | 828k | } |
468 | | |
469 | | #define INTERPOLATE(old, new, nsample) \ |
470 | 309k | ((old) + (nsample) * 0.125 * ((new) - (old))) |
471 | | |
472 | | static void reverse_matrixing(float *su1, float *su2, int *prev_code, |
473 | | int *curr_code) |
474 | 5.91k | { |
475 | 5.91k | int i, nsample, band; |
476 | 5.91k | float mc1_l, mc1_r, mc2_l, mc2_r; |
477 | | |
478 | 29.5k | for (i = 0, band = 0; band < 4 * 256; band += 256, i++) { |
479 | 23.6k | int s1 = prev_code[i]; |
480 | 23.6k | int s2 = curr_code[i]; |
481 | 23.6k | nsample = band; |
482 | | |
483 | 23.6k | if (s1 != s2) { |
484 | | /* Selector value changed, interpolation needed. */ |
485 | 6.45k | mc1_l = matrix_coeffs[s1 * 2 ]; |
486 | 6.45k | mc1_r = matrix_coeffs[s1 * 2 + 1]; |
487 | 6.45k | mc2_l = matrix_coeffs[s2 * 2 ]; |
488 | 6.45k | mc2_r = matrix_coeffs[s2 * 2 + 1]; |
489 | | |
490 | | /* Interpolation is done over the first eight samples. */ |
491 | 58.0k | for (; nsample < band + 8; nsample++) { |
492 | 51.6k | float c1 = su1[nsample]; |
493 | 51.6k | float c2 = su2[nsample]; |
494 | 51.6k | c2 = c1 * INTERPOLATE(mc1_l, mc2_l, nsample - band) + |
495 | 51.6k | c2 * INTERPOLATE(mc1_r, mc2_r, nsample - band); |
496 | 51.6k | su1[nsample] = c2; |
497 | 51.6k | su2[nsample] = c1 * 2.0 - c2; |
498 | 51.6k | } |
499 | 6.45k | } |
500 | | |
501 | | /* Apply the matrix without interpolation. */ |
502 | 23.6k | switch (s2) { |
503 | 6.02k | case 0: /* M/S decoding */ |
504 | 1.53M | for (; nsample < band + 256; nsample++) { |
505 | 1.52M | float c1 = su1[nsample]; |
506 | 1.52M | float c2 = su2[nsample]; |
507 | 1.52M | su1[nsample] = c2 * 2.0; |
508 | 1.52M | su2[nsample] = (c1 - c2) * 2.0; |
509 | 1.52M | } |
510 | 6.02k | break; |
511 | 6.53k | case 1: |
512 | 1.66M | for (; nsample < band + 256; nsample++) { |
513 | 1.65M | float c1 = su1[nsample]; |
514 | 1.65M | float c2 = su2[nsample]; |
515 | 1.65M | su1[nsample] = (c1 + c2) * 2.0; |
516 | 1.65M | su2[nsample] = c2 * -2.0; |
517 | 1.65M | } |
518 | 6.53k | break; |
519 | 4.65k | case 2: |
520 | 11.0k | case 3: |
521 | 2.83M | for (; nsample < band + 256; nsample++) { |
522 | 2.81M | float c1 = su1[nsample]; |
523 | 2.81M | float c2 = su2[nsample]; |
524 | 2.81M | su1[nsample] = c1 + c2; |
525 | 2.81M | su2[nsample] = c1 - c2; |
526 | 2.81M | } |
527 | 11.0k | break; |
528 | 0 | default: |
529 | 0 | av_unreachable("curr_code/matrix_coeff_index_* values are stored in two bits"); |
530 | 23.6k | } |
531 | 23.6k | } |
532 | 5.91k | } |
533 | | |
534 | | static void get_channel_weights(int index, int flag, float ch[2]) |
535 | 8.61k | { |
536 | 8.61k | if (index == 7) { |
537 | 1.55k | ch[0] = 1.0; |
538 | 1.55k | ch[1] = 1.0; |
539 | 7.06k | } else { |
540 | 7.06k | ch[0] = (index & 7) / 7.0; |
541 | 7.06k | ch[1] = sqrt(2 - ch[0] * ch[0]); |
542 | 7.06k | if (flag) |
543 | 844 | FFSWAP(float, ch[0], ch[1]); |
544 | 7.06k | } |
545 | 8.61k | } |
546 | | |
547 | | static void channel_weighting(float *su1, float *su2, int *p3) |
548 | 5.91k | { |
549 | 5.91k | int band, nsample; |
550 | | /* w[x][y] y=0 is left y=1 is right */ |
551 | 5.91k | float w[2][2]; |
552 | | |
553 | 5.91k | if (p3[1] != 7 || p3[3] != 7) { |
554 | 4.30k | get_channel_weights(p3[1], p3[0], w[0]); |
555 | 4.30k | get_channel_weights(p3[3], p3[2], w[1]); |
556 | | |
557 | 17.2k | for (band = 256; band < 4 * 256; band += 256) { |
558 | 116k | for (nsample = band; nsample < band + 8; nsample++) { |
559 | 103k | su1[nsample] *= INTERPOLATE(w[0][0], w[0][1], nsample - band); |
560 | 103k | su2[nsample] *= INTERPOLATE(w[1][0], w[1][1], nsample - band); |
561 | 103k | } |
562 | 3.21M | for(; nsample < band + 256; nsample++) { |
563 | 3.20M | su1[nsample] *= w[1][0]; |
564 | 3.20M | su2[nsample] *= w[1][1]; |
565 | 3.20M | } |
566 | 12.9k | } |
567 | 4.30k | } |
568 | 5.91k | } |
569 | | |
570 | | /** |
571 | | * Decode a Sound Unit |
572 | | * |
573 | | * @param snd the channel unit to be used |
574 | | * @param output the decoded samples before IQMF in float representation |
575 | | * @param channel_num channel number |
576 | | * @param coding_mode the coding mode (JOINT_STEREO or single channels) |
577 | | */ |
578 | | static int decode_channel_sound_unit(ATRAC3Context *q, GetBitContext *gb, |
579 | | ChannelUnit *snd, float *output, |
580 | | int channel_num, int coding_mode) |
581 | 953k | { |
582 | 953k | int band, ret, num_subbands, last_tonal, num_bands; |
583 | 953k | GainBlock *gain1 = &snd->gain_block[ snd->gc_blk_switch]; |
584 | 953k | GainBlock *gain2 = &snd->gain_block[1 - snd->gc_blk_switch]; |
585 | | |
586 | 953k | if (coding_mode == JOINT_STEREO && (channel_num % 2) == 1) { |
587 | 8.70k | if (get_bits(gb, 2) != 3) { |
588 | 2.59k | av_log(NULL,AV_LOG_ERROR,"JS mono Sound Unit id != 3.\n"); |
589 | 2.59k | return AVERROR_INVALIDDATA; |
590 | 2.59k | } |
591 | 944k | } else { |
592 | 944k | if (get_bits(gb, 6) != 0x28) { |
593 | 109k | av_log(NULL,AV_LOG_ERROR,"Sound Unit id != 0x28.\n"); |
594 | 109k | return AVERROR_INVALIDDATA; |
595 | 109k | } |
596 | 944k | } |
597 | | |
598 | | /* number of coded QMF bands */ |
599 | 841k | snd->bands_coded = get_bits(gb, 2); |
600 | | |
601 | 841k | ret = decode_gain_control(gb, gain2, snd->bands_coded); |
602 | 841k | if (ret) |
603 | 6.03k | return ret; |
604 | | |
605 | 835k | snd->num_components = decode_tonal_components(gb, snd->components, |
606 | 835k | snd->bands_coded); |
607 | 835k | if (snd->num_components < 0) |
608 | 6.82k | return snd->num_components; |
609 | | |
610 | 828k | num_subbands = decode_spectrum(gb, snd->spectrum); |
611 | | |
612 | | /* Merge the decoded spectrum and tonal components. */ |
613 | 828k | last_tonal = add_tonal_components(snd->spectrum, snd->num_components, |
614 | 828k | snd->components); |
615 | | |
616 | | |
617 | | /* calculate number of used MLT/QMF bands according to the amount of coded |
618 | | spectral lines */ |
619 | 828k | num_bands = (subband_tab[num_subbands + 1] - 1) >> 8; |
620 | 828k | if (last_tonal >= 0) |
621 | 112k | num_bands = FFMAX((last_tonal + 256) >> 8, num_bands); |
622 | | |
623 | | |
624 | | /* Reconstruct time domain samples. */ |
625 | 4.14M | for (band = 0; band < 4; band++) { |
626 | | /* Perform the IMDCT step without overlapping. */ |
627 | 3.31M | if (band <= num_bands) |
628 | 952k | imlt(q, &snd->spectrum[band * 256], snd->imdct_buf, band & 1); |
629 | 2.36M | else |
630 | 2.36M | memset(snd->imdct_buf, 0, 512 * sizeof(*snd->imdct_buf)); |
631 | | |
632 | | /* gain compensation and overlapping */ |
633 | 3.31M | ff_atrac_gain_compensation(&q->gainc_ctx, snd->imdct_buf, |
634 | 3.31M | &snd->prev_frame[band * 256], |
635 | 3.31M | &gain1->g_block[band], &gain2->g_block[band], |
636 | 3.31M | 256, &output[band * 256]); |
637 | 3.31M | } |
638 | | |
639 | | /* Swap the gain control buffers for the next frame. */ |
640 | 828k | snd->gc_blk_switch ^= 1; |
641 | | |
642 | 828k | return 0; |
643 | 835k | } |
644 | | |
645 | | static int decode_frame(AVCodecContext *avctx, const uint8_t *databuf, |
646 | | float **out_samples) |
647 | 217k | { |
648 | 217k | ATRAC3Context *q = avctx->priv_data; |
649 | 217k | int ret, i, ch; |
650 | 217k | uint8_t *ptr1; |
651 | 217k | int channels = avctx->ch_layout.nb_channels; |
652 | | |
653 | 217k | if (q->coding_mode == JOINT_STEREO) { |
654 | | /* channel coupling mode */ |
655 | | |
656 | | /* Decode sound unit pairs (channels are expected to be even). |
657 | | * Multichannel joint stereo interleaves pairs (6ch: 2ch + 2ch + 2ch) */ |
658 | 8.74k | const uint8_t *js_databuf; |
659 | 8.74k | int js_pair, js_block_align; |
660 | | |
661 | 8.74k | js_block_align = (avctx->block_align / channels) * 2; /* block pair */ |
662 | | |
663 | 14.6k | for (ch = 0; ch < channels; ch = ch + 2) { |
664 | 14.3k | js_pair = ch/2; |
665 | 14.3k | js_databuf = databuf + js_pair * js_block_align; /* align to current pair */ |
666 | | |
667 | | /* Set the bitstream reader at the start of first channel sound unit. */ |
668 | 14.3k | init_get_bits(&q->gb, |
669 | 14.3k | js_databuf, js_block_align * 8); |
670 | | |
671 | | /* decode Sound Unit 1 */ |
672 | 14.3k | ret = decode_channel_sound_unit(q, &q->gb, &q->units[ch], |
673 | 14.3k | out_samples[ch], ch, JOINT_STEREO); |
674 | 14.3k | if (ret != 0) |
675 | 5.44k | return ret; |
676 | | |
677 | | /* Framedata of the su2 in the joint-stereo mode is encoded in |
678 | | * reverse byte order so we need to swap it first. */ |
679 | 8.89k | if (js_databuf == q->decoded_bytes_buffer) { |
680 | 7.51k | uint8_t *ptr2 = q->decoded_bytes_buffer + js_block_align - 1; |
681 | 7.51k | ptr1 = q->decoded_bytes_buffer; |
682 | 25.3k | for (i = 0; i < js_block_align / 2; i++, ptr1++, ptr2--) |
683 | 17.8k | FFSWAP(uint8_t, *ptr1, *ptr2); |
684 | 7.51k | } else { |
685 | 1.38k | const uint8_t *ptr2 = js_databuf + js_block_align - 1; |
686 | 124k | for (i = 0; i < js_block_align; i++) |
687 | 123k | q->decoded_bytes_buffer[i] = *ptr2--; |
688 | 1.38k | } |
689 | | |
690 | | /* Skip the sync codes (0xF8). */ |
691 | 8.89k | ptr1 = q->decoded_bytes_buffer; |
692 | 11.4k | for (i = 4; *ptr1 == 0xF8; i++, ptr1++) { |
693 | 2.76k | if (i >= js_block_align) |
694 | 197 | return AVERROR_INVALIDDATA; |
695 | 2.76k | } |
696 | | |
697 | | |
698 | | /* set the bitstream reader at the start of the second Sound Unit */ |
699 | 8.70k | ret = init_get_bits8(&q->gb, |
700 | 8.70k | ptr1, q->decoded_bytes_buffer + js_block_align - ptr1); |
701 | 8.70k | if (ret < 0) |
702 | 0 | return ret; |
703 | | |
704 | | /* Fill the Weighting coeffs delay buffer */ |
705 | 8.70k | memmove(q->weighting_delay[js_pair], &q->weighting_delay[js_pair][2], |
706 | 8.70k | 4 * sizeof(*q->weighting_delay[js_pair])); |
707 | 8.70k | q->weighting_delay[js_pair][4] = get_bits1(&q->gb); |
708 | 8.70k | q->weighting_delay[js_pair][5] = get_bits(&q->gb, 3); |
709 | | |
710 | 43.5k | for (i = 0; i < 4; i++) { |
711 | 34.8k | q->matrix_coeff_index_prev[js_pair][i] = q->matrix_coeff_index_now[js_pair][i]; |
712 | 34.8k | q->matrix_coeff_index_now[js_pair][i] = q->matrix_coeff_index_next[js_pair][i]; |
713 | 34.8k | q->matrix_coeff_index_next[js_pair][i] = get_bits(&q->gb, 2); |
714 | 34.8k | } |
715 | | |
716 | | /* Decode Sound Unit 2. */ |
717 | 8.70k | ret = decode_channel_sound_unit(q, &q->gb, &q->units[ch+1], |
718 | 8.70k | out_samples[ch+1], ch+1, JOINT_STEREO); |
719 | 8.70k | if (ret != 0) |
720 | 2.78k | return ret; |
721 | | |
722 | | /* Reconstruct the channel coefficients. */ |
723 | 5.91k | reverse_matrixing(out_samples[ch], out_samples[ch+1], |
724 | 5.91k | q->matrix_coeff_index_prev[js_pair], |
725 | 5.91k | q->matrix_coeff_index_now[js_pair]); |
726 | | |
727 | 5.91k | channel_weighting(out_samples[ch], out_samples[ch+1], q->weighting_delay[js_pair]); |
728 | 5.91k | } |
729 | 208k | } else { |
730 | | /* single channels */ |
731 | | /* Decode the channel sound units. */ |
732 | 869k | for (i = 0; i < channels; i++) { |
733 | | /* Set the bitstream reader at the start of a channel sound unit. */ |
734 | 680k | init_get_bits(&q->gb, |
735 | 680k | databuf + i * avctx->block_align / channels, |
736 | 680k | avctx->block_align * 8 / channels); |
737 | | |
738 | 680k | ret = decode_channel_sound_unit(q, &q->gb, &q->units[i], |
739 | 680k | out_samples[i], i, q->coding_mode); |
740 | 680k | if (ret != 0) |
741 | 19.4k | return ret; |
742 | 680k | } |
743 | 208k | } |
744 | | |
745 | | /* Apply the iQMF synthesis filter. */ |
746 | 850k | for (i = 0; i < channels; i++) { |
747 | 660k | float *p1 = out_samples[i]; |
748 | 660k | float *p2 = p1 + 256; |
749 | 660k | float *p3 = p2 + 256; |
750 | 660k | float *p4 = p3 + 256; |
751 | 660k | ff_atrac_iqmf(p1, p2, 256, p1, q->units[i].delay_buf1, q->temp_buf); |
752 | 660k | ff_atrac_iqmf(p4, p3, 256, p3, q->units[i].delay_buf2, q->temp_buf); |
753 | 660k | ff_atrac_iqmf(p1, p3, 512, p1, q->units[i].delay_buf3, q->temp_buf); |
754 | 660k | } |
755 | | |
756 | 189k | return 0; |
757 | 217k | } |
758 | | |
759 | | static int al_decode_frame(AVCodecContext *avctx, const uint8_t *databuf, |
760 | | int size, float **out_samples) |
761 | 234k | { |
762 | 234k | ATRAC3Context *q = avctx->priv_data; |
763 | 234k | int channels = avctx->ch_layout.nb_channels; |
764 | 234k | int ret, i; |
765 | | |
766 | | /* Set the bitstream reader at the start of a channel sound unit. */ |
767 | 234k | init_get_bits(&q->gb, databuf, size * 8); |
768 | | /* single channels */ |
769 | | /* Decode the channel sound units. */ |
770 | 387k | for (i = 0; i < channels; i++) { |
771 | 249k | ret = decode_channel_sound_unit(q, &q->gb, &q->units[i], |
772 | 249k | out_samples[i], i, q->coding_mode); |
773 | 249k | if (ret != 0) |
774 | 96.9k | return ret; |
775 | 15.3M | while (i < channels && get_bits_left(&q->gb) > 6 && show_bits(&q->gb, 6) != 0x28) { |
776 | 15.1M | skip_bits(&q->gb, 1); |
777 | 15.1M | } |
778 | 152k | } |
779 | | |
780 | | /* Apply the iQMF synthesis filter. */ |
781 | 277k | for (i = 0; i < channels; i++) { |
782 | 139k | float *p1 = out_samples[i]; |
783 | 139k | float *p2 = p1 + 256; |
784 | 139k | float *p3 = p2 + 256; |
785 | 139k | float *p4 = p3 + 256; |
786 | 139k | ff_atrac_iqmf(p1, p2, 256, p1, q->units[i].delay_buf1, q->temp_buf); |
787 | 139k | ff_atrac_iqmf(p4, p3, 256, p3, q->units[i].delay_buf2, q->temp_buf); |
788 | 139k | ff_atrac_iqmf(p1, p3, 512, p1, q->units[i].delay_buf3, q->temp_buf); |
789 | 139k | } |
790 | | |
791 | 137k | return 0; |
792 | 234k | } |
793 | | |
794 | | static int atrac3_decode_frame(AVCodecContext *avctx, AVFrame *frame, |
795 | | int *got_frame_ptr, AVPacket *avpkt) |
796 | 306k | { |
797 | 306k | const uint8_t *buf = avpkt->data; |
798 | 306k | int buf_size = avpkt->size; |
799 | 306k | ATRAC3Context *q = avctx->priv_data; |
800 | 306k | int ret; |
801 | 306k | const uint8_t *databuf; |
802 | | |
803 | 306k | if (buf_size < avctx->block_align) { |
804 | 88.6k | av_log(avctx, AV_LOG_ERROR, |
805 | 88.6k | "Frame too small (%d bytes). Truncated file?\n", buf_size); |
806 | 88.6k | return AVERROR_INVALIDDATA; |
807 | 88.6k | } |
808 | | |
809 | | /* get output buffer */ |
810 | 217k | frame->nb_samples = SAMPLES_PER_FRAME; |
811 | 217k | if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) |
812 | 0 | return ret; |
813 | | |
814 | | /* Check if we need to descramble and what buffer to pass on. */ |
815 | 217k | if (q->scrambled_stream) { |
816 | 217k | decode_bytes(buf, q->decoded_bytes_buffer, avctx->block_align); |
817 | 217k | databuf = q->decoded_bytes_buffer; |
818 | 217k | } else { |
819 | 632 | databuf = buf; |
820 | 632 | } |
821 | | |
822 | 217k | ret = decode_frame(avctx, databuf, (float **)frame->extended_data); |
823 | 217k | if (ret) { |
824 | 27.9k | av_log(avctx, AV_LOG_ERROR, "Frame decoding error!\n"); |
825 | 27.9k | return ret; |
826 | 27.9k | } |
827 | | |
828 | 189k | *got_frame_ptr = 1; |
829 | | |
830 | 189k | return avctx->block_align; |
831 | 217k | } |
832 | | |
833 | | static int atrac3al_decode_frame(AVCodecContext *avctx, AVFrame *frame, |
834 | | int *got_frame_ptr, AVPacket *avpkt) |
835 | 234k | { |
836 | 234k | int ret; |
837 | | |
838 | 234k | frame->nb_samples = SAMPLES_PER_FRAME; |
839 | 234k | if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) |
840 | 0 | return ret; |
841 | | |
842 | 234k | ret = al_decode_frame(avctx, avpkt->data, avpkt->size, |
843 | 234k | (float **)frame->extended_data); |
844 | 234k | if (ret) { |
845 | 96.9k | av_log(avctx, AV_LOG_ERROR, "Frame decoding error!\n"); |
846 | 96.9k | return ret; |
847 | 96.9k | } |
848 | | |
849 | 137k | *got_frame_ptr = 1; |
850 | | |
851 | 137k | return avpkt->size; |
852 | 234k | } |
853 | | |
854 | | static av_cold void atrac3_init_static_data(void) |
855 | 2 | { |
856 | 2 | VLCElem *table = atrac3_vlc_table; |
857 | 2 | const uint8_t (*hufftabs)[2] = atrac3_hufftabs; |
858 | 2 | int i; |
859 | | |
860 | 2 | init_imdct_window(); |
861 | 2 | ff_atrac_generate_tables(); |
862 | | |
863 | | /* Initialize the VLC tables. */ |
864 | 16 | for (i = 0; i < 7; i++) { |
865 | 14 | spectral_coeff_tab[i].table = table; |
866 | 14 | spectral_coeff_tab[i].table_allocated = 256; |
867 | 14 | ff_vlc_init_from_lengths(&spectral_coeff_tab[i], ATRAC3_VLC_BITS, huff_tab_sizes[i], |
868 | 14 | &hufftabs[0][1], 2, |
869 | 14 | &hufftabs[0][0], 2, 1, |
870 | 14 | -31, VLC_INIT_USE_STATIC, NULL); |
871 | 14 | hufftabs += huff_tab_sizes[i]; |
872 | 14 | table += 256; |
873 | 14 | } |
874 | 2 | } |
875 | | |
876 | | static av_cold int atrac3_decode_init(AVCodecContext *avctx) |
877 | 1.73k | { |
878 | 1.73k | static AVOnce init_static_once = AV_ONCE_INIT; |
879 | 1.73k | int i, js_pair, ret; |
880 | 1.73k | int version, delay, samples_per_frame, frame_factor; |
881 | 1.73k | const uint8_t *edata_ptr = avctx->extradata; |
882 | 1.73k | ATRAC3Context *q = avctx->priv_data; |
883 | 1.73k | AVFloatDSPContext *fdsp; |
884 | 1.73k | float scale = 1.0 / 32768; |
885 | 1.73k | int channels = avctx->ch_layout.nb_channels; |
886 | | |
887 | 1.73k | if (channels < MIN_CHANNELS || channels > MAX_CHANNELS) { |
888 | 155 | av_log(avctx, AV_LOG_ERROR, "Channel configuration error!\n"); |
889 | 155 | return AVERROR(EINVAL); |
890 | 155 | } |
891 | | |
892 | | /* Take care of the codec-specific extradata. */ |
893 | 1.57k | if (avctx->codec_id == AV_CODEC_ID_ATRAC3AL) { |
894 | 755 | version = 4; |
895 | 755 | samples_per_frame = SAMPLES_PER_FRAME * channels; |
896 | 755 | delay = 0x88E; |
897 | 755 | q->coding_mode = SINGLE; |
898 | 823 | } else if (avctx->extradata_size == 14) { |
899 | | /* Parse the extradata, WAV format */ |
900 | 116 | av_log(avctx, AV_LOG_DEBUG, "[0-1] %d\n", |
901 | 116 | bytestream_get_le16(&edata_ptr)); // Unknown value always 1 |
902 | 116 | edata_ptr += 4; // samples per channel |
903 | 116 | q->coding_mode = bytestream_get_le16(&edata_ptr); |
904 | 116 | av_log(avctx, AV_LOG_DEBUG,"[8-9] %d\n", |
905 | 116 | bytestream_get_le16(&edata_ptr)); //Dupe of coding mode |
906 | 116 | frame_factor = bytestream_get_le16(&edata_ptr); // Unknown always 1 |
907 | 116 | av_log(avctx, AV_LOG_DEBUG,"[12-13] %d\n", |
908 | 116 | bytestream_get_le16(&edata_ptr)); // Unknown always 0 |
909 | | |
910 | | /* setup */ |
911 | 116 | samples_per_frame = SAMPLES_PER_FRAME * channels; |
912 | 116 | version = 4; |
913 | 116 | delay = 0x88E; |
914 | 116 | q->coding_mode = q->coding_mode ? JOINT_STEREO : SINGLE; |
915 | 116 | q->scrambled_stream = 0; |
916 | | |
917 | 116 | if (avctx->block_align != 96 * channels * frame_factor && |
918 | 52 | avctx->block_align != 152 * channels * frame_factor && |
919 | 51 | avctx->block_align != 192 * channels * frame_factor) { |
920 | 46 | av_log(avctx, AV_LOG_ERROR, "Unknown frame/channel/frame_factor " |
921 | 46 | "configuration %d/%d/%d\n", avctx->block_align, |
922 | 46 | channels, frame_factor); |
923 | 46 | return AVERROR_INVALIDDATA; |
924 | 46 | } |
925 | 707 | } else if (avctx->extradata_size == 12 || avctx->extradata_size == 10) { |
926 | | /* Parse the extradata, RM format. */ |
927 | 667 | version = bytestream_get_be32(&edata_ptr); |
928 | 667 | samples_per_frame = bytestream_get_be16(&edata_ptr); |
929 | 667 | delay = bytestream_get_be16(&edata_ptr); |
930 | 667 | q->coding_mode = bytestream_get_be16(&edata_ptr); |
931 | 667 | q->scrambled_stream = 1; |
932 | | |
933 | 667 | } else { |
934 | 40 | av_log(avctx, AV_LOG_ERROR, "Unknown extradata size %d.\n", |
935 | 40 | avctx->extradata_size); |
936 | 40 | return AVERROR(EINVAL); |
937 | 40 | } |
938 | | |
939 | | /* Check the extradata */ |
940 | | |
941 | 1.49k | if (version != 4) { |
942 | 32 | av_log(avctx, AV_LOG_ERROR, "Version %d != 4.\n", version); |
943 | 32 | return AVERROR_INVALIDDATA; |
944 | 32 | } |
945 | | |
946 | 1.46k | if (samples_per_frame != SAMPLES_PER_FRAME * channels) { |
947 | 22 | av_log(avctx, AV_LOG_ERROR, "Unknown amount of samples per frame %d.\n", |
948 | 22 | samples_per_frame); |
949 | 22 | return AVERROR_INVALIDDATA; |
950 | 22 | } |
951 | | |
952 | 1.43k | if (delay != 0x88E) { |
953 | 18 | av_log(avctx, AV_LOG_ERROR, "Unknown amount of delay %x != 0x88E.\n", |
954 | 18 | delay); |
955 | 18 | return AVERROR_INVALIDDATA; |
956 | 18 | } |
957 | | |
958 | 1.42k | if (q->coding_mode == SINGLE) |
959 | 1.12k | av_log(avctx, AV_LOG_DEBUG, "Single channels detected.\n"); |
960 | 294 | else if (q->coding_mode == JOINT_STEREO) { |
961 | 276 | if (channels % 2 == 1) { /* Joint stereo channels must be even */ |
962 | 1 | av_log(avctx, AV_LOG_ERROR, "Invalid joint stereo channel configuration.\n"); |
963 | 1 | return AVERROR_INVALIDDATA; |
964 | 1 | } |
965 | 275 | av_log(avctx, AV_LOG_DEBUG, "Joint stereo detected.\n"); |
966 | 275 | } else { |
967 | 18 | av_log(avctx, AV_LOG_ERROR, "Unknown channel coding mode %x!\n", |
968 | 18 | q->coding_mode); |
969 | 18 | return AVERROR_INVALIDDATA; |
970 | 18 | } |
971 | | |
972 | 1.40k | if (avctx->block_align > 4096 || avctx->block_align <= 0) |
973 | 42 | return AVERROR(EINVAL); |
974 | | |
975 | 1.35k | q->decoded_bytes_buffer = av_mallocz(FFALIGN(avctx->block_align, 4) + |
976 | 1.35k | AV_INPUT_BUFFER_PADDING_SIZE); |
977 | 1.35k | if (!q->decoded_bytes_buffer) |
978 | 0 | return AVERROR(ENOMEM); |
979 | | |
980 | 1.35k | avctx->sample_fmt = AV_SAMPLE_FMT_FLTP; |
981 | | |
982 | | /* initialize the MDCT transform */ |
983 | 1.35k | if ((ret = av_tx_init(&q->mdct_ctx, &q->mdct_fn, AV_TX_FLOAT_MDCT, 1, 256, |
984 | 1.35k | &scale, AV_TX_FULL_IMDCT)) < 0) { |
985 | 0 | av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n"); |
986 | 0 | return ret; |
987 | 0 | } |
988 | | |
989 | | /* init the joint-stereo decoding data */ |
990 | 6.79k | for (js_pair = 0; js_pair < MAX_JS_PAIRS; js_pair++) { |
991 | 5.43k | q->weighting_delay[js_pair][0] = 0; |
992 | 5.43k | q->weighting_delay[js_pair][1] = 7; |
993 | 5.43k | q->weighting_delay[js_pair][2] = 0; |
994 | 5.43k | q->weighting_delay[js_pair][3] = 7; |
995 | 5.43k | q->weighting_delay[js_pair][4] = 0; |
996 | 5.43k | q->weighting_delay[js_pair][5] = 7; |
997 | | |
998 | 27.1k | for (i = 0; i < 4; i++) { |
999 | 21.7k | q->matrix_coeff_index_prev[js_pair][i] = 3; |
1000 | 21.7k | q->matrix_coeff_index_now[js_pair][i] = 3; |
1001 | 21.7k | q->matrix_coeff_index_next[js_pair][i] = 3; |
1002 | 21.7k | } |
1003 | 5.43k | } |
1004 | | |
1005 | 1.35k | ff_atrac_init_gain_compensation(&q->gainc_ctx, 4, 3); |
1006 | 1.35k | fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); |
1007 | 1.35k | if (!fdsp) |
1008 | 0 | return AVERROR(ENOMEM); |
1009 | 1.35k | q->vector_fmul = fdsp->vector_fmul; |
1010 | 1.35k | av_free(fdsp); |
1011 | | |
1012 | 1.35k | q->units = av_calloc(channels, sizeof(*q->units)); |
1013 | 1.35k | if (!q->units) |
1014 | 0 | return AVERROR(ENOMEM); |
1015 | | |
1016 | 1.35k | ff_thread_once(&init_static_once, atrac3_init_static_data); |
1017 | | |
1018 | 1.35k | return 0; |
1019 | 1.35k | } |
1020 | | |
1021 | | const FFCodec ff_atrac3_decoder = { |
1022 | | .p.name = "atrac3", |
1023 | | CODEC_LONG_NAME("ATRAC3 (Adaptive TRansform Acoustic Coding 3)"), |
1024 | | .p.type = AVMEDIA_TYPE_AUDIO, |
1025 | | .p.id = AV_CODEC_ID_ATRAC3, |
1026 | | .priv_data_size = sizeof(ATRAC3Context), |
1027 | | .init = atrac3_decode_init, |
1028 | | .close = atrac3_decode_close, |
1029 | | FF_CODEC_DECODE_CB(atrac3_decode_frame), |
1030 | | .p.capabilities = AV_CODEC_CAP_DR1, |
1031 | | CODEC_SAMPLEFMTS(AV_SAMPLE_FMT_FLTP), |
1032 | | .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, |
1033 | | }; |
1034 | | |
1035 | | const FFCodec ff_atrac3al_decoder = { |
1036 | | .p.name = "atrac3al", |
1037 | | CODEC_LONG_NAME("ATRAC3 AL (Adaptive TRansform Acoustic Coding 3 Advanced Lossless)"), |
1038 | | .p.type = AVMEDIA_TYPE_AUDIO, |
1039 | | .p.id = AV_CODEC_ID_ATRAC3AL, |
1040 | | .priv_data_size = sizeof(ATRAC3Context), |
1041 | | .init = atrac3_decode_init, |
1042 | | .close = atrac3_decode_close, |
1043 | | FF_CODEC_DECODE_CB(atrac3al_decode_frame), |
1044 | | .p.capabilities = AV_CODEC_CAP_DR1, |
1045 | | CODEC_SAMPLEFMTS(AV_SAMPLE_FMT_FLTP), |
1046 | | .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, |
1047 | | }; |