/src/mozilla-central/dom/media/AudioConverter.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioConverter.h"
#include <string.h>
#include <speex/speex_resampler.h>
#include <algorithm> // std::min(), used by dumbUpDownMix()
#include <cmath>
/*
 * Parts derived from MythTV AudioConvert Class
 * Created by Jean-Yves Avenard.
 *
 * Copyright (C) Bubblestuff Pty Ltd 2013
 * Copyright (C) foobum@gmail.com 2010
 */

namespace mozilla {

AudioConverter::AudioConverter(const AudioConfig& aIn, const AudioConfig& aOut)
  : mIn(aIn)
  , mOut(aOut)
  , mResampler(nullptr)
{
  MOZ_DIAGNOSTIC_ASSERT(
    aIn.Format() == aOut.Format() && aIn.Interleaved() == aOut.Interleaved(),
    "No format or interleaving conversion is supported at this stage");
  MOZ_DIAGNOSTIC_ASSERT(
    aOut.Channels() <= 2 || aIn.Channels() == aOut.Channels(),
    "Only down/upmixing to mono or stereo is supported at this stage");
  MOZ_DIAGNOSTIC_ASSERT(aOut.Interleaved(),
                        "planar audio format not supported");
  mIn.Layout().MappingTable(mOut.Layout(), &mChannelOrderMap);
  if (aIn.Rate() != aOut.Rate()) {
    RecreateResampler();
  }
}
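
// Summary (editorial note, not in the original file): this converter never
// changes the sample format or interleaving (asserted above). Within those
// limits it can reorder channels via the mapping table, downmix or upmix to
// stereo/mono, and resample through the Speex resampler created by
// RecreateResampler() when the input and output rates differ.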

AudioConverter::~AudioConverter()
{
  if (mResampler) {
    speex_resampler_destroy(mResampler);
    mResampler = nullptr;
  }
}

bool
AudioConverter::CanWorkInPlace() const
{
  bool needDownmix = mIn.Channels() > mOut.Channels();
  bool needUpmix = mIn.Channels() < mOut.Channels();
  bool canDownmixInPlace =
    mIn.Channels() * AudioConfig::SampleSize(mIn.Format()) >=
    mOut.Channels() * AudioConfig::SampleSize(mOut.Format());
  bool needResample = mIn.Rate() != mOut.Rate();
  bool canResampleInPlace = mIn.Rate() >= mOut.Rate();
  // We should be able to work in place if 1s of audio input takes less space
  // than 1s of audio output. However, as we downmix before resampling we can't
  // perform any upsampling in place (in-place resampling requires the incoming
  // rate to be greater than or equal to the outgoing rate).
  return !needUpmix && (!needDownmix || canDownmixInPlace) &&
         (!needResample || canResampleInPlace);
}
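
// Worked example (editorial note): with 6-channel S16 input downmixed to
// stereo S16, one input frame occupies 6 * 2 = 12 bytes and one output frame
// 2 * 2 = 4 bytes, so the downmix can safely overwrite its own input buffer.
// Conversely, resampling 44100 Hz to 48000 Hz produces more frames than it
// consumes, so it can never run in place here.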

size_t
AudioConverter::ProcessInternal(void* aOut, const void* aIn, size_t aFrames)
{
  if (!aFrames) {
    return 0;
  }
  if (mIn.Channels() > mOut.Channels()) {
    return DownmixAudio(aOut, aIn, aFrames);
  } else if (mIn.Channels() < mOut.Channels()) {
    return UpmixAudio(aOut, aIn, aFrames);
  } else if (mIn.Layout() != mOut.Layout() && CanReorderAudio()) {
    ReOrderInterleavedChannels(aOut, aIn, aFrames);
  } else if (aIn != aOut) {
    memmove(aOut, aIn, FramesOutToBytes(aFrames));
  }
  return aFrames;
}
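
// Editorial note: each call above performs at most one of the channel-count
// conversions; reordering only happens when the channel count is unchanged,
// and rate conversion is handled separately through ResampleAudio().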

// Reorder interleaved channels.
// Can work in place (e.g. aOut == aIn).
template <class AudioDataType>
void
_ReOrderInterleavedChannels(AudioDataType* aOut, const AudioDataType* aIn,
                            uint32_t aFrames, uint32_t aChannels,
                            const uint8_t* aChannelOrderMap)
{
  MOZ_DIAGNOSTIC_ASSERT(aChannels <= AudioConfig::ChannelLayout::MAX_CHANNELS);
  AudioDataType val[AudioConfig::ChannelLayout::MAX_CHANNELS];
  for (uint32_t i = 0; i < aFrames; i++) {
    for (uint32_t j = 0; j < aChannels; j++) {
      val[j] = aIn[aChannelOrderMap[j]];
    }
    for (uint32_t j = 0; j < aChannels; j++) {
      aOut[j] = val[j];
    }
    aOut += aChannels;
    aIn += aChannels;
  }
}
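
// Worked example (editorial note, hypothetical layouts): with 4 channels and
// aChannelOrderMap = {0, 2, 1, 3}, an input frame {A, B, C, D} is written out
// as {A, C, B, D}; staging each frame in val[] is what allows aOut == aIn.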

void
AudioConverter::ReOrderInterleavedChannels(void* aOut, const void* aIn,
                                           size_t aFrames) const
{
  MOZ_DIAGNOSTIC_ASSERT(mIn.Channels() == mOut.Channels());
  MOZ_DIAGNOSTIC_ASSERT(CanReorderAudio());

  if (mChannelOrderMap.IsEmpty() || mOut.Channels() == 1 ||
      mOut.Layout() == mIn.Layout()) {
    // Nothing to reorder: either we don't know how to re-order (empty map),
    // there is only one channel (planar and interleaved are then identical),
    // or the input and output layouts already match.
    if (aOut != aIn) {
      memmove(aOut, aIn, FramesOutToBytes(aFrames));
    }
    return;
  }

  uint32_t bits = AudioConfig::FormatToBits(mOut.Format());
  switch (bits) {
    case 8:
      _ReOrderInterleavedChannels((uint8_t*)aOut, (const uint8_t*)aIn,
                                  aFrames, mIn.Channels(),
                                  mChannelOrderMap.Elements());
      break;
    case 16:
      _ReOrderInterleavedChannels((int16_t*)aOut, (const int16_t*)aIn,
                                  aFrames, mIn.Channels(),
                                  mChannelOrderMap.Elements());
      break;
    default:
      MOZ_DIAGNOSTIC_ASSERT(AudioConfig::SampleSize(mOut.Format()) == 4);
      _ReOrderInterleavedChannels((int32_t*)aOut, (const int32_t*)aIn,
                                  aFrames, mIn.Channels(),
                                  mChannelOrderMap.Elements());
      break;
  }
}

static inline int16_t clipTo15(int32_t aX)
{
  return aX < -32768 ? -32768 : aX <= 32767 ? aX : 32767;
}
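
// Editorial note: clipTo15() clamps a 32-bit intermediate to the signed
// 16-bit range [-32768, 32767]. Callers below use it as
// clipTo15((samp + 8192) >> 14): the Q14 products are rounded by adding
// 2^13 = 8192 before the shift back down to integer samples.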

template<typename TYPE>
static void
dumbUpDownMix(TYPE* aOut,
              int32_t aOutChannels,
              const TYPE* aIn,
              int32_t aInChannels,
              int32_t aFrames)
{
  if (aIn == aOut) {
    return;
  }
  int32_t commonChannels = std::min(aInChannels, aOutChannels);

  for (int32_t i = 0; i < aFrames; i++) {
    for (int32_t j = 0; j < commonChannels; j++) {
      aOut[i * aOutChannels + j] = aIn[i * aInChannels + j];
    }
    // Silence the extra output channels when upmixing (this loop is empty when
    // downmixing).
    for (int32_t j = aInChannels; j < aOutChannels; j++) {
      aOut[i * aOutChannels + j] = 0;
    }
  }
}
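
// Worked example (editorial note): dumbUpDownMix() copying stereo {L, R} into
// a 4-channel output produces {L, R, 0, 0} per frame, while a 6-channel input
// written to stereo simply keeps the first two channels and drops the rest.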

size_t
AudioConverter::DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const
{
  MOZ_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 ||
             mIn.Format() == AudioConfig::FORMAT_FLT);
  MOZ_ASSERT(mIn.Channels() >= mOut.Channels());
  MOZ_ASSERT(mOut.Layout() == AudioConfig::ChannelLayout(2) ||
             mOut.Layout() == AudioConfig::ChannelLayout(1));

  uint32_t channels = mIn.Channels();

  if (channels == 1 && mOut.Channels() == 1) {
    if (aOut != aIn) {
      memmove(aOut, aIn, FramesOutToBytes(aFrames));
    }
    return aFrames;
  }

  if (!mIn.Layout().IsValid() || !mOut.Layout().IsValid()) {
    // Dumb copy dropping extra channels.
    if (mIn.Format() == AudioConfig::FORMAT_FLT) {
      dumbUpDownMix(static_cast<float*>(aOut),
                    mOut.Channels(),
                    static_cast<const float*>(aIn),
                    mIn.Channels(),
                    aFrames);
    } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
      dumbUpDownMix(static_cast<int16_t*>(aOut),
                    mOut.Channels(),
                    static_cast<const int16_t*>(aIn),
                    mIn.Channels(),
                    aFrames);
    } else {
      MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
    }
    return aFrames;
  }

  MOZ_ASSERT(mIn.Layout() ==
               AudioConfig::ChannelLayout::SMPTEDefault(mIn.Layout()),
             "Can only downmix input data in SMPTE layout");
  if (channels > 2) {
    if (mIn.Format() == AudioConfig::FORMAT_FLT) {
      // Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows 5-8.
      static const float dmatrix[6][8][2]= {
        /*3*/{{0.5858f,0},{0,0.5858f},{0.4142f,0.4142f}},
        /*4*/{{0.4226f,0},{0,0.4226f},{0.366f, 0.2114f},{0.2114f,0.366f}},
        /*5*/{{0.6510f,0},{0,0.6510f},{0.4600f,0.4600f},{0.5636f,0.3254f},{0.3254f,0.5636f}},
        /*6*/{{0.5290f,0},{0,0.5290f},{0.3741f,0.3741f},{0.3741f,0.3741f},{0.4582f,0.2645f},{0.2645f,0.4582f}},
        /*7*/{{0.4553f,0},{0,0.4553f},{0.3220f,0.3220f},{0.3220f,0.3220f},{0.2788f,0.2788f},{0.3943f,0.2277f},{0.2277f,0.3943f}},
        /*8*/{{0.3886f,0},{0,0.3886f},{0.2748f,0.2748f},{0.2748f,0.2748f},{0.3366f,0.1943f},{0.1943f,0.3366f},{0.3366f,0.1943f},{0.1943f,0.3366f}},
      };
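      // Editorial check of the normalization claim above: for the 6-channel
      // row the left-column coefficients sum to
      // 0.5290 + 0.3741 + 0.3741 + 0.4582 + 0.2645 = 1.9999, i.e. ~2, and for
      // the 3-channel row 0.5858 + 0.4142 = 1.0, matching "1 for rows 3,4 and
      // 2 for rows 5-8".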
      // Re-write the buffer with downmixed data
      const float* in = static_cast<const float*>(aIn);
      float* out = static_cast<float*>(aOut);
      for (uint32_t i = 0; i < aFrames; i++) {
        float sampL = 0.0;
        float sampR = 0.0;
        for (uint32_t j = 0; j < channels; j++) {
          sampL += in[i*mIn.Channels()+j]*dmatrix[mIn.Channels()-3][j][0];
          sampR += in[i*mIn.Channels()+j]*dmatrix[mIn.Channels()-3][j][1];
        }
        *out++ = sampL;
        *out++ = sampR;
      }
    } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
      // Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows 5-8.
      // Coefficients in Q14.
      static const int16_t dmatrix[6][8][2]= {
        /*3*/{{9598, 0},{0, 9598},{6786,6786}},
        /*4*/{{6925, 0},{0, 6925},{5997,3462},{3462,5997}},
        /*5*/{{10663,0},{0, 10663},{7540,7540},{9234,5331},{5331,9234}},
        /*6*/{{8668, 0},{0, 8668},{6129,6129},{6129,6129},{7507,4335},{4335,7507}},
        /*7*/{{7459, 0},{0, 7459},{5275,5275},{5275,5275},{4568,4568},{6460,3731},{3731,6460}},
        /*8*/{{6368, 0},{0, 6368},{4502,4502},{4502,4502},{5514,3184},{3184,5514},{5514,3184},{3184,5514}}
      };
      // Re-write the buffer with downmixed data
      const int16_t* in = static_cast<const int16_t*>(aIn);
      int16_t* out = static_cast<int16_t*>(aOut);
      for (uint32_t i = 0; i < aFrames; i++) {
        int32_t sampL = 0;
        int32_t sampR = 0;
        for (uint32_t j = 0; j < channels; j++) {
          sampL += in[i*channels+j]*dmatrix[channels-3][j][0];
          sampR += in[i*channels+j]*dmatrix[channels-3][j][1];
        }
        *out++ = clipTo15((sampL + 8192)>>14);
        *out++ = clipTo15((sampR + 8192)>>14);
      }
    } else {
      MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
    }

    // If we are to continue downmixing to mono, start working on the output
    // buffer.
    aIn = aOut;
    channels = 2;
  }

  if (mOut.Channels() == 1) {
    if (mIn.Format() == AudioConfig::FORMAT_FLT) {
      const float* in = static_cast<const float*>(aIn);
      float* out = static_cast<float*>(aOut);
      for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
        // The samples in the buffer are interleaved: average the left and
        // right channels of each frame.
        float sample = (in[fIdx*channels] + in[fIdx*channels + 1]) * 0.5;
        *out++ = sample;
      }
    } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
      const int16_t* in = static_cast<const int16_t*>(aIn);
      int16_t* out = static_cast<int16_t*>(aOut);
      for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
        // The samples in the buffer are interleaved: average the left and
        // right channels of each frame.
        int32_t sample = (in[fIdx*channels] + in[fIdx*channels + 1]) * 0.5;
        *out++ = sample;
      }
    } else {
      MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
    }
  }
  return aFrames;
}
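
// Worked example (editorial note): for a stereo-to-mono S16 frame
// {1000, 3000}, the loop above writes (1000 + 3000) * 0.5 = 2000; the float
// path likewise averages each frame to 0.5 * (L + R).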

size_t
AudioConverter::ResampleAudio(void* aOut, const void* aIn, size_t aFrames)
{
  if (!mResampler) {
    return 0;
  }
  uint32_t outframes = ResampleRecipientFrames(aFrames);
  uint32_t inframes = aFrames;

  int error;
  if (mOut.Format() == AudioConfig::FORMAT_FLT) {
    const float* in = reinterpret_cast<const float*>(aIn);
    float* out = reinterpret_cast<float*>(aOut);
    error =
      speex_resampler_process_interleaved_float(mResampler, in, &inframes,
                                                out, &outframes);
  } else if (mOut.Format() == AudioConfig::FORMAT_S16) {
    const int16_t* in = reinterpret_cast<const int16_t*>(aIn);
    int16_t* out = reinterpret_cast<int16_t*>(aOut);
    error =
      speex_resampler_process_interleaved_int(mResampler, in, &inframes,
                                              out, &outframes);
  } else {
    MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
    error = RESAMPLER_ERR_ALLOC_FAILED;
  }
  MOZ_ASSERT(error == RESAMPLER_ERR_SUCCESS);
  if (error != RESAMPLER_ERR_SUCCESS) {
    speex_resampler_destroy(mResampler);
    mResampler = nullptr;
    return 0;
  }
  MOZ_ASSERT(inframes == aFrames, "Some frames will be dropped");
  return outframes;
}
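
// Editorial note: outframes starts as the ResampleRecipientFrames() upper
// bound and the speex_resampler_process_interleaved_* call rewrites it with
// the number of frames actually produced, which is what gets returned. The
// assertion on inframes == aFrames flags the case where the output buffer was
// too small for all of the input to be consumed.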

void
AudioConverter::RecreateResampler()
{
  if (mResampler) {
    speex_resampler_destroy(mResampler);
  }
  int error;
  mResampler = speex_resampler_init(mOut.Channels(),
                                    mIn.Rate(),
                                    mOut.Rate(),
                                    SPEEX_RESAMPLER_QUALITY_DEFAULT,
                                    &error);

  if (error == RESAMPLER_ERR_SUCCESS) {
    speex_resampler_skip_zeros(mResampler);
  } else {
    NS_WARNING("Failed to initialize resampler.");
    mResampler = nullptr;
  }
}
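
// Editorial note (based on the Speex resampler's documented behaviour):
// speex_resampler_skip_zeros() makes the resampler skip its initial
// zero-filled latency, so the first output samples line up with the first
// input samples instead of being preceded by silence.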

size_t
AudioConverter::DrainResampler(void* aOut)
{
  if (!mResampler) {
    return 0;
  }
  int frames = speex_resampler_get_input_latency(mResampler);
  AlignedByteBuffer buffer(FramesOutToBytes(frames));
  if (!buffer) {
    // OOM
    return 0;
  }
  frames = ResampleAudio(aOut, buffer.Data(), frames);
  // Tear down the resampler and create a new one, as that is easier than
  // dealing with the leftover internal state.
  RecreateResampler();
  return frames;
}

size_t
AudioConverter::UpmixAudio(void* aOut, const void* aIn, size_t aFrames) const
{
  MOZ_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 ||
             mIn.Format() == AudioConfig::FORMAT_FLT);
  MOZ_ASSERT(mIn.Channels() < mOut.Channels());
  MOZ_ASSERT(mIn.Channels() == 1, "Can only upmix mono for now");
  MOZ_ASSERT(mOut.Channels() == 2, "Can only upmix to stereo for now");

  if (!mIn.Layout().IsValid() || !mOut.Layout().IsValid() ||
      mOut.Channels() != 2) {
    // Dumb copy the channels and insert silence for the extra channels.
    if (mIn.Format() == AudioConfig::FORMAT_FLT) {
      dumbUpDownMix(static_cast<float*>(aOut),
                    mOut.Channels(),
                    static_cast<const float*>(aIn),
                    mIn.Channels(),
                    aFrames);
    } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
      dumbUpDownMix(static_cast<int16_t*>(aOut),
                    mOut.Channels(),
                    static_cast<const int16_t*>(aIn),
                    mIn.Channels(),
                    aFrames);
    } else {
      MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
    }
    return aFrames;
  }

  // Upmix mono to stereo.
  // This is a very dumb mono to stereo upmixing; power levels are preserved
  // following the calculation: left = right = -3dB * mono.
  if (mIn.Format() == AudioConfig::FORMAT_FLT) {
    const float m3db = std::sqrt(0.5); // -3dB = sqrt(1/2)
    const float* in = static_cast<const float*>(aIn);
    float* out = static_cast<float*>(aOut);
    for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
      float sample = in[fIdx] * m3db;
      // The samples in the output buffer are interleaved.
      *out++ = sample;
      *out++ = sample;
    }
  } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
    const int16_t* in = static_cast<const int16_t*>(aIn);
    int16_t* out = static_cast<int16_t*>(aOut);
    for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
      // 11585 is sqrt(0.5) in Q14, so this is close enough to
      // in[fIdx] * sqrt(0.5).
      int16_t sample = ((int32_t)in[fIdx] * 11585) >> 14;
      // The samples in the output buffer are interleaved.
      *out++ = sample;
      *out++ = sample;
    }
  } else {
    MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
  }

  return aFrames;
}
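
// Editorial check (not in the original): sqrt(0.5) is about 0.70711, and
// 0.70711 * 2^14 is about 11585.2, so the integer path's ((x * 11585) >> 14)
// matches the float path's x * sqrt(0.5) to within rounding.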

size_t
AudioConverter::ResampleRecipientFrames(size_t aFrames) const
{
  if (!aFrames && mIn.Rate() != mOut.Rate()) {
    if (!mResampler) {
      return 0;
    }
    // We drain by pushing in get_input_latency() samples of 0.
    aFrames = speex_resampler_get_input_latency(mResampler);
  }
  return (uint64_t)aFrames * mOut.Rate() / mIn.Rate() + 1;
}
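
// Worked example (editorial note): converting 441 input frames from 44100 Hz
// to 48000 Hz gives 441 * 48000 / 44100 + 1 = 480 + 1 = 481, i.e. the exact
// integer result plus one frame of headroom from the trailing "+ 1".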

size_t
AudioConverter::FramesOutToSamples(size_t aFrames) const
{
  return aFrames * mOut.Channels();
}

size_t
AudioConverter::SamplesInToFrames(size_t aSamples) const
{
  return aSamples / mIn.Channels();
}

size_t
AudioConverter::FramesOutToBytes(size_t aFrames) const
{
  return FramesOutToSamples(aFrames) * AudioConfig::SampleSize(mOut.Format());
}
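
// Worked example (editorial note): with stereo FORMAT_FLT output, and assuming
// a 4-byte sample size for that format, FramesOutToBytes(1024) is
// 1024 * 2 * 4 = 8192 bytes.
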
} // namespace mozilla