/src/mozilla-central/dom/media/webaudio/WaveShaperNode.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "WaveShaperNode.h" |
8 | | #include "mozilla/dom/WaveShaperNodeBinding.h" |
9 | | #include "AlignmentUtils.h" |
10 | | #include "AudioNode.h" |
11 | | #include "AudioNodeEngine.h" |
12 | | #include "AudioNodeStream.h" |
13 | | #include "mozilla/PodOperations.h" |
14 | | |
15 | | namespace mozilla { |
16 | | namespace dom { |
17 | | |
18 | | NS_IMPL_CYCLE_COLLECTION_CLASS(WaveShaperNode) |
19 | | |
20 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(WaveShaperNode, AudioNode) |
21 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER |
22 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK_END |
23 | | |
24 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(WaveShaperNode, AudioNode) |
25 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END |
26 | | |
27 | 0 | NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(WaveShaperNode) |
28 | 0 | NS_IMPL_CYCLE_COLLECTION_TRACE_PRESERVED_WRAPPER |
29 | 0 | NS_IMPL_CYCLE_COLLECTION_TRACE_END |
30 | | |
31 | 0 | NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(WaveShaperNode) |
32 | 0 | NS_INTERFACE_MAP_END_INHERITING(AudioNode) |
33 | | |
34 | | NS_IMPL_ADDREF_INHERITED(WaveShaperNode, AudioNode) |
35 | | NS_IMPL_RELEASE_INHERITED(WaveShaperNode, AudioNode) |
36 | | |
37 | | static uint32_t ValueOf(OverSampleType aType) |
38 | 0 | { |
39 | 0 | switch (aType) { |
40 | 0 | case OverSampleType::None: return 1; |
41 | 0 | case OverSampleType::_2x: return 2; |
42 | 0 | case OverSampleType::_4x: return 4; |
43 | 0 | default: |
44 | 0 | MOZ_ASSERT_UNREACHABLE("We should never reach here"); |
45 | 0 | return 1; |
46 | 0 | } |
47 | 0 | } |
48 | | |
49 | | class Resampler final |
50 | | { |
51 | | public: |
52 | | Resampler() |
53 | | : mType(OverSampleType::None) |
54 | | , mUpSampler(nullptr) |
55 | | , mDownSampler(nullptr) |
56 | | , mChannels(0) |
57 | | , mSampleRate(0) |
58 | 0 | { |
59 | 0 | } |
60 | | |
61 | | ~Resampler() |
62 | 0 | { |
63 | 0 | Destroy(); |
64 | 0 | } |
65 | | |
66 | | void Reset(uint32_t aChannels, TrackRate aSampleRate, OverSampleType aType) |
67 | 0 | { |
68 | 0 | if (aChannels == mChannels && |
69 | 0 | aSampleRate == mSampleRate && |
70 | 0 | aType == mType) { |
71 | 0 | return; |
72 | 0 | } |
73 | 0 | |
74 | 0 | mChannels = aChannels; |
75 | 0 | mSampleRate = aSampleRate; |
76 | 0 | mType = aType; |
77 | 0 | |
78 | 0 | Destroy(); |
79 | 0 | |
80 | 0 | if (aType == OverSampleType::None) { |
81 | 0 | mBuffer.Clear(); |
82 | 0 | return; |
83 | 0 | } |
84 | 0 | |
85 | 0 | mUpSampler = speex_resampler_init(aChannels, |
86 | 0 | aSampleRate, |
87 | 0 | aSampleRate * ValueOf(aType), |
88 | 0 | SPEEX_RESAMPLER_QUALITY_MIN, |
89 | 0 | nullptr); |
90 | 0 | mDownSampler = speex_resampler_init(aChannels, |
91 | 0 | aSampleRate * ValueOf(aType), |
92 | 0 | aSampleRate, |
93 | 0 | SPEEX_RESAMPLER_QUALITY_MIN, |
94 | 0 | nullptr); |
95 | 0 | mBuffer.SetLength(WEBAUDIO_BLOCK_SIZE*ValueOf(aType)); |
96 | 0 | } |
97 | | |
98 | | float* UpSample(uint32_t aChannel, const float* aInputData, uint32_t aBlocks) |
99 | 0 | { |
100 | 0 | uint32_t inSamples = WEBAUDIO_BLOCK_SIZE; |
101 | 0 | uint32_t outSamples = WEBAUDIO_BLOCK_SIZE*aBlocks; |
102 | 0 | float* outputData = mBuffer.Elements(); |
103 | 0 | |
104 | 0 | MOZ_ASSERT(mBuffer.Length() == outSamples); |
105 | 0 | |
106 | 0 | WebAudioUtils::SpeexResamplerProcess(mUpSampler, aChannel, |
107 | 0 | aInputData, &inSamples, |
108 | 0 | outputData, &outSamples); |
109 | 0 | |
110 | 0 | MOZ_ASSERT(inSamples == WEBAUDIO_BLOCK_SIZE && outSamples == WEBAUDIO_BLOCK_SIZE*aBlocks); |
111 | 0 | |
112 | 0 | return outputData; |
113 | 0 | } |
114 | | |
115 | | void DownSample(uint32_t aChannel, float* aOutputData, uint32_t aBlocks) |
116 | 0 | { |
117 | 0 | uint32_t inSamples = WEBAUDIO_BLOCK_SIZE*aBlocks; |
118 | 0 | uint32_t outSamples = WEBAUDIO_BLOCK_SIZE; |
119 | 0 | const float* inputData = mBuffer.Elements(); |
120 | 0 | |
121 | 0 | MOZ_ASSERT(mBuffer.Length() == inSamples); |
122 | 0 | |
123 | 0 | WebAudioUtils::SpeexResamplerProcess(mDownSampler, aChannel, |
124 | 0 | inputData, &inSamples, |
125 | 0 | aOutputData, &outSamples); |
126 | 0 | |
127 | 0 | MOZ_ASSERT(inSamples == WEBAUDIO_BLOCK_SIZE*aBlocks && outSamples == WEBAUDIO_BLOCK_SIZE); |
128 | 0 | } |
129 | | |
130 | | size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
131 | 0 | { |
132 | 0 | size_t amount = 0; |
133 | 0 | // Future: properly measure speex memory |
134 | 0 | amount += aMallocSizeOf(mUpSampler); |
135 | 0 | amount += aMallocSizeOf(mDownSampler); |
136 | 0 | amount += mBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf); |
137 | 0 | return amount; |
138 | 0 | } |
139 | | |
140 | | private: |
141 | | void Destroy() |
142 | 0 | { |
143 | 0 | if (mUpSampler) { |
144 | 0 | speex_resampler_destroy(mUpSampler); |
145 | 0 | mUpSampler = nullptr; |
146 | 0 | } |
147 | 0 | if (mDownSampler) { |
148 | 0 | speex_resampler_destroy(mDownSampler); |
149 | 0 | mDownSampler = nullptr; |
150 | 0 | } |
151 | 0 | } |
152 | | |
153 | | private: |
154 | | OverSampleType mType; |
155 | | SpeexResamplerState* mUpSampler; |
156 | | SpeexResamplerState* mDownSampler; |
157 | | uint32_t mChannels; |
158 | | TrackRate mSampleRate; |
159 | | nsTArray<float> mBuffer; |
160 | | }; |
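
The Resampler above is only ever driven from the engine further down; as a minimal sketch of the intended call order for one render quantum (the function name, the single channel, and the 4x factor are illustrative, not part of this file):

    // Sketch only: assumes one channel, 4x oversampling, and the Resampler
    // defined above. WEBAUDIO_BLOCK_SIZE is 128 samples per render quantum.
    static void OversampleOneBlock(Resampler& aResampler, TrackRate aRate,
                                   const float* aInput, float* aOutput)
    {
      aResampler.Reset(1, aRate, OverSampleType::_4x);        // lazily (re)creates the speex state
      float* oversampled = aResampler.UpSample(0, aInput, 4); // 128 -> 512 samples
      // ... shape the 512 oversampled samples in place here ...
      aResampler.DownSample(0, aOutput, 4);                   // 512 -> 128 samples
    }
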
161 | | |
162 | | class WaveShaperNodeEngine final : public AudioNodeEngine |
163 | | { |
164 | | public: |
165 | | explicit WaveShaperNodeEngine(AudioNode* aNode) |
166 | | : AudioNodeEngine(aNode) |
167 | | , mType(OverSampleType::None) |
168 | 0 | { |
169 | 0 | } |
170 | | |
171 | | enum Parameters {
172 | | TYPE |
173 | | }; |
174 | | |
175 | | void SetRawArrayData(nsTArray<float>& aCurve) override |
176 | 0 | { |
177 | 0 | mCurve.SwapElements(aCurve); |
178 | 0 | } |
179 | | |
180 | | void SetInt32Parameter(uint32_t aIndex, int32_t aValue) override |
181 | 0 | { |
182 | 0 | switch (aIndex) { |
183 | 0 | case TYPE: |
184 | 0 | mType = static_cast<OverSampleType>(aValue); |
185 | 0 | break; |
186 | 0 | default: |
187 | 0 | NS_ERROR("Bad WaveShaperNode Int32Parameter"); |
188 | 0 | } |
189 | 0 | } |
190 | | |
191 | | template <uint32_t blocks> |
192 | | void ProcessCurve(const float* aInputBuffer, float* aOutputBuffer) |
193 | 0 | { |
194 | 0 | for (uint32_t j = 0; j < WEBAUDIO_BLOCK_SIZE*blocks; ++j) { |
195 | 0 | // Index into the curve array based on the amplitude of the |
196 | 0 | // incoming signal by using an amplitude range of [-1, 1] and |
197 | 0 | // performing a linear interpolation of the neighbor values. |
198 | 0 | float index = (mCurve.Length() - 1) * (aInputBuffer[j] + 1.0f) / 2.0f; |
199 | 0 | if (index < 0.0f) { |
200 | 0 | aOutputBuffer[j] = mCurve[0]; |
201 | 0 | } else { |
202 | 0 | int32_t indexLower = index; |
203 | 0 | if (static_cast<uint32_t>(indexLower) >= mCurve.Length() - 1) { |
204 | 0 | aOutputBuffer[j] = mCurve[mCurve.Length() - 1]; |
205 | 0 | } else { |
206 | 0 | uint32_t indexHigher = indexLower + 1; |
207 | 0 | float interpolationFactor = index - indexLower; |
208 | 0 | aOutputBuffer[j] = (1.0f - interpolationFactor) * mCurve[indexLower] + |
209 | 0 | interpolationFactor * mCurve[indexHigher]; |
210 | 0 | } |
211 | 0 | } |
212 | 0 | } |
213 | 0 | } |
| | Unexecuted instantiation: void mozilla::dom::WaveShaperNodeEngine::ProcessCurve<1u>(float const*, float*) |
| | Unexecuted instantiation: void mozilla::dom::WaveShaperNodeEngine::ProcessCurve<2u>(float const*, float*) |
| | Unexecuted instantiation: void mozilla::dom::WaveShaperNodeEngine::ProcessCurve<4u>(float const*, float*) |
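
For reference, the per-sample mapping in ProcessCurve can be written on its own as below; this is a minimal standalone sketch (ShapeSample is an illustrative name, and the curve is assumed to hold at least two points, which SetCurveInternal further down enforces):

    #include <cstdint>
    #include <vector>

    // Maps one sample through the shaping curve the same way the loop above
    // does: the nominal [-1, 1] input range is spread over the curve indices,
    // out-of-range values are clamped to the first/last curve point, and
    // neighbouring points are linearly interpolated.
    static float ShapeSample(const std::vector<float>& aCurve, float aInput)
    {
      float index = (aCurve.size() - 1) * (aInput + 1.0f) / 2.0f;
      if (index < 0.0f) {
        return aCurve.front();
      }
      uint32_t lower = static_cast<uint32_t>(index);
      if (lower >= aCurve.size() - 1) {
        return aCurve.back();
      }
      float t = index - lower;
      return (1.0f - t) * aCurve[lower] + t * aCurve[lower + 1];
    }
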
214 | | |
215 | | void ProcessBlock(AudioNodeStream* aStream, |
216 | | GraphTime aFrom, |
217 | | const AudioBlock& aInput, |
218 | | AudioBlock* aOutput, |
219 | | bool* aFinished) override |
220 | 0 | { |
221 | 0 | uint32_t channelCount = aInput.ChannelCount(); |
222 | 0 | if (!mCurve.Length()) { |
223 | 0 | // Optimize the case where we don't have a curve buffer |
224 | 0 | *aOutput = aInput; |
225 | 0 | return; |
226 | 0 | } |
227 | 0 | |
228 | 0 | // If the input is null, check to see if non-null output will be produced |
229 | 0 | bool nullInput = false; |
230 | 0 | if (channelCount == 0) { |
231 | 0 | float index = (mCurve.Length() - 1) * 0.5; |
232 | 0 | uint32_t indexLower = index; |
233 | 0 | uint32_t indexHigher = indexLower + 1; |
234 | 0 | float interpolationFactor = index - indexLower; |
235 | 0 | if ((1.0f - interpolationFactor) * mCurve[indexLower] + |
236 | 0 | interpolationFactor * mCurve[indexHigher] == 0.0) { |
237 | 0 | *aOutput = aInput; |
238 | 0 | return; |
239 | 0 | } else { |
240 | 0 | nullInput = true; |
241 | 0 | channelCount = 1; |
242 | 0 | } |
243 | 0 | } |
244 | 0 | |
245 | 0 | aOutput->AllocateChannels(channelCount); |
246 | 0 | for (uint32_t i = 0; i < channelCount; ++i) { |
247 | 0 | const float* inputSamples; |
248 | 0 | float scaledInput[WEBAUDIO_BLOCK_SIZE + 4]; |
249 | 0 | float* alignedScaledInput = ALIGNED16(scaledInput); |
250 | 0 | ASSERT_ALIGNED16(alignedScaledInput); |
251 | 0 | if (!nullInput) { |
252 | 0 | if (aInput.mVolume != 1.0f) { |
253 | 0 | AudioBlockCopyChannelWithScale( |
254 | 0 | static_cast<const float*>(aInput.mChannelData[i]), |
255 | 0 | aInput.mVolume, |
256 | 0 | alignedScaledInput); |
257 | 0 | inputSamples = alignedScaledInput; |
258 | 0 | } else { |
259 | 0 | inputSamples = static_cast<const float*>(aInput.mChannelData[i]); |
260 | 0 | } |
261 | 0 | } else { |
262 | 0 | PodZero(alignedScaledInput, WEBAUDIO_BLOCK_SIZE); |
263 | 0 | inputSamples = alignedScaledInput; |
264 | 0 | } |
265 | 0 | float* outputBuffer = aOutput->ChannelFloatsForWrite(i); |
266 | 0 | float* sampleBuffer; |
267 | 0 | |
268 | 0 | switch (mType) { |
269 | 0 | case OverSampleType::None: |
270 | 0 | mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::None); |
271 | 0 | ProcessCurve<1>(inputSamples, outputBuffer); |
272 | 0 | break; |
273 | 0 | case OverSampleType::_2x: |
274 | 0 | mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::_2x); |
275 | 0 | sampleBuffer = mResampler.UpSample(i, inputSamples, 2); |
276 | 0 | ProcessCurve<2>(sampleBuffer, sampleBuffer); |
277 | 0 | mResampler.DownSample(i, outputBuffer, 2); |
278 | 0 | break; |
279 | 0 | case OverSampleType::_4x: |
280 | 0 | mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::_4x); |
281 | 0 | sampleBuffer = mResampler.UpSample(i, inputSamples, 4); |
282 | 0 | ProcessCurve<4>(sampleBuffer, sampleBuffer); |
283 | 0 | mResampler.DownSample(i, outputBuffer, 4); |
284 | 0 | break; |
285 | 0 | default: |
286 | 0 | MOZ_ASSERT_UNREACHABLE("We should never reach here"); |
287 | 0 | } |
288 | 0 | } |
289 | 0 | } |
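
The silent-input branch near the top of ProcessBlock relies on one observation: an all-zero input only needs to produce output when the curve maps an input of 0.0 to something non-zero. A minimal sketch of that check, with illustrative names and example curves:

    #include <cstdint>
    #include <vector>

    // True if a null (silent) input block may stay null: evaluate the curve at
    // its midpoint, which is where an input sample of 0.0 lands.
    static bool SilentInputStaysSilent(const std::vector<float>& aCurve)
    {
      float index = (aCurve.size() - 1) * 0.5f;
      uint32_t lower = static_cast<uint32_t>(index);
      float t = index - lower;
      float valueAtZero = (1.0f - t) * aCurve[lower] + t * aCurve[lower + 1];
      return valueAtZero == 0.0f;
    }
    // e.g. {-1.0f, 0.0f, 1.0f} -> true  (silence maps to 0, the output stays null)
    //      { 0.5f, 0.5f }      -> false (a constant 0.5 block must be produced)
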
290 | | |
291 | | size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override |
292 | 0 | { |
293 | 0 | size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); |
294 | 0 | amount += mCurve.ShallowSizeOfExcludingThis(aMallocSizeOf); |
295 | 0 | amount += mResampler.SizeOfExcludingThis(aMallocSizeOf); |
296 | 0 | return amount; |
297 | 0 | } |
298 | | |
299 | | size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override |
300 | 0 | { |
301 | 0 | return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
302 | 0 | } |
303 | | |
304 | | private: |
305 | | nsTArray<float> mCurve; |
306 | | OverSampleType mType; |
307 | | Resampler mResampler; |
308 | | }; |
309 | | |
310 | | WaveShaperNode::WaveShaperNode(AudioContext* aContext) |
311 | | : AudioNode(aContext, |
312 | | 2, |
313 | | ChannelCountMode::Max, |
314 | | ChannelInterpretation::Speakers) |
315 | | , mType(OverSampleType::None) |
316 | 0 | { |
317 | 0 | WaveShaperNodeEngine* engine = new WaveShaperNodeEngine(this); |
318 | 0 | mStream = AudioNodeStream::Create(aContext, engine, |
319 | 0 | AudioNodeStream::NO_STREAM_FLAGS, |
320 | 0 | aContext->Graph()); |
321 | 0 | } |
322 | | |
323 | | /* static */ already_AddRefed<WaveShaperNode> |
324 | | WaveShaperNode::Create(AudioContext& aAudioContext, |
325 | | const WaveShaperOptions& aOptions, |
326 | | ErrorResult& aRv) |
327 | 0 | { |
328 | 0 | if (aAudioContext.CheckClosed(aRv)) { |
329 | 0 | return nullptr; |
330 | 0 | } |
331 | 0 | |
332 | 0 | RefPtr<WaveShaperNode> audioNode = new WaveShaperNode(&aAudioContext); |
333 | 0 | |
334 | 0 | audioNode->Initialize(aOptions, aRv); |
335 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
336 | 0 | return nullptr; |
337 | 0 | } |
338 | 0 | |
339 | 0 | if (aOptions.mCurve.WasPassed()) { |
340 | 0 | audioNode->SetCurveInternal(aOptions.mCurve.Value(), aRv); |
341 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
342 | 0 | return nullptr; |
343 | 0 | } |
344 | 0 | } |
345 | 0 | |
346 | 0 | audioNode->SetOversample(aOptions.mOversample); |
347 | 0 | return audioNode.forget(); |
348 | 0 | } |
349 | | |
350 | | JSObject* |
351 | | WaveShaperNode::WrapObject(JSContext *aCx, JS::Handle<JSObject*> aGivenProto) |
352 | 0 | { |
353 | 0 | return WaveShaperNode_Binding::Wrap(aCx, this, aGivenProto); |
354 | 0 | } |
355 | | |
356 | | void |
357 | | WaveShaperNode::SetCurve(const Nullable<Float32Array>& aCurve, ErrorResult& aRv) |
358 | 0 | { |
359 | 0 | // Let's purge the cached value for the curve attribute. |
360 | 0 | WaveShaperNode_Binding::ClearCachedCurveValue(this); |
361 | 0 | |
362 | 0 | if (aCurve.IsNull()) { |
363 | 0 | CleanCurveInternal(); |
364 | 0 | return; |
365 | 0 | } |
366 | 0 | |
367 | 0 | const Float32Array& floats = aCurve.Value(); |
368 | 0 | |
369 | 0 | floats.ComputeLengthAndData(); |
370 | 0 | if (floats.IsShared()) { |
371 | 0 | // Throw if the object is mapping shared memory (must opt in). |
372 | 0 | aRv.ThrowTypeError<MSG_TYPEDARRAY_IS_SHARED>(NS_LITERAL_STRING("Argument of WaveShaperNode.setCurve")); |
373 | 0 | return; |
374 | 0 | } |
375 | 0 | |
376 | 0 | nsTArray<float> curve; |
377 | 0 | uint32_t argLength = floats.Length(); |
378 | 0 | if (!curve.SetLength(argLength, fallible)) { |
379 | 0 | aRv.Throw(NS_ERROR_OUT_OF_MEMORY); |
380 | 0 | return; |
381 | 0 | } |
382 | 0 | |
383 | 0 | PodCopy(curve.Elements(), floats.Data(), argLength); |
384 | 0 | SetCurveInternal(curve, aRv); |
385 | 0 | } |
386 | | |
387 | | void |
388 | | WaveShaperNode::SetCurveInternal(const nsTArray<float>& aCurve, |
389 | | ErrorResult& aRv) |
390 | 0 | { |
391 | 0 | if (aCurve.Length() < 2) { |
392 | 0 | aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); |
393 | 0 | return; |
394 | 0 | } |
395 | 0 | |
396 | 0 | mCurve = aCurve; |
397 | 0 | SendCurveToStream(); |
398 | 0 | } |
399 | | |
400 | | void |
401 | | WaveShaperNode::CleanCurveInternal() |
402 | 0 | { |
403 | 0 | mCurve.Clear(); |
404 | 0 | SendCurveToStream(); |
405 | 0 | } |
406 | | |
407 | | void |
408 | | WaveShaperNode::SendCurveToStream() |
409 | 0 | { |
410 | 0 | AudioNodeStream* ns = mStream; |
411 | 0 | MOZ_ASSERT(ns, "Why don't we have a stream here?"); |
412 | 0 | |
413 | 0 | nsTArray<float> copyCurve(mCurve); |
414 | 0 | ns->SetRawArrayData(copyCurve); |
415 | 0 | } |
416 | | |
417 | | void |
418 | | WaveShaperNode::GetCurve(JSContext* aCx, |
419 | | JS::MutableHandle<JSObject*> aRetval) |
420 | 0 | { |
421 | 0 | // Let's return a null value if the list is empty. |
422 | 0 | if (mCurve.IsEmpty()) { |
423 | 0 | aRetval.set(nullptr); |
424 | 0 | return; |
425 | 0 | } |
426 | 0 | |
427 | 0 | MOZ_ASSERT(mCurve.Length() >= 2); |
428 | 0 | aRetval.set(Float32Array::Create(aCx, this, mCurve.Length(), |
429 | 0 | mCurve.Elements())); |
430 | 0 | } |
431 | | |
432 | | void |
433 | | WaveShaperNode::SetOversample(OverSampleType aType) |
434 | 0 | { |
435 | 0 | mType = aType; |
436 | 0 | SendInt32ParameterToStream(WaveShaperNodeEngine::TYPE, |
437 | 0 | static_cast<int32_t>(aType)); |
438 | 0 | } |
439 | | |
440 | | } // namespace dom |
441 | | } // namespace mozilla |