/src/mozilla-central/dom/media/webaudio/DelayNode.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "DelayNode.h" |
8 | | #include "mozilla/dom/DelayNodeBinding.h" |
9 | | #include "AudioNodeEngine.h" |
10 | | #include "AudioNodeStream.h" |
11 | | #include "AudioDestinationNode.h" |
12 | | #include "WebAudioUtils.h" |
13 | | #include "DelayBuffer.h" |
14 | | #include "PlayingRefChangeHandler.h" |
15 | | |
16 | | namespace mozilla { |
17 | | namespace dom { |
18 | | |
// Cycle collection: traverse/unlink mDelay (the "delayTime" AudioParam) in
// addition to the members already handled by the AudioNode base class.
NS_IMPL_CYCLE_COLLECTION_INHERITED(DelayNode, AudioNode,
                                   mDelay)

// DelayNode exposes no interfaces beyond those of AudioNode.
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(DelayNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

// Reference counting is forwarded to the AudioNode base class.
NS_IMPL_ADDREF_INHERITED(DelayNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(DelayNode, AudioNode)
27 | | |
// Audio-thread engine backing a DelayNode.  Each ProcessBlock() writes the
// input block into mBuffer and reads it back delayed by the (possibly
// automated) "delayTime" parameter.  The engine also keeps the node alive
// (via PlayingRefChangeHandler) until the buffered tail has fully drained
// after the input goes silent.
class DelayNodeEngine final : public AudioNodeEngine
{
  typedef PlayingRefChangeHandler PlayingRefChanged;
public:
  // aMaxDelayTicks is the maximum delay expressed in sample frames at the
  // destination stream's rate (computed by DelayNode::DelayNode).
  DelayNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination,
                  float aMaxDelayTicks)
    : AudioNodeEngine(aNode)
    , mDestination(aDestination->Stream())
    // Keep the default value in sync with the default value in DelayNode::DelayNode.
    , mDelay(0.f)
    // The buffer must be able to hold at least one full block so that
    // ProduceBlockBeforeInput() can read a whole block when in a cycle.
    // NOTE(review): an older comment here mentioned "a smoothing range of
    // 20ms", but DelayBuffer is constructed with only a maximum length —
    // presumably stale; confirm against DelayBuffer.h.
    , mBuffer(std::max(aMaxDelayTicks,
                       static_cast<float>(WEBAUDIO_BLOCK_SIZE)))
    , mMaxDelay(aMaxDelayTicks)
    , mHaveProducedBeforeInput(false)
    // INT32_MIN is the "inactive" sentinel: no buffered tail left to flush.
    , mLeftOverData(INT32_MIN)
  {
  }

  // Lets AudioNodeStream identify delay engines (used for cycle handling).
  DelayNodeEngine* AsDelayNodeEngine() override
  {
    return this;
  }

  enum Parameters {
    DELAY,
  };
  // Receives a timeline event for the delayTime AudioParam from the main
  // thread.  Event times are converted from seconds to stream ticks before
  // being inserted into the timeline.
  void RecvTimelineEvent(uint32_t aIndex,
                         AudioTimelineEvent& aEvent) override
  {
    MOZ_ASSERT(mDestination);
    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
                                                    mDestination);

    switch (aIndex) {
    case DELAY:
      mDelay.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad DelayNodeEngine TimelineParameter");
    }
  }

  // Per-block processing: track playing/tail state, write the input into the
  // delay buffer, and (unless a cycle already produced output this iteration)
  // read the delayed output.
  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool* aFinished) override
  {
    MOZ_ASSERT(aStream->SampleRate() == mDestination->SampleRate());

    if (!aInput.IsSilentOrSubnormal()) {
      if (mLeftOverData <= 0) {
        // Transition to "playing": take a self-reference on the main thread
        // so the node stays alive while it still has a tail to emit.
        RefPtr<PlayingRefChanged> refchanged =
          new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
        aStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
          refchanged.forget());
      }
      // After any non-silent input, a full buffer's worth of tail remains.
      mLeftOverData = mBuffer.MaxDelayTicks();
    } else if (mLeftOverData > 0) {
      // Silent input but tail remaining: drain one block's worth.
      mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
    } else {
      if (mLeftOverData != INT32_MIN) {
        // Tail just finished draining: go inactive and drop the self-ref.
        mLeftOverData = INT32_MIN;
        aStream->ScheduleCheckForInactive();

        // Delete our buffered data now we no longer need it
        mBuffer.Reset();

        RefPtr<PlayingRefChanged> refchanged =
          new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE);
        aStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
          refchanged.forget());
      }
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    mBuffer.Write(aInput);

    // Skip output update if mLastChunks has already been set by
    // ProduceBlockBeforeInput() when in a cycle.
    if (!mHaveProducedBeforeInput) {
      UpdateOutputBlock(aStream, aFrom, aOutput, 0.0);
    }
    mHaveProducedBeforeInput = false;
    mBuffer.NextBlock();
  }

  // Reads delayed samples from mBuffer into aOutput.  The delay (in frames)
  // is clamped to [minDelay, mMaxDelay]; minDelay is WEBAUDIO_BLOCK_SIZE when
  // producing output before input in a cycle, 0 otherwise.
  void UpdateOutputBlock(AudioNodeStream* aStream, GraphTime aFrom,
                         AudioBlock* aOutput, float minDelay)
  {
    float maxDelay = mMaxDelay;
    float sampleRate = aStream->SampleRate();
    ChannelInterpretation channelInterpretation =
      aStream->GetChannelInterpretation();
    if (mDelay.HasSimpleValue()) {
      // If this DelayNode is in a cycle, make sure the delay value is at least
      // one block, even if that is greater than maxDelay.
      float delayFrames = mDelay.GetValue() * sampleRate;
      float delayFramesClamped =
        std::max(minDelay, std::min(delayFrames, maxDelay));
      mBuffer.Read(delayFramesClamped, aOutput, channelInterpretation);
    } else {
      // Compute the delay values for the duration of the input AudioChunk
      // If this DelayNode is in a cycle, make sure the delay value is at least
      // one block.
      StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom);
      float values[WEBAUDIO_BLOCK_SIZE];
      mDelay.GetValuesAtTime(tick, values, WEBAUDIO_BLOCK_SIZE);

      float computedDelay[WEBAUDIO_BLOCK_SIZE];
      for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
        float delayAtTick = values[counter] * sampleRate;
        float delayAtTickClamped =
          std::max(minDelay, std::min(delayAtTick, maxDelay));
        computedDelay[counter] = delayAtTickClamped;
      }
      mBuffer.Read(computedDelay, aOutput, channelInterpretation);
    }
  }

  // Called when this node is part of a cycle: produce output from previously
  // buffered data before this iteration's input arrives, enforcing a minimum
  // delay of one block.
  void ProduceBlockBeforeInput(AudioNodeStream* aStream,
                               GraphTime aFrom,
                               AudioBlock* aOutput) override
  {
    if (mLeftOverData <= 0) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
    } else {
      UpdateOutputBlock(aStream, aFrom, aOutput, WEBAUDIO_BLOCK_SIZE);
    }
    mHaveProducedBeforeInput = true;
  }

  // Active while there is buffered tail to flush (i.e. not in the INT32_MIN
  // "inactive" state).
  bool IsActive() const override
  {
    return mLeftOverData != INT32_MIN;
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    // Not owned:
    // - mDestination - probably not owned
    // - mDelay - shares ref with AudioNode, don't count
    amount += mBuffer.SizeOfExcludingThis(aMallocSizeOf);
    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  // Destination stream; used for seconds→ticks conversion of timeline events.
  RefPtr<AudioNodeStream> mDestination;
  // Tick-domain timeline of the "delayTime" parameter.
  AudioParamTimeline mDelay;
  // Ring buffer holding up to mMaxDelay (>= one block) frames of input.
  DelayBuffer mBuffer;
  // Maximum delay in frames, as passed to the constructor.
  float mMaxDelay;
  // True when ProduceBlockBeforeInput() already wrote this iteration's output.
  bool mHaveProducedBeforeInput;
  // How much data we have in our buffer which needs to be flushed out when our inputs
  // finish.  INT32_MIN means inactive (nothing buffered).
  int32_t mLeftOverData;
};
191 | | |
192 | | DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay) |
193 | | : AudioNode(aContext, |
194 | | 2, |
195 | | ChannelCountMode::Max, |
196 | | ChannelInterpretation::Speakers) |
197 | | , mDelay(new AudioParam(this, DelayNodeEngine::DELAY, "delayTime", 0.0f, |
198 | | 0.f, aMaxDelay)) |
199 | 0 | { |
200 | 0 | DelayNodeEngine* engine = |
201 | 0 | new DelayNodeEngine(this, aContext->Destination(), |
202 | 0 | aContext->SampleRate() * aMaxDelay); |
203 | 0 | mStream = AudioNodeStream::Create(aContext, engine, |
204 | 0 | AudioNodeStream::NO_STREAM_FLAGS, |
205 | 0 | aContext->Graph()); |
206 | 0 | } |
207 | | |
208 | | /* static */ already_AddRefed<DelayNode> |
209 | | DelayNode::Create(AudioContext& aAudioContext, |
210 | | const DelayOptions& aOptions, |
211 | | ErrorResult& aRv) |
212 | 0 | { |
213 | 0 | if (aAudioContext.CheckClosed(aRv)) { |
214 | 0 | return nullptr; |
215 | 0 | } |
216 | 0 | |
217 | 0 | if (aOptions.mMaxDelayTime <= 0. || aOptions.mMaxDelayTime >= 180.) { |
218 | 0 | aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
219 | 0 | return nullptr; |
220 | 0 | } |
221 | 0 | |
222 | 0 | RefPtr<DelayNode> audioNode = new DelayNode(&aAudioContext, |
223 | 0 | aOptions.mMaxDelayTime); |
224 | 0 |
|
225 | 0 | audioNode->Initialize(aOptions, aRv); |
226 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
227 | 0 | return nullptr; |
228 | 0 | } |
229 | 0 | |
230 | 0 | audioNode->DelayTime()->SetValue(aOptions.mDelayTime); |
231 | 0 | return audioNode.forget(); |
232 | 0 | } |
233 | | |
234 | | size_t |
235 | | DelayNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
236 | 0 | { |
237 | 0 | size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); |
238 | 0 | amount += mDelay->SizeOfIncludingThis(aMallocSizeOf); |
239 | 0 | return amount; |
240 | 0 | } |
241 | | |
242 | | size_t |
243 | | DelayNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const |
244 | 0 | { |
245 | 0 | return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
246 | 0 | } |
247 | | |
248 | | JSObject* |
249 | | DelayNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) |
250 | 0 | { |
251 | 0 | return DelayNode_Binding::Wrap(aCx, this, aGivenProto); |
252 | 0 | } |
253 | | |
254 | | } // namespace dom |
255 | | } // namespace mozilla |