/src/mozilla-central/dom/media/webaudio/AudioContext.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "AudioContext.h" |
8 | | |
9 | | #include "blink/PeriodicWave.h" |
10 | | |
11 | | #include "mozilla/AutoplayPermissionManager.h" |
12 | | #include "mozilla/ErrorResult.h" |
13 | | #include "mozilla/NotNull.h" |
14 | | #include "mozilla/OwningNonNull.h" |
15 | | #include "mozilla/RefPtr.h" |
16 | | #include "mozilla/Preferences.h" |
17 | | #include "mozilla/StaticPrefs.h" |
18 | | |
19 | | #include "mozilla/dom/AnalyserNode.h" |
20 | | #include "mozilla/dom/AnalyserNodeBinding.h" |
21 | | #include "mozilla/dom/AudioBufferSourceNodeBinding.h" |
22 | | #include "mozilla/dom/AudioContextBinding.h" |
23 | | #include "mozilla/dom/BaseAudioContextBinding.h" |
24 | | #include "mozilla/dom/BiquadFilterNodeBinding.h" |
25 | | #include "mozilla/dom/ChannelMergerNodeBinding.h" |
26 | | #include "mozilla/dom/ChannelSplitterNodeBinding.h" |
27 | | #include "mozilla/dom/ConvolverNodeBinding.h" |
28 | | #include "mozilla/dom/DelayNodeBinding.h" |
29 | | #include "mozilla/dom/DynamicsCompressorNodeBinding.h" |
30 | | #include "mozilla/dom/GainNodeBinding.h" |
31 | | #include "mozilla/dom/IIRFilterNodeBinding.h" |
32 | | #include "mozilla/dom/HTMLMediaElement.h" |
33 | | #include "mozilla/dom/MediaElementAudioSourceNodeBinding.h" |
34 | | #include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h" |
35 | | #include "mozilla/dom/OfflineAudioContextBinding.h" |
36 | | #include "mozilla/dom/OscillatorNodeBinding.h" |
37 | | #include "mozilla/dom/PannerNodeBinding.h" |
38 | | #include "mozilla/dom/PeriodicWaveBinding.h" |
39 | | #include "mozilla/dom/Promise.h" |
40 | | #include "mozilla/dom/StereoPannerNodeBinding.h" |
41 | | #include "mozilla/dom/WaveShaperNodeBinding.h" |
42 | | #include "mozilla/dom/Worklet.h" |
43 | | |
44 | | #include "AudioBuffer.h" |
45 | | #include "AudioBufferSourceNode.h" |
46 | | #include "AudioChannelService.h" |
47 | | #include "AudioDestinationNode.h" |
48 | | #include "AudioListener.h" |
49 | | #include "AudioNodeStream.h" |
50 | | #include "AudioStream.h" |
51 | | #include "AutoplayPolicy.h" |
52 | | #include "BiquadFilterNode.h" |
53 | | #include "ChannelMergerNode.h" |
54 | | #include "ChannelSplitterNode.h" |
55 | | #include "ConstantSourceNode.h" |
56 | | #include "ConvolverNode.h" |
57 | | #include "DelayNode.h" |
58 | | #include "DynamicsCompressorNode.h" |
59 | | #include "GainNode.h" |
60 | | #include "IIRFilterNode.h" |
61 | | #include "MediaElementAudioSourceNode.h" |
62 | | #include "MediaStreamAudioDestinationNode.h" |
63 | | #include "MediaStreamAudioSourceNode.h" |
64 | | #include "MediaStreamGraph.h" |
65 | | #include "nsContentUtils.h" |
66 | | #include "nsGlobalWindowInner.h" |
67 | | #include "nsNetCID.h" |
68 | | #include "nsNetUtil.h" |
69 | | #include "nsPIDOMWindow.h" |
70 | | #include "nsPrintfCString.h" |
71 | | #include "nsRFPService.h" |
72 | | #include "OscillatorNode.h" |
73 | | #include "PannerNode.h" |
74 | | #include "PeriodicWave.h" |
75 | | #include "ScriptProcessorNode.h" |
76 | | #include "StereoPannerNode.h" |
77 | | #include "WaveShaperNode.h" |
78 | | |
79 | | extern mozilla::LazyLogModule gAutoplayPermissionLog; |
80 | | |
81 | | #define AUTOPLAY_LOG(msg, ...) \ |
82 | 0 | MOZ_LOG(gAutoplayPermissionLog, LogLevel::Debug, (msg, ##__VA_ARGS__)) |
83 | | |
84 | | namespace mozilla { |
85 | | namespace dom { |
86 | | |
87 | | // 0 is a special value that MediaStreams use to denote they are not part of an |
88 | | // AudioContext. |
89 | | static dom::AudioContext::AudioContextId gAudioContextId = 1; |
90 | | |
91 | | NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext) |
92 | | |
93 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext) |
94 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination) |
95 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener) |
96 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray) |
97 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK(mPendingResumePromises) |
98 | 0 | if (!tmp->mIsStarted) { |
99 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes) |
100 | 0 | } |
101 | 0 | // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed explicitly. |
102 | 0 | // mAllNodes is an array of weak pointers, ignore it here. |
103 | 0 | // mBasicWaveFormCache cannot participate in cycles, ignore it here. |
104 | 0 | |
105 | 0 | // Remove the weak reference to the global window, as the context is not usable |
106 | 0 | // without mDestination. |
107 | 0 | tmp->DisconnectFromWindow(); |
108 | 0 | NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper) |
109 | | |
110 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext, |
111 | 0 | DOMEventTargetHelper) |
112 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination) |
113 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener) |
114 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray) |
115 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPendingResumePromises) |
116 | 0 | if (!tmp->mIsStarted) { |
117 | 0 | MOZ_ASSERT(tmp->mIsOffline, |
118 | 0 | "Online AudioContexts should always be started"); |
119 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes) |
120 | 0 | } |
121 | 0 | // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed explicitly. |
122 | 0 | // mAllNodes is an array of weak pointers, ignore it here. |
123 | 0 | // mBasicWaveFormCache cannot participate in cycles, ignore it here. |
124 | 0 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END |
125 | | |
126 | | NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper) |
127 | | NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper) |
128 | | |
129 | 0 | NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioContext) |
130 | 0 | NS_INTERFACE_MAP_ENTRY(nsIMemoryReporter) |
131 | 0 | NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper) |
132 | | |
133 | | static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate) |
134 | 0 | { |
135 | 0 | if (aIsOffline || aSampleRate != 0.0) { |
136 | 0 | return aSampleRate; |
137 | 0 | } else { |
138 | 0 | return static_cast<float>(CubebUtils::PreferredSampleRate()); |
139 | 0 | } |
140 | 0 | } |
141 | | |
142 | | AudioContext::AudioContext(nsPIDOMWindowInner* aWindow, |
143 | | bool aIsOffline, |
144 | | uint32_t aNumberOfChannels, |
145 | | uint32_t aLength, |
146 | | float aSampleRate) |
147 | | : DOMEventTargetHelper(aWindow) |
148 | | , mId(gAudioContextId++) |
149 | | , mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate)) |
150 | | , mAudioContextState(AudioContextState::Suspended) |
151 | | , mNumberOfChannels(aNumberOfChannels) |
152 | | , mIsOffline(aIsOffline) |
153 | | , mIsStarted(!aIsOffline) |
154 | | , mIsShutDown(false) |
155 | | , mCloseCalled(false) |
156 | | , mSuspendCalled(false) |
157 | | , mIsDisconnecting(false) |
158 | 0 | { |
159 | 0 | bool mute = aWindow->AddAudioContext(this); |
160 | 0 |
|
161 | 0 | // Note: AudioDestinationNode needs an AudioContext that must already be |
162 | 0 | // bound to the window. |
163 | 0 | bool allowedToStart = AutoplayPolicy::IsAllowedToPlay(*this); |
164 | 0 | mDestination = new AudioDestinationNode(this, |
165 | 0 | aIsOffline, |
166 | 0 | allowedToStart, |
167 | 0 | aNumberOfChannels, |
168 | 0 | aLength, |
169 | 0 | aSampleRate); |
170 | 0 | |
171 | 0 | // The context can't be muted until it has a destination. |
172 | 0 | if (mute) { |
173 | 0 | Mute(); |
174 | 0 | } |
175 | 0 | |
176 | 0 | if (!allowedToStart) { |
177 | 0 | // Not allowed to start; delay the transition from `suspended` to `running`. |
178 | 0 | SuspendInternal(nullptr); |
179 | 0 | EnsureAutoplayRequested(); |
180 | 0 | } |
181 | 0 | |
182 | 0 | FFTBlock::MainThreadInit(); |
183 | 0 | } |
184 | | |
185 | | void |
186 | | AudioContext::EnsureAutoplayRequested() |
187 | 0 | { |
188 | 0 | nsPIDOMWindowInner* parent = GetParentObject(); |
189 | 0 | if (!parent || !parent->AsGlobal()) { |
190 | 0 | return; |
191 | 0 | } |
192 | 0 | |
193 | 0 | RefPtr<AutoplayPermissionManager> request = |
194 | 0 | AutoplayPolicy::RequestFor(*(parent->GetExtantDoc())); |
195 | 0 | if (!request) { |
196 | 0 | return; |
197 | 0 | } |
198 | 0 | |
199 | 0 | RefPtr<AudioContext> self = this; |
200 | 0 | request->RequestWithPrompt() |
201 | 0 | ->Then(parent->AsGlobal()->AbstractMainThreadFor(TaskCategory::Other), |
202 | 0 | __func__, |
203 | 0 | [ self, request ]( |
204 | 0 | bool aApproved) { |
205 | 0 | AUTOPLAY_LOG("%p Autoplay request approved request=%p", |
206 | 0 | self.get(), |
207 | 0 | request.get()); |
208 | 0 | self->ResumeInternal(); |
209 | 0 | }, |
210 | 0 | [self, request](nsresult aError) { |
211 | 0 | AUTOPLAY_LOG("%p Autoplay request denied request=%p", |
212 | 0 | self.get(), |
213 | 0 | request.get()); |
214 | 0 | self->DispatchBlockedEvent(); |
215 | 0 | }); |
216 | 0 | } |
217 | | |
218 | | nsresult |
219 | | AudioContext::Init() |
220 | 0 | { |
221 | 0 | if (!mIsOffline) { |
222 | 0 | nsresult rv = mDestination->CreateAudioChannelAgent(); |
223 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
224 | 0 | return rv; |
225 | 0 | } |
226 | 0 | } |
227 | 0 | |
228 | 0 | return NS_OK; |
229 | 0 | } |
230 | | |
231 | | void |
232 | | AudioContext::DisconnectFromWindow() |
233 | 0 | { |
234 | 0 | nsPIDOMWindowInner* window = GetOwner(); |
235 | 0 | if (window) { |
236 | 0 | window->RemoveAudioContext(this); |
237 | 0 | } |
238 | 0 | } |
239 | | |
240 | | AudioContext::~AudioContext() |
241 | 0 | { |
242 | 0 | DisconnectFromWindow(); |
243 | 0 | UnregisterWeakMemoryReporter(this); |
244 | 0 | } |
245 | | |
246 | | JSObject* |
247 | | AudioContext::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) |
248 | 0 | { |
249 | 0 | if (mIsOffline) { |
250 | 0 | return OfflineAudioContext_Binding::Wrap(aCx, this, aGivenProto); |
251 | 0 | } else { |
252 | 0 | return AudioContext_Binding::Wrap(aCx, this, aGivenProto); |
253 | 0 | } |
254 | 0 | } |
255 | | |
256 | | /* static */ already_AddRefed<AudioContext> |
257 | | AudioContext::Constructor(const GlobalObject& aGlobal, |
258 | | const AudioContextOptions& aOptions, |
259 | | ErrorResult& aRv) |
260 | 0 | { |
261 | 0 | // Audio playback is not yet supported when recording or replaying. See bug 1304147. |
262 | 0 | if (recordreplay::IsRecordingOrReplaying()) { |
263 | 0 | aRv.Throw(NS_ERROR_NOT_AVAILABLE); |
264 | 0 | return nullptr; |
265 | 0 | } |
266 | 0 | |
267 | 0 | nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal.GetAsSupports()); |
268 | 0 | if (!window) { |
269 | 0 | aRv.Throw(NS_ERROR_FAILURE); |
270 | 0 | return nullptr; |
271 | 0 | } |
272 | 0 | |
273 | 0 | float sampleRate = MediaStreamGraph::REQUEST_DEFAULT_SAMPLE_RATE; |
274 | 0 | if (Preferences::GetBool("media.webaudio.audiocontextoptions-samplerate.enabled")) { |
275 | 0 | if (aOptions.mSampleRate > 0 && |
276 | 0 | (aOptions.mSampleRate - WebAudioUtils::MinSampleRate < 0.0 || |
277 | 0 | WebAudioUtils::MaxSampleRate - aOptions.mSampleRate < 0.0)) { |
278 | 0 | aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
279 | 0 | return nullptr; |
280 | 0 | } |
281 | 0 | sampleRate = aOptions.mSampleRate; |
282 | 0 | } |
283 | 0 | |
284 | 0 | uint32_t maxChannelCount = std::min<uint32_t>(WebAudioUtils::MaxChannelCount, |
285 | 0 | CubebUtils::MaxNumberOfChannels()); |
286 | 0 | RefPtr<AudioContext> object = |
287 | 0 | new AudioContext(window, false, maxChannelCount, |
288 | 0 | 0, sampleRate); |
289 | 0 | aRv = object->Init(); |
290 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
291 | 0 | return nullptr; |
292 | 0 | } |
293 | 0 | |
294 | 0 | RegisterWeakMemoryReporter(object); |
295 | 0 | |
296 | 0 | return object.forget(); |
297 | 0 | } |
298 | | |
299 | | /* static */ already_AddRefed<AudioContext> |
300 | | AudioContext::Constructor(const GlobalObject& aGlobal, |
301 | | const OfflineAudioContextOptions& aOptions, |
302 | | ErrorResult& aRv) |
303 | 0 | { |
304 | 0 | return Constructor(aGlobal, |
305 | 0 | aOptions.mNumberOfChannels, |
306 | 0 | aOptions.mLength, |
307 | 0 | aOptions.mSampleRate, |
308 | 0 | aRv); |
309 | 0 | } |
310 | | |
311 | | /* static */ already_AddRefed<AudioContext> |
312 | | AudioContext::Constructor(const GlobalObject& aGlobal, |
313 | | uint32_t aNumberOfChannels, |
314 | | uint32_t aLength, |
315 | | float aSampleRate, |
316 | | ErrorResult& aRv) |
317 | 0 | { |
318 | 0 | // Audio playback is not yet supported when recording or replaying. See bug 1304147. |
319 | 0 | if (recordreplay::IsRecordingOrReplaying()) { |
320 | 0 | aRv.Throw(NS_ERROR_NOT_AVAILABLE); |
321 | 0 | return nullptr; |
322 | 0 | } |
323 | 0 | |
324 | 0 | nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal.GetAsSupports()); |
325 | 0 | if (!window) { |
326 | 0 | aRv.Throw(NS_ERROR_FAILURE); |
327 | 0 | return nullptr; |
328 | 0 | } |
329 | 0 | |
330 | 0 | if (aNumberOfChannels == 0 || |
331 | 0 | aNumberOfChannels > WebAudioUtils::MaxChannelCount || |
332 | 0 | aLength == 0 || |
333 | 0 | aSampleRate < WebAudioUtils::MinSampleRate || |
334 | 0 | aSampleRate > WebAudioUtils::MaxSampleRate) { |
335 | 0 | // The DOM binding protects us against infinity and NaN |
336 | 0 | aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
337 | 0 | return nullptr; |
338 | 0 | } |
339 | 0 | |
340 | 0 | RefPtr<AudioContext> object = new AudioContext(window, |
341 | 0 | true, |
342 | 0 | aNumberOfChannels, |
343 | 0 | aLength, |
344 | 0 | aSampleRate); |
345 | 0 | |
346 | 0 | RegisterWeakMemoryReporter(object); |
347 | 0 | |
348 | 0 | return object.forget(); |
349 | 0 | } |
350 | | |
351 | | bool AudioContext::CheckClosed(ErrorResult& aRv) |
352 | 0 | { |
353 | 0 | if (mAudioContextState == AudioContextState::Closed || |
354 | 0 | mIsShutDown || |
355 | 0 | mIsDisconnecting) { |
356 | 0 | aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); |
357 | 0 | return true; |
358 | 0 | } |
359 | 0 | return false; |
360 | 0 | } |
361 | | |
362 | | already_AddRefed<AudioBufferSourceNode> |
363 | | AudioContext::CreateBufferSource(ErrorResult& aRv) |
364 | 0 | { |
365 | 0 | return AudioBufferSourceNode::Create(nullptr, *this, |
366 | 0 | AudioBufferSourceOptions(), |
367 | 0 | aRv); |
368 | 0 | } |
369 | | |
370 | | already_AddRefed<ConstantSourceNode> |
371 | | AudioContext::CreateConstantSource(ErrorResult& aRv) |
372 | 0 | { |
373 | 0 | if (CheckClosed(aRv)) { |
374 | 0 | return nullptr; |
375 | 0 | } |
376 | 0 | |
377 | 0 | RefPtr<ConstantSourceNode> constantSourceNode = |
378 | 0 | new ConstantSourceNode(this); |
379 | 0 | return constantSourceNode.forget(); |
380 | 0 | } |
381 | | |
382 | | already_AddRefed<AudioBuffer> |
383 | | AudioContext::CreateBuffer(uint32_t aNumberOfChannels, uint32_t aLength, |
384 | | float aSampleRate, |
385 | | ErrorResult& aRv) |
386 | 0 | { |
387 | 0 | if (!aNumberOfChannels) { |
388 | 0 | aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); |
389 | 0 | return nullptr; |
390 | 0 | } |
391 | 0 | |
392 | 0 | return AudioBuffer::Create(GetOwner(), aNumberOfChannels, aLength, |
393 | 0 | aSampleRate, aRv); |
394 | 0 | } |
395 | | |
396 | | namespace { |
397 | | |
398 | | bool IsValidBufferSize(uint32_t aBufferSize) { |
399 | | switch (aBufferSize) { |
400 | | case 0: // let the implementation choose the buffer size |
401 | | case 256: |
402 | | case 512: |
403 | | case 1024: |
404 | | case 2048: |
405 | | case 4096: |
406 | | case 8192: |
407 | | case 16384: |
408 | | return true; |
409 | | default: |
410 | | return false; |
411 | | } |
412 | | } |
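
The sizes accepted above are exactly 0 (implementation-chosen) or a power of two between 256 and 16384, so the switch can be expressed as a bit test. A minimal equivalent sketch, not taken from the source:

  // Sketch: 0, or a power of two in [256, 16384].
  static bool IsValidBufferSizeSketch(uint32_t aBufferSize)
  {
    if (aBufferSize == 0) {
      return true;  // let the implementation choose the buffer size
    }
    bool isPowerOfTwo = (aBufferSize & (aBufferSize - 1)) == 0;
    return isPowerOfTwo && aBufferSize >= 256 && aBufferSize <= 16384;
  }
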
413 | | |
414 | | } // namespace |
415 | | |
416 | | already_AddRefed<MediaStreamAudioDestinationNode> |
417 | | AudioContext::CreateMediaStreamDestination(ErrorResult& aRv) |
418 | 0 | { |
419 | 0 | return MediaStreamAudioDestinationNode::Create(*this, AudioNodeOptions(), |
420 | 0 | aRv); |
421 | 0 | } |
422 | | |
423 | | already_AddRefed<ScriptProcessorNode> |
424 | | AudioContext::CreateScriptProcessor(uint32_t aBufferSize, |
425 | | uint32_t aNumberOfInputChannels, |
426 | | uint32_t aNumberOfOutputChannels, |
427 | | ErrorResult& aRv) |
428 | 0 | { |
429 | 0 | if ((aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) || |
430 | 0 | aNumberOfInputChannels > WebAudioUtils::MaxChannelCount || |
431 | 0 | aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount || |
432 | 0 | !IsValidBufferSize(aBufferSize)) { |
433 | 0 | aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); |
434 | 0 | return nullptr; |
435 | 0 | } |
436 | 0 | |
437 | 0 | if (CheckClosed(aRv)) { |
438 | 0 | return nullptr; |
439 | 0 | } |
440 | 0 | |
441 | 0 | RefPtr<ScriptProcessorNode> scriptProcessor = |
442 | 0 | new ScriptProcessorNode(this, aBufferSize, aNumberOfInputChannels, |
443 | 0 | aNumberOfOutputChannels); |
444 | 0 | return scriptProcessor.forget(); |
445 | 0 | } |
446 | | |
447 | | already_AddRefed<AnalyserNode> |
448 | | AudioContext::CreateAnalyser(ErrorResult& aRv) |
449 | 0 | { |
450 | 0 | return AnalyserNode::Create(*this, AnalyserOptions(), aRv); |
451 | 0 | } |
452 | | |
453 | | already_AddRefed<StereoPannerNode> |
454 | | AudioContext::CreateStereoPanner(ErrorResult& aRv) |
455 | 0 | { |
456 | 0 | return StereoPannerNode::Create(*this, StereoPannerOptions(), aRv); |
457 | 0 | } |
458 | | |
459 | | already_AddRefed<MediaElementAudioSourceNode> |
460 | | AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement, |
461 | | ErrorResult& aRv) |
462 | 0 | { |
463 | 0 | MediaElementAudioSourceOptions options; |
464 | 0 | options.mMediaElement = aMediaElement; |
465 | 0 |
|
466 | 0 | return MediaElementAudioSourceNode::Create(*this, options, aRv); |
467 | 0 | } |
468 | | |
469 | | already_AddRefed<MediaStreamAudioSourceNode> |
470 | | AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream, |
471 | | ErrorResult& aRv) |
472 | 0 | { |
473 | 0 | MediaStreamAudioSourceOptions options; |
474 | 0 | options.mMediaStream = aMediaStream; |
475 | 0 |
|
476 | 0 | return MediaStreamAudioSourceNode::Create(*this, options, aRv); |
477 | 0 | } |
478 | | |
479 | | already_AddRefed<GainNode> |
480 | | AudioContext::CreateGain(ErrorResult& aRv) |
481 | 0 | { |
482 | 0 | return GainNode::Create(*this, GainOptions(), aRv); |
483 | 0 | } |
484 | | |
485 | | already_AddRefed<WaveShaperNode> |
486 | | AudioContext::CreateWaveShaper(ErrorResult& aRv) |
487 | 0 | { |
488 | 0 | return WaveShaperNode::Create(*this, WaveShaperOptions(), aRv); |
489 | 0 | } |
490 | | |
491 | | already_AddRefed<DelayNode> |
492 | | AudioContext::CreateDelay(double aMaxDelayTime, ErrorResult& aRv) |
493 | 0 | { |
494 | 0 | DelayOptions options; |
495 | 0 | options.mMaxDelayTime = aMaxDelayTime; |
496 | 0 | return DelayNode::Create(*this, options, aRv); |
497 | 0 | } |
498 | | |
499 | | already_AddRefed<PannerNode> |
500 | | AudioContext::CreatePanner(ErrorResult& aRv) |
501 | 0 | { |
502 | 0 | return PannerNode::Create(*this, PannerOptions(), aRv); |
503 | 0 | } |
504 | | |
505 | | already_AddRefed<ConvolverNode> |
506 | | AudioContext::CreateConvolver(ErrorResult& aRv) |
507 | 0 | { |
508 | 0 | return ConvolverNode::Create(nullptr, *this, ConvolverOptions(), aRv); |
509 | 0 | } |
510 | | |
511 | | already_AddRefed<ChannelSplitterNode> |
512 | | AudioContext::CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv) |
513 | 0 | { |
514 | 0 | ChannelSplitterOptions options; |
515 | 0 | options.mNumberOfOutputs = aNumberOfOutputs; |
516 | 0 | return ChannelSplitterNode::Create(*this, options, aRv); |
517 | 0 | } |
518 | | |
519 | | already_AddRefed<ChannelMergerNode> |
520 | | AudioContext::CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv) |
521 | 0 | { |
522 | 0 | ChannelMergerOptions options; |
523 | 0 | options.mNumberOfInputs = aNumberOfInputs; |
524 | 0 | return ChannelMergerNode::Create(*this, options, aRv); |
525 | 0 | } |
526 | | |
527 | | already_AddRefed<DynamicsCompressorNode> |
528 | | AudioContext::CreateDynamicsCompressor(ErrorResult& aRv) |
529 | 0 | { |
530 | 0 | return DynamicsCompressorNode::Create(*this, DynamicsCompressorOptions(), aRv); |
531 | 0 | } |
532 | | |
533 | | already_AddRefed<BiquadFilterNode> |
534 | | AudioContext::CreateBiquadFilter(ErrorResult& aRv) |
535 | 0 | { |
536 | 0 | return BiquadFilterNode::Create(*this, BiquadFilterOptions(), aRv); |
537 | 0 | } |
538 | | |
539 | | already_AddRefed<IIRFilterNode> |
540 | | AudioContext::CreateIIRFilter(const Sequence<double>& aFeedforward, |
541 | | const Sequence<double>& aFeedback, |
542 | | mozilla::ErrorResult& aRv) |
543 | 0 | { |
544 | 0 | IIRFilterOptions options; |
545 | 0 | options.mFeedforward = aFeedforward; |
546 | 0 | options.mFeedback = aFeedback; |
547 | 0 | return IIRFilterNode::Create(*this, options, aRv); |
548 | 0 | } |
549 | | |
550 | | already_AddRefed<OscillatorNode> |
551 | | AudioContext::CreateOscillator(ErrorResult& aRv) |
552 | 0 | { |
553 | 0 | return OscillatorNode::Create(*this, OscillatorOptions(), aRv); |
554 | 0 | } |
555 | | |
556 | | already_AddRefed<PeriodicWave> |
557 | | AudioContext::CreatePeriodicWave(const Float32Array& aRealData, |
558 | | const Float32Array& aImagData, |
559 | | const PeriodicWaveConstraints& aConstraints, |
560 | | ErrorResult& aRv) |
561 | 0 | { |
562 | 0 | aRealData.ComputeLengthAndData(); |
563 | 0 | aImagData.ComputeLengthAndData(); |
564 | 0 |
|
565 | 0 | if (aRealData.Length() != aImagData.Length() || |
566 | 0 | aRealData.Length() == 0) { |
567 | 0 | aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); |
568 | 0 | return nullptr; |
569 | 0 | } |
570 | 0 | |
571 | 0 | RefPtr<PeriodicWave> periodicWave = |
572 | 0 | new PeriodicWave(this, aRealData.Data(), aImagData.Data(), |
573 | 0 | aImagData.Length(), aConstraints.mDisableNormalization, |
574 | 0 | aRv); |
575 | 0 | if (aRv.Failed()) { |
576 | 0 | return nullptr; |
577 | 0 | } |
578 | 0 | return periodicWave.forget(); |
579 | 0 | } |
580 | | |
581 | | AudioListener* |
582 | | AudioContext::Listener() |
583 | 0 | { |
584 | 0 | if (!mListener) { |
585 | 0 | mListener = new AudioListener(this); |
586 | 0 | } |
587 | 0 | return mListener; |
588 | 0 | } |
589 | | |
590 | | Worklet* |
591 | | AudioContext::GetAudioWorklet(ErrorResult& aRv) |
592 | 0 | { |
593 | 0 | if (!mWorklet) { |
594 | 0 | nsCOMPtr<nsPIDOMWindowInner> window = GetOwner(); |
595 | 0 | if (NS_WARN_IF(!window)) { |
596 | 0 | aRv.Throw(NS_ERROR_FAILURE); |
597 | 0 | return nullptr; |
598 | 0 | } |
599 | 0 | nsCOMPtr<nsIPrincipal> principal = |
600 | 0 | nsGlobalWindowInner::Cast(window)->GetPrincipal(); |
601 | 0 | if (NS_WARN_IF(!principal)) { |
602 | 0 | aRv.Throw(NS_ERROR_FAILURE); |
603 | 0 | return nullptr; |
604 | 0 | } |
605 | 0 | |
606 | 0 | mWorklet = new Worklet(window, principal, Worklet::eAudioWorklet); |
607 | 0 | } |
608 | 0 | |
609 | 0 | return mWorklet; |
610 | 0 | } |
611 | | |
612 | | bool |
613 | | AudioContext::IsRunning() const |
614 | 0 | { |
615 | 0 | return mAudioContextState == AudioContextState::Running; |
616 | 0 | } |
617 | | |
618 | | already_AddRefed<Promise> |
619 | | AudioContext::DecodeAudioData(const ArrayBuffer& aBuffer, |
620 | | const Optional<OwningNonNull<DecodeSuccessCallback> >& aSuccessCallback, |
621 | | const Optional<OwningNonNull<DecodeErrorCallback> >& aFailureCallback, |
622 | | ErrorResult& aRv) |
623 | 0 | { |
624 | 0 | nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); |
625 | 0 | RefPtr<Promise> promise; |
626 | 0 | AutoJSAPI jsapi; |
627 | 0 | jsapi.Init(); |
628 | 0 | JSContext* cx = jsapi.cx(); |
629 | 0 | |
630 | 0 | JS::Rooted<JSObject*> obj(cx, js::CheckedUnwrap(aBuffer.Obj())); |
631 | 0 | if (!obj) { |
632 | 0 | aRv.Throw(NS_ERROR_DOM_SECURITY_ERR); |
633 | 0 | return nullptr; |
634 | 0 | } |
635 | 0 | |
636 | 0 | JSAutoRealm ar(cx, obj); |
637 | 0 | |
638 | 0 | promise = Promise::Create(parentObject, aRv); |
639 | 0 | if (aRv.Failed()) { |
640 | 0 | return nullptr; |
641 | 0 | } |
642 | 0 | |
643 | 0 | aBuffer.ComputeLengthAndData(); |
644 | 0 | |
645 | 0 | if (aBuffer.IsShared()) { |
646 | 0 | // Throw if the object is mapping shared memory (must opt in). |
647 | 0 | aRv.ThrowTypeError<MSG_TYPEDARRAY_IS_SHARED>(NS_LITERAL_STRING("Argument of AudioContext.decodeAudioData")); |
648 | 0 | return nullptr; |
649 | 0 | } |
650 | 0 | |
651 | 0 | if (!aBuffer.Data()) { |
652 | 0 | // Throw if the buffer is detached |
653 | 0 | aRv.ThrowTypeError<MSG_TYPEDARRAY_IS_DETACHED>(NS_LITERAL_STRING("Argument of AudioContext.decodeAudioData")); |
654 | 0 | return nullptr; |
655 | 0 | } |
656 | 0 | |
657 | 0 | // Detach the array buffer |
658 | 0 | size_t length = aBuffer.Length(); |
659 | 0 | |
660 | 0 | uint8_t* data = static_cast<uint8_t*>(JS_StealArrayBufferContents(cx, obj)); |
661 | 0 | |
662 | 0 | // Sniff the content of the media. |
663 | 0 | // Failed type sniffing will be handled by AsyncDecodeWebAudio. |
664 | 0 | nsAutoCString contentType; |
665 | 0 | NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr, data, length, contentType); |
666 | 0 | |
667 | 0 | RefPtr<DecodeErrorCallback> failureCallback; |
668 | 0 | RefPtr<DecodeSuccessCallback> successCallback; |
669 | 0 | if (aFailureCallback.WasPassed()) { |
670 | 0 | failureCallback = &aFailureCallback.Value(); |
671 | 0 | } |
672 | 0 | if (aSuccessCallback.WasPassed()) { |
673 | 0 | successCallback = &aSuccessCallback.Value(); |
674 | 0 | } |
675 | 0 | UniquePtr<WebAudioDecodeJob> job( |
676 | 0 | new WebAudioDecodeJob(this, |
677 | 0 | promise, successCallback, failureCallback)); |
678 | 0 | AsyncDecodeWebAudio(contentType.get(), data, length, *job); |
679 | 0 | // Transfer the ownership to mDecodeJobs |
680 | 0 | mDecodeJobs.AppendElement(std::move(job)); |
681 | 0 | |
682 | 0 | return promise.forget(); |
683 | 0 | } |
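
A note on the buffer handling above: JS_StealArrayBufferContents takes ownership of the bytes and detaches the caller's ArrayBuffer, which is why the two TypeError guards precede it. A condensed sketch of that pattern (error paths elided, names as in the source):

  // Stealing requires an ordinary, still-attached ArrayBuffer:
  // shared memory cannot be stolen, and a detached buffer has no data.
  uint8_t* data = static_cast<uint8_t*>(JS_StealArrayBufferContents(cx, obj));
  // After this call the caller's ArrayBuffer is detached; its byteLength
  // reads back as 0 in script, and the decode job owns `data`.
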
684 | | |
685 | | void |
686 | | AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob) |
687 | 0 | { |
688 | 0 | // Since UniquePtr doesn't provide an operator== which allows you to compare |
689 | 0 | // against raw pointers, we need to iterate manually. |
690 | 0 | for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) { |
691 | 0 | if (mDecodeJobs[i].get() == aDecodeJob) { |
692 | 0 | mDecodeJobs.RemoveElementAt(i); |
693 | 0 | break; |
694 | 0 | } |
695 | 0 | } |
696 | 0 | } |
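
A more compact equivalent of the loop above, assuming nsTArray::RemoveElementsBy is available in this tree (decode jobs are unique, so removing all matches is the same as removing the first):

  mDecodeJobs.RemoveElementsBy(
    [aDecodeJob](const UniquePtr<WebAudioDecodeJob>& aJob) {
      return aJob.get() == aDecodeJob;
    });
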
697 | | |
698 | | void |
699 | | AudioContext::RegisterActiveNode(AudioNode* aNode) |
700 | 0 | { |
701 | 0 | if (!mIsShutDown) { |
702 | 0 | mActiveNodes.PutEntry(aNode); |
703 | 0 | } |
704 | 0 | } |
705 | | |
706 | | void |
707 | | AudioContext::UnregisterActiveNode(AudioNode* aNode) |
708 | 0 | { |
709 | 0 | mActiveNodes.RemoveEntry(aNode); |
710 | 0 | } |
711 | | |
712 | | uint32_t |
713 | | AudioContext::MaxChannelCount() const |
714 | 0 | { |
715 | 0 | return std::min<uint32_t>(WebAudioUtils::MaxChannelCount, |
716 | 0 | mIsOffline ? mNumberOfChannels : CubebUtils::MaxNumberOfChannels()); |
717 | 0 | } |
718 | | |
719 | | uint32_t |
720 | | AudioContext::ActiveNodeCount() const |
721 | 0 | { |
722 | 0 | return mActiveNodes.Count(); |
723 | 0 | } |
724 | | |
725 | | MediaStreamGraph* |
726 | | AudioContext::Graph() const |
727 | 0 | { |
728 | 0 | return Destination()->Stream()->Graph(); |
729 | 0 | } |
730 | | |
731 | | MediaStream* |
732 | | AudioContext::DestinationStream() const |
733 | 0 | { |
734 | 0 | if (Destination()) { |
735 | 0 | return Destination()->Stream(); |
736 | 0 | } |
737 | 0 | return nullptr; |
738 | 0 | } |
739 | | |
740 | | double |
741 | | AudioContext::CurrentTime() |
742 | 0 | { |
743 | 0 | MediaStream* stream = Destination()->Stream(); |
744 | 0 | |
745 | 0 | double rawTime = stream->StreamTimeToSeconds(stream->GetCurrentTime()); |
746 | 0 | |
747 | 0 | // CurrentTime increments in intervals of 128/sampleRate. If the Timer |
748 | 0 | // Precision Reduction is smaller than this interval, the jittered time |
749 | 0 | // can always be reversed to the raw step of the interval. In that case |
750 | 0 | // we can simply return the un-reduced time and avoid breaking tests. |
751 | 0 | // We have to convert each variable to a common magnitude; we choose ms. |
752 | 0 | if ((128/mSampleRate) * 1000.0 > nsRFPService::TimerResolution() / 1000.0) { |
753 | 0 | return rawTime; |
754 | 0 | } |
755 | 0 | |
756 | 0 | // The value of a MediaStream's CurrentTime will always advance forward; it will never |
757 | 0 | // reset (even if one rewinds a video). Therefore we can use a single random seed |
758 | 0 | // initialized at the same time as the object. |
759 | 0 | return nsRFPService::ReduceTimePrecisionAsSecs( |
760 | 0 | rawTime, GetRandomTimelineSeed()); |
761 | 0 | } |
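
A worked instance of the threshold above, under assumed values (not from the source): at 44100 Hz one render quantum lasts 128 / 44100 ≈ 2.9 ms; if the timer resolution is, say, 100 µs (0.1 ms after the division by 1000), the quantum is coarser than the reduction, so the raw time is returned:

  float sampleRate = 44100.0f;                       // assumed
  double timerResolutionUs = 100.0;                  // assumed, in microseconds
  double quantumMs = (128 / sampleRate) * 1000.0;    // ~2.90 ms per quantum
  double resolutionMs = timerResolutionUs / 1000.0;  // 0.1 ms
  bool returnRaw = quantumMs > resolutionMs;         // true: jitter is reversible
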
762 | | |
763 | | void AudioContext::DisconnectFromOwner() |
764 | 0 | { |
765 | 0 | mIsDisconnecting = true; |
766 | 0 | Shutdown(); |
767 | 0 | DOMEventTargetHelper::DisconnectFromOwner(); |
768 | 0 | } |
769 | | |
770 | | void |
771 | | AudioContext::BindToOwner(nsIGlobalObject* aNew) |
772 | 0 | { |
773 | 0 | auto scopeExit = MakeScopeExit([&] { |
774 | 0 | DOMEventTargetHelper::BindToOwner(aNew); |
775 | 0 | }); |
776 | 0 | |
777 | 0 | if (GetOwner()) { |
778 | 0 | GetOwner()->RemoveAudioContext(this); |
779 | 0 | } |
780 | 0 | |
781 | 0 | nsCOMPtr<nsPIDOMWindowInner> newWindow = do_QueryInterface(aNew); |
782 | 0 | if (newWindow) { |
783 | 0 | newWindow->AddAudioContext(this); |
784 | 0 | } |
785 | 0 | } |
786 | | |
787 | | void |
788 | | AudioContext::Shutdown() |
789 | 0 | { |
790 | 0 | mIsShutDown = true; |
791 | 0 | |
792 | 0 | // We don't want to touch promises if the global is going away soon. |
793 | 0 | if (!mIsDisconnecting) { |
794 | 0 | if (!mIsOffline) { |
795 | 0 | RefPtr<Promise> ignored = Close(IgnoreErrors()); |
796 | 0 | } |
797 | 0 | |
798 | 0 | for (auto p : mPromiseGripArray) { |
799 | 0 | p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR); |
800 | 0 | } |
801 | 0 | |
802 | 0 | mPromiseGripArray.Clear(); |
803 | 0 | |
804 | 0 | for (const auto& p : mPendingResumePromises) { |
805 | 0 | p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR); |
806 | 0 | } |
807 | 0 | mPendingResumePromises.Clear(); |
808 | 0 | } |
809 | 0 | |
810 | 0 | // Release references to active nodes. |
811 | 0 | // Active AudioNodes don't unregister in destructors, at which point the |
812 | 0 | // Node is already unregistered. |
813 | 0 | mActiveNodes.Clear(); |
814 | 0 | |
815 | 0 | // For offline contexts, we can destroy the MediaStreamGraph at this point. |
816 | 0 | if (mIsOffline && mDestination) { |
817 | 0 | mDestination->OfflineShutdown(); |
818 | 0 | } |
819 | 0 | } |
820 | | |
821 | | StateChangeTask::StateChangeTask(AudioContext* aAudioContext, |
822 | | void* aPromise, |
823 | | AudioContextState aNewState) |
824 | | : Runnable("dom::StateChangeTask") |
825 | | , mAudioContext(aAudioContext) |
826 | | , mPromise(aPromise) |
827 | | , mAudioNodeStream(nullptr) |
828 | | , mNewState(aNewState) |
829 | 0 | { |
830 | 0 | MOZ_ASSERT(NS_IsMainThread(), |
831 | 0 | "This constructor should be used from the main thread."); |
832 | 0 | } |
833 | | |
834 | | StateChangeTask::StateChangeTask(AudioNodeStream* aStream, |
835 | | void* aPromise, |
836 | | AudioContextState aNewState) |
837 | | : Runnable("dom::StateChangeTask") |
838 | | , mAudioContext(nullptr) |
839 | | , mPromise(aPromise) |
840 | | , mAudioNodeStream(aStream) |
841 | | , mNewState(aNewState) |
842 | 0 | { |
843 | 0 | MOZ_ASSERT(!NS_IsMainThread(), |
844 | 0 | "This constructor should be used from the graph thread."); |
845 | 0 | } |
846 | | |
847 | | NS_IMETHODIMP |
848 | | StateChangeTask::Run() |
849 | 0 | { |
850 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
851 | 0 | |
852 | 0 | if (!mAudioContext && !mAudioNodeStream) { |
853 | 0 | return NS_OK; |
854 | 0 | } |
855 | 0 | if (mAudioNodeStream) { |
856 | 0 | AudioNode* node = mAudioNodeStream->Engine()->NodeMainThread(); |
857 | 0 | if (!node) { |
858 | 0 | return NS_OK; |
859 | 0 | } |
860 | 0 | mAudioContext = node->Context(); |
861 | 0 | if (!mAudioContext) { |
862 | 0 | return NS_OK; |
863 | 0 | } |
864 | 0 | } |
865 | 0 | |
866 | 0 | mAudioContext->OnStateChanged(mPromise, mNewState); |
867 | 0 | // We can't call Release() on the AudioContext on the MSG thread, so we |
868 | 0 | // unref it here, on the main thread. |
869 | 0 | mAudioContext = nullptr; |
870 | 0 | |
871 | 0 | return NS_OK; |
872 | 0 | } |
873 | | |
874 | | /* This runnable fires the "statechange" event. */ |
875 | | class OnStateChangeTask final : public Runnable |
876 | | { |
877 | | public: |
878 | | explicit OnStateChangeTask(AudioContext* aAudioContext) |
879 | | : Runnable("dom::OnStateChangeTask") |
880 | | , mAudioContext(aAudioContext) |
881 | 0 | {} |
882 | | |
883 | | NS_IMETHODIMP |
884 | | Run() override |
885 | 0 | { |
886 | 0 | nsPIDOMWindowInner* parent = mAudioContext->GetParentObject(); |
887 | 0 | if (!parent) { |
888 | 0 | return NS_ERROR_FAILURE; |
889 | 0 | } |
890 | 0 | |
891 | 0 | nsIDocument* doc = parent->GetExtantDoc(); |
892 | 0 | if (!doc) { |
893 | 0 | return NS_ERROR_FAILURE; |
894 | 0 | } |
895 | 0 | |
896 | 0 | return nsContentUtils::DispatchTrustedEvent(doc, |
897 | 0 | static_cast<DOMEventTargetHelper*>(mAudioContext), |
898 | 0 | NS_LITERAL_STRING("statechange"), |
899 | 0 | CanBubble::eNo, Cancelable::eNo); |
900 | 0 | } |
901 | | |
902 | | private: |
903 | | RefPtr<AudioContext> mAudioContext; |
904 | | }; |
905 | | |
906 | | |
907 | | void |
908 | | AudioContext::Dispatch(already_AddRefed<nsIRunnable>&& aRunnable) |
909 | 0 | { |
910 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
911 | 0 | nsCOMPtr<nsIGlobalObject> parentObject = |
912 | 0 | do_QueryInterface(GetParentObject()); |
913 | 0 | // It can happen that this runnable took a long time to reach the main thread, |
914 | 0 | // and the global is not valid anymore. |
915 | 0 | if (parentObject) { |
916 | 0 | parentObject->AbstractMainThreadFor(TaskCategory::Other) |
917 | 0 | ->Dispatch(std::move(aRunnable)); |
918 | 0 | } else { |
919 | 0 | RefPtr<nsIRunnable> runnable(aRunnable); |
920 | 0 | runnable = nullptr; |
921 | 0 | } |
922 | 0 | } |
923 | | |
924 | | void |
925 | | AudioContext::OnStateChanged(void* aPromise, AudioContextState aNewState) |
926 | 0 | { |
927 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
928 | 0 | |
929 | 0 | // This can happen if close() was called right after creating the |
930 | 0 | // AudioContext, before the context has switched to "running". |
931 | 0 | if (mAudioContextState == AudioContextState::Closed && |
932 | 0 | aNewState == AudioContextState::Running && |
933 | 0 | !aPromise) { |
934 | 0 | return; |
935 | 0 | } |
936 | 0 | |
937 | 0 | // This can happen if this is called in reaction to a |
938 | 0 | // MediaStreamGraph shutdown, and an AudioContext was being |
939 | 0 | // suspended at the same time, for example if a page was being |
940 | 0 | // closed. |
941 | 0 | if (mAudioContextState == AudioContextState::Closed && |
942 | 0 | aNewState == AudioContextState::Suspended) { |
943 | 0 | return; |
944 | 0 | } |
945 | 0 | |
946 | 0 | #ifndef WIN32 // Bug 1170547 |
947 | 0 | #ifndef XP_MACOSX |
948 | | #ifdef DEBUG |
949 | | |
950 | | if (!((mAudioContextState == AudioContextState::Suspended && |
951 | | aNewState == AudioContextState::Running) || |
952 | | (mAudioContextState == AudioContextState::Running && |
953 | | aNewState == AudioContextState::Suspended) || |
954 | | (mAudioContextState == AudioContextState::Running && |
955 | | aNewState == AudioContextState::Closed) || |
956 | | (mAudioContextState == AudioContextState::Suspended && |
957 | | aNewState == AudioContextState::Closed) || |
958 | | (mAudioContextState == aNewState))) { |
959 | | fprintf(stderr, |
960 | | "Invalid transition: mAudioContextState: %d -> aNewState %d\n", |
961 | | static_cast<int>(mAudioContextState), static_cast<int>(aNewState)); |
962 | | MOZ_ASSERT(false); |
963 | | } |
964 | | |
965 | | #endif // DEBUG |
966 | | #endif // XP_MACOSX |
967 | 0 | #endif // WIN32 |
968 | 0 | |
969 | 0 | if (aPromise) { |
970 | 0 | Promise* promise = reinterpret_cast<Promise*>(aPromise); |
971 | 0 | // It is possible for the promise to have been removed from |
972 | 0 | // mPromiseGripArray if the cycle collector has severed our connections. DO |
973 | 0 | // NOT dereference the promise pointer in that case since it may point to |
974 | 0 | // already freed memory. |
975 | 0 | if (mPromiseGripArray.Contains(promise)) { |
976 | 0 | promise->MaybeResolveWithUndefined(); |
977 | 0 | DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise); |
978 | 0 | MOZ_ASSERT(rv, "Promise wasn't in the grip array?"); |
979 | 0 | } |
980 | 0 | } |
981 | 0 | |
982 | 0 | // Resolve all pending promises once the audio context has been allowed to |
983 | 0 | // start. |
984 | 0 | if (aNewState == AudioContextState::Running) { |
985 | 0 | for (const auto& p : mPendingResumePromises) { |
986 | 0 | p->MaybeResolveWithUndefined(); |
987 | 0 | } |
988 | 0 | mPendingResumePromises.Clear(); |
989 | 0 | } |
990 | 0 | |
991 | 0 | if (mAudioContextState != aNewState) { |
992 | 0 | RefPtr<OnStateChangeTask> task = new OnStateChangeTask(this); |
993 | 0 | Dispatch(task.forget()); |
994 | 0 | } |
995 | 0 | |
996 | 0 | mAudioContextState = aNewState; |
997 | 0 | } |
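
The DEBUG block above encodes the legal state machine; summarized as a sketch:

  // Suspended -> Running | Closed | Suspended
  // Running   -> Suspended | Closed | Running
  // Closed    -> terminal (the Closed->Running and Closed->Suspended
  //              notifications are filtered out earlier in the function)
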
998 | | |
999 | | nsTArray<MediaStream*> |
1000 | | AudioContext::GetAllStreams() const |
1001 | 0 | { |
1002 | 0 | nsTArray<MediaStream*> streams; |
1003 | 0 | for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) { |
1004 | 0 | MediaStream* s = iter.Get()->GetKey()->GetStream(); |
1005 | 0 | if (s) { |
1006 | 0 | streams.AppendElement(s); |
1007 | 0 | } |
1008 | 0 | } |
1009 | 0 | return streams; |
1010 | 0 | } |
1011 | | |
1012 | | already_AddRefed<Promise> |
1013 | | AudioContext::Suspend(ErrorResult& aRv) |
1014 | 0 | { |
1015 | 0 | nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); |
1016 | 0 | RefPtr<Promise> promise; |
1017 | 0 | promise = Promise::Create(parentObject, aRv); |
1018 | 0 | if (aRv.Failed()) { |
1019 | 0 | return nullptr; |
1020 | 0 | } |
1021 | 0 | if (mIsOffline) { |
1022 | 0 | promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
1023 | 0 | return promise.forget(); |
1024 | 0 | } |
1025 | 0 | |
1026 | 0 | if (mAudioContextState == AudioContextState::Closed || |
1027 | 0 | mCloseCalled) { |
1028 | 0 | promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR); |
1029 | 0 | return promise.forget(); |
1030 | 0 | } |
1031 | 0 | |
1032 | 0 | mPromiseGripArray.AppendElement(promise); |
1033 | 0 | SuspendInternal(promise); |
1034 | 0 | return promise.forget(); |
1035 | 0 | } |
1036 | | |
1037 | | void |
1038 | | AudioContext::SuspendInternal(void* aPromise) |
1039 | 0 | { |
1040 | 0 | Destination()->Suspend(); |
1041 | 0 | |
1042 | 0 | nsTArray<MediaStream*> streams; |
1043 | 0 | // If mSuspendCalled is true then we already suspended all our streams, |
1044 | 0 | // so don't suspend them again (since suspend(); suspend(); resume(); should |
1045 | 0 | // cancel both suspends). But we still need to do ApplyAudioContextOperation |
1046 | 0 | // to ensure our new promise is resolved. |
1047 | 0 | if (!mSuspendCalled) { |
1048 | 0 | streams = GetAllStreams(); |
1049 | 0 | } |
1050 | 0 | Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(), |
1051 | 0 | streams, |
1052 | 0 | AudioContextOperation::Suspend, |
1053 | 0 | aPromise); |
1054 | 0 | |
1055 | 0 | mSuspendCalled = true; |
1056 | 0 | } |
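
A call-sequence sketch of the idempotence described in the comment above (illustrative, not from the source):

  ctx->SuspendInternal(p1);  // mSuspendCalled: false -> true, streams suspended
  ctx->SuspendInternal(p2);  // already true: streams list stays empty, only the
                             // graph operation runs, so p2 still gets resolved
  ctx->ResumeInternal();     // true -> false, streams resumed; one resume
                             // cancels any number of stacked suspends
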
1057 | | |
1058 | | already_AddRefed<Promise> |
1059 | | AudioContext::Resume(ErrorResult& aRv) |
1060 | 0 | { |
1061 | 0 | nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); |
1062 | 0 | RefPtr<Promise> promise; |
1063 | 0 | promise = Promise::Create(parentObject, aRv); |
1064 | 0 | if (aRv.Failed()) { |
1065 | 0 | return nullptr; |
1066 | 0 | } |
1067 | 0 | |
1068 | 0 | if (mIsOffline) { |
1069 | 0 | promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
1070 | 0 | return promise.forget(); |
1071 | 0 | } |
1072 | 0 | |
1073 | 0 | if (mAudioContextState == AudioContextState::Closed || |
1074 | 0 | mCloseCalled) { |
1075 | 0 | promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR); |
1076 | 0 | return promise.forget(); |
1077 | 0 | } |
1078 | 0 | |
1079 | 0 | mPendingResumePromises.AppendElement(promise); |
1080 | 0 | |
1081 | 0 | const bool isAllowedToPlay = AutoplayPolicy::IsAllowedToPlay(*this); |
1082 | 0 | if (isAllowedToPlay) { |
1083 | 0 | ResumeInternal(); |
1084 | 0 | } else { |
1085 | 0 | DispatchBlockedEvent(); |
1086 | 0 | } |
1087 | 0 | |
1088 | 0 | AUTOPLAY_LOG("Resume AudioContext %p, IsAllowedToPlay=%d", |
1089 | 0 | this, isAllowedToPlay); |
1090 | 0 | return promise.forget(); |
1091 | 0 | } |
1092 | | |
1093 | | void |
1094 | | AudioContext::ResumeInternal() |
1095 | 0 | { |
1096 | 0 | Destination()->Resume(); |
1097 | 0 | |
1098 | 0 | nsTArray<MediaStream*> streams; |
1099 | 0 | // If mSuspendCalled is false then we already resumed all our streams, |
1100 | 0 | // so don't resume them again (since suspend(); resume(); resume(); should |
1101 | 0 | // be OK). But we still need to do ApplyAudioContextOperation |
1102 | 0 | // to ensure our new promise is resolved. |
1103 | 0 | if (mSuspendCalled) { |
1104 | 0 | streams = GetAllStreams(); |
1105 | 0 | } |
1106 | 0 | Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(), |
1107 | 0 | streams, |
1108 | 0 | AudioContextOperation::Resume, |
1109 | 0 | nullptr); |
1110 | 0 | mSuspendCalled = false; |
1111 | 0 | } |
1112 | | |
1113 | | void |
1114 | | AudioContext::DispatchBlockedEvent() |
1115 | 0 | { |
1116 | 0 | if (!StaticPrefs::MediaBlockEventEnabled()) { |
1117 | 0 | return; |
1118 | 0 | } |
1119 | 0 | |
1120 | 0 | RefPtr<AudioContext> self = this; |
1121 | 0 | RefPtr<nsIRunnable> r = NS_NewRunnableFunction( |
1122 | 0 | "AudioContext::AutoplayBlocked", |
1123 | 0 | [self] () { |
1124 | 0 | nsPIDOMWindowInner* parent = self->GetParentObject(); |
1125 | 0 | if (!parent) { |
1126 | 0 | return; |
1127 | 0 | } |
1128 | 0 | |
1129 | 0 | nsIDocument* doc = parent->GetExtantDoc(); |
1130 | 0 | if (!doc) { |
1131 | 0 | return; |
1132 | 0 | } |
1133 | 0 | |
1134 | 0 | AUTOPLAY_LOG("Dispatch `blocked` event for AudioContext %p", self.get()); |
1135 | 0 | nsContentUtils::DispatchTrustedEvent( |
1136 | 0 | doc, |
1137 | 0 | static_cast<DOMEventTargetHelper*>(self), |
1138 | 0 | NS_LITERAL_STRING("blocked"), |
1139 | 0 | CanBubble::eNo, |
1140 | 0 | Cancelable::eNo); |
1141 | 0 | }); |
1142 | 0 | Dispatch(r.forget()); |
1143 | 0 | } |
1144 | | |
1145 | | already_AddRefed<Promise> |
1146 | | AudioContext::Close(ErrorResult& aRv) |
1147 | 0 | { |
1148 | 0 | nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); |
1149 | 0 | RefPtr<Promise> promise; |
1150 | 0 | promise = Promise::Create(parentObject, aRv); |
1151 | 0 | if (aRv.Failed()) { |
1152 | 0 | return nullptr; |
1153 | 0 | } |
1154 | 0 | |
1155 | 0 | if (mIsOffline) { |
1156 | 0 | promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR); |
1157 | 0 | return promise.forget(); |
1158 | 0 | } |
1159 | 0 | |
1160 | 0 | if (mAudioContextState == AudioContextState::Closed) { |
1161 | 0 | promise->MaybeResolve(NS_ERROR_DOM_INVALID_STATE_ERR); |
1162 | 0 | return promise.forget(); |
1163 | 0 | } |
1164 | 0 | |
1165 | 0 | if (Destination()) { |
1166 | 0 | Destination()->DestroyAudioChannelAgent(); |
1167 | 0 | } |
1168 | 0 | |
1169 | 0 | mPromiseGripArray.AppendElement(promise); |
1170 | 0 | |
1171 | 0 | // This can be called when freeing a document, and the streams are dead at |
1172 | 0 | // this point, so we need extra null-checks. |
1173 | 0 | MediaStream* ds = DestinationStream(); |
1174 | 0 | if (ds) { |
1175 | 0 | nsTArray<MediaStream*> streams; |
1176 | 0 | // If mSuspendCalled or mCloseCalled are true then we already suspended |
1177 | 0 | // all our streams, so don't suspend them again. But we still need to do |
1178 | 0 | // ApplyAudioContextOperation to ensure our new promise is resolved. |
1179 | 0 | if (!mSuspendCalled && !mCloseCalled) { |
1180 | 0 | streams = GetAllStreams(); |
1181 | 0 | } |
1182 | 0 | Graph()->ApplyAudioContextOperation(ds->AsAudioNodeStream(), streams, |
1183 | 0 | AudioContextOperation::Close, promise); |
1184 | 0 | } |
1185 | 0 | mCloseCalled = true; |
1186 | 0 | |
1187 | 0 | return promise.forget(); |
1188 | 0 | } |
1189 | | |
1190 | | void |
1191 | | AudioContext::RegisterNode(AudioNode* aNode) |
1192 | 0 | { |
1193 | 0 | MOZ_ASSERT(!mAllNodes.Contains(aNode)); |
1194 | 0 | mAllNodes.PutEntry(aNode); |
1195 | 0 | } |
1196 | | |
1197 | | void |
1198 | | AudioContext::UnregisterNode(AudioNode* aNode) |
1199 | 0 | { |
1200 | 0 | MOZ_ASSERT(mAllNodes.Contains(aNode)); |
1201 | 0 | mAllNodes.RemoveEntry(aNode); |
1202 | 0 | } |
1203 | | |
1204 | | JSObject* |
1205 | | AudioContext::GetGlobalJSObject() const |
1206 | 0 | { |
1207 | 0 | nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); |
1208 | 0 | if (!parentObject) { |
1209 | 0 | return nullptr; |
1210 | 0 | } |
1211 | 0 | |
1212 | 0 | // This can also return null. |
1213 | 0 | return parentObject->GetGlobalJSObject(); |
1214 | 0 | } |
1215 | | |
1216 | | already_AddRefed<Promise> |
1217 | | AudioContext::StartRendering(ErrorResult& aRv) |
1218 | 0 | { |
1219 | 0 | nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); |
1220 | 0 | |
1221 | 0 | MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext"); |
1222 | 0 | if (mIsStarted) { |
1223 | 0 | aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); |
1224 | 0 | return nullptr; |
1225 | 0 | } |
1226 | 0 | |
1227 | 0 | mIsStarted = true; |
1228 | 0 | RefPtr<Promise> promise = Promise::Create(parentObject, aRv); |
1229 | 0 | if (aRv.Failed()) { |
1230 | 0 | return nullptr; |
1231 | 0 | } |
1232 | 0 | mDestination->StartRendering(promise); |
1233 | 0 | |
1234 | 0 | OnStateChanged(nullptr, AudioContextState::Running); |
1235 | 0 | |
1236 | 0 | return promise.forget(); |
1237 | 0 | } |
1238 | | |
1239 | | unsigned long |
1240 | | AudioContext::Length() |
1241 | 0 | { |
1242 | 0 | MOZ_ASSERT(mIsOffline); |
1243 | 0 | return mDestination->Length(); |
1244 | 0 | } |
1245 | | |
1246 | | void |
1247 | | AudioContext::Mute() const |
1248 | 0 | { |
1249 | 0 | MOZ_ASSERT(!mIsOffline); |
1250 | 0 | if (mDestination) { |
1251 | 0 | mDestination->Mute(); |
1252 | 0 | } |
1253 | 0 | } |
1254 | | |
1255 | | void |
1256 | | AudioContext::Unmute() const |
1257 | 0 | { |
1258 | 0 | MOZ_ASSERT(!mIsOffline); |
1259 | 0 | if (mDestination) { |
1260 | 0 | mDestination->Unmute(); |
1261 | 0 | } |
1262 | 0 | } |
1263 | | |
1264 | | size_t |
1265 | | AudioContext::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const |
1266 | 0 | { |
1267 | 0 | // AudioNodes are tracked separately because the AudioContext does not |
1268 | 0 | // keep strong references to all of the AudioNodes it creates, so they |
1269 | 0 | // cannot be traversed from here. |
1270 | 0 | |
1271 | 0 | size_t amount = aMallocSizeOf(this); |
1272 | 0 | if (mListener) { |
1273 | 0 | amount += mListener->SizeOfIncludingThis(aMallocSizeOf); |
1274 | 0 | } |
1275 | 0 | amount += mDecodeJobs.ShallowSizeOfExcludingThis(aMallocSizeOf); |
1276 | 0 | for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) { |
1277 | 0 | amount += mDecodeJobs[i]->SizeOfIncludingThis(aMallocSizeOf); |
1278 | 0 | } |
1279 | 0 | amount += mActiveNodes.ShallowSizeOfExcludingThis(aMallocSizeOf); |
1280 | 0 | return amount; |
1281 | 0 | } |
1282 | | |
1283 | | NS_IMETHODIMP |
1284 | | AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport, |
1285 | | nsISupports* aData, bool aAnonymize) |
1286 | 0 | { |
1287 | 0 | const nsLiteralCString |
1288 | 0 | nodeDescription("Memory used by AudioNode DOM objects (Web Audio)."); |
1289 | 0 | for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) { |
1290 | 0 | AudioNode* node = iter.Get()->GetKey(); |
1291 | 0 | int64_t amount = node->SizeOfIncludingThis(MallocSizeOf); |
1292 | 0 | nsPrintfCString domNodePath("explicit/webaudio/audio-node/%s/dom-nodes", |
1293 | 0 | node->NodeType()); |
1294 | 0 | aHandleReport->Callback(EmptyCString(), domNodePath, KIND_HEAP, UNITS_BYTES, |
1295 | 0 | amount, nodeDescription, aData); |
1296 | 0 | } |
1297 | 0 | |
1298 | 0 | int64_t amount = SizeOfIncludingThis(MallocSizeOf); |
1299 | 0 | MOZ_COLLECT_REPORT( |
1300 | 0 | "explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES, amount, |
1301 | 0 | "Memory used by AudioContext objects (Web Audio)."); |
1302 | 0 | |
1303 | 0 | return NS_OK; |
1304 | 0 | } |
1305 | | |
1306 | | BasicWaveFormCache* |
1307 | | AudioContext::GetBasicWaveFormCache() |
1308 | 0 | { |
1309 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
1310 | 0 | if (!mBasicWaveFormCache) { |
1311 | 0 | mBasicWaveFormCache = new BasicWaveFormCache(SampleRate()); |
1312 | 0 | } |
1313 | 0 | return mBasicWaveFormCache; |
1314 | 0 | } |
1315 | | |
1316 | | BasicWaveFormCache::BasicWaveFormCache(uint32_t aSampleRate) |
1317 | | : mSampleRate(aSampleRate) |
1318 | 0 | { |
1319 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
1320 | 0 | } |
1321 | | BasicWaveFormCache::~BasicWaveFormCache() |
1322 | 0 | { } |
1323 | | |
1324 | | WebCore::PeriodicWave* |
1325 | | BasicWaveFormCache::GetBasicWaveForm(OscillatorType aType) |
1326 | 0 | { |
1327 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
1328 | 0 | if (aType == OscillatorType::Sawtooth) { |
1329 | 0 | if (!mSawtooth) { |
1330 | 0 | mSawtooth = WebCore::PeriodicWave::createSawtooth(mSampleRate); |
1331 | 0 | } |
1332 | 0 | return mSawtooth; |
1333 | 0 | } else if (aType == OscillatorType::Square) { |
1334 | 0 | if (!mSquare) { |
1335 | 0 | mSquare = WebCore::PeriodicWave::createSquare(mSampleRate); |
1336 | 0 | } |
1337 | 0 | return mSquare; |
1338 | 0 | } else if (aType == OscillatorType::Triangle) { |
1339 | 0 | if (!mTriangle) { |
1340 | 0 | mTriangle = WebCore::PeriodicWave::createTriangle(mSampleRate); |
1341 | 0 | } |
1342 | 0 | return mTriangle; |
1343 | 0 | } else { |
1344 | 0 | MOZ_ASSERT(false, "Not reached"); |
1345 | 0 | return nullptr; |
1346 | 0 | } |
1347 | 0 | } |
1348 | | |
1349 | | } // namespace dom |
1350 | | } // namespace mozilla |