/src/mozilla-central/dom/media/webaudio/AudioNodeStream.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
4 | | * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | #include "AudioNodeStream.h" |
7 | | |
8 | | #include "MediaStreamGraphImpl.h" |
9 | | #include "MediaStreamListener.h" |
10 | | #include "AudioNodeEngine.h" |
11 | | #include "ThreeDPoint.h" |
12 | | #include "AudioChannelFormat.h" |
13 | | #include "AudioParamTimeline.h" |
14 | | #include "AudioContext.h" |
15 | | #include "nsMathUtils.h" |
16 | | #include "AlignmentUtils.h" |
17 | | |
18 | | using namespace mozilla::dom; |
19 | | |
20 | | namespace mozilla { |
21 | | |
22 | | /** |
23 | | * An AudioNodeStream produces a single audio track with ID |
24 | | * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate |
25 | | * for regular audio contexts, and the rate requested by the web content |
26 | | * for offline audio contexts. |
27 | | * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples. |
28 | | * Note: This must be a different value than MEDIA_STREAM_DEST_TRACK_ID |
29 | | */ |
30 | | |
/**
 * Main-thread constructor.  The stream starts suspended unless the engine is
 * already active or the stream drives an external output.
 */
AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
                                 Flags aFlags,
                                 TrackRate aSampleRate)
  : ProcessedMediaStream()
  , mEngine(aEngine)
  , mSampleRate(aSampleRate)
  , mFlags(aFlags)
  , mNumberOfInputChannels(2)
  , mIsActive(aEngine->IsActive())
  , mMarkAsFinishedAfterThisBlock(false)
  , mAudioParamStream(false)
  , mPassThrough(false)
{
  MOZ_ASSERT(NS_IsMainThread());
  // Inactive streams with no external output need not be processed; start
  // them with a suspend count of 1.
  mSuspendedCount = !(mIsActive || mFlags & EXTERNAL_OUTPUT);
  mChannelCountMode = ChannelCountMode::Max;
  mChannelInterpretation = ChannelInterpretation::Speakers;
  // AudioNodes are always producing data
  mHasCurrentData = true;
  // Always keep at least one output chunk, even for engines reporting zero
  // outputs.
  mLastChunks.SetLength(std::max(uint16_t(1), mEngine->OutputCount()));
  MOZ_COUNT_CTOR(AudioNodeStream);
}
53 | | |
AudioNodeStream::~AudioNodeStream()
{
  // Every consumer must have balanced its IncrementActiveInputCount() by now.
  MOZ_ASSERT(mActiveInputCount == 0);
  MOZ_COUNT_DTOR(AudioNodeStream);
}
59 | | |
/**
 * Releases graph-thread-owned chunk storage before handing destruction off to
 * the base class.
 */
void
AudioNodeStream::DestroyImpl()
{
  // These are graph thread objects, so clean up on graph thread.
  mInputChunks.Clear();
  mLastChunks.Clear();

  ProcessedMediaStream::DestroyImpl();
}
69 | | |
/**
 * Main-thread factory: builds a stream for aEngine at the graph's rate,
 * inherits the node's channel-mixing parameters (when there is a node), and
 * registers the stream with aGraph.
 */
/* static */ already_AddRefed<AudioNodeStream>
AudioNodeStream::Create(AudioContext* aCtx, AudioNodeEngine* aEngine,
                        Flags aFlags, MediaStreamGraph* aGraph)
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_RELEASE_ASSERT(aGraph);

  // MediaRecorders use an AudioNodeStream, but no AudioNode
  AudioNode* node = aEngine->NodeMainThread();

  RefPtr<AudioNodeStream> stream =
    new AudioNodeStream(aEngine, aFlags, aGraph->GraphRate());
  // New streams join a suspended context in the suspended state.
  stream->mSuspendedCount += aCtx->ShouldSuspendNewStream();
  if (node) {
    stream->SetChannelMixingParametersImpl(node->ChannelCount(),
                                           node->ChannelCountModeValue(),
                                           node->ChannelInterpretationValue());
  }
  aGraph->AddStream(stream);
  return stream.forget();
}
91 | | |
/**
 * Memory reporter hook: base-class storage plus the output chunk array.
 * The engine is reported separately (see SizeOfAudioNodesIncludingThis).
 */
size_t
AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = 0;

  // Not reported:
  // - mEngine

  amount += ProcessedMediaStream::SizeOfExcludingThis(aMallocSizeOf);
  amount += mLastChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);
  for (size_t i = 0; i < mLastChunks.Length(); i++) {
    // NB: This is currently unshared only as there are instances of
    //     double reporting in DMD otherwise.
    amount += mLastChunks[i].SizeOfExcludingThisIfUnshared(aMallocSizeOf);
  }

  return amount;
}
110 | | |
111 | | size_t |
112 | | AudioNodeStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const |
113 | 0 | { |
114 | 0 | return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
115 | 0 | } |
116 | | |
/**
 * Fills aUsage with this stream's size and, when an engine exists, lets the
 * engine report the node-side memory.
 */
void
AudioNodeStream::SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                               AudioNodeSizes& aUsage) const
{
  // Explicitly separate out the stream memory.
  aUsage.mStream = SizeOfIncludingThis(aMallocSizeOf);

  if (mEngine) {
    // This will fill out the rest of |aUsage|.
    mEngine->SizeOfIncludingThis(aMallocSizeOf, aUsage);
  }
}
129 | | |
/**
 * Main-thread setter: queues a graph-thread ControlMessage that converts
 * aStreamTime (seconds, relative to the context's destination stream) into
 * ticks and hands it to the engine.
 */
void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                                        double aStreamTime)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
            double aStreamTime)
      : ControlMessage(aStream), mStreamTime(aStreamTime),
        mRelativeToStream(aRelativeToStream), mIndex(aIndex)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    }
    double mStreamTime;
    MediaStream* MOZ_UNSAFE_REF("ControlMessages are processed in order. This \
destination stream is not yet destroyed. Its (future) destroy message will be \
processed after this message.") mRelativeToStream;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex,
                                                 aContext->DestinationStream(),
                                                 aStreamTime));
}
158 | | |
// Graph-thread half of SetStreamTimeParameter: converts seconds to stream
// ticks relative to aRelativeToStream and forwards to the engine.
void
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                            double aStreamTime)
{
  StreamTime ticks = aRelativeToStream->SecondsToNearestStreamTime(aStreamTime);
  mEngine->SetStreamTimeParameter(aIndex, ticks);
}
166 | | |
/**
 * Main-thread setter: queues a graph-thread message that forwards aValue to
 * the engine's SetDoubleParameter.
 */
void
AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetDoubleParameter(mIndex, mValue);
    }
    double mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue));
}
187 | | |
/**
 * Main-thread setter: queues a graph-thread message that forwards aValue to
 * the engine's SetInt32Parameter.
 */
void
AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetInt32Parameter(mIndex, mValue);
    }
    int32_t mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue));
}
208 | | |
/**
 * Main-thread setter: queues a graph-thread message that delivers a copy of
 * aEvent to the engine's RecvTimelineEvent.
 */
void
AudioNodeStream::SendTimelineEvent(uint32_t aIndex,
                                   const AudioTimelineEvent& aEvent)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioTimelineEvent& aEvent)
      : ControlMessage(aStream),
        mEvent(aEvent),
        // NOTE(review): mSampleRate is captured but not read in this message's
        // Run(); presumably the copied AudioTimelineEvent references it for
        // time conversion — confirm against AudioTimelineEvent.
        mSampleRate(aStream->SampleRate()),
        mIndex(aIndex)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          RecvTimelineEvent(mIndex, mEvent);
    }
    AudioTimelineEvent mEvent;
    TrackRate mSampleRate;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aEvent));
}
234 | | |
/**
 * Main-thread setter: queues a graph-thread message that forwards aValue to
 * the engine's SetThreeDPointParameter.
 */
void
AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetThreeDPointParameter(mIndex, mValue);
    }
    ThreeDPoint mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue));
}
255 | | |
256 | | void |
257 | | AudioNodeStream::SetBuffer(AudioChunk&& aBuffer) |
258 | 0 | { |
259 | 0 | class Message final : public ControlMessage |
260 | 0 | { |
261 | 0 | public: |
262 | 0 | Message(AudioNodeStream* aStream, AudioChunk&& aBuffer) |
263 | 0 | : ControlMessage(aStream), mBuffer(aBuffer) |
264 | 0 | {} |
265 | 0 | void Run() override |
266 | 0 | { |
267 | 0 | static_cast<AudioNodeStream*>(mStream)->Engine()-> |
268 | 0 | SetBuffer(std::move(mBuffer)); |
269 | 0 | } |
270 | 0 | AudioChunk mBuffer; |
271 | 0 | }; |
272 | 0 |
|
273 | 0 | GraphImpl()->AppendMessage(MakeUnique<Message>(this, std::move(aBuffer))); |
274 | 0 | } |
275 | | |
/**
 * Main-thread setter: swaps aData's contents into a graph-thread message
 * (leaving aData empty) and hands the array to the engine's SetRawArrayData.
 */
void
AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            nsTArray<float>& aData)
      : ControlMessage(aStream)
    {
      // Steal the caller's buffer rather than copying it.
      mData.SwapElements(aData);
    }
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData);
    }
    nsTArray<float> mData;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aData));
}
297 | | |
/**
 * Main-thread setter: queues a graph-thread message that applies the node's
 * channel count / countMode / interpretation via
 * SetChannelMixingParametersImpl.
 */
void
AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                            ChannelCountMode aChannelCountMode,
                                            ChannelInterpretation aChannelInterpretation)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            uint32_t aNumberOfChannels,
            ChannelCountMode aChannelCountMode,
            ChannelInterpretation aChannelInterpretation)
      : ControlMessage(aStream),
        mNumberOfChannels(aNumberOfChannels),
        mChannelCountMode(aChannelCountMode),
        mChannelInterpretation(aChannelInterpretation)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->
        SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
                                       mChannelInterpretation);
    }
    uint32_t mNumberOfChannels;
    ChannelCountMode mChannelCountMode;
    ChannelInterpretation mChannelInterpretation;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aNumberOfChannels,
                                                 aChannelCountMode,
                                                 aChannelInterpretation));
}
330 | | |
/**
 * Main-thread setter: queues a graph-thread message that toggles pass-through
 * mode (see ProcessInput, where pass-through copies input 0 to output 0
 * without running the engine).
 */
void
AudioNodeStream::SetPassThrough(bool aPassThrough)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, bool aPassThrough)
      : ControlMessage(aStream), mPassThrough(aPassThrough)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->mPassThrough = mPassThrough;
    }
    bool mPassThrough;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aPassThrough));
}
349 | | |
// Graph-thread half of SetChannelMixingParameters: records the mixing
// configuration consulted by ComputedNumberOfChannels and UpMixDownMixChunk.
void
AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                                ChannelCountMode aChannelCountMode,
                                                ChannelInterpretation aChannelInterpretation)
{
  mNumberOfInputChannels = aNumberOfChannels;
  mChannelCountMode = aChannelCountMode;
  mChannelInterpretation = aChannelInterpretation;
}
359 | | |
360 | | uint32_t |
361 | | AudioNodeStream::ComputedNumberOfChannels(uint32_t aInputChannelCount) |
362 | | { |
363 | | switch (mChannelCountMode) { |
364 | | case ChannelCountMode::Explicit: |
365 | | // Disregard the channel count we've calculated from inputs, and just use |
366 | | // mNumberOfInputChannels. |
367 | | return mNumberOfInputChannels; |
368 | | case ChannelCountMode::Clamped_max: |
369 | | // Clamp the computed output channel count to mNumberOfInputChannels. |
370 | | return std::min(aInputChannelCount, mNumberOfInputChannels); |
371 | | default: |
372 | | case ChannelCountMode::Max: |
373 | | // Nothing to do here, just shut up the compiler warning. |
374 | | return aInputChannelCount; |
375 | | } |
376 | | } |
377 | | |
// Graph-thread message for AdvanceAndResume: shifts the stream's start time
// back by mAdvance, pads the audio track with that much null data, and drops
// one suspend count so the stream resumes processing.
class AudioNodeStream::AdvanceAndResumeMessage final : public ControlMessage {
public:
  AdvanceAndResumeMessage(AudioNodeStream* aStream, StreamTime aAdvance) :
    ControlMessage(aStream), mAdvance(aAdvance) {}
  void Run() override
  {
    auto ns = static_cast<AudioNodeStream*>(mStream);
    ns->mTracksStartTime -= mAdvance;

    StreamTracks::Track* track = ns->EnsureTrack(AUDIO_TRACK);
    track->Get<AudioSegment>()->AppendNullData(mAdvance);

    ns->GraphImpl()->DecrementSuspendCount(mStream);
  }
private:
  StreamTime mAdvance;
};
395 | | |
/**
 * Main-thread call: advances the main-thread clock by aAdvance immediately
 * and queues the matching graph-thread advance-and-unsuspend.
 */
void
AudioNodeStream::AdvanceAndResume(StreamTime aAdvance)
{
  mMainThreadCurrentTime += aAdvance;
  GraphImpl()->AppendMessage(MakeUnique<AdvanceAndResumeMessage>(this, aAdvance));
}
402 | | |
/**
 * Builds the mixed input block for input port aPortIndex into aTmpChunk.
 * Collects the last output chunks of all upstream AudioNodeStreams connected
 * to that port (skipping AudioParam streams and null/empty chunks), computes
 * the output channel count from their superset plus the node's
 * channelCountMode, and sums the chunks with up/down-mixing as needed.
 */
void
AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk,
                                  uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  AutoTArray<const AudioBlock*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      // AudioParam streams are read by the engine directly, not mixed here.
      continue;
    }

    const AudioBlock* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      // Silent chunks contribute nothing to the mix.
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount());
  }

  // Let the node's channelCountMode adjust the input-derived count.
  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->ChannelCount() == 0)) {
    // No audible input: produce a null (silent) block.
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->ChannelCount() == outputChannelCount) {
    // Single input already in the right shape: share it without mixing.
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  aTmpChunk.AllocateChannels(outputChannelCount);
  DownmixBufferType downmixBuffer;
  ASSERT_ALIGNED16(downmixBuffer.Elements());

  // Sum all inputs into aTmpChunk; the first chunk overwrites, the rest add.
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
461 | | |
/**
 * Mixes one input chunk into aBlock after up/down-mixing it to aBlock's
 * channel count.  The first input (aInputIndex == 0) initializes each output
 * channel; later inputs are summed into it.  A null channel pointer denotes
 * silence.
 */
void
AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex,
                                      const AudioBlock& aChunk,
                                      AudioBlock* aBlock,
                                      DownmixBufferType* aDownmixBuffer)
{
  AutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
  UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* inputData = static_cast<const float*>(channels[c]);
    float* outputData = aBlock->ChannelFloatsForWrite(c);
    if (inputData) {
      if (aInputIndex == 0) {
        // First input: write (scaled) rather than accumulate, so the output
        // channel needs no prior zeroing.
        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
      } else {
        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
      }
    } else {
      // Silent channel: only the first input must clear the output; later
      // silent inputs add nothing.
      if (aInputIndex == 0) {
        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
      }
    }
  }
}
487 | | |
/**
 * Produces exactly aOutputChannelCount channel pointers for aChunk in
 * aOutputChannels.  With Speakers interpretation, channels are up-mixed per
 * speaker rules or down-mixed into aDownmixBuffer (which owns the resulting
 * samples — it must outlive aOutputChannels).  With Discrete interpretation,
 * missing channels are padded with null (silent) pointers and extra channels
 * are simply dropped.
 */
void
AudioNodeStream::UpMixDownMixChunk(const AudioBlock* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const float*>& aOutputChannels,
                                   DownmixBufferType& aDownmixBuffer)
{
  for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) {
    aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i]));
  }
  if (aOutputChannels.Length() < aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix<float>(&aOutputChannels, aOutputChannelCount, nullptr);
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill up the remaining aOutputChannels by zeros
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(nullptr);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      // Down-mix into scratch storage, then point the output at it.
      AutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Drop the remaining aOutputChannels
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
                                       aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
531 | | |
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
//
// Per-block graph-thread processing: gathers the input blocks, runs the
// engine (or copies input 0 straight to output 0 in pass-through mode),
// and appends/finishes external output as needed.
void
AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  uint16_t outputCount = mLastChunks.Length();
  MOZ_ASSERT(outputCount == std::max(uint16_t(1), mEngine->OutputCount()));

  if (!mIsActive) {
    // mLastChunks are already null.
#ifdef DEBUG
    for (const auto& chunk : mLastChunks) {
      MOZ_ASSERT(chunk.IsNull());
    }
#endif
  } else if (InMutedCycle()) {
    // Muted cycle: emit silence on every output instead of running the
    // engine.
    mInputChunks.Clear();
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    mInputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(mInputChunks[i], i);
    }
    bool finished = false;
    if (mPassThrough) {
      MOZ_ASSERT(outputCount == 1, "For now, we only support nodes that have one output port");
      mLastChunks[0] = mInputChunks[0];
    } else {
      // Single-port engines get the simpler ProcessBlock entry point.
      if (maxInputs <= 1 && outputCount <= 1) {
        mEngine->ProcessBlock(this, aFrom,
                              mInputChunks[0], &mLastChunks[0], &finished);
      } else {
        mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished);
      }
    }
    for (uint16_t i = 0; i < outputCount; ++i) {
      NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                   "Invalid WebAudio chunk size");
    }
    if (finished) {
      // The engine says it will produce no more output after this block.
      mMarkAsFinishedAfterThisBlock = true;
      if (mIsActive) {
        ScheduleCheckForInactive();
      }
    }

    // A disabled audio track overrides whatever the engine produced.
    if (GetDisabledTrackMode(static_cast<TrackID>(AUDIO_TRACK)) != DisabledTrackMode::ENABLED) {
      for (uint32_t i = 0; i < outputCount; ++i) {
        mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  if (!mFinished) {
    // Don't output anything while finished
    if (mFlags & EXTERNAL_OUTPUT) {
      AdvanceOutputSegment();
    }
    if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) {
      // This stream was finished the last time that we looked at it, and all
      // of the depending streams have finished their output as well, so now
      // it's time to mark this stream as finished.
      if (mFlags & EXTERNAL_OUTPUT) {
        FinishOutput();
      }
      FinishOnGraphThread();
    }
  }
}
605 | | |
/**
 * Delay-node-only pre-pass: produces this block's output before inputs are
 * processed, which is what lets DelayNodes break cycles in the graph.
 */
void
AudioNodeStream::ProduceOutputBeforeInput(GraphTime aFrom)
{
  MOZ_ASSERT(mEngine->AsDelayNodeEngine());
  MOZ_ASSERT(mEngine->OutputCount() == 1,
             "DelayNodeEngine output count should be 1");
  MOZ_ASSERT(!InMutedCycle(), "DelayNodes should break cycles");
  MOZ_ASSERT(mLastChunks.Length() == 1);

  if (!mIsActive) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    mEngine->ProduceBlockBeforeInput(this, aFrom, &mLastChunks[0]);
    NS_ASSERTION(mLastChunks[0].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                 "Invalid WebAudio chunk size");
    // A disabled audio track overrides whatever the engine produced.
    if (GetDisabledTrackMode(static_cast<TrackID>(AUDIO_TRACK)) != DisabledTrackMode::ENABLED) {
      mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  }
}
626 | | |
/**
 * EXTERNAL_OUTPUT path: appends this block's output chunk (mLastChunks[0])
 * to the stream's audio track and notifies all stream/track listeners of the
 * queued changes.
 */
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamTracks::Track* track = EnsureTrack(AUDIO_TRACK);
  // No more tracks will be coming
  mTracks.AdvanceKnownTracksTime(STREAM_TIME_MAX);

  AudioSegment* segment = track->Get<AudioSegment>();

  // Wrap a copy of the output chunk in a one-chunk segment for the listener
  // notifications below.
  AudioChunk copyChunk = *mLastChunks[0].AsMutableChunk();
  AudioSegment tmpSegment;
  tmpSegment.AppendAndConsumeChunk(&copyChunk);

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    // Notify MediaStreamListeners.
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), TrackEventCommand::TRACK_EVENT_NONE, tmpSegment);
  }
  for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
    // Notify MediaStreamTrackListeners.
    if (b.mTrackID != AUDIO_TRACK) {
      continue;
    }
    b.mListener->NotifyQueuedChanges(Graph(), segment->GetDuration(), tmpSegment);
  }

  // Append the block to the real track: null data for silence, otherwise the
  // chunk we just wrapped.
  if (mLastChunks[0].IsNull()) {
    segment->AppendNullData(tmpSegment.GetDuration());
  } else {
    segment->AppendFrom(&tmpSegment);
  }
}
660 | | |
/**
 * EXTERNAL_OUTPUT path: marks the audio track ended and sends the matching
 * ended notifications to all stream/track listeners.
 */
void
AudioNodeStream::FinishOutput()
{
  StreamTracks::Track* track = EnsureTrack(AUDIO_TRACK);
  track->SetEnded();

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                track->GetSegment()->GetDuration(),
                                TrackEventCommand::TRACK_EVENT_ENDED, emptySegment);
  }
  for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
    // Notify MediaStreamTrackListeners.
    if (b.mTrackID != AUDIO_TRACK) {
      continue;
    }
    b.mListener->NotifyEnded();
  }
}
682 | | |
/**
 * Registers aPort and bumps the active-input count if the new source counts
 * as active (mirror of RemoveInput).
 */
void
AudioNodeStream::AddInput(MediaInputPort* aPort)
{
  ProcessedMediaStream::AddInput(aPort);
  AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
  // Streams that are not AudioNodeStreams are considered active.
  if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
    IncrementActiveInputCount();
  }
}
/**
 * Unregisters aPort and drops the active-input count using the same activity
 * criterion as AddInput, keeping the two in balance.
 */
void
AudioNodeStream::RemoveInput(MediaInputPort* aPort)
{
  ProcessedMediaStream::RemoveInput(aPort);
  AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
  // Streams that are not AudioNodeStreams are considered active.
  if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
    DecrementActiveInputCount();
  }
}
703 | | |
/**
 * Transitions the stream to active: unsuspends it (unless it drives an
 * external output, which is never suspended for inactivity) and propagates
 * the activity to downstream AudioNodeStreams.  No-op if already active or
 * about to finish.
 */
void
AudioNodeStream::SetActive()
{
  if (mIsActive || mMarkAsFinishedAfterThisBlock) {
    return;
  }

  mIsActive = true;
  if (!(mFlags & EXTERNAL_OUTPUT)) {
    GraphImpl()->DecrementSuspendCount(this);
  }
  if (IsAudioParamStream()) {
    // Consumers merely influence stream order.
    // They do not read from the stream.
    return;
  }

  // Tell every downstream AudioNodeStream that it has one more active input.
  for (const auto& consumer : mConsumers) {
    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
    if (ns) {
      ns->IncrementActiveInputCount();
    }
  }
}
728 | | |
// Deferred graph-thread message that re-evaluates whether the stream can be
// deactivated (see ScheduleCheckForInactive).
class AudioNodeStream::CheckForInactiveMessage final : public ControlMessage
{
public:
  explicit CheckForInactiveMessage(AudioNodeStream* aStream) :
    ControlMessage(aStream) {}
  void Run() override
  {
    auto ns = static_cast<AudioNodeStream*>(mStream);
    ns->CheckForInactive();
  }
};
740 | | |
/**
 * Queues a post-processing CheckForInactive when the stream might be able to
 * go inactive (no active inputs, or it is finishing).
 */
void
AudioNodeStream::ScheduleCheckForInactive()
{
  if (mActiveInputCount > 0 && !mMarkAsFinishedAfterThisBlock) {
    // Still has active inputs and isn't finishing; nothing to check.
    return;
  }

  auto message = MakeUnique<CheckForInactiveMessage>(this);
  GraphImpl()->RunMessageAfterProcessing(std::move(message));
}
751 | | |
/**
 * Deactivates the stream if nothing keeps it active (no active inputs, an
 * inactive engine, or it is finishing): clears its chunks to silence,
 * re-suspends it (unless it drives an external output), and propagates the
 * deactivation to downstream AudioNodeStreams.  Inverse of SetActive.
 */
void
AudioNodeStream::CheckForInactive()
{
  if (((mActiveInputCount > 0 || mEngine->IsActive()) &&
       !mMarkAsFinishedAfterThisBlock) ||
      !mIsActive) {
    return;
  }

  mIsActive = false;
  mInputChunks.Clear(); // not required for foreseeable future
  // ProcessInput asserts (in DEBUG) that an inactive stream's chunks are null.
  for (auto& chunk : mLastChunks) {
    chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  if (!(mFlags & EXTERNAL_OUTPUT)) {
    GraphImpl()->IncrementSuspendCount(this);
  }
  if (IsAudioParamStream()) {
    return;
  }

  // Tell every downstream AudioNodeStream that it has one fewer active input.
  for (const auto& consumer : mConsumers) {
    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
    if (ns) {
      ns->DecrementActiveInputCount();
    }
  }
}
780 | | |
// One more upstream source became active; a stream with any active input is
// itself active.
void
AudioNodeStream::IncrementActiveInputCount()
{
  ++mActiveInputCount;
  SetActive();
}
787 | | |
// One upstream source went inactive; re-evaluate whether this stream can be
// deactivated too.
void
AudioNodeStream::DecrementActiveInputCount()
{
  MOZ_ASSERT(mActiveInputCount > 0);
  --mActiveInputCount;
  CheckForInactive();
}
795 | | |
796 | | } // namespace mozilla |