/src/mozilla-central/dom/media/AudioCaptureStream.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
4 | | * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | #include "MediaStreamGraphImpl.h" |
7 | | #include "MediaStreamListener.h" |
8 | | #include "mozilla/MathAlgorithms.h" |
9 | | #include "mozilla/Unused.h" |
10 | | |
11 | | #include "AudioSegment.h" |
12 | | #include "mozilla/Logging.h" |
13 | | #include "mozilla/Attributes.h" |
14 | | #include "AudioCaptureStream.h" |
15 | | #include "ImageContainer.h" |
16 | | #include "AudioNodeEngine.h" |
17 | | #include "AudioNodeStream.h" |
18 | | #include "AudioNodeExternalInputStream.h" |
19 | | #include "webaudio/MediaStreamAudioDestinationNode.h" |
20 | | #include <algorithm> |
21 | | #include "DOMMediaStream.h" |
22 | | |
23 | | using namespace mozilla::layers; |
24 | | using namespace mozilla::dom; |
25 | | using namespace mozilla::gfx; |
26 | | |
27 | | namespace mozilla |
28 | | { |
29 | | |
30 | | // We are mixing to mono until PeerConnection can accept stereo |
31 | | static const uint32_t MONO = 1; |
32 | | |
// Construct a capture stream whose output is the single audio track
// identified by |aTrackId|, produced by mixing all input streams.
// Main-thread only (asserted below).
AudioCaptureStream::AudioCaptureStream(TrackID aTrackId)
  : ProcessedMediaStream()
  , mTrackId(aTrackId)
  , mStarted(false)      // no output until Start()'s message runs on the graph
  , mTrackCreated(false) // DOM listeners not yet told about the output track
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_COUNT_CTOR(AudioCaptureStream);
  // Register so MixerCallback() receives the mixed data when the mixer
  // finishes a block (see FinishMixing() in ProcessInput()).
  mMixer.AddCallback(this);
}
43 | | |
AudioCaptureStream::~AudioCaptureStream()
{
  MOZ_COUNT_DTOR(AudioCaptureStream);
  // Unregister from the mixer so it does not call back into a dead object.
  mMixer.RemoveCallback(this);
}
49 | | |
50 | | void |
51 | | AudioCaptureStream::Start() |
52 | 0 | { |
53 | 0 | class Message : public ControlMessage { |
54 | 0 | public: |
55 | 0 | explicit Message(AudioCaptureStream* aStream) |
56 | 0 | : ControlMessage(aStream), mStream(aStream) {} |
57 | 0 |
|
58 | 0 | virtual void Run() |
59 | 0 | { |
60 | 0 | mStream->mStarted = true; |
61 | 0 | } |
62 | 0 |
|
63 | 0 | protected: |
64 | 0 | AudioCaptureStream* mStream; |
65 | 0 | }; |
66 | 0 | GraphImpl()->AppendMessage(MakeUnique<Message>(this)); |
67 | 0 | } |
68 | | |
69 | | void |
70 | | AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo, |
71 | | uint32_t aFlags) |
72 | 0 | { |
73 | 0 | if (!mStarted) { |
74 | 0 | return; |
75 | 0 | } |
76 | 0 | |
77 | 0 | uint32_t inputCount = mInputs.Length(); |
78 | 0 | StreamTracks::Track* track = EnsureTrack(mTrackId); |
79 | 0 | // Notify the DOM everything is in order. |
80 | 0 | if (!mTrackCreated) { |
81 | 0 | for (uint32_t i = 0; i < mListeners.Length(); i++) { |
82 | 0 | MediaStreamListener* l = mListeners[i]; |
83 | 0 | AudioSegment tmp; |
84 | 0 | l->NotifyQueuedTrackChanges( |
85 | 0 | Graph(), mTrackId, 0, TrackEventCommand::TRACK_EVENT_CREATED, tmp); |
86 | 0 | l->NotifyFinishedTrackCreation(Graph()); |
87 | 0 | } |
88 | 0 | mTrackCreated = true; |
89 | 0 | } |
90 | 0 |
|
91 | 0 | if (IsFinishedOnGraphThread()) { |
92 | 0 | return; |
93 | 0 | } |
94 | 0 | |
95 | 0 | // If the captured stream is connected back to a object on the page (be it an |
96 | 0 | // HTMLMediaElement with a stream as source, or an AudioContext), a cycle |
97 | 0 | // situation occur. This can work if it's an AudioContext with at least one |
98 | 0 | // DelayNode, but the MSG will mute the whole cycle otherwise. |
99 | 0 | if (InMutedCycle() || inputCount == 0) { |
100 | 0 | track->Get<AudioSegment>()->AppendNullData(aTo - aFrom); |
101 | 0 | } else { |
102 | 0 | // We mix down all the tracks of all inputs, to a stereo track. Everything |
103 | 0 | // is {up,down}-mixed to stereo. |
104 | 0 | mMixer.StartMixing(); |
105 | 0 | AudioSegment output; |
106 | 0 | for (uint32_t i = 0; i < inputCount; i++) { |
107 | 0 | MediaStream* s = mInputs[i]->GetSource(); |
108 | 0 | StreamTracks::TrackIter track(s->GetStreamTracks(), MediaSegment::AUDIO); |
109 | 0 | if (track.IsEnded()) { |
110 | 0 | // No tracks for this input. Still we append data to trigger the mixer. |
111 | 0 | AudioSegment toMix; |
112 | 0 | toMix.AppendNullData(aTo - aFrom); |
113 | 0 | toMix.Mix(mMixer, MONO, Graph()->GraphRate()); |
114 | 0 | } |
115 | 0 | for (; !track.IsEnded(); track.Next()) { |
116 | 0 | AudioSegment* inputSegment = track->Get<AudioSegment>(); |
117 | 0 | StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom); |
118 | 0 | StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo); |
119 | 0 | AudioSegment toMix; |
120 | 0 | if (track->IsEnded() && inputSegment->GetDuration() <= inputStart) { |
121 | 0 | toMix.AppendNullData(aTo - aFrom); |
122 | 0 | } else { |
123 | 0 | toMix.AppendSlice(*inputSegment, inputStart, inputEnd); |
124 | 0 | // Care for streams blocked in the [aTo, aFrom] range. |
125 | 0 | if (inputEnd - inputStart < aTo - aFrom) { |
126 | 0 | toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart)); |
127 | 0 | } |
128 | 0 | } |
129 | 0 | toMix.Mix(mMixer, MONO, Graph()->GraphRate()); |
130 | 0 | } |
131 | 0 | } |
132 | 0 | // This calls MixerCallback below |
133 | 0 | mMixer.FinishMixing(); |
134 | 0 | } |
135 | 0 |
|
136 | 0 | // Regardless of the status of the input tracks, we go foward. |
137 | 0 | mTracks.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking((aTo))); |
138 | 0 | } |
139 | | |
140 | | void |
141 | | AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer, |
142 | | AudioSampleFormat aFormat, uint32_t aChannels, |
143 | | uint32_t aFrames, uint32_t aSampleRate) |
144 | 0 | { |
145 | 0 | AutoTArray<nsTArray<AudioDataValue>, MONO> output; |
146 | 0 | AutoTArray<const AudioDataValue*, MONO> bufferPtrs; |
147 | 0 | output.SetLength(MONO); |
148 | 0 | bufferPtrs.SetLength(MONO); |
149 | 0 |
|
150 | 0 | uint32_t written = 0; |
151 | 0 | // We need to copy here, because the mixer will reuse the storage, we should |
152 | 0 | // not hold onto it. Buffers are in planar format. |
153 | 0 | for (uint32_t channel = 0; channel < aChannels; channel++) { |
154 | 0 | AudioDataValue* out = output[channel].AppendElements(aFrames); |
155 | 0 | PodCopy(out, aMixedBuffer + written, aFrames); |
156 | 0 | bufferPtrs[channel] = out; |
157 | 0 | written += aFrames; |
158 | 0 | } |
159 | 0 | AudioChunk chunk; |
160 | 0 | chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output); |
161 | 0 | chunk.mDuration = aFrames; |
162 | 0 | chunk.mBufferFormat = aFormat; |
163 | 0 | chunk.mChannelData.SetLength(MONO); |
164 | 0 | for (uint32_t channel = 0; channel < aChannels; channel++) { |
165 | 0 | chunk.mChannelData[channel] = bufferPtrs[channel]; |
166 | 0 | } |
167 | 0 |
|
168 | 0 | // Now we have mixed data, simply append it to out track. |
169 | 0 | EnsureTrack(mTrackId)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk); |
170 | 0 | } |
171 | | } |