/src/mozilla-central/dom/media/mediasink/AudioSink.cpp
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsPrintfCString.h"
#include "MediaQueue.h"
#include "AudioSink.h"
#include "VideoUtils.h"
#include "AudioConverter.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/StaticPrefs.h"

namespace mozilla {

extern LazyLogModule gMediaDecoderLog;
#define SINK_LOG(msg, ...) \
  MOZ_LOG(gMediaDecoderLog, LogLevel::Debug, ("AudioSink=%p " msg, this, ##__VA_ARGS__))
#define SINK_LOG_V(msg, ...) \
  MOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, ("AudioSink=%p " msg, this, ##__VA_ARGS__))

namespace media {

// The number of audio frames used to fuzz rounding errors.
static const int64_t AUDIO_FUZZ_FRAMES = 1;

// Amount of processed audio (in microseconds) to keep buffered ahead of use.
static const int32_t LOW_AUDIO_USECS = 300000;

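// The AudioSink pulls decoded packets from the given MediaQueue, converts them
// to the chosen output rate and channel count, and feeds them to an
// AudioStream for playback.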
AudioSink::AudioSink(AbstractThread* aThread,
                     MediaQueue<AudioData>& aAudioQueue,
                     const TimeUnit& aStartTime,
                     const AudioInfo& aInfo)
  : mStartTime(aStartTime)
  , mInfo(aInfo)
  , mPlaying(true)
  , mMonitor("AudioSink")
  , mWritten(0)
  , mErrored(false)
  , mPlaybackComplete(false)
  , mOwnerThread(aThread)
  , mProcessedQueueLength(0)
  , mFramesParsed(0)
  , mIsAudioDataAudible(false)
  , mAudioQueue(aAudioQueue)
{
  bool resampling = StaticPrefs::MediaResamplingEnabled();

  if (resampling) {
    mOutputRate = 48000;
  } else if (mInfo.mRate == 44100 || mInfo.mRate == 48000) {
    // The original rate is of good quality and we want to avoid unnecessary
    // resampling. The common scenario is that the sampling rate is one or the
    // other; using it directly minimizes audio quality regression, in the hope
    // that content providers won't change rates mid-stream.
    mOutputRate = mInfo.mRate;
  } else {
    // We will resample all data to match cubeb's preferred sampling rate.
    mOutputRate = AudioStream::GetPreferredRate();
  }
  MOZ_DIAGNOSTIC_ASSERT(mOutputRate, "output rate can't be 0.");

  mOutputChannels = DecideAudioPlaybackChannels(mInfo);
}

AudioSink::~AudioSink()
{
}

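// Hooks this AudioSink up to its MediaQueue events and creates the underlying
// AudioStream. The returned promise is resolved once playback has drained, or
// rejected if the stream fails to initialize.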
RefPtr<GenericPromise>
AudioSink::Init(const PlaybackParams& aParams)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());

  mAudioQueueListener = mAudioQueue.PushEvent().Connect(
    mOwnerThread, this, &AudioSink::OnAudioPushed);
  mAudioQueueFinishListener = mAudioQueue.FinishEvent().Connect(
    mOwnerThread, this, &AudioSink::NotifyAudioNeeded);
  mProcessedQueueListener = mProcessedQueue.PopEvent().Connect(
    mOwnerThread, this, &AudioSink::OnAudioPopped);

  // Ensure that at least one audio packet is popped from the AudioQueue and
  // ready to be played.
  NotifyAudioNeeded();
  RefPtr<GenericPromise> p = mEndPromise.Ensure(__func__);
  nsresult rv = InitializeAudioStream(aParams);
  if (NS_FAILED(rv)) {
    mEndPromise.Reject(rv, __func__);
  }
  return p;
}

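// Returns the current playback position, computed from the AudioStream's clock
// plus the sink's start time. Clamped so it never moves backwards.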
TimeUnit
AudioSink::GetPosition()
{
  int64_t tmp;
  if (mAudioStream &&
      (tmp = mAudioStream->GetPosition()) >= 0) {
    TimeUnit pos = TimeUnit::FromMicroseconds(tmp);
    NS_ASSERTION(pos >= mLastGoodPosition,
                 "AudioStream position shouldn't go backward");
    // Only update the last good position when we get a good one.
    if (pos >= mLastGoodPosition) {
      mLastGoodPosition = pos;
    }
  }

  return mStartTime + mLastGoodPosition;
}

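// True while either the processed queue or the AudioStream itself still holds
// frames that have not yet been played out.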
bool
AudioSink::HasUnplayedFrames()
{
  // Experimentation suggests that GetPositionInFrames() is zero-indexed,
  // so we need to add 1 here before comparing it to mWritten.
  int64_t total;
  {
    MonitorAutoLock mon(mMonitor);
    total = mWritten + (mCursor.get() ? mCursor->Available() : 0);
  }
  return mProcessedQueue.GetSize() ||
         (mAudioStream && mAudioStream->GetPositionInFrames() + 1 < total);
}

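// Disconnects the queue listeners, tears down the AudioStream and resolves the
// end promise if it is still pending.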
void
AudioSink::Shutdown()
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());

  mAudioQueueListener.Disconnect();
  mAudioQueueFinishListener.Disconnect();
  mProcessedQueueListener.Disconnect();

  if (mAudioStream) {
    mAudioStream->Shutdown();
    mAudioStream = nullptr;
  }
  mProcessedQueue.Reset();
  mProcessedQueue.Finish();
  mEndPromise.ResolveIfExists(true, __func__);
}

void
AudioSink::SetVolume(double aVolume)
{
  if (mAudioStream) {
    mAudioStream->SetVolume(aVolume);
  }
}

void
AudioSink::SetPlaybackRate(double aPlaybackRate)
{
  MOZ_ASSERT(aPlaybackRate != 0, "Don't set the playbackRate to 0 on AudioStream");
  if (mAudioStream) {
    mAudioStream->SetPlaybackRate(aPlaybackRate);
  }
}

void
AudioSink::SetPreservesPitch(bool aPreservesPitch)
{
  if (mAudioStream) {
    mAudioStream->SetPreservesPitch(aPreservesPitch);
  }
}

void
AudioSink::SetPlaying(bool aPlaying)
{
  if (!mAudioStream || mPlaying == aPlaying || mPlaybackComplete) {
    return;
  }
  // Pause or resume the AudioStream as necessary.
  if (!aPlaying) {
    mAudioStream->Pause();
  } else {
    mAudioStream->Resume();
  }
  mPlaying = aPlaying;
}

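// Creates and configures the AudioStream that pulls data from this sink via
// PopFrames(). Playback parameters are applied before Start() so they take
// effect from the very first data callback.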
nsresult
AudioSink::InitializeAudioStream(const PlaybackParams& aParams)
{
  mAudioStream = new AudioStream(*this);
  // When the AudioQueue is empty there is no way to know the channel layout of
  // the incoming audio data, so we use the predefined channel map instead.
  AudioConfig::ChannelLayout::ChannelMap channelMap =
    mConverter ? mConverter->OutputConfig().Layout().Map()
               : AudioConfig::ChannelLayout(mOutputChannels).Map();
  // The layout map used here has already been processed by mConverter into
  // SMPTE format with mOutputChannels channels, so there is no need to worry
  // whether StaticPrefs::accessibility_monoaudio_enable() or
  // StaticPrefs::MediaForcestereoEnabled() is applied.
  nsresult rv = mAudioStream->Init(mOutputChannels, channelMap, mOutputRate);
  if (NS_FAILED(rv)) {
    mAudioStream->Shutdown();
    mAudioStream = nullptr;
    return rv;
  }

  // Set playback params before calling Start() so they can take effect
  // as soon as the 1st DataCallback of the AudioStream fires.
  mAudioStream->SetVolume(aParams.mVolume);
  mAudioStream->SetPlaybackRate(aParams.mPlaybackRate);
  mAudioStream->SetPreservesPitch(aParams.mPreservesPitch);
  mAudioStream->Start();

  return NS_OK;
}

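// Returns the end time of the audio written to the AudioStream so far, derived
// from the number of frames written at the output rate.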
TimeUnit
AudioSink::GetEndTime() const
{
  int64_t written;
  {
    MonitorAutoLock mon(mMonitor);
    written = mWritten;
  }
  TimeUnit played = FramesToTimeUnit(written, mOutputRate) + mStartTime;
  if (!played.IsValid()) {
    NS_WARNING("Int overflow calculating audio end time");
    return TimeUnit::Zero();
  }
  // As we may be resampling, rounding errors may occur. Ensure we never get
  // past the original end time.
  return std::min(mLastEndTime, played);
}

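// Called from the AudioStream's data callback to pull up to aFrames frames of
// processed audio. Returns a possibly shorter (or empty) chunk wrapping the
// current packet's buffer.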
UniquePtr<AudioStream::Chunk>
AudioSink::PopFrames(uint32_t aFrames)
{
  class Chunk : public AudioStream::Chunk {
  public:
    Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
      : mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
    Chunk() : mFrames(0), mData(nullptr) {}
    const AudioDataValue* Data() const override { return mData; }
    uint32_t Frames() const override { return mFrames; }
    uint32_t Channels() const override { return mBuffer ? mBuffer->mChannels : 0; }
    uint32_t Rate() const override { return mBuffer ? mBuffer->mRate : 0; }
    AudioDataValue* GetWritable() const override { return mData; }
  private:
    const RefPtr<AudioData> mBuffer;
    const uint32_t mFrames;
    AudioDataValue* const mData;
  };

  bool needPopping = false;
  if (!mCurrentData) {
    // No data in the queue. Return an empty chunk.
    if (!mProcessedQueue.GetSize()) {
      return MakeUnique<Chunk>();
    }

    // We need to update our values prior to popping the processed queue, in
    // order to prevent the pop event from firing too early (before
    // mProcessedQueueLength has been updated) and to prevent HasUnplayedFrames
    // from incorrectly returning true during the interval between when
    // mProcessedQueue is read and when mWritten is updated.
    needPopping = true;
    mCurrentData = mProcessedQueue.PeekFront();
    {
      MonitorAutoLock mon(mMonitor);
      mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
                                              mCurrentData->mChannels,
                                              mCurrentData->mFrames);
    }
    MOZ_ASSERT(mCurrentData->mFrames > 0);
    mProcessedQueueLength -=
      FramesToUsecs(mCurrentData->mFrames, mOutputRate).value();
  }

  auto framesToPop = std::min(aFrames, mCursor->Available());

  SINK_LOG_V("playing audio at time=%" PRId64 " offset=%u length=%u",
             mCurrentData->mTime.ToMicroseconds(),
             mCurrentData->mFrames - mCursor->Available(), framesToPop);

  UniquePtr<AudioStream::Chunk> chunk =
    MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());

  {
    MonitorAutoLock mon(mMonitor);
    mWritten += framesToPop;
    mCursor->Advance(framesToPop);
  }

  // All frames are popped. Reset mCurrentData so we can pop new elements from
  // the audio queue in subsequent calls to PopFrames().
  if (!mCursor->Available()) {
    mCurrentData = nullptr;
  }

  if (needPopping) {
    // We can now safely pop the audio packet from the processed queue.
    // This will fire the popped event, triggering a call to NotifyAudioNeeded.
    RefPtr<AudioData> releaseMe = mProcessedQueue.PopFront();
    CheckIsAudible(releaseMe);
  }

  return chunk;
}

bool
AudioSink::Ended() const
{
  // Return true when an error has been encountered so the AudioStream can
  // start draining.
  return mProcessedQueue.IsFinished() || mErrored;
}

void
AudioSink::Drained()
{
  SINK_LOG("Drained");
  mPlaybackComplete = true;
  mEndPromise.ResolveIfExists(true, __func__);
}

void
AudioSink::CheckIsAudible(const AudioData* aData)
{
  MOZ_ASSERT(aData);

  bool isAudible = aData->IsAudible();
  if (isAudible != mIsAudioDataAudible) {
    mIsAudioDataAudible = isAudible;
    mAudibleEvent.Notify(mIsAudioDataAudible);
  }
}

void
AudioSink::OnAudioPopped(const RefPtr<AudioData>& aSample)
{
  SINK_LOG_V("AudioStream has used an audio packet.");
  NotifyAudioNeeded();
}

void
AudioSink::OnAudioPushed(const RefPtr<AudioData>& aSample)
{
  SINK_LOG_V("One new audio packet available.");
  NotifyAudioNeeded();
}

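// Moves packets from the decoded AudioQueue to the processed queue, converting
// them to the output rate/channel configuration and inserting silence to cover
// any gaps in the audio timeline.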
void
AudioSink::NotifyAudioNeeded()
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn(),
             "Not called from the owner's thread");

  // Always ensure we have at least two processed packets pending, to allow for
  // processing latency.
  while (mAudioQueue.GetSize() && (mAudioQueue.IsFinished() ||
                                   mProcessedQueueLength < LOW_AUDIO_USECS ||
                                   mProcessedQueue.GetSize() < 2)) {
    RefPtr<AudioData> data = mAudioQueue.PopFront();

    // Ignore elements with 0 frames and try the next one.
    if (!data->mFrames) {
      continue;
    }

    if (!mConverter ||
        (data->mRate != mConverter->InputConfig().Rate() ||
         data->mChannels != mConverter->InputConfig().Channels())) {
      SINK_LOG_V("Audio format changed from %u@%uHz to %u@%uHz",
                 mConverter ? mConverter->InputConfig().Channels() : 0,
                 mConverter ? mConverter->InputConfig().Rate() : 0,
                 data->mChannels, data->mRate);

      DrainConverter();

      // mFramesParsed indicates the current playtime in frames at the current
      // input sampling rate. Recalculate it per the new sampling rate.
      if (mFramesParsed) {
        // Use SaferMultDiv to minimize the risk of overflow.
        uint32_t oldRate = mConverter->InputConfig().Rate();
        uint32_t newRate = data->mRate;
        CheckedInt64 result = SaferMultDiv(mFramesParsed, newRate, oldRate);
        if (!result.isValid()) {
          NS_WARNING("Int overflow in AudioSink");
          mErrored = true;
          return;
        }
        mFramesParsed = result.value();
      }

      const AudioConfig::ChannelLayout inputLayout =
        data->mChannelMap
          ? AudioConfig::ChannelLayout::SMPTEDefault(data->mChannelMap)
          : AudioConfig::ChannelLayout(data->mChannels);
      const AudioConfig::ChannelLayout outputLayout =
        mOutputChannels == data->mChannels
          ? inputLayout
          : AudioConfig::ChannelLayout(mOutputChannels);
      mConverter = MakeUnique<AudioConverter>(
        AudioConfig(inputLayout, data->mChannels, data->mRate),
        AudioConfig(outputLayout, mOutputChannels, mOutputRate));
    }

    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    CheckedInt64 sampleTime =
      TimeUnitToFrames(data->mTime - mStartTime, data->mRate);
    // The difference between that timestamp and the number of frames parsed so
    // far is the size of the gap (in frames) that silence must cover.
    CheckedInt64 missingFrames = sampleTime - mFramesParsed;

    if (!missingFrames.isValid()) {
      NS_WARNING("Int overflow in AudioSink");
      mErrored = true;
      return;
    }

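    // Worked example with hypothetical numbers: at data->mRate = 48000 and
    // mStartTime = 0, a packet starting at t = 500ms gives sampleTime = 24000.
    // If only 20000 frames have been parsed so far, missingFrames = 4000, and
    // roughly 83ms of silence is pushed below to cover the gap.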
    if (missingFrames.value() > AUDIO_FUZZ_FRAMES) {
      // The next audio packet begins some time after the end of the last packet
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio packet begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(INT32_MAX, missingFrames.value());
      mFramesParsed += missingFrames.value();

      RefPtr<AudioData> silenceData;
      AlignedAudioBuffer silenceBuffer(missingFrames.value() * data->mChannels);
      if (!silenceBuffer) {
        NS_WARNING("OOM in AudioSink");
        mErrored = true;
        return;
      }
      if (mConverter->InputConfig() != mConverter->OutputConfig()) {
        AlignedAudioBuffer convertedData =
          mConverter->Process(AudioSampleBuffer(std::move(silenceBuffer))).Forget();
        silenceData = CreateAudioFromBuffer(std::move(convertedData), data);
      } else {
        silenceData = CreateAudioFromBuffer(std::move(silenceBuffer), data);
      }
      PushProcessedAudio(silenceData);
    }

    mLastEndTime = data->GetEndTime();
    mFramesParsed += data->mFrames;

    if (mConverter->InputConfig() != mConverter->OutputConfig()) {
      // We must ensure that the buffer contains exactly the right number of
      // frames, in case one of the audio producers over-allocated it.
      AlignedAudioBuffer buffer(std::move(data->mAudioData));
      buffer.SetLength(size_t(data->mFrames) * data->mChannels);

      AlignedAudioBuffer convertedData =
        mConverter->Process(AudioSampleBuffer(std::move(buffer))).Forget();
      data = CreateAudioFromBuffer(std::move(convertedData), data);
    }
    if (PushProcessedAudio(data)) {
      mLastProcessedPacket = Some(data);
    }
  }

  if (mAudioQueue.IsFinished()) {
    // We have reached the end of the data, drain the resampler.
    DrainConverter();
    mProcessedQueue.Finish();
  }
}

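// Appends a packet to the processed queue and accounts for its duration.
// Returns the number of frames pushed, or 0 for null/empty packets.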
uint32_t
AudioSink::PushProcessedAudio(AudioData* aData)
{
  if (!aData || !aData->mFrames) {
    return 0;
  }
  mProcessedQueue.Push(aData);
  mProcessedQueueLength += FramesToUsecs(aData->mFrames, mOutputRate).value();
  return aData->mFrames;
}

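// Wraps a raw buffer in a new AudioData, taking the offset and start time from
// aReference and the channel count and rate from the sink's output config.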
already_AddRefed<AudioData>
AudioSink::CreateAudioFromBuffer(AlignedAudioBuffer&& aBuffer,
                                 AudioData* aReference)
{
  uint32_t frames = aBuffer.Length() / mOutputChannels;
  if (!frames) {
    return nullptr;
  }
  auto duration = FramesToTimeUnit(frames, mOutputRate);
  if (!duration.IsValid()) {
    NS_WARNING("Int overflow in AudioSink");
    mErrored = true;
    return nullptr;
  }
  RefPtr<AudioData> data =
    new AudioData(aReference->mOffset,
                  aReference->mTime,
                  duration,
                  frames,
                  std::move(aBuffer),
                  mOutputChannels,
                  mOutputRate);
  return data.forget();
}

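// Flushes any frames buffered inside the resampler, pushing up to aMaxFrames
// of them to the processed queue. Returns the number of frames drained.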
uint32_t
AudioSink::DrainConverter(uint32_t aMaxFrames)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());

  if (!mConverter || !mLastProcessedPacket || !aMaxFrames) {
    // Nothing to drain.
    return 0;
  }

  RefPtr<AudioData> lastPacket = mLastProcessedPacket.ref();
  mLastProcessedPacket.reset();

  // To drain we simply provide an empty packet to the audio converter.
  AlignedAudioBuffer convertedData =
    mConverter->Process(AudioSampleBuffer(AlignedAudioBuffer())).Forget();

  uint32_t frames = convertedData.Length() / mOutputChannels;
  if (!convertedData.SetLength(std::min(frames, aMaxFrames) * mOutputChannels)) {
    // This can never happen as we are only ever reducing the length of
    // convertedData.
    mErrored = true;
    return 0;
  }

  RefPtr<AudioData> data =
    CreateAudioFromBuffer(std::move(convertedData), lastPacket);
  if (!data) {
    return 0;
  }
  mProcessedQueue.Push(data);
  return data->mFrames;
}

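// Returns a one-line summary of the sink's state for debug logging.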
nsCString
AudioSink::GetDebugInfo()
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
  return nsPrintfCString("AudioSink: StartTime=%" PRId64
                         " LastGoodPosition=%" PRId64
                         " Playing=%d OutputRate=%u Written=%" PRId64
                         " Errored=%d PlaybackComplete=%d",
                         mStartTime.ToMicroseconds(),
                         mLastGoodPosition.ToMicroseconds(),
                         mPlaying,
                         mOutputRate,
                         mWritten,
                         bool(mErrored),
                         bool(mPlaybackComplete));
}

} // namespace media
} // namespace mozilla