/src/mozilla-central/dom/media/mediasink/DecodedStream.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/AbstractThread.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/gfx/Point.h"
#include "mozilla/SyncRunnable.h"

#include "AudioSegment.h"
#include "DecodedStream.h"
#include "MediaData.h"
#include "MediaQueue.h"
#include "MediaStreamGraph.h"
#include "MediaStreamListener.h"
#include "OutputStreamManager.h"
#include "SharedBuffer.h"
#include "VideoSegment.h"
#include "VideoUtils.h"

namespace mozilla {

using media::TimeUnit;

/*
 * A container class to make it easier to pass the playback info all the
 * way to DecodedStreamGraphListener from DecodedStream.
 */
struct PlaybackInfoInit {
  TimeUnit mStartTime;
  MediaInfo mInfo;
};

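/*
 * Listens to the SourceMediaStream on the MediaStreamGraph (MSG) thread.
 * Output times are forwarded to DecodedStream through the mOnOutput event,
 * and mFinishPromise is resolved on the main thread once the graph reports
 * that the stream has finished (or when Forget() gives up on it).
 */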
class DecodedStreamGraphListener : public MediaStreamListener {
public:
  DecodedStreamGraphListener(MediaStream* aStream,
                             MozPromiseHolder<GenericPromise>&& aPromise,
                             AbstractThread* aMainThread)
    : mMutex("DecodedStreamGraphListener::mMutex")
    , mStream(aStream)
    , mAbstractMainThread(aMainThread)
  {
    mFinishPromise = std::move(aPromise);
  }

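  // Called on the MSG thread. Converts the graph's current time to this
  // stream's time in microseconds and publishes it to mOnOutput.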
  void NotifyOutput(MediaStreamGraph* aGraph, GraphTime aCurrentTime) override
  {
    MutexAutoLock lock(mMutex);
    if (mStream) {
      int64_t t = mStream->StreamTimeToMicroseconds(
        mStream->GraphTimeToStreamTime(aCurrentTime));
      mOnOutput.Notify(t);
    }
  }

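  // Called on the MSG thread. On EVENT_FINISHED, hop to the main thread to
  // resolve the finish promise there.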
  void NotifyEvent(MediaStreamGraph* aGraph, MediaStreamGraphEvent event) override
  {
    if (event == MediaStreamGraphEvent::EVENT_FINISHED) {
      aGraph->DispatchToMainThreadAfterStreamStateUpdate(
        NewRunnableMethod("DecodedStreamGraphListener::DoNotifyFinished",
                          this,
                          &DecodedStreamGraphListener::DoNotifyFinished));
    }
  }

  void DoNotifyFinished()
  {
    MOZ_ASSERT(NS_IsMainThread());
    mFinishPromise.ResolveIfExists(true, __func__);
  }

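  // Called when the stream is being abandoned. Resolves the finish promise on
  // the main thread in case the stream never finishes on its own, and drops
  // mStream so NotifyOutput() becomes a no-op.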
  void Forget()
  {
    RefPtr<DecodedStreamGraphListener> self = this;
    mAbstractMainThread->Dispatch(
      NS_NewRunnableFunction("DecodedStreamGraphListener::Forget", [self]() {
        MOZ_ASSERT(NS_IsMainThread());
        self->mFinishPromise.ResolveIfExists(true, __func__);
      }));
    MutexAutoLock lock(mMutex);
    mStream = nullptr;
  }

  MediaEventSource<int64_t>& OnOutput()
  {
    return mOnOutput;
  }

private:
  MediaEventProducer<int64_t> mOnOutput;

  Mutex mMutex;
  // Members below are protected by mMutex.
  RefPtr<MediaStream> mStream;
  // Main thread only.
  MozPromiseHolder<GenericPromise> mFinishPromise;

  const RefPtr<AbstractThread> mAbstractMainThread;
};

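// Suspends or resumes aStream. MediaStream::Suspend/Resume must run on the
// main thread, so when called from any other thread we dispatch a runnable.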
static void
UpdateStreamSuspended(AbstractThread* aMainThread, MediaStream* aStream, bool aBlocking)
{
  if (NS_IsMainThread()) {
    if (aBlocking) {
      aStream->Suspend();
    } else {
      aStream->Resume();
    }
  } else {
    nsCOMPtr<nsIRunnable> r;
    if (aBlocking) {
      r = NewRunnableMethod(
        "MediaStream::Suspend", aStream, &MediaStream::Suspend);
    } else {
      r =
        NewRunnableMethod("MediaStream::Resume", aStream, &MediaStream::Resume);
    }
    aMainThread->Dispatch(r.forget());
  }
}

/*
 * All MediaStream-related data is protected by the decoder's monitor.
 * We have at most one DecodedStreamData per MediaDecoder. Its stream
 * is used as the input for each ProcessedMediaStream created by calls to
 * captureStream(UntilEnded). Seeking creates a new source stream, as does
 * replaying after the input has ended. In the latter case, the new source is
 * not connected to streams created by captureStreamUntilEnded.
 */
class DecodedStreamData {
public:
  DecodedStreamData(OutputStreamManager* aOutputStreamManager,
                    PlaybackInfoInit&& aInit,
                    MozPromiseHolder<GenericPromise>&& aPromise,
                    AbstractThread* aMainThread);
  ~DecodedStreamData();
  void SetPlaying(bool aPlaying);
  MediaEventSource<int64_t>& OnOutput();
  void Forget();
  nsCString GetDebugInfo();

  /* The following group of fields are protected by the decoder's monitor
   * and can be read or written on any thread.
   */
  // Count of audio frames written to the stream.
  int64_t mAudioFramesWritten;
  // mNextVideoTime is the end timestamp for the last packet sent to the stream.
  // Therefore video packets starting at or after this time need to be copied
  // to the output stream.
  TimeUnit mNextVideoTime;
  TimeUnit mNextAudioTime;
  // The last video image sent to the stream. Useful if we need to replicate
  // the image.
  RefPtr<layers::Image> mLastVideoImage;
  gfx::IntSize mLastVideoImageDisplaySize;
  bool mHaveSentFinish;
  bool mHaveSentFinishAudio;
  bool mHaveSentFinishVideo;

  // The decoder is responsible for calling Destroy() on this stream.
  const RefPtr<SourceMediaStream> mStream;
  const RefPtr<DecodedStreamGraphListener> mListener;
  bool mPlaying;
  // True if we need to send a compensation video frame to ensure the
  // StreamTime keeps advancing at end of stream.
  bool mEOSVideoCompensation;

  const RefPtr<OutputStreamManager> mOutputStreamManager;
  const RefPtr<AbstractThread> mAbstractMainThread;
};

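// Runs on the main thread. Creates the source stream, installs the graph
// listener, adds one track per present media type, and hands the stream to
// the OutputStreamManager so captured streams can consume it.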
DecodedStreamData::DecodedStreamData(OutputStreamManager* aOutputStreamManager,
                                     PlaybackInfoInit&& aInit,
                                     MozPromiseHolder<GenericPromise>&& aPromise,
                                     AbstractThread* aMainThread)
  : mAudioFramesWritten(0)
  , mNextVideoTime(aInit.mStartTime)
  , mNextAudioTime(aInit.mStartTime)
  , mHaveSentFinish(false)
  , mHaveSentFinishAudio(false)
  , mHaveSentFinishVideo(false)
  , mStream(aOutputStreamManager->Graph()->CreateSourceStream())
  // DecodedStreamGraphListener will resolve this promise.
  , mListener(new DecodedStreamGraphListener(mStream, std::move(aPromise), aMainThread))
  // mPlaying is initially true because MDSM won't start playback until playing
  // becomes true. This is consistent with the settings of AudioSink.
  , mPlaying(true)
  , mEOSVideoCompensation(false)
  , mOutputStreamManager(aOutputStreamManager)
  , mAbstractMainThread(aMainThread)
{
  mStream->AddListener(mListener);
  TrackID audioTrack = TRACK_NONE;
  TrackID videoTrack = TRACK_NONE;

  // Initialize tracks.
  if (aInit.mInfo.HasAudio()) {
    audioTrack = aInit.mInfo.mAudio.mTrackId;
    mStream->AddAudioTrack(audioTrack,
                           aInit.mInfo.mAudio.mRate,
                           0, new AudioSegment());
  }
  if (aInit.mInfo.HasVideo()) {
    videoTrack = aInit.mInfo.mVideo.mTrackId;
    mStream->AddTrack(videoTrack, 0, new VideoSegment());
  }

  mOutputStreamManager->Connect(mStream, audioTrack, videoTrack);
}

DecodedStreamData::~DecodedStreamData()
{
  mOutputStreamManager->Disconnect();
  mStream->Destroy();
}

MediaEventSource<int64_t>&
DecodedStreamData::OnOutput()
{
  return mListener->OnOutput();
}

void
DecodedStreamData::SetPlaying(bool aPlaying)
{
  if (mPlaying != aPlaying) {
    mPlaying = aPlaying;
    UpdateStreamSuspended(mAbstractMainThread, mStream, !mPlaying);
  }
}

void
DecodedStreamData::Forget()
{
  mListener->Forget();
}

nsCString
DecodedStreamData::GetDebugInfo()
{
  return nsPrintfCString(
    "DecodedStreamData=%p mPlaying=%d mAudioFramesWritten=%" PRId64
    " mNextAudioTime=%" PRId64 " mNextVideoTime=%" PRId64 " mHaveSentFinish=%d "
    "mHaveSentFinishAudio=%d mHaveSentFinishVideo=%d",
    this, mPlaying, mAudioFramesWritten, mNextAudioTime.ToMicroseconds(),
    mNextVideoTime.ToMicroseconds(), mHaveSentFinish, mHaveSentFinishAudio,
    mHaveSentFinishVideo);
}

DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
                             AbstractThread* aMainThread,
                             MediaQueue<AudioData>& aAudioQueue,
                             MediaQueue<VideoData>& aVideoQueue,
                             OutputStreamManager* aOutputStreamManager,
                             const bool& aSameOrigin,
                             const PrincipalHandle& aPrincipalHandle)
  : mOwnerThread(aOwnerThread)
  , mAbstractMainThread(aMainThread)
  , mOutputStreamManager(aOutputStreamManager)
  , mPlaying(false)
  , mSameOrigin(aSameOrigin)
  , mPrincipalHandle(aPrincipalHandle)
  , mAudioQueue(aAudioQueue)
  , mVideoQueue(aVideoQueue)
{
}

DecodedStream::~DecodedStream()
{
  MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
}

const media::MediaSink::PlaybackParams&
DecodedStream::GetPlaybackParams() const
{
  AssertOwnerThread();
  return mParams;
}

void
DecodedStream::SetPlaybackParams(const PlaybackParams& aParams)
{
  AssertOwnerThread();
  mParams = aParams;
}

RefPtr<GenericPromise>
DecodedStream::OnEnded(TrackType aType)
{
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome());

  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio()) {
    // TODO: we should return a promise which is resolved when the audio track
    // is finished. For now this promise is resolved when the whole stream is
    // finished.
    return mFinishPromise;
  } else if (aType == TrackInfo::kVideoTrack && mInfo.HasVideo()) {
    return mFinishPromise;
  }
  return nullptr;
}

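// Called on the owner thread when MDSM starts playback. DecodedStreamData
// must be created on the main thread (it creates the source stream and its
// tracks), so we block on a SyncRunnable and then take ownership of the
// result here.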
void
DecodedStream::Start(const TimeUnit& aStartTime, const MediaInfo& aInfo)
{
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");

  mStartTime.emplace(aStartTime);
  mLastOutputTime = TimeUnit::Zero();
  mInfo = aInfo;
  mPlaying = true;
  ConnectListener();

  class R : public Runnable {
    typedef MozPromiseHolder<GenericPromise> Promise;
  public:
    R(PlaybackInfoInit&& aInit, Promise&& aPromise,
      OutputStreamManager* aManager, AbstractThread* aMainThread)
      : Runnable("CreateDecodedStreamData")
      , mInit(std::move(aInit))
      , mOutputStreamManager(aManager)
      , mAbstractMainThread(aMainThread)
    {
      mPromise = std::move(aPromise);
    }
    NS_IMETHOD Run() override
    {
      MOZ_ASSERT(NS_IsMainThread());
      // No need to create a source stream when there are no output streams.
      // This happens when RemoveOutput() is called immediately after
      // StartPlayback().
      if (!mOutputStreamManager->Graph()) {
        // Resolve the promise to indicate the end of playback.
        mPromise.Resolve(true, __func__);
        return NS_OK;
      }
      mData = MakeUnique<DecodedStreamData>(
        mOutputStreamManager, std::move(mInit), std::move(mPromise), mAbstractMainThread);
      return NS_OK;
    }
    UniquePtr<DecodedStreamData> ReleaseData()
    {
      return std::move(mData);
    }
  private:
    PlaybackInfoInit mInit;
    Promise mPromise;
    RefPtr<OutputStreamManager> mOutputStreamManager;
    UniquePtr<DecodedStreamData> mData;
    const RefPtr<AbstractThread> mAbstractMainThread;
  };

  MozPromiseHolder<GenericPromise> promise;
  mFinishPromise = promise.Ensure(__func__);
  PlaybackInfoInit init {
    aStartTime, aInfo
  };
  nsCOMPtr<nsIRunnable> r =
    new R(std::move(init), std::move(promise), mOutputStreamManager, mAbstractMainThread);
  SyncRunnable::DispatchToThread(
    SystemGroup::EventTargetFor(mozilla::TaskCategory::Other), r);
  mData = static_cast<R*>(r.get())->ReleaseData();

  if (mData) {
    mOutputListener = mData->OnOutput().Connect(
      mOwnerThread, this, &DecodedStream::NotifyOutput);
    mData->SetPlaying(mPlaying);
    SendData();
  }
}

void
DecodedStream::Stop()
{
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome(), "playback not started.");

  mStartTime.reset();
  DisconnectListener();
  mFinishPromise = nullptr;

  // Clear mData immediately when this playback session ends so we won't
  // send data to the wrong stream in SendData() in the next playback session.
  DestroyData(std::move(mData));
}

bool
DecodedStream::IsStarted() const
{
  AssertOwnerThread();
  return mStartTime.isSome();
}

bool
DecodedStream::IsPlaying() const
{
  AssertOwnerThread();
  return IsStarted() && mPlaying;
}

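// Releases mData. The listener is told to forget its stream first, and the
// actual deletion is dispatched to the main thread because the
// DecodedStreamData destructor destroys the stream and disconnects the
// OutputStreamManager.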
void
DecodedStream::DestroyData(UniquePtr<DecodedStreamData> aData)
{
  AssertOwnerThread();

  if (!aData) {
    return;
  }

  mOutputListener.Disconnect();

  DecodedStreamData* data = aData.release();
  data->Forget();
  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction("DecodedStream::DestroyData",
                                                   [=]() { delete data; });
  NS_DispatchToMainThread(r.forget());
}

void
DecodedStream::SetPlaying(bool aPlaying)
{
  AssertOwnerThread();

  // Resume/pause matters only when playback has started.
  if (mStartTime.isNothing()) {
    return;
  }

  mPlaying = aPlaying;
  if (mData) {
    mData->SetPlaying(aPlaying);
  }
}

void
DecodedStream::SetVolume(double aVolume)
{
  AssertOwnerThread();
  mParams.mVolume = aVolume;
}

void
DecodedStream::SetPlaybackRate(double aPlaybackRate)
{
  AssertOwnerThread();
  mParams.mPlaybackRate = aPlaybackRate;
}

void
DecodedStream::SetPreservesPitch(bool aPreservesPitch)
{
  AssertOwnerThread();
  mParams.mPreservesPitch = aPreservesPitch;
}

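// Appends one decoded AudioData packet to aOutput, padding with silence when
// the packet starts later than what has been written so far. For example
// (illustrative numbers): at a 48000 Hz rate, if we have written up to frame
// 48000 but the next packet starts at frame 48480, we first insert 480 silent
// frames (10 ms) so the track stays aligned with the media timeline.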
static void
SendStreamAudio(DecodedStreamData* aStream, const TimeUnit& aStartTime,
                AudioData* aData, AudioSegment* aOutput, uint32_t aRate,
                const PrincipalHandle& aPrincipalHandle)
{
  // The number of audio frames used to fuzz rounding errors.
  static const int64_t AUDIO_FUZZ_FRAMES = 1;

  MOZ_ASSERT(aData);
  AudioData* audio = aData;
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences.
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten
    + TimeUnitToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = TimeUnitToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // Ignore packets that we've already processed.
      audio->GetEndTime() <= aStream->mNextAudioTime) {
    return;
  }

  if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up.
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  // Always write the whole sample without truncation to be consistent with
  // DecodedAudioDataSink::PlayFromAudioQueue().
  audio->EnsureAudioBuffer();
  RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  AutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames);
  }
  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
  aStream->mAudioFramesWritten += audio->mFrames;

  aStream->mNextAudioTime = audio->GetEndTime();
}

void
DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
                         const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasAudio()) {
    return;
  }

  AudioSegment output;
  uint32_t rate = mInfo.mAudio.mRate;
  AutoTArray<RefPtr<AudioData>, 10> audio;
  TrackID audioTrackId = mInfo.mAudio.mTrackId;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the AudioData because AudioData
  // is ref-counted.
  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    SendStreamAudio(mData.get(), mStartTime.ref(), audio[i], &output, rate,
                    aPrincipalHandle);
  }

  output.ApplyVolume(aVolume);

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  // |mNextAudioTime| is updated as we process each audio sample in
  // SendStreamAudio(). This is consistent with how |mNextVideoTime|
  // is updated for video samples.
  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(audioTrackId, &output);
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
    sourceStream->EndTrack(audioTrackId);
    mData->mHaveSentFinishAudio = true;
  }
}

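// Appends a single video frame spanning [aStart, aEnd) to aOutput. The
// duration is computed in stream-time ticks, so very short frames can round
// down to zero duration (see ZeroDurationAtLastChunk() below).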
static void
WriteVideoToMediaStream(MediaStream* aStream,
                        layers::Image* aImage,
                        const TimeUnit& aEnd,
                        const TimeUnit& aStart,
                        const mozilla::gfx::IntSize& aIntrinsicSize,
                        const TimeStamp& aTimeStamp,
                        VideoSegment* aOutput,
                        const PrincipalHandle& aPrincipalHandle)
{
  RefPtr<layers::Image> image = aImage;
  auto end = aStream->MicrosecondsToStreamTimeRoundDown(aEnd.ToMicroseconds());
  auto start = aStream->MicrosecondsToStreamTimeRoundDown(aStart.ToMicroseconds());
  StreamTime duration = end - start;
  aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize,
                       aPrincipalHandle, false, aTimeStamp);
}

static bool
ZeroDurationAtLastChunk(VideoSegment& aInput)
{
  // Get the last video frame's start time in VideoSegment aInput.
  // If the start time is equal to the duration of aInput, it means the last
  // video frame's duration is zero.
  StreamTime lastVideoStartTime;
  aInput.GetLastFrame(&lastVideoStartTime);
  return lastVideoStartTime == aInput.GetDuration();
}

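// Copies decoded video frames after mNextVideoTime into the video track. A
// gap between mNextVideoTime and the next frame's start time is filled by
// replaying the last image, keeping the track's timeline contiguous.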
void
DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasVideo()) {
    return;
  }

  VideoSegment output;
  TrackID videoTrackId = mInfo.mVideo.mTrackId;
  AutoTArray<RefPtr<VideoData>, 10> video;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the VideoData because VideoData
  // is ref-counted.
  mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);

  // tracksStartTimeStamp might be null when the SourceMediaStream has not
  // yet been added to the MediaStreamGraph.
  TimeStamp tracksStartTimeStamp = sourceStream->GetStreamTracksStrartTimeStamp();
  if (tracksStartTimeStamp.IsNull()) {
    tracksStartTimeStamp = TimeStamp::Now();
  }

  for (uint32_t i = 0; i < video.Length(); ++i) {
    VideoData* v = video[i];

    if (mData->mNextVideoTime < v->mTime) {
      // Write the last video frame to catch up. mLastVideoImage can be null
      // here, which is fine; it just means there's no video.

      // TODO: |mLastVideoImage| should come from the last image rendered
      // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
      // video frame). E.g. if we have a video frame that is 30 sec long
      // and capture happens at 15 sec, we'll have to append a black frame
      // that is 15 sec long.
      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
                              mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
                              tracksStartTimeStamp + v->mTime.ToTimeDuration(),
                              &output, aPrincipalHandle);
      mData->mNextVideoTime = v->mTime;
    }

    if (mData->mNextVideoTime < v->GetEndTime()) {
      WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
                              mData->mNextVideoTime, v->mDisplay,
                              tracksStartTimeStamp + v->GetEndTime().ToTimeDuration(),
                              &output, aPrincipalHandle);
      mData->mNextVideoTime = v->GetEndTime();
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
    }
  }

  // Check that the output is not empty.
  if (output.GetLastFrame()) {
    mData->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
  }

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(videoTrackId, &output);
  }

  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
    if (mData->mEOSVideoCompensation) {
      VideoSegment endSegment;
      // Calculate the deviation clock time from DecodedStream.
      auto deviation = FromMicroseconds(sourceStream->StreamTimeToMicroseconds(1));
      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
                              mData->mNextVideoTime + deviation, mData->mNextVideoTime,
                              mData->mLastVideoImageDisplaySize,
                              tracksStartTimeStamp + (mData->mNextVideoTime + deviation).ToTimeDuration(),
                              &endSegment, aPrincipalHandle);
      mData->mNextVideoTime += deviation;
      MOZ_ASSERT(endSegment.GetDuration() > 0);
      if (!aIsSameOrigin) {
        endSegment.ReplaceWithDisabled();
      }
      sourceStream->AppendToTrack(videoTrackId, &endSegment);
    }
    sourceStream->EndTrack(videoTrackId);
    mData->mHaveSentFinishVideo = true;
  }
}

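// Advances the stream's known-tracks time to the furthest position we have
// written in either track, so the graph can play the stream up to that point.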
void
DecodedStream::AdvanceTracks()
{
  AssertOwnerThread();

  StreamTime endPosition = 0;

  if (mInfo.HasAudio()) {
    StreamTime audioEnd = mData->mStream->TicksToTimeRoundDown(
      mInfo.mAudio.mRate, mData->mAudioFramesWritten);
    endPosition = std::max(endPosition, audioEnd);
  }

  if (mInfo.HasVideo()) {
    StreamTime videoEnd = mData->mStream->MicrosecondsToStreamTimeRoundDown(
      (mData->mNextVideoTime - mStartTime.ref()).ToMicroseconds());
    endPosition = std::max(endPosition, videoEnd);
  }

  if (!mData->mHaveSentFinish) {
    mData->mStream->AdvanceKnownTracksTime(endPosition);
  }
}

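// Called on the owner thread whenever data is pushed into (or a queue
// finishes in) mAudioQueue/mVideoQueue, and once from Start(). Copies any
// pending samples to the source stream and finishes the stream when both
// queues are done.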
void
DecodedStream::SendData()
{
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome(), "Must be called after StartPlayback()");

  // Not yet created on the main thread. MDSM will try again later.
  if (!mData) {
    return;
  }

  // Nothing to do when the stream is finished.
  if (mData->mHaveSentFinish) {
    return;
  }

  SendAudio(mParams.mVolume, mSameOrigin, mPrincipalHandle);
  SendVideo(mSameOrigin, mPrincipalHandle);
  AdvanceTracks();

  bool finished = (!mInfo.HasAudio() || mAudioQueue.IsFinished()) &&
                  (!mInfo.HasVideo() || mVideoQueue.IsFinished());

  if (finished && !mData->mHaveSentFinish) {
    mData->mHaveSentFinish = true;
    mData->mStream->FinishPending();
  }
}

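// Reports the end time of the data written so far: for audio, derived from
// the number of frames written at the track rate; for video, mNextVideoTime.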
TimeUnit
DecodedStream::GetEndTime(TrackType aType) const
{
  AssertOwnerThread();
  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
    auto t = mStartTime.ref() + FramesToTimeUnit(
      mData->mAudioFramesWritten, mInfo.mAudio.mRate);
    if (t.IsValid()) {
      return t;
    }
  } else if (aType == TrackInfo::kVideoTrack && mData) {
    return mData->mNextVideoTime;
  }
  return TimeUnit::Zero();
}

TimeUnit
DecodedStream::GetPosition(TimeStamp* aTimeStamp) const
{
  AssertOwnerThread();
  // This is only called after MDSM starts playback. So mStartTime is
  // guaranteed to be something.
  MOZ_ASSERT(mStartTime.isSome());
  if (aTimeStamp) {
    *aTimeStamp = TimeStamp::Now();
  }
  return mStartTime.ref() + mLastOutputTime;
}

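// Invoked on the owner thread with the graph's latest output time. Updates
// mLastOutputTime and prunes audio samples that have already been played, so
// the queue doesn't grow without bound.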
void
DecodedStream::NotifyOutput(int64_t aTime)
{
  AssertOwnerThread();
  mLastOutputTime = FromMicroseconds(aTime);
  auto currentTime = GetPosition();

  // Remove audio samples that have been played by MSG from the queue.
  RefPtr<AudioData> a = mAudioQueue.PeekFront();
  while (a && a->mTime < currentTime) {
    RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
    a = mAudioQueue.PeekFront();
  }
}

void
DecodedStream::ConnectListener()
{
  AssertOwnerThread();

  mAudioPushListener = mAudioQueue.PushEvent().Connect(
    mOwnerThread, this, &DecodedStream::SendData);
  mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
    mOwnerThread, this, &DecodedStream::SendData);
  mVideoPushListener = mVideoQueue.PushEvent().Connect(
    mOwnerThread, this, &DecodedStream::SendData);
  mVideoFinishListener = mVideoQueue.FinishEvent().Connect(
    mOwnerThread, this, &DecodedStream::SendData);
}

void
DecodedStream::DisconnectListener()
{
  AssertOwnerThread();

  mAudioPushListener.Disconnect();
  mVideoPushListener.Disconnect();
  mAudioFinishListener.Disconnect();
  mVideoFinishListener.Disconnect();
}

nsCString
DecodedStream::GetDebugInfo()
{
  AssertOwnerThread();
  int64_t startTime = mStartTime.isSome() ? mStartTime->ToMicroseconds() : -1;
  auto str =
    nsPrintfCString("DecodedStream=%p mStartTime=%" PRId64
                    " mLastOutputTime=%" PRId64 " mPlaying=%d mData=%p",
                    this,
                    startTime,
                    mLastOutputTime.ToMicroseconds(),
                    mPlaying,
                    mData.get());
  if (mData) {
    AppendStringIfNotEmpty(str, mData->GetDebugInfo());
  }
  return std::move(str);
}

} // namespace mozilla