/src/mozilla-central/dom/media/MediaDecoderStateMachine.h
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*

Each media element for a media file has one thread called the "audio thread".

The audio thread writes the decoded audio data to the audio
hardware. This is done in a separate thread to ensure that the
audio hardware gets a constant stream of data without
interruption due to decoding or display. At some point
AudioStream will be refactored to have a callback interface
where it asks for data and this thread will no longer be
needed.

The element/state machine also has a TaskQueue which runs in a
SharedThreadPool that is shared with all other elements/decoders. The state
machine dispatches tasks to this to call into the MediaDecoderReader to
request decoded audio or video data. The Reader will call back with decoded
samples when it has them available, and the state machine places the decoded
samples into its queues for the consuming threads to pull from.

The MediaDecoderReader can choose to decode asynchronously, or synchronously
and return requested samples synchronously inside its Request*Data()
functions via callback. Asynchronous decoding is preferred, and should be
used for any new readers.
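
As a rough illustration of the asynchronous pattern (a simplified sketch;
the real plumbing goes through ReaderProxy and MozPromiseRequestHolder, and
the lambda bodies here are purely illustrative):

  mReader->RequestAudioData()
    ->Then(OwnerThread(), __func__,
           [this](RefPtr<AudioData> aAudio) { PushAudio(aAudio); },
           [this](const MediaResult& aError) { DecodeError(aError); });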

Synchronisation of state between the threads is done via a monitor owned
by MediaDecoder.

The lifetime of the audio thread is controlled by the state machine when
it runs on the shared state machine thread. When playback needs to occur
the audio thread is created and an event dispatched to run it. The audio
thread exits when audio playback is completed or no longer required.

A/V synchronisation is handled by the state machine. It examines the audio
playback time and compares this to the next frame in the queue of video
frames. If it is time to play the video frame it is then displayed;
otherwise it schedules the state machine to run again at the time of the
next frame.
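
In sketch form, that per-cycle A/V sync decision looks roughly like this
(illustrative only; RenderVideoFrame is a stand-in for the real compositing
path):

  TimeUnit clock = GetClock();                    // audio-driven master clock
  RefPtr<VideoData> frame = VideoQueue().PeekFront();
  if (frame && frame->mTime <= clock) {
    RenderVideoFrame(frame);                      // due now: display it
  } else if (frame) {
    ScheduleStateMachineIn(frame->mTime - clock); // re-run when it is due
  }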

Frame skipping is done in the following ways:

  1) The state machine will skip all frames in the video queue whose
     display time is less than the current audio time. This ensures
     the correct frame for the current time is always displayed.

  2) The decode tasks will stop decoding interframes and read to the
     next keyframe if they determine that decoding the remaining
     interframes will cause playback issues (see the sketch below).
     They detect this by checking:
      a) whether the amount of audio data in the audio queue drops
         below a threshold whereby audio may start to skip; or
      b) whether the video queue drops below a threshold where it
         would be decoding video data that won't be displayed due
         to the decode thread dropping the frame immediately.
  TODO: In future we should only do this when the Reader is decoding
        synchronously.
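
A sketch of that skip-to-keyframe check (names and thresholds are
illustrative; the real heuristics live in the reader and state machine):

  bool ShouldSkipToNextKeyframe()
  {
    bool audioLow = AudioQueue().Duration() < kLowAudioThresholdUsecs; // 2a
    bool videoLate = VideoQueue().GetSize() < kLowVideoFrames;         // 2b
    return audioLow || videoLate;
  }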

When hardware-accelerated graphics are not available, YCbCr conversion
is done on the decode task queue when video frames are decoded.

The decode task queue pushes decoded audio and video frames into two
separate queues - one for audio and one for video. These are kept
separate to make it easy to constantly feed audio data to the audio
hardware while allowing frame skipping of video data. These queues are
threadsafe, and none of the decode, audio, or state machine threads
should be able to monopolize them and cause starvation of the other
threads.

Both queues are bounded by a maximum size. When this size is reached
the decode tasks will no longer request video or audio, depending on the
queue that has reached the threshold. If both queues are full, no more
decode tasks will be dispatched to the decode task queue, so other
decoders will have an opportunity to run.
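
Schematically, each completed decode task decides what to request next (a
simplified sketch; the real checks are HaveEnoughDecodedAudio() and
HaveEnoughDecodedVideo(), declared below):

  if (!HaveEnoughDecodedAudio()) {
    RequestAudioData();               // audio queue is below its bound
  }
  if (!HaveEnoughDecodedVideo()) {
    RequestVideoData(GetMediaTime()); // video queue is below its bound
  }
  // If both queues are full, nothing is requested until a consumer pops.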

During playback the audio thread will be idle (via a Wait() on the
monitor) if the audio queue is empty. Otherwise it constantly pops
audio data off the queue and plays it with a blocking write to the audio
hardware (via AudioStream).
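
Conceptually the audio thread's loop looks like this (a sketch, not the
actual implementation; WriteToAudioStream is a stand-in for the blocking
AudioStream write):

  while (playing) {
    if (AudioQueue().GetSize() == 0) {
      monitor.Wait();                 // idle until the decoder pushes data
      continue;
    }
    RefPtr<AudioData> audio = AudioQueue().PopFront();
    WriteToAudioStream(audio);        // blocking write paces playback
  }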

*/
#if !defined(MediaDecoderStateMachine_h__)
#define MediaDecoderStateMachine_h__

#include "mozilla/Attributes.h"
#include "mozilla/ReentrantMonitor.h"
#include "mozilla/StateMirroring.h"

#include "nsAutoPtr.h"
#include "nsThreadUtils.h"
#include "MediaDecoder.h"
#include "MediaDecoderOwner.h"
#include "MediaEventSource.h"
#include "MediaFormatReader.h"
#include "MediaMetadataManager.h"
#include "MediaQueue.h"
#include "MediaStatistics.h"
#include "MediaTimer.h"
#include "ImageContainer.h"
#include "SeekJob.h"

namespace mozilla {

namespace media {
class MediaSink;
}

class AbstractThread;
class AudioSegment;
class DecodedStream;
class OutputStreamManager;
class ReaderProxy;
class TaskQueue;

extern LazyLogModule gMediaDecoderLog;

struct MediaPlaybackEvent
{
  enum EventType
  {
    PlaybackStarted,
    PlaybackStopped,
    PlaybackProgressed,
    PlaybackEnded,
    SeekStarted,
    Loop,
    Invalidate,
    EnterVideoSuspend,
    ExitVideoSuspend,
    StartVideoSuspendTimer,
    CancelVideoSuspendTimer,
    VideoOnlySeekBegin,
    VideoOnlySeekCompleted,
  } mType;

  using DataType = Variant<Nothing, int64_t>;
  DataType mData;

  MOZ_IMPLICIT MediaPlaybackEvent(EventType aType)
    : mType(aType)
    , mData(Nothing{})
  {
  }

  template<typename T>
  MediaPlaybackEvent(EventType aType, T&& aArg)
    : mType(aType)
    , mData(std::forward<T>(aArg))
  {
  }
};

enum class VideoDecodeMode : uint8_t
{
  Normal,
  Suspend
};

DDLoggedTypeDeclName(MediaDecoderStateMachine);
/*
The state machine class. This manages decoding and seeking in the
MediaDecoderReader on the decode task queue, handles A/V sync on the shared
state machine thread, and controls the audio "push" thread.

All internal state is synchronised via the decoder monitor. State changes
are propagated by scheduling the state machine to run another cycle on the
shared state machine thread.

See MediaDecoder.h for more details.
*/
class MediaDecoderStateMachine
  : public DecoderDoctorLifeLogger<MediaDecoderStateMachine>
{
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDecoderStateMachine)

  using TrackSet = MediaFormatReader::TrackSet;

public:
  typedef MediaDecoderOwner::NextFrameStatus NextFrameStatus;
  typedef mozilla::layers::ImageContainer::FrameID FrameID;
  MediaDecoderStateMachine(MediaDecoder* aDecoder, MediaFormatReader* aReader);

  nsresult Init(MediaDecoder* aDecoder);

  // Enumeration for the valid decoding states
  enum State
  {
    DECODER_STATE_DECODING_METADATA,
    DECODER_STATE_DORMANT,
    DECODER_STATE_DECODING_FIRSTFRAME,
    DECODER_STATE_DECODING,
    DECODER_STATE_SEEKING,
    DECODER_STATE_BUFFERING,
    DECODER_STATE_COMPLETED,
    DECODER_STATE_SHUTDOWN
  };

  // Returns the state machine task queue.
  TaskQueue* OwnerThread() const { return mTaskQueue; }

  RefPtr<MediaDecoder::DebugInfoPromise> RequestDebugInfo();

  void AddOutputStream(ProcessedMediaStream* aStream,
                       TrackID aNextAvailableTrackID,
                       bool aFinishWhenEnded);
  // Remove an output stream added with AddOutputStream.
  void RemoveOutputStream(MediaStream* aStream);
  TrackID NextAvailableTrackIDFor(MediaStream* aOutputStream) const;

  // Seeks the decoder to aTarget asynchronously.
  RefPtr<MediaDecoder::SeekPromise> InvokeSeek(const SeekTarget& aTarget);

  void DispatchSetPlaybackRate(double aPlaybackRate)
  {
    OwnerThread()->DispatchStateChange(
      NewRunnableMethod<double>("MediaDecoderStateMachine::SetPlaybackRate",
                                this,
                                &MediaDecoderStateMachine::SetPlaybackRate,
                                aPlaybackRate));
  }
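
  // For example, a main-thread caller (hypothetically, MediaDecoder reacting
  // to a playbackRate change on the media element) needs no locking here; the
  // change is simply re-dispatched to the state machine's task queue:
  //
  //   mStateMachine->DispatchSetPlaybackRate(0.5);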

  RefPtr<ShutdownPromise> BeginShutdown();

  // Set the media fragment end time.
  void DispatchSetFragmentEndTime(const media::TimeUnit& aEndTime)
  {
    RefPtr<MediaDecoderStateMachine> self = this;
    nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
      "MediaDecoderStateMachine::DispatchSetFragmentEndTime",
      [self, aEndTime]() {
        // A negative number means we don't have a fragment end time at all.
        self->mFragmentEndTime = aEndTime >= media::TimeUnit::Zero()
                                   ? aEndTime
                                   : media::TimeUnit::Invalid();
      });
    nsresult rv = OwnerThread()->Dispatch(r.forget());
    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
    Unused << rv;
  }

  void DispatchCanPlayThrough(bool aCanPlayThrough)
  {
    RefPtr<MediaDecoderStateMachine> self = this;
    nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
      "MediaDecoderStateMachine::DispatchCanPlayThrough",
      [self, aCanPlayThrough]() {
        self->mCanPlayThrough = aCanPlayThrough;
      });
    OwnerThread()->DispatchStateChange(r.forget());
  }

  void DispatchIsLiveStream(bool aIsLiveStream)
  {
    RefPtr<MediaDecoderStateMachine> self = this;
    nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction(
      "MediaDecoderStateMachine::DispatchIsLiveStream",
      [self, aIsLiveStream]() {
        self->mIsLiveStream = aIsLiveStream;
      });
    OwnerThread()->DispatchStateChange(r.forget());
  }

  TimedMetadataEventSource& TimedMetadataEvent() {
    return mMetadataManager.TimedMetadataEvent();
  }

  MediaEventSource<void>& OnMediaNotSeekable() const;

  MediaEventSourceExc<UniquePtr<MediaInfo>,
                      UniquePtr<MetadataTags>,
                      MediaDecoderEventVisibility>&
  MetadataLoadedEvent() { return mMetadataLoadedEvent; }

  MediaEventSourceExc<nsAutoPtr<MediaInfo>,
                      MediaDecoderEventVisibility>&
  FirstFrameLoadedEvent() { return mFirstFrameLoadedEvent; }

  MediaEventSource<MediaPlaybackEvent>& OnPlaybackEvent()
  {
    return mOnPlaybackEvent;
  }
  MediaEventSource<MediaResult>&
  OnPlaybackErrorEvent() { return mOnPlaybackErrorEvent; }

  MediaEventSource<DecoderDoctorEvent>&
  OnDecoderDoctorEvent() { return mOnDecoderDoctorEvent; }

  MediaEventSource<NextFrameStatus>&
  OnNextFrameStatus() { return mOnNextFrameStatus; }

  size_t SizeOfVideoQueue() const;

  size_t SizeOfAudioQueue() const;

  // Sets the video decode mode. Used by the suspend-video-decoder feature.
  void SetVideoDecodeMode(VideoDecodeMode aMode);

private:
  class StateObject;
  class DecodeMetadataState;
  class DormantState;
  class DecodingFirstFrameState;
  class DecodingState;
  class SeekingState;
  class AccurateSeekingState;
  class NextFrameSeekingState;
  class NextFrameSeekingFromDormantState;
  class VideoOnlySeekingState;
  class BufferingState;
  class CompletedState;
  class ShutdownState;

  static const char* ToStateStr(State aState);
  const char* ToStateStr();

  nsCString GetDebugInfo();

  // Functions used by assertions to ensure we're calling things
  // on the appropriate threads.
  bool OnTaskQueue() const;

  // Initialization that needs to happen on the task queue. This is the first
  // task that gets run on the task queue, and is dispatched from the MDSM
  // constructor immediately after the task queue is created.
  void InitializationTask(MediaDecoder* aDecoder);

  void SetAudioCaptured(bool aCaptured);

  RefPtr<MediaDecoder::SeekPromise> Seek(const SeekTarget& aTarget);

  RefPtr<ShutdownPromise> Shutdown();

  RefPtr<ShutdownPromise> FinishShutdown();

  // Update the playback position. This can result in a timeupdate event
  // and an invalidate of the frame being dispatched asynchronously if
  // there is no such event currently queued.
  // Only called on the decoder thread. Must be called with
  // the decode monitor held.
  void UpdatePlaybackPosition(const media::TimeUnit& aTime);

  bool HasAudio() const { return mInfo.ref().HasAudio(); }
  bool HasVideo() const { return mInfo.ref().HasVideo(); }
  const MediaInfo& Info() const { return mInfo.ref(); }

  // Schedules the shared state machine thread to run the state machine.
  void ScheduleStateMachine();

  // Invokes ScheduleStateMachine to run in |aTime|,
  // unless it's already scheduled to run earlier, in which case the
  // request is discarded.
  void ScheduleStateMachineIn(const media::TimeUnit& aTime);

  bool HaveEnoughDecodedAudio();
  bool HaveEnoughDecodedVideo();

  // Returns true if we're currently playing. The decoder monitor must
  // be held.
  bool IsPlaying() const;

  // Sets mMediaSeekable to false.
  void SetMediaNotSeekable();

  // Resets all states related to decoding and aborts all pending requests
  // to the decoders.
  void ResetDecode(TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack,
                                               TrackInfo::kVideoTrack));

  void SetVideoDecodeModeInternal(VideoDecodeMode aMode);

protected:
  virtual ~MediaDecoderStateMachine();

  void BufferedRangeUpdated();

  void ReaderSuspendedChanged();

  // Inserts a sample into the Audio/Video queue.
  // aSample must not be null.
  void PushAudio(AudioData* aSample);
  void PushVideo(VideoData* aSample);

  void OnAudioPopped(const RefPtr<AudioData>& aSample);
  void OnVideoPopped(const RefPtr<VideoData>& aSample);

  void AudioAudibleChanged(bool aAudible);

  void VolumeChanged();
  void SetPlaybackRate(double aPlaybackRate);
  void PreservesPitchChanged();
  void LoopingChanged();

  MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
  MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }

  // True if we are low in decoded audio/video data.
  // May not be invoked when mReader->UseBufferingHeuristics() is false.
  bool HasLowDecodedData();

  bool HasLowDecodedAudio();

  bool HasLowDecodedVideo();

  bool OutOfDecodedAudio();

  bool OutOfDecodedVideo()
  {
    MOZ_ASSERT(OnTaskQueue());
    return IsVideoDecoding() && VideoQueue().GetSize() <= 1;
  }

  // Returns true if we're running low on buffered data.
  bool HasLowBufferedData();

  // Returns true if we have less than aThreshold of buffered data available.
  bool HasLowBufferedData(const media::TimeUnit& aThreshold);

  // Return the current time, either the audio clock if available (if the media
  // has audio, and the playback is possible), or a clock for the video.
  // Called on the state machine thread.
  // If aTimeStamp is non-null, set *aTimeStamp to the TimeStamp corresponding
  // to the returned stream time.
  media::TimeUnit GetClock(TimeStamp* aTimeStamp = nullptr) const;

  // Update only the state machine's current playback position (and duration,
  // if unknown). Does not update the playback position on the decoder or
  // media element -- use UpdatePlaybackPosition for that. Called on the state
  // machine thread, caller must hold the decoder lock.
  void UpdatePlaybackPositionInternal(const media::TimeUnit& aTime);

  // Update playback position and trigger next update by default time period.
  // Called on the state machine thread.
  void UpdatePlaybackPositionPeriodically();

  media::MediaSink* CreateAudioSink();

  // Always creates a media sink, which contains an AudioSink or StreamSink
  // inside.
  already_AddRefed<media::MediaSink> CreateMediaSink(bool aAudioCaptured);

  // Stops the media sink and shuts it down.
  // The decoder monitor must be held with exactly one lock count.
  // Called on the state machine thread.
  void StopMediaSink();

  // Create and start the media sink.
  // The decoder monitor must be held with exactly one lock count.
  // Called on the state machine thread.
  void StartMediaSink();

  // Notification method invoked when mPlayState changes.
  void PlayStateChanged();

  // Notification method invoked when mIsVisible changes.
  void VisibilityChanged();

  // Sets internal state which causes playback of media to pause.
  // The decoder monitor must be held.
  void StopPlayback();

  // If the conditions are right, sets internal state which causes playback
  // of media to begin or resume.
  // Must be called with the decode monitor held.
  void MaybeStartPlayback();

  // Moves the decoder into the shutdown state, and dispatches an error
  // event to the media element. This begins shutting down the decoder.
  // The decoder monitor must be held. This is only called on the
  // decode thread.
  void DecodeError(const MediaResult& aError);

  void EnqueueFirstFrameLoadedEvent();

  // Start a task to decode audio.
  void RequestAudioData();

  // Start a task to decode video.
  void RequestVideoData(const media::TimeUnit& aCurrentTime);

  void WaitForData(MediaData::Type aType);

  bool IsRequestingAudioData() const { return mAudioDataRequest.Exists(); }
  bool IsRequestingVideoData() const { return mVideoDataRequest.Exists(); }
  bool IsWaitingAudioData() const { return mAudioWaitRequest.Exists(); }
  bool IsWaitingVideoData() const { return mVideoWaitRequest.Exists(); }

  // Returns the "media time". This is the absolute time which the media
  // playback has reached. i.e. this returns values in the range
  // [mStartTime, mEndTime], and mStartTime will not be 0 if the media does
  // not start at 0. Note this is different from the "current playback
  // position", which is in the range [0, duration].
  media::TimeUnit GetMediaTime() const
  {
    MOZ_ASSERT(OnTaskQueue());
    return mCurrentPosition;
  }
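
  // For example, if the media's first sample starts at 5s (mStartTime == 5s)
  // and playback is 2s in, GetMediaTime() reports 7s while the element's
  // "current playback position" is 2s.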

  // Returns an upper bound on the number of microseconds of audio that is
  // decoded and playable. This is the sum of the number of usecs of audio which
  // is decoded and in the reader's audio queue, and the usecs of unplayed audio
  // which has been pushed to the audio hardware for playback. Note that after
  // calling this, the audio hardware may play some of the audio pushed to
  // hardware, so this can only be used as an upper bound. The decoder monitor
  // must be held when calling this. Called on the decode thread.
  media::TimeUnit GetDecodedAudioDuration();

  void FinishDecodeFirstFrame();

  // Performs one "cycle" of the state machine.
  void RunStateMachine();

  bool IsStateMachineScheduled() const;

  // These return true if the respective stream's decode has not yet reached
  // the end of stream.
  bool IsAudioDecoding();
  bool IsVideoDecoding();

private:
  // Resolved by the MediaSink to signal that all outstanding audio/video
  // work is complete and to identify which part (audio or video) of the sink
  // is shutting down.
  void OnMediaSinkAudioComplete();
  void OnMediaSinkVideoComplete();

  // Rejected by the MediaSink to signal errors for audio/video.
  void OnMediaSinkAudioError(nsresult aResult);
  void OnMediaSinkVideoError();

  void* const mDecoderID;
  const RefPtr<AbstractThread> mAbstractMainThread;
  const RefPtr<FrameStatistics> mFrameStats;
  const RefPtr<VideoFrameContainer> mVideoFrameContainer;

  // Task queue for running the state machine.
  RefPtr<TaskQueue> mTaskQueue;

  // State-watching manager.
  WatchManager<MediaDecoderStateMachine> mWatchManager;

  // True if we've dispatched a task to run the state machine but the task has
  // yet to run.
  bool mDispatchedStateMachine;

  // Used to schedule another state machine cycle at a specific target time.
  DelayedScheduler mDelayedScheduler;

  // Queue of audio frames. This queue is threadsafe, and is accessed from
  // the audio, decoder, state machine, and main threads.
  MediaQueue<AudioData> mAudioQueue;
  // Queue of video frames. This queue is threadsafe, and is accessed from
  // the decoder, state machine, and main threads.
  MediaQueue<VideoData> mVideoQueue;

  UniquePtr<StateObject> mStateObj;

  media::TimeUnit Duration() const
  {
    MOZ_ASSERT(OnTaskQueue());
    return mDuration.Ref().ref();
  }

  // FrameID which increments every time a frame is pushed to our queue.
  FrameID mCurrentFrameID;

  // Media Fragment end time.
  media::TimeUnit mFragmentEndTime = media::TimeUnit::Invalid();

  // The media sink resource. Used on the state machine thread.
  RefPtr<media::MediaSink> mMediaSink;

  const RefPtr<ReaderProxy> mReader;

  // The end time of the last audio frame that's been pushed onto the media
  // sink in microseconds. This will approximately be the end time of the
  // audio stream, unless another frame is pushed to the hardware.
  media::TimeUnit AudioEndTime() const;

  // The end time of the last rendered video frame that's been sent to
  // compositor.
  media::TimeUnit VideoEndTime() const;

  // The end time of the last decoded audio frame. This signifies the end of
  // decoded audio data. Used to check if we are low in decoded data.
  media::TimeUnit mDecodedAudioEndTime;

  // The end time of the last decoded video frame. Used to check if we are low
  // on decoded video data.
  media::TimeUnit mDecodedVideoEndTime;

  // Playback rate. 1.0 : normal speed, 0.5 : two times slower.
  double mPlaybackRate;

  // If we've got more than this number of decoded video frames waiting in
  // the video queue, we will not decode any more video frames until some have
  // been consumed by the play state machine thread.
  // Must hold monitor.
  uint32_t GetAmpleVideoFrames() const;

  // Our "ample" audio threshold. Once we have this much audio decoded, we
  // pause decoding.
  media::TimeUnit mAmpleAudioThreshold;

  // Only one of a given pair of ({Audio,Video}DataPromise, WaitForDataPromise)
  // should exist at any given moment.
  using AudioDataPromise = MediaFormatReader::AudioDataPromise;
  using VideoDataPromise = MediaFormatReader::VideoDataPromise;
  using WaitForDataPromise = MediaFormatReader::WaitForDataPromise;
  MozPromiseRequestHolder<AudioDataPromise> mAudioDataRequest;
  MozPromiseRequestHolder<VideoDataPromise> mVideoDataRequest;
  MozPromiseRequestHolder<WaitForDataPromise> mAudioWaitRequest;
  MozPromiseRequestHolder<WaitForDataPromise> mVideoWaitRequest;

  const char* AudioRequestStatus() const;
  const char* VideoRequestStatus() const;

  void OnSuspendTimerResolved();
  void CancelSuspendTimer();

  bool mCanPlayThrough = false;

  bool mIsLiveStream = false;

  // True if we shouldn't play our audio (but still write it to any capturing
  // streams). When this is true, the audio thread will never start again after
  // it has stopped.
  bool mAudioCaptured;

  // True if all audio frames are already rendered.
  bool mAudioCompleted = false;

  // True if all video frames are already rendered.
  bool mVideoCompleted = false;

  // True if we should not decode/preroll unnecessary samples unless playback
  // has been requested. "Prerolling" in this context refers to decoding and
  // buffering samples in advance of when they're needed for playback.
  // This flag is set for preload=metadata media, and means we won't
  // decode more than the first video frame and first block of audio samples
  // for that media when we start up, or after a seek. When Play() is called,
  // we reset this flag, as we assume the user is playing the media, so
  // prerolling is appropriate then. This flag is used to reduce the memory
  // and CPU overhead of prerolling samples for media elements that may
  // never play.
  bool mMinimizePreroll;

  // Stores presentation info required for playback.
  Maybe<MediaInfo> mInfo;

  mozilla::MediaMetadataManager mMetadataManager;

  // True if we've decoded the first frames (thus having the start time) and
  // notified the FirstFrameLoaded event. Note we can't initiate a seek until
  // the start time is known, which happens when the first frames are decoded
  // or we are playing an MSE stream (the start time is always assumed 0).
  bool mSentFirstFrameLoadedEvent;

  // True if video decoding is suspended.
  bool mVideoDecodeSuspended;

  // True if the media is seekable (i.e. supports random access).
  bool mMediaSeekable = true;

  // True if the media is seekable only in buffered ranges.
  bool mMediaSeekableOnlyInBufferedRanges = false;

  // Track enabling video decode suspension via timer
  DelayedScheduler mVideoDecodeSuspendTimer;

  // Data about MediaStreams that are being fed by the decoder.
  const RefPtr<OutputStreamManager> mOutputStreamManager;

  // Track the current video decode mode.
  VideoDecodeMode mVideoDecodeMode;

  // Tracks completion & errors for audio/video separately.
  MozPromiseRequestHolder<GenericPromise> mMediaSinkAudioPromise;
  MozPromiseRequestHolder<GenericPromise> mMediaSinkVideoPromise;

  MediaEventListener mAudioQueueListener;
  MediaEventListener mVideoQueueListener;
  MediaEventListener mAudibleListener;
  MediaEventListener mOnMediaNotSeekable;

  MediaEventProducerExc<UniquePtr<MediaInfo>,
                        UniquePtr<MetadataTags>,
                        MediaDecoderEventVisibility> mMetadataLoadedEvent;
  MediaEventProducerExc<nsAutoPtr<MediaInfo>,
                        MediaDecoderEventVisibility> mFirstFrameLoadedEvent;

  MediaEventProducer<MediaPlaybackEvent> mOnPlaybackEvent;
  MediaEventProducer<MediaResult> mOnPlaybackErrorEvent;

  MediaEventProducer<DecoderDoctorEvent> mOnDecoderDoctorEvent;

  MediaEventProducer<NextFrameStatus> mOnNextFrameStatus;

  const bool mIsMSE;

  bool mSeamlessLoopingAllowed;

  // Current playback position in the stream in bytes.
  int64_t mPlaybackOffset = 0;

private:
  // The buffered range. Mirrored from the decoder thread.
  Mirror<media::TimeIntervals> mBuffered;

  // The current play state, mirrored from the main thread.
  Mirror<MediaDecoder::PlayState> mPlayState;

  // Volume of playback. 0.0 = muted. 1.0 = full volume.
  Mirror<double> mVolume;

  // Pitch preservation for the playback rate.
  Mirror<bool> mPreservesPitch;

  // Whether to seek back to the start of the media resource
  // upon reaching the end.
  Mirror<bool> mLooping;

  // True if the media is same-origin with the element. Data can only be
  // passed to MediaStreams when this is true.
  Mirror<bool> mSameOriginMedia;

  // An identifier for the principal of the media. Used to track when
  // main-thread induced principal changes get reflected on MSG thread.
  Mirror<PrincipalHandle> mMediaPrincipalHandle;

  // Duration of the media. This is guaranteed to be non-null after we finish
  // decoding the first frame.
  Canonical<media::NullableTimeUnit> mDuration;

  // The time of the current frame, corresponding to the "current
  // playback position" in HTML5. This is referenced from 0, which is the
  // initial playback position.
  Canonical<media::TimeUnit> mCurrentPosition;

  // Used to distinguish whether the audio is producing sound.
  Canonical<bool> mIsAudioDataAudible;

public:
  AbstractCanonical<media::TimeIntervals>* CanonicalBuffered() const;

  AbstractCanonical<media::NullableTimeUnit>* CanonicalDuration()
  {
    return &mDuration;
  }
  AbstractCanonical<media::TimeUnit>* CanonicalCurrentPosition()
  {
    return &mCurrentPosition;
  }
  AbstractCanonical<bool>* CanonicalIsAudioDataAudible()
  {
    return &mIsAudioDataAudible;
  }
};

} // namespace mozilla

#endif