/src/mozilla-central/dom/media/MediaDecoderStateMachine.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include <algorithm> |
8 | | #include <stdint.h> |
9 | | #include <utility> |
10 | | |
11 | | #include "mediasink/AudioSink.h" |
12 | | #include "mediasink/AudioSinkWrapper.h" |
13 | | #include "mediasink/DecodedStream.h" |
14 | | #include "mediasink/OutputStreamManager.h" |
15 | | #include "mediasink/VideoSink.h" |
16 | | #include "mozilla/Logging.h" |
17 | | #include "mozilla/MathAlgorithms.h" |
18 | | #include "mozilla/NotNull.h" |
19 | | #include "mozilla/SharedThreadPool.h" |
20 | | #include "mozilla/Sprintf.h" |
21 | | #include "mozilla/StaticPrefs.h" |
22 | | #include "mozilla/Telemetry.h" |
23 | | #include "mozilla/TaskQueue.h" |
24 | | #include "mozilla/Tuple.h" |
25 | | #include "nsIMemoryReporter.h" |
26 | | #include "nsPrintfCString.h" |
27 | | #include "nsTArray.h" |
28 | | #include "ImageContainer.h" |
29 | | #include "MediaDecoder.h" |
30 | | #include "MediaDecoderStateMachine.h" |
31 | | #include "MediaShutdownManager.h" |
32 | | #include "MediaTimer.h" |
33 | | #include "ReaderProxy.h" |
34 | | #include "TimeUnits.h" |
35 | | #include "VideoUtils.h" |
36 | | |
37 | | namespace mozilla { |
38 | | |
39 | | using namespace mozilla::media; |
40 | | |
41 | | #define NS_DispatchToMainThread(...) CompileError_UseAbstractThreadDispatchInstead |
42 | | |
43 | | // avoid redefined macro in unified build |
44 | | #undef FMT |
45 | | #undef LOG |
46 | | #undef LOGV |
47 | | #undef LOGW |
48 | | #undef LOGE |
49 | | #undef SFMT |
50 | | #undef SLOG |
51 | | #undef SLOGW |
52 | | #undef SLOGE |
53 | | |
54 | 0 | #define FMT(x, ...) "Decoder=%p " x, mDecoderID, ##__VA_ARGS__ |
55 | | #define LOG(x, ...) \ |
56 | 0 | DDMOZ_LOG(gMediaDecoderLog, \ |
57 | 0 | LogLevel::Debug, \ |
58 | 0 | "Decoder=%p " x, \ |
59 | 0 | mDecoderID, \ |
60 | 0 | ##__VA_ARGS__) |
61 | | #define LOGV(x, ...) \ |
62 | 0 | DDMOZ_LOG(gMediaDecoderLog, \ |
63 | 0 | LogLevel::Verbose, \ |
64 | 0 | "Decoder=%p " x, \ |
65 | 0 | mDecoderID, \ |
66 | 0 | ##__VA_ARGS__) |
67 | 0 | #define LOGW(x, ...) NS_WARNING(nsPrintfCString(FMT(x, ##__VA_ARGS__)).get()) |
68 | 0 | #define LOGE(x, ...) NS_DebugBreak(NS_DEBUG_WARNING, nsPrintfCString(FMT(x, ##__VA_ARGS__)).get(), nullptr, __FILE__, __LINE__) |
69 | | |
70 | | // Used by StateObject and its sub-classes |
71 | 0 | #define SFMT(x, ...) "Decoder=%p state=%s " x, mMaster->mDecoderID, ToStateStr(GetState()), ##__VA_ARGS__ |
72 | | #define SLOG(x, ...) \ |
73 | 0 | DDMOZ_LOGEX(mMaster, \ |
74 | 0 | gMediaDecoderLog, \ |
75 | 0 | LogLevel::Debug, \ |
76 | 0 | "state=%s " x, \ |
77 | 0 | ToStateStr(GetState()), \ |
78 | 0 | ##__VA_ARGS__) |
79 | 0 | #define SLOGW(x, ...) NS_WARNING(nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get()) |
80 | 0 | #define SLOGE(x, ...) NS_DebugBreak(NS_DEBUG_WARNING, nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get(), nullptr, __FILE__, __LINE__) |
81 | | |
82 | | // Certain constants get stored as member variables and then adjusted by various |
83 | | // scale factors on a per-decoder basis. We want to make sure to avoid using these |
84 | | // constants directly, so we put them in a namespace. |
85 | | namespace detail { |
86 | | |
87 | | // Resume a suspended video decoder to the current playback position plus this |
88 | | // time premium for compensating the seeking delay. |
89 | | static constexpr auto RESUME_VIDEO_PREMIUM = TimeUnit::FromMicroseconds(125000); |
90 | | |
91 | | static const int64_t AMPLE_AUDIO_USECS = 2000000; |
92 | | |
93 | | // If more than this much decoded audio is queued, we'll hold off |
94 | | // decoding more audio. |
95 | | static constexpr auto AMPLE_AUDIO_THRESHOLD = TimeUnit::FromMicroseconds(AMPLE_AUDIO_USECS); |
96 | | |
97 | | } // namespace detail |
98 | | |
99 | | // If we have fewer than LOW_VIDEO_FRAMES decoded frames, and |
100 | | // we're not "prerolling video", we'll skip the video up to the next keyframe |
101 | | // which is at or after the current playback position. |
102 | | static const uint32_t LOW_VIDEO_FRAMES = 2; |
103 | | |
104 | | // Arbitrary "frame duration" when playing only audio. |
105 | | static const int AUDIO_DURATION_USECS = 40000; |
106 | | |
107 | | namespace detail { |
108 | | |
109 | | // If we have less than this much buffered data available, we'll consider |
110 | | // ourselves to be running low on buffered data. We determine how much |
111 | | // buffered data we have remaining using the reader's GetBuffered() |
112 | | // implementation. |
113 | | static const int64_t LOW_BUFFER_THRESHOLD_USECS = 5000000; |
114 | | |
115 | | static constexpr auto LOW_BUFFER_THRESHOLD = TimeUnit::FromMicroseconds(LOW_BUFFER_THRESHOLD_USECS); |
116 | | |
117 | | // LOW_BUFFER_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS, otherwise |
118 | | // the skip-to-keyframe logic can activate when we're running low on data. |
119 | | static_assert(LOW_BUFFER_THRESHOLD_USECS > AMPLE_AUDIO_USECS, |
120 | | "LOW_BUFFER_THRESHOLD_USECS is too small"); |
121 | | |
122 | | } // namespace detail |
123 | | |
124 | | // Amount of excess data to add in to the "should we buffer" calculation. |
125 | | static constexpr auto EXHAUSTED_DATA_MARGIN = TimeUnit::FromMicroseconds(100000); |
126 | | |
127 | | static const uint32_t MIN_VIDEO_QUEUE_SIZE = 3; |
128 | | static const uint32_t MAX_VIDEO_QUEUE_SIZE = 10; |
129 | | #ifdef MOZ_APPLEMEDIA |
130 | | static const uint32_t HW_VIDEO_QUEUE_SIZE = 10; |
131 | | #else |
132 | | static const uint32_t HW_VIDEO_QUEUE_SIZE = 3; |
133 | | #endif |
134 | | static const uint32_t VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE = 9999; |
135 | | |
136 | | static uint32_t sVideoQueueDefaultSize = MAX_VIDEO_QUEUE_SIZE; |
137 | | static uint32_t sVideoQueueHWAccelSize = HW_VIDEO_QUEUE_SIZE; |
138 | | static uint32_t sVideoQueueSendToCompositorSize = |
139 | | VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE; |
140 | | |
141 | | static void InitVideoQueuePrefs() |
142 | 0 | { |
143 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
144 | 0 | static bool sPrefInit = false; |
145 | 0 | if (!sPrefInit) { |
146 | 0 | sPrefInit = true; |
147 | 0 | sVideoQueueDefaultSize = Preferences::GetUint( |
148 | 0 | "media.video-queue.default-size", MAX_VIDEO_QUEUE_SIZE); |
149 | 0 | sVideoQueueHWAccelSize = Preferences::GetUint( |
150 | 0 | "media.video-queue.hw-accel-size", HW_VIDEO_QUEUE_SIZE); |
151 | 0 | sVideoQueueSendToCompositorSize = Preferences::GetUint( |
152 | 0 | "media.video-queue.send-to-compositor-size", VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE); |
153 | 0 | } |
154 | 0 | } |
155 | | |
156 | | // Delay, in milliseconds, that tabs needs to be in background before video |
157 | | // decoding is suspended. |
158 | | static TimeDuration |
159 | | SuspendBackgroundVideoDelay() |
160 | 0 | { |
161 | 0 | return TimeDuration::FromMilliseconds( |
162 | 0 | StaticPrefs::MediaSuspendBkgndVideoDelayMs()); |
163 | 0 | } |
164 | | |
165 | | class MediaDecoderStateMachine::StateObject |
166 | | { |
167 | | public: |
168 | 0 | virtual ~StateObject() { } |
169 | 0 | virtual void Exit() { } // Exit action. |
170 | 0 | virtual void Step() { } // Perform a 'cycle' of this state object. |
171 | | virtual State GetState() const = 0; |
172 | | |
173 | | // Event handlers for various events. |
174 | 0 | virtual void HandleAudioCaptured() { } |
175 | | virtual void HandleAudioDecoded(AudioData* aAudio) |
176 | 0 | { |
177 | 0 | Crash("Unexpected event!", __func__); |
178 | 0 | } |
179 | | virtual void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) |
180 | 0 | { |
181 | 0 | Crash("Unexpected event!", __func__); |
182 | 0 | } |
183 | | virtual void HandleAudioWaited(MediaData::Type aType) |
184 | 0 | { |
185 | 0 | Crash("Unexpected event!", __func__); |
186 | 0 | } |
187 | | virtual void HandleVideoWaited(MediaData::Type aType) |
188 | 0 | { |
189 | 0 | Crash("Unexpected event!", __func__); |
190 | 0 | } |
191 | | virtual void HandleWaitingForAudio() |
192 | 0 | { |
193 | 0 | Crash("Unexpected event!", __func__); |
194 | 0 | } |
195 | | virtual void HandleAudioCanceled() |
196 | 0 | { |
197 | 0 | Crash("Unexpected event!", __func__); |
198 | 0 | } |
199 | | virtual void HandleEndOfAudio() |
200 | 0 | { |
201 | 0 | Crash("Unexpected event!", __func__); |
202 | 0 | } |
203 | | virtual void HandleWaitingForVideo() |
204 | 0 | { |
205 | 0 | Crash("Unexpected event!", __func__); |
206 | 0 | } |
207 | | virtual void HandleVideoCanceled() |
208 | 0 | { |
209 | 0 | Crash("Unexpected event!", __func__); |
210 | 0 | } |
211 | | virtual void HandleEndOfVideo() |
212 | 0 | { |
213 | 0 | Crash("Unexpected event!", __func__); |
214 | 0 | } |
215 | | |
216 | | virtual RefPtr<MediaDecoder::SeekPromise> HandleSeek(SeekTarget aTarget); |
217 | | |
218 | | virtual RefPtr<ShutdownPromise> HandleShutdown(); |
219 | | |
220 | | virtual void HandleVideoSuspendTimeout() = 0; |
221 | | |
222 | | virtual void HandleResumeVideoDecoding(const TimeUnit& aTarget); |
223 | | |
224 | 0 | virtual void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) { } |
225 | | |
226 | 0 | virtual nsCString GetDebugInfo() { return nsCString(); } |
227 | | |
228 | | private: |
229 | | template <class S, typename R, typename... As> |
230 | | auto ReturnTypeHelper(R(S::*)(As...)) -> R; |
231 | | |
232 | | void Crash(const char* aReason, const char* aSite) |
233 | 0 | { |
234 | 0 | char buf[1024]; |
235 | 0 | SprintfLiteral(buf, "%s state=%s callsite=%s", aReason, |
236 | 0 | ToStateStr(GetState()), aSite); |
237 | 0 | MOZ_ReportAssertionFailure(buf, __FILE__, __LINE__); |
238 | 0 | MOZ_CRASH(); |
239 | 0 | } |
240 | | |
241 | | protected: |
242 | | enum class EventVisibility : int8_t |
243 | | { |
244 | | Observable, |
245 | | Suppressed |
246 | | }; |
247 | | |
248 | | using Master = MediaDecoderStateMachine; |
249 | 0 | explicit StateObject(Master* aPtr) : mMaster(aPtr) { } |
250 | 0 | TaskQueue* OwnerThread() const { return mMaster->mTaskQueue; } |
251 | 0 | ReaderProxy* Reader() const { return mMaster->mReader; } |
252 | 0 | const MediaInfo& Info() const { return mMaster->Info(); } |
253 | 0 | MediaQueue<AudioData>& AudioQueue() const { return mMaster->mAudioQueue; } |
254 | 0 | MediaQueue<VideoData>& VideoQueue() const { return mMaster->mVideoQueue; } |
255 | | |
256 | | template <class S, typename... Args, size_t... Indexes> |
257 | | auto |
258 | | CallEnterMemberFunction(S* aS, |
259 | | Tuple<Args...>& aTuple, |
260 | | std::index_sequence<Indexes...>) |
261 | | -> decltype(ReturnTypeHelper(&S::Enter)) |
262 | 0 | { |
263 | 0 | return aS->Enter(std::move(Get<Indexes>(aTuple))...); |
264 | 0 | } Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DormantState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::DormantState>(mozilla::MediaDecoderStateMachine::DormantState*, mozilla::Tuple<>&, std::__1::integer_sequence<unsigned long>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DecodingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::DecodingState>(mozilla::MediaDecoderStateMachine::DecodingState*, mozilla::Tuple<>&, std::__1::integer_sequence<unsigned long>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::NextFrameSeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::NextFrameSeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility, 0ul, 1ul>(mozilla::MediaDecoderStateMachine::NextFrameSeekingState*, mozilla::Tuple<mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility>&, std::__1::integer_sequence<unsigned long, 0ul, 1ul>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::NextFrameSeekingFromDormantState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::NextFrameSeekingFromDormantState, mozilla::SeekJob, mozilla::SeekJob, 0ul, 1ul>(mozilla::MediaDecoderStateMachine::NextFrameSeekingFromDormantState*, mozilla::Tuple<mozilla::SeekJob, mozilla::SeekJob>&, std::__1::integer_sequence<unsigned long, 0ul, 1ul>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::ShutdownState::Enter)) 
mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::ShutdownState>(mozilla::MediaDecoderStateMachine::ShutdownState*, mozilla::Tuple<>&, std::__1::integer_sequence<unsigned long>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::VideoOnlySeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::VideoOnlySeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility, 0ul, 1ul>(mozilla::MediaDecoderStateMachine::VideoOnlySeekingState*, mozilla::Tuple<mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility>&, std::__1::integer_sequence<unsigned long, 0ul, 1ul>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::AccurateSeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::AccurateSeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility, 0ul, 1ul>(mozilla::MediaDecoderStateMachine::AccurateSeekingState*, mozilla::Tuple<mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility>&, std::__1::integer_sequence<unsigned long, 0ul, 1ul>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DecodingFirstFrameState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::DecodingFirstFrameState>(mozilla::MediaDecoderStateMachine::DecodingFirstFrameState*, mozilla::Tuple<>&, std::__1::integer_sequence<unsigned long>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::CompletedState::Enter)) 
mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::CompletedState>(mozilla::MediaDecoderStateMachine::CompletedState*, mozilla::Tuple<>&, std::__1::integer_sequence<unsigned long>) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::BufferingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::CallEnterMemberFunction<mozilla::MediaDecoderStateMachine::BufferingState>(mozilla::MediaDecoderStateMachine::BufferingState*, mozilla::Tuple<>&, std::__1::integer_sequence<unsigned long>) |
265 | | |
266 | | // Note this function will delete the current state object. |
267 | | // Don't access members to avoid UAF after this call. |
268 | | template <class S, typename... Ts> |
269 | | auto SetState(Ts&&... aArgs) |
270 | | -> decltype(ReturnTypeHelper(&S::Enter)) |
271 | 0 | { |
272 | 0 | // |aArgs| must be passed by reference to avoid passing MOZ_NON_PARAM class |
273 | 0 | // SeekJob by value. See bug 1287006 and bug 1338374. But we still *must* |
274 | 0 | // copy the parameters, because |Exit()| can modify them. See bug 1312321. |
275 | 0 | // So we 1) pass the parameters by reference, but then 2) immediately copy |
276 | 0 | // them into a Tuple to be safe against modification, and finally 3) move |
277 | 0 | // the elements of the Tuple into the final function call. |
278 | 0 | auto copiedArgs = MakeTuple(std::forward<Ts>(aArgs)...); |
279 | 0 |
|
280 | 0 | // Copy mMaster which will reset to null. |
281 | 0 | auto master = mMaster; |
282 | 0 |
|
283 | 0 | auto* s = new S(master); |
284 | 0 |
|
285 | 0 | MOZ_ASSERT(GetState() != s->GetState() || |
286 | 0 | GetState() == DECODER_STATE_SEEKING); |
287 | 0 |
|
288 | 0 | SLOG("change state to: %s", ToStateStr(s->GetState())); |
289 | 0 |
|
290 | 0 | Exit(); |
291 | 0 |
|
292 | 0 | // Delete the old state asynchronously to avoid UAF if the caller tries to |
293 | 0 | // access its members after SetState() returns. |
294 | 0 | master->OwnerThread()->DispatchDirectTask( |
295 | 0 | NS_NewRunnableFunction("MDSM::StateObject::DeleteOldState", |
296 | 0 | [toDelete = std::move(master->mStateObj)](){})); Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DormantState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::DormantState>()::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DecodingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::DecodingState>()::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::NextFrameSeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::NextFrameSeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility>(mozilla::SeekJob&&, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&&)::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::NextFrameSeekingFromDormantState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::NextFrameSeekingFromDormantState, mozilla::SeekJob, mozilla::SeekJob>(mozilla::SeekJob&&, mozilla::SeekJob&&)::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::ShutdownState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::ShutdownState>()::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::VideoOnlySeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::VideoOnlySeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&>(mozilla::SeekJob&&, 
mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&)::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::AccurateSeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::AccurateSeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&>(mozilla::SeekJob&&, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&)::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::NextFrameSeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::NextFrameSeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&>(mozilla::SeekJob&&, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&)::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DecodingFirstFrameState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::DecodingFirstFrameState>()::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::CompletedState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::CompletedState>()::{lambda()#1}::operator()() const Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::BufferingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::BufferingState>()::{lambda()#1}::operator()() const |
297 | 0 | // Also reset mMaster to catch potentail UAF. |
298 | 0 | mMaster = nullptr; |
299 | 0 |
|
300 | 0 | master->mStateObj.reset(s); |
301 | 0 | return CallEnterMemberFunction(s, copiedArgs, |
302 | 0 | std::index_sequence_for<Ts...>{}); |
303 | 0 | } Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DormantState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::DormantState>() Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DecodingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::DecodingState>() Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::NextFrameSeekingFromDormantState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::NextFrameSeekingFromDormantState, mozilla::SeekJob, mozilla::SeekJob>(mozilla::SeekJob&&, mozilla::SeekJob&&) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::NextFrameSeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::NextFrameSeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility>(mozilla::SeekJob&&, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&&) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::ShutdownState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::ShutdownState>() Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::VideoOnlySeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::VideoOnlySeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&>(mozilla::SeekJob&&, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::AccurateSeekingState::Enter)) 
mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::AccurateSeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&>(mozilla::SeekJob&&, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::NextFrameSeekingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::NextFrameSeekingState, mozilla::SeekJob, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&>(mozilla::SeekJob&&, mozilla::MediaDecoderStateMachine::StateObject::EventVisibility&) Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::DecodingFirstFrameState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::DecodingFirstFrameState>() Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::CompletedState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::CompletedState>() Unexecuted instantiation: decltype (ReturnTypeHelper(&mozilla::MediaDecoderStateMachine::BufferingState::Enter)) mozilla::MediaDecoderStateMachine::StateObject::SetState<mozilla::MediaDecoderStateMachine::BufferingState>() |
304 | | |
305 | | RefPtr<MediaDecoder::SeekPromise> |
306 | | SetSeekingState(SeekJob&& aSeekJob, EventVisibility aVisibility); |
307 | | |
308 | | // Take a raw pointer in order not to change the life cycle of MDSM. |
309 | | // It is guaranteed to be valid by MDSM. |
310 | | Master* mMaster; |
311 | | }; |
312 | | |
313 | | /** |
314 | | * Purpose: decode metadata like duration and dimensions of the media resource. |
315 | | * |
316 | | * Transition to other states when decoding metadata is done: |
317 | | * SHUTDOWN if failing to decode metadata. |
318 | | * DECODING_FIRSTFRAME otherwise. |
319 | | */ |
320 | | class MediaDecoderStateMachine::DecodeMetadataState |
321 | | : public MediaDecoderStateMachine::StateObject |
322 | | { |
323 | | public: |
324 | 0 | explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) { } |
325 | | |
326 | | void Enter() |
327 | 0 | { |
328 | 0 | MOZ_ASSERT(!mMaster->mVideoDecodeSuspended); |
329 | 0 | MOZ_ASSERT(!mMetadataRequest.Exists()); |
330 | 0 | SLOG("Dispatching AsyncReadMetadata"); |
331 | 0 |
|
332 | 0 | // We disconnect mMetadataRequest in Exit() so it is fine to capture |
333 | 0 | // a raw pointer here. |
334 | 0 | Reader()->ReadMetadata() |
335 | 0 | ->Then(OwnerThread(), __func__, |
336 | 0 | [this] (MetadataHolder&& aMetadata) { |
337 | 0 | OnMetadataRead(std::move(aMetadata)); |
338 | 0 | }, |
339 | 0 | [this] (const MediaResult& aError) { |
340 | 0 | OnMetadataNotRead(aError); |
341 | 0 | }) |
342 | 0 | ->Track(mMetadataRequest); |
343 | 0 | } |
344 | | |
345 | 0 | void Exit() override { mMetadataRequest.DisconnectIfExists(); } |
346 | | |
347 | 0 | State GetState() const override { return DECODER_STATE_DECODING_METADATA; } |
348 | | |
349 | | RefPtr<MediaDecoder::SeekPromise> HandleSeek(SeekTarget aTarget) override |
350 | 0 | { |
351 | 0 | MOZ_DIAGNOSTIC_ASSERT(false, "Can't seek while decoding metadata."); |
352 | 0 | return MediaDecoder::SeekPromise::CreateAndReject(true, __func__); |
353 | 0 | } |
354 | | |
355 | | void HandleVideoSuspendTimeout() override |
356 | 0 | { |
357 | 0 | // Do nothing since no decoders are created yet. |
358 | 0 | } |
359 | | |
360 | | void HandleResumeVideoDecoding(const TimeUnit&) override |
361 | 0 | { |
362 | 0 | // We never suspend video decoding in this state. |
363 | 0 | MOZ_ASSERT(false, "Shouldn't have suspended video decoding."); |
364 | 0 | } |
365 | | |
366 | | private: |
367 | | void OnMetadataRead(MetadataHolder&& aMetadata); |
368 | | |
369 | | void OnMetadataNotRead(const MediaResult& aError) |
370 | 0 | { |
371 | 0 | mMetadataRequest.Complete(); |
372 | 0 | SLOGE("Decode metadata failed, shutting down decoder"); |
373 | 0 | mMaster->DecodeError(aError); |
374 | 0 | } |
375 | | |
376 | | MozPromiseRequestHolder<MediaFormatReader::MetadataPromise> mMetadataRequest; |
377 | | }; |
378 | | |
379 | | /** |
380 | | * Purpose: release decoder resources to save memory and hardware resources. |
381 | | * |
382 | | * Transition to: |
383 | | * SEEKING if any seek request or play state changes to PLAYING. |
384 | | */ |
385 | | class MediaDecoderStateMachine::DormantState |
386 | | : public MediaDecoderStateMachine::StateObject |
387 | | { |
388 | | public: |
389 | 0 | explicit DormantState(Master* aPtr) : StateObject(aPtr) { } |
390 | | |
391 | | void Enter() |
392 | 0 | { |
393 | 0 | if (mMaster->IsPlaying()) { |
394 | 0 | mMaster->StopPlayback(); |
395 | 0 | } |
396 | 0 |
|
397 | 0 | // Calculate the position to seek to when exiting dormant. |
398 | 0 | auto t = mMaster->mMediaSink->IsStarted() |
399 | 0 | ? mMaster->GetClock() : mMaster->GetMediaTime(); |
400 | 0 | Reader()->AdjustByLooping(t); |
401 | 0 | mPendingSeek.mTarget.emplace(t, SeekTarget::Accurate); |
402 | 0 | // SeekJob asserts |mTarget.IsValid() == !mPromise.IsEmpty()| so we |
403 | 0 | // need to create the promise even it is not used at all. |
404 | 0 | // The promise may be used when coming out of DormantState into |
405 | 0 | // SeekingState. |
406 | 0 | RefPtr<MediaDecoder::SeekPromise> x = |
407 | 0 | mPendingSeek.mPromise.Ensure(__func__); |
408 | 0 |
|
409 | 0 | // No need to call ResetDecode() and StopMediaSink() here. |
410 | 0 | // We will do them during seeking when exiting dormant. |
411 | 0 |
|
412 | 0 | // Ignore WAIT_FOR_DATA since we won't decode in dormant. |
413 | 0 | mMaster->mAudioWaitRequest.DisconnectIfExists(); |
414 | 0 | mMaster->mVideoWaitRequest.DisconnectIfExists(); |
415 | 0 |
|
416 | 0 | MaybeReleaseResources(); |
417 | 0 | } |
418 | | |
419 | | void Exit() override |
420 | 0 | { |
421 | 0 | // mPendingSeek is either moved when exiting dormant or |
422 | 0 | // should be rejected here before transition to SHUTDOWN. |
423 | 0 | mPendingSeek.RejectIfExists(__func__); |
424 | 0 | } |
425 | | |
426 | 0 | State GetState() const override { return DECODER_STATE_DORMANT; } |
427 | | |
428 | | RefPtr<MediaDecoder::SeekPromise> HandleSeek(SeekTarget aTarget) override; |
429 | | |
430 | | void HandleVideoSuspendTimeout() override |
431 | 0 | { |
432 | 0 | // Do nothing since we've released decoders in Enter(). |
433 | 0 | } |
434 | | |
435 | | void HandleResumeVideoDecoding(const TimeUnit&) override |
436 | 0 | { |
437 | 0 | // Do nothing since we won't resume decoding until exiting dormant. |
438 | 0 | } |
439 | | |
440 | | void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override; |
441 | | |
442 | 0 | void HandleAudioDecoded(AudioData*) override { MaybeReleaseResources(); } |
443 | | void HandleVideoDecoded(VideoData*, TimeStamp) override |
444 | 0 | { |
445 | 0 | MaybeReleaseResources(); |
446 | 0 | } |
447 | 0 | void HandleWaitingForAudio() override { MaybeReleaseResources(); } |
448 | 0 | void HandleWaitingForVideo() override { MaybeReleaseResources(); } |
449 | 0 | void HandleAudioCanceled() override { MaybeReleaseResources(); } |
450 | 0 | void HandleVideoCanceled() override { MaybeReleaseResources(); } |
451 | 0 | void HandleEndOfAudio() override { MaybeReleaseResources(); } |
452 | 0 | void HandleEndOfVideo() override { MaybeReleaseResources(); } |
453 | | |
454 | | private: |
455 | | void MaybeReleaseResources() |
456 | 0 | { |
457 | 0 | if (!mMaster->mAudioDataRequest.Exists() && |
458 | 0 | !mMaster->mVideoDataRequest.Exists()) { |
459 | 0 | // Release decoders only when they are idle. Otherwise it might cause |
460 | 0 | // decode error later when resetting decoders during seeking. |
461 | 0 | mMaster->mReader->ReleaseResources(); |
462 | 0 | } |
463 | 0 | } |
464 | | |
465 | | SeekJob mPendingSeek; |
466 | | }; |
467 | | |
468 | | /** |
469 | | * Purpose: decode the 1st audio and video frames to fire the 'loadeddata' event. |
470 | | * |
471 | | * Transition to: |
472 | | * SHUTDOWN if any decode error. |
473 | | * SEEKING if any seek request. |
474 | | * DECODING when the 'loadeddata' event is fired. |
475 | | */ |
class MediaDecoderStateMachine::DecodingFirstFrameState
  : public MediaDecoderStateMachine::StateObject
{
public:
  explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) { }

  void Enter();

  void Exit() override
  {
    // mPendingSeek is either moved in MaybeFinishDecodeFirstFrame()
    // or should be rejected here before transition to SHUTDOWN.
    mPendingSeek.RejectIfExists(__func__);
  }

  State GetState() const override { return DECODER_STATE_DECODING_FIRSTFRAME; }

  // Queue the decoded sample, then check whether we now have the first
  // frame of every active stream, which completes this state.
  void HandleAudioDecoded(AudioData* aAudio) override
  {
    mMaster->PushAudio(aAudio);
    MaybeFinishDecodeFirstFrame();
  }

  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
  {
    mMaster->PushVideo(aVideo);
    MaybeFinishDecodeFirstFrame();
  }

  void HandleWaitingForAudio() override
  {
    mMaster->WaitForData(MediaData::AUDIO_DATA);
  }

  // A canceled request is simply re-issued; we still need the first frame.
  void HandleAudioCanceled() override
  {
    mMaster->RequestAudioData();
  }

  // Reaching end of stream counts as "first frame done" for that stream.
  void HandleEndOfAudio() override
  {
    AudioQueue().Finish();
    MaybeFinishDecodeFirstFrame();
  }

  void HandleWaitingForVideo() override
  {
    mMaster->WaitForData(MediaData::VIDEO_DATA);
  }

  void HandleVideoCanceled() override
  {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleEndOfVideo() override
  {
    VideoQueue().Finish();
    MaybeFinishDecodeFirstFrame();
  }

  void HandleAudioWaited(MediaData::Type aType) override
  {
    mMaster->RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override
  {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleVideoSuspendTimeout() override
  {
    // Do nothing for we need to decode the 1st video frame to get the
    // dimensions.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override
  {
    // We never suspend video decoding in this state.
    MOZ_ASSERT(false, "Shouldn't have suspended video decoding.");
  }

  RefPtr<MediaDecoder::SeekPromise> HandleSeek(SeekTarget aTarget) override
  {
    if (mMaster->mIsMSE) {
      return StateObject::HandleSeek(aTarget);
    }
    // Delay seek request until decoding first frames for non-MSE media.
    SLOG("Not Enough Data to seek at this stage, queuing seek");
    mPendingSeek.RejectIfExists(__func__);
    mPendingSeek.mTarget.emplace(aTarget);
    return mPendingSeek.mPromise.Ensure(__func__);
  }

private:
  // Notify FirstFrameLoaded if having decoded first frames and
  // transition to SEEKING if there is any pending seek, or DECODING otherwise.
  void MaybeFinishDecodeFirstFrame();

  // Seek deferred until the first frames are decoded (non-MSE only).
  SeekJob mPendingSeek;
};
578 | | |
579 | | /** |
580 | | * Purpose: decode audio/video data for playback. |
581 | | * |
582 | | * Transition to: |
583 | | * DORMANT if playback is paused for a while. |
584 | | * SEEKING if any seek request. |
585 | | * SHUTDOWN if any decode error. |
586 | | * BUFFERING if playback can't continue due to lack of decoded data. |
587 | | * COMPLETED when having decoded all audio/video data. |
588 | | */ |
class MediaDecoderStateMachine::DecodingState
  : public MediaDecoderStateMachine::StateObject
{
public:
  explicit DecodingState(Master* aPtr)
    : StateObject(aPtr)
    , mDormantTimer(OwnerThread())
  {
  }

  void Enter();

  void Exit() override
  {
    if (!mDecodeStartTime.IsNull()) {
      TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime;
      SLOG("Exiting DECODING, decoded for %.3lfs", decodeDuration.ToSeconds());
    }
    // Cancel any pending enter-dormant timer and queue listeners; they must
    // not outlive this state.
    mDormantTimer.Reset();
    mOnAudioPopped.DisconnectIfExists();
    mOnVideoPopped.DisconnectIfExists();
  }

  void Step() override;

  State GetState() const override
  {
    return DECODER_STATE_DECODING;
  }

  // Queue the decoded sample, keep the decode pipeline fed, and see whether
  // we have buffered enough to stop prerolling.
  void HandleAudioDecoded(AudioData* aAudio) override
  {
    mMaster->PushAudio(aAudio);
    DispatchDecodeTasksIfNeeded();
    MaybeStopPrerolling();
  }

  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
  {
    mMaster->PushVideo(aVideo);
    DispatchDecodeTasksIfNeeded();
    MaybeStopPrerolling();
  }

  void HandleAudioCanceled() override
  {
    mMaster->RequestAudioData();
  }

  void HandleVideoCanceled() override
  {
    mMaster->RequestVideoData(mMaster->GetMediaTime());
  }

  void HandleEndOfAudio() override;
  void HandleEndOfVideo() override;

  // Waiting for data also ends prerolling for that stream — there is
  // nothing more we can buffer right now.
  void HandleWaitingForAudio() override
  {
    mMaster->WaitForData(MediaData::AUDIO_DATA);
    MaybeStopPrerolling();
  }

  void HandleWaitingForVideo() override
  {
    mMaster->WaitForData(MediaData::VIDEO_DATA);
    MaybeStopPrerolling();
  }

  void HandleAudioWaited(MediaData::Type aType) override
  {
    mMaster->RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override
  {
    mMaster->RequestVideoData(mMaster->GetMediaTime());
  }

  void HandleAudioCaptured() override
  {
    MaybeStopPrerolling();
    // MediaSink is changed. Schedule Step() to check if we can start playback.
    mMaster->ScheduleStateMachine();
  }

  void HandleVideoSuspendTimeout() override
  {
    // No video, so nothing to suspend.
    if (!mMaster->HasVideo()) {
      return;
    }

    mMaster->mVideoDecodeSuspended = true;
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend);
    Reader()->SetVideoBlankDecode(true);
  }

  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override
  {
    if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
      // Schedule Step() to check if we can start playback.
      mMaster->ScheduleStateMachine();
      // Try to dispatch decoding tasks for mMinimizePreroll might be reset.
      DispatchDecodeTasksIfNeeded();
    }

    if (aPlayState == MediaDecoder::PLAY_STATE_PAUSED) {
      StartDormantTimer();
    } else {
      mDormantTimer.Reset();
    }
  }

  nsCString GetDebugInfo() override
  {
    return nsPrintfCString("mIsPrerolling=%d", mIsPrerolling);
  }

private:
  void DispatchDecodeTasksIfNeeded();
  void EnsureAudioDecodeTaskQueued();
  void EnsureVideoDecodeTaskQueued();
  void MaybeStartBuffering();

  // At the start of decoding we want to "preroll" the decode until we've
  // got a few frames decoded before we consider whether decode is falling
  // behind. Otherwise our "we're falling behind" logic will trigger
  // unnecessarily if we start playing as soon as the first sample is
  // decoded. These two thresholds determine how much decoded video and
  // audio we must accumulate before prerolling is considered finished.
  TimeUnit AudioPrerollThreshold() const
  {
    return mMaster->mAmpleAudioThreshold / 2;
  }

  uint32_t VideoPrerollFrames() const
  {
    return mMaster->GetAmpleVideoFrames() / 2;
  }

  // Prerolling of a stream is done when the stream has no more data to
  // decode, or we have buffered past its (playback-rate-scaled) threshold.
  bool DonePrerollingAudio()
  {
    return !mMaster->IsAudioDecoding() ||
           mMaster->GetDecodedAudioDuration()
           >= AudioPrerollThreshold().MultDouble(mMaster->mPlaybackRate);
  }

  bool DonePrerollingVideo()
  {
    return !mMaster->IsVideoDecoding() ||
           static_cast<uint32_t>(mMaster->VideoQueue().GetSize()) >=
           VideoPrerollFrames() * mMaster->mPlaybackRate + 1;
  }

  void MaybeStopPrerolling()
  {
    if (mIsPrerolling &&
        (DonePrerollingAudio() || mMaster->IsWaitingAudioData()) &&
        (DonePrerollingVideo() || mMaster->IsWaitingVideoData())) {
      mIsPrerolling = false;
      // Check if we can start playback.
      mMaster->ScheduleStateMachine();
    }
  }

  // Arrange to enter DormantState after the pref-controlled timeout while
  // playback stays paused. Called on PLAY_STATE_PAUSED.
  void StartDormantTimer()
  {
    if (!mMaster->mMediaSeekable) {
      // Don't enter dormant if the media is not seekable because we need to
      // seek when exiting dormant.
      return;
    }

    auto timeout = StaticPrefs::MediaDormantOnPauseTimeoutMs();
    if (timeout < 0) {
      // Disabled when timeout is negative.
      return;
    } else if (timeout == 0) {
      // Enter dormant immediately without scheduling a timer.
      SetState<DormantState>();
      return;
    }

    if (mMaster->mMinimizePreroll) {
      SetState<DormantState>();
      return;
    }

    TimeStamp target = TimeStamp::Now() +
                       TimeDuration::FromMilliseconds(timeout);

    mDormantTimer.Ensure(target,
                         [this] () {
                           mDormantTimer.CompleteRequest();
                           SetState<DormantState>();
                         }, [this] () {
                           mDormantTimer.CompleteRequest();
                         });
  }

  // Time at which we started decoding.
  TimeStamp mDecodeStartTime;

  // When we start decoding (either for the first time, or after a pause)
  // we may be low on decoded data. We don't want our "low data" logic to
  // kick in and decide that we're low on decoded data because the download
  // can't keep up with the decode, and cause us to pause playback. So we
  // have a "preroll" stage, where we ignore the results of our "low data"
  // logic during the first few frames of our decode. This occurs during
  // playback.
  bool mIsPrerolling = true;

  // Fired when playback is paused for a while to enter dormant.
  DelayedScheduler mDormantTimer;

  MediaEventListener mOnAudioPopped;
  MediaEventListener mOnVideoPopped;
};
808 | | |
809 | | /** |
810 | | * Purpose: seek to a particular new playback position. |
811 | | * |
812 | | * Transition to: |
813 | | * SEEKING if any new seek request. |
814 | | * SHUTDOWN if seek failed. |
815 | | * COMPLETED if the new playback position is the end of the media resource. |
816 | | * NextFrameSeekingState if completing a NextFrameSeekingFromDormantState. |
817 | | * DECODING otherwise. |
818 | | */ |
class MediaDecoderStateMachine::SeekingState
  : public MediaDecoderStateMachine::StateObject
{
public:
  explicit SeekingState(Master* aPtr)
    : StateObject(aPtr)
    , mVisibility(static_cast<EventVisibility>(0))
  { }

  // Take ownership of the seek job, optionally notify the observable parts
  // of starting a seek, then delegate to the subclass's DoSeek(). Returns
  // the promise the caller (MediaDecoder) resolves/rejects on.
  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                          EventVisibility aVisibility)
  {
    mSeekJob = std::move(aSeekJob);
    mVisibility = aVisibility;

    // Suppressed visibility comes from two cases: (1) leaving dormant state,
    // and (2) resuming suspended video decoder. We want both cases to be
    // transparent to the user. So we only notify the change when the seek
    // request is from the user.
    if (mVisibility == EventVisibility::Observable) {
      // Don't stop playback for a video-only seek since we want to keep
      // playing audio, and there is no need to stop playback when leaving
      // dormant because playback has already been stopped.
      mMaster->StopPlayback();
      mMaster->UpdatePlaybackPositionInternal(mSeekJob.mTarget->GetTime());
      mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::SeekStarted);
      mMaster->mOnNextFrameStatus.Notify(
        MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
    }

    // Ensure the promise before DoSeek() so a synchronous completion can
    // still resolve it.
    RefPtr<MediaDecoder::SeekPromise> p = mSeekJob.mPromise.Ensure(__func__);

    DoSeek();

    return p;
  }

  virtual void Exit() override = 0;

  State GetState() const override
  {
    return DECODER_STATE_SEEKING;
  }

  void HandleAudioDecoded(AudioData* aAudio) override = 0;
  void HandleVideoDecoded(VideoData* aVideo,
                          TimeStamp aDecodeStart) override = 0;
  void HandleAudioWaited(MediaData::Type aType) override = 0;
  void HandleVideoWaited(MediaData::Type aType) override = 0;

  void HandleVideoSuspendTimeout() override
  {
    // Do nothing since we want a valid video frame to show when seek is done.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override
  {
    // Do nothing. We will resume video decoding in the decoding state.
  }

  // We specially handle next frame seeks by ignoring them if we're already
  // seeking.
  RefPtr<MediaDecoder::SeekPromise> HandleSeek(SeekTarget aTarget) override
  {
    if (aTarget.IsNextFrame()) {
      // We ignore next frame seeks if we already have a seek pending
      SLOG("Already SEEKING, ignoring seekToNextFrame");
      MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
      return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true,
                                                        __func__);
    }

    return StateObject::HandleSeek(aTarget);
  }

protected:
  SeekJob mSeekJob;
  EventVisibility mVisibility;

  virtual void DoSeek() = 0;
  // Transition to the next state (defined by the subclass) when seek is completed.
  virtual void GoToNextState() { SetState<DecodingState>(); }
  void SeekCompleted();
  virtual TimeUnit CalculateNewCurrentTime() const = 0;
};
904 | | |
905 | | class MediaDecoderStateMachine::AccurateSeekingState |
906 | | : public MediaDecoderStateMachine::SeekingState |
907 | | { |
908 | | public: |
909 | 0 | explicit AccurateSeekingState(Master* aPtr) : SeekingState(aPtr) { } |
910 | | |
911 | | RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob, |
912 | | EventVisibility aVisibility) |
913 | 0 | { |
914 | 0 | MOZ_ASSERT(aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast()); |
915 | 0 | mCurrentTimeBeforeSeek = mMaster->GetMediaTime(); |
916 | 0 | return SeekingState::Enter(std::move(aSeekJob), aVisibility); |
917 | 0 | } |
918 | | |
919 | | void Exit() override |
920 | 0 | { |
921 | 0 | // Disconnect MediaDecoder. |
922 | 0 | mSeekJob.RejectIfExists(__func__); |
923 | 0 |
|
924 | 0 | // Disconnect ReaderProxy. |
925 | 0 | mSeekRequest.DisconnectIfExists(); |
926 | 0 |
|
927 | 0 | mWaitRequest.DisconnectIfExists(); |
928 | 0 | } |
929 | | |
930 | | void HandleAudioDecoded(AudioData* aAudio) override |
931 | 0 | { |
932 | 0 | MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, |
933 | 0 | "Seek shouldn't be finished"); |
934 | 0 | MOZ_ASSERT(aAudio); |
935 | 0 |
|
936 | 0 | AdjustFastSeekIfNeeded(aAudio); |
937 | 0 |
|
938 | 0 | if (mSeekJob.mTarget->IsFast()) { |
939 | 0 | // Non-precise seek; we can stop the seek at the first sample. |
940 | 0 | mMaster->PushAudio(aAudio); |
941 | 0 | mDoneAudioSeeking = true; |
942 | 0 | } else { |
943 | 0 | nsresult rv = DropAudioUpToSeekTarget(aAudio); |
944 | 0 | if (NS_FAILED(rv)) { |
945 | 0 | mMaster->DecodeError(rv); |
946 | 0 | return; |
947 | 0 | } |
948 | 0 | } |
949 | 0 | |
950 | 0 | if (!mDoneAudioSeeking) { |
951 | 0 | RequestAudioData(); |
952 | 0 | return; |
953 | 0 | } |
954 | 0 | MaybeFinishSeek(); |
955 | 0 | } |
956 | | |
957 | | void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override |
958 | 0 | { |
959 | 0 | MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, |
960 | 0 | "Seek shouldn't be finished"); |
961 | 0 | MOZ_ASSERT(aVideo); |
962 | 0 |
|
963 | 0 | AdjustFastSeekIfNeeded(aVideo); |
964 | 0 |
|
965 | 0 | if (mSeekJob.mTarget->IsFast()) { |
966 | 0 | // Non-precise seek. We can stop the seek at the first sample. |
967 | 0 | mMaster->PushVideo(aVideo); |
968 | 0 | mDoneVideoSeeking = true; |
969 | 0 | } else { |
970 | 0 | nsresult rv = DropVideoUpToSeekTarget(aVideo); |
971 | 0 | if (NS_FAILED(rv)) { |
972 | 0 | mMaster->DecodeError(rv); |
973 | 0 | return; |
974 | 0 | } |
975 | 0 | } |
976 | 0 | |
977 | 0 | if (!mDoneVideoSeeking) { |
978 | 0 | RequestVideoData(); |
979 | 0 | return; |
980 | 0 | } |
981 | 0 | MaybeFinishSeek(); |
982 | 0 | } |
983 | | |
984 | | void HandleWaitingForAudio() override |
985 | 0 | { |
986 | 0 | MOZ_ASSERT(!mDoneAudioSeeking); |
987 | 0 | mMaster->WaitForData(MediaData::AUDIO_DATA); |
988 | 0 | } |
989 | | |
990 | | void HandleAudioCanceled() override |
991 | 0 | { |
992 | 0 | MOZ_ASSERT(!mDoneAudioSeeking); |
993 | 0 | RequestAudioData(); |
994 | 0 | } |
995 | | |
996 | | void HandleEndOfAudio() override |
997 | 0 | { |
998 | 0 | HandleEndOfAudioInternal(); |
999 | 0 | MaybeFinishSeek(); |
1000 | 0 | } |
1001 | | |
1002 | | void HandleWaitingForVideo() override |
1003 | 0 | { |
1004 | 0 | MOZ_ASSERT(!mDoneVideoSeeking); |
1005 | 0 | mMaster->WaitForData(MediaData::VIDEO_DATA); |
1006 | 0 | } |
1007 | | |
1008 | | void HandleVideoCanceled() override |
1009 | 0 | { |
1010 | 0 | MOZ_ASSERT(!mDoneVideoSeeking); |
1011 | 0 | RequestVideoData(); |
1012 | 0 | } |
1013 | | |
1014 | | void HandleEndOfVideo() override |
1015 | 0 | { |
1016 | 0 | HandleEndOfVideoInternal(); |
1017 | 0 | MaybeFinishSeek(); |
1018 | 0 | } |
1019 | | |
1020 | | void HandleAudioWaited(MediaData::Type aType) override |
1021 | 0 | { |
1022 | 0 | MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, |
1023 | 0 | "Seek shouldn't be finished"); |
1024 | 0 |
|
1025 | 0 | RequestAudioData(); |
1026 | 0 | } |
1027 | | |
1028 | | void HandleVideoWaited(MediaData::Type aType) override |
1029 | 0 | { |
1030 | 0 | MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking, |
1031 | 0 | "Seek shouldn't be finished"); |
1032 | 0 |
|
1033 | 0 | RequestVideoData(); |
1034 | 0 | } |
1035 | | |
1036 | | void DoSeek() override |
1037 | 0 | { |
1038 | 0 | mDoneAudioSeeking = !Info().HasAudio(); |
1039 | 0 | mDoneVideoSeeking = !Info().HasVideo(); |
1040 | 0 |
|
1041 | 0 | mMaster->ResetDecode(); |
1042 | 0 | mMaster->StopMediaSink(); |
1043 | 0 |
|
1044 | 0 | DemuxerSeek(); |
1045 | 0 | } |
1046 | | |
1047 | | TimeUnit CalculateNewCurrentTime() const override |
1048 | 0 | { |
1049 | 0 | const auto seekTime = mSeekJob.mTarget->GetTime(); |
1050 | 0 |
|
1051 | 0 | // For the accurate seek, we always set the newCurrentTime = seekTime so |
1052 | 0 | // that the updated HTMLMediaElement.currentTime will always be the seek |
1053 | 0 | // target; we rely on the MediaSink to handles the gap between the |
1054 | 0 | // newCurrentTime and the real decoded samples' start time. |
1055 | 0 | if (mSeekJob.mTarget->IsAccurate()) { |
1056 | 0 | return seekTime; |
1057 | 0 | } |
1058 | 0 | |
1059 | 0 | // For the fast seek, we update the newCurrentTime with the decoded audio |
1060 | 0 | // and video samples, set it to be the one which is closet to the seekTime. |
1061 | 0 | if (mSeekJob.mTarget->IsFast()) { |
1062 | 0 | RefPtr<AudioData> audio = AudioQueue().PeekFront(); |
1063 | 0 | RefPtr<VideoData> video = VideoQueue().PeekFront(); |
1064 | 0 |
|
1065 | 0 | // A situation that both audio and video approaches the end. |
1066 | 0 | if (!audio && !video) { |
1067 | 0 | return seekTime; |
1068 | 0 | } |
1069 | 0 | |
1070 | 0 | const int64_t audioStart = |
1071 | 0 | audio ? audio->mTime.ToMicroseconds() : INT64_MAX; |
1072 | 0 | const int64_t videoStart = |
1073 | 0 | video ? video->mTime.ToMicroseconds() : INT64_MAX; |
1074 | 0 | const int64_t audioGap = std::abs(audioStart - seekTime.ToMicroseconds()); |
1075 | 0 | const int64_t videoGap = std::abs(videoStart - seekTime.ToMicroseconds()); |
1076 | 0 | return TimeUnit::FromMicroseconds( |
1077 | 0 | audioGap <= videoGap ? audioStart : videoStart); |
1078 | 0 | } |
1079 | 0 |
|
1080 | 0 | MOZ_ASSERT(false, "AccurateSeekTask doesn't handle other seek types."); |
1081 | 0 | return TimeUnit::Zero(); |
1082 | 0 | } |
1083 | | |
1084 | | protected: |
1085 | | void DemuxerSeek() |
1086 | 0 | { |
1087 | 0 | // Request the demuxer to perform seek. |
1088 | 0 | Reader()->Seek(mSeekJob.mTarget.ref()) |
1089 | 0 | ->Then(OwnerThread(), __func__, |
1090 | 0 | [this] (const media::TimeUnit& aUnit) { |
1091 | 0 | OnSeekResolved(aUnit); |
1092 | 0 | }, |
1093 | 0 | [this] (const SeekRejectValue& aReject) { |
1094 | 0 | OnSeekRejected(aReject); |
1095 | 0 | }) |
1096 | 0 | ->Track(mSeekRequest); |
1097 | 0 | } |
1098 | | |
1099 | | void OnSeekResolved(media::TimeUnit) |
1100 | 0 | { |
1101 | 0 | mSeekRequest.Complete(); |
1102 | 0 |
|
1103 | 0 | // We must decode the first samples of active streams, so we can determine |
1104 | 0 | // the new stream time. So dispatch tasks to do that. |
1105 | 0 | if (!mDoneVideoSeeking) { |
1106 | 0 | RequestVideoData(); |
1107 | 0 | } |
1108 | 0 | if (!mDoneAudioSeeking) { |
1109 | 0 | RequestAudioData(); |
1110 | 0 | } |
1111 | 0 | } |
1112 | | |
1113 | | void OnSeekRejected(const SeekRejectValue& aReject) |
1114 | 0 | { |
1115 | 0 | mSeekRequest.Complete(); |
1116 | 0 |
|
1117 | 0 | if (aReject.mError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { |
1118 | 0 | SLOG("OnSeekRejected reason=WAITING_FOR_DATA type=%d", aReject.mType); |
1119 | 0 | MOZ_ASSERT_IF(aReject.mType == MediaData::AUDIO_DATA, !mMaster->IsRequestingAudioData()); |
1120 | 0 | MOZ_ASSERT_IF(aReject.mType == MediaData::VIDEO_DATA, !mMaster->IsRequestingVideoData()); |
1121 | 0 | MOZ_ASSERT_IF(aReject.mType == MediaData::AUDIO_DATA, !mMaster->IsWaitingAudioData()); |
1122 | 0 | MOZ_ASSERT_IF(aReject.mType == MediaData::VIDEO_DATA, !mMaster->IsWaitingVideoData()); |
1123 | 0 |
|
1124 | 0 | // Fire 'waiting' to notify the player that we are waiting for data. |
1125 | 0 | mMaster->mOnNextFrameStatus.Notify( |
1126 | 0 | MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING); |
1127 | 0 |
|
1128 | 0 | Reader() |
1129 | 0 | ->WaitForData(aReject.mType) |
1130 | 0 | ->Then(OwnerThread(), __func__, |
1131 | 0 | [this](MediaData::Type aType) { |
1132 | 0 | SLOG("OnSeekRejected wait promise resolved"); |
1133 | 0 | mWaitRequest.Complete(); |
1134 | 0 | DemuxerSeek(); |
1135 | 0 | }, |
1136 | 0 | [this](const WaitForDataRejectValue& aRejection) { |
1137 | 0 | SLOG("OnSeekRejected wait promise rejected"); |
1138 | 0 | mWaitRequest.Complete(); |
1139 | 0 | mMaster->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); |
1140 | 0 | }) |
1141 | 0 | ->Track(mWaitRequest); |
1142 | 0 | return; |
1143 | 0 | } |
1144 | 0 |
|
1145 | 0 | if (aReject.mError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { |
1146 | 0 | if (!mDoneAudioSeeking) { |
1147 | 0 | HandleEndOfAudioInternal(); |
1148 | 0 | } |
1149 | 0 | if (!mDoneVideoSeeking) { |
1150 | 0 | HandleEndOfVideoInternal(); |
1151 | 0 | } |
1152 | 0 | MaybeFinishSeek(); |
1153 | 0 | return; |
1154 | 0 | } |
1155 | 0 |
|
1156 | 0 | MOZ_ASSERT(NS_FAILED(aReject.mError), |
1157 | 0 | "Cancels should also disconnect mSeekRequest"); |
1158 | 0 | mMaster->DecodeError(aReject.mError); |
1159 | 0 | } |
1160 | | |
1161 | | void RequestAudioData() |
1162 | 0 | { |
1163 | 0 | MOZ_ASSERT(!mDoneAudioSeeking); |
1164 | 0 | mMaster->RequestAudioData(); |
1165 | 0 | } |
1166 | | |
1167 | | virtual void RequestVideoData() |
1168 | 0 | { |
1169 | 0 | MOZ_ASSERT(!mDoneVideoSeeking); |
1170 | 0 | mMaster->RequestVideoData(media::TimeUnit()); |
1171 | 0 | } |
1172 | | |
1173 | | void AdjustFastSeekIfNeeded(MediaData* aSample) |
1174 | 0 | { |
1175 | 0 | if (mSeekJob.mTarget->IsFast() && |
1176 | 0 | mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek && |
1177 | 0 | aSample->mTime < mCurrentTimeBeforeSeek) { |
1178 | 0 | // We are doing a fastSeek, but we ended up *before* the previous |
1179 | 0 | // playback position. This is surprising UX, so switch to an accurate |
1180 | 0 | // seek and decode to the seek target. This is not conformant to the |
1181 | 0 | // spec, fastSeek should always be fast, but until we get the time to |
1182 | 0 | // change all Readers to seek to the keyframe after the currentTime |
1183 | 0 | // in this case, we'll just decode forward. Bug 1026330. |
1184 | 0 | mSeekJob.mTarget->SetType(SeekTarget::Accurate); |
1185 | 0 | } |
1186 | 0 | } |
1187 | | |
1188 | | nsresult DropAudioUpToSeekTarget(AudioData* aAudio) |
1189 | 0 | { |
1190 | 0 | MOZ_ASSERT(aAudio && mSeekJob.mTarget->IsAccurate()); |
1191 | 0 |
|
1192 | 0 | auto sampleDuration = FramesToTimeUnit( |
1193 | 0 | aAudio->mFrames, Info().mAudio.mRate); |
1194 | 0 | if (!sampleDuration.IsValid()) { |
1195 | 0 | return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; |
1196 | 0 | } |
1197 | 0 | |
1198 | 0 | auto audioTime = aAudio->mTime; |
1199 | 0 | if (audioTime + sampleDuration <= mSeekJob.mTarget->GetTime()) { |
1200 | 0 | // Our seek target lies after the frames in this AudioData. Don't |
1201 | 0 | // push it onto the audio queue, and keep decoding forwards. |
1202 | 0 | return NS_OK; |
1203 | 0 | } |
1204 | 0 | |
1205 | 0 | if (audioTime > mSeekJob.mTarget->GetTime()) { |
1206 | 0 | // The seek target doesn't lie in the audio block just after the last |
1207 | 0 | // audio frames we've seen which were before the seek target. This |
1208 | 0 | // could have been the first audio data we've seen after seek, i.e. the |
1209 | 0 | // seek terminated after the seek target in the audio stream. Just |
1210 | 0 | // abort the audio decode-to-target, the state machine will play |
1211 | 0 | // silence to cover the gap. Typically this happens in poorly muxed |
1212 | 0 | // files. |
1213 | 0 | SLOGW("Audio not synced after seek, maybe a poorly muxed file?"); |
1214 | 0 | mMaster->PushAudio(aAudio); |
1215 | 0 | mDoneAudioSeeking = true; |
1216 | 0 | return NS_OK; |
1217 | 0 | } |
1218 | 0 |
|
1219 | 0 | // The seek target lies somewhere in this AudioData's frames, strip off |
1220 | 0 | // any frames which lie before the seek target, so we'll begin playback |
1221 | 0 | // exactly at the seek target. |
1222 | 0 | NS_ASSERTION(mSeekJob.mTarget->GetTime() >= audioTime, |
1223 | 0 | "Target must at or be after data start."); |
1224 | 0 | NS_ASSERTION(mSeekJob.mTarget->GetTime() < audioTime + sampleDuration, |
1225 | 0 | "Data must end after target."); |
1226 | 0 |
|
1227 | 0 | CheckedInt64 framesToPrune = TimeUnitToFrames( |
1228 | 0 | mSeekJob.mTarget->GetTime() - audioTime, Info().mAudio.mRate); |
1229 | 0 | if (!framesToPrune.isValid()) { |
1230 | 0 | return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; |
1231 | 0 | } |
1232 | 0 | if (framesToPrune.value() > aAudio->mFrames) { |
1233 | 0 | // We've messed up somehow. Don't try to trim frames, the |frames| |
1234 | 0 | // variable below will overflow. |
1235 | 0 | SLOGE("Can't prune more frames that we have!"); |
1236 | 0 | return NS_ERROR_FAILURE; |
1237 | 0 | } |
1238 | 0 | uint32_t frames = aAudio->mFrames - uint32_t(framesToPrune.value()); |
1239 | 0 | uint32_t channels = aAudio->mChannels; |
1240 | 0 | AlignedAudioBuffer audioData(frames * channels); |
1241 | 0 | if (!audioData) { |
1242 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
1243 | 0 | } |
1244 | 0 | |
1245 | 0 | memcpy(audioData.get(), |
1246 | 0 | aAudio->mAudioData.get() + (framesToPrune.value() * channels), |
1247 | 0 | frames * channels * sizeof(AudioDataValue)); |
1248 | 0 | auto duration = FramesToTimeUnit(frames, Info().mAudio.mRate); |
1249 | 0 | if (!duration.IsValid()) { |
1250 | 0 | return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; |
1251 | 0 | } |
1252 | 0 | RefPtr<AudioData> data(new AudioData( |
1253 | 0 | aAudio->mOffset, mSeekJob.mTarget->GetTime(), |
1254 | 0 | duration, frames, std::move(audioData), channels, |
1255 | 0 | aAudio->mRate, aAudio->mChannelMap)); |
1256 | 0 | MOZ_ASSERT(AudioQueue().GetSize() == 0, |
1257 | 0 | "Should be the 1st sample after seeking"); |
1258 | 0 | mMaster->PushAudio(data); |
1259 | 0 | mDoneAudioSeeking = true; |
1260 | 0 |
|
1261 | 0 | return NS_OK; |
1262 | 0 | } |
1263 | | |
1264 | | nsresult DropVideoUpToSeekTarget(VideoData* aVideo) |
1265 | 0 | { |
1266 | 0 | MOZ_ASSERT(aVideo); |
1267 | 0 | SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]", |
1268 | 0 | aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds()); |
1269 | 0 | const auto target = GetSeekTarget(); |
1270 | 0 |
|
1271 | 0 | // If the frame end time is less than the seek target, we won't want |
1272 | 0 | // to display this frame after the seek, so discard it. |
1273 | 0 | if (target >= aVideo->GetEndTime()) { |
1274 | 0 | SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64 |
1275 | 0 | "] target=%" PRId64, |
1276 | 0 | aVideo->mTime.ToMicroseconds(), |
1277 | 0 | aVideo->GetEndTime().ToMicroseconds(), |
1278 | 0 | target.ToMicroseconds()); |
1279 | 0 | mFirstVideoFrameAfterSeek = aVideo; |
1280 | 0 | } else { |
1281 | 0 | if (target >= aVideo->mTime && |
1282 | 0 | aVideo->GetEndTime() >= target) { |
1283 | 0 | // The seek target lies inside this frame's time slice. Adjust the |
1284 | 0 | // frame's start time to match the seek target. |
1285 | 0 | aVideo->UpdateTimestamp(target); |
1286 | 0 | } |
1287 | 0 | mFirstVideoFrameAfterSeek = nullptr; |
1288 | 0 |
|
1289 | 0 | SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64 |
1290 | 0 | "] containing target=%" PRId64, |
1291 | 0 | aVideo->mTime.ToMicroseconds(), |
1292 | 0 | aVideo->GetEndTime().ToMicroseconds(), |
1293 | 0 | target.ToMicroseconds()); |
1294 | 0 |
|
1295 | 0 | MOZ_ASSERT(VideoQueue().GetSize() == 0, |
1296 | 0 | "Should be the 1st sample after seeking"); |
1297 | 0 | mMaster->PushVideo(aVideo); |
1298 | 0 | mDoneVideoSeeking = true; |
1299 | 0 | } |
1300 | 0 |
|
1301 | 0 | return NS_OK; |
1302 | 0 | } |
1303 | | |
1304 | | void HandleEndOfAudioInternal() |
1305 | 0 | { |
1306 | 0 | MOZ_ASSERT(!mDoneAudioSeeking); |
1307 | 0 | AudioQueue().Finish(); |
1308 | 0 | mDoneAudioSeeking = true; |
1309 | 0 | } |
1310 | | |
1311 | | void HandleEndOfVideoInternal() |
1312 | 0 | { |
1313 | 0 | MOZ_ASSERT(!mDoneVideoSeeking); |
1314 | 0 | if (mFirstVideoFrameAfterSeek) { |
1315 | 0 | // Hit the end of stream. Move mFirstVideoFrameAfterSeek into |
1316 | 0 | // mSeekedVideoData so we have something to display after seeking. |
1317 | 0 | mMaster->PushVideo(mFirstVideoFrameAfterSeek); |
1318 | 0 | } |
1319 | 0 | VideoQueue().Finish(); |
1320 | 0 | mDoneVideoSeeking = true; |
1321 | 0 | } |
1322 | | |
1323 | | void MaybeFinishSeek() |
1324 | 0 | { |
1325 | 0 | if (mDoneAudioSeeking && mDoneVideoSeeking) { |
1326 | 0 | SeekCompleted(); |
1327 | 0 | } |
1328 | 0 | } |
1329 | | |
  /*
   * Track the current seek promise made by the reader.
   */
  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mSeekRequest;

  /*
   * Internal state.
   */
  // Playback position recorded before the seek began.
  media::TimeUnit mCurrentTimeBeforeSeek;
  // Whether each track has reached the seek target; the seek completes only
  // when both are true (see MaybeFinishSeek()).
  bool mDoneAudioSeeking = false;
  bool mDoneVideoSeeking = false;
  // Holds any outstanding wait-for-data request against the reader.
  MozPromiseRequestHolder<WaitForDataPromise> mWaitRequest;

  // This temporarily stores the first frame we decode after we seek.
  // This is so that if we hit end of stream while we're decoding to reach
  // the seek target, we will still have a frame that we can display as the
  // last frame in the media.
  RefPtr<VideoData> mFirstVideoFrameAfterSeek;
1348 | | |
private:
  // The media time this seek is aiming for. Virtual so subclasses can
  // substitute a different target (e.g. the video-only seek below returns
  // the playback clock when the media sink is running).
  virtual media::TimeUnit GetSeekTarget() const
  {
    return mSeekJob.mTarget->GetTime();
  }
};
1355 | | |
1356 | | /* |
1357 | | * Remove samples from the queue until aCompare() returns false. |
1358 | | * aCompare A function object with the signature bool(int64_t) which returns |
1359 | | * true for samples that should be removed. |
1360 | | */ |
1361 | | template <typename Type, typename Function> |
1362 | | static void |
1363 | | DiscardFrames(MediaQueue<Type>& aQueue, const Function& aCompare) |
1364 | 0 | { |
1365 | 0 | while(aQueue.GetSize() > 0) { |
1366 | 0 | if (aCompare(aQueue.PeekFront()->mTime.ToMicroseconds())) { |
1367 | 0 | RefPtr<Type> releaseMe = aQueue.PopFront(); |
1368 | 0 | continue; |
1369 | 0 | } |
1370 | 0 | break; |
1371 | 0 | } |
1372 | 0 | } Unexecuted instantiation: Unified_cpp_dom_media5.cpp:void mozilla::DiscardFrames<mozilla::AudioData, mozilla::MediaDecoderStateMachine::NextFrameSeekingState::FinishSeek()::{lambda(long)#1}>(mozilla::MediaQueue<mozilla::AudioData>&, mozilla::MediaDecoderStateMachine::NextFrameSeekingState::FinishSeek()::{lambda(long)#1} const&) Unexecuted instantiation: Unified_cpp_dom_media5.cpp:void mozilla::DiscardFrames<mozilla::VideoData, mozilla::MediaDecoderStateMachine::NextFrameSeekingState::DoSeek()::{lambda(long)#1}>(mozilla::MediaQueue<mozilla::VideoData>&, mozilla::MediaDecoderStateMachine::NextFrameSeekingState::DoSeek()::{lambda(long)#1} const&) |
1373 | | |
/**
 * Purpose: implement HTMLMediaElement.seekToNextFrame() -- advance playback
 * to the first video frame whose start time is strictly greater than the
 * current position.
 *
 * Unlike AccurateSeekingState, this state does not reset the decoders; it
 * discards already-queued frames up to the current time and keeps requesting
 * video until a later frame arrives (or the stream ends).
 */
class MediaDecoderStateMachine::NextFrameSeekingState
  : public MediaDecoderStateMachine::SeekingState
{
public:
  explicit NextFrameSeekingState(Master* aPtr) : SeekingState(aPtr) { }

  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                          EventVisibility aVisibility)
  {
    MOZ_ASSERT(aSeekJob.mTarget->IsNextFrame());
    // Snapshot the position and duration now; DoSeek() and
    // UpdateSeekTargetTime() compute against these values.
    mCurrentTime = mMaster->GetMediaTime();
    mDuration = mMaster->Duration();
    return SeekingState::Enter(std::move(aSeekJob), aVisibility);
  }

  void Exit() override
  {
    // Disconnect my async seek operation.
    if (mAsyncSeekTask) { mAsyncSeekTask->Cancel(); }

    // Disconnect MediaDecoder.
    mSeekJob.RejectIfExists(__func__);
  }

  void HandleAudioDecoded(AudioData* aAudio) override
  {
    // Audio is not seeked in this state; just buffer whatever arrives.
    mMaster->PushAudio(aAudio);
  }

  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
  {
    MOZ_ASSERT(aVideo);
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());

    if (aVideo->mTime > mCurrentTime) {
      // First frame strictly after the current position -- seek is done.
      mMaster->PushVideo(aVideo);
      FinishSeek();
    } else {
      // Frame is at or before the current position; keep decoding.
      RequestVideoData();
    }
  }

  void HandleWaitingForAudio() override
  {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    // We don't care about audio decode errors in this state which will be
    // handled by other states after seeking.
  }

  void HandleAudioCanceled() override
  {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    // We don't care about audio decode errors in this state which will be
    // handled by other states after seeking.
  }

  void HandleEndOfAudio() override
  {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    // We don't care about audio decode errors in this state which will be
    // handled by other states after seeking.
  }

  void HandleWaitingForVideo() override
  {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());
    mMaster->WaitForData(MediaData::VIDEO_DATA);
  }

  void HandleVideoCanceled() override
  {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());
    RequestVideoData();
  }

  void HandleEndOfVideo() override
  {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());
    // No more frames are coming; finish the seek at end of stream.
    VideoQueue().Finish();
    FinishSeek();
  }

  void HandleAudioWaited(MediaData::Type aType) override
  {
    // We don't care about audio in this state.
  }

  void HandleVideoWaited(MediaData::Type aType) override
  {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());
    RequestVideoData();
  }

  TimeUnit CalculateNewCurrentTime() const override
  {
    // The HTMLMediaElement.currentTime should be updated to the seek target
    // which has been updated to the next frame's time.
    return mSeekJob.mTarget->GetTime();
  }

  void DoSeek() override
  {
    // Drop everything at or before the current position; the frame we keep
    // must be strictly later (see HandleVideoDecoded()).
    auto currentTime = mCurrentTime;
    DiscardFrames(VideoQueue(), [currentTime] (int64_t aSampleTime) {
      return aSampleTime <= currentTime.ToMicroseconds();
    });

    // If there is a pending video request, finish the seeking if we don't need
    // more data, or wait for HandleVideoDecoded() to finish seeking.
    if (mMaster->IsRequestingVideoData()) {
      if (!NeedMoreVideo()) {
        FinishSeek();
      }
      return;
    }

    // Otherwise, we need to do the seek operation asynchronously for a special
    // case (bug504613.ogv) which has no data at all, the 1st seekToNextFrame()
    // operation reaches the end of the media. If we did the seek operation
    // synchronously, we immediately resolve the SeekPromise in mSeekJob and
    // then switch to the CompletedState which dispatches an "ended" event.
    // However, the ThenValue of the SeekPromise has not yet been set, so the
    // promise resolving is postponed and then the JS developer receives the
    // "ended" event before the seek promise is resolved.
    // An asynchronous seek operation helps to solve this issue since while the
    // seek is actually performed, the ThenValue of SeekPromise has already
    // been set so that it won't be postponed.
    RefPtr<Runnable> r = mAsyncSeekTask = new AysncNextFrameSeekTask(this);
    nsresult rv = OwnerThread()->Dispatch(r.forget());
    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
    Unused << rv;
  }

private:
  void DoSeekInternal()
  {
    // We don't need to discard frames to the mCurrentTime here because we have
    // done it at DoSeek() and any video data received in between either
    // finishes the seek operation or be discarded, see HandleVideoDecoded().

    if (!NeedMoreVideo()) {
      FinishSeek();
    } else if (!mMaster->IsRequestingVideoData() &&
               !mMaster->IsWaitingVideoData()) {
      RequestVideoData();
    }
  }

  // Cancelable runnable that forwards to DoSeekInternal(). Cancel() severs
  // the raw back-pointer when the state exits, so a task that runs after
  // Exit() becomes a no-op.
  class AysncNextFrameSeekTask : public Runnable
  {
  public:
    explicit AysncNextFrameSeekTask(NextFrameSeekingState* aStateObject)
      : Runnable("MediaDecoderStateMachine::NextFrameSeekingState::"
                 "AysncNextFrameSeekTask")
      , mStateObj(aStateObject)
    {
    }

    void Cancel() { mStateObj = nullptr; }

    NS_IMETHOD Run() override
    {
      if (mStateObj) {
        mStateObj->DoSeekInternal();
      }
      return NS_OK;
    }

  private:
    // Raw pointer; cleared by Cancel() in NextFrameSeekingState::Exit().
    NextFrameSeekingState* mStateObj;
  };

  void RequestVideoData()
  {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  bool NeedMoreVideo() const
  {
    // Need to request video when we have none and video queue is not finished.
    return VideoQueue().GetSize() == 0 && !VideoQueue().IsFinished();
  }

  // Update the seek target's time before resolving this seek task, the updated
  // time will be used in the MDSM::SeekCompleted() to update the MDSM's
  // position.
  void UpdateSeekTargetTime()
  {
    RefPtr<VideoData> data = VideoQueue().PeekFront();
    if (data) {
      mSeekJob.mTarget->SetTime(data->mTime);
    } else {
      // No frame found before end of stream: the target becomes the duration.
      MOZ_ASSERT(VideoQueue().AtEndOfStream());
      mSeekJob.mTarget->SetTime(mDuration);
    }
  }

  void FinishSeek()
  {
    MOZ_ASSERT(!NeedMoreVideo());
    UpdateSeekTargetTime();
    // Discard audio samples that precede the final seek target.
    auto time = mSeekJob.mTarget->GetTime().ToMicroseconds();
    DiscardFrames(AudioQueue(), [time] (int64_t aSampleTime) {
      return aSampleTime < time;
    });
    SeekCompleted();
  }

  /*
   * Internal state.
   */
  // Playback position when this seek started.
  TimeUnit mCurrentTime;
  // Duration snapshot, used as the target when we hit end of stream.
  TimeUnit mDuration;
  RefPtr<AysncNextFrameSeekTask> mAsyncSeekTask;
};
1594 | | |
/**
 * Purpose: leave dormant and then run a next-frame seek.
 *
 * NextFrameSeekingState does not reset the decoder, so we first perform a
 * suppressed accurate seek (the "current" job, coming out of dormant) and
 * only then chain into NextFrameSeekingState with the caller's "future" job
 * via GoToNextState().
 */
class MediaDecoderStateMachine::NextFrameSeekingFromDormantState
  : public MediaDecoderStateMachine::AccurateSeekingState
{
public:
  explicit NextFrameSeekingFromDormantState(Master* aPtr)
    : AccurateSeekingState(aPtr)
  {
  }

  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aCurrentSeekJob,
                                          SeekJob&& aFutureSeekJob)
  {
    mFutureSeekJob = std::move(aFutureSeekJob);

    AccurateSeekingState::Enter(std::move(aCurrentSeekJob),
                                EventVisibility::Suppressed);

    // Once seekToNextFrame() is called, we assume the user is likely to keep
    // calling seekToNextFrame() repeatedly, and so, we should prevent the MDSM
    // from getting into Dormant state.
    mMaster->mMinimizePreroll = false;

    // The caller observes the future (next-frame) seek, not the internal one.
    return mFutureSeekJob.mPromise.Ensure(__func__);
  }

  void Exit() override
  {
    mFutureSeekJob.RejectIfExists(__func__);
    AccurateSeekingState::Exit();
  }

private:
  // The deferred next-frame seek, started once the accurate seek completes.
  SeekJob mFutureSeekJob;

  // We don't want to transition to DecodingState once this seek completes,
  // instead, we transition to NextFrameSeekingState.
  void GoToNextState() override
  {
    SetState<NextFrameSeekingState>(std::move(mFutureSeekJob),
                                    EventVisibility::Observable);
  }
};
1637 | | |
/**
 * Purpose: accurate seek of the video track only; the audio track and its
 * decoder are left untouched. Used when resuming a suspended video decoder
 * (see StateObject::HandleResumeVideoDecoding).
 */
class MediaDecoderStateMachine::VideoOnlySeekingState
  : public MediaDecoderStateMachine::AccurateSeekingState
{
public:
  explicit VideoOnlySeekingState(Master* aPtr) : AccurateSeekingState(aPtr) { }

  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                          EventVisibility aVisibility)
  {
    MOZ_ASSERT(aSeekJob.mTarget->IsVideoOnly());
    MOZ_ASSERT(aVisibility == EventVisibility::Suppressed);

    RefPtr<MediaDecoder::SeekPromise> p =
      AccurateSeekingState::Enter(std::move(aSeekJob), aVisibility);

    // Dispatch a mozvideoonlyseekbegin event to indicate UI for corresponding
    // changes.
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::VideoOnlySeekBegin);

    return p.forget();
  }

  void Exit() override
  {
    // We are completing or discarding this video-only seek operation now,
    // dispatch an event so that the UI can change in response to the end
    // of video-only seek.
    mMaster->mOnPlaybackEvent.Notify(
      MediaPlaybackEvent::VideoOnlySeekCompleted);

    AccurateSeekingState::Exit();
  }

  void HandleAudioDecoded(AudioData* aAudio) override
  {
    MOZ_ASSERT(mDoneAudioSeeking && !mDoneVideoSeeking,
               "Seek shouldn't be finished");
    MOZ_ASSERT(aAudio);

    // Video-only seek doesn't reset audio decoder. There might be pending audio
    // requests when AccurateSeekTask::Seek() begins. We will just store the
    // data without checking |mDiscontinuity| or calling
    // DropAudioUpToSeekTarget().
    mMaster->PushAudio(aAudio);
  }

  // Audio decode notifications are irrelevant to a video-only seek.
  void HandleWaitingForAudio() override { }

  void HandleAudioCanceled() override { }

  void HandleEndOfAudio() override { }

  void HandleAudioWaited(MediaData::Type aType) override
  {
    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
               "Seek shouldn't be finished");

    // Ignore pending requests from video-only seek.
  }

  void DoSeek() override
  {
    // TODO: keep decoding audio.
    mDoneAudioSeeking = true;
    mDoneVideoSeeking = !Info().HasVideo();

    // Only the video decoder is reset; audio continues untouched.
    mMaster->ResetDecode(TrackInfo::kVideoTrack);

    DemuxerSeek();
  }

protected:
  // Allow skip-to-next-key-frame to kick in if we fall behind the current
  // playback position so decoding has a better chance to catch up.
  void RequestVideoData() override
  {
    MOZ_ASSERT(!mDoneVideoSeeking);

    // Use the running playback clock if the sink is started, otherwise the
    // static media time.
    const auto& clock = mMaster->mMediaSink->IsStarted()
                        ? mMaster->GetClock()
                        : mMaster->GetMediaTime();
    const auto& nextKeyFrameTime = GetNextKeyFrameTime();

    auto threshold = clock;

    if (nextKeyFrameTime.IsValid() &&
        clock >= (nextKeyFrameTime - sSkipToNextKeyFrameThreshold)) {
      threshold = nextKeyFrameTime;
    }

    mMaster->RequestVideoData(threshold);
  }

private:
  // Trigger skip to next key frame if the current playback position is very
  // close the next key frame's time.
  static constexpr TimeUnit sSkipToNextKeyFrameThreshold = TimeUnit::FromMicroseconds(5000);

  // If the media is playing, drop video until catch up playback position.
  media::TimeUnit GetSeekTarget() const override
  {
    return mMaster->mMediaSink->IsStarted()
           ? mMaster->GetClock()
           : mSeekJob.mTarget->GetTime();
  }

  media::TimeUnit GetNextKeyFrameTime() const
  {
    // We only call this method in RequestVideoData() and we only request video
    // data if we haven't done video seeking.
    MOZ_DIAGNOSTIC_ASSERT(!mDoneVideoSeeking);
    MOZ_DIAGNOSTIC_ASSERT(mMaster->VideoQueue().GetSize() == 0);

    if (mFirstVideoFrameAfterSeek) {
      return mFirstVideoFrameAfterSeek->NextKeyFrameTime();
    }

    return TimeUnit::Invalid();
  }

};
1759 | | |
// Out-of-class definition of the static constexpr member, needed when it is
// ODR-used (pre-C++17 inline-variable rules).
constexpr TimeUnit
MediaDecoderStateMachine::VideoOnlySeekingState::sSkipToNextKeyFrameThreshold;
1762 | | |
1763 | | RefPtr<MediaDecoder::SeekPromise> |
1764 | | MediaDecoderStateMachine::DormantState::HandleSeek(SeekTarget aTarget) |
1765 | 0 | { |
1766 | 0 | if (aTarget.IsNextFrame()) { |
1767 | 0 | // NextFrameSeekingState doesn't reset the decoder unlike |
1768 | 0 | // AccurateSeekingState. So we first must come out of dormant by seeking to |
1769 | 0 | // mPendingSeek and continue later with the NextFrameSeek |
1770 | 0 | SLOG("Changed state to SEEKING (to %" PRId64 ")", |
1771 | 0 | aTarget.GetTime().ToMicroseconds()); |
1772 | 0 | SeekJob seekJob; |
1773 | 0 | seekJob.mTarget = Some(aTarget); |
1774 | 0 | return StateObject::SetState<NextFrameSeekingFromDormantState>( |
1775 | 0 | std::move(mPendingSeek), std::move(seekJob)); |
1776 | 0 | } |
1777 | 0 |
|
1778 | 0 | return StateObject::HandleSeek(aTarget); |
1779 | 0 | } |
1780 | | |
/**
 * Purpose: stop playback until enough data is decoded to continue playback.
 *
 * Transition to:
 *   SEEKING if any seek request.
 *   SHUTDOWN if any decode error.
 *   COMPLETED when having decoded all audio/video data.
 *   DECODING when having decoded enough data to continue playback.
 */
class MediaDecoderStateMachine::BufferingState
  : public MediaDecoderStateMachine::StateObject
{
public:
  explicit BufferingState(Master* aPtr) : StateObject(aPtr) { }

  void Enter()
  {
    if (mMaster->IsPlaying()) {
      mMaster->StopPlayback();
    }

    mBufferingStart = TimeStamp::Now();
    // Re-run Step() in one second to re-evaluate buffering progress.
    mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S));
    mMaster->mOnNextFrameStatus.Notify(
      MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING);
  }

  void Step() override;

  State GetState() const override { return DECODER_STATE_BUFFERING; }

  void HandleAudioDecoded(AudioData* aAudio) override
  {
    mMaster->PushAudio(aAudio);
    if (!mMaster->HaveEnoughDecodedAudio()) {
      mMaster->RequestAudioData();
    }
    // This might be the sample we need to exit buffering.
    // Schedule Step() to check it.
    mMaster->ScheduleStateMachine();
  }

  void HandleVideoDecoded(VideoData* aVideo, TimeStamp aDecodeStart) override
  {
    mMaster->PushVideo(aVideo);
    if (!mMaster->HaveEnoughDecodedVideo()) {
      mMaster->RequestVideoData(media::TimeUnit());
    }
    // This might be the sample we need to exit buffering.
    // Schedule Step() to check it.
    mMaster->ScheduleStateMachine();
  }

  // Canceled requests are simply re-issued while buffering.
  void HandleAudioCanceled() override { mMaster->RequestAudioData(); }

  void HandleVideoCanceled() override
  {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleWaitingForAudio() override
  {
    mMaster->WaitForData(MediaData::AUDIO_DATA);
  }

  void HandleWaitingForVideo() override
  {
    mMaster->WaitForData(MediaData::VIDEO_DATA);
  }

  void HandleAudioWaited(MediaData::Type aType) override
  {
    mMaster->RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override
  {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleEndOfAudio() override;
  void HandleEndOfVideo() override;

  void HandleVideoSuspendTimeout() override
  {
    // No video, so nothing to suspend.
    if (!mMaster->HasVideo()) {
      return;
    }

    // Switch the video decoder to blank-frame output while suspended.
    mMaster->mVideoDecodeSuspended = true;
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend);
    Reader()->SetVideoBlankDecode(true);
  }

private:
  // Time at which we entered buffering (set in Enter()).
  TimeStamp mBufferingStart;

  // The maximum number of second we spend buffering when we are short on
  // unbuffered data.
  const uint32_t mBufferingWait = 15;
};
1883 | | |
/**
 * Purpose: play all the decoded data and fire the 'ended' event.
 *
 * Transition to:
 *   SEEKING if any seek request.
 */
class MediaDecoderStateMachine::CompletedState
  : public MediaDecoderStateMachine::StateObject
{
public:
  explicit CompletedState(Master* aPtr) : StateObject(aPtr) { }

  void Enter()
  {
// On Android, the life cycle of graphic buffer is equal to Android's codec,
// we couldn't release it if we still need to render the frame.
#ifndef MOZ_WIDGET_ANDROID
    if (!mMaster->mLooping) {
      // We've decoded all samples.
      // We don't need decoders anymore if not looping.
      Reader()->ReleaseResources();
    }
#endif
    // A "next frame" is available while either track still has undrained
    // decoded data.
    bool hasNextFrame = (!mMaster->HasAudio() || !mMaster->mAudioCompleted) &&
                        (!mMaster->HasVideo() || !mMaster->mVideoCompleted);

    mMaster->mOnNextFrameStatus.Notify(
      hasNextFrame ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
                   : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);

    Step();
  }

  void Exit() override
  {
    mSentPlaybackEndedEvent = false;
  }

  void Step() override
  {
    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
        mMaster->IsPlaying()) {
      mMaster->StopPlayback();
    }

    // Play the remaining media. We want to run AdvanceFrame() at least
    // once to ensure the current playback position is advanced to the
    // end of the media, and so that we update the readyState.
    if ((mMaster->HasVideo() && !mMaster->mVideoCompleted) ||
        (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
      // Start playback if necessary to play the remaining media.
      mMaster->MaybeStartPlayback();
      mMaster->UpdatePlaybackPositionPeriodically();
      MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
                 "Must have timer scheduled");
      return;
    }

    // StopPlayback in order to reset the IsPlaying() state so audio
    // is restarted correctly.
    mMaster->StopPlayback();

    if (!mSentPlaybackEndedEvent) {
      auto clockTime =
        std::max(mMaster->AudioEndTime(), mMaster->VideoEndTime());
      // Correct the time over the end once looping was turned on.
      Reader()->AdjustByLooping(clockTime);
      if (mMaster->mDuration.Ref()->IsInfinite()) {
        // We have a finite duration when playback reaches the end.
        mMaster->mDuration = Some(clockTime);
        DDLOGEX(mMaster,
                DDLogCategory::Property,
                "duration_us",
                mMaster->mDuration.Ref()->ToMicroseconds());
      }
      mMaster->UpdatePlaybackPosition(clockTime);

      // Ensure readyState is updated before firing the 'ended' event.
      mMaster->mOnNextFrameStatus.Notify(
        MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);

      mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::PlaybackEnded);

      mSentPlaybackEndedEvent = true;

      // MediaSink::GetEndTime() must be called before stopping playback.
      mMaster->StopMediaSink();
    }
  }

  State GetState() const override
  {
    return DECODER_STATE_COMPLETED;
  }

  void HandleAudioCaptured() override
  {
    // MediaSink is changed. Schedule Step() to check if we can start playback.
    mMaster->ScheduleStateMachine();
  }

  void HandleVideoSuspendTimeout() override
  {
    // Do nothing since no decoding is going on.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override
  {
    // Resume the video decoder and seek to the last video frame.
    // This triggers a video-only seek which won't update the playback position.
    StateObject::HandleResumeVideoDecoding(mMaster->mDecodedVideoEndTime);
  }

  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override
  {
    if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
      // Schedule Step() to check if we can start playback.
      mMaster->ScheduleStateMachine();
    }
  }

private:
  // Ensures PlaybackEnded is notified at most once per stay in this state;
  // reset in Exit() so re-entering can notify again.
  bool mSentPlaybackEndedEvent = false;
};
2008 | | |
/**
 * Purpose: release all resources allocated by MDSM.
 *
 * Transition to:
 *   None since this is the final state.
 *
 * Transition from:
 *   Any states other than SHUTDOWN.
 */
class MediaDecoderStateMachine::ShutdownState
  : public MediaDecoderStateMachine::StateObject
{
public:
  explicit ShutdownState(Master* aPtr) : StateObject(aPtr) { }

  RefPtr<ShutdownPromise> Enter();

  void Exit() override
  {
    MOZ_DIAGNOSTIC_ASSERT(false, "Shouldn't escape the SHUTDOWN state.");
  }

  State GetState() const override
  {
    return DECODER_STATE_SHUTDOWN;
  }

  // The handlers below should never fire once shutdown has begun; each
  // asserts in diagnostic builds to surface the caller bug.
  RefPtr<MediaDecoder::SeekPromise> HandleSeek(SeekTarget aTarget) override
  {
    MOZ_DIAGNOSTIC_ASSERT(false, "Can't seek in shutdown state.");
    return MediaDecoder::SeekPromise::CreateAndReject(true, __func__);
  }

  RefPtr<ShutdownPromise> HandleShutdown() override
  {
    MOZ_DIAGNOSTIC_ASSERT(false, "Already shutting down.");
    return nullptr;
  }

  void HandleVideoSuspendTimeout() override
  {
    MOZ_DIAGNOSTIC_ASSERT(false, "Already shutting down.");
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override
  {
    MOZ_DIAGNOSTIC_ASSERT(false, "Already shutting down.");
  }
};
2058 | | |
2059 | | RefPtr<MediaDecoder::SeekPromise> |
2060 | | MediaDecoderStateMachine:: |
2061 | | StateObject::HandleSeek(SeekTarget aTarget) |
2062 | 0 | { |
2063 | 0 | SLOG("Changed state to SEEKING (to %" PRId64 ")", aTarget.GetTime().ToMicroseconds()); |
2064 | 0 | SeekJob seekJob; |
2065 | 0 | seekJob.mTarget = Some(aTarget); |
2066 | 0 | return SetSeekingState(std::move(seekJob), EventVisibility::Observable); |
2067 | 0 | } |
2068 | | |
// Default shutdown handling: transition to ShutdownState; the returned
// promise comes from ShutdownState::Enter().
RefPtr<ShutdownPromise>
MediaDecoderStateMachine::
StateObject::HandleShutdown()
{
  return SetState<ShutdownState>();
}
2075 | | |
2076 | | static void |
2077 | | ReportRecoveryTelemetry(const TimeStamp& aRecoveryStart, |
2078 | | const MediaInfo& aMediaInfo, |
2079 | | bool aIsHardwareAccelerated) |
2080 | 0 | { |
2081 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
2082 | 0 | if (!aMediaInfo.HasVideo()) { |
2083 | 0 | return; |
2084 | 0 | } |
2085 | 0 | |
2086 | 0 | // Keyed by audio+video or video alone, hardware acceleration, |
2087 | 0 | // and by a resolution range. |
2088 | 0 | nsCString key(aMediaInfo.HasAudio() ? "AV" : "V"); |
2089 | 0 | key.AppendASCII(aIsHardwareAccelerated ? "(hw)," : ","); |
2090 | 0 | static const struct { int32_t mH; const char* mRes; } sResolutions[] = { |
2091 | 0 | { 240, "0-240" }, |
2092 | 0 | { 480, "241-480" }, |
2093 | 0 | { 720, "481-720" }, |
2094 | 0 | { 1080, "721-1080" }, |
2095 | 0 | { 2160, "1081-2160" } |
2096 | 0 | }; |
2097 | 0 | const char* resolution = "2161+"; |
2098 | 0 | int32_t height = aMediaInfo.mVideo.mImage.height; |
2099 | 0 | for (const auto& res : sResolutions) { |
2100 | 0 | if (height <= res.mH) { |
2101 | 0 | resolution = res.mRes; |
2102 | 0 | break; |
2103 | 0 | } |
2104 | 0 | } |
2105 | 0 | key.AppendASCII(resolution); |
2106 | 0 |
|
2107 | 0 | TimeDuration duration = TimeStamp::Now() - aRecoveryStart; |
2108 | 0 | double duration_ms = duration.ToMilliseconds(); |
2109 | 0 | Telemetry::Accumulate(Telemetry::VIDEO_SUSPEND_RECOVERY_TIME_MS, |
2110 | 0 | key, |
2111 | 0 | uint32_t(duration_ms + 0.5)); |
2112 | 0 | Telemetry::Accumulate(Telemetry::VIDEO_SUSPEND_RECOVERY_TIME_MS, |
2113 | 0 | NS_LITERAL_CSTRING("All"), |
2114 | 0 | uint32_t(duration_ms + 0.5)); |
2115 | 0 | } |
2116 | | |
// Exit video-decode-suspend: re-enable real (non-blank) video decoding and
// run a suppressed video-only seek to aTarget so video catches up with the
// playback position. Reports recovery telemetry once the seek resolves.
void
MediaDecoderStateMachine::
StateObject::HandleResumeVideoDecoding(const TimeUnit& aTarget)
{
  MOZ_ASSERT(mMaster->mVideoDecodeSuspended);

  mMaster->mVideoDecodeSuspended = false;
  mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::ExitVideoSuspend);
  Reader()->SetVideoBlankDecode(false);

  // Start counting recovery time from right now.
  TimeStamp start = TimeStamp::Now();

  // Local reference to mInfo, so that it will be copied in the lambda below.
  auto& info = Info();
  bool hw = Reader()->VideoIsHardwareAccelerated();

  // Start video-only seek to the current time.
  SeekJob seekJob;

  // We use fastseek to optimize the resuming time.
  // FastSeek is only used for video-only media since we don't need to worry
  // about A/V sync.
  // Don't use fastSeek if we want to seek to the end because it might seek to a
  // keyframe before the last frame (if the last frame itself is not a keyframe)
  // and we always want to present the final frame to the user when seeking to
  // the end.
  const auto type = mMaster->HasAudio() || aTarget == mMaster->Duration()
                    ? SeekTarget::Type::Accurate
                    : SeekTarget::Type::PrevSyncPoint;

  seekJob.mTarget.emplace(aTarget, type, true /* aVideoOnly */);

  // Hold mMaster->mAbstractMainThread here because this->mMaster will be
  // invalid after the current state object is deleted in SetState();
  RefPtr<AbstractThread> mainThread = mMaster->mAbstractMainThread;

  // The reject callback is a no-op: no telemetry if the seek is interrupted.
  SetSeekingState(std::move(seekJob), EventVisibility::Suppressed)->Then(
    mainThread, __func__,
    [start, info, hw](){ ReportRecoveryTelemetry(start, info, hw); },
    [](){});
}
2159 | | |
2160 | | RefPtr<MediaDecoder::SeekPromise> |
2161 | | MediaDecoderStateMachine:: |
2162 | | StateObject::SetSeekingState(SeekJob&& aSeekJob, EventVisibility aVisibility) |
2163 | 0 | { |
2164 | 0 | if (aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast()) { |
2165 | 0 | if (aSeekJob.mTarget->IsVideoOnly()) { |
2166 | 0 | return SetState<VideoOnlySeekingState>(std::move(aSeekJob), aVisibility); |
2167 | 0 | } |
2168 | 0 | return SetState<AccurateSeekingState>(std::move(aSeekJob), aVisibility); |
2169 | 0 | } |
2170 | 0 | |
2171 | 0 | if (aSeekJob.mTarget->IsNextFrame()) { |
2172 | 0 | return SetState<NextFrameSeekingState>(std::move(aSeekJob), aVisibility); |
2173 | 0 | } |
2174 | 0 | |
2175 | 0 | MOZ_ASSERT_UNREACHABLE("Unknown SeekTarget::Type."); |
2176 | 0 | return nullptr; |
2177 | 0 | } |
2178 | | |
// Handles a successful metadata read: publishes track info and duration on
// the master state machine, notifies metadata-loaded listeners, decides
// whether seamless looping applies, and moves on to decoding first frames.
void
MediaDecoderStateMachine::
DecodeMetadataState::OnMetadataRead(MetadataHolder&& aMetadata)
{
  mMetadataRequest.Complete();

  mMaster->mInfo.emplace(*aMetadata.mInfo);
  mMaster->mMediaSeekable = Info().mMediaSeekable;
  mMaster->mMediaSeekableOnlyInBufferedRanges =
    Info().mMediaSeekableOnlyInBufferedRanges;

  // Prefer the demuxer-reported duration; otherwise derive it from the
  // unadjusted end time minus the start time, caching the result in mInfo.
  if (Info().mMetadataDuration.isSome()) {
    mMaster->mDuration = Info().mMetadataDuration;
  } else if (Info().mUnadjustedMetadataEndTime.isSome()) {
    const TimeUnit unadjusted = Info().mUnadjustedMetadataEndTime.ref();
    const TimeUnit adjustment = Info().mStartTime;
    mMaster->mInfo->mMetadataDuration.emplace(unadjusted - adjustment);
    mMaster->mDuration = Info().mMetadataDuration;
  }

  // If we don't know the duration by this point, we assume infinity, per spec.
  if (mMaster->mDuration.Ref().isNothing()) {
    mMaster->mDuration = Some(TimeUnit::FromInfinity());
  }

  DDLOGEX(mMaster,
          DDLogCategory::Property,
          "duration_us",
          mMaster->mDuration.Ref()->ToMicroseconds());

  if (mMaster->HasVideo()) {
    SLOG("Video decode HWAccel=%d videoQueueSize=%d",
         Reader()->VideoIsHardwareAccelerated(),
         mMaster->GetAmpleVideoFrames());
  }

  MOZ_ASSERT(mMaster->mDuration.Ref().isSome());

  mMaster->mMetadataLoadedEvent.Notify(
    std::move(aMetadata.mInfo),
    std::move(aMetadata.mTags),
    MediaDecoderEventVisibility::Observable);

  // Check whether the media satisfies the requirement of seamless looping:
  // audio-only media with the pref enabled.
  // (Before checking the media is audio only, we need to get metadata first.)
  mMaster->mSeamlessLoopingAllowed = StaticPrefs::MediaSeamlessLooping() &&
                                     mMaster->HasAudio() &&
                                     !mMaster->HasVideo();
  mMaster->LoopingChanged();

  SetState<DecodingFirstFrameState>();
}
2231 | | |
2232 | | void |
2233 | | MediaDecoderStateMachine:: |
2234 | | DormantState::HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) |
2235 | 0 | { |
2236 | 0 | if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) { |
2237 | 0 | // Exit dormant when the user wants to play. |
2238 | 0 | MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent); |
2239 | 0 | SetSeekingState(std::move(mPendingSeek), EventVisibility::Suppressed); |
2240 | 0 | } |
2241 | 0 | } |
2242 | | |
2243 | | void |
2244 | | MediaDecoderStateMachine:: |
2245 | | DecodingFirstFrameState::Enter() |
2246 | 0 | { |
2247 | 0 | // Transition to DECODING if we've decoded first frames. |
2248 | 0 | if (mMaster->mSentFirstFrameLoadedEvent) { |
2249 | 0 | SetState<DecodingState>(); |
2250 | 0 | return; |
2251 | 0 | } |
2252 | 0 | |
2253 | 0 | MOZ_ASSERT(!mMaster->mVideoDecodeSuspended); |
2254 | 0 |
|
2255 | 0 | // Dispatch tasks to decode first frames. |
2256 | 0 | if (mMaster->HasAudio()) { |
2257 | 0 | mMaster->RequestAudioData(); |
2258 | 0 | } |
2259 | 0 | if (mMaster->HasVideo()) { |
2260 | 0 | mMaster->RequestVideoData(media::TimeUnit()); |
2261 | 0 | } |
2262 | 0 | } |
2263 | | |
2264 | | void |
2265 | | MediaDecoderStateMachine:: |
2266 | | DecodingFirstFrameState::MaybeFinishDecodeFirstFrame() |
2267 | 0 | { |
2268 | 0 | MOZ_ASSERT(!mMaster->mSentFirstFrameLoadedEvent); |
2269 | 0 |
|
2270 | 0 | if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0) || |
2271 | 0 | (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) { |
2272 | 0 | return; |
2273 | 0 | } |
2274 | 0 | |
2275 | 0 | mMaster->FinishDecodeFirstFrame(); |
2276 | 0 | if (mPendingSeek.Exists()) { |
2277 | 0 | SetSeekingState(std::move(mPendingSeek), EventVisibility::Observable); |
2278 | 0 | } else { |
2279 | 0 | SetState<DecodingState>(); |
2280 | 0 | } |
2281 | 0 | } |
2282 | | |
// Entry point for the steady-state decoding state. Re-checks video suspend
// conditions, wires up pop-event listeners that keep the decode pipeline
// fed, and schedules the state machine loop.
void
MediaDecoderStateMachine::
DecodingState::Enter()
{
  MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);

  // If video decoding was suspended but the decode mode is back to Normal,
  // resume first; HandleResumeVideoDecoding() replaces this state object.
  if (mMaster->mVideoDecodeSuspended &&
      mMaster->mVideoDecodeMode == VideoDecodeMode::Normal) {
    StateObject::HandleResumeVideoDecoding(mMaster->GetMediaTime());
    return;
  }

  if (mMaster->mVideoDecodeMode == VideoDecodeMode::Suspend &&
      !mMaster->mVideoDecodeSuspendTimer.IsScheduled() &&
      !mMaster->mVideoDecodeSuspended) {
    // If the VideoDecodeMode is Suspend and the timer is not schedule, it means
    // the timer has timed out and we should suspend video decoding now if
    // necessary.
    HandleVideoSuspendTimeout();
  }

  // Nothing left to decode on either track: playback can complete.
  if (!mMaster->IsVideoDecoding() && !mMaster->IsAudioDecoding()) {
    SetState<CompletedState>();
    return;
  }

  // Whenever a sample is popped, request more data if the corresponding
  // queue has fallen below its "enough" threshold.
  mOnAudioPopped = AudioQueue().PopEvent().Connect(
    OwnerThread(), [this] () {
      if (mMaster->IsAudioDecoding() && !mMaster->HaveEnoughDecodedAudio()) {
        EnsureAudioDecodeTaskQueued();
      }
    });
  mOnVideoPopped = VideoQueue().PopEvent().Connect(
    OwnerThread(), [this] () {
      if (mMaster->IsVideoDecoding() && !mMaster->HaveEnoughDecodedVideo()) {
        EnsureVideoDecodeTaskQueued();
      }
    });

  mMaster->mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE);

  mDecodeStartTime = TimeStamp::Now();

  MaybeStopPrerolling();

  // Ensure that we've got tasks enqueued to decode data if we need to.
  DispatchDecodeTasksIfNeeded();

  mMaster->ScheduleStateMachine();

  // Will enter dormant when playback is paused for a while.
  if (mMaster->mPlayState == MediaDecoder::PLAY_STATE_PAUSED) {
    StartDormantTimer();
  }
}
2338 | | |
// One iteration of the decoding-state loop: reconciles the sink's playing
// state with the decoder's play state, advances the playback position, and
// detects loop-back and end-of-loop conditions.
void
MediaDecoderStateMachine::
DecodingState::Step()
{
  if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
      mMaster->IsPlaying()) {
    // We're playing, but the element/decoder is in paused state. Stop
    // playing!
    mMaster->StopPlayback();
  }

  // Start playback if necessary so that the clock can be properly queried.
  if (!mIsPrerolling) {
    mMaster->MaybeStartPlayback();
  }

  TimeUnit before = mMaster->GetMediaTime();
  mMaster->UpdatePlaybackPositionPeriodically();

  // Fire the `seeking` and `seeked` events to meet the HTML spec
  // when the media is looped back from the end to the beginning.
  if (before > mMaster->GetMediaTime()) {
    // Time moving backwards only happens on a seamless loop wrap-around.
    MOZ_ASSERT(mMaster->mLooping);
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::Loop);
    // After looping is cancelled, the time won't be corrected, and therefore we
    // can check it to see if the end of the media track is reached. Make sure
    // the media is started before comparing the time, or it's meaningless.
    // Without checking IsStarted(), the media will be terminated immediately
    // after seeking forward. When the state is just transited from seeking state,
    // GetClock() is smaller than GetMediaTime(), since GetMediaTime() is updated
    // upon seek is completed while GetClock() will be updated after the media is
    // started again.
  } else if (mMaster->mMediaSink->IsStarted() && !mMaster->mLooping) {
    TimeUnit adjusted = mMaster->GetClock();
    Reader()->AdjustByLooping(adjusted);
    if (adjusted < before) {
      // Looping was cancelled after the clock wrapped: treat this as end of
      // playback and tear down the audio request/queue before completing.
      mMaster->StopPlayback();
      mMaster->mAudioDataRequest.DisconnectIfExists();
      AudioQueue().Finish();
      mMaster->mAudioCompleted = true;
      SetState<CompletedState>();
      return;
    }
  }

  MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
             "Must have timer scheduled");

  MaybeStartBuffering();
}
2389 | | |
2390 | | void |
2391 | | MediaDecoderStateMachine:: |
2392 | | DecodingState::HandleEndOfAudio() |
2393 | 0 | { |
2394 | 0 | AudioQueue().Finish(); |
2395 | 0 | if (!mMaster->IsVideoDecoding()) { |
2396 | 0 | SetState<CompletedState>(); |
2397 | 0 | } else { |
2398 | 0 | MaybeStopPrerolling(); |
2399 | 0 | } |
2400 | 0 | } |
2401 | | |
2402 | | void |
2403 | | MediaDecoderStateMachine:: |
2404 | | DecodingState::HandleEndOfVideo() |
2405 | 0 | { |
2406 | 0 | VideoQueue().Finish(); |
2407 | 0 | if (!mMaster->IsAudioDecoding()) { |
2408 | 0 | SetState<CompletedState>(); |
2409 | 0 | } else { |
2410 | 0 | MaybeStopPrerolling(); |
2411 | 0 | } |
2412 | 0 | } |
2413 | | |
2414 | | void |
2415 | | MediaDecoderStateMachine:: |
2416 | | DecodingState::DispatchDecodeTasksIfNeeded() |
2417 | 0 | { |
2418 | 0 | if (mMaster->IsAudioDecoding() && |
2419 | 0 | !mMaster->mMinimizePreroll && |
2420 | 0 | !mMaster->HaveEnoughDecodedAudio()) { |
2421 | 0 | EnsureAudioDecodeTaskQueued(); |
2422 | 0 | } |
2423 | 0 |
|
2424 | 0 | if (mMaster->IsVideoDecoding() && |
2425 | 0 | !mMaster->mMinimizePreroll && |
2426 | 0 | !mMaster->HaveEnoughDecodedVideo()) { |
2427 | 0 | EnsureVideoDecodeTaskQueued(); |
2428 | 0 | } |
2429 | 0 | } |
2430 | | |
2431 | | void |
2432 | | MediaDecoderStateMachine:: |
2433 | | DecodingState::EnsureAudioDecodeTaskQueued() |
2434 | 0 | { |
2435 | 0 | if (!mMaster->IsAudioDecoding() || |
2436 | 0 | mMaster->IsRequestingAudioData() || |
2437 | 0 | mMaster->IsWaitingAudioData()) { |
2438 | 0 | return; |
2439 | 0 | } |
2440 | 0 | mMaster->RequestAudioData(); |
2441 | 0 | } |
2442 | | |
2443 | | void |
2444 | | MediaDecoderStateMachine:: |
2445 | | DecodingState::EnsureVideoDecodeTaskQueued() |
2446 | 0 | { |
2447 | 0 | if (!mMaster->IsVideoDecoding() || |
2448 | 0 | mMaster->IsRequestingVideoData() || |
2449 | 0 | mMaster->IsWaitingVideoData()) { |
2450 | 0 | return; |
2451 | 0 | } |
2452 | 0 | mMaster->RequestVideoData(mMaster->GetMediaTime()); |
2453 | 0 | } |
2454 | | |
2455 | | void |
2456 | | MediaDecoderStateMachine:: |
2457 | | DecodingState::MaybeStartBuffering() |
2458 | 0 | { |
2459 | 0 | // Buffering makes senses only after decoding first frames. |
2460 | 0 | MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent); |
2461 | 0 |
|
2462 | 0 | // Don't enter buffering when MediaDecoder is not playing. |
2463 | 0 | if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING) { |
2464 | 0 | return; |
2465 | 0 | } |
2466 | 0 | |
2467 | 0 | // Don't enter buffering while prerolling so that the decoder has a chance to |
2468 | 0 | // enqueue some decoded data before we give up and start buffering. |
2469 | 0 | if (!mMaster->IsPlaying()) { |
2470 | 0 | return; |
2471 | 0 | } |
2472 | 0 | |
2473 | 0 | // Note we could have a wait promise pending when playing non-MSE EME. |
2474 | 0 | if ((mMaster->OutOfDecodedAudio() && mMaster->IsWaitingAudioData()) || |
2475 | 0 | (mMaster->OutOfDecodedVideo() && mMaster->IsWaitingVideoData())) { |
2476 | 0 | SetState<BufferingState>(); |
2477 | 0 | return; |
2478 | 0 | } |
2479 | 0 | |
2480 | 0 | if (Reader()->UseBufferingHeuristics() && mMaster->HasLowDecodedData() && |
2481 | 0 | mMaster->HasLowBufferedData() && !mMaster->mCanPlayThrough) { |
2482 | 0 | SetState<BufferingState>(); |
2483 | 0 | } |
2484 | 0 | } |
2485 | | |
// Finalizes a seek: finishes the queues when seeking to the end, resolves
// the seek promise (firing 'seeked' before 'loadeddata'), notifies the
// first-frame event if needed, updates the playback position for
// user-initiated seeks, and redraws the current video frame.
void
MediaDecoderStateMachine::
SeekingState::SeekCompleted()
{
  const auto newCurrentTime = CalculateNewCurrentTime();

  if (newCurrentTime == mMaster->Duration() && !mMaster->mIsLiveStream) {
    // Seeked to end of media. Explicitly finish the queues so DECODING
    // will transition to COMPLETED immediately. Note we don't do
    // this when playing a live stream, since the end of media will advance
    // once we download more data!
    AudioQueue().Finish();
    VideoQueue().Finish();

    // We won't start MediaSink when paused. m{Audio,Video}Completed will
    // remain false and 'playbackEnded' won't be notified. Therefore we
    // need to set these flags explicitly when seeking to the end.
    mMaster->mAudioCompleted = true;
    mMaster->mVideoCompleted = true;

    // There might still be a pending audio request when doing video-only or
    // next-frame seek. Discard it so we won't break the invariants of the
    // COMPLETED state by adding audio samples to a finished queue.
    mMaster->mAudioDataRequest.DisconnectIfExists();
  }

  // We want to resolve the seek request prior to finishing the first frame
  // to ensure that the seeked event is fired prior to 'loadeddata'.
  // Note: SeekJob.Resolve() resets SeekJob.mTarget. Don't use mSeekJob anymore
  // hereafter.
  mSeekJob.Resolve(__func__);

  // Notify FirstFrameLoaded now if we haven't since we've decoded some data
  // for readyState to transition to HAVE_CURRENT_DATA and fire 'loadeddata'.
  if (!mMaster->mSentFirstFrameLoadedEvent) {
    mMaster->FinishDecodeFirstFrame();
  }

  // Ensure timestamps are up to date.
  // Suppressed visibility comes from two cases: (1) leaving dormant state,
  // and (2) resuming suspended video decoder. We want both cases to be
  // transparent to the user. So we only notify the change when the seek
  // request is from the user.
  if (mVisibility == EventVisibility::Observable) {
    // Don't update playback position for video-only seek.
    // Otherwise we might have |newCurrentTime > mMediaSink->GetPosition()|
    // and fail the assertion in GetClock() since we didn't stop MediaSink.
    mMaster->UpdatePlaybackPositionInternal(newCurrentTime);
  }

  // Try to decode another frame to detect if we're at the end...
  SLOG("Seek completed, mCurrentPosition=%" PRId64,
       mMaster->mCurrentPosition.Ref().ToMicroseconds());

  // Repaint the current frame so the display matches the new position.
  if (mMaster->VideoQueue().PeekFront()) {
    mMaster->mMediaSink->Redraw(Info().mVideo);
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::Invalidate);
  }

  GoToNextState();
}
2547 | | |
// One iteration of the buffering-state loop: decides whether enough data has
// accumulated (or enough time has passed) to resume decoding, or whether to
// keep waiting.
void
MediaDecoderStateMachine::
BufferingState::Step()
{
  TimeStamp now = TimeStamp::Now();
  MOZ_ASSERT(!mBufferingStart.IsNull(), "Must know buffering start time.");

  if (Reader()->UseBufferingHeuristics()) {
    if (mMaster->IsWaitingAudioData() || mMaster->IsWaitingVideoData()) {
      // Can't exit buffering when we are still waiting for data.
      // Note we don't schedule next loop for we will do that when the wait
      // promise is resolved.
      return;
    }
    // With buffering heuristics, we exit buffering state when we:
    // 1. can play through or
    // 2. time out (specified by mBufferingWait) or
    // 3. have enough buffered data.
    TimeDuration elapsed = now - mBufferingStart;
    // Scale the timeout by the playback rate: faster playback drains the
    // buffer proportionally faster.
    TimeDuration timeout =
      TimeDuration::FromSeconds(mBufferingWait * mMaster->mPlaybackRate);
    bool stopBuffering =
      mMaster->mCanPlayThrough || elapsed >= timeout ||
      !mMaster->HasLowBufferedData(TimeUnit::FromSeconds(mBufferingWait));
    if (!stopBuffering) {
      SLOG("Buffering: wait %ds, timeout in %.3lfs",
           mBufferingWait, mBufferingWait - elapsed.ToSeconds());
      // Poll again in one second.
      mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S));
      return;
    }
  } else if (mMaster->OutOfDecodedAudio() || mMaster->OutOfDecodedVideo()) {
    // Without heuristics, stay buffering while either queue is empty; the
    // outstanding request/wait will re-notify us when data arrives.
    MOZ_ASSERT(!mMaster->OutOfDecodedAudio() ||
               mMaster->IsRequestingAudioData() ||
               mMaster->IsWaitingAudioData());
    MOZ_ASSERT(!mMaster->OutOfDecodedVideo() ||
               mMaster->IsRequestingVideoData() ||
               mMaster->IsWaitingVideoData());
    SLOG("In buffering mode, waiting to be notified: outOfAudio: %d, "
         "mAudioStatus: %s, outOfVideo: %d, mVideoStatus: %s",
         mMaster->OutOfDecodedAudio(),
         mMaster->AudioRequestStatus(),
         mMaster->OutOfDecodedVideo(),
         mMaster->VideoRequestStatus());
    return;
  }

  SLOG("Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
  SetState<DecodingState>();
}
2597 | | |
2598 | | void |
2599 | | MediaDecoderStateMachine:: |
2600 | | BufferingState::HandleEndOfAudio() |
2601 | 0 | { |
2602 | 0 | AudioQueue().Finish(); |
2603 | 0 | if (!mMaster->IsVideoDecoding()) { |
2604 | 0 | SetState<CompletedState>(); |
2605 | 0 | } else { |
2606 | 0 | // Check if we can exit buffering. |
2607 | 0 | mMaster->ScheduleStateMachine(); |
2608 | 0 | } |
2609 | 0 | } |
2610 | | |
2611 | | void |
2612 | | MediaDecoderStateMachine:: |
2613 | | BufferingState::HandleEndOfVideo() |
2614 | 0 | { |
2615 | 0 | VideoQueue().Finish(); |
2616 | 0 | if (!mMaster->IsAudioDecoding()) { |
2617 | 0 | SetState<CompletedState>(); |
2618 | 0 | } else { |
2619 | 0 | // Check if we can exit buffering. |
2620 | 0 | mMaster->ScheduleStateMachine(); |
2621 | 0 | } |
2622 | 0 | } |
2623 | | |
// Tears down the state machine in a strict order -- timers, playback, data
// requests, sinks, event listeners, mirrors/canonicals, watch manager --
// then shuts down the reader. Returns the promise that resolves once reader
// shutdown finishes (both resolve and reject funnel into FinishShutdown).
RefPtr<ShutdownPromise>
MediaDecoderStateMachine::
ShutdownState::Enter()
{
  auto master = mMaster;

  master->mDelayedScheduler.Reset();

  // Shutdown happens while decode timer is active, we need to disconnect and
  // dispose of the timer.
  master->CancelSuspendTimer();

  if (master->IsPlaying()) {
    master->StopPlayback();
  }

  // Drop any in-flight decode/wait requests before resetting the decoders.
  master->mAudioDataRequest.DisconnectIfExists();
  master->mVideoDataRequest.DisconnectIfExists();
  master->mAudioWaitRequest.DisconnectIfExists();
  master->mVideoWaitRequest.DisconnectIfExists();

  master->ResetDecode();
  master->StopMediaSink();
  master->mMediaSink->Shutdown();

  // Prevent dangling pointers by disconnecting the listeners.
  master->mAudioQueueListener.Disconnect();
  master->mVideoQueueListener.Disconnect();
  master->mMetadataManager.Disconnect();
  master->mOnMediaNotSeekable.Disconnect();

  // Disconnect canonicals and mirrors before shutting down our task queue.
  master->mBuffered.DisconnectIfConnected();
  master->mPlayState.DisconnectIfConnected();
  master->mVolume.DisconnectIfConnected();
  master->mPreservesPitch.DisconnectIfConnected();
  master->mLooping.DisconnectIfConnected();
  master->mSameOriginMedia.DisconnectIfConnected();
  master->mMediaPrincipalHandle.DisconnectIfConnected();

  master->mDuration.DisconnectAll();
  master->mCurrentPosition.DisconnectAll();
  master->mIsAudioDataAudible.DisconnectAll();

  // Shut down the watch manager to stop further notifications.
  master->mWatchManager.Shutdown();

  return Reader()->Shutdown()->Then(
    OwnerThread(), __func__, master,
    &MediaDecoderStateMachine::FinishShutdown,
    &MediaDecoderStateMachine::FinishShutdown);
}
2676 | | |
2677 | | #define INIT_WATCHABLE(name, val) \ |
2678 | | name(val, "MediaDecoderStateMachine::" #name) |
2679 | | #define INIT_MIRROR(name, val) \ |
2680 | | name(mTaskQueue, val, "MediaDecoderStateMachine::" #name " (Mirror)") |
2681 | | #define INIT_CANONICAL(name, val) \ |
2682 | | name(mTaskQueue, val, "MediaDecoderStateMachine::" #name " (Canonical)") |
2683 | | |
// Constructor: runs on the main thread. Creates the playback task queue and
// all mirrored/canonical state; the decoder-side connections are made later
// in InitializationTask() once the task queue is live.
MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
                                                   MediaFormatReader* aReader) :
  mDecoderID(aDecoder),
  mAbstractMainThread(aDecoder->AbstractMainThread()),
  mFrameStats(&aDecoder->GetFrameStatistics()),
  mVideoFrameContainer(aDecoder->GetVideoFrameContainer()),
  mTaskQueue(new TaskQueue(
    GetMediaThreadPool(MediaThreadType::PLAYBACK),
    "MDSM::mTaskQueue", /* aSupportsTailDispatch = */ true)),
  mWatchManager(this, mTaskQueue),
  mDispatchedStateMachine(false),
  mDelayedScheduler(mTaskQueue, true /*aFuzzy*/),
  mCurrentFrameID(0),
  mReader(new ReaderProxy(mTaskQueue, aReader)),
  mPlaybackRate(1.0),
  mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
  mAudioCaptured(false),
  mMinimizePreroll(aDecoder->GetMinimizePreroll()),
  mSentFirstFrameLoadedEvent(false),
  mVideoDecodeSuspended(false),
  mVideoDecodeSuspendTimer(mTaskQueue),
  mOutputStreamManager(new OutputStreamManager()),
  mVideoDecodeMode(VideoDecodeMode::Normal),
  mIsMSE(aDecoder->IsMSE()),
  mSeamlessLoopingAllowed(false),
  INIT_MIRROR(mBuffered, TimeIntervals()),
  INIT_MIRROR(mPlayState, MediaDecoder::PLAY_STATE_LOADING),
  INIT_MIRROR(mVolume, 1.0),
  INIT_MIRROR(mPreservesPitch, true),
  INIT_MIRROR(mLooping, false),
  INIT_MIRROR(mSameOriginMedia, false),
  INIT_MIRROR(mMediaPrincipalHandle, PRINCIPAL_HANDLE_NONE),
  INIT_CANONICAL(mDuration, NullableTimeUnit()),
  INIT_CANONICAL(mCurrentPosition, TimeUnit::Zero()),
  INIT_CANONICAL(mIsAudioDataAudible, false)
{
  MOZ_COUNT_CTOR(MediaDecoderStateMachine);
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");

  InitVideoQueuePrefs();

  // Register the reader as a child in the decoder-doctor log graph.
  DDLINKCHILD("reader", aReader);
}
2727 | | |
2728 | | #undef INIT_WATCHABLE |
2729 | | #undef INIT_MIRROR |
2730 | | #undef INIT_CANONICAL |
2731 | | |
// Destructor: main-thread only. Leak-checking bookkeeping lives here; the
// actual resource teardown is performed earlier in ShutdownState::Enter().
MediaDecoderStateMachine::~MediaDecoderStateMachine()
{
  MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread.");
  MOZ_COUNT_DTOR(MediaDecoderStateMachine);
}
2737 | | |
// Runs on the task queue (dispatched from Init()). Connects all mirrored
// state to the decoder's canonicals, registers watchers, and enters the
// initial DecodeMetadataState.
void
MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder)
{
  MOZ_ASSERT(OnTaskQueue());

  // Connect mirrors.
  mBuffered.Connect(mReader->CanonicalBuffered());
  mPlayState.Connect(aDecoder->CanonicalPlayState());
  mVolume.Connect(aDecoder->CanonicalVolume());
  mPreservesPitch.Connect(aDecoder->CanonicalPreservesPitch());
  mLooping.Connect(aDecoder->CanonicalLooping());
  mSameOriginMedia.Connect(aDecoder->CanonicalSameOriginMedia());
  mMediaPrincipalHandle.Connect(aDecoder->CanonicalMediaPrincipalHandle());

  // Initialize watchers.
  mWatchManager.Watch(mBuffered,
                      &MediaDecoderStateMachine::BufferedRangeUpdated);
  mWatchManager.Watch(mVolume, &MediaDecoderStateMachine::VolumeChanged);
  mWatchManager.Watch(mPreservesPitch,
                      &MediaDecoderStateMachine::PreservesPitchChanged);
  mWatchManager.Watch(mPlayState, &MediaDecoderStateMachine::PlayStateChanged);
  mWatchManager.Watch(mLooping, &MediaDecoderStateMachine::LoopingChanged);

  // Enter the first state explicitly; SetState() is not used because there
  // is no previous state object to exit.
  MOZ_ASSERT(!mStateObj);
  auto* s = new DecodeMetadataState(this);
  mStateObj.reset(s);
  s->Enter();
}
2766 | | |
// Mirrors the sink-reported audible state into the canonical
// |mIsAudioDataAudible| so observers connected to it are notified.
void
MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible)
{
  mIsAudioDataAudible = aAudible;
}
2772 | | |
// Builds the audio-sink wrapper. The actual AudioSink is created lazily by
// the wrapper via |audioSinkCreator|, which runs on the task queue.
media::MediaSink*
MediaDecoderStateMachine::CreateAudioSink()
{
  // The lambda keeps |this| alive via a strong self reference for as long as
  // the wrapper may invoke it.
  RefPtr<MediaDecoderStateMachine> self = this;
  auto audioSinkCreator = [self] () {
    MOZ_ASSERT(self->OnTaskQueue());
    AudioSink* audioSink = new AudioSink(
      self->mTaskQueue, self->mAudioQueue,
      self->GetMediaTime(),
      self->Info().mAudio);

    // Route the new sink's audible notifications back into the state machine.
    self->mAudibleListener = audioSink->AudibleEvent().Connect(
      self->mTaskQueue, self.get(),
      &MediaDecoderStateMachine::AudioAudibleChanged);
    return audioSink;
  };
  return new AudioSinkWrapper(mTaskQueue, audioSinkCreator);
}
2791 | | |
// Composes the media sink: a VideoSink layered on top of either a
// DecodedStream (when audio is captured into an output stream) or a plain
// audio sink. Returns an addrefed pointer for the caller to own.
already_AddRefed<media::MediaSink>
MediaDecoderStateMachine::CreateMediaSink(bool aAudioCaptured)
{
  RefPtr<media::MediaSink> audioSink =
    aAudioCaptured
    ? new DecodedStream(mTaskQueue, mAbstractMainThread, mAudioQueue,
                        mVideoQueue, mOutputStreamManager,
                        mSameOriginMedia.Ref(), mMediaPrincipalHandle.Ref())
    : CreateAudioSink();

  RefPtr<media::MediaSink> mediaSink =
    new VideoSink(mTaskQueue, audioSink, mVideoQueue,
                  mVideoFrameContainer, *mFrameStats,
                  sVideoQueueSendToCompositorSize);
  return mediaSink.forget();
}
2808 | | |
2809 | | TimeUnit |
2810 | | MediaDecoderStateMachine::GetDecodedAudioDuration() |
2811 | 0 | { |
2812 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2813 | 0 | if (mMediaSink->IsStarted()) { |
2814 | 0 | // mDecodedAudioEndTime might be smaller than GetClock() when there is |
2815 | 0 | // overlap between 2 adjacent audio samples or when we are playing |
2816 | 0 | // a chained ogg file. |
2817 | 0 | return std::max(mDecodedAudioEndTime - GetClock(), TimeUnit::Zero()); |
2818 | 0 | } |
2819 | 0 | // MediaSink not started. All audio samples are in the queue. |
2820 | 0 | return TimeUnit::FromMicroseconds(AudioQueue().Duration()); |
2821 | 0 | } |
2822 | | |
2823 | | bool |
2824 | | MediaDecoderStateMachine::HaveEnoughDecodedAudio() |
2825 | 0 | { |
2826 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2827 | 0 | auto ampleAudio = mAmpleAudioThreshold.MultDouble(mPlaybackRate); |
2828 | 0 | return AudioQueue().GetSize() > 0 && GetDecodedAudioDuration() >= ampleAudio; |
2829 | 0 | } |
2830 | | |
2831 | | bool MediaDecoderStateMachine::HaveEnoughDecodedVideo() |
2832 | 0 | { |
2833 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2834 | 0 | return VideoQueue().GetSize() >= GetAmpleVideoFrames() * mPlaybackRate + 1; |
2835 | 0 | } |
2836 | | |
// Appends a decoded audio sample to the audio queue. |aSample| must be
// non-null; runs on the task queue.
void
MediaDecoderStateMachine::PushAudio(AudioData* aSample)
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aSample);
  AudioQueue().Push(aSample);
}
2844 | | |
// Appends a decoded video frame to the video queue, stamping it with a
// monotonically increasing frame ID first. |aSample| must be non-null.
void
MediaDecoderStateMachine::PushVideo(VideoData* aSample)
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aSample);
  aSample->mFrameID = ++mCurrentFrameID;
  VideoQueue().Push(aSample);
}
2853 | | |
2854 | | void |
2855 | | MediaDecoderStateMachine::OnAudioPopped(const RefPtr<AudioData>& aSample) |
2856 | 0 | { |
2857 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2858 | 0 | mPlaybackOffset = std::max(mPlaybackOffset, aSample->mOffset); |
2859 | 0 | } |
2860 | | |
2861 | | void |
2862 | | MediaDecoderStateMachine::OnVideoPopped(const RefPtr<VideoData>& aSample) |
2863 | 0 | { |
2864 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2865 | 0 | mPlaybackOffset = std::max(mPlaybackOffset, aSample->mOffset); |
2866 | 0 | } |
2867 | | |
2868 | | bool |
2869 | | MediaDecoderStateMachine::IsAudioDecoding() |
2870 | 0 | { |
2871 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2872 | 0 | return HasAudio() && !AudioQueue().IsFinished(); |
2873 | 0 | } |
2874 | | |
2875 | | bool |
2876 | | MediaDecoderStateMachine::IsVideoDecoding() |
2877 | 0 | { |
2878 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2879 | 0 | return HasVideo() && !VideoQueue().IsFinished(); |
2880 | 0 | } |
2881 | | |
// Whether the media sink is currently playing; this is the single source of
// truth the state machine consults for "playing".
bool MediaDecoderStateMachine::IsPlaying() const
{
  MOZ_ASSERT(OnTaskQueue());
  return mMediaSink->IsPlaying();
}
2887 | | |
// Marks the media as non-seekable; invoked via the reader's
// OnMediaNotSeekable event (see Init()).
void MediaDecoderStateMachine::SetMediaNotSeekable()
{
  mMediaSeekable = false;
}
2892 | | |
// Main-thread initialization: dispatches the task-queue-side setup, wires up
// queue/reader event listeners, creates the media sink, and initializes the
// reader. Returns the reader's init result.
nsresult MediaDecoderStateMachine::Init(MediaDecoder* aDecoder)
{
  MOZ_ASSERT(NS_IsMainThread());

  // Dispatch initialization that needs to happen on that task queue.
  nsCOMPtr<nsIRunnable> r = NewRunnableMethod<RefPtr<MediaDecoder>>(
    "MediaDecoderStateMachine::InitializationTask",
    this,
    &MediaDecoderStateMachine::InitializationTask,
    aDecoder);
  mTaskQueue->DispatchStateChange(r.forget());

  // Track popped samples so mPlaybackOffset follows consumption
  // (see OnAudioPopped / OnVideoPopped).
  mAudioQueueListener = AudioQueue().PopEvent().Connect(
    mTaskQueue, this, &MediaDecoderStateMachine::OnAudioPopped);
  mVideoQueueListener = VideoQueue().PopEvent().Connect(
    mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped);

  mMetadataManager.Connect(mReader->TimedMetadataEvent(), OwnerThread());

  mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
    OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);

  mMediaSink = CreateMediaSink(mAudioCaptured);

  nsresult rv = mReader->Init();
  NS_ENSURE_SUCCESS(rv, rv);

  mReader->SetCanonicalDuration(&mDuration);

  return NS_OK;
}
2924 | | |
2925 | | void |
2926 | | MediaDecoderStateMachine::StopPlayback() |
2927 | 0 | { |
2928 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2929 | 0 | LOG("StopPlayback()"); |
2930 | 0 |
|
2931 | 0 | if (IsPlaying()) { |
2932 | 0 | mOnPlaybackEvent.Notify(MediaPlaybackEvent{ |
2933 | 0 | MediaPlaybackEvent::PlaybackStopped, mPlaybackOffset }); |
2934 | 0 | mMediaSink->SetPlaying(false); |
2935 | 0 | MOZ_ASSERT(!IsPlaying()); |
2936 | 0 | } |
2937 | 0 | } |
2938 | | |
// Starts the media sink and resumes playing if we are in the PLAYING play
// state and not already playing. Must only be called after the first frames
// have been decoded.
void MediaDecoderStateMachine::MaybeStartPlayback()
{
  MOZ_ASSERT(OnTaskQueue());
  // Should try to start playback only after decoding first frames.
  MOZ_ASSERT(mSentFirstFrameLoadedEvent);

  if (IsPlaying()) {
    // Logging this case is really spammy - don't do it.
    return;
  }

  if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
    LOG("Not starting playback [mPlayState=%d]", mPlayState.Ref());
    return;
  }

  LOG("MaybeStartPlayback() starting playback");
  // Start the sink first (no-op if already started), then unpause it.
  StartMediaSink();

  if (!IsPlaying()) {
    mMediaSink->SetPlaying(true);
    MOZ_ASSERT(IsPlaying());
  }

  // Notify listeners only after the sink actually reports playing.
  mOnPlaybackEvent.Notify(
    MediaPlaybackEvent{ MediaPlaybackEvent::PlaybackStarted, mPlaybackOffset });
}
2966 | | |
// Sets the canonical current position to aTime, growing mDuration if the
// position has moved past the previously-known duration.
void
MediaDecoderStateMachine::UpdatePlaybackPositionInternal(const TimeUnit& aTime)
{
  MOZ_ASSERT(OnTaskQueue());
  LOGV("UpdatePlaybackPositionInternal(%" PRId64 ")", aTime.ToMicroseconds());

  mCurrentPosition = aTime;
  NS_ASSERTION(mCurrentPosition.Ref() >= TimeUnit::Zero(),
               "CurrentTime should be positive!");
  // NOTE(review): mDuration.Ref().ref() assumes the duration Maybe is
  // already Some() here — presumably guaranteed once metadata is loaded;
  // confirm callers never reach this before that point.
  if (mDuration.Ref().ref() < mCurrentPosition.Ref()) {
    mDuration = Some(mCurrentPosition.Ref());
    DDLOG(DDLogCategory::Property,
          "duration_us",
          mDuration.Ref()->ToMicroseconds());
  }
}
2983 | | |
2984 | | void |
2985 | | MediaDecoderStateMachine::UpdatePlaybackPosition(const TimeUnit& aTime) |
2986 | 0 | { |
2987 | 0 | MOZ_ASSERT(OnTaskQueue()); |
2988 | 0 | UpdatePlaybackPositionInternal(aTime); |
2989 | 0 |
|
2990 | 0 | bool fragmentEnded = |
2991 | 0 | mFragmentEndTime.IsValid() && GetMediaTime() >= mFragmentEndTime; |
2992 | 0 | mMetadataManager.DispatchMetadataIfNeeded(aTime); |
2993 | 0 |
|
2994 | 0 | if (fragmentEnded) { |
2995 | 0 | StopPlayback(); |
2996 | 0 | } |
2997 | 0 | } |
2998 | | |
2999 | | /* static */ const char* |
3000 | | MediaDecoderStateMachine::ToStateStr(State aState) |
3001 | 0 | { |
3002 | 0 | switch (aState) { |
3003 | 0 | case DECODER_STATE_DECODING_METADATA: return "DECODING_METADATA"; |
3004 | 0 | case DECODER_STATE_DORMANT: return "DORMANT"; |
3005 | 0 | case DECODER_STATE_DECODING_FIRSTFRAME: return "DECODING_FIRSTFRAME"; |
3006 | 0 | case DECODER_STATE_DECODING: return "DECODING"; |
3007 | 0 | case DECODER_STATE_SEEKING: return "SEEKING"; |
3008 | 0 | case DECODER_STATE_BUFFERING: return "BUFFERING"; |
3009 | 0 | case DECODER_STATE_COMPLETED: return "COMPLETED"; |
3010 | 0 | case DECODER_STATE_SHUTDOWN: return "SHUTDOWN"; |
3011 | 0 | default: MOZ_ASSERT_UNREACHABLE("Invalid state."); |
3012 | 0 | } |
3013 | 0 | return "UNKNOWN"; |
3014 | 0 | } |
3015 | | |
3016 | | const char* |
3017 | | MediaDecoderStateMachine::ToStateStr() |
3018 | 0 | { |
3019 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3020 | 0 | return ToStateStr(mStateObj->GetState()); |
3021 | 0 | } |
3022 | | |
// Mirror-change handler: pushes the new canonical volume to the media sink.
void MediaDecoderStateMachine::VolumeChanged()
{
  MOZ_ASSERT(OnTaskQueue());
  mMediaSink->SetVolume(mVolume);
}
3028 | | |
// Delegates shutdown to the current state object, which returns a promise
// resolved when the state machine has finished tearing down.
RefPtr<ShutdownPromise>
MediaDecoderStateMachine::Shutdown()
{
  MOZ_ASSERT(OnTaskQueue());
  return mStateObj->HandleShutdown();
}
3035 | | |
// Mirror-change handler for the decoder's play state. Cancels the suspend
// timer when leaving PLAYING, disables preroll minimization when entering
// PLAYING, then forwards the new state to the state object.
void MediaDecoderStateMachine::PlayStateChanged()
{
  MOZ_ASSERT(OnTaskQueue());

  if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
    CancelSuspendTimer();
  } else if (mMinimizePreroll) {
    // Once we start playing, we don't want to minimize our prerolling, as we
    // assume the user is likely to want to keep playing in future. This needs
    // to happen before we invoke StartDecoding().
    mMinimizePreroll = false;
  }

  mStateObj->HandlePlayStateChanged(mPlayState);
}
3051 | | |
3052 | | void MediaDecoderStateMachine::SetVideoDecodeMode(VideoDecodeMode aMode) |
3053 | 0 | { |
3054 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
3055 | 0 | nsCOMPtr<nsIRunnable> r = NewRunnableMethod<VideoDecodeMode>( |
3056 | 0 | "MediaDecoderStateMachine::SetVideoDecodeModeInternal", |
3057 | 0 | this, |
3058 | 0 | &MediaDecoderStateMachine::SetVideoDecodeModeInternal, |
3059 | 0 | aMode); |
3060 | 0 | OwnerThread()->DispatchStateChange(r.forget()); |
3061 | 0 | } |
3062 | | |
// Applies a video-decode mode change on the task queue. Entering Suspend
// arms a delayed timer (decoding is only actually suspended when it fires);
// entering Normal cancels any pending timer and, if decoding was already
// suspended, asks the state object to resume video decoding.
void MediaDecoderStateMachine::SetVideoDecodeModeInternal(VideoDecodeMode aMode)
{
  MOZ_ASSERT(OnTaskQueue());

  LOG("SetVideoDecodeModeInternal(), VideoDecodeMode=(%s->%s), mVideoDecodeSuspended=%c",
      mVideoDecodeMode == VideoDecodeMode::Normal ? "Normal" : "Suspend",
      aMode == VideoDecodeMode::Normal ? "Normal" : "Suspend",
      mVideoDecodeSuspended ? 'T' : 'F');

  // Should not suspend decoding if we don't turn on the pref.
  if (!StaticPrefs::MediaSuspendBkgndVideoEnabled() &&
      aMode == VideoDecodeMode::Suspend) {
    LOG("SetVideoDecodeModeInternal(), early return because preference off and set to Suspend");
    return;
  }

  if (aMode == mVideoDecodeMode) {
    LOG("SetVideoDecodeModeInternal(), early return because the mode does not change");
    return;
  }

  // Set new video decode mode.
  mVideoDecodeMode = aMode;

  // Start timer to trigger suspended video decoding.
  if (mVideoDecodeMode == VideoDecodeMode::Suspend) {
    TimeStamp target = TimeStamp::Now() + SuspendBackgroundVideoDelay();

    RefPtr<MediaDecoderStateMachine> self = this;
    // The reject callback should never run; the timer is either resolved or
    // explicitly cancelled via CancelSuspendTimer().
    mVideoDecodeSuspendTimer.Ensure(target,
                                    [=]() { self->OnSuspendTimerResolved(); },
                                    [] () { MOZ_DIAGNOSTIC_ASSERT(false); });
    mOnPlaybackEvent.Notify(MediaPlaybackEvent::StartVideoSuspendTimer);
    return;
  }

  // Resuming from suspended decoding

  // If suspend timer exists, destroy it.
  CancelSuspendTimer();

  if (mVideoDecodeSuspended) {
    // Resume slightly ahead of the clock so decoded frames are ready in time.
    const auto target = mMediaSink->IsStarted() ? GetClock() : GetMediaTime();
    mStateObj->HandleResumeVideoDecoding(target + detail::RESUME_VIDEO_PREMIUM);
  }
}
3109 | | |
// Mirror-change handler for the buffered ranges: grows mDuration to the end
// of buffered data when the known duration is missing, infinite, or smaller.
void MediaDecoderStateMachine::BufferedRangeUpdated()
{
  MOZ_ASSERT(OnTaskQueue());

  // While playing an unseekable stream of unknown duration, mDuration
  // is updated as we play. But if data is being downloaded
  // faster than played, mDuration won't reflect the end of playable data
  // since we haven't played the frame at the end of buffered data. So update
  // mDuration here as new data is downloaded to prevent such a lag.
  if (mBuffered.Ref().IsInvalid()) {
    return;
  }

  bool exists;
  media::TimeUnit end{ mBuffered.Ref().GetEnd(&exists) };
  if (!exists) {
    return;
  }

  // Use estimated duration from buffer ranges when mDuration is unknown or
  // the estimated duration is larger.
  if (mDuration.Ref().isNothing() || mDuration.Ref()->IsInfinite() ||
      end > mDuration.Ref().ref()) {
    mDuration = Some(end);
    DDLOG(DDLogCategory::Property,
          "duration_us",
          mDuration.Ref()->ToMicroseconds());
  }
}
3139 | | |
3140 | | RefPtr<MediaDecoder::SeekPromise> |
3141 | | MediaDecoderStateMachine::Seek(const SeekTarget& aTarget) |
3142 | 0 | { |
3143 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3144 | 0 |
|
3145 | 0 | // We need to be able to seek in some way |
3146 | 0 | if (!mMediaSeekable && !mMediaSeekableOnlyInBufferedRanges) { |
3147 | 0 | LOGW("Seek() should not be called on a non-seekable media"); |
3148 | 0 | return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, |
3149 | 0 | __func__); |
3150 | 0 | } |
3151 | 0 |
|
3152 | 0 | if (aTarget.IsNextFrame() && !HasVideo()) { |
3153 | 0 | LOGW("Ignore a NextFrameSeekTask on a media file without video track."); |
3154 | 0 | return MediaDecoder::SeekPromise::CreateAndReject(/* aIgnored = */ true, |
3155 | 0 | __func__); |
3156 | 0 | } |
3157 | 0 |
|
3158 | 0 | MOZ_ASSERT(mDuration.Ref().isSome(), "We should have got duration already"); |
3159 | 0 |
|
3160 | 0 | return mStateObj->HandleSeek(aTarget); |
3161 | 0 | } |
3162 | | |
// Thread-safe wrapper: proxies Seek() onto the state machine's task queue
// and returns the resulting promise.
RefPtr<MediaDecoder::SeekPromise>
MediaDecoderStateMachine::InvokeSeek(const SeekTarget& aTarget)
{
  return InvokeAsync(
    OwnerThread(), this, __func__,
    &MediaDecoderStateMachine::Seek, aTarget);
}
3170 | | |
// Stops the media sink if it was started, disconnecting the audible-state
// listener and the sink's audio/video end promises.
void MediaDecoderStateMachine::StopMediaSink()
{
  MOZ_ASSERT(OnTaskQueue());
  if (mMediaSink->IsStarted()) {
    LOG("Stop MediaSink");
    mAudibleListener.DisconnectIfExists();

    mMediaSink->Stop();
    mMediaSinkAudioPromise.DisconnectIfExists();
    mMediaSinkVideoPromise.DisconnectIfExists();
  }
}
3183 | | |
// Requests one audio sample from the reader. On success, records the new
// decoded-audio end time and forwards the sample to the state object; on
// failure, routes the error code to the matching state-object handler, with
// unknown errors treated as fatal decode errors.
void
MediaDecoderStateMachine::RequestAudioData()
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(IsAudioDecoding());
  MOZ_ASSERT(!IsRequestingAudioData());
  MOZ_ASSERT(!IsWaitingAudioData());
  LOGV("Queueing audio task - queued=%zu, decoder-queued=%zu",
       AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames());

  RefPtr<MediaDecoderStateMachine> self = this;
  mReader->RequestAudioData()->Then(
    OwnerThread(), __func__,
    [this, self] (RefPtr<AudioData> aAudio) {
      MOZ_ASSERT(aAudio);
      mAudioDataRequest.Complete();
      // audio->GetEndTime() is not always mono-increasing in chained ogg.
      mDecodedAudioEndTime = std::max(
        aAudio->GetEndTime(), mDecodedAudioEndTime);
      LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]",
           aAudio->mTime.ToMicroseconds(),
           aAudio->GetEndTime().ToMicroseconds());
      mStateObj->HandleAudioDecoded(aAudio);
    },
    [this, self] (const MediaResult& aError) {
      LOGV("OnAudioNotDecoded aError=%s", aError.ErrorName().get());
      mAudioDataRequest.Complete();
      // Recoverable conditions go to the state object; anything else is fatal.
      switch (aError.Code()) {
        case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
          mStateObj->HandleWaitingForAudio();
          break;
        case NS_ERROR_DOM_MEDIA_CANCELED:
          mStateObj->HandleAudioCanceled();
          break;
        case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
          mStateObj->HandleEndOfAudio();
          break;
        default:
          DecodeError(aError);
      }
    })->Track(mAudioDataRequest);
}
3226 | | |
3227 | | void |
3228 | | MediaDecoderStateMachine::RequestVideoData(const media::TimeUnit& aCurrentTime) |
3229 | 0 | { |
3230 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3231 | 0 | MOZ_ASSERT(IsVideoDecoding()); |
3232 | 0 | MOZ_ASSERT(!IsRequestingVideoData()); |
3233 | 0 | MOZ_ASSERT(!IsWaitingVideoData()); |
3234 | 0 | LOGV("Queueing video task - queued=%zu, decoder-queued=%zo" |
3235 | 0 | ", stime=%" PRId64, |
3236 | 0 | VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(), |
3237 | 0 | aCurrentTime.ToMicroseconds()); |
3238 | 0 |
|
3239 | 0 | TimeStamp videoDecodeStartTime = TimeStamp::Now(); |
3240 | 0 | RefPtr<MediaDecoderStateMachine> self = this; |
3241 | 0 | mReader->RequestVideoData(aCurrentTime)->Then( |
3242 | 0 | OwnerThread(), __func__, |
3243 | 0 | [this, self, videoDecodeStartTime] (RefPtr<VideoData> aVideo) { |
3244 | 0 | MOZ_ASSERT(aVideo); |
3245 | 0 | mVideoDataRequest.Complete(); |
3246 | 0 | // Handle abnormal or negative timestamps. |
3247 | 0 | mDecodedVideoEndTime = std::max( |
3248 | 0 | mDecodedVideoEndTime, aVideo->GetEndTime()); |
3249 | 0 | LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]", |
3250 | 0 | aVideo->mTime.ToMicroseconds(), |
3251 | 0 | aVideo->GetEndTime().ToMicroseconds()); |
3252 | 0 | mStateObj->HandleVideoDecoded(aVideo, videoDecodeStartTime); |
3253 | 0 | }, |
3254 | 0 | [this, self] (const MediaResult& aError) { |
3255 | 0 | LOGV("OnVideoNotDecoded aError=%s" , aError.ErrorName().get()); |
3256 | 0 | mVideoDataRequest.Complete(); |
3257 | 0 | switch (aError.Code()) { |
3258 | 0 | case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: |
3259 | 0 | mStateObj->HandleWaitingForVideo(); |
3260 | 0 | break; |
3261 | 0 | case NS_ERROR_DOM_MEDIA_CANCELED: |
3262 | 0 | mStateObj->HandleVideoCanceled(); |
3263 | 0 | break; |
3264 | 0 | case NS_ERROR_DOM_MEDIA_END_OF_STREAM: |
3265 | 0 | mStateObj->HandleEndOfVideo(); |
3266 | 0 | break; |
3267 | 0 | default: |
3268 | 0 | DecodeError(aError); |
3269 | 0 | } |
3270 | 0 | })->Track(mVideoDataRequest); |
3271 | 0 | } |
3272 | | |
3273 | | void |
3274 | | MediaDecoderStateMachine::WaitForData(MediaData::Type aType) |
3275 | 0 | { |
3276 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3277 | 0 | MOZ_ASSERT(aType == MediaData::AUDIO_DATA || aType == MediaData::VIDEO_DATA); |
3278 | 0 | RefPtr<MediaDecoderStateMachine> self = this; |
3279 | 0 | if (aType == MediaData::AUDIO_DATA) { |
3280 | 0 | mReader->WaitForData(MediaData::AUDIO_DATA)->Then( |
3281 | 0 | OwnerThread(), __func__, |
3282 | 0 | [self] (MediaData::Type aType) { |
3283 | 0 | self->mAudioWaitRequest.Complete(); |
3284 | 0 | MOZ_ASSERT(aType == MediaData::AUDIO_DATA); |
3285 | 0 | self->mStateObj->HandleAudioWaited(aType); |
3286 | 0 | }, |
3287 | 0 | [self] (const WaitForDataRejectValue& aRejection) { |
3288 | 0 | self->mAudioWaitRequest.Complete(); |
3289 | 0 | self->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); |
3290 | 0 | })->Track(mAudioWaitRequest); |
3291 | 0 | } else { |
3292 | 0 | mReader->WaitForData(MediaData::VIDEO_DATA)->Then( |
3293 | 0 | OwnerThread(), __func__, |
3294 | 0 | [self] (MediaData::Type aType) { |
3295 | 0 | self->mVideoWaitRequest.Complete(); |
3296 | 0 | MOZ_ASSERT(aType == MediaData::VIDEO_DATA); |
3297 | 0 | self->mStateObj->HandleVideoWaited(aType); |
3298 | 0 | }, |
3299 | 0 | [self] (const WaitForDataRejectValue& aRejection) { |
3300 | 0 | self->mVideoWaitRequest.Complete(); |
3301 | 0 | self->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); |
3302 | 0 | })->Track(mVideoWaitRequest); |
3303 | 0 | } |
3304 | 0 | } |
3305 | | |
3306 | | void |
3307 | | MediaDecoderStateMachine::StartMediaSink() |
3308 | 0 | { |
3309 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3310 | 0 | if (!mMediaSink->IsStarted()) { |
3311 | 0 | mAudioCompleted = false; |
3312 | 0 | mMediaSink->Start(GetMediaTime(), Info()); |
3313 | 0 |
|
3314 | 0 | auto videoPromise = mMediaSink->OnEnded(TrackInfo::kVideoTrack); |
3315 | 0 | auto audioPromise = mMediaSink->OnEnded(TrackInfo::kAudioTrack); |
3316 | 0 |
|
3317 | 0 | if (audioPromise) { |
3318 | 0 | audioPromise->Then( |
3319 | 0 | OwnerThread(), __func__, this, |
3320 | 0 | &MediaDecoderStateMachine::OnMediaSinkAudioComplete, |
3321 | 0 | &MediaDecoderStateMachine::OnMediaSinkAudioError) |
3322 | 0 | ->Track(mMediaSinkAudioPromise); |
3323 | 0 | } |
3324 | 0 | if (videoPromise) { |
3325 | 0 | videoPromise->Then( |
3326 | 0 | OwnerThread(), __func__, this, |
3327 | 0 | &MediaDecoderStateMachine::OnMediaSinkVideoComplete, |
3328 | 0 | &MediaDecoderStateMachine::OnMediaSinkVideoError) |
3329 | 0 | ->Track(mMediaSinkVideoPromise); |
3330 | 0 | } |
3331 | 0 | // Remember the initial offset when playback starts. This will be used |
3332 | 0 | // to calculate the rate at which bytes are consumed as playback moves on. |
3333 | 0 | RefPtr<MediaData> sample = mAudioQueue.PeekFront(); |
3334 | 0 | mPlaybackOffset = sample ? sample->mOffset : 0; |
3335 | 0 | sample = mVideoQueue.PeekFront(); |
3336 | 0 | if (sample && sample->mOffset > mPlaybackOffset) { |
3337 | 0 | mPlaybackOffset = sample->mOffset; |
3338 | 0 | } |
3339 | 0 | } |
3340 | 0 | } |
3341 | | |
3342 | | bool |
3343 | | MediaDecoderStateMachine::HasLowDecodedAudio() |
3344 | 0 | { |
3345 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3346 | 0 | return IsAudioDecoding() && GetDecodedAudioDuration() |
3347 | 0 | < EXHAUSTED_DATA_MARGIN.MultDouble(mPlaybackRate); |
3348 | 0 | } |
3349 | | |
3350 | | bool |
3351 | | MediaDecoderStateMachine::HasLowDecodedVideo() |
3352 | 0 | { |
3353 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3354 | 0 | return IsVideoDecoding() && |
3355 | 0 | VideoQueue().GetSize() < LOW_VIDEO_FRAMES * mPlaybackRate; |
3356 | 0 | } |
3357 | | |
// True when either decoded-audio or decoded-video is running low. Only
// meaningful when the reader relies on buffering heuristics.
bool
MediaDecoderStateMachine::HasLowDecodedData()
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(mReader->UseBufferingHeuristics());
  return HasLowDecodedAudio() || HasLowDecodedVideo();
}
3365 | | |
// True when audio decoding is still expected to produce data but both the
// decoded-audio queue and the sink's unplayed-frame buffer are empty.
bool MediaDecoderStateMachine::OutOfDecodedAudio()
{
  MOZ_ASSERT(OnTaskQueue());
  return IsAudioDecoding() && !AudioQueue().IsFinished() &&
         AudioQueue().GetSize() == 0 &&
         !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
}
3373 | | |
3374 | | bool |
3375 | | MediaDecoderStateMachine::HasLowBufferedData() |
3376 | 0 | { |
3377 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3378 | 0 | return HasLowBufferedData(detail::LOW_BUFFER_THRESHOLD); |
3379 | 0 | } |
3380 | | |
3381 | | bool |
3382 | | MediaDecoderStateMachine::HasLowBufferedData(const TimeUnit& aThreshold) |
3383 | 0 | { |
3384 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3385 | 0 |
|
3386 | 0 | // If we don't have a duration, mBuffered is probably not going to have |
3387 | 0 | // a useful buffered range. Return false here so that we don't get stuck in |
3388 | 0 | // buffering mode for live streams. |
3389 | 0 | if (Duration().IsInfinite()) { |
3390 | 0 | return false; |
3391 | 0 | } |
3392 | 0 | |
3393 | 0 | if (mBuffered.Ref().IsInvalid()) { |
3394 | 0 | return false; |
3395 | 0 | } |
3396 | 0 | |
3397 | 0 | // We are never low in decoded data when we don't have audio/video or have |
3398 | 0 | // decoded all audio/video samples. |
3399 | 0 | TimeUnit endOfDecodedVideo = (HasVideo() && !VideoQueue().IsFinished()) |
3400 | 0 | ? mDecodedVideoEndTime : TimeUnit::FromInfinity(); |
3401 | 0 | TimeUnit endOfDecodedAudio = (HasAudio() && !AudioQueue().IsFinished()) |
3402 | 0 | ? mDecodedAudioEndTime : TimeUnit::FromInfinity(); |
3403 | 0 |
|
3404 | 0 | auto endOfDecodedData = std::min(endOfDecodedVideo, endOfDecodedAudio); |
3405 | 0 | if (Duration() < endOfDecodedData) { |
3406 | 0 | // Our duration is not up to date. No point buffering. |
3407 | 0 | return false; |
3408 | 0 | } |
3409 | 0 | |
3410 | 0 | if (endOfDecodedData.IsInfinite()) { |
3411 | 0 | // Have decoded all samples. No point buffering. |
3412 | 0 | return false; |
3413 | 0 | } |
3414 | 0 | |
3415 | 0 | auto start = endOfDecodedData; |
3416 | 0 | auto end = std::min(GetMediaTime() + aThreshold, Duration()); |
3417 | 0 | if (start >= end) { |
3418 | 0 | // Duration of decoded samples is greater than our threshold. |
3419 | 0 | return false; |
3420 | 0 | } |
3421 | 0 | media::TimeInterval interval(start, end); |
3422 | 0 | return !mBuffered.Ref().Contains(interval); |
3423 | 0 | } |
3424 | | |
3425 | | void |
3426 | | MediaDecoderStateMachine::DecodeError(const MediaResult& aError) |
3427 | 0 | { |
3428 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3429 | 0 | LOGE("Decode error: %s", aError.Description().get()); |
3430 | 0 | // Notify the decode error and MediaDecoder will shut down MDSM. |
3431 | 0 | mOnPlaybackErrorEvent.Notify(aError); |
3432 | 0 | } |
3433 | | |
3434 | | void |
3435 | | MediaDecoderStateMachine::EnqueueFirstFrameLoadedEvent() |
3436 | 0 | { |
3437 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3438 | 0 | // Track value of mSentFirstFrameLoadedEvent from before updating it |
3439 | 0 | bool firstFrameBeenLoaded = mSentFirstFrameLoadedEvent; |
3440 | 0 | mSentFirstFrameLoadedEvent = true; |
3441 | 0 | MediaDecoderEventVisibility visibility = |
3442 | 0 | firstFrameBeenLoaded ? MediaDecoderEventVisibility::Suppressed |
3443 | 0 | : MediaDecoderEventVisibility::Observable; |
3444 | 0 | mFirstFrameLoadedEvent.Notify( |
3445 | 0 | nsAutoPtr<MediaInfo>(new MediaInfo(Info())), visibility); |
3446 | 0 | } |
3447 | | |
3448 | | void |
3449 | | MediaDecoderStateMachine::FinishDecodeFirstFrame() |
3450 | 0 | { |
3451 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3452 | 0 | MOZ_ASSERT(!mSentFirstFrameLoadedEvent); |
3453 | 0 | LOG("FinishDecodeFirstFrame"); |
3454 | 0 |
|
3455 | 0 | mMediaSink->Redraw(Info().mVideo); |
3456 | 0 |
|
3457 | 0 | LOG("Media duration %" PRId64 ", mediaSeekable=%d", |
3458 | 0 | Duration().ToMicroseconds(), mMediaSeekable); |
3459 | 0 |
|
3460 | 0 | // Get potentially updated metadata |
3461 | 0 | mReader->ReadUpdatedMetadata(mInfo.ptr()); |
3462 | 0 |
|
3463 | 0 | EnqueueFirstFrameLoadedEvent(); |
3464 | 0 | } |
3465 | | |
3466 | | RefPtr<ShutdownPromise> |
3467 | | MediaDecoderStateMachine::BeginShutdown() |
3468 | 0 | { |
3469 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
3470 | 0 | if (mOutputStreamManager) { |
3471 | 0 | mOutputStreamManager->Clear(); |
3472 | 0 | } |
3473 | 0 | return InvokeAsync(OwnerThread(), this, __func__, |
3474 | 0 | &MediaDecoderStateMachine::Shutdown); |
3475 | 0 | } |
3476 | | |
3477 | | RefPtr<ShutdownPromise> |
3478 | | MediaDecoderStateMachine::FinishShutdown() |
3479 | 0 | { |
3480 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3481 | 0 | LOG("Shutting down state machine task queue"); |
3482 | 0 | return OwnerThread()->BeginShutdown(); |
3483 | 0 | } |
3484 | | |
3485 | | void |
3486 | | MediaDecoderStateMachine::RunStateMachine() |
3487 | 0 | { |
3488 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3489 | 0 |
|
3490 | 0 | mDelayedScheduler.Reset(); // Must happen on state machine task queue. |
3491 | 0 | mDispatchedStateMachine = false; |
3492 | 0 | mStateObj->Step(); |
3493 | 0 | } |
3494 | | |
3495 | | void |
3496 | | MediaDecoderStateMachine::ResetDecode(TrackSet aTracks) |
3497 | 0 | { |
3498 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3499 | 0 | LOG("MediaDecoderStateMachine::Reset"); |
3500 | 0 |
|
3501 | 0 | // Assert that aTracks specifies to reset the video track because we |
3502 | 0 | // don't currently support resetting just the audio track. |
3503 | 0 | MOZ_ASSERT(aTracks.contains(TrackInfo::kVideoTrack)); |
3504 | 0 |
|
3505 | 0 | if (aTracks.contains(TrackInfo::kVideoTrack)) { |
3506 | 0 | mDecodedVideoEndTime = TimeUnit::Zero(); |
3507 | 0 | mVideoCompleted = false; |
3508 | 0 | VideoQueue().Reset(); |
3509 | 0 | mVideoDataRequest.DisconnectIfExists(); |
3510 | 0 | mVideoWaitRequest.DisconnectIfExists(); |
3511 | 0 | } |
3512 | 0 |
|
3513 | 0 | if (aTracks.contains(TrackInfo::kAudioTrack)) { |
3514 | 0 | mDecodedAudioEndTime = TimeUnit::Zero(); |
3515 | 0 | mAudioCompleted = false; |
3516 | 0 | AudioQueue().Reset(); |
3517 | 0 | mAudioDataRequest.DisconnectIfExists(); |
3518 | 0 | mAudioWaitRequest.DisconnectIfExists(); |
3519 | 0 | } |
3520 | 0 |
|
3521 | 0 | mReader->ResetDecode(aTracks); |
3522 | 0 | } |
3523 | | |
3524 | | media::TimeUnit |
3525 | | MediaDecoderStateMachine::GetClock(TimeStamp* aTimeStamp) const |
3526 | 0 | { |
3527 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3528 | 0 | auto clockTime = mMediaSink->GetPosition(aTimeStamp); |
3529 | 0 | NS_ASSERTION(GetMediaTime() <= clockTime, "Clock should go forwards."); |
3530 | 0 | return clockTime; |
3531 | 0 | } |
3532 | | |
3533 | | void |
3534 | | MediaDecoderStateMachine::UpdatePlaybackPositionPeriodically() |
3535 | 0 | { |
3536 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3537 | 0 |
|
3538 | 0 | if (!IsPlaying()) { |
3539 | 0 | return; |
3540 | 0 | } |
3541 | 0 | |
3542 | 0 | // Cap the current time to the larger of the audio and video end time. |
3543 | 0 | // This ensures that if we're running off the system clock, we don't |
3544 | 0 | // advance the clock to after the media end time. |
3545 | 0 | if (VideoEndTime() > TimeUnit::Zero() || AudioEndTime() > TimeUnit::Zero()) { |
3546 | 0 |
|
3547 | 0 | auto clockTime = GetClock(); |
3548 | 0 |
|
3549 | 0 | // Once looping was turned on, the time is probably larger than the duration |
3550 | 0 | // of the media track, so the time over the end should be corrected. |
3551 | 0 | mReader->AdjustByLooping(clockTime); |
3552 | 0 | bool loopback = clockTime < GetMediaTime() && mLooping; |
3553 | 0 |
|
3554 | 0 | // Skip frames up to the frame at the playback position, and figure out |
3555 | 0 | // the time remaining until it's time to display the next frame and drop |
3556 | 0 | // the current frame. |
3557 | 0 | NS_ASSERTION(clockTime >= TimeUnit::Zero(), "Should have positive clock time."); |
3558 | 0 |
|
3559 | 0 | // These will be non -1 if we've displayed a video frame, or played an audio |
3560 | 0 | // frame. |
3561 | 0 | auto maxEndTime = std::max(VideoEndTime(), AudioEndTime()); |
3562 | 0 | auto t = std::min(clockTime, maxEndTime); |
3563 | 0 | // FIXME: Bug 1091422 - chained ogg files hit this assertion. |
3564 | 0 | //MOZ_ASSERT(t >= GetMediaTime()); |
3565 | 0 | if (loopback || t > GetMediaTime()) { |
3566 | 0 | UpdatePlaybackPosition(t); |
3567 | 0 | } |
3568 | 0 | } |
3569 | 0 | // Note we have to update playback position before releasing the monitor. |
3570 | 0 | // Otherwise, MediaDecoder::AddOutputStream could kick in when we are outside |
3571 | 0 | // the monitor and get a staled value from GetCurrentTimeUs() which hits the |
3572 | 0 | // assertion in GetClock(). |
3573 | 0 |
|
3574 | 0 | int64_t delay = std::max<int64_t>(1, AUDIO_DURATION_USECS / mPlaybackRate); |
3575 | 0 | ScheduleStateMachineIn(TimeUnit::FromMicroseconds(delay)); |
3576 | 0 |
|
3577 | 0 | // Notify the listener as we progress in the playback offset. Note it would |
3578 | 0 | // be too intensive to send notifications for each popped audio/video sample. |
3579 | 0 | // It is good enough to send 'PlaybackProgressed' events every 40us (defined |
3580 | 0 | // by AUDIO_DURATION_USECS), and we ensure 'PlaybackProgressed' events are |
3581 | 0 | // always sent after 'PlaybackStarted' and before 'PlaybackStopped'. |
3582 | 0 | mOnPlaybackEvent.Notify(MediaPlaybackEvent{ |
3583 | 0 | MediaPlaybackEvent::PlaybackProgressed, mPlaybackOffset }); |
3584 | 0 | } |
3585 | | |
3586 | | void |
3587 | | MediaDecoderStateMachine::ScheduleStateMachine() |
3588 | 0 | { |
3589 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3590 | 0 | if (mDispatchedStateMachine) { |
3591 | 0 | return; |
3592 | 0 | } |
3593 | 0 | mDispatchedStateMachine = true; |
3594 | 0 |
|
3595 | 0 | nsresult rv = |
3596 | 0 | OwnerThread()->Dispatch( |
3597 | 0 | NewRunnableMethod("MediaDecoderStateMachine::RunStateMachine", |
3598 | 0 | this, |
3599 | 0 | &MediaDecoderStateMachine::RunStateMachine)); |
3600 | 0 | MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); |
3601 | 0 | Unused << rv; |
3602 | 0 | } |
3603 | | |
3604 | | void |
3605 | | MediaDecoderStateMachine::ScheduleStateMachineIn(const TimeUnit& aTime) |
3606 | 0 | { |
3607 | 0 | MOZ_ASSERT(OnTaskQueue()); // mDelayedScheduler.Ensure() may Disconnect() |
3608 | 0 | // the promise, which must happen on the state |
3609 | 0 | // machine task queue. |
3610 | 0 | MOZ_ASSERT(aTime > TimeUnit::Zero()); |
3611 | 0 | if (mDispatchedStateMachine) { |
3612 | 0 | return; |
3613 | 0 | } |
3614 | 0 | |
3615 | 0 | TimeStamp target = TimeStamp::Now() + aTime.ToTimeDuration(); |
3616 | 0 |
|
3617 | 0 | // It is OK to capture 'this' without causing UAF because the callback |
3618 | 0 | // always happens before shutdown. |
3619 | 0 | RefPtr<MediaDecoderStateMachine> self = this; |
3620 | 0 | mDelayedScheduler.Ensure(target, [self] () { |
3621 | 0 | self->mDelayedScheduler.CompleteRequest(); |
3622 | 0 | self->RunStateMachine(); |
3623 | 0 | }, [] () { |
3624 | 0 | MOZ_DIAGNOSTIC_ASSERT(false); |
3625 | 0 | }); |
3626 | 0 | } |
3627 | | |
// True when the caller is running on the state machine's task queue.
bool MediaDecoderStateMachine::OnTaskQueue() const
{
  return OwnerThread()->IsCurrentThreadIn();
}
3632 | | |
// True when a RunStateMachine() is pending, either via an immediate
// dispatch or the delayed scheduler.
bool MediaDecoderStateMachine::IsStateMachineScheduled() const
{
  MOZ_ASSERT(OnTaskQueue());
  return mDispatchedStateMachine || mDelayedScheduler.IsScheduled();
}
3638 | | |
3639 | | void |
3640 | | MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate) |
3641 | 0 | { |
3642 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3643 | 0 | MOZ_ASSERT(aPlaybackRate != 0, "Should be handled by MediaDecoder::Pause()"); |
3644 | 0 |
|
3645 | 0 | mPlaybackRate = aPlaybackRate; |
3646 | 0 | mMediaSink->SetPlaybackRate(mPlaybackRate); |
3647 | 0 |
|
3648 | 0 | // Schedule next cycle to check if we can stop prerolling. |
3649 | 0 | ScheduleStateMachine(); |
3650 | 0 | } |
3651 | | |
// Mirror-change handler: forwards the preserves-pitch flag to the sink.
void MediaDecoderStateMachine::PreservesPitchChanged()
{
  MOZ_ASSERT(OnTaskQueue());
  mMediaSink->SetPreservesPitch(mPreservesPitch);
}
3657 | | |
3658 | | void |
3659 | | MediaDecoderStateMachine::LoopingChanged() |
3660 | 0 | { |
3661 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3662 | 0 | if (mSeamlessLoopingAllowed) { |
3663 | 0 | mReader->SetSeamlessLoopingEnabled(mLooping); |
3664 | 0 | } |
3665 | 0 | } |
3666 | | |
3667 | | TimeUnit |
3668 | | MediaDecoderStateMachine::AudioEndTime() const |
3669 | 0 | { |
3670 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3671 | 0 | if (mMediaSink->IsStarted()) { |
3672 | 0 | return mMediaSink->GetEndTime(TrackInfo::kAudioTrack); |
3673 | 0 | } |
3674 | 0 | return GetMediaTime(); |
3675 | 0 | } |
3676 | | |
3677 | | TimeUnit |
3678 | | MediaDecoderStateMachine::VideoEndTime() const |
3679 | 0 | { |
3680 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3681 | 0 | if (mMediaSink->IsStarted()) { |
3682 | 0 | return mMediaSink->GetEndTime(TrackInfo::kVideoTrack); |
3683 | 0 | } |
3684 | 0 | return GetMediaTime(); |
3685 | 0 | } |
3686 | | |
3687 | | void |
3688 | | MediaDecoderStateMachine::OnMediaSinkVideoComplete() |
3689 | 0 | { |
3690 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3691 | 0 | MOZ_ASSERT(HasVideo()); |
3692 | 0 | LOG("[%s]", __func__); |
3693 | 0 |
|
3694 | 0 | mMediaSinkVideoPromise.Complete(); |
3695 | 0 | mVideoCompleted = true; |
3696 | 0 | ScheduleStateMachine(); |
3697 | 0 | } |
3698 | | |
3699 | | void |
3700 | | MediaDecoderStateMachine::OnMediaSinkVideoError() |
3701 | 0 | { |
3702 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3703 | 0 | MOZ_ASSERT(HasVideo()); |
3704 | 0 | LOGE("[%s]", __func__); |
3705 | 0 |
|
3706 | 0 | mMediaSinkVideoPromise.Complete(); |
3707 | 0 | mVideoCompleted = true; |
3708 | 0 | if (HasAudio()) { |
3709 | 0 | return; |
3710 | 0 | } |
3711 | 0 | DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__)); |
3712 | 0 | } |
3713 | | |
3714 | | void MediaDecoderStateMachine::OnMediaSinkAudioComplete() |
3715 | 0 | { |
3716 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3717 | 0 | MOZ_ASSERT(HasAudio()); |
3718 | 0 | LOG("[%s]", __func__); |
3719 | 0 |
|
3720 | 0 | mMediaSinkAudioPromise.Complete(); |
3721 | 0 | mAudioCompleted = true; |
3722 | 0 | // To notify PlaybackEnded as soon as possible. |
3723 | 0 | ScheduleStateMachine(); |
3724 | 0 |
|
3725 | 0 | // Report OK to Decoder Doctor (to know if issue may have been resolved). |
3726 | 0 | mOnDecoderDoctorEvent.Notify( |
3727 | 0 | DecoderDoctorEvent{DecoderDoctorEvent::eAudioSinkStartup, NS_OK}); |
3728 | 0 | } |
3729 | | |
3730 | | void MediaDecoderStateMachine::OnMediaSinkAudioError(nsresult aResult) |
3731 | 0 | { |
3732 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3733 | 0 | MOZ_ASSERT(HasAudio()); |
3734 | 0 | LOGE("[%s]", __func__); |
3735 | 0 |
|
3736 | 0 | mMediaSinkAudioPromise.Complete(); |
3737 | 0 | mAudioCompleted = true; |
3738 | 0 |
|
3739 | 0 | // Result should never be NS_OK in this *error* handler. Report to Dec-Doc. |
3740 | 0 | MOZ_ASSERT(NS_FAILED(aResult)); |
3741 | 0 | mOnDecoderDoctorEvent.Notify( |
3742 | 0 | DecoderDoctorEvent{DecoderDoctorEvent::eAudioSinkStartup, aResult}); |
3743 | 0 |
|
3744 | 0 | // Make the best effort to continue playback when there is video. |
3745 | 0 | if (HasVideo()) { |
3746 | 0 | return; |
3747 | 0 | } |
3748 | 0 | |
3749 | 0 | // Otherwise notify media decoder/element about this error for it makes |
3750 | 0 | // no sense to play an audio-only file without sound output. |
3751 | 0 | DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__)); |
3752 | 0 | } |
3753 | | |
// Switches between the normal audio sink and the captured-audio (stream
// output) sink. Tears down the current MediaSink and creates a replacement,
// preserving playback parameters across the swap. Must run on the task queue;
// the stop -> shutdown -> create -> restore ordering below is significant.
void
MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured)
{
  MOZ_ASSERT(OnTaskQueue());

  // No-op if the capture state isn't actually changing.
  if (aCaptured == mAudioCaptured) {
    return;
  }

  // Reset these flags so they are consistent with the status of the sink.
  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
  // to reset these flags when switching MediaSinks.
  mAudioCompleted = false;
  mVideoCompleted = false;

  // Backup current playback parameters.
  MediaSink::PlaybackParams params = mMediaSink->GetPlaybackParams();

  // Stop and shut down the existing sink.
  StopMediaSink();
  mMediaSink->Shutdown();

  // Create a new sink according to whether audio is captured.
  mMediaSink = CreateMediaSink(aCaptured);

  // Restore playback parameters.
  mMediaSink->SetPlaybackParams(params);

  mAudioCaptured = aCaptured;

  // Don't buffer as much when audio is captured because we don't need to worry
  // about high latency audio devices.
  mAmpleAudioThreshold = mAudioCaptured
    ? detail::AMPLE_AUDIO_THRESHOLD / 2 : detail::AMPLE_AUDIO_THRESHOLD;

  // Let the current state object react to the sink swap (e.g. re-request the
  // sink to start if we were playing).
  mStateObj->HandleAudioCaptured();
}
3791 | | |
3792 | | uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const |
3793 | 0 | { |
3794 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3795 | 0 | return mReader->VideoIsHardwareAccelerated() |
3796 | 0 | ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE) |
3797 | 0 | : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE); |
3798 | 0 | } |
3799 | | |
3800 | | nsCString |
3801 | | MediaDecoderStateMachine::GetDebugInfo() |
3802 | 0 | { |
3803 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3804 | 0 | int64_t duration = |
3805 | 0 | mDuration.Ref() ? mDuration.Ref().ref().ToMicroseconds() : -1; |
3806 | 0 | auto str = nsPrintfCString( |
3807 | 0 | "MDSM: duration=%" PRId64 " GetMediaTime=%" PRId64 " GetClock=" |
3808 | 0 | "%" PRId64 " mMediaSink=%p state=%s mPlayState=%d " |
3809 | 0 | "mSentFirstFrameLoadedEvent=%d IsPlaying=%d mAudioStatus=%s " |
3810 | 0 | "mVideoStatus=%s mDecodedAudioEndTime=%" PRId64 |
3811 | 0 | " mDecodedVideoEndTime=%" PRId64 " mAudioCompleted=%d " |
3812 | 0 | "mVideoCompleted=%d %s", |
3813 | 0 | duration, |
3814 | 0 | GetMediaTime().ToMicroseconds(), |
3815 | 0 | mMediaSink->IsStarted() ? GetClock().ToMicroseconds() : -1, |
3816 | 0 | mMediaSink.get(), |
3817 | 0 | ToStateStr(), |
3818 | 0 | mPlayState.Ref(), |
3819 | 0 | mSentFirstFrameLoadedEvent, |
3820 | 0 | IsPlaying(), |
3821 | 0 | AudioRequestStatus(), |
3822 | 0 | VideoRequestStatus(), |
3823 | 0 | mDecodedAudioEndTime.ToMicroseconds(), |
3824 | 0 | mDecodedVideoEndTime.ToMicroseconds(), |
3825 | 0 | mAudioCompleted, |
3826 | 0 | mVideoCompleted, |
3827 | 0 | mStateObj->GetDebugInfo().get()); |
3828 | 0 |
|
3829 | 0 | AppendStringIfNotEmpty(str, mMediaSink->GetDebugInfo()); |
3830 | 0 |
|
3831 | 0 | return std::move(str); |
3832 | 0 | } |
3833 | | |
3834 | | RefPtr<MediaDecoder::DebugInfoPromise> |
3835 | | MediaDecoderStateMachine::RequestDebugInfo() |
3836 | 0 | { |
3837 | 0 | using PromiseType = MediaDecoder::DebugInfoPromise; |
3838 | 0 | RefPtr<PromiseType::Private> p = new PromiseType::Private(__func__); |
3839 | 0 | RefPtr<MediaDecoderStateMachine> self = this; |
3840 | 0 | nsresult rv = OwnerThread()->Dispatch( |
3841 | 0 | NS_NewRunnableFunction( |
3842 | 0 | "MediaDecoderStateMachine::RequestDebugInfo", |
3843 | 0 | [self, p]() { p->Resolve(self->GetDebugInfo(), __func__); }), |
3844 | 0 | AbstractThread::TailDispatch); |
3845 | 0 | MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); |
3846 | 0 | Unused << rv; |
3847 | 0 | return p.forget(); |
3848 | 0 | } |
3849 | | |
3850 | | void MediaDecoderStateMachine::AddOutputStream(ProcessedMediaStream* aStream, |
3851 | | TrackID aNextAvailableTrackID, |
3852 | | bool aFinishWhenEnded) |
3853 | 0 | { |
3854 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
3855 | 0 | LOG("AddOutputStream aStream=%p!", aStream); |
3856 | 0 | mOutputStreamManager->Add(aStream, aNextAvailableTrackID, aFinishWhenEnded); |
3857 | 0 | nsCOMPtr<nsIRunnable> r = |
3858 | 0 | NewRunnableMethod<bool>("MediaDecoderStateMachine::SetAudioCaptured", |
3859 | 0 | this, |
3860 | 0 | &MediaDecoderStateMachine::SetAudioCaptured, |
3861 | 0 | true); |
3862 | 0 | nsresult rv = OwnerThread()->Dispatch(r.forget()); |
3863 | 0 | MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); |
3864 | 0 | Unused << rv; |
3865 | 0 | } |
3866 | | |
3867 | | void MediaDecoderStateMachine::RemoveOutputStream(MediaStream* aStream) |
3868 | 0 | { |
3869 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
3870 | 0 | LOG("RemoveOutputStream=%p!", aStream); |
3871 | 0 | mOutputStreamManager->Remove(aStream); |
3872 | 0 | if (mOutputStreamManager->IsEmpty()) { |
3873 | 0 | nsCOMPtr<nsIRunnable> r = |
3874 | 0 | NewRunnableMethod<bool>("MediaDecoderStateMachine::SetAudioCaptured", |
3875 | 0 | this, |
3876 | 0 | &MediaDecoderStateMachine::SetAudioCaptured, |
3877 | 0 | false); |
3878 | 0 | nsresult rv = OwnerThread()->Dispatch(r.forget()); |
3879 | 0 | MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); |
3880 | 0 | Unused << rv; |
3881 | 0 | } |
3882 | 0 | } |
3883 | | |
3884 | | TrackID |
3885 | | MediaDecoderStateMachine::NextAvailableTrackIDFor(MediaStream* aOutputStream) const |
3886 | 0 | { |
3887 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
3888 | 0 | return mOutputStreamManager->NextAvailableTrackIDFor(aOutputStream); |
3889 | 0 | } |
3890 | | |
3891 | | class VideoQueueMemoryFunctor : public nsDequeFunctor |
3892 | | { |
3893 | | public: |
3894 | | VideoQueueMemoryFunctor() |
3895 | | : mSize(0) |
3896 | 0 | { |
3897 | 0 | } |
3898 | | |
3899 | | MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf); |
3900 | | |
3901 | | virtual void operator()(void* aObject) override |
3902 | 0 | { |
3903 | 0 | const VideoData* v = static_cast<const VideoData*>(aObject); |
3904 | 0 | mSize += v->SizeOfIncludingThis(MallocSizeOf); |
3905 | 0 | } |
3906 | | |
3907 | | size_t mSize; |
3908 | | }; |
3909 | | |
3910 | | class AudioQueueMemoryFunctor : public nsDequeFunctor |
3911 | | { |
3912 | | public: |
3913 | | AudioQueueMemoryFunctor() |
3914 | | : mSize(0) |
3915 | 0 | { |
3916 | 0 | } |
3917 | | |
3918 | | MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf); |
3919 | | |
3920 | | virtual void operator()(void* aObject) override |
3921 | 0 | { |
3922 | 0 | const AudioData* audioData = static_cast<const AudioData*>(aObject); |
3923 | 0 | mSize += audioData->SizeOfIncludingThis(MallocSizeOf); |
3924 | 0 | } |
3925 | | |
3926 | | size_t mSize; |
3927 | | }; |
3928 | | |
3929 | | size_t |
3930 | | MediaDecoderStateMachine::SizeOfVideoQueue() const |
3931 | 0 | { |
3932 | 0 | VideoQueueMemoryFunctor functor; |
3933 | 0 | mVideoQueue.LockedForEach(functor); |
3934 | 0 | return functor.mSize; |
3935 | 0 | } |
3936 | | |
3937 | | size_t |
3938 | | MediaDecoderStateMachine::SizeOfAudioQueue() const |
3939 | 0 | { |
3940 | 0 | AudioQueueMemoryFunctor functor; |
3941 | 0 | mAudioQueue.LockedForEach(functor); |
3942 | 0 | return functor.mSize; |
3943 | 0 | } |
3944 | | |
3945 | | AbstractCanonical<media::TimeIntervals>* |
3946 | | MediaDecoderStateMachine::CanonicalBuffered() const |
3947 | 0 | { |
3948 | 0 | return mReader->CanonicalBuffered(); |
3949 | 0 | } |
3950 | | |
3951 | | MediaEventSource<void>& |
3952 | | MediaDecoderStateMachine::OnMediaNotSeekable() const |
3953 | 0 | { |
3954 | 0 | return mReader->OnMediaNotSeekable(); |
3955 | 0 | } |
3956 | | |
3957 | | const char* |
3958 | | MediaDecoderStateMachine::AudioRequestStatus() const |
3959 | 0 | { |
3960 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3961 | 0 | if (IsRequestingAudioData()) { |
3962 | 0 | MOZ_DIAGNOSTIC_ASSERT(!IsWaitingAudioData()); |
3963 | 0 | return "pending"; |
3964 | 0 | } else if (IsWaitingAudioData()) { |
3965 | 0 | return "waiting"; |
3966 | 0 | } |
3967 | 0 | return "idle"; |
3968 | 0 | } |
3969 | | |
3970 | | const char* |
3971 | | MediaDecoderStateMachine::VideoRequestStatus() const |
3972 | 0 | { |
3973 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3974 | 0 | if (IsRequestingVideoData()) { |
3975 | 0 | MOZ_DIAGNOSTIC_ASSERT(!IsWaitingVideoData()); |
3976 | 0 | return "pending"; |
3977 | 0 | } else if (IsWaitingVideoData()) { |
3978 | 0 | return "waiting"; |
3979 | 0 | } |
3980 | 0 | return "idle"; |
3981 | 0 | } |
3982 | | |
3983 | | void |
3984 | | MediaDecoderStateMachine::OnSuspendTimerResolved() |
3985 | 0 | { |
3986 | 0 | LOG("OnSuspendTimerResolved"); |
3987 | 0 | mVideoDecodeSuspendTimer.CompleteRequest(); |
3988 | 0 | mStateObj->HandleVideoSuspendTimeout(); |
3989 | 0 | } |
3990 | | |
3991 | | void |
3992 | | MediaDecoderStateMachine::CancelSuspendTimer() |
3993 | 0 | { |
3994 | 0 | LOG("CancelSuspendTimer: State: %s, Timer.IsScheduled: %c", |
3995 | 0 | ToStateStr(mStateObj->GetState()), |
3996 | 0 | mVideoDecodeSuspendTimer.IsScheduled() ? 'T' : 'F'); |
3997 | 0 | MOZ_ASSERT(OnTaskQueue()); |
3998 | 0 | if (mVideoDecodeSuspendTimer.IsScheduled()) { |
3999 | 0 | mOnPlaybackEvent.Notify(MediaPlaybackEvent::CancelVideoSuspendTimer); |
4000 | 0 | } |
4001 | 0 | mVideoDecodeSuspendTimer.Reset(); |
4002 | 0 | } |
4003 | | |
4004 | | } // namespace mozilla |
4005 | | |
4006 | | // avoid redefined macro in unified build |
4007 | | #undef LOG |
4008 | | #undef LOGV |
4009 | | #undef LOGW |
4010 | | #undef LOGE |
4011 | | #undef SLOGW |
4012 | | #undef SLOGE |
4013 | | #undef NS_DispatchToMainThread |