/src/mozilla-central/dom/media/MediaCache.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "MediaCache.h" |
8 | | |
9 | | #include "ChannelMediaResource.h" |
10 | | #include "FileBlockCache.h" |
11 | | #include "MediaBlockCacheBase.h" |
12 | | #include "MediaResource.h" |
13 | | #include "MemoryBlockCache.h" |
14 | | #include "mozilla/Attributes.h" |
15 | | #include "mozilla/ClearOnShutdown.h" |
16 | | #include "mozilla/ErrorNames.h" |
17 | | #include "mozilla/Logging.h" |
18 | | #include "mozilla/Monitor.h" |
19 | | #include "mozilla/Preferences.h" |
20 | | #include "mozilla/Services.h" |
21 | | #include "mozilla/StaticPtr.h" |
22 | | #include "mozilla/StaticPrefs.h" |
23 | | #include "mozilla/SystemGroup.h" |
24 | | #include "mozilla/Telemetry.h" |
25 | | #include "nsContentUtils.h" |
26 | | #include "nsIObserverService.h" |
27 | | #include "nsIPrincipal.h" |
28 | | #include "nsPrintfCString.h" |
29 | | #include "nsProxyRelease.h" |
30 | | #include "nsThreadUtils.h" |
31 | | #include "prio.h" |
32 | | #include <algorithm> |
33 | | |
34 | | namespace mozilla { |
35 | | |
36 | | #undef LOG |
37 | | #undef LOGI |
38 | | #undef LOGE |
39 | | LazyLogModule gMediaCacheLog("MediaCache"); |
40 | 0 | #define LOG(...) MOZ_LOG(gMediaCacheLog, LogLevel::Debug, (__VA_ARGS__)) |
41 | 0 | #define LOGI(...) MOZ_LOG(gMediaCacheLog, LogLevel::Info, (__VA_ARGS__)) |
42 | 0 | #define LOGE(...) NS_DebugBreak(NS_DEBUG_WARNING, nsPrintfCString(__VA_ARGS__).get(), nullptr, __FILE__, __LINE__) |
43 | | |
44 | | // For HTTP seeking, if the number of bytes we need to seek forward |
45 | | // is less than this value, then we just keep reading rather than |
46 | | // issuing a new byte-range request. |
47 | | // |
48 | | // If we assume a 100Mbit connection, and assume reissuing an HTTP seek causes |
49 | | // a delay of 200ms, then in that 200ms we could have simply read ahead ~2.5MB. So |
50 | | // setting SEEK_VS_READ_THRESHOLD to 1MB sounds reasonable. |
51 | | static const int64_t SEEK_VS_READ_THRESHOLD = 1 * 1024 * 1024; |
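
A quick back-of-the-envelope check of the figures in the comment above; the 100 Mbit/s link and 200 ms reissue delay are the comment's own assumptions, and the constant names below are illustrative only, not part of the file:

  // Illustrative arithmetic only (not part of the build).
  constexpr int64_t kLinkBitsPerSecond = 100 * 1000 * 1000; // assumed 100 Mbit/s
  constexpr int64_t kSeekDelayMs = 200;                     // assumed cost of reissuing the request
  constexpr int64_t kBytesReadableDuringSeek =
      kLinkBitsPerSecond / 8 * kSeekDelayMs / 1000;         // = 2,500,000 bytes (~2.5MB)
  static_assert(kBytesReadableDuringSeek > 1 * 1024 * 1024,
                "a 1MB threshold stays comfortably below the break-even point");
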
52 | | |
53 | | // Readahead blocks for non-seekable streams will be limited to this |
54 | | // fraction of the cache space. We don't normally evict such blocks |
55 | | // because replacing them requires a seek, but we need to make sure |
56 | | // they don't monopolize the cache. |
57 | | static const double NONSEEKABLE_READAHEAD_MAX = 0.5; |
58 | | |
59 | | // Data N seconds before the current playback position is given the same priority |
60 | | // as data REPLAY_PENALTY_FACTOR*N seconds ahead of the current playback |
61 | | // position. REPLAY_PENALTY_FACTOR is greater than 1 to reflect that |
62 | | // data in the past is less likely to be played again than data in the future. |
63 | | // We want to give data just behind the current playback position reasonably |
64 | | // high priority in case codecs need to retrieve that data (e.g. because |
65 | | // tracks haven't been muxed well or are being decoded at uneven rates). |
66 | | // 1/REPLAY_PENALTY_FACTOR as much data will be kept behind the |
67 | | // current playback position as will be kept ahead of the current playback |
68 | | // position. |
69 | | static const uint32_t REPLAY_PENALTY_FACTOR = 3; |
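
A minimal sketch of how this factor weighs data behind the play cursor against data ahead of it when PredictNextUse() (further down in this file) ranks eviction candidates; EffectiveDistanceMs is a hypothetical helper written only for illustration:

  // Hypothetical helper, for illustration only: a block N seconds behind the
  // play cursor competes like a block REPLAY_PENALTY_FACTOR * N seconds ahead.
  static int64_t EffectiveDistanceMs(int64_t aBytesFromPlayCursor, // negative = behind
                                     int64_t aPlaybackBytesPerSecond)
  {
    const bool behind = aBytesFromPlayCursor < 0;
    const int64_t bytes = behind ? -aBytesFromPlayCursor : aBytesFromPlayCursor;
    const int64_t ms = bytes * 1000 / aPlaybackBytesPerSecond;
    return behind ? ms * REPLAY_PENALTY_FACTOR : ms;
  }
  // E.g. at 10000 bytes/s, a block 20000 bytes behind scores 6000 ms while a
  // block 20000 bytes ahead scores 2000 ms, so the block behind is evicted first.
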
70 | | |
71 | | // When looking for a reusable block, scan forward this many blocks |
72 | | // from the desired "best" block location to look for free blocks, |
73 | | // before we resort to scanning the whole cache. The idea is to try to |
74 | | // store runs of stream blocks close-to-consecutively in the cache if we |
75 | | // can. |
76 | | static const uint32_t FREE_BLOCK_SCAN_LIMIT = 16; |
77 | | |
78 | | #ifdef DEBUG |
79 | | // Turn this on to do very expensive cache state validation |
80 | | // #define DEBUG_VERIFY_CACHE |
81 | | #endif |
82 | | |
83 | | class MediaCacheFlusher final : public nsIObserver, |
84 | | public nsSupportsWeakReference |
85 | | { |
86 | | public: |
87 | | NS_DECL_ISUPPORTS |
88 | | NS_DECL_NSIOBSERVER |
89 | | |
90 | | static void RegisterMediaCache(MediaCache* aMediaCache); |
91 | | static void UnregisterMediaCache(MediaCache* aMediaCache); |
92 | | |
93 | | private: |
94 | 0 | MediaCacheFlusher() {} |
95 | 0 | ~MediaCacheFlusher() {} |
96 | | |
97 | | // Singleton instance created when the first MediaCache is registered, and |
98 | | // released when the last MediaCache is unregistered. |
99 | | // The observer service will keep a weak reference to it, for notifications. |
100 | | static StaticRefPtr<MediaCacheFlusher> gMediaCacheFlusher; |
101 | | |
102 | | nsTArray<MediaCache*> mMediaCaches; |
103 | | }; |
104 | | |
105 | | /* static */ StaticRefPtr<MediaCacheFlusher> |
106 | | MediaCacheFlusher::gMediaCacheFlusher; |
107 | | |
108 | | NS_IMPL_ISUPPORTS(MediaCacheFlusher, nsIObserver, nsISupportsWeakReference) |
109 | | |
110 | | /* static */ void |
111 | | MediaCacheFlusher::RegisterMediaCache(MediaCache* aMediaCache) |
112 | 0 | { |
113 | 0 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
114 | 0 |
|
115 | 0 | if (!gMediaCacheFlusher) { |
116 | 0 | gMediaCacheFlusher = new MediaCacheFlusher(); |
117 | 0 |
|
118 | 0 | nsCOMPtr<nsIObserverService> observerService = |
119 | 0 | mozilla::services::GetObserverService(); |
120 | 0 | if (observerService) { |
121 | 0 | observerService->AddObserver( |
122 | 0 | gMediaCacheFlusher, "last-pb-context-exited", true); |
123 | 0 | observerService->AddObserver( |
124 | 0 | gMediaCacheFlusher, "cacheservice:empty-cache", true); |
125 | 0 | } |
126 | 0 | } |
127 | 0 |
|
128 | 0 | gMediaCacheFlusher->mMediaCaches.AppendElement(aMediaCache); |
129 | 0 | } |
130 | | |
131 | | /* static */ void |
132 | | MediaCacheFlusher::UnregisterMediaCache(MediaCache* aMediaCache) |
133 | 0 | { |
134 | 0 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
135 | 0 |
|
136 | 0 | gMediaCacheFlusher->mMediaCaches.RemoveElement(aMediaCache); |
137 | 0 |
|
138 | 0 | if (gMediaCacheFlusher->mMediaCaches.Length() == 0) { |
139 | 0 | gMediaCacheFlusher = nullptr; |
140 | 0 | } |
141 | 0 | } |
142 | | |
143 | | class MediaCache |
144 | | { |
145 | | using AutoLock = MonitorAutoLock; |
146 | | |
147 | | public: |
148 | | NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaCache) |
149 | | |
150 | | friend class MediaCacheStream::BlockList; |
151 | | typedef MediaCacheStream::BlockList BlockList; |
152 | | static const int64_t BLOCK_SIZE = MediaCacheStream::BLOCK_SIZE; |
153 | | |
154 | | // Get an instance of a MediaCache (or nullptr if initialization failed). |
155 | | // aContentLength is the content length if known already, otherwise -1. |
156 | | // If the length is known and considered small enough, a discrete MediaCache |
157 | | // with memory backing will be given. Otherwise the one MediaCache with |
158 | | // file backing will be provided. |
159 | | static RefPtr<MediaCache> GetMediaCache(int64_t aContentLength); |
160 | | |
161 | 0 | nsIEventTarget* OwnerThread() const { return sThread; } |
162 | | |
163 | | // Brutally flush the cache contents. Main thread only. |
164 | | void Flush(); |
165 | | |
166 | | // Close all streams associated with private browsing windows. This will |
167 | | // also remove the blocks from the cache since we don't want to leave any |
168 | | // traces when PB is done. |
169 | | void CloseStreamsForPrivateBrowsing(); |
170 | | |
171 | | // Cache-file access methods. These are the lowest-level cache methods. |
172 | | // mMonitor must be held; these can be called on any thread. |
173 | | // This can return partial reads. |
174 | | // Note mMonitor will be dropped while doing IO. The caller needs |
175 | | // to handle changes happening when the monitor is not held. |
176 | | nsresult ReadCacheFile(AutoLock&, |
177 | | int64_t aOffset, |
178 | | void* aData, |
179 | | int32_t aLength, |
180 | | int32_t* aBytes); |
181 | | |
182 | | // The generated IDs are always positive. |
183 | 0 | int64_t AllocateResourceID(AutoLock&) { return ++mNextResourceID; } |
184 | | |
185 | | // mMonitor must be held; called on main thread. |
186 | | // These methods are used by the stream to set up and tear down streams, |
187 | | // and to handle reads and writes. |
188 | | // Add aStream to the list of streams. |
189 | | void OpenStream(AutoLock&, MediaCacheStream* aStream, bool aIsClone = false); |
190 | | // Remove aStream from the list of streams. |
191 | | void ReleaseStream(AutoLock&, MediaCacheStream* aStream); |
192 | | // Free all blocks belonging to aStream. |
193 | | void ReleaseStreamBlocks(AutoLock&, MediaCacheStream* aStream); |
194 | | // Find a cache entry for this data, and write the data into it |
195 | | void AllocateAndWriteBlock( |
196 | | AutoLock&, |
197 | | MediaCacheStream* aStream, |
198 | | int32_t aStreamBlockIndex, |
199 | | MediaCacheStream::ReadMode aMode, |
200 | | Span<const uint8_t> aData1, |
201 | | Span<const uint8_t> aData2 = Span<const uint8_t>()); |
202 | | |
203 | | // mMonitor must be held; can be called on any thread. |
204 | | // Notify the cache that a seek has been requested. Some blocks may |
205 | | // need to change their class between PLAYED_BLOCK and READAHEAD_BLOCK. |
206 | | // This does not trigger channel seeks directly; the next Update() |
207 | | // will do that if necessary. The caller will call QueueUpdate(). |
208 | | void NoteSeek(AutoLock&, MediaCacheStream* aStream, int64_t aOldOffset); |
209 | | // Notify the cache that a block has been read from. This is used |
210 | | // to update last-use times. The block may not actually have a |
211 | | // cache entry yet since Read can read data from a stream's |
212 | | // in-memory mPartialBlockBuffer while the block is only partly full, |
213 | | // and thus hasn't yet been committed to the cache. The caller will |
214 | | // call QueueUpdate(). |
215 | | void NoteBlockUsage(AutoLock&, |
216 | | MediaCacheStream* aStream, |
217 | | int32_t aBlockIndex, |
218 | | int64_t aStreamOffset, |
219 | | MediaCacheStream::ReadMode aMode, |
220 | | TimeStamp aNow); |
221 | | // Mark aStream as having the block, adding it as an owner. |
222 | | void AddBlockOwnerAsReadahead(AutoLock&, |
223 | | int32_t aBlockIndex, |
224 | | MediaCacheStream* aStream, |
225 | | int32_t aStreamBlockIndex); |
226 | | |
227 | | // This queues a call to Update() on the main thread. |
228 | | void QueueUpdate(AutoLock&); |
229 | | |
230 | | // Notify all streams for the resource ID that the suspended status changed |
231 | | // at the end of MediaCache::Update. |
232 | | void QueueSuspendedStatusUpdate(AutoLock&, int64_t aResourceID); |
233 | | |
234 | | // Updates the cache state asynchronously on the main thread: |
235 | | // -- try to trim the cache back to its desired size, if necessary |
236 | | // -- suspend channels that are going to read data that's lower priority |
237 | | // than anything currently cached |
238 | | // -- resume channels that are going to read data that's higher priority |
239 | | // than something currently cached |
240 | | // -- seek channels that need to seek to a new location |
241 | | void Update(); |
242 | | |
243 | | #ifdef DEBUG_VERIFY_CACHE |
244 | | // Verify invariants, especially block list invariants |
245 | | void Verify(AutoLock&); |
246 | | #else |
247 | 0 | void Verify(AutoLock&) {} |
248 | | #endif |
249 | | |
250 | | mozilla::Monitor& Monitor() |
251 | 0 | { |
252 | 0 | // This method should only be called outside the main thread. |
253 | 0 | // The MOZ_DIAGNOSTIC_ASSERT(!NS_IsMainThread()) assertion should be |
254 | 0 | // re-added as part of bug 1464045 |
255 | 0 | return mMonitor; |
256 | 0 | } |
257 | | |
258 | | /** |
259 | | * An iterator that makes it easy to iterate through all streams that |
260 | | * have a given resource ID and are not closed. |
261 | | * Must be used while holding the media cache lock. |
262 | | */ |
263 | | class ResourceStreamIterator |
264 | | { |
265 | | public: |
266 | | ResourceStreamIterator(MediaCache* aMediaCache, int64_t aResourceID) |
267 | | : mMediaCache(aMediaCache) |
268 | | , mResourceID(aResourceID) |
269 | | , mNext(0) |
270 | 0 | { |
271 | 0 | aMediaCache->mMonitor.AssertCurrentThreadOwns(); |
272 | 0 | } |
273 | | MediaCacheStream* Next(AutoLock& aLock) |
274 | 0 | { |
275 | 0 | while (mNext < mMediaCache->mStreams.Length()) { |
276 | 0 | MediaCacheStream* stream = mMediaCache->mStreams[mNext]; |
277 | 0 | ++mNext; |
278 | 0 | if (stream->GetResourceID() == mResourceID && !stream->IsClosed(aLock)) |
279 | 0 | return stream; |
280 | 0 | } |
281 | 0 | return nullptr; |
282 | 0 | } |
283 | | private: |
284 | | MediaCache* mMediaCache; |
285 | | int64_t mResourceID; |
286 | | uint32_t mNext; |
287 | | }; |
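
A minimal usage sketch for the iterator above; ForEachStreamOfResource is a hypothetical caller (the real call sites are elsewhere in this file), and the MonitorAutoLock reference documents that the cache monitor must be held for the whole walk:

  // Hypothetical caller, shown only to illustrate the intended usage pattern.
  static void ForEachStreamOfResource(MediaCache* aCache,
                                      MonitorAutoLock& aLock, // MediaCache's AutoLock
                                      int64_t aResourceID)
  {
    MediaCache::ResourceStreamIterator iter(aCache, aResourceID);
    while (MediaCacheStream* stream = iter.Next(aLock)) {
      // Each |stream| returned here shares aResourceID and is still open.
    }
  }
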
288 | | |
289 | | protected: |
290 | | explicit MediaCache(MediaBlockCacheBase* aCache) |
291 | | : mMonitor("MediaCache.mMonitor") |
292 | | , mBlockCache(aCache) |
293 | | , mUpdateQueued(false) |
294 | | #ifdef DEBUG |
295 | | , mInUpdate(false) |
296 | | #endif |
297 | 0 | { |
298 | 0 | NS_ASSERTION(NS_IsMainThread(), "Only construct MediaCache on main thread"); |
299 | 0 | MOZ_COUNT_CTOR(MediaCache); |
300 | 0 | MediaCacheFlusher::RegisterMediaCache(this); |
301 | 0 | } |
302 | | |
303 | | ~MediaCache() |
304 | 0 | { |
305 | 0 | NS_ASSERTION(NS_IsMainThread(), "Only destroy MediaCache on main thread"); |
306 | 0 | if (this == gMediaCache) { |
307 | 0 | LOG("~MediaCache(Global file-backed MediaCache)"); |
308 | 0 | // This is the file-backed MediaCache, reset the global pointer. |
309 | 0 | gMediaCache = nullptr; |
310 | 0 | // Only gather "MEDIACACHE" telemetry for the file-based cache. |
311 | 0 | LOG("MediaCache::~MediaCache(this=%p) MEDIACACHE_WATERMARK_KB=%u", |
312 | 0 | this, |
313 | 0 | unsigned(mIndexWatermark * MediaCache::BLOCK_SIZE / 1024)); |
314 | 0 | Telemetry::Accumulate( |
315 | 0 | Telemetry::HistogramID::MEDIACACHE_WATERMARK_KB, |
316 | 0 | uint32_t(mIndexWatermark * MediaCache::BLOCK_SIZE / 1024)); |
317 | 0 | LOG( |
318 | 0 | "MediaCache::~MediaCache(this=%p) MEDIACACHE_BLOCKOWNERS_WATERMARK=%u", |
319 | 0 | this, |
320 | 0 | unsigned(mBlockOwnersWatermark)); |
321 | 0 | Telemetry::Accumulate( |
322 | 0 | Telemetry::HistogramID::MEDIACACHE_BLOCKOWNERS_WATERMARK, |
323 | 0 | mBlockOwnersWatermark); |
324 | 0 | } else { |
325 | 0 | LOG("~MediaCache(Memory-backed MediaCache %p)", this); |
326 | 0 | } |
327 | 0 | MediaCacheFlusher::UnregisterMediaCache(this); |
328 | 0 | NS_ASSERTION(mStreams.IsEmpty(), "Stream(s) still open!"); |
329 | 0 | Truncate(); |
330 | 0 | NS_ASSERTION(mIndex.Length() == 0, "Blocks leaked?"); |
331 | 0 |
|
332 | 0 | MOZ_COUNT_DTOR(MediaCache); |
333 | 0 | } |
334 | | |
335 | | // Find a free or reusable block and return its index. If there are no |
336 | | // free blocks and no reusable blocks, add a new block to the cache |
337 | | // and return it. Can return -1 on OOM. |
338 | | int32_t FindBlockForIncomingData(AutoLock&, |
339 | | TimeStamp aNow, |
340 | | MediaCacheStream* aStream, |
341 | | int32_t aStreamBlockIndex); |
342 | | // Find a reusable block --- a free block, if there is one, otherwise |
343 | | // the reusable block with the latest predicted-next-use, or -1 if |
344 | | // there aren't any freeable blocks. Only block indices less than |
345 | | // aMaxSearchBlockIndex are considered. If aForStream is non-null, |
346 | | // then aForStream and aForStreamBlock indicate what media data will |
347 | | // be placed; FindReusableBlock will favour returning free blocks |
348 | | // near other blocks for that point in the stream. |
349 | | int32_t FindReusableBlock(AutoLock&, |
350 | | TimeStamp aNow, |
351 | | MediaCacheStream* aForStream, |
352 | | int32_t aForStreamBlock, |
353 | | int32_t aMaxSearchBlockIndex); |
354 | | bool BlockIsReusable(AutoLock&, int32_t aBlockIndex); |
355 | | // Given a list of blocks sorted with the most reusable blocks at the |
356 | | // end, find the last block whose stream is not pinned (if any) |
357 | | // and whose cache entry index is less than aBlockIndexLimit |
358 | | // and append it to aResult. |
359 | | void AppendMostReusableBlock(AutoLock&, |
360 | | BlockList* aBlockList, |
361 | | nsTArray<uint32_t>* aResult, |
362 | | int32_t aBlockIndexLimit); |
363 | | |
364 | | enum BlockClass { |
365 | | // block belongs to mMetadataBlocks because data has been consumed |
366 | | // from it in "metadata mode" --- in particular blocks read during |
367 | | // Ogg seeks go into this class. These blocks may have played data |
368 | | // in them too. |
369 | | METADATA_BLOCK, |
370 | | // block belongs to mPlayedBlocks because its offset is |
371 | | // less than the stream's current reader position |
372 | | PLAYED_BLOCK, |
373 | | // block belongs to the stream's mReadaheadBlocks list because its |
374 | | // offset is greater than or equal to the stream's current |
375 | | // reader position |
376 | | READAHEAD_BLOCK |
377 | | }; |
378 | | |
379 | | struct BlockOwner { |
380 | 0 | constexpr BlockOwner() {} |
381 | | |
382 | | // The stream that owns this block, or null if the block is free. |
383 | | MediaCacheStream* mStream = nullptr; |
384 | | // The block index in the stream. Valid only if mStream is non-null. |
385 | | // Initialized to an insane value to highlight misuse. |
386 | | uint32_t mStreamBlock = UINT32_MAX; |
387 | | // Time at which this block was last used. Valid only if |
388 | | // mClass is METADATA_BLOCK or PLAYED_BLOCK. |
389 | | TimeStamp mLastUseTime; |
390 | | BlockClass mClass = READAHEAD_BLOCK; |
391 | | }; |
392 | | |
393 | | struct Block { |
394 | | // Free blocks have an empty mOwners array |
395 | | nsTArray<BlockOwner> mOwners; |
396 | | }; |
397 | | |
398 | | // Get the BlockList that the block should belong to given its |
399 | | // current owner |
400 | | BlockList* GetListForBlock(AutoLock&, BlockOwner* aBlock); |
401 | | // Get the BlockOwner for the given block index and owning stream |
402 | | // (returns null if the stream does not own the block) |
403 | | BlockOwner* GetBlockOwner(AutoLock&, |
404 | | int32_t aBlockIndex, |
405 | | MediaCacheStream* aStream); |
406 | | // Returns true iff the block is free |
407 | | bool IsBlockFree(int32_t aBlockIndex) |
408 | 0 | { return mIndex[aBlockIndex].mOwners.IsEmpty(); } |
409 | | // Add the block to the free list and mark its streams as not having |
410 | | // the block in cache |
411 | | void FreeBlock(AutoLock&, int32_t aBlock); |
412 | | // Mark aStream as not having the block, removing it as an owner. If |
413 | | // the block has no more owners it's added to the free list. |
414 | | void RemoveBlockOwner(AutoLock&, |
415 | | int32_t aBlockIndex, |
416 | | MediaCacheStream* aStream); |
417 | | // Swap all metadata associated with the two blocks. The caller |
418 | | // is responsible for swapping any underlying cache file state. |
419 | | void SwapBlocks(AutoLock&, int32_t aBlockIndex1, int32_t aBlockIndex2); |
420 | | // Insert the block into the readahead block list for the stream |
421 | | // at the right point in the list. |
422 | | void InsertReadaheadBlock(AutoLock&, |
423 | | BlockOwner* aBlockOwner, |
424 | | int32_t aBlockIndex); |
425 | | |
426 | | // Guess the duration until block aBlock will be next used |
427 | | TimeDuration PredictNextUse(AutoLock&, TimeStamp aNow, int32_t aBlock); |
428 | | // Guess the duration until the next incoming data on aStream will be used |
429 | | TimeDuration PredictNextUseForIncomingData(AutoLock&, |
430 | | MediaCacheStream* aStream); |
431 | | |
432 | | // Truncate the file and index array if there are free blocks at the |
433 | | // end |
434 | | void Truncate(); |
435 | | |
436 | | void FlushInternal(AutoLock&); |
437 | | |
438 | | // There is at most one file-backed media cache. |
439 | | // It is owned by all MediaCacheStreams that use it. |
440 | | // This is a raw pointer set by GetMediaCache(), and reset by ~MediaCache(), |
441 | | // both on the main thread; and is not accessed anywhere else. |
442 | | static MediaCache* gMediaCache; |
443 | | |
444 | | // This member is main-thread only. It's used to allocate unique |
445 | | // resource IDs to streams. |
446 | | int64_t mNextResourceID = 0; |
447 | | |
448 | | // The monitor protects all the data members here. Also, off-main-thread |
449 | | // readers that need to block will Wait() on this monitor. When new |
450 | | // data becomes available in the cache, we NotifyAll() on this monitor. |
451 | | mozilla::Monitor mMonitor; |
452 | | // This must always be accessed when the monitor is held. |
453 | | nsTArray<MediaCacheStream*> mStreams; |
454 | | // The Blocks describing the cache entries. |
455 | | nsTArray<Block> mIndex; |
456 | | // Keep track for highest number of blocks used, for telemetry purposes. |
457 | | int32_t mIndexWatermark = 0; |
458 | | // Keep track for highest number of blocks owners, for telemetry purposes. |
459 | | uint32_t mBlockOwnersWatermark = 0; |
460 | | // Writer which performs IO, asynchronously writing cache blocks. |
461 | | RefPtr<MediaBlockCacheBase> mBlockCache; |
462 | | // The list of free blocks; they are not ordered. |
463 | | BlockList mFreeBlocks; |
464 | | // True if an event to run Update() has been queued but not processed |
465 | | bool mUpdateQueued; |
466 | | #ifdef DEBUG |
467 | | bool mInUpdate; |
468 | | #endif |
469 | | // A list of resource IDs to notify about the change in suspended status. |
470 | | nsTArray<int64_t> mSuspendedStatusToNotify; |
471 | | // The thread on which we will run data callbacks from the channels. |
472 | | // Note this thread is shared among all MediaCache instances. |
473 | | static StaticRefPtr<nsIThread> sThread; |
474 | | // True if we've tried to init sThread. Note we try once only so it is safe |
475 | | // to access sThread on all threads. |
476 | | static bool sThreadInit; |
477 | | |
478 | | private: |
479 | | // Used by MediaCacheStream::GetDebugInfo() only for debugging. |
480 | | // Don't add new callers to this function. |
481 | | friend nsCString MediaCacheStream::GetDebugInfo(); |
482 | | mozilla::Monitor& GetMonitorOnTheMainThread() |
483 | 0 | { |
484 | 0 | MOZ_DIAGNOSTIC_ASSERT(NS_IsMainThread()); |
485 | 0 | return mMonitor; |
486 | 0 | } |
487 | | }; |
488 | | |
489 | | // Initialized to nullptr by non-local static initialization. |
490 | | /* static */ MediaCache* MediaCache::gMediaCache; |
491 | | |
492 | | /* static */ StaticRefPtr<nsIThread> MediaCache::sThread; |
493 | | /* static */ bool MediaCache::sThreadInit = false; |
494 | | |
495 | | NS_IMETHODIMP |
496 | | MediaCacheFlusher::Observe(nsISupports *aSubject, char const *aTopic, char16_t const *aData) |
497 | 0 | { |
498 | 0 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
499 | 0 |
|
500 | 0 | if (strcmp(aTopic, "last-pb-context-exited") == 0) { |
501 | 0 | for (MediaCache* mc : mMediaCaches) { |
502 | 0 | mc->CloseStreamsForPrivateBrowsing(); |
503 | 0 | } |
504 | 0 | return NS_OK; |
505 | 0 | } |
506 | 0 | if (strcmp(aTopic, "cacheservice:empty-cache") == 0) { |
507 | 0 | for (MediaCache* mc : mMediaCaches) { |
508 | 0 | mc->Flush(); |
509 | 0 | } |
510 | 0 | return NS_OK; |
511 | 0 | } |
512 | 0 | return NS_OK; |
513 | 0 | } |
514 | | |
515 | | MediaCacheStream::MediaCacheStream(ChannelMediaResource* aClient, |
516 | | bool aIsPrivateBrowsing) |
517 | | : mMediaCache(nullptr) |
518 | | , mClient(aClient) |
519 | | , mIsTransportSeekable(false) |
520 | | , mCacheSuspended(false) |
521 | | , mChannelEnded(false) |
522 | | , mStreamOffset(0) |
523 | | , mPlaybackBytesPerSecond(10000) |
524 | | , mPinCount(0) |
525 | | , mNotifyDataEndedStatus(NS_ERROR_NOT_INITIALIZED) |
526 | | , mMetadataInPartialBlockBuffer(false) |
527 | | , mIsPrivateBrowsing(aIsPrivateBrowsing) |
528 | 0 | { |
529 | 0 | } |
530 | | |
531 | | size_t MediaCacheStream::SizeOfExcludingThis( |
532 | | MallocSizeOf aMallocSizeOf) const |
533 | 0 | { |
534 | 0 | // Looks like these are not owned: |
535 | 0 | // - mClient |
536 | 0 | size_t size = mBlocks.ShallowSizeOfExcludingThis(aMallocSizeOf); |
537 | 0 | size += mReadaheadBlocks.SizeOfExcludingThis(aMallocSizeOf); |
538 | 0 | size += mMetadataBlocks.SizeOfExcludingThis(aMallocSizeOf); |
539 | 0 | size += mPlayedBlocks.SizeOfExcludingThis(aMallocSizeOf); |
540 | 0 | size += aMallocSizeOf(mPartialBlockBuffer.get()); |
541 | 0 |
|
542 | 0 | return size; |
543 | 0 | } |
544 | | |
545 | | size_t MediaCacheStream::BlockList::SizeOfExcludingThis( |
546 | | MallocSizeOf aMallocSizeOf) const |
547 | 0 | { |
548 | 0 | return mEntries.ShallowSizeOfExcludingThis(aMallocSizeOf); |
549 | 0 | } |
550 | | |
551 | | void MediaCacheStream::BlockList::AddFirstBlock(int32_t aBlock) |
552 | 0 | { |
553 | 0 | NS_ASSERTION(!mEntries.GetEntry(aBlock), "Block already in list"); |
554 | 0 | Entry* entry = mEntries.PutEntry(aBlock); |
555 | 0 |
|
556 | 0 | if (mFirstBlock < 0) { |
557 | 0 | entry->mNextBlock = entry->mPrevBlock = aBlock; |
558 | 0 | } else { |
559 | 0 | entry->mNextBlock = mFirstBlock; |
560 | 0 | entry->mPrevBlock = mEntries.GetEntry(mFirstBlock)->mPrevBlock; |
561 | 0 | mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = aBlock; |
562 | 0 | mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = aBlock; |
563 | 0 | } |
564 | 0 | mFirstBlock = aBlock; |
565 | 0 | ++mCount; |
566 | 0 | } |
567 | | |
568 | | void MediaCacheStream::BlockList::AddAfter(int32_t aBlock, int32_t aBefore) |
569 | 0 | { |
570 | 0 | NS_ASSERTION(!mEntries.GetEntry(aBlock), "Block already in list"); |
571 | 0 | Entry* entry = mEntries.PutEntry(aBlock); |
572 | 0 |
|
573 | 0 | Entry* addAfter = mEntries.GetEntry(aBefore); |
574 | 0 | NS_ASSERTION(addAfter, "aBefore not in list"); |
575 | 0 |
|
576 | 0 | entry->mNextBlock = addAfter->mNextBlock; |
577 | 0 | entry->mPrevBlock = aBefore; |
578 | 0 | mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = aBlock; |
579 | 0 | mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = aBlock; |
580 | 0 | ++mCount; |
581 | 0 | } |
582 | | |
583 | | void MediaCacheStream::BlockList::RemoveBlock(int32_t aBlock) |
584 | 0 | { |
585 | 0 | Entry* entry = mEntries.GetEntry(aBlock); |
586 | 0 | MOZ_DIAGNOSTIC_ASSERT(entry, "Block not in list"); |
587 | 0 |
|
588 | 0 | if (entry->mNextBlock == aBlock) { |
589 | 0 | MOZ_DIAGNOSTIC_ASSERT(entry->mPrevBlock == aBlock, "Linked list inconsistency"); |
590 | 0 | MOZ_DIAGNOSTIC_ASSERT(mFirstBlock == aBlock, "Linked list inconsistency"); |
591 | 0 | mFirstBlock = -1; |
592 | 0 | } else { |
593 | 0 | if (mFirstBlock == aBlock) { |
594 | 0 | mFirstBlock = entry->mNextBlock; |
595 | 0 | } |
596 | 0 | mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = entry->mPrevBlock; |
597 | 0 | mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = entry->mNextBlock; |
598 | 0 | } |
599 | 0 | mEntries.RemoveEntry(entry); |
600 | 0 | --mCount; |
601 | 0 | } |
602 | | |
603 | | int32_t MediaCacheStream::BlockList::GetLastBlock() const |
604 | 0 | { |
605 | 0 | if (mFirstBlock < 0) |
606 | 0 | return -1; |
607 | 0 | return mEntries.GetEntry(mFirstBlock)->mPrevBlock; |
608 | 0 | } |
609 | | |
610 | | int32_t MediaCacheStream::BlockList::GetNextBlock(int32_t aBlock) const |
611 | 0 | { |
612 | 0 | int32_t block = mEntries.GetEntry(aBlock)->mNextBlock; |
613 | 0 | if (block == mFirstBlock) |
614 | 0 | return -1; |
615 | 0 | return block; |
616 | 0 | } |
617 | | |
618 | | int32_t MediaCacheStream::BlockList::GetPrevBlock(int32_t aBlock) const |
619 | 0 | { |
620 | 0 | if (aBlock == mFirstBlock) |
621 | 0 | return -1; |
622 | 0 | return mEntries.GetEntry(aBlock)->mPrevBlock; |
623 | 0 | } |
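
Together with GetFirstBlock() (used later in this file), the accessors above treat BlockList as a circular doubly-linked list keyed by cache-block index, with -1 signalling the end of a walk. A small traversal sketch; VisitBlocks is a hypothetical helper, not part of the class:

  // Hypothetical helper, for illustration only.
  static void VisitBlocks(MediaCacheStream::BlockList& aList)
  {
    for (int32_t block = aList.GetFirstBlock(); block >= 0;
         block = aList.GetNextBlock(block)) {
      // ... inspect the cache block at index |block| ...
    }
  }
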
624 | | |
625 | | #ifdef DEBUG |
626 | | void MediaCacheStream::BlockList::Verify() |
627 | | { |
628 | | int32_t count = 0; |
629 | | if (mFirstBlock >= 0) { |
630 | | int32_t block = mFirstBlock; |
631 | | do { |
632 | | Entry* entry = mEntries.GetEntry(block); |
633 | | NS_ASSERTION(mEntries.GetEntry(entry->mNextBlock)->mPrevBlock == block, |
634 | | "Bad prev link"); |
635 | | NS_ASSERTION(mEntries.GetEntry(entry->mPrevBlock)->mNextBlock == block, |
636 | | "Bad next link"); |
637 | | block = entry->mNextBlock; |
638 | | ++count; |
639 | | } while (block != mFirstBlock); |
640 | | } |
641 | | NS_ASSERTION(count == mCount, "Bad count"); |
642 | | } |
643 | | #endif |
644 | | |
645 | | static void UpdateSwappedBlockIndex(int32_t* aBlockIndex, |
646 | | int32_t aBlock1Index, int32_t aBlock2Index) |
647 | 0 | { |
648 | 0 | int32_t index = *aBlockIndex; |
649 | 0 | if (index == aBlock1Index) { |
650 | 0 | *aBlockIndex = aBlock2Index; |
651 | 0 | } else if (index == aBlock2Index) { |
652 | 0 | *aBlockIndex = aBlock1Index; |
653 | 0 | } |
654 | 0 | } |
655 | | |
656 | | void |
657 | | MediaCacheStream::BlockList::NotifyBlockSwapped(int32_t aBlockIndex1, |
658 | | int32_t aBlockIndex2) |
659 | 0 | { |
660 | 0 | Entry* e1 = mEntries.GetEntry(aBlockIndex1); |
661 | 0 | Entry* e2 = mEntries.GetEntry(aBlockIndex2); |
662 | 0 | int32_t e1Prev = -1, e1Next = -1, e2Prev = -1, e2Next = -1; |
663 | 0 |
|
664 | 0 | // Fix mFirstBlock |
665 | 0 | UpdateSwappedBlockIndex(&mFirstBlock, aBlockIndex1, aBlockIndex2); |
666 | 0 |
|
667 | 0 | // Fix mNextBlock/mPrevBlock links. First capture previous/next links |
668 | 0 | // so we don't get confused due to aliasing. |
669 | 0 | if (e1) { |
670 | 0 | e1Prev = e1->mPrevBlock; |
671 | 0 | e1Next = e1->mNextBlock; |
672 | 0 | } |
673 | 0 | if (e2) { |
674 | 0 | e2Prev = e2->mPrevBlock; |
675 | 0 | e2Next = e2->mNextBlock; |
676 | 0 | } |
677 | 0 | // Update the entries. |
678 | 0 | if (e1) { |
679 | 0 | mEntries.GetEntry(e1Prev)->mNextBlock = aBlockIndex2; |
680 | 0 | mEntries.GetEntry(e1Next)->mPrevBlock = aBlockIndex2; |
681 | 0 | } |
682 | 0 | if (e2) { |
683 | 0 | mEntries.GetEntry(e2Prev)->mNextBlock = aBlockIndex1; |
684 | 0 | mEntries.GetEntry(e2Next)->mPrevBlock = aBlockIndex1; |
685 | 0 | } |
686 | 0 |
|
687 | 0 | // Fix hashtable keys. First remove stale entries. |
688 | 0 | if (e1) { |
689 | 0 | e1Prev = e1->mPrevBlock; |
690 | 0 | e1Next = e1->mNextBlock; |
691 | 0 | mEntries.RemoveEntry(e1); |
692 | 0 | // Refresh pointer after hashtable mutation. |
693 | 0 | e2 = mEntries.GetEntry(aBlockIndex2); |
694 | 0 | } |
695 | 0 | if (e2) { |
696 | 0 | e2Prev = e2->mPrevBlock; |
697 | 0 | e2Next = e2->mNextBlock; |
698 | 0 | mEntries.RemoveEntry(e2); |
699 | 0 | } |
700 | 0 | // Put new entries back. |
701 | 0 | if (e1) { |
702 | 0 | e1 = mEntries.PutEntry(aBlockIndex2); |
703 | 0 | e1->mNextBlock = e1Next; |
704 | 0 | e1->mPrevBlock = e1Prev; |
705 | 0 | } |
706 | 0 | if (e2) { |
707 | 0 | e2 = mEntries.PutEntry(aBlockIndex1); |
708 | 0 | e2->mNextBlock = e2Next; |
709 | 0 | e2->mPrevBlock = e2Prev; |
710 | 0 | } |
711 | 0 | } |
712 | | |
713 | | void |
714 | | MediaCache::FlushInternal(AutoLock& aLock) |
715 | 0 | { |
716 | 0 | for (uint32_t blockIndex = 0; blockIndex < mIndex.Length(); ++blockIndex) { |
717 | 0 | FreeBlock(aLock, blockIndex); |
718 | 0 | } |
719 | 0 |
|
720 | 0 | // Truncate index array. |
721 | 0 | Truncate(); |
722 | 0 | NS_ASSERTION(mIndex.Length() == 0, "Blocks leaked?"); |
723 | 0 | // Reset block cache to its pristine state. |
724 | 0 | mBlockCache->Flush(); |
725 | 0 | } |
726 | | |
727 | | void |
728 | | MediaCache::Flush() |
729 | 0 | { |
730 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
731 | 0 | nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction( |
732 | 0 | "MediaCache::Flush", [self = RefPtr<MediaCache>(this)]() { |
733 | 0 | AutoLock lock(self->mMonitor); |
734 | 0 | self->FlushInternal(lock); |
735 | 0 | }); |
736 | 0 | sThread->Dispatch(r.forget()); |
737 | 0 | } |
738 | | |
739 | | void |
740 | | MediaCache::CloseStreamsForPrivateBrowsing() |
741 | 0 | { |
742 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
743 | 0 | sThread->Dispatch(NS_NewRunnableFunction( |
744 | 0 | "MediaCache::CloseStreamsForPrivateBrowsing", |
745 | 0 | [self = RefPtr<MediaCache>(this)]() { |
746 | 0 | AutoLock lock(self->mMonitor); |
747 | 0 | // Copy mStreams since CloseInternal() will change the array. |
748 | 0 | nsTArray<MediaCacheStream*> streams(self->mStreams); |
749 | 0 | for (MediaCacheStream* s : streams) { |
750 | 0 | if (s->mIsPrivateBrowsing) { |
751 | 0 | s->CloseInternal(lock); |
752 | 0 | } |
753 | 0 | } |
754 | 0 | })); |
755 | 0 | } |
756 | | |
757 | | /* static */ RefPtr<MediaCache> |
758 | | MediaCache::GetMediaCache(int64_t aContentLength) |
759 | 0 | { |
760 | 0 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
761 | 0 |
|
762 | 0 | if (!sThreadInit) { |
763 | 0 | sThreadInit = true; |
764 | 0 | nsCOMPtr<nsIThread> thread; |
765 | 0 | nsresult rv = NS_NewNamedThread("MediaCache", getter_AddRefs(thread)); |
766 | 0 | if (NS_FAILED(rv)) { |
767 | 0 | NS_WARNING("Failed to create a thread for MediaCache."); |
768 | 0 | return nullptr; |
769 | 0 | } |
770 | 0 | sThread = thread.forget(); |
771 | 0 |
|
772 | 0 | static struct ClearThread |
773 | 0 | { |
774 | 0 | // Called during shutdown to clear sThread. |
775 | 0 | void operator=(std::nullptr_t) |
776 | 0 | { |
777 | 0 | nsCOMPtr<nsIThread> thread = sThread.forget(); |
778 | 0 | MOZ_ASSERT(thread); |
779 | 0 | thread->Shutdown(); |
780 | 0 | } |
781 | 0 | } sClearThread; |
782 | 0 | ClearOnShutdown(&sClearThread, ShutdownPhase::ShutdownThreads); |
783 | 0 | } |
784 | 0 |
|
785 | 0 | if (!sThread) { |
786 | 0 | return nullptr; |
787 | 0 | } |
788 | 0 | |
789 | 0 | if (aContentLength > 0 && |
790 | 0 | aContentLength <= int64_t(StaticPrefs::MediaMemoryCacheMaxSize()) * 1024) { |
791 | 0 | // Small-enough resource, use a new memory-backed MediaCache. |
792 | 0 | RefPtr<MediaBlockCacheBase> bc = new MemoryBlockCache(aContentLength); |
793 | 0 | nsresult rv = bc->Init(); |
794 | 0 | if (NS_SUCCEEDED(rv)) { |
795 | 0 | RefPtr<MediaCache> mc = new MediaCache(bc); |
796 | 0 | LOG("GetMediaCache(%" PRIi64 ") -> Memory MediaCache %p", |
797 | 0 | aContentLength, |
798 | 0 | mc.get()); |
799 | 0 | return mc; |
800 | 0 | } |
801 | 0 | // MemoryBlockCache initialization failed, clean up and try for a |
802 | 0 | // file-backed MediaCache below. |
803 | 0 | } |
804 | 0 |
|
805 | 0 | if (gMediaCache) { |
806 | 0 | LOG("GetMediaCache(%" PRIi64 ") -> Existing file-backed MediaCache", |
807 | 0 | aContentLength); |
808 | 0 | return gMediaCache; |
809 | 0 | } |
810 | 0 |
|
811 | 0 | RefPtr<MediaBlockCacheBase> bc = new FileBlockCache(); |
812 | 0 | nsresult rv = bc->Init(); |
813 | 0 | if (NS_SUCCEEDED(rv)) { |
814 | 0 | gMediaCache = new MediaCache(bc); |
815 | 0 | LOG("GetMediaCache(%" PRIi64 ") -> Created file-backed MediaCache", |
816 | 0 | aContentLength); |
817 | 0 | } else { |
818 | 0 | LOG("GetMediaCache(%" PRIi64 ") -> Failed to create file-backed MediaCache", |
819 | 0 | aContentLength); |
820 | 0 | } |
821 | 0 |
|
822 | 0 | return gMediaCache; |
823 | 0 | } |
824 | | |
825 | | nsresult |
826 | | MediaCache::ReadCacheFile(AutoLock&, |
827 | | int64_t aOffset, |
828 | | void* aData, |
829 | | int32_t aLength, |
830 | | int32_t* aBytes) |
831 | 0 | { |
832 | 0 | if (!mBlockCache) { |
833 | 0 | return NS_ERROR_FAILURE; |
834 | 0 | } |
835 | 0 | return mBlockCache->Read(aOffset, reinterpret_cast<uint8_t*>(aData), aLength, aBytes); |
836 | 0 | } |
837 | | |
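
Because ReadCacheFile() may legitimately return fewer bytes than requested, a caller that needs a full range loops on it. A hedged sketch of that pattern; ReadFully is hypothetical, and the real consumers live in MediaCacheStream's read path outside this excerpt:

  // Hypothetical caller, for illustration only.
  static nsresult ReadFully(MediaCache* aCache, MonitorAutoLock& aLock,
                            int64_t aOffset, uint8_t* aBuffer, int32_t aLength)
  {
    int32_t done = 0;
    while (done < aLength) {
      int32_t bytes = 0;
      nsresult rv = aCache->ReadCacheFile(
        aLock, aOffset + done, aBuffer + done, aLength - done, &bytes);
      if (NS_FAILED(rv)) {
        return rv;
      }
      if (bytes == 0) {
        return NS_ERROR_FAILURE; // unexpected end of cached data
      }
      done += bytes; // partial read is fine; keep going
    }
    return NS_OK;
  }
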
838 | | // Allowed range is whatever can be accessed with an int32_t block index. |
839 | | static bool |
840 | | IsOffsetAllowed(int64_t aOffset) |
841 | 0 | { |
842 | 0 | return aOffset < (int64_t(INT32_MAX) + 1) * MediaCache::BLOCK_SIZE && |
843 | 0 | aOffset >= 0; |
844 | 0 | } |
845 | | |
846 | | // Convert 64-bit offset to 32-bit block index. |
847 | | // Assumes offset range-check was already done. |
848 | | static int32_t |
849 | | OffsetToBlockIndexUnchecked(int64_t aOffset) |
850 | 0 | { |
851 | 0 | // Still check for allowed range in debug builds, to catch out-of-range |
852 | 0 | // issues early during development. |
853 | 0 | MOZ_ASSERT(IsOffsetAllowed(aOffset)); |
854 | 0 | return int32_t(aOffset / MediaCache::BLOCK_SIZE); |
855 | 0 | } |
856 | | |
857 | | // Convert 64-bit offset to 32-bit block index. -1 if out of allowed range. |
858 | | static int32_t |
859 | | OffsetToBlockIndex(int64_t aOffset) |
860 | 0 | { |
861 | 0 | return IsOffsetAllowed(aOffset) ? OffsetToBlockIndexUnchecked(aOffset) : -1; |
862 | 0 | } |
863 | | |
864 | | // Convert 64-bit offset to 32-bit offset inside a block. |
865 | | // Will not fail (even if offset is outside allowed range), so there is no |
866 | | // need to check for errors. |
867 | | static int32_t |
868 | | OffsetInBlock(int64_t aOffset) |
869 | 0 | { |
870 | 0 | // Still check for allowed range in debug builds, to catch out-of-range |
871 | 0 | // issues early during development. |
872 | 0 | MOZ_ASSERT(IsOffsetAllowed(aOffset)); |
873 | 0 | return int32_t(aOffset % MediaCache::BLOCK_SIZE); |
874 | 0 | } |
875 | | |
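
Taken together, the three helpers above split a 64-bit stream offset into a 32-bit cache-block index plus an offset within that block. A small illustrative sketch; SplitOffset is hypothetical:

  // Hypothetical, for illustration only.
  static bool SplitOffset(int64_t aOffset, int32_t* aBlockIndex, int32_t* aOffsetInBlock)
  {
    if (!IsOffsetAllowed(aOffset)) {
      return false; // beyond what a 32-bit block index can address
    }
    *aBlockIndex = OffsetToBlockIndexUnchecked(aOffset); // aOffset / BLOCK_SIZE
    *aOffsetInBlock = OffsetInBlock(aOffset);            // aOffset % BLOCK_SIZE
    // Invariant: aOffset == int64_t(*aBlockIndex) * MediaCache::BLOCK_SIZE + *aOffsetInBlock
    return true;
  }
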
876 | | int32_t |
877 | | MediaCache::FindBlockForIncomingData(AutoLock& aLock, |
878 | | TimeStamp aNow, |
879 | | MediaCacheStream* aStream, |
880 | | int32_t aStreamBlockIndex) |
881 | 0 | { |
882 | 0 | MOZ_ASSERT(sThread->IsOnCurrentThread()); |
883 | 0 |
|
884 | 0 | int32_t blockIndex = |
885 | 0 | FindReusableBlock(aLock, aNow, aStream, aStreamBlockIndex, INT32_MAX); |
886 | 0 |
|
887 | 0 | if (blockIndex < 0 || !IsBlockFree(blockIndex)) { |
888 | 0 | // The block returned is already allocated. |
889 | 0 | // Don't reuse it if a) there's room to expand the cache or |
890 | 0 | // b) the data we're going to store in the free block is not higher |
891 | 0 | // priority than the data already stored in the free block. |
892 | 0 | // The latter can lead us to go over the cache limit a bit. |
893 | 0 | if ((mIndex.Length() < uint32_t(mBlockCache->GetMaxBlocks()) || |
894 | 0 | blockIndex < 0 || |
895 | 0 | PredictNextUseForIncomingData(aLock, aStream) >= |
896 | 0 | PredictNextUse(aLock, aNow, blockIndex))) { |
897 | 0 | blockIndex = mIndex.Length(); |
898 | 0 | if (!mIndex.AppendElement()) |
899 | 0 | return -1; |
900 | 0 | mIndexWatermark = std::max(mIndexWatermark, blockIndex + 1); |
901 | 0 | mFreeBlocks.AddFirstBlock(blockIndex); |
902 | 0 | return blockIndex; |
903 | 0 | } |
904 | 0 | } |
905 | 0 |
|
906 | 0 | return blockIndex; |
907 | 0 | } |
908 | | |
909 | | bool |
910 | | MediaCache::BlockIsReusable(AutoLock&, int32_t aBlockIndex) |
911 | 0 | { |
912 | 0 | Block* block = &mIndex[aBlockIndex]; |
913 | 0 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
914 | 0 | MediaCacheStream* stream = block->mOwners[i].mStream; |
915 | 0 | if (stream->mPinCount > 0 || |
916 | 0 | uint32_t(OffsetToBlockIndex(stream->mStreamOffset)) == |
917 | 0 | block->mOwners[i].mStreamBlock) { |
918 | 0 | return false; |
919 | 0 | } |
920 | 0 | } |
921 | 0 | return true; |
922 | 0 | } |
923 | | |
924 | | void |
925 | | MediaCache::AppendMostReusableBlock(AutoLock& aLock, |
926 | | BlockList* aBlockList, |
927 | | nsTArray<uint32_t>* aResult, |
928 | | int32_t aBlockIndexLimit) |
929 | 0 | { |
930 | 0 | int32_t blockIndex = aBlockList->GetLastBlock(); |
931 | 0 | if (blockIndex < 0) |
932 | 0 | return; |
933 | 0 | do { |
934 | 0 | // Don't consider blocks for pinned streams, or blocks that are |
935 | 0 | // beyond the specified limit, or a block that contains a stream's |
936 | 0 | // current read position (such a block contains both played data |
937 | 0 | // and readahead data) |
938 | 0 | if (blockIndex < aBlockIndexLimit && BlockIsReusable(aLock, blockIndex)) { |
939 | 0 | aResult->AppendElement(blockIndex); |
940 | 0 | return; |
941 | 0 | } |
942 | 0 | blockIndex = aBlockList->GetPrevBlock(blockIndex); |
943 | 0 | } while (blockIndex >= 0); |
944 | 0 | } |
945 | | |
946 | | int32_t |
947 | | MediaCache::FindReusableBlock(AutoLock& aLock, |
948 | | TimeStamp aNow, |
949 | | MediaCacheStream* aForStream, |
950 | | int32_t aForStreamBlock, |
951 | | int32_t aMaxSearchBlockIndex) |
952 | 0 | { |
953 | 0 | MOZ_ASSERT(sThread->IsOnCurrentThread()); |
954 | 0 |
|
955 | 0 | uint32_t length = std::min(uint32_t(aMaxSearchBlockIndex), uint32_t(mIndex.Length())); |
956 | 0 |
|
957 | 0 | if (aForStream && aForStreamBlock > 0 && |
958 | 0 | uint32_t(aForStreamBlock) <= aForStream->mBlocks.Length()) { |
959 | 0 | int32_t prevCacheBlock = aForStream->mBlocks[aForStreamBlock - 1]; |
960 | 0 | if (prevCacheBlock >= 0) { |
961 | 0 | uint32_t freeBlockScanEnd = |
962 | 0 | std::min(length, prevCacheBlock + FREE_BLOCK_SCAN_LIMIT); |
963 | 0 | for (uint32_t i = prevCacheBlock; i < freeBlockScanEnd; ++i) { |
964 | 0 | if (IsBlockFree(i)) |
965 | 0 | return i; |
966 | 0 | } |
967 | 0 | } |
968 | 0 | } |
969 | 0 |
|
970 | 0 | if (!mFreeBlocks.IsEmpty()) { |
971 | 0 | int32_t blockIndex = mFreeBlocks.GetFirstBlock(); |
972 | 0 | do { |
973 | 0 | if (blockIndex < aMaxSearchBlockIndex) |
974 | 0 | return blockIndex; |
975 | 0 | blockIndex = mFreeBlocks.GetNextBlock(blockIndex); |
976 | 0 | } while (blockIndex >= 0); |
977 | 0 | } |
978 | 0 |
|
979 | 0 | // Build a list of the blocks we should consider for the "latest |
980 | 0 | // predicted time of next use". We can exploit the fact that the block |
981 | 0 | // linked lists are ordered by increasing time of next use. This is |
982 | 0 | // actually the whole point of having the linked lists. |
983 | 0 | AutoTArray<uint32_t,8> candidates; |
984 | 0 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
985 | 0 | MediaCacheStream* stream = mStreams[i]; |
986 | 0 | if (stream->mPinCount > 0) { |
987 | 0 | // No point in even looking at this stream's blocks |
988 | 0 | continue; |
989 | 0 | } |
990 | 0 | |
991 | 0 | AppendMostReusableBlock( |
992 | 0 | aLock, &stream->mMetadataBlocks, &candidates, length); |
993 | 0 | AppendMostReusableBlock(aLock, &stream->mPlayedBlocks, &candidates, length); |
994 | 0 |
|
995 | 0 | // Don't consider readahead blocks in non-seekable streams. If we |
996 | 0 | // remove the block we won't be able to seek back to read it later. |
997 | 0 | if (stream->mIsTransportSeekable) { |
998 | 0 | AppendMostReusableBlock( |
999 | 0 | aLock, &stream->mReadaheadBlocks, &candidates, length); |
1000 | 0 | } |
1001 | 0 | } |
1002 | 0 |
|
1003 | 0 | TimeDuration latestUse; |
1004 | 0 | int32_t latestUseBlock = -1; |
1005 | 0 | for (uint32_t i = 0; i < candidates.Length(); ++i) { |
1006 | 0 | TimeDuration nextUse = PredictNextUse(aLock, aNow, candidates[i]); |
1007 | 0 | if (nextUse > latestUse) { |
1008 | 0 | latestUse = nextUse; |
1009 | 0 | latestUseBlock = candidates[i]; |
1010 | 0 | } |
1011 | 0 | } |
1012 | 0 |
|
1013 | 0 | return latestUseBlock; |
1014 | 0 | } |
1015 | | |
1016 | | MediaCache::BlockList* |
1017 | | MediaCache::GetListForBlock(AutoLock&, BlockOwner* aBlock) |
1018 | 0 | { |
1019 | 0 | switch (aBlock->mClass) { |
1020 | 0 | case METADATA_BLOCK: |
1021 | 0 | NS_ASSERTION(aBlock->mStream, "Metadata block has no stream?"); |
1022 | 0 | return &aBlock->mStream->mMetadataBlocks; |
1023 | 0 | case PLAYED_BLOCK: |
1024 | 0 | NS_ASSERTION(aBlock->mStream, "Played block has no stream?"); |
1025 | 0 | return &aBlock->mStream->mPlayedBlocks; |
1026 | 0 | case READAHEAD_BLOCK: |
1027 | 0 | NS_ASSERTION(aBlock->mStream, "Readahead block has no stream?"); |
1028 | 0 | return &aBlock->mStream->mReadaheadBlocks; |
1029 | 0 | default: |
1030 | 0 | NS_ERROR("Invalid block class"); |
1031 | 0 | return nullptr; |
1032 | 0 | } |
1033 | 0 | } |
1034 | | |
1035 | | MediaCache::BlockOwner* |
1036 | | MediaCache::GetBlockOwner(AutoLock&, |
1037 | | int32_t aBlockIndex, |
1038 | | MediaCacheStream* aStream) |
1039 | 0 | { |
1040 | 0 | Block* block = &mIndex[aBlockIndex]; |
1041 | 0 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
1042 | 0 | if (block->mOwners[i].mStream == aStream) |
1043 | 0 | return &block->mOwners[i]; |
1044 | 0 | } |
1045 | 0 | return nullptr; |
1046 | 0 | } |
1047 | | |
1048 | | void |
1049 | | MediaCache::SwapBlocks(AutoLock& aLock, |
1050 | | int32_t aBlockIndex1, |
1051 | | int32_t aBlockIndex2) |
1052 | 0 | { |
1053 | 0 | Block* block1 = &mIndex[aBlockIndex1]; |
1054 | 0 | Block* block2 = &mIndex[aBlockIndex2]; |
1055 | 0 |
|
1056 | 0 | block1->mOwners.SwapElements(block2->mOwners); |
1057 | 0 |
|
1058 | 0 | // Now all references to block1 have to be replaced with block2 and |
1059 | 0 | // vice versa. |
1060 | 0 | // First update stream references to blocks via mBlocks. |
1061 | 0 | const Block* blocks[] = { block1, block2 }; |
1062 | 0 | int32_t blockIndices[] = { aBlockIndex1, aBlockIndex2 }; |
1063 | 0 | for (int32_t i = 0; i < 2; ++i) { |
1064 | 0 | for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) { |
1065 | 0 | const BlockOwner* b = &blocks[i]->mOwners[j]; |
1066 | 0 | b->mStream->mBlocks[b->mStreamBlock] = blockIndices[i]; |
1067 | 0 | } |
1068 | 0 | } |
1069 | 0 |
|
1070 | 0 | // Now update references to blocks in block lists. |
1071 | 0 | mFreeBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); |
1072 | 0 |
|
1073 | 0 | nsTHashtable<nsPtrHashKey<MediaCacheStream> > visitedStreams; |
1074 | 0 |
|
1075 | 0 | for (int32_t i = 0; i < 2; ++i) { |
1076 | 0 | for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) { |
1077 | 0 | MediaCacheStream* stream = blocks[i]->mOwners[j].mStream; |
1078 | 0 | // Make sure that we don't update the same stream twice --- that |
1079 | 0 | // would result in swapping the block references back again! |
1080 | 0 | if (visitedStreams.GetEntry(stream)) |
1081 | 0 | continue; |
1082 | 0 | visitedStreams.PutEntry(stream); |
1083 | 0 | stream->mReadaheadBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); |
1084 | 0 | stream->mPlayedBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); |
1085 | 0 | stream->mMetadataBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); |
1086 | 0 | } |
1087 | 0 | } |
1088 | 0 |
|
1089 | 0 | Verify(aLock); |
1090 | 0 | } |
1091 | | |
1092 | | void |
1093 | | MediaCache::RemoveBlockOwner(AutoLock& aLock, |
1094 | | int32_t aBlockIndex, |
1095 | | MediaCacheStream* aStream) |
1096 | 0 | { |
1097 | 0 | Block* block = &mIndex[aBlockIndex]; |
1098 | 0 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
1099 | 0 | BlockOwner* bo = &block->mOwners[i]; |
1100 | 0 | if (bo->mStream == aStream) { |
1101 | 0 | GetListForBlock(aLock, bo)->RemoveBlock(aBlockIndex); |
1102 | 0 | bo->mStream->mBlocks[bo->mStreamBlock] = -1; |
1103 | 0 | block->mOwners.RemoveElementAt(i); |
1104 | 0 | if (block->mOwners.IsEmpty()) { |
1105 | 0 | mFreeBlocks.AddFirstBlock(aBlockIndex); |
1106 | 0 | } |
1107 | 0 | return; |
1108 | 0 | } |
1109 | 0 | } |
1110 | 0 | } |
1111 | | |
1112 | | void |
1113 | | MediaCache::AddBlockOwnerAsReadahead(AutoLock& aLock, |
1114 | | int32_t aBlockIndex, |
1115 | | MediaCacheStream* aStream, |
1116 | | int32_t aStreamBlockIndex) |
1117 | 0 | { |
1118 | 0 | Block* block = &mIndex[aBlockIndex]; |
1119 | 0 | if (block->mOwners.IsEmpty()) { |
1120 | 0 | mFreeBlocks.RemoveBlock(aBlockIndex); |
1121 | 0 | } |
1122 | 0 | BlockOwner* bo = block->mOwners.AppendElement(); |
1123 | 0 | mBlockOwnersWatermark = |
1124 | 0 | std::max(mBlockOwnersWatermark, uint32_t(block->mOwners.Length())); |
1125 | 0 | bo->mStream = aStream; |
1126 | 0 | bo->mStreamBlock = aStreamBlockIndex; |
1127 | 0 | aStream->mBlocks[aStreamBlockIndex] = aBlockIndex; |
1128 | 0 | bo->mClass = READAHEAD_BLOCK; |
1129 | 0 | InsertReadaheadBlock(aLock, bo, aBlockIndex); |
1130 | 0 | } |
1131 | | |
1132 | | void |
1133 | | MediaCache::FreeBlock(AutoLock& aLock, int32_t aBlock) |
1134 | 0 | { |
1135 | 0 | Block* block = &mIndex[aBlock]; |
1136 | 0 | if (block->mOwners.IsEmpty()) { |
1137 | 0 | // already free |
1138 | 0 | return; |
1139 | 0 | } |
1140 | 0 | |
1141 | 0 | LOG("Released block %d", aBlock); |
1142 | 0 |
|
1143 | 0 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
1144 | 0 | BlockOwner* bo = &block->mOwners[i]; |
1145 | 0 | GetListForBlock(aLock, bo)->RemoveBlock(aBlock); |
1146 | 0 | bo->mStream->mBlocks[bo->mStreamBlock] = -1; |
1147 | 0 | } |
1148 | 0 | block->mOwners.Clear(); |
1149 | 0 | mFreeBlocks.AddFirstBlock(aBlock); |
1150 | 0 | Verify(aLock); |
1151 | 0 | } |
1152 | | |
1153 | | TimeDuration |
1154 | | MediaCache::PredictNextUse(AutoLock&, TimeStamp aNow, int32_t aBlock) |
1155 | 0 | { |
1156 | 0 | MOZ_ASSERT(sThread->IsOnCurrentThread()); |
1157 | 0 | NS_ASSERTION(!IsBlockFree(aBlock), "aBlock is free"); |
1158 | 0 |
|
1159 | 0 | Block* block = &mIndex[aBlock]; |
1160 | 0 | // Blocks can belong to multiple streams. The predicted next use |
1161 | 0 | // time is the earliest time predicted by any of the streams. |
1162 | 0 | TimeDuration result; |
1163 | 0 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
1164 | 0 | BlockOwner* bo = &block->mOwners[i]; |
1165 | 0 | TimeDuration prediction; |
1166 | 0 | switch (bo->mClass) { |
1167 | 0 | case METADATA_BLOCK: |
1168 | 0 | // This block should be managed in LRU mode. For metadata we predict |
1169 | 0 | // that the time until the next use is the time since the last use. |
1170 | 0 | prediction = aNow - bo->mLastUseTime; |
1171 | 0 | break; |
1172 | 0 | case PLAYED_BLOCK: { |
1173 | 0 | // This block should be managed in LRU mode, and we should impose |
1174 | 0 | // a "replay delay" to reflect the likelihood of replay happening |
1175 | 0 | NS_ASSERTION(static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE < |
1176 | 0 | bo->mStream->mStreamOffset, |
1177 | 0 | "Played block after the current stream position?"); |
1178 | 0 | int64_t bytesBehind = |
1179 | 0 | bo->mStream->mStreamOffset - static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE; |
1180 | 0 | int64_t millisecondsBehind = |
1181 | 0 | bytesBehind*1000/bo->mStream->mPlaybackBytesPerSecond; |
1182 | 0 | prediction = TimeDuration::FromMilliseconds( |
1183 | 0 | std::min<int64_t>(millisecondsBehind*REPLAY_PENALTY_FACTOR, INT32_MAX)); |
1184 | 0 | break; |
1185 | 0 | } |
1186 | 0 | case READAHEAD_BLOCK: { |
1187 | 0 | int64_t bytesAhead = |
1188 | 0 | static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE - bo->mStream->mStreamOffset; |
1189 | 0 | NS_ASSERTION(bytesAhead >= 0, |
1190 | 0 | "Readahead block before the current stream position?"); |
1191 | 0 | int64_t millisecondsAhead = |
1192 | 0 | bytesAhead*1000/bo->mStream->mPlaybackBytesPerSecond; |
1193 | 0 | prediction = TimeDuration::FromMilliseconds( |
1194 | 0 | std::min<int64_t>(millisecondsAhead, INT32_MAX)); |
1195 | 0 | break; |
1196 | 0 | } |
1197 | 0 | default: |
1198 | 0 | NS_ERROR("Invalid class for predicting next use"); |
1199 | 0 | return TimeDuration(0); |
1200 | 0 | } |
1201 | 0 | if (i == 0 || prediction < result) { |
1202 | 0 | result = prediction; |
1203 | 0 | } |
1204 | 0 | } |
1205 | 0 | return result; |
1206 | 0 | } |
1207 | | |
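
A worked example of the two main cases above, using the 10000 bytes/s default that the MediaCacheStream constructor assigns to mPlaybackBytesPerSecond (the byte distances are illustrative):

  //   PLAYED_BLOCK    100000 bytes behind: 100000 * 1000 / 10000 = 10000 ms,
  //                   then * REPLAY_PENALTY_FACTOR (3)           -> 30 s
  //   READAHEAD_BLOCK 100000 bytes ahead:  100000 * 1000 / 10000 -> 10 s
  // The played block gets the later predicted next use, so it is the better
  // eviction candidate; this is the replay penalty described near the top of
  // the file.
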
1208 | | TimeDuration |
1209 | | MediaCache::PredictNextUseForIncomingData(AutoLock&, MediaCacheStream* aStream) |
1210 | 0 | { |
1211 | 0 | MOZ_ASSERT(sThread->IsOnCurrentThread()); |
1212 | 0 |
|
1213 | 0 | int64_t bytesAhead = aStream->mChannelOffset - aStream->mStreamOffset; |
1214 | 0 | if (bytesAhead <= -BLOCK_SIZE) { |
1215 | 0 | // Hmm, no idea when data behind us will be used. Guess 24 hours. |
1216 | 0 | return TimeDuration::FromSeconds(24*60*60); |
1217 | 0 | } |
1218 | 0 | if (bytesAhead <= 0) |
1219 | 0 | return TimeDuration(0); |
1220 | 0 | int64_t millisecondsAhead = bytesAhead*1000/aStream->mPlaybackBytesPerSecond; |
1221 | 0 | return TimeDuration::FromMilliseconds( |
1222 | 0 | std::min<int64_t>(millisecondsAhead, INT32_MAX)); |
1223 | 0 | } |
1224 | | |
1225 | | void |
1226 | | MediaCache::Update() |
1227 | 0 | { |
1228 | 0 | MOZ_ASSERT(sThread->IsOnCurrentThread()); |
1229 | 0 |
|
1230 | 0 | AutoLock lock(mMonitor); |
1231 | 0 |
|
1232 | 0 | struct StreamAction |
1233 | 0 | { |
1234 | 0 | enum |
1235 | 0 | { |
1236 | 0 | NONE, |
1237 | 0 | SEEK, |
1238 | 0 | RESUME, |
1239 | 0 | SUSPEND |
1240 | 0 | } mTag = NONE; |
1241 | 0 | // Members for 'SEEK' only. |
1242 | 0 | bool mResume = false; |
1243 | 0 | int64_t mSeekTarget = -1; |
1244 | 0 | }; |
1245 | 0 |
|
1246 | 0 | // The action to use for each stream. We store these so we can make |
1247 | 0 | // decisions while holding the cache lock but implement those decisions |
1248 | 0 | // without holding the cache lock, since we need to call out to |
1249 | 0 | // stream, decoder and element code. |
1250 | 0 | AutoTArray<StreamAction,10> actions; |
1251 | 0 |
|
1252 | 0 | mUpdateQueued = false; |
1253 | | #ifdef DEBUG |
1254 | | mInUpdate = true; |
1255 | | #endif |
1256 | |
|
1257 | 0 | int32_t maxBlocks = mBlockCache->GetMaxBlocks(); |
1258 | 0 | TimeStamp now = TimeStamp::Now(); |
1259 | 0 |
|
1260 | 0 | int32_t freeBlockCount = mFreeBlocks.GetCount(); |
1261 | 0 | TimeDuration latestPredictedUseForOverflow = 0; |
1262 | 0 | if (mIndex.Length() > uint32_t(maxBlocks)) { |
1263 | 0 | // Try to trim back the cache to its desired maximum size. The cache may |
1264 | 0 | // have overflowed simply due to data being received when we have |
1265 | 0 | // no blocks in the main part of the cache that are free or lower |
1266 | 0 | // priority than the new data. The cache can also be overflowing because |
1267 | 0 | // the media.cache_size preference was reduced. |
1268 | 0 | // First, figure out what the least valuable block in the cache overflow |
1269 | 0 | // is. We don't want to replace any blocks in the main part of the |
1270 | 0 | // cache whose expected time of next use is earlier or equal to that. |
1271 | 0 | // If we allow that, we can effectively end up discarding overflowing |
1272 | 0 | // blocks (by moving an overflowing block to the main part of the cache, |
1273 | 0 | // and then overwriting it with another overflowing block), and we try |
1274 | 0 | // to avoid that since it requires HTTP seeks. |
1275 | 0 | // We also use this loop to eliminate overflowing blocks from |
1276 | 0 | // freeBlockCount. |
1277 | 0 | for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks; |
1278 | 0 | --blockIndex) { |
1279 | 0 | if (IsBlockFree(blockIndex)) { |
1280 | 0 | // Don't count overflowing free blocks in our free block count |
1281 | 0 | --freeBlockCount; |
1282 | 0 | continue; |
1283 | 0 | } |
1284 | 0 | TimeDuration predictedUse = PredictNextUse(lock, now, blockIndex); |
1285 | 0 | latestPredictedUseForOverflow = std::max(latestPredictedUseForOverflow, predictedUse); |
1286 | 0 | } |
1287 | 0 | } else { |
1288 | 0 | freeBlockCount += maxBlocks - mIndex.Length(); |
1289 | 0 | } |
1290 | 0 |
|
1291 | 0 | // Now try to move overflowing blocks to the main part of the cache. |
1292 | 0 | for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks; |
1293 | 0 | --blockIndex) { |
1294 | 0 | if (IsBlockFree(blockIndex)) |
1295 | 0 | continue; |
1296 | 0 | |
1297 | 0 | Block* block = &mIndex[blockIndex]; |
1298 | 0 | // Try to relocate the block close to other blocks for the first stream. |
1299 | 0 | // There is no point in trying to make it close to other blocks in |
1300 | 0 | // *all* the streams it might belong to. |
1301 | 0 | int32_t destinationBlockIndex = |
1302 | 0 | FindReusableBlock(lock, |
1303 | 0 | now, |
1304 | 0 | block->mOwners[0].mStream, |
1305 | 0 | block->mOwners[0].mStreamBlock, |
1306 | 0 | maxBlocks); |
1307 | 0 | if (destinationBlockIndex < 0) { |
1308 | 0 | // Nowhere to place this overflow block. We won't be able to |
1309 | 0 | // place any more overflow blocks. |
1310 | 0 | break; |
1311 | 0 | } |
1312 | 0 | |
1313 | 0 | // Don't evict |destinationBlockIndex| if it is within [cur, end); otherwise |
1314 | 0 | // a new channel would be opened to download this block again, which is bad. |
1315 | 0 | bool inCurrentCachedRange = false; |
1316 | 0 | for (BlockOwner& owner : mIndex[destinationBlockIndex].mOwners) { |
1317 | 0 | MediaCacheStream* stream = owner.mStream; |
1318 | 0 | int64_t end = OffsetToBlockIndexUnchecked( |
1319 | 0 | stream->GetCachedDataEndInternal(lock, stream->mStreamOffset)); |
1320 | 0 | int64_t cur = OffsetToBlockIndexUnchecked(stream->mStreamOffset); |
1321 | 0 | if (cur <= owner.mStreamBlock && owner.mStreamBlock < end) { |
1322 | 0 | inCurrentCachedRange = true; |
1323 | 0 | break; |
1324 | 0 | } |
1325 | 0 | } |
1326 | 0 | if (inCurrentCachedRange) { |
1327 | 0 | continue; |
1328 | 0 | } |
1329 | 0 | |
1330 | 0 | if (IsBlockFree(destinationBlockIndex) || |
1331 | 0 | PredictNextUse(lock, now, destinationBlockIndex) > |
1332 | 0 | latestPredictedUseForOverflow) { |
1333 | 0 | // Reuse blocks in the main part of the cache that are less useful than |
1334 | 0 | // the least useful overflow blocks |
1335 | 0 |
|
1336 | 0 | nsresult rv = mBlockCache->MoveBlock(blockIndex, destinationBlockIndex); |
1337 | 0 |
|
1338 | 0 | if (NS_SUCCEEDED(rv)) { |
1339 | 0 | // We successfully copied the file data. |
1340 | 0 | LOG("Swapping blocks %d and %d (trimming cache)", |
1341 | 0 | blockIndex, destinationBlockIndex); |
1342 | 0 | // Swapping the block metadata here lets us maintain the |
1343 | 0 | // correct positions in the linked lists |
1344 | 0 | SwapBlocks(lock, blockIndex, destinationBlockIndex); |
1345 | 0 | // Free the overflowing block now that its data has been moved into the main part of the cache.
1346 | 0 | LOG("Released block %d (trimming cache)", blockIndex); |
1347 | 0 | FreeBlock(lock, blockIndex); |
1348 | 0 | } |
1349 | 0 | } else { |
1350 | 0 | LOG("Could not trim cache block %d (destination %d, " |
1351 | 0 | "predicted next use %f, latest predicted use for overflow %f", |
1352 | 0 | blockIndex, |
1353 | 0 | destinationBlockIndex, |
1354 | 0 | PredictNextUse(lock, now, destinationBlockIndex).ToSeconds(), |
1355 | 0 | latestPredictedUseForOverflow.ToSeconds()); |
1356 | 0 | } |
1357 | 0 | } |
1358 | 0 | // Try chopping back the array of cache entries and the cache file. |
1359 | 0 | Truncate(); |
1360 | 0 |
|
1361 | 0 | // Count the blocks allocated for readahead of non-seekable streams |
1362 | 0 | // (these blocks can't be freed but we don't want them to monopolize the |
1363 | 0 | // cache) |
1364 | 0 | int32_t nonSeekableReadaheadBlockCount = 0; |
1365 | 0 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
1366 | 0 | MediaCacheStream* stream = mStreams[i]; |
1367 | 0 | if (!stream->mIsTransportSeekable) { |
1368 | 0 | nonSeekableReadaheadBlockCount += stream->mReadaheadBlocks.GetCount(); |
1369 | 0 | } |
1370 | 0 | } |
1371 | 0 |
|
1372 | 0 | // If freeBlockCount is zero, then compute the latest of |
1373 | 0 | // the predicted next-uses for all blocks |
1374 | 0 | TimeDuration latestNextUse; |
1375 | 0 | if (freeBlockCount == 0) { |
1376 | 0 | int32_t reusableBlock = FindReusableBlock(lock, now, nullptr, 0, maxBlocks); |
1377 | 0 | if (reusableBlock >= 0) { |
1378 | 0 | latestNextUse = PredictNextUse(lock, now, reusableBlock); |
1379 | 0 | } |
1380 | 0 | } |
1381 | 0 |
|
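 | | // Both thresholds below are compared against TimeDuration::ToSeconds() further down, so they are interpreted in seconds of predicted playback time.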
1382 | 0 | int32_t resumeThreshold = StaticPrefs::MediaCacheResumeThreshold(); |
1383 | 0 | int32_t readaheadLimit = StaticPrefs::MediaCacheReadaheadLimit(); |
1384 | 0 |
|
1385 | 0 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
1386 | 0 | actions.AppendElement(StreamAction{}); |
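 | | // The entry appended above starts out as a no-op; it falls through the `default` cases of the switches below unless this stream is told to seek, resume or suspend.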
1387 | 0 |
|
1388 | 0 | MediaCacheStream* stream = mStreams[i]; |
1389 | 0 | if (stream->mClosed) { |
1390 | 0 | LOG("Stream %p closed", stream); |
1391 | 0 | continue; |
1392 | 0 | } |
1393 | 0 |
|
1394 | 0 | // We make decisions based on mSeekTarget when there is a pending seek;
1395 | 0 | // otherwise we would keep issuing seek requests until mChannelOffset
1396 | 0 | // is changed by NotifyDataStarted(), which is bad.
1397 | 0 | int64_t channelOffset = |
1398 | 0 | stream->mSeekTarget != -1 ? stream->mSeekTarget : stream->mChannelOffset; |
1399 | 0 |
|
1400 | 0 | // Figure out where we should be reading from. It's the first |
1401 | 0 | // uncached byte after the current mStreamOffset. |
1402 | 0 | int64_t dataOffset = |
1403 | 0 | stream->GetCachedDataEndInternal(lock, stream->mStreamOffset); |
1404 | 0 | MOZ_ASSERT(dataOffset >= 0); |
1405 | 0 |
|
1406 | 0 | // Compute where we'd actually seek to in order to read at dataOffset
1407 | 0 | int64_t desiredOffset = dataOffset; |
1408 | 0 | if (stream->mIsTransportSeekable) { |
1409 | 0 | if (desiredOffset > channelOffset && |
1410 | 0 | desiredOffset <= channelOffset + SEEK_VS_READ_THRESHOLD) { |
1411 | 0 | // Assume it's more efficient to just keep reading up to the |
1412 | 0 | // desired position instead of trying to seek |
1413 | 0 | desiredOffset = channelOffset; |
1414 | 0 | } |
1415 | 0 | } else { |
1416 | 0 | // We can't seek directly to the desired offset... |
1417 | 0 | if (channelOffset > desiredOffset) { |
1418 | 0 | // Reading forward won't get us anywhere, we need to go backwards. |
1419 | 0 | // Seek back to 0 (the client will reopen the stream) and then |
1420 | 0 | // read forward. |
1421 | 0 | NS_WARNING("Can't seek backwards, so seeking to 0"); |
1422 | 0 | desiredOffset = 0; |
1423 | 0 | // Flush cached blocks out, since if this is a live stream |
1424 | 0 | // the cached data may be completely different next time we |
1425 | 0 | // read it. We have to assume that live streams don't |
1426 | 0 | // advertise themselves as being seekable... |
1427 | 0 | ReleaseStreamBlocks(lock, stream); |
1428 | 0 | } else { |
1429 | 0 | // otherwise reading forward is looking good, so just stay where we |
1430 | 0 | // are and don't trigger a channel seek! |
1431 | 0 | desiredOffset = channelOffset; |
1432 | 0 | } |
1433 | 0 | } |
1434 | 0 |
|
1435 | 0 | // Figure out if we should be reading data now or not. It's amazing |
1436 | 0 | // how complex this is, but each decision is simple enough. |
1437 | 0 | bool enableReading; |
1438 | 0 | if (stream->mStreamLength >= 0 && dataOffset >= stream->mStreamLength) { |
1439 | 0 | // We want data at the end of the stream, where there's nothing to |
1440 | 0 | // read. We don't want to try to read if we're suspended, because that |
1441 | 0 | // might create a new channel and seek unnecessarily (and incorrectly, |
1442 | 0 | // since HTTP doesn't allow seeking to the actual EOF), and we don't want |
1443 | 0 | // to suspend if we're not suspended and already reading at the end of |
1444 | 0 | // the stream, since there just might be more data than the server |
1445 | 0 | // advertised with Content-Length, and we may as well keep reading. |
1446 | 0 | // But we don't want to seek to the end of the stream if we're not |
1447 | 0 | // already there. |
1448 | 0 | LOG("Stream %p at end of stream", stream); |
1449 | 0 | enableReading = |
1450 | 0 | !stream->mCacheSuspended && stream->mStreamLength == channelOffset; |
1451 | 0 | } else if (desiredOffset < stream->mStreamOffset) { |
1452 | 0 | // We're reading to try to catch up to where the current stream |
1453 | 0 | // reader wants to be. Better not stop. |
1454 | 0 | LOG("Stream %p catching up", stream); |
1455 | 0 | enableReading = true; |
1456 | 0 | } else if (desiredOffset < stream->mStreamOffset + BLOCK_SIZE) { |
1457 | 0 | // The stream reader is waiting for us, or nearly so. Better feed it. |
1458 | 0 | LOG("Stream %p feeding reader", stream); |
1459 | 0 | enableReading = true; |
1460 | 0 | } else if (!stream->mIsTransportSeekable && |
1461 | 0 | nonSeekableReadaheadBlockCount >= maxBlocks*NONSEEKABLE_READAHEAD_MAX) { |
1462 | 0 | // This stream is not seekable and there are already too many blocks |
1463 | 0 | // being cached for readahead for nonseekable streams (which we can't |
1464 | 0 | // free). So stop reading ahead now. |
1465 | 0 | LOG("Stream %p throttling non-seekable readahead", stream); |
1466 | 0 | enableReading = false; |
1467 | 0 | } else if (mIndex.Length() > uint32_t(maxBlocks)) { |
1468 | 0 | // We're in the process of bringing the cache size back to the |
1469 | 0 | // desired limit, so don't bring in more data yet |
1470 | 0 | LOG("Stream %p throttling to reduce cache size", stream); |
1471 | 0 | enableReading = false; |
1472 | 0 | } else { |
1473 | 0 | TimeDuration predictedNewDataUse = |
1474 | 0 | PredictNextUseForIncomingData(lock, stream); |
1475 | 0 |
|
1476 | 0 | if (stream->mThrottleReadahead && |
1477 | 0 | stream->mCacheSuspended && |
1478 | 0 | predictedNewDataUse.ToSeconds() > resumeThreshold) { |
1479 | 0 | // Don't need data for a while, so don't bother waking up the stream |
1480 | 0 | LOG("Stream %p avoiding wakeup since more data is not needed", stream); |
1481 | 0 | enableReading = false; |
1482 | 0 | } else if (stream->mThrottleReadahead && |
1483 | 0 | predictedNewDataUse.ToSeconds() > readaheadLimit) { |
1484 | 0 | // Don't read ahead more than this much |
1485 | 0 | LOG("Stream %p throttling to avoid reading ahead too far", stream); |
1486 | 0 | enableReading = false; |
1487 | 0 | } else if (freeBlockCount > 0) { |
1488 | 0 | // Free blocks in the cache, so keep reading |
1489 | 0 | LOG("Stream %p reading since there are free blocks", stream); |
1490 | 0 | enableReading = true; |
1491 | 0 | } else if (latestNextUse <= TimeDuration(0)) { |
1492 | 0 | // No reusable blocks, so can't read anything |
1493 | 0 | LOG("Stream %p throttling due to no reusable blocks", stream); |
1494 | 0 | enableReading = false; |
1495 | 0 | } else { |
1496 | 0 | // Read ahead if the data we expect to read is more valuable than |
1497 | 0 | // the least valuable block in the main part of the cache |
1498 | 0 | LOG("Stream %p predict next data in %f, current worst block is %f", |
1499 | 0 | stream, predictedNewDataUse.ToSeconds(), latestNextUse.ToSeconds()); |
1500 | 0 | enableReading = predictedNewDataUse < latestNextUse; |
1501 | 0 | } |
1502 | 0 | } |
1503 | 0 |
|
1504 | 0 | if (enableReading) { |
1505 | 0 | for (uint32_t j = 0; j < i; ++j) { |
1506 | 0 | MediaCacheStream* other = mStreams[j]; |
1507 | 0 | if (other->mResourceID == stream->mResourceID && !other->mClosed && |
1508 | 0 | !other->mClientSuspended && !other->mChannelEnded && |
1509 | 0 | OffsetToBlockIndexUnchecked(other->mSeekTarget != -1 |
1510 | 0 | ? other->mSeekTarget |
1511 | 0 | : other->mChannelOffset) == |
1512 | 0 | OffsetToBlockIndexUnchecked(desiredOffset)) { |
1513 | 0 | // This block is already going to be read by the other stream. |
1514 | 0 | // So don't try to read it from this stream as well. |
1515 | 0 | enableReading = false; |
1516 | 0 | LOG("Stream %p waiting on same block (%" PRId32 ") from stream %p", |
1517 | 0 | stream, |
1518 | 0 | OffsetToBlockIndexUnchecked(desiredOffset), |
1519 | 0 | other); |
1520 | 0 | break; |
1521 | 0 | } |
1522 | 0 | } |
1523 | 0 | } |
1524 | 0 |
|
1525 | 0 | if (channelOffset != desiredOffset && enableReading) { |
1526 | 0 | // We need to seek now. |
1527 | 0 | NS_ASSERTION(stream->mIsTransportSeekable || desiredOffset == 0, |
1528 | 0 | "Trying to seek in a non-seekable stream!"); |
1529 | 0 | // Round seek offset down to the start of the block. This is essential |
1530 | 0 | // because we don't want to think we have part of a block already |
1531 | 0 | // in mPartialBlockBuffer. |
1532 | 0 | stream->mSeekTarget = |
1533 | 0 | OffsetToBlockIndexUnchecked(desiredOffset) * BLOCK_SIZE; |
1534 | 0 | actions[i].mTag = StreamAction::SEEK; |
1535 | 0 | actions[i].mResume = stream->mCacheSuspended; |
1536 | 0 | actions[i].mSeekTarget = stream->mSeekTarget; |
1537 | 0 | } else if (enableReading && stream->mCacheSuspended) { |
1538 | 0 | actions[i].mTag = StreamAction::RESUME; |
1539 | 0 | } else if (!enableReading && !stream->mCacheSuspended) { |
1540 | 0 | actions[i].mTag = StreamAction::SUSPEND; |
1541 | 0 | } |
1542 | 0 | } |
1543 | | #ifdef DEBUG |
1544 | | mInUpdate = false; |
1545 | | #endif |
1546 | |
|
1547 | 0 | // First, update the mCacheSuspended/mChannelEnded flags so that they're all correct
1548 | 0 | // when we fire our CacheClient commands below. Those commands can rely on these flags |
1549 | 0 | // being set correctly for all streams. |
1550 | 0 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
1551 | 0 | MediaCacheStream* stream = mStreams[i]; |
1552 | 0 | switch (actions[i].mTag) { |
1553 | 0 | case StreamAction::SEEK: |
1554 | 0 | stream->mCacheSuspended = false; |
1555 | 0 | stream->mChannelEnded = false; |
1556 | 0 | break; |
1557 | 0 | case StreamAction::RESUME: |
1558 | 0 | stream->mCacheSuspended = false; |
1559 | 0 | break; |
1560 | 0 | case StreamAction::SUSPEND: |
1561 | 0 | stream->mCacheSuspended = true; |
1562 | 0 | break; |
1563 | 0 | default: |
1564 | 0 | break; |
1565 | 0 | } |
1566 | 0 | } |
1567 | 0 |
|
1568 | 0 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
1569 | 0 | MediaCacheStream* stream = mStreams[i]; |
1570 | 0 | switch (actions[i].mTag) { |
1571 | 0 | case StreamAction::SEEK: |
1572 | 0 | LOG("Stream %p CacheSeek to %" PRId64 " (resume=%d)", |
1573 | 0 | stream, |
1574 | 0 | actions[i].mSeekTarget, |
1575 | 0 | actions[i].mResume); |
1576 | 0 | stream->mClient->CacheClientSeek(actions[i].mSeekTarget, |
1577 | 0 | actions[i].mResume); |
1578 | 0 | break; |
1579 | 0 | case StreamAction::RESUME: |
1580 | 0 | LOG("Stream %p Resumed", stream); |
1581 | 0 | stream->mClient->CacheClientResume(); |
1582 | 0 | QueueSuspendedStatusUpdate(lock, stream->mResourceID); |
1583 | 0 | break; |
1584 | 0 | case StreamAction::SUSPEND: |
1585 | 0 | LOG("Stream %p Suspended", stream); |
1586 | 0 | stream->mClient->CacheClientSuspend(); |
1587 | 0 | QueueSuspendedStatusUpdate(lock, stream->mResourceID); |
1588 | 0 | break; |
1589 | 0 | default: |
1590 | 0 | break; |
1591 | 0 | } |
1592 | 0 | } |
1593 | 0 |
|
1594 | 0 | // Notify streams about the suspended status changes. |
1595 | 0 | for (uint32_t i = 0; i < mSuspendedStatusToNotify.Length(); ++i) { |
1596 | 0 | MediaCache::ResourceStreamIterator iter(this, mSuspendedStatusToNotify[i]); |
1597 | 0 | while (MediaCacheStream* stream = iter.Next(lock)) { |
1598 | 0 | stream->mClient->CacheClientNotifySuspendedStatusChanged( |
1599 | 0 | stream->AreAllStreamsForResourceSuspended(lock)); |
1600 | 0 | } |
1601 | 0 | } |
1602 | 0 | mSuspendedStatusToNotify.Clear(); |
1603 | 0 | } |
1604 | | |
1605 | | class UpdateEvent : public Runnable |
1606 | | { |
1607 | | public: |
1608 | | explicit UpdateEvent(MediaCache* aMediaCache) |
1609 | | : Runnable("MediaCache::UpdateEvent") |
1610 | | , mMediaCache(aMediaCache) |
1611 | 0 | { |
1612 | 0 | } |
1613 | | |
1614 | | NS_IMETHOD Run() override |
1615 | 0 | { |
1616 | 0 | mMediaCache->Update(); |
1617 | 0 | // Ensure MediaCache is deleted on the main thread. |
1618 | 0 | NS_ProxyRelease("UpdateEvent::mMediaCache", |
1619 | 0 | SystemGroup::EventTargetFor(mozilla::TaskCategory::Other), |
1620 | 0 | mMediaCache.forget()); |
1621 | 0 | return NS_OK; |
1622 | 0 | } |
1623 | | |
1624 | | private: |
1625 | | RefPtr<MediaCache> mMediaCache; |
1626 | | }; |
1627 | | |
1628 | | void |
1629 | | MediaCache::QueueUpdate(AutoLock&) |
1630 | 0 | { |
1631 | 0 | // Queuing an update while we're in an update raises a high risk of |
1632 | 0 | // triggering endless events |
1633 | 0 | NS_ASSERTION(!mInUpdate, |
1634 | 0 | "Queuing an update while we're in an update"); |
1635 | 0 | if (mUpdateQueued) |
1636 | 0 | return; |
1637 | 0 | mUpdateQueued = true; |
1638 | 0 | // XXX MediaCache does updates when decoders are still running at |
1639 | 0 | // shutdown and get freed in the final cycle-collector cleanup. So |
1640 | 0 | // don't leak a runnable in that case. |
1641 | 0 | nsCOMPtr<nsIRunnable> event = new UpdateEvent(this); |
1642 | 0 | sThread->Dispatch(event.forget()); |
1643 | 0 | } |
1644 | | |
1645 | | void |
1646 | | MediaCache::QueueSuspendedStatusUpdate(AutoLock&, int64_t aResourceID) |
1647 | 0 | { |
1648 | 0 | if (!mSuspendedStatusToNotify.Contains(aResourceID)) { |
1649 | 0 | mSuspendedStatusToNotify.AppendElement(aResourceID); |
1650 | 0 | } |
1651 | 0 | } |
1652 | | |
1653 | | #ifdef DEBUG_VERIFY_CACHE |
1654 | | void |
1655 | | MediaCache::Verify(AutoLock&) |
1656 | | { |
1657 | | mFreeBlocks.Verify(); |
1658 | | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
1659 | | MediaCacheStream* stream = mStreams[i]; |
1660 | | stream->mReadaheadBlocks.Verify(); |
1661 | | stream->mPlayedBlocks.Verify(); |
1662 | | stream->mMetadataBlocks.Verify(); |
1663 | | |
1664 | | // Verify that the readahead blocks are listed in stream block order |
1665 | | int32_t block = stream->mReadaheadBlocks.GetFirstBlock(); |
1666 | | int32_t lastStreamBlock = -1; |
1667 | | while (block >= 0) { |
1668 | | uint32_t j = 0; |
1669 | | while (mIndex[block].mOwners[j].mStream != stream) { |
1670 | | ++j; |
1671 | | } |
1672 | | int32_t nextStreamBlock = |
1673 | | int32_t(mIndex[block].mOwners[j].mStreamBlock); |
1674 | | NS_ASSERTION(lastStreamBlock < nextStreamBlock, |
1675 | | "Blocks not increasing in readahead stream"); |
1676 | | lastStreamBlock = nextStreamBlock; |
1677 | | block = stream->mReadaheadBlocks.GetNextBlock(block); |
1678 | | } |
1679 | | } |
1680 | | } |
1681 | | #endif |
1682 | | |
1683 | | void |
1684 | | MediaCache::InsertReadaheadBlock(AutoLock& aLock, |
1685 | | BlockOwner* aBlockOwner, |
1686 | | int32_t aBlockIndex) |
1687 | 0 | { |
1688 | 0 | // Find the last block whose stream block is before aBlockIndex's |
1689 | 0 | // stream block, and insert after it |
1690 | 0 | MediaCacheStream* stream = aBlockOwner->mStream; |
1691 | 0 | int32_t readaheadIndex = stream->mReadaheadBlocks.GetLastBlock(); |
1692 | 0 | while (readaheadIndex >= 0) { |
1693 | 0 | BlockOwner* bo = GetBlockOwner(aLock, readaheadIndex, stream); |
1694 | 0 | NS_ASSERTION(bo, "stream must own its blocks"); |
1695 | 0 | if (bo->mStreamBlock < aBlockOwner->mStreamBlock) { |
1696 | 0 | stream->mReadaheadBlocks.AddAfter(aBlockIndex, readaheadIndex); |
1697 | 0 | return; |
1698 | 0 | } |
1699 | 0 | NS_ASSERTION(bo->mStreamBlock > aBlockOwner->mStreamBlock, |
1700 | 0 | "Duplicated blocks??"); |
1701 | 0 | readaheadIndex = stream->mReadaheadBlocks.GetPrevBlock(readaheadIndex); |
1702 | 0 | } |
1703 | 0 |
|
1704 | 0 | stream->mReadaheadBlocks.AddFirstBlock(aBlockIndex); |
1705 | 0 | Verify(aLock); |
1706 | 0 | } |
1707 | | |
1708 | | void |
1709 | | MediaCache::AllocateAndWriteBlock(AutoLock& aLock, |
1710 | | MediaCacheStream* aStream, |
1711 | | int32_t aStreamBlockIndex, |
1712 | | MediaCacheStream::ReadMode aMode, |
1713 | | Span<const uint8_t> aData1, |
1714 | | Span<const uint8_t> aData2) |
1715 | 0 | { |
1716 | 0 | MOZ_ASSERT(sThread->IsOnCurrentThread()); |
1717 | 0 |
|
1718 | 0 | // Remove all cached copies of this block |
1719 | 0 | ResourceStreamIterator iter(this, aStream->mResourceID); |
1720 | 0 | while (MediaCacheStream* stream = iter.Next(aLock)) { |
1721 | 0 | while (aStreamBlockIndex >= int32_t(stream->mBlocks.Length())) { |
1722 | 0 | stream->mBlocks.AppendElement(-1); |
1723 | 0 | } |
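 | | // A value of -1 in mBlocks means no cache block currently holds that stream block.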
1724 | 0 | if (stream->mBlocks[aStreamBlockIndex] >= 0) { |
1725 | 0 | // We no longer want to own this block |
1726 | 0 | int32_t globalBlockIndex = stream->mBlocks[aStreamBlockIndex]; |
1727 | 0 | LOG("Released block %d from stream %p block %d(%" PRId64 ")", |
1728 | 0 | globalBlockIndex, |
1729 | 0 | stream, |
1730 | 0 | aStreamBlockIndex, |
1731 | 0 | aStreamBlockIndex * BLOCK_SIZE); |
1732 | 0 | RemoveBlockOwner(aLock, globalBlockIndex, stream); |
1733 | 0 | } |
1734 | 0 | } |
1735 | 0 |
|
1736 | 0 | // Find a cache block to hold the incoming data.
1737 | 0 |
|
1738 | 0 | TimeStamp now = TimeStamp::Now(); |
1739 | 0 | int32_t blockIndex = |
1740 | 0 | FindBlockForIncomingData(aLock, now, aStream, aStreamBlockIndex); |
1741 | 0 | if (blockIndex >= 0) { |
1742 | 0 | FreeBlock(aLock, blockIndex); |
1743 | 0 |
|
1744 | 0 | Block* block = &mIndex[blockIndex]; |
1745 | 0 | LOG("Allocated block %d to stream %p block %d(%" PRId64 ")", |
1746 | 0 | blockIndex, |
1747 | 0 | aStream, |
1748 | 0 | aStreamBlockIndex, |
1749 | 0 | aStreamBlockIndex * BLOCK_SIZE); |
1750 | 0 |
|
1751 | 0 | ResourceStreamIterator iter(this, aStream->mResourceID); |
1752 | 0 | while (MediaCacheStream* stream = iter.Next(aLock)) { |
1753 | 0 | BlockOwner* bo = block->mOwners.AppendElement(); |
1754 | 0 | if (!bo) { |
1755 | 0 | // Roll back mOwners if any allocation fails. |
1756 | 0 | block->mOwners.Clear(); |
1757 | 0 | return; |
1758 | 0 | } |
1759 | 0 | mBlockOwnersWatermark = |
1760 | 0 | std::max(mBlockOwnersWatermark, uint32_t(block->mOwners.Length())); |
1761 | 0 | bo->mStream = stream; |
1762 | 0 | } |
1763 | 0 |
|
1764 | 0 | if (block->mOwners.IsEmpty()) { |
1765 | 0 | // This happens when all streams with the resource id are closed. We can |
1766 | 0 | // just return here now and discard the data. |
1767 | 0 | return; |
1768 | 0 | } |
1769 | 0 | |
1770 | 0 | // Tell each stream using this resource about the new block. |
1771 | 0 | for (auto& bo : block->mOwners) { |
1772 | 0 | bo.mStreamBlock = aStreamBlockIndex; |
1773 | 0 | bo.mLastUseTime = now; |
1774 | 0 | bo.mStream->mBlocks[aStreamBlockIndex] = blockIndex; |
1775 | 0 | if (aStreamBlockIndex * BLOCK_SIZE < bo.mStream->mStreamOffset) { |
1776 | 0 | bo.mClass = aMode == MediaCacheStream::MODE_PLAYBACK ? PLAYED_BLOCK |
1777 | 0 | : METADATA_BLOCK; |
1778 | 0 | // This must be the most-recently-used block, since we |
1779 | 0 | // marked it as used now (which may be slightly bogus, but we'll |
1780 | 0 | // treat it as used for simplicity). |
1781 | 0 | GetListForBlock(aLock, &bo)->AddFirstBlock(blockIndex); |
1782 | 0 | Verify(aLock); |
1783 | 0 | } else { |
1784 | 0 | // This may not be the latest readahead block, although it usually |
1785 | 0 | // will be. We may have to scan for the right place to insert |
1786 | 0 | // the block in the list. |
1787 | 0 | bo.mClass = READAHEAD_BLOCK; |
1788 | 0 | InsertReadaheadBlock(aLock, &bo, blockIndex); |
1789 | 0 | } |
1790 | 0 | } |
1791 | 0 |
|
1792 | 0 | // Invariant: block->mOwners.IsEmpty() iff we can find an entry |
1793 | 0 | // in mFreeBlocks for a given blockIndex. |
1794 | 0 | MOZ_DIAGNOSTIC_ASSERT(!block->mOwners.IsEmpty()); |
1795 | 0 | mFreeBlocks.RemoveBlock(blockIndex); |
1796 | 0 |
|
1797 | 0 | nsresult rv = mBlockCache->WriteBlock(blockIndex, aData1, aData2); |
1798 | 0 | if (NS_FAILED(rv)) { |
1799 | 0 | LOG("Released block %d from stream %p block %d(%" PRId64 ")", |
1800 | 0 | blockIndex, |
1801 | 0 | aStream, |
1802 | 0 | aStreamBlockIndex, |
1803 | 0 | aStreamBlockIndex * BLOCK_SIZE); |
1804 | 0 | FreeBlock(aLock, blockIndex); |
1805 | 0 | } |
1806 | 0 | } |
1807 | 0 |
|
1808 | 0 | // Queue an Update since the cache state has changed (for example |
1809 | 0 | // we might want to stop loading because the cache is full) |
1810 | 0 | QueueUpdate(aLock); |
1811 | 0 | } |
1812 | | |
1813 | | void |
1814 | | MediaCache::OpenStream(AutoLock& aLock, |
1815 | | MediaCacheStream* aStream, |
1816 | | bool aIsClone) |
1817 | 0 | { |
1818 | 0 | LOG("Stream %p opened", aStream); |
1819 | 0 | mStreams.AppendElement(aStream); |
1820 | 0 |
|
1821 | 0 | // A cloned stream should've got the ID from its original. |
1822 | 0 | if (!aIsClone) { |
1823 | 0 | MOZ_ASSERT(aStream->mResourceID == 0, "mResourceID has been initialized."); |
1824 | 0 | aStream->mResourceID = AllocateResourceID(aLock); |
1825 | 0 | } |
1826 | 0 |
|
1827 | 0 | // We should have a valid ID now, whether the stream is cloned or not.
1828 | 0 | MOZ_ASSERT(aStream->mResourceID > 0, "mResourceID is invalid"); |
1829 | 0 |
|
1830 | 0 | // Queue an update since a new stream has been opened. |
1831 | 0 | QueueUpdate(aLock); |
1832 | 0 | } |
1833 | | |
1834 | | void |
1835 | | MediaCache::ReleaseStream(AutoLock&, MediaCacheStream* aStream) |
1836 | 0 | { |
1837 | 0 | MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); |
1838 | 0 | LOG("Stream %p closed", aStream); |
1839 | 0 | mStreams.RemoveElement(aStream); |
1840 | 0 | // The caller needs to call QueueUpdate() to re-run Update(). |
1841 | 0 | } |
1842 | | |
1843 | | void |
1844 | | MediaCache::ReleaseStreamBlocks(AutoLock& aLock, MediaCacheStream* aStream) |
1845 | 0 | { |
1846 | 0 | // XXX scanning the entire stream doesn't seem great, if not much of it |
1847 | 0 | // is cached, but the only easy alternative is to scan the entire cache |
1848 | 0 | // which isn't better |
1849 | 0 | uint32_t length = aStream->mBlocks.Length(); |
1850 | 0 | for (uint32_t i = 0; i < length; ++i) { |
1851 | 0 | int32_t blockIndex = aStream->mBlocks[i]; |
1852 | 0 | if (blockIndex >= 0) { |
1853 | 0 | LOG("Released block %d from stream %p block %d(%" PRId64 ")", |
1854 | 0 | blockIndex, aStream, i, i*BLOCK_SIZE); |
1855 | 0 | RemoveBlockOwner(aLock, blockIndex, aStream); |
1856 | 0 | } |
1857 | 0 | } |
1858 | 0 | } |
1859 | | |
1860 | | void |
1861 | | MediaCache::Truncate() |
1862 | 0 | { |
1863 | 0 | uint32_t end; |
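 | | // Walk backwards from the end of the index, removing trailing free blocks from the free-block list; stop at the first block that is still in use.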
1864 | 0 | for (end = mIndex.Length(); end > 0; --end) { |
1865 | 0 | if (!IsBlockFree(end - 1)) |
1866 | 0 | break; |
1867 | 0 | mFreeBlocks.RemoveBlock(end - 1); |
1868 | 0 | } |
1869 | 0 |
|
1870 | 0 | if (end < mIndex.Length()) { |
1871 | 0 | mIndex.TruncateLength(end); |
1872 | 0 | // XXX We could truncate the cache file here, but we don't seem |
1873 | 0 | // to have a cross-platform API for doing that. At least when all |
1874 | 0 | // streams are closed we shut down the cache, which erases the |
1875 | 0 | // file at that point. |
1876 | 0 | } |
1877 | 0 | } |
1878 | | |
1879 | | void |
1880 | | MediaCache::NoteBlockUsage(AutoLock& aLock, |
1881 | | MediaCacheStream* aStream, |
1882 | | int32_t aBlockIndex, |
1883 | | int64_t aStreamOffset, |
1884 | | MediaCacheStream::ReadMode aMode, |
1885 | | TimeStamp aNow) |
1886 | 0 | { |
1887 | 0 | if (aBlockIndex < 0) { |
1888 | 0 | // this block is not in the cache yet |
1889 | 0 | return; |
1890 | 0 | } |
1891 | 0 | |
1892 | 0 | BlockOwner* bo = GetBlockOwner(aLock, aBlockIndex, aStream); |
1893 | 0 | if (!bo) { |
1894 | 0 | // this block is not in the cache yet |
1895 | 0 | return; |
1896 | 0 | } |
1897 | 0 | |
1898 | 0 | // The following check has to be <= because the stream offset has |
1899 | 0 | // not yet been updated for the data read from this block |
1900 | 0 | NS_ASSERTION(bo->mStreamBlock*BLOCK_SIZE <= aStreamOffset, |
1901 | 0 | "Using a block that's behind the read position?"); |
1902 | 0 |
|
1903 | 0 | GetListForBlock(aLock, bo)->RemoveBlock(aBlockIndex); |
1904 | 0 | bo->mClass = |
1905 | 0 | (aMode == MediaCacheStream::MODE_METADATA || bo->mClass == METADATA_BLOCK) |
1906 | 0 | ? METADATA_BLOCK |
1907 | 0 | : PLAYED_BLOCK; |
1908 | 0 | // Since this is just being used now, it can definitely be at the front |
1909 | 0 | // of mMetadataBlocks or mPlayedBlocks |
1910 | 0 | GetListForBlock(aLock, bo)->AddFirstBlock(aBlockIndex); |
1911 | 0 | bo->mLastUseTime = aNow; |
1912 | 0 | Verify(aLock); |
1913 | 0 | } |
1914 | | |
1915 | | void |
1916 | | MediaCache::NoteSeek(AutoLock& aLock, |
1917 | | MediaCacheStream* aStream, |
1918 | | int64_t aOldOffset) |
1919 | 0 | { |
1920 | 0 | if (aOldOffset < aStream->mStreamOffset) { |
1921 | 0 | // We seeked forward. Convert blocks from readahead to played. |
1922 | 0 | // Any readahead block that intersects the seeked-over range must |
1923 | 0 | // be converted. |
1924 | 0 | int32_t blockIndex = OffsetToBlockIndex(aOldOffset); |
1925 | 0 | if (blockIndex < 0) { |
1926 | 0 | return; |
1927 | 0 | } |
1928 | 0 | int32_t endIndex = |
1929 | 0 | std::min(OffsetToBlockIndex(aStream->mStreamOffset + (BLOCK_SIZE - 1)), |
1930 | 0 | int32_t(aStream->mBlocks.Length())); |
1931 | 0 | if (endIndex < 0) { |
1932 | 0 | return; |
1933 | 0 | } |
1934 | 0 | TimeStamp now = TimeStamp::Now(); |
1935 | 0 | while (blockIndex < endIndex) { |
1936 | 0 | int32_t cacheBlockIndex = aStream->mBlocks[blockIndex]; |
1937 | 0 | if (cacheBlockIndex >= 0) { |
1938 | 0 | // Marking the block used may not be exactly what we want but |
1939 | 0 | // it's simple |
1940 | 0 | NoteBlockUsage(aLock, |
1941 | 0 | aStream, |
1942 | 0 | cacheBlockIndex, |
1943 | 0 | aStream->mStreamOffset, |
1944 | 0 | MediaCacheStream::MODE_PLAYBACK, |
1945 | 0 | now); |
1946 | 0 | } |
1947 | 0 | ++blockIndex; |
1948 | 0 | } |
1949 | 0 | } else { |
1950 | 0 | // We seeked backward. Convert from played to readahead. |
1951 | 0 | // Any played block that is entirely after the start of the seeked-over |
1952 | 0 | // range must be converted. |
1953 | 0 | int32_t blockIndex = |
1954 | 0 | OffsetToBlockIndex(aStream->mStreamOffset + (BLOCK_SIZE - 1)); |
1955 | 0 | if (blockIndex < 0) { |
1956 | 0 | return; |
1957 | 0 | } |
1958 | 0 | int32_t endIndex = |
1959 | 0 | std::min(OffsetToBlockIndex(aOldOffset + (BLOCK_SIZE - 1)), |
1960 | 0 | int32_t(aStream->mBlocks.Length())); |
1961 | 0 | if (endIndex < 0) { |
1962 | 0 | return; |
1963 | 0 | } |
1964 | 0 | while (blockIndex < endIndex) { |
1965 | 0 | MOZ_ASSERT(endIndex > 0); |
1966 | 0 | int32_t cacheBlockIndex = aStream->mBlocks[endIndex - 1]; |
1967 | 0 | if (cacheBlockIndex >= 0) { |
1968 | 0 | BlockOwner* bo = GetBlockOwner(aLock, cacheBlockIndex, aStream); |
1969 | 0 | NS_ASSERTION(bo, "Stream doesn't own its blocks?"); |
1970 | 0 | if (bo->mClass == PLAYED_BLOCK) { |
1971 | 0 | aStream->mPlayedBlocks.RemoveBlock(cacheBlockIndex); |
1972 | 0 | bo->mClass = READAHEAD_BLOCK; |
1973 | 0 | // Adding this as the first block is sure to be OK since |
1974 | 0 | // this must currently be the earliest readahead block |
1975 | 0 | // (that's why we're proceeding backwards from the end of |
1976 | 0 | // the seeked range to the start) |
1977 | 0 | aStream->mReadaheadBlocks.AddFirstBlock(cacheBlockIndex); |
1978 | 0 | Verify(aLock); |
1979 | 0 | } |
1980 | 0 | } |
1981 | 0 | --endIndex; |
1982 | 0 | } |
1983 | 0 | } |
1984 | 0 | } |
1985 | | |
1986 | | void |
1987 | | MediaCacheStream::NotifyLoadID(uint32_t aLoadID) |
1988 | 0 | { |
1989 | 0 | MOZ_ASSERT(aLoadID > 0); |
1990 | 0 |
|
1991 | 0 | nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction( |
1992 | 0 | "MediaCacheStream::NotifyLoadID", |
1993 | 0 | [ client = RefPtr<ChannelMediaResource>(mClient), this, aLoadID ]() { |
1994 | 0 | AutoLock lock(mMediaCache->Monitor()); |
1995 | 0 | mLoadID = aLoadID; |
1996 | 0 | }); |
1997 | 0 | OwnerThread()->Dispatch(r.forget()); |
1998 | 0 | } |
1999 | | |
2000 | | void |
2001 | | MediaCacheStream::NotifyDataStartedInternal(uint32_t aLoadID, |
2002 | | int64_t aOffset, |
2003 | | bool aSeekable, |
2004 | | int64_t aLength) |
2005 | 0 | { |
2006 | 0 | MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); |
2007 | 0 | MOZ_ASSERT(aLoadID > 0); |
2008 | 0 | LOG("Stream %p DataStarted: %" PRId64 " aLoadID=%u aLength=%" PRId64, |
2009 | 0 | this, |
2010 | 0 | aOffset, |
2011 | 0 | aLoadID, |
2012 | 0 | aLength); |
2013 | 0 |
|
2014 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2015 | 0 | NS_WARNING_ASSERTION(aOffset == mSeekTarget || aOffset == mChannelOffset, |
2016 | 0 | "Server is giving us unexpected offset"); |
2017 | 0 | MOZ_ASSERT(aOffset >= 0); |
2018 | 0 | if (aLength >= 0) { |
2019 | 0 | mStreamLength = aLength; |
2020 | 0 | } |
2021 | 0 | mChannelOffset = aOffset; |
2022 | 0 | if (mStreamLength >= 0) { |
2023 | 0 | // If we started reading at a certain offset, then for sure |
2024 | 0 | // the stream is at least that long. |
2025 | 0 | mStreamLength = std::max(mStreamLength, mChannelOffset); |
2026 | 0 | } |
2027 | 0 | mLoadID = aLoadID; |
2028 | 0 |
|
2029 | 0 | MOZ_ASSERT(aOffset == 0 || aSeekable, |
2030 | 0 | "channel offset must be zero when we become non-seekable"); |
2031 | 0 | mIsTransportSeekable = aSeekable; |
2032 | 0 | // Queue an Update since we may change our strategy for dealing |
2033 | 0 | // with this stream |
2034 | 0 | mMediaCache->QueueUpdate(lock); |
2035 | 0 |
|
2036 | 0 | // Reset mSeekTarget since the seek is completed so MediaCache::Update() will |
2037 | 0 | // make decisions based on mChannelOffset instead of mSeekTarget. |
2038 | 0 | mSeekTarget = -1; |
2039 | 0 |
|
2040 | 0 | // Reset these flags since a new load has begun. |
2041 | 0 | mChannelEnded = false; |
2042 | 0 | mDidNotifyDataEnded = false; |
2043 | 0 |
|
2044 | 0 | UpdateDownloadStatistics(lock); |
2045 | 0 | } |
2046 | | |
2047 | | void |
2048 | | MediaCacheStream::NotifyDataStarted(uint32_t aLoadID, |
2049 | | int64_t aOffset, |
2050 | | bool aSeekable, |
2051 | | int64_t aLength) |
2052 | 0 | { |
2053 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
2054 | 0 | MOZ_ASSERT(aLoadID > 0); |
2055 | 0 |
|
2056 | 0 | nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction( |
2057 | 0 | "MediaCacheStream::NotifyDataStarted", |
2058 | 0 | [ =, client = RefPtr<ChannelMediaResource>(mClient) ]() { |
2059 | 0 | NotifyDataStartedInternal(aLoadID, aOffset, aSeekable, aLength); |
2060 | 0 | }); |
2061 | 0 | OwnerThread()->Dispatch(r.forget()); |
2062 | 0 | } |
2063 | | |
2064 | | void |
2065 | | MediaCacheStream::NotifyDataReceived(uint32_t aLoadID, |
2066 | | uint32_t aCount, |
2067 | | const uint8_t* aData) |
2068 | 0 | { |
2069 | 0 | MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); |
2070 | 0 | MOZ_ASSERT(aLoadID > 0); |
2071 | 0 |
|
2072 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2073 | 0 | if (mClosed) { |
2074 | 0 | // Nothing to do if the stream is closed. |
2075 | 0 | return; |
2076 | 0 | } |
2077 | 0 | |
2078 | 0 | LOG("Stream %p DataReceived at %" PRId64 " count=%u aLoadID=%u", |
2079 | 0 | this, |
2080 | 0 | mChannelOffset, |
2081 | 0 | aCount, |
2082 | 0 | aLoadID); |
2083 | 0 |
|
2084 | 0 | if (mLoadID != aLoadID) { |
2085 | 0 | // mChannelOffset is updated to a new position when loading a new channel. |
2086 | 0 | // We should discard the data coming from the old channel so it won't be |
2087 | 0 | // stored at the wrong position.
2088 | 0 | return; |
2089 | 0 | } |
2090 | 0 | |
2091 | 0 | mDownloadStatistics.AddBytes(aCount); |
2092 | 0 |
|
2093 | 0 | // True if we commit any blocks to the cache. |
2094 | 0 | bool cacheUpdated = false; |
2095 | 0 |
|
2096 | 0 | auto source = MakeSpan<const uint8_t>(aData, aCount); |
2097 | 0 |
|
2098 | 0 | // We process the data one block (or part of a block) at a time |
2099 | 0 | while (!source.IsEmpty()) { |
2100 | 0 | // The data we've collected so far in the partial block. |
2101 | 0 | auto partial = MakeSpan<const uint8_t>(mPartialBlockBuffer.get(), |
2102 | 0 | OffsetInBlock(mChannelOffset)); |
2103 | 0 |
|
2104 | 0 | if (partial.IsEmpty()) { |
2105 | 0 | // We've just started filling this buffer so now is a good time |
2106 | 0 | // to clear this flag. |
2107 | 0 | mMetadataInPartialBlockBuffer = false; |
2108 | 0 | } |
2109 | 0 |
|
2110 | 0 | // The number of bytes needed to complete the partial block. |
2111 | 0 | size_t remaining = BLOCK_SIZE - partial.Length(); |
2112 | 0 |
|
2113 | 0 | if (source.Length() >= remaining) { |
2114 | 0 | // We now have a whole block, so write it out.
2115 | 0 | mMediaCache->AllocateAndWriteBlock( |
2116 | 0 | lock, |
2117 | 0 | this, |
2118 | 0 | OffsetToBlockIndexUnchecked(mChannelOffset), |
2119 | 0 | mMetadataInPartialBlockBuffer ? MODE_METADATA : MODE_PLAYBACK, |
2120 | 0 | partial, |
2121 | 0 | source.First(remaining)); |
2122 | 0 | source = source.From(remaining); |
2123 | 0 | mChannelOffset += remaining; |
2124 | 0 | cacheUpdated = true; |
2125 | 0 | } else { |
2126 | 0 | // The buffer to be filled in the partial block. |
2127 | 0 | auto buf = MakeSpan<uint8_t>(mPartialBlockBuffer.get() + partial.Length(), |
2128 | 0 | remaining); |
2129 | 0 | memcpy(buf.Elements(), source.Elements(), source.Length()); |
2130 | 0 | mChannelOffset += source.Length(); |
2131 | 0 | break; |
2132 | 0 | } |
2133 | 0 | } |
2134 | 0 |
|
2135 | 0 | MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID); |
2136 | 0 | while (MediaCacheStream* stream = iter.Next(lock)) { |
2137 | 0 | if (stream->mStreamLength >= 0) { |
2138 | 0 | // The stream is at least as long as what we've read |
2139 | 0 | stream->mStreamLength = std::max(stream->mStreamLength, mChannelOffset); |
2140 | 0 | } |
2141 | 0 | stream->mClient->CacheClientNotifyDataReceived(); |
2142 | 0 | } |
2143 | 0 |
|
2144 | 0 | // XXX it would be fairly easy to optimize things a lot more to |
2145 | 0 | // avoid waking up reader threads unnecessarily |
2146 | 0 | if (cacheUpdated) { |
2147 | 0 | // Wake up the reader who is waiting for the committed blocks. |
2148 | 0 | lock.NotifyAll(); |
2149 | 0 | } |
2150 | 0 | } |
2151 | | |
2152 | | void |
2153 | | MediaCacheStream::FlushPartialBlockInternal(AutoLock& aLock, bool aNotifyAll) |
2154 | 0 | { |
2155 | 0 | MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); |
2156 | 0 |
|
2157 | 0 | int32_t blockIndex = OffsetToBlockIndexUnchecked(mChannelOffset); |
2158 | 0 | int32_t blockOffset = OffsetInBlock(mChannelOffset); |
2159 | 0 | if (blockOffset > 0) { |
2160 | 0 | LOG("Stream %p writing partial block: [%d] bytes; " |
2161 | 0 | "mStreamOffset [%" PRId64 "] mChannelOffset[%" |
2162 | 0 | PRId64 "] mStreamLength [%" PRId64 "] notifying: [%s]", |
2163 | 0 | this, blockOffset, mStreamOffset, mChannelOffset, mStreamLength, |
2164 | 0 | aNotifyAll ? "yes" : "no"); |
2165 | 0 |
|
2166 | 0 | // Write back the partial block |
2167 | 0 | memset(mPartialBlockBuffer.get() + blockOffset, 0, BLOCK_SIZE - blockOffset); |
2168 | 0 | auto data = MakeSpan<const uint8_t>(mPartialBlockBuffer.get(), BLOCK_SIZE); |
2169 | 0 | mMediaCache->AllocateAndWriteBlock( |
2170 | 0 | aLock, |
2171 | 0 | this, |
2172 | 0 | blockIndex, |
2173 | 0 | mMetadataInPartialBlockBuffer ? MODE_METADATA : MODE_PLAYBACK, |
2174 | 0 | data); |
2175 | 0 | } |
2176 | 0 |
|
2177 | 0 | // |mChannelOffset == 0| means download ends with no bytes received. |
2178 | 0 | // We should also wake up those readers who are waiting for data |
2179 | 0 | // that will never come. |
2180 | 0 | if ((blockOffset > 0 || mChannelOffset == 0) && aNotifyAll) { |
2181 | 0 | // Wake up readers who may be waiting for this data |
2182 | 0 | aLock.NotifyAll(); |
2183 | 0 | } |
2184 | 0 | } |
2185 | | |
2186 | | void |
2187 | | MediaCacheStream::UpdateDownloadStatistics(AutoLock&) |
2188 | 0 | { |
2189 | 0 | if (mChannelEnded || mClientSuspended) { |
2190 | 0 | mDownloadStatistics.Stop(); |
2191 | 0 | } else { |
2192 | 0 | mDownloadStatistics.Start(); |
2193 | 0 | } |
2194 | 0 | } |
2195 | | |
2196 | | void |
2197 | | MediaCacheStream::NotifyDataEndedInternal(uint32_t aLoadID, nsresult aStatus) |
2198 | 0 | { |
2199 | 0 | MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); |
2200 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2201 | 0 |
|
2202 | 0 | if (mClosed || aLoadID != mLoadID) { |
2203 | 0 | // Nothing to do if the stream is closed or a new load has begun. |
2204 | 0 | return; |
2205 | 0 | } |
2206 | 0 | |
2207 | 0 | // It is prudent to update channel/cache status before calling |
2208 | 0 | // CacheClientNotifyDataEnded() which will read |mChannelEnded|. |
2209 | 0 | mChannelEnded = true; |
2210 | 0 | mMediaCache->QueueUpdate(lock); |
2211 | 0 |
|
2212 | 0 | UpdateDownloadStatistics(lock); |
2213 | 0 |
|
2214 | 0 | if (NS_FAILED(aStatus)) { |
2215 | 0 | // Notify the client about this network error. |
2216 | 0 | mDidNotifyDataEnded = true; |
2217 | 0 | mNotifyDataEndedStatus = aStatus; |
2218 | 0 | mClient->CacheClientNotifyDataEnded(aStatus); |
2219 | 0 | // Wake up the readers so they can fail gracefully. |
2220 | 0 | lock.NotifyAll(); |
2221 | 0 | return; |
2222 | 0 | } |
2223 | 0 | |
2224 | 0 | // Note we don't flush the partial block when the download ends abnormally,
2225 | 0 | // because the padding zeros would give wrong data to other streams.
2226 | 0 | FlushPartialBlockInternal(lock, true); |
2227 | 0 |
|
2228 | 0 | MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID); |
2229 | 0 | while (MediaCacheStream* stream = iter.Next(lock)) { |
2230 | 0 | // We read the whole stream, so remember the true length |
2231 | 0 | stream->mStreamLength = mChannelOffset; |
2232 | 0 | if (!stream->mDidNotifyDataEnded) { |
2233 | 0 | stream->mDidNotifyDataEnded = true; |
2234 | 0 | stream->mNotifyDataEndedStatus = aStatus; |
2235 | 0 | stream->mClient->CacheClientNotifyDataEnded(aStatus); |
2236 | 0 | } |
2237 | 0 | } |
2238 | 0 | } |
2239 | | |
2240 | | void |
2241 | | MediaCacheStream::NotifyDataEnded(uint32_t aLoadID, nsresult aStatus) |
2242 | 0 | { |
2243 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
2244 | 0 | MOZ_ASSERT(aLoadID > 0); |
2245 | 0 |
|
2246 | 0 | RefPtr<ChannelMediaResource> client = mClient; |
2247 | 0 | nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction( |
2248 | 0 | "MediaCacheStream::NotifyDataEnded", [client, this, aLoadID, aStatus]() { |
2249 | 0 | NotifyDataEndedInternal(aLoadID, aStatus); |
2250 | 0 | }); |
2251 | 0 | OwnerThread()->Dispatch(r.forget()); |
2252 | 0 | } |
2253 | | |
2254 | | void |
2255 | | MediaCacheStream::NotifyClientSuspended(bool aSuspended) |
2256 | 0 | { |
2257 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
2258 | 0 |
|
2259 | 0 | RefPtr<ChannelMediaResource> client = mClient; |
2260 | 0 | nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction( |
2261 | 0 | "MediaCacheStream::NotifyClientSuspended", [client, this, aSuspended]() { |
2262 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2263 | 0 | if (!mClosed && mClientSuspended != aSuspended) { |
2264 | 0 | mClientSuspended = aSuspended; |
2265 | 0 | // mClientSuspended changes the decision of reading streams. |
2266 | 0 | mMediaCache->QueueUpdate(lock); |
2267 | 0 | UpdateDownloadStatistics(lock); |
2268 | 0 | if (mClientSuspended) { |
2269 | 0 | // Download is suspended. Wake up the readers that might be able to |
2270 | 0 | // get data from the partial block. |
2271 | 0 | lock.NotifyAll(); |
2272 | 0 | } |
2273 | 0 | } |
2274 | 0 | }); |
2275 | 0 | OwnerThread()->Dispatch(r.forget()); |
2276 | 0 | } |
2277 | | |
2278 | | void |
2279 | | MediaCacheStream::NotifyResume() |
2280 | 0 | { |
2281 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
2282 | 0 |
|
2283 | 0 | nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction( |
2284 | 0 | "MediaCacheStream::NotifyResume", |
2285 | 0 | [ this, client = RefPtr<ChannelMediaResource>(mClient) ]() { |
2286 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2287 | 0 | if (mClosed) { |
2288 | 0 | return; |
2289 | 0 | } |
2290 | 0 | // Don't resume the download if we are already at the end of the stream,
2291 | 0 | // because the seek would fail and be wasted anyway.
2292 | 0 | int64_t offset = mSeekTarget != -1 ? mSeekTarget : mChannelOffset; |
2293 | 0 | if (mStreamLength < 0 || offset < mStreamLength) { |
2294 | 0 | mClient->CacheClientSeek(offset, false); |
2295 | 0 | // DownloadResumed() will be notified when a new channel is opened. |
2296 | 0 | } |
2297 | 0 | // The channel remains dead. If we want to read some other data in the |
2298 | 0 | // future, CacheClientSeek() will be called to reopen the channel. |
2299 | 0 | }); |
2300 | 0 | OwnerThread()->Dispatch(r.forget()); |
2301 | 0 | } |
2302 | | |
2303 | | MediaCacheStream::~MediaCacheStream() |
2304 | 0 | { |
2305 | 0 | MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread"); |
2306 | 0 | MOZ_ASSERT(!mPinCount, "Unbalanced Pin"); |
2307 | 0 | MOZ_ASSERT(!mMediaCache || mClosed); |
2308 | 0 |
|
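 | | // Report the stream length in KB for telemetry, clamped to [0, UINT32_MAX]; an unknown length (-1) is reported as 0.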
2309 | 0 | uint32_t lengthKb = uint32_t( |
2310 | 0 | std::min(std::max(mStreamLength, int64_t(0)) / 1024, int64_t(UINT32_MAX))); |
2311 | 0 | LOG("MediaCacheStream::~MediaCacheStream(this=%p) " |
2312 | 0 | "MEDIACACHESTREAM_LENGTH_KB=%" PRIu32, |
2313 | 0 | this, |
2314 | 0 | lengthKb); |
2315 | 0 | Telemetry::Accumulate(Telemetry::HistogramID::MEDIACACHESTREAM_LENGTH_KB, |
2316 | 0 | lengthKb); |
2317 | 0 | } |
2318 | | |
2319 | | bool |
2320 | | MediaCacheStream::AreAllStreamsForResourceSuspended(AutoLock& aLock) |
2321 | 0 | { |
2322 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2323 | 0 |
|
2324 | 0 | MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID); |
2325 | 0 | // Look for a stream that's able to read the data we need |
2326 | 0 | int64_t dataOffset = -1; |
2327 | 0 | while (MediaCacheStream* stream = iter.Next(aLock)) { |
2328 | 0 | if (stream->mCacheSuspended || stream->mChannelEnded || stream->mClosed) { |
2329 | 0 | continue; |
2330 | 0 | } |
2331 | 0 | if (dataOffset < 0) { |
2332 | 0 | dataOffset = GetCachedDataEndInternal(aLock, mStreamOffset); |
2333 | 0 | } |
2334 | 0 | // Ignore streams that are reading beyond the data we need |
2335 | 0 | if (stream->mChannelOffset > dataOffset) { |
2336 | 0 | continue; |
2337 | 0 | } |
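 | | // Found a stream that is still active and reading at or before the data we need, so not all streams are suspended.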
2338 | 0 | return false; |
2339 | 0 | } |
2340 | 0 |
|
2341 | 0 | return true; |
2342 | 0 | } |
2343 | | |
2344 | | void |
2345 | | MediaCacheStream::Close() |
2346 | 0 | { |
2347 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
2348 | 0 | if (!mMediaCache) { |
2349 | 0 | return; |
2350 | 0 | } |
2351 | 0 | OwnerThread()->Dispatch(NS_NewRunnableFunction( |
2352 | 0 | "MediaCacheStream::Close", |
2353 | 0 | [ this, client = RefPtr<ChannelMediaResource>(mClient) ]() { |
2354 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2355 | 0 | CloseInternal(lock); |
2356 | 0 | })); |
2357 | 0 | } |
2358 | | |
2359 | | void |
2360 | | MediaCacheStream::CloseInternal(AutoLock& aLock) |
2361 | 0 | { |
2362 | 0 | MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); |
2363 | 0 |
|
2364 | 0 | if (mClosed) { |
2365 | 0 | return; |
2366 | 0 | } |
2367 | 0 | |
2368 | 0 | // Closing a stream will change the return value of |
2369 | 0 | // MediaCacheStream::AreAllStreamsForResourceSuspended as well as |
2370 | 0 | // ChannelMediaResource::IsSuspendedByCache. Let's notify it. |
2371 | 0 | mMediaCache->QueueSuspendedStatusUpdate(aLock, mResourceID); |
2372 | 0 |
|
2373 | 0 | mClosed = true; |
2374 | 0 | mMediaCache->ReleaseStreamBlocks(aLock, this); |
2375 | 0 | mMediaCache->ReleaseStream(aLock, this); |
2376 | 0 | // Wake up any blocked readers |
2377 | 0 | aLock.NotifyAll(); |
2378 | 0 |
|
2379 | 0 | // Queue an Update since we may have created more free space. |
2380 | 0 | mMediaCache->QueueUpdate(aLock); |
2381 | 0 | } |
2382 | | |
2383 | | void |
2384 | | MediaCacheStream::Pin() |
2385 | 0 | { |
2386 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2387 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2388 | 0 | ++mPinCount; |
2389 | 0 | // Queue an Update since we may no longer want to read more into the |
2390 | 0 | // cache, if this stream's blocks have become non-evictable
2391 | 0 | mMediaCache->QueueUpdate(lock); |
2392 | 0 | } |
2393 | | |
2394 | | void |
2395 | | MediaCacheStream::Unpin() |
2396 | 0 | { |
2397 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2398 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2399 | 0 | NS_ASSERTION(mPinCount > 0, "Unbalanced Unpin"); |
2400 | 0 | --mPinCount; |
2401 | 0 | // Queue an Update since we may be able to read more into the |
2402 | 0 | // cache, if this stream's blocks have become evictable
2403 | 0 | mMediaCache->QueueUpdate(lock); |
2404 | 0 | } |
2405 | | |
2406 | | int64_t |
2407 | | MediaCacheStream::GetLength() const |
2408 | 0 | { |
2409 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2410 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2411 | 0 | return mStreamLength; |
2412 | 0 | } |
2413 | | |
2414 | | MediaCacheStream::LengthAndOffset |
2415 | | MediaCacheStream::GetLengthAndOffset() const |
2416 | 0 | { |
2417 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
2418 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2419 | 0 | return { mStreamLength, mChannelOffset }; |
2420 | 0 | } |
2421 | | |
2422 | | int64_t |
2423 | | MediaCacheStream::GetNextCachedData(int64_t aOffset) |
2424 | 0 | { |
2425 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2426 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2427 | 0 | return GetNextCachedDataInternal(lock, aOffset); |
2428 | 0 | } |
2429 | | |
2430 | | int64_t |
2431 | | MediaCacheStream::GetCachedDataEnd(int64_t aOffset) |
2432 | 0 | { |
2433 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2434 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2435 | 0 | return GetCachedDataEndInternal(lock, aOffset); |
2436 | 0 | } |
2437 | | |
2438 | | bool |
2439 | | MediaCacheStream::IsDataCachedToEndOfStream(int64_t aOffset) |
2440 | 0 | { |
2441 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2442 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2443 | 0 | if (mStreamLength < 0) |
2444 | 0 | return false; |
2445 | 0 | return GetCachedDataEndInternal(lock, aOffset) >= mStreamLength; |
2446 | 0 | } |
2447 | | |
2448 | | int64_t |
2449 | | MediaCacheStream::GetCachedDataEndInternal(AutoLock&, int64_t aOffset) |
2450 | 0 | { |
2451 | 0 | int32_t blockIndex = OffsetToBlockIndex(aOffset); |
2452 | 0 | if (blockIndex < 0) { |
2453 | 0 | return aOffset; |
2454 | 0 | } |
2455 | 0 | while (size_t(blockIndex) < mBlocks.Length() && mBlocks[blockIndex] != -1) { |
2456 | 0 | ++blockIndex; |
2457 | 0 | } |
2458 | 0 | int64_t result = blockIndex*BLOCK_SIZE; |
2459 | 0 | if (blockIndex == OffsetToBlockIndexUnchecked(mChannelOffset)) { |
2460 | 0 | // The block containing mChannelOffset may be partially read but not |
2461 | 0 | // yet committed to the main cache |
2462 | 0 | result = mChannelOffset; |
2463 | 0 | } |
2464 | 0 | if (mStreamLength >= 0) { |
2465 | 0 | // The last block in the cache may only be partially valid, so limit |
2466 | 0 | // the cached range to the stream length |
2467 | 0 | result = std::min(result, mStreamLength); |
2468 | 0 | } |
2469 | 0 | return std::max(result, aOffset); |
2470 | 0 | } |
2471 | | |
2472 | | int64_t |
2473 | | MediaCacheStream::GetNextCachedDataInternal(AutoLock&, int64_t aOffset) |
2474 | 0 | { |
2475 | 0 | if (aOffset == mStreamLength) |
2476 | 0 | return -1; |
2477 | 0 | |
2478 | 0 | int32_t startBlockIndex = OffsetToBlockIndex(aOffset); |
2479 | 0 | if (startBlockIndex < 0) { |
2480 | 0 | return -1; |
2481 | 0 | } |
2482 | 0 | int32_t channelBlockIndex = OffsetToBlockIndexUnchecked(mChannelOffset); |
2483 | 0 |
|
2484 | 0 | if (startBlockIndex == channelBlockIndex && |
2485 | 0 | aOffset < mChannelOffset) { |
2486 | 0 | // The block containing mChannelOffset is partially read, but not |
2487 | 0 | // yet committed to the main cache. aOffset lies in the partially |
2488 | 0 | // read portion, thus it is effectively cached. |
2489 | 0 | return aOffset; |
2490 | 0 | } |
2491 | 0 | |
2492 | 0 | if (size_t(startBlockIndex) >= mBlocks.Length()) |
2493 | 0 | return -1; |
2494 | 0 | |
2495 | 0 | // Is the current block cached? |
2496 | 0 | if (mBlocks[startBlockIndex] != -1) |
2497 | 0 | return aOffset; |
2498 | 0 | |
2499 | 0 | // Count the number of uncached blocks |
2500 | 0 | bool hasPartialBlock = OffsetInBlock(mChannelOffset) != 0; |
2501 | 0 | int32_t blockIndex = startBlockIndex + 1; |
2502 | 0 | while (true) { |
2503 | 0 | if ((hasPartialBlock && blockIndex == channelBlockIndex) || |
2504 | 0 | (size_t(blockIndex) < mBlocks.Length() && mBlocks[blockIndex] != -1)) { |
2505 | 0 | // We are at the incoming channel block, which has data in it,
2506 | 0 | // or at a cached block. Return the offset of the start of the block.
2507 | 0 | return blockIndex * BLOCK_SIZE; |
2508 | 0 | } |
2509 | 0 | |
2510 | 0 | // No more cached blocks? |
2511 | 0 | if (size_t(blockIndex) >= mBlocks.Length()) |
2512 | 0 | return -1; |
2513 | 0 | |
2514 | 0 | ++blockIndex; |
2515 | 0 | } |
2516 | 0 |
|
2517 | 0 | MOZ_ASSERT_UNREACHABLE("Should return in loop"); |
2518 | 0 | return -1; |
2519 | 0 | } |
2520 | | |
2521 | | void |
2522 | | MediaCacheStream::SetReadMode(ReadMode aMode) |
2523 | 0 | { |
2524 | 0 | nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction( |
2525 | 0 | "MediaCacheStream::SetReadMode", |
2526 | 0 | [ this, client = RefPtr<ChannelMediaResource>(mClient), aMode ]() { |
2527 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2528 | 0 | if (!mClosed && mCurrentMode != aMode) { |
2529 | 0 | mCurrentMode = aMode; |
2530 | 0 | mMediaCache->QueueUpdate(lock); |
2531 | 0 | } |
2532 | 0 | }); |
2533 | 0 | OwnerThread()->Dispatch(r.forget()); |
2534 | 0 | } |
2535 | | |
2536 | | void |
2537 | | MediaCacheStream::SetPlaybackRate(uint32_t aBytesPerSecond) |
2538 | 0 | { |
2539 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2540 | 0 | MOZ_ASSERT(aBytesPerSecond > 0, "Zero playback rate not allowed"); |
2541 | 0 |
|
2542 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2543 | 0 | if (!mClosed && mPlaybackBytesPerSecond != aBytesPerSecond) { |
2544 | 0 | mPlaybackBytesPerSecond = aBytesPerSecond; |
2545 | 0 | mMediaCache->QueueUpdate(lock); |
2546 | 0 | } |
2547 | 0 | } |
2548 | | |
2549 | | nsresult |
2550 | | MediaCacheStream::Seek(AutoLock& aLock, int64_t aOffset) |
2551 | 0 | { |
2552 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2553 | 0 |
|
2554 | 0 | if (!IsOffsetAllowed(aOffset)) { |
2555 | 0 | return NS_ERROR_ILLEGAL_VALUE; |
2556 | 0 | } |
2557 | 0 | if (mClosed) { |
2558 | 0 | return NS_ERROR_ABORT; |
2559 | 0 | } |
2560 | 0 | |
2561 | 0 | int64_t oldOffset = mStreamOffset; |
2562 | 0 | mStreamOffset = aOffset; |
2563 | 0 | LOG("Stream %p Seek to %" PRId64, this, mStreamOffset); |
2564 | 0 | mMediaCache->NoteSeek(aLock, this, oldOffset); |
2565 | 0 | mMediaCache->QueueUpdate(aLock); |
2566 | 0 | return NS_OK; |
2567 | 0 | } |
2568 | | |
2569 | | void |
2570 | | MediaCacheStream::ThrottleReadahead(bool bThrottle) |
2571 | 0 | { |
2572 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
2573 | 0 |
|
2574 | 0 | nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction( |
2575 | 0 | "MediaCacheStream::ThrottleReadahead", |
2576 | 0 | [ client = RefPtr<ChannelMediaResource>(mClient), this, bThrottle ]() { |
2577 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2578 | 0 | if (!mClosed && mThrottleReadahead != bThrottle) { |
2579 | 0 | LOGI("Stream %p ThrottleReadahead %d", this, bThrottle); |
2580 | 0 | mThrottleReadahead = bThrottle; |
2581 | 0 | mMediaCache->QueueUpdate(lock); |
2582 | 0 | } |
2583 | 0 | }); |
2584 | 0 | OwnerThread()->Dispatch(r.forget()); |
2585 | 0 | } |
2586 | | |
2587 | | uint32_t |
2588 | | MediaCacheStream::ReadPartialBlock(AutoLock&, |
2589 | | int64_t aOffset, |
2590 | | Span<char> aBuffer) |
2591 | 0 | { |
2592 | 0 | MOZ_ASSERT(IsOffsetAllowed(aOffset)); |
2593 | 0 |
|
2594 | 0 | if (OffsetToBlockIndexUnchecked(mChannelOffset) != |
2595 | 0 | OffsetToBlockIndexUnchecked(aOffset) || |
2596 | 0 | aOffset >= mChannelOffset) { |
2597 | 0 | // Not in the partial block or no data to read. |
2598 | 0 | return 0; |
2599 | 0 | } |
2600 | 0 | |
2601 | 0 | auto source = MakeSpan<const uint8_t>( |
2602 | 0 | mPartialBlockBuffer.get() + OffsetInBlock(aOffset), |
2603 | 0 | OffsetInBlock(mChannelOffset) - OffsetInBlock(aOffset)); |
2604 | 0 | // We have |source.Length() <= BLOCK_SIZE < INT32_MAX| to guarantee |
2605 | 0 | // that |bytesToRead| can fit into a uint32_t. |
2606 | 0 | uint32_t bytesToRead = std::min(aBuffer.Length(), source.Length()); |
2607 | 0 | memcpy(aBuffer.Elements(), source.Elements(), bytesToRead); |
2608 | 0 | return bytesToRead; |
2609 | 0 | } |
2610 | | |
2611 | | Result<uint32_t, nsresult> |
2612 | | MediaCacheStream::ReadBlockFromCache(AutoLock& aLock, |
2613 | | int64_t aOffset, |
2614 | | Span<char> aBuffer, |
2615 | | bool aNoteBlockUsage) |
2616 | 0 | { |
2617 | 0 | MOZ_ASSERT(IsOffsetAllowed(aOffset)); |
2618 | 0 |
|
2619 | 0 | // OffsetToBlockIndexUnchecked() is always non-negative. |
2620 | 0 | uint32_t index = OffsetToBlockIndexUnchecked(aOffset); |
2621 | 0 | int32_t cacheBlock = index < mBlocks.Length() ? mBlocks[index] : -1; |
2622 | 0 | if (cacheBlock < 0 || |
2623 | 0 | (mStreamLength >= 0 && aOffset >= mStreamLength)) { |
2624 | 0 | // Not in the cache. |
2625 | 0 | return 0; |
2626 | 0 | } |
2627 | 0 | |
2628 | 0 | if (aBuffer.Length() > size_t(BLOCK_SIZE)) { |
2629 | 0 | // Clamp the buffer to avoid overflow below since we will read at most |
2630 | 0 | // BLOCK_SIZE bytes. |
2631 | 0 | aBuffer = aBuffer.First(BLOCK_SIZE); |
2632 | 0 | } |
2633 | 0 |
|
2634 | 0 | if (mStreamLength >= 0 && |
2635 | 0 | int64_t(aBuffer.Length()) > mStreamLength - aOffset) { |
2636 | 0 | // Clamp reads to stream's length |
2637 | 0 | aBuffer = aBuffer.First(mStreamLength - aOffset); |
2638 | 0 | } |
2639 | 0 |
|
2640 | 0 | // |BLOCK_SIZE - OffsetInBlock(aOffset)| <= BLOCK_SIZE |
2641 | 0 | int32_t bytesToRead = |
2642 | 0 | std::min<int32_t>(BLOCK_SIZE - OffsetInBlock(aOffset), aBuffer.Length()); |
2643 | 0 | int32_t bytesRead = 0; |
2644 | 0 | nsresult rv = |
2645 | 0 | mMediaCache->ReadCacheFile(aLock, |
2646 | 0 | cacheBlock * BLOCK_SIZE + OffsetInBlock(aOffset), |
2647 | 0 | aBuffer.Elements(), |
2648 | 0 | bytesToRead, |
2649 | 0 | &bytesRead); |
2650 | 0 |
|
2651 | 0 | // Ensure |cacheBlock * BLOCK_SIZE + OffsetInBlock(aOffset)| won't overflow. |
2652 | 0 | static_assert(INT64_MAX >= BLOCK_SIZE * (uint32_t(INT32_MAX) + 1), |
2653 | 0 | "BLOCK_SIZE too large!"); |
2654 | 0 |
|
2655 | 0 | if (NS_FAILED(rv)) { |
2656 | 0 | nsCString name; |
2657 | 0 | GetErrorName(rv, name); |
2658 | 0 | LOGE("Stream %p ReadCacheFile failed, rv=%s", this, name.Data()); |
2659 | 0 | return mozilla::Err(rv); |
2660 | 0 | } |
2661 | 0 |
|
2662 | 0 | if (aNoteBlockUsage) { |
2663 | 0 | mMediaCache->NoteBlockUsage( |
2664 | 0 | aLock, this, cacheBlock, aOffset, mCurrentMode, TimeStamp::Now()); |
2665 | 0 | } |
2666 | 0 |
|
2667 | 0 | return bytesRead; |
2668 | 0 | } |
2669 | | |
2670 | | nsresult |
2671 | | MediaCacheStream::Read(AutoLock& aLock, char* aBuffer, uint32_t aCount, uint32_t* aBytes) |
2672 | 0 | { |
2673 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2674 | 0 |
|
2675 | 0 | // Cache the offset in case it is changed again when we are waiting for the |
2676 | 0 | // monitor to be notified to avoid reading at the wrong position. |
2677 | 0 | auto streamOffset = mStreamOffset; |
2678 | 0 |
|
2679 | 0 | // The buffer we are about to fill. |
2680 | 0 | auto buffer = MakeSpan<char>(aBuffer, aCount); |
2681 | 0 |
|
2682 | 0 | // Read one block (or part of a block) at a time |
2683 | 0 | while (!buffer.IsEmpty()) { |
2684 | 0 | if (mClosed) { |
2685 | 0 | return NS_ERROR_ABORT; |
2686 | 0 | } |
2687 | 0 | |
2688 | 0 | if (!IsOffsetAllowed(streamOffset)) { |
2689 | 0 | LOGE("Stream %p invalid offset=%" PRId64, this, streamOffset); |
2690 | 0 | return NS_ERROR_ILLEGAL_VALUE; |
2691 | 0 | } |
2692 | 0 |
|
2693 | 0 | if (mStreamLength >= 0 && streamOffset >= mStreamLength) { |
2694 | 0 | // Don't try to read beyond the end of the stream |
2695 | 0 | break; |
2696 | 0 | } |
2697 | 0 | |
2698 | 0 | Result<uint32_t, nsresult> rv = ReadBlockFromCache( |
2699 | 0 | aLock, streamOffset, buffer, true /* aNoteBlockUsage */); |
2700 | 0 | if (rv.isErr()) { |
2701 | 0 | return rv.unwrapErr(); |
2702 | 0 | } |
2703 | 0 | |
2704 | 0 | uint32_t bytes = rv.unwrap(); |
2705 | 0 | if (bytes > 0) { |
2706 | 0 | // Got data from the cache successfully. Read next block. |
2707 | 0 | streamOffset += bytes; |
2708 | 0 | buffer = buffer.From(bytes); |
2709 | 0 | continue; |
2710 | 0 | } |
2711 | 0 | |
2712 | 0 |     // See if we can use the data in the partial block of any stream reading
2713 | 0 |     // this resource. Note we use the partial block only when it is complete,
2714 | 0 |     // i.e. the stream has reached EOS.
2715 | 0 | bool foundDataInPartialBlock = false; |
2716 | 0 | MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID); |
2717 | 0 | while (MediaCacheStream* stream = iter.Next(aLock)) { |
2718 | 0 | if (OffsetToBlockIndexUnchecked(stream->mChannelOffset) == |
2719 | 0 | OffsetToBlockIndexUnchecked(streamOffset) && |
2720 | 0 | stream->mChannelOffset == stream->mStreamLength) { |
2721 | 0 | uint32_t bytes = stream->ReadPartialBlock(aLock, streamOffset, buffer); |
2722 | 0 | streamOffset += bytes; |
2723 | 0 | buffer = buffer.From(bytes); |
2724 | 0 | foundDataInPartialBlock = true; |
2725 | 0 | break; |
2726 | 0 | } |
2727 | 0 | } |
2728 | 0 | if (foundDataInPartialBlock) { |
2729 | 0 |       // Break since we've reached EOS.
2730 | 0 | break; |
2731 | 0 | } |
2732 | 0 | |
2733 | 0 | if (mDidNotifyDataEnded && NS_FAILED(mNotifyDataEndedStatus)) { |
2734 | 0 |       // Since the download ended abnormally, there is no point in waiting for
2735 | 0 |       // new data to arrive. Check the partial block to read as many bytes as
2736 | 0 |       // possible before exiting this function.
2737 | 0 | bytes = ReadPartialBlock(aLock, streamOffset, buffer); |
2738 | 0 | streamOffset += bytes; |
2739 | 0 | buffer = buffer.From(bytes); |
2740 | 0 | break; |
2741 | 0 | } |
2742 | 0 | |
2743 | 0 | if (mStreamOffset != streamOffset) { |
2744 | 0 | // Update mStreamOffset before we drop the lock. We need to run |
2745 | 0 | // Update() again since stream reading strategy might have changed. |
2746 | 0 | mStreamOffset = streamOffset; |
2747 | 0 | mMediaCache->QueueUpdate(aLock); |
2748 | 0 | } |
2749 | 0 |
2750 | 0 | // No data to read, so block |
2751 | 0 | aLock.Wait(); |
2752 | 0 | continue; |
2753 | 0 | } |
2754 | 0 |
2755 | 0 | uint32_t count = buffer.Elements() - aBuffer; |
2756 | 0 | *aBytes = count; |
2757 | 0 | if (count == 0) { |
2758 | 0 | return NS_OK; |
2759 | 0 | } |
2760 | 0 | |
2761 | 0 | // Some data was read, so queue an update since block priorities may |
2762 | 0 | // have changed |
2763 | 0 | mMediaCache->QueueUpdate(aLock); |
2764 | 0 |
2765 | 0 | LOG("Stream %p Read at %" PRId64 " count=%d", this, streamOffset-count, count); |
2766 | 0 | mStreamOffset = streamOffset; |
2767 | 0 | return NS_OK; |
2768 | 0 | } |
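// --- Editorial sketch, not part of MediaCache.cpp ---------------------------
// Read() consumes its destination from the front: whichever source provides
// data (a cached block, another stream's completed partial block, or our own
// partial block) fills a prefix of |buffer|, and the Span is advanced past it
// with From(). The same consumption loop in isolation; SketchProducer is a
// hypothetical callable that writes into the span and returns the byte count
// (0 meaning "no data available right now"):
template <typename SketchProducer>
static size_t SketchFillFromFront(Span<char> aDest, SketchProducer&& aProduce)
{
  size_t filled = 0;
  while (!aDest.IsEmpty()) {
    size_t got = aProduce(aDest); // writes at most aDest.Length() bytes
    if (got == 0) {
      break; // Read() instead blocks on the cache monitor at this point
    }
    aDest = aDest.From(got);
    filled += got;
  }
  return filled;
}
// -----------------------------------------------------------------------------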
2769 | | |
2770 | | nsresult |
2771 | | MediaCacheStream::ReadAt(int64_t aOffset, char* aBuffer, |
2772 | | uint32_t aCount, uint32_t* aBytes) |
2773 | 0 | { |
2774 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2775 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2776 | 0 | nsresult rv = Seek(lock, aOffset); |
2777 | 0 | if (NS_FAILED(rv)) return rv; |
2778 | 0 | return Read(lock, aBuffer, aCount, aBytes); |
2779 | 0 | } |
2780 | | |
2781 | | nsresult |
2782 | | MediaCacheStream::ReadFromCache(char* aBuffer, int64_t aOffset, uint32_t aCount) |
2783 | 0 | { |
2784 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2785 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2786 | 0 |
2787 | 0 | // The buffer we are about to fill. |
2788 | 0 | auto buffer = MakeSpan<char>(aBuffer, aCount); |
2789 | 0 |
2790 | 0 | // Read one block (or part of a block) at a time |
2791 | 0 | int64_t streamOffset = aOffset; |
2792 | 0 | while (!buffer.IsEmpty()) { |
2793 | 0 | if (mClosed) { |
2794 | 0 |       // We need to check |mClosed| in each iteration since it might have been
2795 | 0 |       // changed during the previous call to |mMediaCache->ReadCacheFile|.
2796 | 0 | return NS_ERROR_FAILURE; |
2797 | 0 | } |
2798 | 0 | |
2799 | 0 | if (!IsOffsetAllowed(streamOffset)) { |
2800 | 0 | LOGE("Stream %p invalid offset=%" PRId64, this, streamOffset); |
2801 | 0 | return NS_ERROR_ILLEGAL_VALUE; |
2802 | 0 | } |
2803 | 0 |
2804 | 0 | Result<uint32_t, nsresult> rv = |
2805 | 0 | ReadBlockFromCache(lock, streamOffset, buffer); |
2806 | 0 | if (rv.isErr()) { |
2807 | 0 | return rv.unwrapErr(); |
2808 | 0 | } |
2809 | 0 | |
2810 | 0 | uint32_t bytes = rv.unwrap(); |
2811 | 0 | if (bytes > 0) { |
2812 | 0 | // Read data from the cache successfully. Let's try next block. |
2813 | 0 | streamOffset += bytes; |
2814 | 0 | buffer = buffer.From(bytes); |
2815 | 0 | continue; |
2816 | 0 | } |
2817 | 0 | |
2818 | 0 | // The partial block is our last chance to get data. |
2819 | 0 | bytes = ReadPartialBlock(lock, streamOffset, buffer); |
2820 | 0 | if (bytes < buffer.Length()) { |
2821 | 0 | // Not enough data to read. |
2822 | 0 | return NS_ERROR_FAILURE; |
2823 | 0 | } |
2824 | 0 | |
2825 | 0 |     // Return since we've got all the requested bytes.
2826 | 0 | return NS_OK; |
2827 | 0 | } |
2828 | 0 |
|
2829 | 0 | return NS_OK; |
2830 | 0 | } |
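// --- Editorial sketch, not part of MediaCache.cpp ---------------------------
// ReadFromCache() is all-or-nothing: it succeeds only when the entire requested
// range can be served from cached blocks (plus a completed partial block) and
// it never waits for more data to arrive. A hypothetical off-main-thread caller
// probing for a fully cached fixed-size header might use it like this:
static bool SketchTryReadCachedHeader(MediaCacheStream* aStream,
                                      char (&aHeader)[64])
{
  // On failure the first 64 bytes are not fully cached yet; a caller could then
  // fall back to ReadAt(), which may block until the data arrives.
  return NS_SUCCEEDED(aStream->ReadFromCache(aHeader, 0, sizeof(aHeader)));
}
// -----------------------------------------------------------------------------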
2831 | | |
2832 | | nsresult |
2833 | | MediaCacheStream::Init(int64_t aContentLength) |
2834 | 0 | { |
2835 | 0 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
2836 | 0 | MOZ_ASSERT(!mMediaCache, "Has been initialized."); |
2837 | 0 |
2838 | 0 | if (aContentLength > 0) { |
2839 | 0 | uint32_t length = uint32_t(std::min(aContentLength, int64_t(UINT32_MAX))); |
2840 | 0 | LOG("MediaCacheStream::Init(this=%p) " |
2841 | 0 | "MEDIACACHESTREAM_NOTIFIED_LENGTH=%" PRIu32, |
2842 | 0 | this, |
2843 | 0 | length); |
2844 | 0 | Telemetry::Accumulate( |
2845 | 0 | Telemetry::HistogramID::MEDIACACHESTREAM_NOTIFIED_LENGTH, length); |
2846 | 0 |
2847 | 0 | mStreamLength = aContentLength; |
2848 | 0 | } |
2849 | 0 |
2850 | 0 | mMediaCache = MediaCache::GetMediaCache(aContentLength); |
2851 | 0 | if (!mMediaCache) { |
2852 | 0 | return NS_ERROR_FAILURE; |
2853 | 0 | } |
2854 | 0 | |
2855 | 0 | OwnerThread()->Dispatch(NS_NewRunnableFunction( |
2856 | 0 | "MediaCacheStream::Init", |
2857 | 0 | [ this, res = RefPtr<ChannelMediaResource>(mClient) ]() { |
2858 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2859 | 0 | mMediaCache->OpenStream(lock, this); |
2860 | 0 | })); |
2861 | 0 |
2862 | 0 | return NS_OK; |
2863 | 0 | } |
2864 | | |
2865 | | void |
2866 | | MediaCacheStream::InitAsClone(MediaCacheStream* aOriginal) |
2867 | 0 | { |
2868 | 0 | MOZ_ASSERT(!mMediaCache, "Has been initialized."); |
2869 | 0 | MOZ_ASSERT(aOriginal->mMediaCache, "Don't clone an uninitialized stream."); |
2870 | 0 |
2871 | 0 |   // Use the same MediaCache as the stream we are cloning.
2872 | 0 | mMediaCache = aOriginal->mMediaCache; |
2873 | 0 | OwnerThread()->Dispatch( |
2874 | 0 | NS_NewRunnableFunction("MediaCacheStream::InitAsClone", [ |
2875 | 0 | this, |
2876 | 0 | aOriginal, |
2877 | 0 | r1 = RefPtr<ChannelMediaResource>(mClient), |
2878 | 0 | r2 = RefPtr<ChannelMediaResource>(aOriginal->mClient) |
2879 | 0 | ]() { InitAsCloneInternal(aOriginal); })); |
2880 | 0 | } |
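// --- Editorial sketch, not part of MediaCache.cpp ---------------------------
// The r1/r2 captures above are never used inside the lambda; they hold strong
// references so that both ChannelMediaResource clients (and with them the two
// streams) stay alive until the runnable has run on the owner thread. The same
// keep-alive pattern in isolation, with hypothetical names and shown here only
// as a commented illustration:
//
//   RefPtr<ChannelMediaResource> keepAlive(mClient);
//   OwnerThread()->Dispatch(NS_NewRunnableFunction(
//     "Sketch::KeepAlive", [this, keepAlive]() {
//       // |keepAlive| keeps the owning resource (and this stream) alive for
//       // the lifetime of the runnable, so touching |this| here is safe.
//     }));
// -----------------------------------------------------------------------------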
2881 | | |
2882 | | void |
2883 | | MediaCacheStream::InitAsCloneInternal(MediaCacheStream* aOriginal) |
2884 | 0 | { |
2885 | 0 | MOZ_ASSERT(OwnerThread()->IsOnCurrentThread()); |
2886 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2887 | 0 |
2888 | 0 |   // Download data and notify events if necessary. Note the order is important:
2889 | 0 |   // it mimics the behavior of data being downloaded from the channel.
2890 | 0 |
2891 | 0 | // Step 1: copy/download data from the original stream. |
2892 | 0 | mResourceID = aOriginal->mResourceID; |
2893 | 0 | mStreamLength = aOriginal->mStreamLength; |
2894 | 0 | mIsTransportSeekable = aOriginal->mIsTransportSeekable; |
2895 | 0 | mDownloadStatistics = aOriginal->mDownloadStatistics; |
2896 | 0 | mDownloadStatistics.Stop(); |
2897 | 0 |
2898 | 0 | // Grab cache blocks from aOriginal as readahead blocks for our stream |
2899 | 0 | for (uint32_t i = 0; i < aOriginal->mBlocks.Length(); ++i) { |
2900 | 0 | int32_t cacheBlockIndex = aOriginal->mBlocks[i]; |
2901 | 0 | if (cacheBlockIndex < 0) |
2902 | 0 | continue; |
2903 | 0 | |
2904 | 0 | while (i >= mBlocks.Length()) { |
2905 | 0 | mBlocks.AppendElement(-1); |
2906 | 0 | } |
2907 | 0 | // Every block is a readahead block for the clone because the clone's initial |
2908 | 0 | // stream offset is zero |
2909 | 0 | mMediaCache->AddBlockOwnerAsReadahead(lock, cacheBlockIndex, this, i); |
2910 | 0 | } |
2911 | 0 |
2912 | 0 | // Copy the partial block. |
2913 | 0 | mChannelOffset = aOriginal->mChannelOffset; |
2914 | 0 | memcpy(mPartialBlockBuffer.get(), |
2915 | 0 | aOriginal->mPartialBlockBuffer.get(), |
2916 | 0 | BLOCK_SIZE); |
2917 | 0 |
2918 | 0 | // Step 2: notify the client that we have new data so the decoder has a chance |
2919 | 0 | // to compute 'canplaythrough' and buffer ranges. |
2920 | 0 | mClient->CacheClientNotifyDataReceived(); |
2921 | 0 |
2922 | 0 | // Step 3: notify download ended if necessary. |
2923 | 0 | if (aOriginal->mDidNotifyDataEnded && |
2924 | 0 | NS_SUCCEEDED(aOriginal->mNotifyDataEndedStatus)) { |
2925 | 0 | mNotifyDataEndedStatus = aOriginal->mNotifyDataEndedStatus; |
2926 | 0 | mDidNotifyDataEnded = true; |
2927 | 0 | mClient->CacheClientNotifyDataEnded(mNotifyDataEndedStatus); |
2928 | 0 | } |
2929 | 0 |
2930 | 0 | // Step 4: notify download is suspended by the cache. |
2931 | 0 | mClientSuspended = true; |
2932 | 0 | mCacheSuspended = true; |
2933 | 0 | mChannelEnded = true; |
2934 | 0 | mClient->CacheClientSuspend(); |
2935 | 0 |
2936 | 0 | // Step 5: add the stream to be managed by the cache. |
2937 | 0 | mMediaCache->OpenStream(lock, this, true /* aIsClone */); |
2938 | 0 | // Wake up the reader which is waiting for the cloned data. |
2939 | 0 | lock.NotifyAll(); |
2940 | 0 | } |
2941 | | |
2942 | | nsIEventTarget* |
2943 | | MediaCacheStream::OwnerThread() const |
2944 | 0 | { |
2945 | 0 | return mMediaCache->OwnerThread(); |
2946 | 0 | } |
2947 | | |
2948 | | nsresult MediaCacheStream::GetCachedRanges(MediaByteRangeSet& aRanges) |
2949 | 0 | { |
2950 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2951 | 0 | // Take the monitor, so that the cached data ranges can't grow while we're |
2952 | 0 | // trying to loop over them. |
2953 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2954 | 0 |
2955 | 0 | // We must be pinned while running this, otherwise the cached data ranges may |
2956 | 0 | // shrink while we're trying to loop over them. |
2957 | 0 | NS_ASSERTION(mPinCount > 0, "Must be pinned"); |
2958 | 0 |
2959 | 0 | int64_t startOffset = GetNextCachedDataInternal(lock, 0); |
2960 | 0 | while (startOffset >= 0) { |
2961 | 0 | int64_t endOffset = GetCachedDataEndInternal(lock, startOffset); |
2962 | 0 | NS_ASSERTION(startOffset < endOffset, "Buffered range must end after its start"); |
2963 | 0 | // Bytes [startOffset..endOffset] are cached. |
2964 | 0 | aRanges += MediaByteRange(startOffset, endOffset); |
2965 | 0 | startOffset = GetNextCachedDataInternal(lock, endOffset); |
2966 | 0 | NS_ASSERTION(startOffset == -1 || startOffset > endOffset, |
2967 | 0 | "Must have advanced to start of next range, or hit end of stream"); |
2968 | 0 | } |
2969 | 0 | return NS_OK; |
2970 | 0 | } |
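// --- Editorial sketch, not part of MediaCache.cpp ---------------------------
// GetCachedRanges() builds the range set by alternating two queries: "where
// does the next cached run start at or after X?" and "where does the run
// containing X end?". The same walk over a hypothetical per-block bitmap, to
// make the alternation explicit (block size and names are assumptions):
static void SketchCollectRanges(const nsTArray<bool>& aCachedBlocks,
                                MediaByteRangeSet& aRanges)
{
  constexpr int64_t kBytesPerSketchBlock = 32 * 1024; // assumed block size
  size_t i = 0;
  while (i < aCachedBlocks.Length()) {
    if (!aCachedBlocks[i]) {
      ++i; // advance to the start of the next cached run
      continue;
    }
    size_t runStart = i;
    while (i < aCachedBlocks.Length() && aCachedBlocks[i]) {
      ++i; // extend to the end of this cached run
    }
    aRanges += MediaByteRange(int64_t(runStart) * kBytesPerSketchBlock,
                              int64_t(i) * kBytesPerSketchBlock);
  }
}
// -----------------------------------------------------------------------------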
2971 | | |
2972 | | double |
2973 | | MediaCacheStream::GetDownloadRate(bool* aIsReliable) |
2974 | 0 | { |
2975 | 0 | MOZ_ASSERT(!NS_IsMainThread()); |
2976 | 0 | AutoLock lock(mMediaCache->Monitor()); |
2977 | 0 | return mDownloadStatistics.GetRate(aIsReliable); |
2978 | 0 | } |
2979 | | |
2980 | | nsCString |
2981 | | MediaCacheStream::GetDebugInfo() |
2982 | 0 | { |
2983 | 0 | AutoLock lock(mMediaCache->GetMonitorOnTheMainThread()); |
2984 | 0 | return nsPrintfCString("mStreamLength=%" PRId64 " mChannelOffset=%" PRId64 |
2985 | 0 | " mCacheSuspended=%d mChannelEnded=%d mLoadID=%u", |
2986 | 0 | mStreamLength, |
2987 | 0 | mChannelOffset, |
2988 | 0 | mCacheSuspended, |
2989 | 0 | mChannelEnded, |
2990 | 0 | mLoadID); |
2991 | 0 | } |
2992 | | |
2993 | | } // namespace mozilla |
2994 | | |
2995 | | // avoid redefined macro in unified build |
2996 | | #undef LOG |
2997 | | #undef LOGI |