/src/mozilla-central/dom/media/FileBlockCache.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
5 | | * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "FileBlockCache.h" |
8 | | #include "MediaCache.h" |
9 | | #include "VideoUtils.h" |
10 | | #include "prio.h" |
11 | | #include <algorithm> |
12 | | #include "nsAnonymousTemporaryFile.h" |
13 | | #include "nsIThreadManager.h" |
14 | | #include "mozilla/dom/ContentChild.h" |
15 | | #include "mozilla/StaticPrefs.h" |
16 | | #include "mozilla/SystemGroup.h" |
17 | | #include "nsXULAppAPI.h" |
18 | | |
19 | | namespace mozilla { |
20 | | |
21 | | #undef LOG |
22 | | LazyLogModule gFileBlockCacheLog("FileBlockCache"); |
23 | 0 | #define LOG(x, ...) MOZ_LOG(gFileBlockCacheLog, LogLevel::Debug, \ |
24 | 0 | ("%p " x, this, ##__VA_ARGS__)) |
25 | | |
26 | | static void |
27 | | CloseFD(PRFileDesc* aFD) |
28 | 0 | { |
29 | 0 | PRStatus prrc; |
30 | 0 | prrc = PR_Close(aFD); |
31 | 0 | if (prrc != PR_SUCCESS) { |
32 | 0 | NS_WARNING("PR_Close() failed."); |
33 | 0 | } |
34 | 0 | } |
35 | | |
// Receives the file descriptor opened asynchronously by Init() — directly
// in the parent process, or via IPC in a child process. Completes
// initialization (dispatching any deferred write pass), or, if the cache
// was closed while waiting, closes the just-received FD instead.
void
FileBlockCache::SetCacheFile(PRFileDesc* aFD)
{
  LOG("SetFD(aFD=%p) mThread=%p", aFD, mThread.get());

  if (!aFD) {
    // Failed to get a temporary file. Shutdown.
    Close();
    return;
  }
  {
    // Publish the FD under mFileMutex so the file I/O helpers can see it.
    MutexAutoLock lock(mFileMutex);
    mFD = aFD;
  }
  {
    MutexAutoLock lock(mDataMutex);
    if (mThread) {
      // Still open, complete the initialization.
      mInitialized = true;
      if (mIsWriteScheduled) {
        // A write was scheduled while waiting for FD. We need to run/dispatch a
        // task to service the request.
        nsCOMPtr<nsIRunnable> event = mozilla::NewRunnableMethod(
          "FileBlockCache::SetCacheFile -> PerformBlockIOs",
          this,
          &FileBlockCache::PerformBlockIOs);
        mThread->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
      }
      return;
    }
  }
  // We've been closed while waiting for the file descriptor.
  // Close the file descriptor we've just received, if still there.
  MutexAutoLock lock(mFileMutex);
  if (mFD) {
    CloseFD(mFD);
    mFD = nullptr;
  }
}
75 | | |
// Creates the dedicated I/O thread and asynchronously opens the backing
// anonymous temporary file. The FD arrives later via SetCacheFile();
// until then mInitialized stays false and writes are merely queued.
// Returns an error (after self-closing) if setup cannot start.
nsresult
FileBlockCache::Init()
{
  LOG("Init()");
  MutexAutoLock mon(mDataMutex);
  MOZ_ASSERT(!mThread);
  // All block I/O happens on this private thread, keeping file access
  // off the callers' threads.
  nsresult rv = NS_NewNamedThread("FileBlockCache",
                                  getter_AddRefs(mThread),
                                  nullptr,
                                  nsIThreadManager::kThreadPoolStackSize);
  if (NS_FAILED(rv)) {
    return rv;
  }

  if (XRE_IsParentProcess()) {
    // Parent process can open the temporary file itself; do it on the
    // I/O thread so the (potentially slow) open doesn't block the caller.
    RefPtr<FileBlockCache> self = this;
    rv = mThread->Dispatch(
      NS_NewRunnableFunction("FileBlockCache::Init",
                             [self] {
                               PRFileDesc* fd = nullptr;
                               nsresult rv = NS_OpenAnonymousTemporaryFile(&fd);
                               if (NS_SUCCEEDED(rv)) {
                                 self->SetCacheFile(fd);
                               } else {
                                 self->Close();
                               }
                             }),
      NS_DISPATCH_NORMAL);
  } else {
    // We must request a temporary file descriptor from the parent process.
    RefPtr<FileBlockCache> self = this;
    rv = dom::ContentChild::GetSingleton()->AsyncOpenAnonymousTemporaryFile(
      [self](PRFileDesc* aFD) { self->SetCacheFile(aFD); });
  }

  if (NS_FAILED(rv)) {
    Close();
  }

  return rv;
}
117 | | |
// Discards all pending (not-yet-written) block changes. The clearing is
// done asynchronously on the I/O thread rather than inline.
void
FileBlockCache::Flush()
{
  LOG("Flush()");
  MutexAutoLock mon(mDataMutex);
  MOZ_ASSERT(mThread);

  // Dispatch a task so we won't clear the arrays while PerformBlockIOs() is
  // dropping the data lock and cause InvalidArrayIndex.
  RefPtr<FileBlockCache> self = this;
  mThread->Dispatch(NS_NewRunnableFunction("FileBlockCache::Flush", [self]() {
    MutexAutoLock mon(self->mDataMutex);
    // Just discard pending changes, assume MediaCache won't read from
    // blocks it hasn't written to.
    self->mChangeIndexList.clear();
    self->mBlockChanges.Clear();
  }));
}
136 | | |
137 | | int32_t |
138 | | FileBlockCache::GetMaxBlocks() const |
139 | 0 | { |
140 | 0 | // We look up the cache size every time. This means dynamic changes |
141 | 0 | // to the pref are applied. |
142 | 0 | const uint32_t cacheSizeKb = |
143 | 0 | std::min(StaticPrefs::MediaCacheSize(), uint32_t(INT32_MAX) * 2); |
144 | 0 | // Ensure we can divide BLOCK_SIZE by 1024. |
145 | 0 | static_assert(MediaCacheStream::BLOCK_SIZE % 1024 == 0, |
146 | 0 | "BLOCK_SIZE should be a multiple of 1024"); |
147 | 0 | // Ensure BLOCK_SIZE/1024 is at least 2. |
148 | 0 | static_assert(MediaCacheStream::BLOCK_SIZE / 1024 >= 2, |
149 | 0 | "BLOCK_SIZE / 1024 should be at least 2"); |
150 | 0 | // Ensure we can convert BLOCK_SIZE/1024 to a uint32_t without truncation. |
151 | 0 | static_assert(MediaCacheStream::BLOCK_SIZE / 1024 <= int64_t(UINT32_MAX), |
152 | 0 | "BLOCK_SIZE / 1024 should be at most UINT32_MAX"); |
153 | 0 | // Since BLOCK_SIZE is a strict multiple of 1024, |
154 | 0 | // cacheSizeKb * 1024 / BLOCK_SIZE == cacheSizeKb / (BLOCK_SIZE / 1024), |
155 | 0 | // but the latter formula avoids a potential overflow from `* 1024`. |
156 | 0 | // And because BLOCK_SIZE/1024 is at least 2, the maximum cache size |
157 | 0 | // INT32_MAX*2 will give a maxBlocks that can fit in an int32_t. |
158 | 0 | constexpr uint32_t blockSizeKb = |
159 | 0 | uint32_t(MediaCacheStream::BLOCK_SIZE / 1024); |
160 | 0 | const int32_t maxBlocks = int32_t(cacheSizeKb / blockSizeKb); |
161 | 0 | return std::max(maxBlocks, int32_t(1)); |
162 | 0 | } |
163 | | |
// Constructs an unopened cache; Init() must be called before any I/O.
FileBlockCache::FileBlockCache()
  : mFileMutex("MediaCache.Writer.IO.Mutex")
  , mFD(nullptr)      // Provided later via SetCacheFile().
  , mFDCurrentPos(0)  // Cached file position, maintained by Seek/Read/Write.
  , mDataMutex("MediaCache.Writer.Data.Mutex")
  , mIsWriteScheduled(false)
  , mIsReading(false)
{
}
173 | | |
// Ensures the backing file and I/O thread are released even if the owner
// never called Close() explicitly. Close() is a no-op if already closed.
FileBlockCache::~FileBlockCache()
{
  Close();
}
178 | | |
// Shuts the cache down: detaches mThread and mFD immediately (so the
// members can be reused right away and no further tasks are posted),
// then hands the old FD to the old thread for closing, followed by the
// thread's own shutdown. Safe to call repeatedly.
void
FileBlockCache::Close()
{
  LOG("Close()");

  nsCOMPtr<nsIThread> thread;
  {
    MutexAutoLock mon(mDataMutex);
    if (!mThread) {
      // Already closed (or never initialized).
      return;
    }
    thread.swap(mThread);
  }

  PRFileDesc* fd;
  {
    MutexAutoLock lock(mFileMutex);
    fd = mFD;
    mFD = nullptr;
  }

  // Let the thread close the FD, and then trigger its own shutdown.
  // Note that mThread is now empty, so no other task will be posted there.
  // Also mThread and mFD are empty and therefore can be reused immediately.
  nsresult rv = thread->Dispatch(
    NS_NewRunnableFunction("FileBlockCache::Close",
                           [thread, fd] {
                             if (fd) {
                               CloseFD(fd);
                             }
                             // We must shut down the thread in another
                             // runnable. This is called while we're
                             // shutting down the media cache, and
                             // nsIThread::Shutdown() can cause events to
                             // run before it completes, which could end
                             // up opening more streams, while the media
                             // cache is shutting down and releasing
                             // memory etc!
                             nsCOMPtr<nsIRunnable> event =
                               new ShutdownThreadEvent(thread);
                             SystemGroup::Dispatch(TaskCategory::Other,
                                                   event.forget());
                           }),
    NS_DISPATCH_NORMAL);
  NS_ENSURE_SUCCESS_VOID(rv);
}
226 | | |
// Returns true if `value` occurs anywhere in `aContainer` (linear scan,
// using the element type's operator==).
template<typename Container, typename Value>
bool
ContainerContains(const Container& aContainer, const Value& value)
{
  for (const auto& element : aContainer) {
    if (element == value) {
      return true;
    }
  }
  return false;
}
234 | | |
// Queues a write of aData1 followed by aData2 into block aBlockIndex.
// The data is held in memory (mBlockChanges) until PerformBlockIOs()
// flushes it, so concurrent reads see the pending contents. Returns
// NS_ERROR_FAILURE if the cache has been closed.
nsresult
FileBlockCache::WriteBlock(uint32_t aBlockIndex,
  Span<const uint8_t> aData1, Span<const uint8_t> aData2)
{
  MutexAutoLock mon(mDataMutex);

  if (!mThread) {
    return NS_ERROR_FAILURE;
  }

  // Check if we've already got a pending write scheduled for this block.
  mBlockChanges.EnsureLengthAtLeast(aBlockIndex + 1);
  bool blockAlreadyHadPendingChange = mBlockChanges[aBlockIndex] != nullptr;
  mBlockChanges[aBlockIndex] = new BlockChange(aData1, aData2);

  if (!blockAlreadyHadPendingChange || !ContainerContains(mChangeIndexList, aBlockIndex)) {
    // We either didn't already have a pending change for this block, or we
    // did but we didn't have an entry for it in mChangeIndexList (we're in
    // the process of writing it and have removed the block's index out of
    // mChangeIndexList in PerformBlockIOs() but not finished writing the
    // block to file yet). Add the block's index to the end of
    // mChangeIndexList to ensure the block is written as soon as possible.
    mChangeIndexList.push_back(aBlockIndex);
  }
  NS_ASSERTION(ContainerContains(mChangeIndexList, aBlockIndex), "Must have entry for new block");

  EnsureWriteScheduled();

  return NS_OK;
}
265 | | |
// Schedules a PerformBlockIOs() pass on the I/O thread, unless one is
// already scheduled, a read is in progress (the reader reschedules
// writes when it finishes), or the FD hasn't arrived yet (SetCacheFile()
// dispatches the deferred pass once initialization completes).
// Caller must hold mDataMutex.
void FileBlockCache::EnsureWriteScheduled()
{
  mDataMutex.AssertCurrentThreadOwns();
  MOZ_ASSERT(mThread);

  if (mIsWriteScheduled || mIsReading) {
    return;
  }
  // Set the flag even when uninitialized, so SetCacheFile() knows a
  // write pass is pending.
  mIsWriteScheduled = true;
  if (!mInitialized) {
    // We're still waiting on a file descriptor. When it arrives,
    // the write will be scheduled.
    return;
  }
  nsCOMPtr<nsIRunnable> event = mozilla::NewRunnableMethod(
    "FileBlockCache::EnsureWriteScheduled -> PerformBlockIOs",
    this,
    &FileBlockCache::PerformBlockIOs);
  mThread->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
}
286 | | |
// Positions the cache file at aOffset. mFDCurrentPos caches the current
// file position so a redundant PR_Seek64 syscall is skipped when the
// file is already positioned there. Caller must hold mFileMutex.
nsresult FileBlockCache::Seek(int64_t aOffset)
{
  mFileMutex.AssertCurrentThreadOwns();

  if (mFDCurrentPos != aOffset) {
    MOZ_ASSERT(mFD);
    int64_t result = PR_Seek64(mFD, aOffset, PR_SEEK_SET);
    if (result != aOffset) {
      NS_WARNING("Failed to seek media cache file");
      return NS_ERROR_FAILURE;
    }
    mFDCurrentPos = result;
  }
  return NS_OK;
}
302 | | |
303 | | nsresult FileBlockCache::ReadFromFile(int64_t aOffset, |
304 | | uint8_t* aDest, |
305 | | int32_t aBytesToRead, |
306 | | int32_t& aBytesRead) |
307 | 0 | { |
308 | 0 | LOG("ReadFromFile(offset=%" PRIu64 ", len=%u)", aOffset, aBytesToRead); |
309 | 0 | mFileMutex.AssertCurrentThreadOwns(); |
310 | 0 | MOZ_ASSERT(mFD); |
311 | 0 |
|
312 | 0 | nsresult res = Seek(aOffset); |
313 | 0 | if (NS_FAILED(res)) return res; |
314 | 0 | |
315 | 0 | aBytesRead = PR_Read(mFD, aDest, aBytesToRead); |
316 | 0 | if (aBytesRead <= 0) |
317 | 0 | return NS_ERROR_FAILURE; |
318 | 0 | mFDCurrentPos += aBytesRead; |
319 | 0 |
|
320 | 0 | return NS_OK; |
321 | 0 | } |
322 | | |
323 | | nsresult FileBlockCache::WriteBlockToFile(int32_t aBlockIndex, |
324 | | const uint8_t* aBlockData) |
325 | 0 | { |
326 | 0 | LOG("WriteBlockToFile(index=%u)", aBlockIndex); |
327 | 0 |
|
328 | 0 | mFileMutex.AssertCurrentThreadOwns(); |
329 | 0 | MOZ_ASSERT(mFD); |
330 | 0 |
|
331 | 0 | nsresult rv = Seek(BlockIndexToOffset(aBlockIndex)); |
332 | 0 | if (NS_FAILED(rv)) return rv; |
333 | 0 | |
334 | 0 | int32_t amount = PR_Write(mFD, aBlockData, BLOCK_SIZE); |
335 | 0 | if (amount < BLOCK_SIZE) { |
336 | 0 | NS_WARNING("Failed to write media cache block!"); |
337 | 0 | return NS_ERROR_FAILURE; |
338 | 0 | } |
339 | 0 | mFDCurrentPos += BLOCK_SIZE; |
340 | 0 |
|
341 | 0 | return NS_OK; |
342 | 0 | } |
343 | | |
344 | | nsresult FileBlockCache::MoveBlockInFile(int32_t aSourceBlockIndex, |
345 | | int32_t aDestBlockIndex) |
346 | 0 | { |
347 | 0 | LOG("MoveBlockInFile(src=%u, dest=%u)", aSourceBlockIndex, aDestBlockIndex); |
348 | 0 |
|
349 | 0 | mFileMutex.AssertCurrentThreadOwns(); |
350 | 0 |
|
351 | 0 | uint8_t buf[BLOCK_SIZE]; |
352 | 0 | int32_t bytesRead = 0; |
353 | 0 | if (NS_FAILED(ReadFromFile(BlockIndexToOffset(aSourceBlockIndex), |
354 | 0 | buf, |
355 | 0 | BLOCK_SIZE, |
356 | 0 | bytesRead))) { |
357 | 0 | return NS_ERROR_FAILURE; |
358 | 0 | } |
359 | 0 | return WriteBlockToFile(aDestBlockIndex, buf); |
360 | 0 | } |
361 | | |
// Runs on the I/O thread: drains mChangeIndexList, writing or moving each
// pending block to file. mDataMutex is released (and mFileMutex taken)
// around each file operation so new changes and reads can interleave;
// the loop aborts early if the cache is closed or a read starts.
void
FileBlockCache::PerformBlockIOs()
{
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  MutexAutoLock mon(mDataMutex);
  NS_ASSERTION(mIsWriteScheduled, "Should report write running or scheduled.");

  LOG("Run() mFD=%p mThread=%p", mFD, mThread.get());

  while (!mChangeIndexList.empty()) {
    if (!mThread) {
      // We've been closed, abort, discarding unwritten changes.
      mIsWriteScheduled = false;
      return;
    }

    if (mIsReading) {
      // We're trying to read; postpone all writes. (Reader will resume writes.)
      mIsWriteScheduled = false;
      return;
    }

    // Process each pending change. We pop the index out of the change
    // list, but leave the BlockChange in mBlockChanges until the change
    // is written to file. This is so that any read which happens while
    // we drop mDataMutex to write will refer to the data's source in
    // memory, rather than the not-yet up to date data written to file.
    // This also ensures we will insert a new index into mChangeIndexList
    // when this happens.

    // Hold a reference to the change, in case another change
    // overwrites the mBlockChanges entry for this block while we drop
    // mDataMutex to take mFileMutex.
    int32_t blockIndex = mChangeIndexList.front();
    RefPtr<BlockChange> change = mBlockChanges[blockIndex];
    MOZ_ASSERT(change,
               "Change index list should only contain entries for blocks "
               "with changes");
    {
      MutexAutoUnlock unlock(mDataMutex);
      MutexAutoLock lock(mFileMutex);
      if (!mFD) {
        // We may be here if mFD has been reset because we're closing, so we
        // don't care anymore about writes.
        return;
      }
      if (change->IsWrite()) {
        WriteBlockToFile(blockIndex, change->mData.get());
      } else if (change->IsMove()) {
        MoveBlockInFile(change->mSourceBlockIndex, blockIndex);
      }
    }
    mChangeIndexList.pop_front();
    // If a new change has not been made to the block while we dropped
    // mDataMutex, clear reference to the old change. Otherwise, the old
    // reference has been cleared already.
    if (mBlockChanges[blockIndex] == change) {
      mBlockChanges[blockIndex] = nullptr;
    }
  }

  mIsWriteScheduled = false;
}
425 | | |
// Reads aLength bytes starting at absolute cache offset aOffset into
// aData, block by block. Each block is served either from its pending
// in-memory change (if any) or from the backing file. Sets mIsReading to
// make the writer yield; on exit, reschedules any pending writes.
// *aBytes receives the number of bytes read.
nsresult FileBlockCache::Read(int64_t aOffset,
                              uint8_t* aData,
                              int32_t aLength,
                              int32_t* aBytes)
{
  MutexAutoLock mon(mDataMutex);

  // Fail if closed, or if the offset is beyond the addressable range of
  // int32_t block indices.
  if (!mThread || (aOffset / BLOCK_SIZE) > INT32_MAX) {
    return NS_ERROR_FAILURE;
  }

  mIsReading = true;
  auto exitRead = MakeScopeExit([&] {
    mIsReading = false;
    if (!mChangeIndexList.empty()) {
      // mReading has stopped or prevented pending writes, resume them.
      EnsureWriteScheduled();
    }
  });

  int32_t bytesToRead = aLength;
  int64_t offset = aOffset;
  uint8_t* dst = aData;
  while (bytesToRead > 0) {
    int32_t blockIndex = static_cast<int32_t>(offset / BLOCK_SIZE);
    int32_t start = offset % BLOCK_SIZE;
    int32_t amount = std::min(BLOCK_SIZE - start, bytesToRead);

    // If the block is not yet written to file, we can just read from
    // the memory buffer, otherwise we need to read from file.
    int32_t bytesRead = 0;
    MOZ_ASSERT(!mBlockChanges.IsEmpty());
    MOZ_ASSERT(blockIndex >= 0 &&
               static_cast<uint32_t>(blockIndex) < mBlockChanges.Length());
    RefPtr<BlockChange> change = mBlockChanges.SafeElementAt(blockIndex);
    if (change && change->IsWrite()) {
      // Block isn't yet written to file. Read from memory buffer.
      const uint8_t* blockData = change->mData.get();
      memcpy(dst, blockData + start, amount);
      bytesRead = amount;
    } else {
      if (change && change->IsMove()) {
        // The target block is the destination of a not-yet-completed move
        // action, so read from the move's source block from file. Note we
        // *don't* follow a chain of moves here, as a move's source index
        // is resolved when MoveBlock() is called, and the move's source's
        // block could be have itself been subject to a move (or write)
        // which happened *after* this move was recorded.
        blockIndex = change->mSourceBlockIndex;
      }
      // Block has been written to file, either as the source block of a move,
      // or as a stable (all changes made) block. Read the data directly
      // from file.
      nsresult res;
      {
        MutexAutoUnlock unlock(mDataMutex);
        MutexAutoLock lock(mFileMutex);
        if (!mFD) {
          // Not initialized yet, or closed.
          return NS_ERROR_FAILURE;
        }
        res = ReadFromFile(BlockIndexToOffset(blockIndex) + start,
                           dst,
                           amount,
                           bytesRead);
      }
      NS_ENSURE_SUCCESS(res,res);
    }
    dst += bytesRead;
    offset += bytesRead;
    bytesToRead -= bytesRead;
  }
  *aBytes = aLength - bytesToRead;
  return NS_OK;
}
501 | | |
// Records that the contents of aSourceBlockIndex should appear at
// aDestBlockIndex. The move is stored as a pending change and performed
// later by PerformBlockIOs(); move chains are resolved eagerly here so an
// overwritten intermediate block cannot lose the destination's contents.
// Returns NS_ERROR_FAILURE if the cache has been closed.
nsresult FileBlockCache::MoveBlock(int32_t aSourceBlockIndex, int32_t aDestBlockIndex)
{
  MutexAutoLock mon(mDataMutex);

  if (!mThread) {
    return NS_ERROR_FAILURE;
  }

  mBlockChanges.EnsureLengthAtLeast(std::max(aSourceBlockIndex, aDestBlockIndex) + 1);

  // The source block's contents may be the destination of another pending
  // move, which in turn can be the destination of another pending move,
  // etc. Resolve the final source block, so that if one of the blocks in
  // the chain of moves is overwritten, we don't lose the reference to the
  // contents of the destination block.
  int32_t sourceIndex = aSourceBlockIndex;
  BlockChange* sourceBlock = nullptr;
  while ((sourceBlock = mBlockChanges[sourceIndex]) &&
          sourceBlock->IsMove()) {
    sourceIndex = sourceBlock->mSourceBlockIndex;
  }

  if (mBlockChanges[aDestBlockIndex] == nullptr ||
      !ContainerContains(mChangeIndexList, aDestBlockIndex)) {
    // Only add another entry to the change index list if we don't already
    // have one for this block. We won't have an entry when either there's
    // no pending change for this block, or if there is a pending change for
    // this block and we're in the process of writing it (we've popped the
    // block's index out of mChangeIndexList in PerformBlockIOs() but not
    // finished writing the block to file yet).
    mChangeIndexList.push_back(aDestBlockIndex);
  }

  // If the source block hasn't yet been written to file then the dest block
  // simply contains that same write. Resolve this as a write instead.
  if (sourceBlock && sourceBlock->IsWrite()) {
    mBlockChanges[aDestBlockIndex] = new BlockChange(sourceBlock->mData.get());
  } else {
    mBlockChanges[aDestBlockIndex] = new BlockChange(sourceIndex);
  }

  EnsureWriteScheduled();

  NS_ASSERTION(ContainerContains(mChangeIndexList, aDestBlockIndex),
               "Should have scheduled block for change");

  return NS_OK;
}
550 | | |
551 | | } // End namespace mozilla. |
552 | | |
553 | | // avoid redefined macro in unified build |
554 | | #undef LOG |