/src/mozilla-central/netwerk/cache2/CacheFile.cpp
Line | Count | Source |
1 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
2 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
3 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
4 | | |
5 | | #include "CacheLog.h" |
6 | | #include "CacheFile.h" |
7 | | |
8 | | #include "CacheFileChunk.h" |
9 | | #include "CacheFileInputStream.h" |
10 | | #include "CacheFileOutputStream.h" |
11 | | #include "nsThreadUtils.h" |
12 | | #include "mozilla/DebugOnly.h" |
13 | | #include "mozilla/Move.h" |
14 | | #include <algorithm> |
15 | | #include "nsComponentManagerUtils.h" |
16 | | #include "nsProxyRelease.h" |
17 | | #include "mozilla/Telemetry.h" |
18 | | |
19 | | // When CACHE_CHUNKS is defined we always cache unused chunks in mCacheChunks. |
20 | | // When it is not defined, we always release the chunks ASAP, i.e. we cache |
21 | | // unused chunks only when: |
22 | | // - CacheFile is memory-only |
23 | | // - CacheFile is still waiting for the handle |
24 | | // - the chunk is preloaded |
25 | | |
26 | | //#define CACHE_CHUNKS |
27 | | |
28 | | namespace mozilla { |
29 | | namespace net { |
30 | | |
31 | | class NotifyCacheFileListenerEvent : public Runnable { |
32 | | public: |
33 | | NotifyCacheFileListenerEvent(CacheFileListener* aCallback, |
34 | | nsresult aResult, |
35 | | bool aIsNew) |
36 | | : Runnable("net::NotifyCacheFileListenerEvent") |
37 | | , mCallback(aCallback) |
38 | | , mRV(aResult) |
39 | | , mIsNew(aIsNew) |
40 | 0 | { |
41 | 0 | LOG(("NotifyCacheFileListenerEvent::NotifyCacheFileListenerEvent() " |
42 | 0 | "[this=%p]", this)); |
43 | 0 | } |
44 | | |
45 | | protected: |
46 | | ~NotifyCacheFileListenerEvent() |
47 | 0 | { |
48 | 0 | LOG(("NotifyCacheFileListenerEvent::~NotifyCacheFileListenerEvent() " |
49 | 0 | "[this=%p]", this)); |
50 | 0 | } |
51 | | |
52 | | public: |
53 | | NS_IMETHOD Run() override |
54 | 0 | { |
55 | 0 | LOG(("NotifyCacheFileListenerEvent::Run() [this=%p]", this)); |
56 | 0 |
|
57 | 0 | mCallback->OnFileReady(mRV, mIsNew); |
58 | 0 | return NS_OK; |
59 | 0 | } |
60 | | |
61 | | protected: |
62 | | nsCOMPtr<CacheFileListener> mCallback; |
63 | | nsresult mRV; |
64 | | bool mIsNew; |
65 | | }; |
66 | | |
67 | | class NotifyChunkListenerEvent : public Runnable { |
68 | | public: |
69 | | NotifyChunkListenerEvent(CacheFileChunkListener* aCallback, |
70 | | nsresult aResult, |
71 | | uint32_t aChunkIdx, |
72 | | CacheFileChunk* aChunk) |
73 | | : Runnable("net::NotifyChunkListenerEvent") |
74 | | , mCallback(aCallback) |
75 | | , mRV(aResult) |
76 | | , mChunkIdx(aChunkIdx) |
77 | | , mChunk(aChunk) |
78 | 0 | { |
79 | 0 | LOG(("NotifyChunkListenerEvent::NotifyChunkListenerEvent() [this=%p]", |
80 | 0 | this)); |
81 | 0 | } |
82 | | |
83 | | protected: |
84 | | ~NotifyChunkListenerEvent() |
85 | 0 | { |
86 | 0 | LOG(("NotifyChunkListenerEvent::~NotifyChunkListenerEvent() [this=%p]", |
87 | 0 | this)); |
88 | 0 | } |
89 | | |
90 | | public: |
91 | | NS_IMETHOD Run() override |
92 | 0 | { |
93 | 0 | LOG(("NotifyChunkListenerEvent::Run() [this=%p]", this)); |
94 | 0 |
|
95 | 0 | mCallback->OnChunkAvailable(mRV, mChunkIdx, mChunk); |
96 | 0 | return NS_OK; |
97 | 0 | } |
98 | | |
99 | | protected: |
100 | | nsCOMPtr<CacheFileChunkListener> mCallback; |
101 | | nsresult mRV; |
102 | | uint32_t mChunkIdx; |
103 | | RefPtr<CacheFileChunk> mChunk; |
104 | | }; |
105 | | |
106 | | |
107 | | class DoomFileHelper : public CacheFileIOListener |
108 | | { |
109 | | public: |
110 | | NS_DECL_THREADSAFE_ISUPPORTS |
111 | | |
112 | | explicit DoomFileHelper(CacheFileListener *aListener) |
113 | | : mListener(aListener) |
114 | 0 | { |
115 | 0 | } |
116 | | |
117 | | |
118 | | NS_IMETHOD OnFileOpened(CacheFileHandle *aHandle, nsresult aResult) override |
119 | 0 | { |
120 | 0 | MOZ_CRASH("DoomFileHelper::OnFileOpened should not be called!"); |
121 | 0 | return NS_ERROR_UNEXPECTED; |
122 | 0 | } |
123 | | |
124 | | NS_IMETHOD OnDataWritten(CacheFileHandle *aHandle, const char *aBuf, |
125 | | nsresult aResult) override |
126 | 0 | { |
127 | 0 | MOZ_CRASH("DoomFileHelper::OnDataWritten should not be called!"); |
128 | 0 | return NS_ERROR_UNEXPECTED; |
129 | 0 | } |
130 | | |
131 | | NS_IMETHOD OnDataRead(CacheFileHandle *aHandle, char *aBuf, nsresult aResult) override |
132 | 0 | { |
133 | 0 | MOZ_CRASH("DoomFileHelper::OnDataRead should not be called!"); |
134 | 0 | return NS_ERROR_UNEXPECTED; |
135 | 0 | } |
136 | | |
137 | | NS_IMETHOD OnFileDoomed(CacheFileHandle *aHandle, nsresult aResult) override |
138 | 0 | { |
139 | 0 | if (mListener) |
140 | 0 | mListener->OnFileDoomed(aResult); |
141 | 0 | return NS_OK; |
142 | 0 | } |
143 | | |
144 | | NS_IMETHOD OnEOFSet(CacheFileHandle *aHandle, nsresult aResult) override |
145 | 0 | { |
146 | 0 | MOZ_CRASH("DoomFileHelper::OnEOFSet should not be called!"); |
147 | 0 | return NS_ERROR_UNEXPECTED; |
148 | 0 | } |
149 | | |
150 | | NS_IMETHOD OnFileRenamed(CacheFileHandle *aHandle, nsresult aResult) override |
151 | 0 | { |
152 | 0 | MOZ_CRASH("DoomFileHelper::OnFileRenamed should not be called!"); |
153 | 0 | return NS_ERROR_UNEXPECTED; |
154 | 0 | } |
155 | | |
156 | | private: |
157 | 0 | virtual ~DoomFileHelper() = default; |
158 | | |
159 | | nsCOMPtr<CacheFileListener> mListener; |
160 | | }; |
161 | | |
162 | | NS_IMPL_ISUPPORTS(DoomFileHelper, CacheFileIOListener) |
163 | | |
164 | | |
165 | | NS_IMPL_ADDREF(CacheFile) |
166 | | NS_IMPL_RELEASE(CacheFile) |
167 | 0 | NS_INTERFACE_MAP_BEGIN(CacheFile) |
168 | 0 | NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener) |
169 | 0 | NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener) |
170 | 0 | NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileMetadataListener) |
171 | 0 | NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, |
172 | 0 | mozilla::net::CacheFileChunkListener) |
173 | 0 | NS_INTERFACE_MAP_END |
174 | | |
175 | | CacheFile::CacheFile() |
176 | | : mLock("CacheFile.mLock") |
177 | | , mOpeningFile(false) |
178 | | , mReady(false) |
179 | | , mMemoryOnly(false) |
180 | | , mSkipSizeCheck(false) |
181 | | , mOpenAsMemoryOnly(false) |
182 | | , mPinned(false) |
183 | | , mPriority(false) |
184 | | , mDataAccessed(false) |
185 | | , mDataIsDirty(false) |
186 | | , mWritingMetadata(false) |
187 | | , mPreloadWithoutInputStreams(true) |
188 | | , mPreloadChunkCount(0) |
189 | | , mStatus(NS_OK) |
190 | | , mDataSize(-1) |
191 | | , mAltDataOffset(-1) |
192 | | , mKill(false) |
193 | | , mOutput(nullptr) |
194 | 0 | { |
195 | 0 | LOG(("CacheFile::CacheFile() [this=%p]", this)); |
196 | 0 | } |
197 | | |
198 | | CacheFile::~CacheFile() |
199 | 0 | { |
200 | 0 | LOG(("CacheFile::~CacheFile() [this=%p]", this)); |
201 | 0 |
|
202 | 0 | MutexAutoLock lock(mLock); |
203 | 0 | if (!mMemoryOnly && mReady && !mKill) { |
204 | 0 | // The mReady flag indicates we have metadata and are in a valid state. |
205 | 0 | WriteMetadataIfNeededLocked(true); |
206 | 0 | } |
207 | 0 | } |
208 | | |
209 | | nsresult |
210 | | CacheFile::Init(const nsACString &aKey, |
211 | | bool aCreateNew, |
212 | | bool aMemoryOnly, |
213 | | bool aSkipSizeCheck, |
214 | | bool aPriority, |
215 | | bool aPinned, |
216 | | CacheFileListener *aCallback) |
217 | 0 | { |
218 | 0 | MOZ_ASSERT(!mListener); |
219 | 0 | MOZ_ASSERT(!mHandle); |
220 | 0 |
|
221 | 0 | MOZ_ASSERT(!(aMemoryOnly && aPinned)); |
222 | 0 |
|
223 | 0 | nsresult rv; |
224 | 0 |
|
225 | 0 | mKey = aKey; |
226 | 0 | mOpenAsMemoryOnly = mMemoryOnly = aMemoryOnly; |
227 | 0 | mSkipSizeCheck = aSkipSizeCheck; |
228 | 0 | mPriority = aPriority; |
229 | 0 | mPinned = aPinned; |
230 | 0 |
|
231 | 0 | // Some consumers (at least nsHTTPCompressConv) assume that Read() can read |
232 | 0 | // the amount of data that was announced by Available(). |
233 | 0 | // CacheFileInputStream::Available() also uses preloaded chunks to compute the |
234 | 0 | // number of available bytes in the input stream, so we have to make sure the |
235 | 0 | // preloadChunkCount won't change during the CacheFile's lifetime. Otherwise |
236 | 0 | // we could potentially release some cached chunks that were used to calculate |
237 | 0 | // the available bytes but would not be available later during a call to |
238 | 0 | // CacheFileInputStream::Read(). |
239 | 0 | mPreloadChunkCount = CacheObserver::PreloadChunkCount(); |
240 | 0 |
|
241 | 0 | LOG(("CacheFile::Init() [this=%p, key=%s, createNew=%d, memoryOnly=%d, " |
242 | 0 | "priority=%d, listener=%p]", this, mKey.get(), aCreateNew, aMemoryOnly, |
243 | 0 | aPriority, aCallback)); |
244 | 0 |
|
245 | 0 | if (mMemoryOnly) { |
246 | 0 | MOZ_ASSERT(!aCallback); |
247 | 0 |
|
248 | 0 | mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, false, mKey); |
249 | 0 | mReady = true; |
250 | 0 | mDataSize = mMetadata->Offset(); |
251 | 0 | return NS_OK; |
252 | 0 | } |
253 | 0 | else { |
254 | 0 | uint32_t flags; |
255 | 0 | if (aCreateNew) { |
256 | 0 | MOZ_ASSERT(!aCallback); |
257 | 0 | flags = CacheFileIOManager::CREATE_NEW; |
258 | 0 |
|
259 | 0 | // make sure we can use this entry immediately |
260 | 0 | mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey); |
261 | 0 | mReady = true; |
262 | 0 | mDataSize = mMetadata->Offset(); |
263 | 0 | } else { |
264 | 0 | flags = CacheFileIOManager::CREATE; |
265 | 0 | } |
266 | 0 |
|
267 | 0 | if (mPriority) { |
268 | 0 | flags |= CacheFileIOManager::PRIORITY; |
269 | 0 | } |
270 | 0 |
|
271 | 0 | if (mPinned) { |
272 | 0 | flags |= CacheFileIOManager::PINNED; |
273 | 0 | } |
274 | 0 |
|
275 | 0 | mOpeningFile = true; |
276 | 0 | mListener = aCallback; |
277 | 0 | rv = CacheFileIOManager::OpenFile(mKey, flags, this); |
278 | 0 | if (NS_FAILED(rv)) { |
279 | 0 | mListener = nullptr; |
280 | 0 | mOpeningFile = false; |
281 | 0 |
|
282 | 0 | if (mPinned) { |
283 | 0 | LOG(("CacheFile::Init() - CacheFileIOManager::OpenFile() failed " |
284 | 0 | "but we want to pin, fail the file opening. [this=%p]", this)); |
285 | 0 | return NS_ERROR_NOT_AVAILABLE; |
286 | 0 | } |
287 | 0 |
|
288 | 0 | if (aCreateNew) { |
289 | 0 | NS_WARNING("Forcing memory-only entry since OpenFile failed"); |
290 | 0 | LOG(("CacheFile::Init() - CacheFileIOManager::OpenFile() failed " |
291 | 0 | "synchronously. We can continue in memory-only mode since " |
292 | 0 | "aCreateNew == true. [this=%p]", this)); |
293 | 0 |
|
294 | 0 | mMemoryOnly = true; |
295 | 0 | } |
296 | 0 | else if (rv == NS_ERROR_NOT_INITIALIZED) { |
297 | 0 | NS_WARNING("Forcing memory-only entry since CacheIOManager isn't " |
298 | 0 | "initialized."); |
299 | 0 | LOG(("CacheFile::Init() - CacheFileIOManager isn't initialized, " |
300 | 0 | "initializing entry as memory-only. [this=%p]", this)); |
301 | 0 |
|
302 | 0 | mMemoryOnly = true; |
303 | 0 | mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey); |
304 | 0 | mReady = true; |
305 | 0 | mDataSize = mMetadata->Offset(); |
306 | 0 |
|
307 | 0 | RefPtr<NotifyCacheFileListenerEvent> ev; |
308 | 0 | ev = new NotifyCacheFileListenerEvent(aCallback, NS_OK, true); |
309 | 0 | rv = NS_DispatchToCurrentThread(ev); |
310 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
311 | 0 | } |
312 | 0 | else { |
313 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
314 | 0 | } |
315 | 0 | } |
316 | 0 | } |
317 | 0 |
|
318 | 0 | return NS_OK; |
319 | 0 | } |
320 | | |
321 | | nsresult |
322 | | CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk *aChunk) |
323 | 0 | { |
324 | 0 | CacheFileAutoLock lock(this); |
325 | 0 |
|
326 | 0 | nsresult rv; |
327 | 0 |
|
328 | 0 | uint32_t index = aChunk->Index(); |
329 | 0 |
|
330 | 0 | LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08" PRIx32 ", chunk=%p, idx=%u]", |
331 | 0 | this, static_cast<uint32_t>(aResult), aChunk, index)); |
332 | 0 |
|
333 | 0 | if (aChunk->mDiscardedChunk) { |
334 | 0 | // We discard only unused chunks, so it must still be unused when reading |
335 | 0 | // data finishes. |
336 | 0 | MOZ_ASSERT(aChunk->mRefCnt == 2); |
337 | 0 | aChunk->mActiveChunk = false; |
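 | | // Hand the chunk's strong reference to this CacheFile over to |
 | | // mObjsToRelease so that it is released only after mLock is dropped |
 | | // (see ReleaseOutsideLock() and Unlock() below). |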
338 | 0 | ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget()); |
339 | 0 |
|
340 | 0 | DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk); |
341 | 0 | MOZ_ASSERT(removed); |
342 | 0 | return NS_OK; |
343 | 0 | } |
344 | 0 |
|
345 | 0 | if (NS_FAILED(aResult)) { |
346 | 0 | SetError(aResult); |
347 | 0 | } |
348 | 0 |
|
349 | 0 | if (HaveChunkListeners(index)) { |
350 | 0 | rv = NotifyChunkListeners(index, aResult, aChunk); |
351 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
352 | 0 | } |
353 | 0 |
|
354 | 0 | return NS_OK; |
355 | 0 | } |
356 | | |
357 | | nsresult |
358 | | CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk) |
359 | 0 | { |
360 | 0 | // In case the chunk was reused, made dirty and released between calls to |
361 | 0 | // CacheFileChunk::Write() and CacheFile::OnChunkWritten(), we must write |
362 | 0 | // the chunk to the disk again. When the chunk is unused and dirty, simply |
363 | 0 | // addref and release the chunk (outside the lock), which ensures that |
364 | 0 | // CacheFile::DeactivateChunk() will be called again. |
365 | 0 | RefPtr<CacheFileChunk> deactivateChunkAgain; |
366 | 0 |
|
367 | 0 | CacheFileAutoLock lock(this); |
368 | 0 |
|
369 | 0 | nsresult rv; |
370 | 0 |
|
371 | 0 | LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08" PRIx32 ", chunk=%p, idx=%u]", |
372 | 0 | this, static_cast<uint32_t>(aResult), aChunk, aChunk->Index())); |
373 | 0 |
|
374 | 0 | MOZ_ASSERT(!mMemoryOnly); |
375 | 0 | MOZ_ASSERT(!mOpeningFile); |
376 | 0 | MOZ_ASSERT(mHandle); |
377 | 0 |
|
378 | 0 | if (aChunk->mDiscardedChunk) { |
379 | 0 | // We discard only unused chunks, so it must still be unused when writing |
380 | 0 | // data finishes. |
381 | 0 | MOZ_ASSERT(aChunk->mRefCnt == 2); |
382 | 0 | aChunk->mActiveChunk = false; |
383 | 0 | ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget()); |
384 | 0 |
|
385 | 0 | DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk); |
386 | 0 | MOZ_ASSERT(removed); |
387 | 0 | return NS_OK; |
388 | 0 | } |
389 | 0 |
|
390 | 0 | if (NS_FAILED(aResult)) { |
391 | 0 | SetError(aResult); |
392 | 0 | } |
393 | 0 |
|
394 | 0 | if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) { |
395 | 0 | // update hash value in metadata |
396 | 0 | mMetadata->SetHash(aChunk->Index(), aChunk->Hash()); |
397 | 0 | } |
398 | 0 |
|
399 | 0 | // notify listeners if there are any |
400 | 0 | if (HaveChunkListeners(aChunk->Index())) { |
401 | 0 | // don't release the chunk since there are some listeners queued |
402 | 0 | rv = NotifyChunkListeners(aChunk->Index(), aResult, aChunk); |
403 | 0 | if (NS_SUCCEEDED(rv)) { |
404 | 0 | MOZ_ASSERT(aChunk->mRefCnt != 2); |
405 | 0 | return NS_OK; |
406 | 0 | } |
407 | 0 | } |
408 | 0 |
|
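 | | // A reference count of exactly 2 means that no stream uses the chunk |
 | | // any more (compare the mDiscardedChunk assertion above), so it can be |
 | | // cached or released below. |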
409 | 0 | if (aChunk->mRefCnt != 2) { |
410 | 0 | LOG(("CacheFile::OnChunkWritten() - Chunk is still used [this=%p, chunk=%p," |
411 | 0 | " refcnt=%" PRIuPTR "]", this, aChunk, aChunk->mRefCnt.get())); |
412 | 0 |
|
413 | 0 | return NS_OK; |
414 | 0 | } |
415 | 0 |
|
416 | 0 | if (aChunk->IsDirty()) { |
417 | 0 | LOG(("CacheFile::OnChunkWritten() - Unused chunk is dirty. We must go " |
418 | 0 | "through deactivation again. [this=%p, chunk=%p]", this, aChunk)); |
419 | 0 |
|
420 | 0 | deactivateChunkAgain = aChunk; |
421 | 0 | return NS_OK; |
422 | 0 | } |
423 | 0 |
|
424 | 0 | bool keepChunk = false; |
425 | 0 | if (NS_SUCCEEDED(aResult)) { |
426 | 0 | keepChunk = ShouldCacheChunk(aChunk->Index()); |
427 | 0 | LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]", |
428 | 0 | keepChunk ? "Caching" : "Releasing", this, aChunk)); |
429 | 0 | } else { |
430 | 0 | LOG(("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, " |
431 | 0 | "chunk=%p]", this, aChunk)); |
432 | 0 | } |
433 | 0 |
|
434 | 0 | RemoveChunkInternal(aChunk, keepChunk); |
435 | 0 |
|
436 | 0 | WriteMetadataIfNeededLocked(); |
437 | 0 |
|
438 | 0 | return NS_OK; |
439 | 0 | } |
440 | | |
441 | | nsresult |
442 | | CacheFile::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx, |
443 | | CacheFileChunk *aChunk) |
444 | 0 | { |
445 | 0 | MOZ_CRASH("CacheFile::OnChunkAvailable should not be called!"); |
446 | 0 | return NS_ERROR_UNEXPECTED; |
447 | 0 | } |
448 | | |
449 | | nsresult |
450 | | CacheFile::OnChunkUpdated(CacheFileChunk *aChunk) |
451 | 0 | { |
452 | 0 | MOZ_CRASH("CacheFile::OnChunkUpdated should not be called!"); |
453 | 0 | return NS_ERROR_UNEXPECTED; |
454 | 0 | } |
455 | | |
456 | | nsresult |
457 | | CacheFile::OnFileOpened(CacheFileHandle *aHandle, nsresult aResult) |
458 | 0 | { |
459 | 0 | nsresult rv; |
460 | 0 |
|
461 | 0 | // Use an 'auto' class to doom the file or fail the listener |
462 | 0 | // outside the CacheFile's lock. |
463 | 0 | class AutoFailDoomListener |
464 | 0 | { |
465 | 0 | public: |
466 | 0 | explicit AutoFailDoomListener(CacheFileHandle *aHandle) |
467 | 0 | : mHandle(aHandle) |
468 | 0 | , mAlreadyDoomed(false) |
469 | 0 | {} |
470 | 0 | ~AutoFailDoomListener() |
471 | 0 | { |
472 | 0 | if (!mListener) |
473 | 0 | return; |
474 | 0 | |
475 | 0 | if (mHandle) { |
476 | 0 | if (mAlreadyDoomed) { |
477 | 0 | mListener->OnFileDoomed(mHandle, NS_OK); |
478 | 0 | } else { |
479 | 0 | CacheFileIOManager::DoomFile(mHandle, mListener); |
480 | 0 | } |
481 | 0 | } else { |
482 | 0 | mListener->OnFileDoomed(nullptr, NS_ERROR_NOT_AVAILABLE); |
483 | 0 | } |
484 | 0 | } |
485 | 0 |
|
486 | 0 | CacheFileHandle* mHandle; |
487 | 0 | nsCOMPtr<CacheFileIOListener> mListener; |
488 | 0 | bool mAlreadyDoomed; |
489 | 0 | } autoDoom(aHandle); |
490 | 0 |
|
491 | 0 | nsCOMPtr<CacheFileListener> listener; |
492 | 0 | bool isNew = false; |
493 | 0 | nsresult retval = NS_OK; |
494 | 0 |
|
495 | 0 | { |
496 | 0 | CacheFileAutoLock lock(this); |
497 | 0 |
|
498 | 0 | MOZ_ASSERT(mOpeningFile); |
499 | 0 | MOZ_ASSERT((NS_SUCCEEDED(aResult) && aHandle) || |
500 | 0 | (NS_FAILED(aResult) && !aHandle)); |
501 | 0 | MOZ_ASSERT((mListener && !mMetadata) || // !createNew |
502 | 0 | (!mListener && mMetadata)); // createNew |
503 | 0 | MOZ_ASSERT(!mMemoryOnly || mMetadata); // memory-only was set on new entry |
504 | 0 |
|
505 | 0 | LOG(("CacheFile::OnFileOpened() [this=%p, rv=0x%08" PRIx32 ", handle=%p]", |
506 | 0 | this, static_cast<uint32_t>(aResult), aHandle)); |
507 | 0 |
|
508 | 0 | mOpeningFile = false; |
509 | 0 |
|
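 | | // If Doom() was called while the file was still being opened, the |
 | | // listener stored in mDoomAfterOpenListener is handed to autoDoom, whose |
 | | // destructor dooms the freshly opened handle (or fails the listener) |
 | | // outside the lock. |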
510 | 0 | autoDoom.mListener.swap(mDoomAfterOpenListener); |
511 | 0 |
|
512 | 0 | if (mMemoryOnly) { |
513 | 0 | // We can be here only if the entry was initialized as createNew and |
514 | 0 | // SetMemoryOnly() was called. |
515 | 0 |
|
516 | 0 | // Just don't store the handle into mHandle and exit |
517 | 0 | autoDoom.mAlreadyDoomed = true; |
518 | 0 | return NS_OK; |
519 | 0 | } |
520 | 0 | |
521 | 0 | if (NS_FAILED(aResult)) { |
522 | 0 | if (mMetadata) { |
523 | 0 | // This entry was initialized as createNew, just switch to memory-only |
524 | 0 | // mode. |
525 | 0 | NS_WARNING("Forcing memory-only entry since OpenFile failed"); |
526 | 0 | LOG(("CacheFile::OnFileOpened() - CacheFileIOManager::OpenFile() " |
527 | 0 | "failed asynchronously. We can continue in memory-only mode since " |
528 | 0 | "aCreateNew == true. [this=%p]", this)); |
529 | 0 |
|
530 | 0 | mMemoryOnly = true; |
531 | 0 | return NS_OK; |
532 | 0 | } |
533 | 0 |
|
534 | 0 | if (aResult == NS_ERROR_FILE_INVALID_PATH) { |
535 | 0 | // CacheFileIOManager doesn't have mCacheDirectory, switch to |
536 | 0 | // memory-only mode. |
537 | 0 | NS_WARNING("Forcing memory-only entry since CacheFileIOManager doesn't " |
538 | 0 | "have mCacheDirectory."); |
539 | 0 | LOG(("CacheFile::OnFileOpened() - CacheFileIOManager doesn't have " |
540 | 0 | "mCacheDirectory, initializing entry as memory-only. [this=%p]", |
541 | 0 | this)); |
542 | 0 |
|
543 | 0 | mMemoryOnly = true; |
544 | 0 | mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey); |
545 | 0 | mReady = true; |
546 | 0 | mDataSize = mMetadata->Offset(); |
547 | 0 |
|
548 | 0 | isNew = true; |
549 | 0 | retval = NS_OK; |
550 | 0 | } else { |
551 | 0 | // CacheFileIOManager::OpenFile() failed for another reason. |
552 | 0 | isNew = false; |
553 | 0 | retval = aResult; |
554 | 0 | } |
555 | 0 |
|
556 | 0 | mListener.swap(listener); |
557 | 0 | } else { |
558 | 0 | mHandle = aHandle; |
559 | 0 | if (NS_FAILED(mStatus)) { |
560 | 0 | CacheFileIOManager::DoomFile(mHandle, nullptr); |
561 | 0 | } |
562 | 0 |
|
563 | 0 | if (mMetadata) { |
564 | 0 | InitIndexEntry(); |
565 | 0 |
|
566 | 0 | // The entry was initialized as createNew, don't try to read metadata. |
567 | 0 | mMetadata->SetHandle(mHandle); |
568 | 0 |
|
569 | 0 | // Write all cached chunks, otherwise they may stay unwritten. |
570 | 0 | for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) { |
571 | 0 | uint32_t idx = iter.Key(); |
572 | 0 | const RefPtr<CacheFileChunk>& chunk = iter.Data(); |
573 | 0 |
|
574 | 0 | LOG(("CacheFile::OnFileOpened() - write [this=%p, idx=%u, chunk=%p]", |
575 | 0 | this, idx, chunk.get())); |
576 | 0 |
|
577 | 0 | mChunks.Put(idx, chunk); |
578 | 0 | chunk->mFile = this; |
579 | 0 | chunk->mActiveChunk = true; |
580 | 0 |
|
581 | 0 | MOZ_ASSERT(chunk->IsReady()); |
582 | 0 |
|
583 | 0 | // This would be cleaner if we had an nsRefPtr constructor that took |
584 | 0 | // a RefPtr<Derived>. |
585 | 0 | ReleaseOutsideLock(RefPtr<nsISupports>(chunk)); |
586 | 0 |
|
587 | 0 | iter.Remove(); |
588 | 0 | } |
589 | 0 |
|
590 | 0 | return NS_OK; |
591 | 0 | } |
592 | 0 | } |
593 | 0 | } |
594 | 0 |
|
595 | 0 | if (listener) { |
596 | 0 | listener->OnFileReady(retval, isNew); |
597 | 0 | return NS_OK; |
598 | 0 | } |
599 | 0 | |
600 | 0 | MOZ_ASSERT(NS_SUCCEEDED(aResult)); |
601 | 0 | MOZ_ASSERT(!mMetadata); |
602 | 0 | MOZ_ASSERT(mListener); |
603 | 0 |
|
604 | 0 | mMetadata = new CacheFileMetadata(mHandle, mKey); |
605 | 0 |
|
606 | 0 | rv = mMetadata->ReadMetadata(this); |
607 | 0 | if (NS_FAILED(rv)) { |
608 | 0 | mListener.swap(listener); |
609 | 0 | listener->OnFileReady(rv, false); |
610 | 0 | } |
611 | 0 |
|
612 | 0 | return NS_OK; |
613 | 0 | } |
614 | | |
615 | | nsresult |
616 | | CacheFile::OnDataWritten(CacheFileHandle *aHandle, const char *aBuf, |
617 | | nsresult aResult) |
618 | 0 | { |
619 | 0 | MOZ_CRASH("CacheFile::OnDataWritten should not be called!"); |
620 | 0 | return NS_ERROR_UNEXPECTED; |
621 | 0 | } |
622 | | |
623 | | nsresult |
624 | | CacheFile::OnDataRead(CacheFileHandle *aHandle, char *aBuf, nsresult aResult) |
625 | 0 | { |
626 | 0 | MOZ_CRASH("CacheFile::OnDataRead should not be called!"); |
627 | 0 | return NS_ERROR_UNEXPECTED; |
628 | 0 | } |
629 | | |
630 | | nsresult |
631 | | CacheFile::OnMetadataRead(nsresult aResult) |
632 | 0 | { |
633 | 0 | MOZ_ASSERT(mListener); |
634 | 0 |
|
635 | 0 | LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]", |
636 | 0 | this, static_cast<uint32_t>(aResult))); |
637 | 0 |
|
638 | 0 | bool isNew = false; |
639 | 0 | if (NS_SUCCEEDED(aResult)) { |
640 | 0 | mPinned = mMetadata->Pinned(); |
641 | 0 | mReady = true; |
642 | 0 | mDataSize = mMetadata->Offset(); |
643 | 0 | if (mDataSize == 0 && mMetadata->ElementsSize() == 0) { |
644 | 0 | isNew = true; |
645 | 0 | mMetadata->MarkDirty(); |
646 | 0 | } else { |
647 | 0 | const char *altData = mMetadata->GetElement(CacheFileUtils::kAltDataKey); |
648 | 0 | if (altData && |
649 | 0 | (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo( |
650 | 0 | altData, &mAltDataOffset, nullptr)) || |
651 | 0 | (mAltDataOffset > mDataSize))) { |
652 | 0 | // alt-metadata cannot be parsed or alt-data offset is invalid |
653 | 0 | mMetadata->InitEmptyMetadata(); |
654 | 0 | isNew = true; |
655 | 0 | mAltDataOffset = -1; |
656 | 0 | mDataSize = 0; |
657 | 0 | } else { |
658 | 0 | CacheFileAutoLock lock(this); |
659 | 0 | PreloadChunks(0); |
660 | 0 | } |
661 | 0 | } |
662 | 0 |
|
663 | 0 | InitIndexEntry(); |
664 | 0 | } |
665 | 0 |
|
666 | 0 | nsCOMPtr<CacheFileListener> listener; |
667 | 0 | mListener.swap(listener); |
668 | 0 | listener->OnFileReady(aResult, isNew); |
669 | 0 | return NS_OK; |
670 | 0 | } |
671 | | |
672 | | nsresult |
673 | | CacheFile::OnMetadataWritten(nsresult aResult) |
674 | 0 | { |
675 | 0 | CacheFileAutoLock lock(this); |
676 | 0 |
|
677 | 0 | LOG(("CacheFile::OnMetadataWritten() [this=%p, rv=0x%08" PRIx32 "]", |
678 | 0 | this, static_cast<uint32_t>(aResult))); |
679 | 0 |
|
680 | 0 | MOZ_ASSERT(mWritingMetadata); |
681 | 0 | mWritingMetadata = false; |
682 | 0 |
|
683 | 0 | MOZ_ASSERT(!mMemoryOnly); |
684 | 0 | MOZ_ASSERT(!mOpeningFile); |
685 | 0 |
|
686 | 0 | if (NS_WARN_IF(NS_FAILED(aResult))) { |
687 | 0 | // TODO close streams with an error ??? |
688 | 0 | SetError(aResult); |
689 | 0 | } |
690 | 0 |
|
691 | 0 | if (mOutput || mInputs.Length() || mChunks.Count()) |
692 | 0 | return NS_OK; |
693 | 0 | |
694 | 0 | if (IsDirty()) |
695 | 0 | WriteMetadataIfNeededLocked(); |
696 | 0 |
|
697 | 0 | if (!mWritingMetadata) { |
698 | 0 | LOG(("CacheFile::OnMetadataWritten() - Releasing file handle [this=%p]", |
699 | 0 | this)); |
700 | 0 | CacheFileIOManager::ReleaseNSPRHandle(mHandle); |
701 | 0 | } |
702 | 0 |
|
703 | 0 | return NS_OK; |
704 | 0 | } |
705 | | |
706 | | nsresult |
707 | | CacheFile::OnFileDoomed(CacheFileHandle *aHandle, nsresult aResult) |
708 | 0 | { |
709 | 0 | nsCOMPtr<CacheFileListener> listener; |
710 | 0 |
|
711 | 0 | { |
712 | 0 | CacheFileAutoLock lock(this); |
713 | 0 |
|
714 | 0 | MOZ_ASSERT(mListener); |
715 | 0 |
|
716 | 0 | LOG(("CacheFile::OnFileDoomed() [this=%p, rv=0x%08" PRIx32 ", handle=%p]", |
717 | 0 | this, static_cast<uint32_t>(aResult), aHandle)); |
718 | 0 |
|
719 | 0 | mListener.swap(listener); |
720 | 0 | } |
721 | 0 |
|
722 | 0 | listener->OnFileDoomed(aResult); |
723 | 0 | return NS_OK; |
724 | 0 | } |
725 | | |
726 | | nsresult |
727 | | CacheFile::OnEOFSet(CacheFileHandle *aHandle, nsresult aResult) |
728 | 0 | { |
729 | 0 | MOZ_CRASH("CacheFile::OnEOFSet should not be called!"); |
730 | 0 | return NS_ERROR_UNEXPECTED; |
731 | 0 | } |
732 | | |
733 | | nsresult |
734 | | CacheFile::OnFileRenamed(CacheFileHandle *aHandle, nsresult aResult) |
735 | 0 | { |
736 | 0 | MOZ_CRASH("CacheFile::OnFileRenamed should not be called!"); |
737 | 0 | return NS_ERROR_UNEXPECTED; |
738 | 0 | } |
739 | | |
740 | | bool CacheFile::IsKilled() |
741 | 0 | { |
742 | 0 | bool killed = mKill; |
743 | 0 | if (killed) { |
744 | 0 | LOG(("CacheFile is killed, this=%p", this)); |
745 | 0 | } |
746 | 0 |
|
747 | 0 | return killed; |
748 | 0 | } |
749 | | |
750 | | nsresult |
751 | | CacheFile::OpenInputStream(nsICacheEntry *aEntryHandle, nsIInputStream **_retval) |
752 | 0 | { |
753 | 0 | CacheFileAutoLock lock(this); |
754 | 0 |
|
755 | 0 | MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile); |
756 | 0 |
|
757 | 0 | if (!mReady) { |
758 | 0 | LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]", |
759 | 0 | this)); |
760 | 0 |
|
761 | 0 | return NS_ERROR_NOT_AVAILABLE; |
762 | 0 | } |
763 | 0 |
|
764 | 0 | if (NS_FAILED(mStatus)) { |
765 | 0 | LOG(("CacheFile::OpenInputStream() - CacheFile is in a failure state " |
766 | 0 | "[this=%p, status=0x%08" PRIx32 "]", this, static_cast<uint32_t>(mStatus))); |
767 | 0 |
|
768 | 0 | // Don't allow opening the input stream when this CacheFile is in |
769 | 0 | // a failed state. This is the only way to protect consumers correctly |
770 | 0 | // from reading a broken entry. When the file is in the failed state, |
771 | 0 | // it's also doomed, so reopening the entry won't make any difference - |
772 | 0 | // the data will still be inaccessible. Note that for just doomed |
773 | 0 | // files, we must allow reading the data. |
774 | 0 | return mStatus; |
775 | 0 | } |
776 | 0 |
|
777 | 0 | // Once we open input stream we no longer allow preloading of chunks without |
778 | 0 | // input stream, i.e. we will no longer keep first few chunks preloaded when |
779 | 0 | // the last input stream is closed. |
780 | 0 | mPreloadWithoutInputStreams = false; |
781 | 0 |
|
782 | 0 | CacheFileInputStream *input = new CacheFileInputStream(this, aEntryHandle, |
783 | 0 | false); |
784 | 0 | LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]", |
785 | 0 | input, this)); |
786 | 0 |
|
787 | 0 | mInputs.AppendElement(input); |
788 | 0 | NS_ADDREF(input); |
789 | 0 |
|
790 | 0 | mDataAccessed = true; |
791 | 0 | NS_ADDREF(*_retval = input); |
792 | 0 | return NS_OK; |
793 | 0 | } |
794 | | |
795 | | nsresult |
796 | | CacheFile::OpenAlternativeInputStream(nsICacheEntry *aEntryHandle, |
797 | | const char *aAltDataType, |
798 | | nsIInputStream **_retval) |
799 | 0 | { |
800 | 0 | CacheFileAutoLock lock(this); |
801 | 0 |
|
802 | 0 | MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile); |
803 | 0 |
|
804 | 0 | nsresult rv; |
805 | 0 |
|
806 | 0 | if (NS_WARN_IF(!mReady)) { |
807 | 0 | LOG(("CacheFile::OpenAlternativeInputStream() - CacheFile is not ready " |
808 | 0 | "[this=%p]", this)); |
809 | 0 | return NS_ERROR_NOT_AVAILABLE; |
810 | 0 | } |
811 | 0 |
|
812 | 0 | if (mAltDataOffset == -1) { |
813 | 0 | LOG(("CacheFile::OpenAlternativeInputStream() - Alternative data is not " |
814 | 0 | "available [this=%p]", this)); |
815 | 0 | return NS_ERROR_NOT_AVAILABLE; |
816 | 0 | } |
817 | 0 |
|
818 | 0 | if (NS_FAILED(mStatus)) { |
819 | 0 | LOG(("CacheFile::OpenAlternativeInputStream() - CacheFile is in a failure " |
820 | 0 | "state [this=%p, status=0x%08" PRIx32 "]", this, static_cast<uint32_t>(mStatus))); |
821 | 0 |
|
822 | 0 | // Don't allow opening the input stream when this CacheFile is in |
823 | 0 | // a failed state. This is the only way to protect consumers correctly |
824 | 0 | // from reading a broken entry. When the file is in the failed state, |
825 | 0 | // it's also doomed, so reopening the entry won't make any difference - |
826 | 0 | // the data will still be inaccessible. Note that for just doomed |
827 | 0 | // files, we must allow reading the data. |
828 | 0 | return mStatus; |
829 | 0 | } |
830 | 0 |
|
831 | 0 | const char *altData = mMetadata->GetElement(CacheFileUtils::kAltDataKey); |
832 | 0 | MOZ_ASSERT(altData, "alt-metadata should exist but was not found!"); |
833 | 0 | if (NS_WARN_IF(!altData)) { |
834 | 0 | LOG(("CacheFile::OpenAlternativeInputStream() - alt-metadata not found but " |
835 | 0 | "alt-data exists according to mAltDataOffset! [this=%p, ]", this)); |
836 | 0 | return NS_ERROR_NOT_AVAILABLE; |
837 | 0 | } |
838 | 0 |
|
839 | 0 | int64_t offset; |
840 | 0 | nsCString availableAltData; |
841 | 0 | rv = CacheFileUtils::ParseAlternativeDataInfo(altData, &offset, |
842 | 0 | &availableAltData); |
843 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
844 | 0 | MOZ_ASSERT(false, "alt-metadata unexpectedly failed to parse"); |
845 | 0 | LOG(("CacheFile::OpenAlternativeInputStream() - Cannot parse alternative " |
846 | 0 | "metadata! [this=%p]", this)); |
847 | 0 | return rv; |
848 | 0 | } |
849 | 0 |
|
850 | 0 | if (availableAltData != aAltDataType) { |
851 | 0 | LOG(("CacheFile::OpenAlternativeInputStream() - Alternative data is of a " |
852 | 0 | "different type than requested [this=%p, availableType=%s, " |
853 | 0 | "requestedType=%s]", this, availableAltData.get(), aAltDataType)); |
854 | 0 | return NS_ERROR_NOT_AVAILABLE; |
855 | 0 | } |
856 | 0 |
|
857 | 0 | // mAltDataOffset must be in sync with what is stored in metadata |
858 | 0 | MOZ_ASSERT(mAltDataOffset == offset); |
859 | 0 |
|
860 | 0 | // Once we open input stream we no longer allow preloading of chunks without |
861 | 0 | // input stream, i.e. we will no longer keep first few chunks preloaded when |
862 | 0 | // the last input stream is closed. |
863 | 0 | mPreloadWithoutInputStreams = false; |
864 | 0 |
|
865 | 0 | CacheFileInputStream *input = new CacheFileInputStream(this, aEntryHandle, true); |
866 | 0 |
|
867 | 0 | LOG(("CacheFile::OpenAlternativeInputStream() - Creating new input stream %p " |
868 | 0 | "[this=%p]", input, this)); |
869 | 0 |
|
870 | 0 | mInputs.AppendElement(input); |
871 | 0 | NS_ADDREF(input); |
872 | 0 |
|
873 | 0 | mDataAccessed = true; |
874 | 0 | NS_ADDREF(*_retval = input); |
875 | 0 | return NS_OK; |
876 | 0 | } |
877 | | |
878 | | nsresult |
879 | | CacheFile::OpenOutputStream(CacheOutputCloseListener *aCloseListener, nsIOutputStream **_retval) |
880 | 0 | { |
881 | 0 | CacheFileAutoLock lock(this); |
882 | 0 |
|
883 | 0 | MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile); |
884 | 0 |
|
885 | 0 | nsresult rv; |
886 | 0 |
|
887 | 0 | if (!mReady) { |
888 | 0 | LOG(("CacheFile::OpenOutputStream() - CacheFile is not ready [this=%p]", |
889 | 0 | this)); |
890 | 0 |
|
891 | 0 | return NS_ERROR_NOT_AVAILABLE; |
892 | 0 | } |
893 | 0 |
|
894 | 0 | if (mOutput) { |
895 | 0 | LOG(("CacheFile::OpenOutputStream() - We already have output stream %p " |
896 | 0 | "[this=%p]", mOutput, this)); |
897 | 0 |
|
898 | 0 | return NS_ERROR_NOT_AVAILABLE; |
899 | 0 | } |
900 | 0 |
|
901 | 0 | if (NS_FAILED(mStatus)) { |
902 | 0 | LOG(("CacheFile::OpenOutputStream() - CacheFile is in a failure state " |
903 | 0 | "[this=%p, status=0x%08" PRIx32 "]", this, |
904 | 0 | static_cast<uint32_t>(mStatus))); |
905 | 0 |
|
906 | 0 | // The CacheFile is already doomed. It makes no sense to allow writing any |
907 | 0 | // data to such an entry. |
908 | 0 | return mStatus; |
909 | 0 | } |
910 | 0 |
|
911 | 0 | // Fail if there is any input stream opened for alternative data |
912 | 0 | for (uint32_t i = 0; i < mInputs.Length(); ++i) { |
913 | 0 | if (mInputs[i]->IsAlternativeData()) { |
914 | 0 | return NS_ERROR_NOT_AVAILABLE; |
915 | 0 | } |
916 | 0 | } |
917 | 0 |
|
918 | 0 | if (mAltDataOffset != -1) { |
919 | 0 | // Remove alt-data |
920 | 0 | rv = Truncate(mAltDataOffset); |
921 | 0 | if (NS_FAILED(rv)) { |
922 | 0 | LOG(("CacheFile::OpenOutputStream() - Truncating alt-data failed " |
923 | 0 | "[rv=0x%08" PRIx32 "]", static_cast<uint32_t>(rv))); |
924 | 0 | return rv; |
925 | 0 | } |
926 | 0 | SetAltMetadata(nullptr); |
927 | 0 | mAltDataOffset = -1; |
928 | 0 | } |
929 | 0 |
|
930 | 0 | // Once we open output stream we no longer allow preloading of chunks without |
931 | 0 | // input stream. There is no reason to believe that some input stream will be |
932 | 0 | // opened soon. Otherwise we would cache unused chunks of all newly created |
933 | 0 | // entries until the CacheFile is destroyed. |
934 | 0 | mPreloadWithoutInputStreams = false; |
935 | 0 |
|
936 | 0 | mOutput = new CacheFileOutputStream(this, aCloseListener, false); |
937 | 0 |
|
938 | 0 | LOG(("CacheFile::OpenOutputStream() - Creating new output stream %p " |
939 | 0 | "[this=%p]", mOutput, this)); |
940 | 0 |
|
941 | 0 | mDataAccessed = true; |
942 | 0 | NS_ADDREF(*_retval = mOutput); |
943 | 0 | return NS_OK; |
944 | 0 | } |
945 | | |
946 | | nsresult |
947 | | CacheFile::OpenAlternativeOutputStream(CacheOutputCloseListener *aCloseListener, |
948 | | const char *aAltDataType, |
949 | | nsIOutputStream **_retval) |
950 | 0 | { |
951 | 0 | CacheFileAutoLock lock(this); |
952 | 0 |
|
953 | 0 | MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile); |
954 | 0 |
|
955 | 0 | if (!mReady) { |
956 | 0 | LOG(("CacheFile::OpenAlternativeOutputStream() - CacheFile is not ready " |
957 | 0 | "[this=%p]", this)); |
958 | 0 |
|
959 | 0 | return NS_ERROR_NOT_AVAILABLE; |
960 | 0 | } |
961 | 0 |
|
962 | 0 | if (mOutput) { |
963 | 0 | LOG(("CacheFile::OpenAlternativeOutputStream() - We already have output " |
964 | 0 | "stream %p [this=%p]", mOutput, this)); |
965 | 0 |
|
966 | 0 | return NS_ERROR_NOT_AVAILABLE; |
967 | 0 | } |
968 | 0 |
|
969 | 0 | if (NS_FAILED(mStatus)) { |
970 | 0 | LOG(("CacheFile::OpenAlternativeOutputStream() - CacheFile is in a failure " |
971 | 0 | "state [this=%p, status=0x%08" PRIx32 "]", this, |
972 | 0 | static_cast<uint32_t>(mStatus))); |
973 | 0 |
|
974 | 0 | // The CacheFile is already doomed. It makes no sense to allow writing any |
975 | 0 | // data to such an entry. |
976 | 0 | return mStatus; |
977 | 0 | } |
978 | 0 |
|
979 | 0 | // Fail if there is any input stream opened for alternative data |
980 | 0 | for (uint32_t i = 0; i < mInputs.Length(); ++i) { |
981 | 0 | if (mInputs[i]->IsAlternativeData()) { |
982 | 0 | return NS_ERROR_NOT_AVAILABLE; |
983 | 0 | } |
984 | 0 | } |
985 | 0 |
|
986 | 0 | nsresult rv; |
987 | 0 |
|
988 | 0 | if (mAltDataOffset != -1) { |
989 | 0 | // Truncate old alt-data |
990 | 0 | rv = Truncate(mAltDataOffset); |
991 | 0 | if (NS_FAILED(rv)) { |
992 | 0 | LOG(("CacheFile::OpenAlternativeOutputStream() - Truncating old alt-data " |
993 | 0 | "failed [rv=0x%08" PRIx32 "]", static_cast<uint32_t>(rv))); |
994 | 0 | return rv; |
995 | 0 | } |
996 | 0 | } else { |
997 | 0 | mAltDataOffset = mDataSize; |
998 | 0 | } |
999 | 0 |
|
1000 | 0 | nsAutoCString altMetadata; |
1001 | 0 | CacheFileUtils::BuildAlternativeDataInfo(aAltDataType, mAltDataOffset, |
1002 | 0 | altMetadata); |
1003 | 0 | rv = SetAltMetadata(altMetadata.get()); |
1004 | 0 | if (NS_FAILED(rv)) { |
1005 | 0 | LOG(("CacheFile::OpenAlternativeOutputStream() - Set Metadata for alt-data " |
1006 | 0 | "failed [rv=0x%08" PRIx32 "]", static_cast<uint32_t>(rv))); |
1007 | 0 | return rv; |
1008 | 0 | } |
1009 | 0 |
|
1010 | 0 | // Once we open output stream we no longer allow preloading of chunks without |
1011 | 0 | // input stream. There is no reason to believe that some input stream will be |
1012 | 0 | // opened soon. Otherwise we would cache unused chunks of all newly created |
1013 | 0 | // entries until the CacheFile is destroyed. |
1014 | 0 | mPreloadWithoutInputStreams = false; |
1015 | 0 |
|
1016 | 0 | mOutput = new CacheFileOutputStream(this, aCloseListener, true); |
1017 | 0 |
|
1018 | 0 | LOG(("CacheFile::OpenAlternativeOutputStream() - Creating new output stream " |
1019 | 0 | "%p [this=%p]", mOutput, this)); |
1020 | 0 |
|
1021 | 0 | mDataAccessed = true; |
1022 | 0 | NS_ADDREF(*_retval = mOutput); |
1023 | 0 | return NS_OK; |
1024 | 0 | } |
1025 | | |
1026 | | nsresult |
1027 | | CacheFile::SetMemoryOnly() |
1028 | 0 | { |
1029 | 0 | LOG(("CacheFile::SetMemoryOnly() mMemoryOnly=%d [this=%p]", |
1030 | 0 | mMemoryOnly, this)); |
1031 | 0 |
|
1032 | 0 | if (mMemoryOnly) |
1033 | 0 | return NS_OK; |
1034 | 0 | |
1035 | 0 | MOZ_ASSERT(mReady); |
1036 | 0 |
|
1037 | 0 | if (!mReady) { |
1038 | 0 | LOG(("CacheFile::SetMemoryOnly() - CacheFile is not ready [this=%p]", |
1039 | 0 | this)); |
1040 | 0 |
|
1041 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1042 | 0 | } |
1043 | 0 |
|
1044 | 0 | if (mDataAccessed) { |
1045 | 0 | LOG(("CacheFile::SetMemoryOnly() - Data was already accessed [this=%p]", this)); |
1046 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1047 | 0 | } |
1048 | 0 |
|
1049 | 0 | // TODO what to do when this isn't a new entry and has an existing metadata??? |
1050 | 0 | mMemoryOnly = true; |
1051 | 0 | return NS_OK; |
1052 | 0 | } |
1053 | | |
1054 | | nsresult |
1055 | | CacheFile::Doom(CacheFileListener *aCallback) |
1056 | 0 | { |
1057 | 0 | LOG(("CacheFile::Doom() [this=%p, listener=%p]", this, aCallback)); |
1058 | 0 |
|
1059 | 0 | CacheFileAutoLock lock(this); |
1060 | 0 |
|
1061 | 0 | return DoomLocked(aCallback); |
1062 | 0 | } |
1063 | | |
1064 | | nsresult |
1065 | | CacheFile::DoomLocked(CacheFileListener *aCallback) |
1066 | 0 | { |
1067 | 0 | MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile); |
1068 | 0 |
|
1069 | 0 | LOG(("CacheFile::DoomLocked() [this=%p, listener=%p]", this, aCallback)); |
1070 | 0 |
|
1071 | 0 | nsresult rv = NS_OK; |
1072 | 0 |
|
1073 | 0 | if (mMemoryOnly) { |
1074 | 0 | return NS_ERROR_FILE_NOT_FOUND; |
1075 | 0 | } |
1076 | 0 | |
1077 | 0 | if (mHandle && mHandle->IsDoomed()) { |
1078 | 0 | return NS_ERROR_FILE_NOT_FOUND; |
1079 | 0 | } |
1080 | 0 | |
1081 | 0 | nsCOMPtr<CacheFileIOListener> listener; |
1082 | 0 | if (aCallback || !mHandle) { |
1083 | 0 | listener = new DoomFileHelper(aCallback); |
1084 | 0 | } |
1085 | 0 | if (mHandle) { |
1086 | 0 | rv = CacheFileIOManager::DoomFile(mHandle, listener); |
1087 | 0 | } else if (mOpeningFile) { |
1088 | 0 | mDoomAfterOpenListener = listener; |
1089 | 0 | } |
1090 | 0 |
|
1091 | 0 | return rv; |
1092 | 0 | } |
1093 | | |
1094 | | nsresult |
1095 | | CacheFile::ThrowMemoryCachedData() |
1096 | 0 | { |
1097 | 0 | CacheFileAutoLock lock(this); |
1098 | 0 |
|
1099 | 0 | LOG(("CacheFile::ThrowMemoryCachedData() [this=%p]", this)); |
1100 | 0 |
|
1101 | 0 | if (mMemoryOnly) { |
1102 | 0 | // This method should not be called when the CacheFile was initialized as |
1103 | 0 | // memory-only, but it can be called when the CacheFile ends up memory-only |
1104 | 0 | // due to e.g. an IO failure, since CacheEntry doesn't know about it. |
1105 | 0 | LOG(("CacheFile::ThrowMemoryCachedData() - Ignoring request because the " |
1106 | 0 | "entry is memory-only. [this=%p]", this)); |
1107 | 0 |
|
1108 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1109 | 0 | } |
1110 | 0 |
|
1111 | 0 | if (mOpeningFile) { |
1112 | 0 | // mayhemer, note: we shouldn't get here, since CacheEntry prevents loading |
1113 | 0 | // entries from being purged. |
1114 | 0 |
|
1115 | 0 | LOG(("CacheFile::ThrowMemoryCachedData() - Ignoring request because the " |
1116 | 0 | "entry is still opening the file [this=%p]", this)); |
1117 | 0 |
|
1118 | 0 | return NS_ERROR_ABORT; |
1119 | 0 | } |
1120 | 0 |
|
1121 | 0 | // We cannot release all cached chunks since we need to keep preloaded chunks |
1122 | 0 | // in memory. See initialization of mPreloadChunkCount for explanation. |
1123 | 0 | CleanUpCachedChunks(); |
1124 | 0 |
|
1125 | 0 | return NS_OK; |
1126 | 0 | } |
1127 | | |
1128 | | nsresult |
1129 | | CacheFile::GetElement(const char *aKey, char **_retval) |
1130 | 0 | { |
1131 | 0 | CacheFileAutoLock lock(this); |
1132 | 0 | MOZ_ASSERT(mMetadata); |
1133 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1134 | 0 |
|
1135 | 0 | const char *value; |
1136 | 0 | value = mMetadata->GetElement(aKey); |
1137 | 0 | if (!value) |
1138 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1139 | 0 | |
1140 | 0 | *_retval = NS_xstrdup(value); |
1141 | 0 | return NS_OK; |
1142 | 0 | } |
1143 | | |
1144 | | nsresult |
1145 | | CacheFile::SetElement(const char *aKey, const char *aValue) |
1146 | 0 | { |
1147 | 0 | CacheFileAutoLock lock(this); |
1148 | 0 |
|
1149 | 0 | LOG(("CacheFile::SetElement() this=%p", this)); |
1150 | 0 |
|
1151 | 0 | MOZ_ASSERT(mMetadata); |
1152 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1153 | 0 |
|
1154 | 0 | if (!strcmp(aKey, CacheFileUtils::kAltDataKey)) { |
1155 | 0 | NS_ERROR("alt-data element is reserved for internal use and must not be " |
1156 | 0 | "changed via CacheFile::SetElement()"); |
1157 | 0 | return NS_ERROR_FAILURE; |
1158 | 0 | } |
1159 | 0 |
|
1160 | 0 | PostWriteTimer(); |
1161 | 0 | return mMetadata->SetElement(aKey, aValue); |
1162 | 0 | } |
1163 | | |
1164 | | nsresult |
1165 | | CacheFile::VisitMetaData(nsICacheEntryMetaDataVisitor *aVisitor) |
1166 | 0 | { |
1167 | 0 | CacheFileAutoLock lock(this); |
1168 | 0 | MOZ_ASSERT(mMetadata); |
1169 | 0 | MOZ_ASSERT(mReady); |
1170 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1171 | 0 |
|
1172 | 0 | return mMetadata->Visit(aVisitor); |
1173 | 0 | } |
1174 | | |
1175 | | nsresult |
1176 | | CacheFile::ElementsSize(uint32_t *_retval) |
1177 | 0 | { |
1178 | 0 | CacheFileAutoLock lock(this); |
1179 | 0 |
|
1180 | 0 | if (!mMetadata) |
1181 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1182 | 0 | |
1183 | 0 | *_retval = mMetadata->ElementsSize(); |
1184 | 0 | return NS_OK; |
1185 | 0 | } |
1186 | | |
1187 | | nsresult |
1188 | | CacheFile::SetExpirationTime(uint32_t aExpirationTime) |
1189 | 0 | { |
1190 | 0 | CacheFileAutoLock lock(this); |
1191 | 0 |
|
1192 | 0 | LOG(("CacheFile::SetExpirationTime() this=%p, expiration=%u", |
1193 | 0 | this, aExpirationTime)); |
1194 | 0 |
|
1195 | 0 | MOZ_ASSERT(mMetadata); |
1196 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1197 | 0 |
|
1198 | 0 | PostWriteTimer(); |
1199 | 0 |
|
1200 | 0 | if (mHandle && !mHandle->IsDoomed()) |
1201 | 0 | CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &aExpirationTime, nullptr, nullptr, nullptr); |
1202 | 0 |
|
1203 | 0 | return mMetadata->SetExpirationTime(aExpirationTime); |
1204 | 0 | } |
1205 | | |
1206 | | nsresult |
1207 | | CacheFile::GetExpirationTime(uint32_t *_retval) |
1208 | 0 | { |
1209 | 0 | CacheFileAutoLock lock(this); |
1210 | 0 | MOZ_ASSERT(mMetadata); |
1211 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1212 | 0 |
|
1213 | 0 | return mMetadata->GetExpirationTime(_retval); |
1214 | 0 | } |
1215 | | |
1216 | | nsresult |
1217 | | CacheFile::SetFrecency(uint32_t aFrecency) |
1218 | 0 | { |
1219 | 0 | CacheFileAutoLock lock(this); |
1220 | 0 |
|
1221 | 0 | LOG(("CacheFile::SetFrecency() this=%p, frecency=%u", |
1222 | 0 | this, aFrecency)); |
1223 | 0 |
|
1224 | 0 | MOZ_ASSERT(mMetadata); |
1225 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1226 | 0 |
|
1227 | 0 | PostWriteTimer(); |
1228 | 0 |
|
1229 | 0 | if (mHandle && !mHandle->IsDoomed()) |
1230 | 0 | CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr, nullptr, nullptr); |
1231 | 0 |
|
1232 | 0 | return mMetadata->SetFrecency(aFrecency); |
1233 | 0 | } |
1234 | | |
1235 | | nsresult |
1236 | | CacheFile::GetFrecency(uint32_t *_retval) |
1237 | 0 | { |
1238 | 0 | CacheFileAutoLock lock(this); |
1239 | 0 | MOZ_ASSERT(mMetadata); |
1240 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1241 | 0 |
|
1242 | 0 | return mMetadata->GetFrecency(_retval); |
1243 | 0 | } |
1244 | | |
1245 | | nsresult CacheFile::SetNetworkTimes(uint64_t aOnStartTime, uint64_t aOnStopTime) |
1246 | 0 | { |
1247 | 0 | CacheFileAutoLock lock(this); |
1248 | 0 |
|
1249 | 0 | LOG(("CacheFile::SetNetworkTimes() this=%p, aOnStartTime=%" PRIu64 |
1250 | 0 | ", aOnStopTime=%" PRIu64 "", this, aOnStartTime, aOnStopTime)); |
1251 | 0 |
|
1252 | 0 | MOZ_ASSERT(mMetadata); |
1253 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1254 | 0 |
|
1255 | 0 | PostWriteTimer(); |
1256 | 0 |
|
1257 | 0 | nsAutoCString onStartTime; |
1258 | 0 | onStartTime.AppendInt(aOnStartTime); |
1259 | 0 | nsresult rv = mMetadata->SetElement("net-response-time-onstart", onStartTime.get()); |
1260 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
1261 | 0 | return rv; |
1262 | 0 | } |
1263 | 0 | |
1264 | 0 | nsAutoCString onStopTime; |
1265 | 0 | onStopTime.AppendInt(aOnStopTime); |
1266 | 0 | rv = mMetadata->SetElement("net-response-time-onstop", onStopTime.get()); |
1267 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
1268 | 0 | return rv; |
1269 | 0 | } |
1270 | 0 | |
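 | | // The index entry stores these times as 16-bit values; anything larger |
 | | // is clamped to kIndexTimeOutOfBound. |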
1271 | 0 | uint16_t onStartTime16 = aOnStartTime <= kIndexTimeOutOfBound ? aOnStartTime : kIndexTimeOutOfBound; |
1272 | 0 | uint16_t onStopTime16 = aOnStopTime <= kIndexTimeOutOfBound ? aOnStopTime : kIndexTimeOutOfBound; |
1273 | 0 |
|
1274 | 0 | if (mHandle && !mHandle->IsDoomed()) { |
1275 | 0 | CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, nullptr, |
1276 | 0 | &onStartTime16, &onStopTime16); |
1277 | 0 | } |
1278 | 0 | return NS_OK; |
1279 | 0 | } |
1280 | | |
1281 | | nsresult CacheFile::GetOnStartTime(uint64_t *_retval) |
1282 | 0 | { |
1283 | 0 | CacheFileAutoLock lock(this); |
1284 | 0 |
|
1285 | 0 | MOZ_ASSERT(mMetadata); |
1286 | 0 | const char *onStartTimeStr = mMetadata->GetElement("net-response-time-onstart"); |
1287 | 0 | if (!onStartTimeStr) { |
1288 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1289 | 0 | } |
1290 | 0 | nsresult rv; |
1291 | 0 | *_retval = nsDependentCString(onStartTimeStr).ToInteger64(&rv); |
1292 | 0 | MOZ_ASSERT(NS_SUCCEEDED(rv)); |
1293 | 0 | return NS_OK; |
1294 | 0 | } |
1295 | | |
1296 | | nsresult CacheFile::GetOnStopTime(uint64_t *_retval) |
1297 | 0 | { |
1298 | 0 | CacheFileAutoLock lock(this); |
1299 | 0 |
|
1300 | 0 | MOZ_ASSERT(mMetadata); |
1301 | 0 | const char *onStopTimeStr = mMetadata->GetElement("net-response-time-onstop"); |
1302 | 0 | if (!onStopTimeStr) { |
1303 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1304 | 0 | } |
1305 | 0 | nsresult rv; |
1306 | 0 | *_retval = nsDependentCString(onStopTimeStr).ToInteger64(&rv); |
1307 | 0 | MOZ_ASSERT(NS_SUCCEEDED(rv)); |
1308 | 0 | return NS_OK; |
1309 | 0 | } |
1310 | | |
1311 | | nsresult |
1312 | | CacheFile::SetAltMetadata(const char* aAltMetadata) |
1313 | 0 | { |
1314 | 0 | AssertOwnsLock(); |
1315 | 0 | LOG(("CacheFile::SetAltMetadata() this=%p, aAltMetadata=%s", |
1316 | 0 | this, aAltMetadata ? aAltMetadata : "")); |
1317 | 0 |
|
1318 | 0 | MOZ_ASSERT(mMetadata); |
1319 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1320 | 0 |
|
1321 | 0 | PostWriteTimer(); |
1322 | 0 |
|
1323 | 0 | nsresult rv = mMetadata->SetElement(CacheFileUtils::kAltDataKey, aAltMetadata); |
1324 | 0 | bool hasAltData = aAltMetadata ? true : false; |
1325 | 0 |
|
1326 | 0 | if (NS_FAILED(rv)) { |
1327 | 0 | // Removing element shouldn't fail because it doesn't allocate memory. |
1328 | 0 | mMetadata->SetElement(CacheFileUtils::kAltDataKey, nullptr); |
1329 | 0 |
|
1330 | 0 | mAltDataOffset = -1; |
1331 | 0 | hasAltData = false; |
1332 | 0 | } |
1333 | 0 |
|
1334 | 0 | if (mHandle && !mHandle->IsDoomed()) { |
1335 | 0 | CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, &hasAltData, nullptr, nullptr); |
1336 | 0 | } |
1337 | 0 | return rv; |
1338 | 0 | } |
1339 | | |
1340 | | nsresult |
1341 | | CacheFile::GetLastModified(uint32_t *_retval) |
1342 | 0 | { |
1343 | 0 | CacheFileAutoLock lock(this); |
1344 | 0 | MOZ_ASSERT(mMetadata); |
1345 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1346 | 0 |
|
1347 | 0 | return mMetadata->GetLastModified(_retval); |
1348 | 0 | } |
1349 | | |
1350 | | nsresult |
1351 | | CacheFile::GetLastFetched(uint32_t *_retval) |
1352 | 0 | { |
1353 | 0 | CacheFileAutoLock lock(this); |
1354 | 0 | MOZ_ASSERT(mMetadata); |
1355 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1356 | 0 |
|
1357 | 0 | return mMetadata->GetLastFetched(_retval); |
1358 | 0 | } |
1359 | | |
1360 | | nsresult |
1361 | | CacheFile::GetFetchCount(uint32_t *_retval) |
1362 | 0 | { |
1363 | 0 | CacheFileAutoLock lock(this); |
1364 | 0 | MOZ_ASSERT(mMetadata); |
1365 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1366 | 0 |
|
1367 | 0 | return mMetadata->GetFetchCount(_retval); |
1368 | 0 | } |
1369 | | |
1370 | | nsresult |
1371 | | CacheFile::GetDiskStorageSizeInKB(uint32_t *aDiskStorageSize) |
1372 | 0 | { |
1373 | 0 | if (!mHandle) { |
1374 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1375 | 0 | } |
1376 | 0 | |
1377 | 0 | *aDiskStorageSize = mHandle->FileSizeInK(); |
1378 | 0 | return NS_OK; |
1379 | 0 | } |
1380 | | |
1381 | | nsresult |
1382 | | CacheFile::OnFetched() |
1383 | 0 | { |
1384 | 0 | CacheFileAutoLock lock(this); |
1385 | 0 |
|
1386 | 0 | LOG(("CacheFile::OnFetched() this=%p", this)); |
1387 | 0 |
|
1388 | 0 | MOZ_ASSERT(mMetadata); |
1389 | 0 | NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED); |
1390 | 0 |
|
1391 | 0 | PostWriteTimer(); |
1392 | 0 |
|
1393 | 0 | return mMetadata->OnFetched(); |
1394 | 0 | } |
1395 | | |
1396 | | void |
1397 | | CacheFile::Lock() |
1398 | 0 | { |
1399 | 0 | mLock.Lock(); |
1400 | 0 | } |
1401 | | |
1402 | | void |
1403 | | CacheFile::Unlock() |
1404 | 0 | { |
1405 | 0 | // move the elements out of mObjsToRelease |
1406 | 0 | // so that they can be released after we unlock |
1407 | 0 | nsTArray<RefPtr<nsISupports>> objs; |
1408 | 0 | objs.SwapElements(mObjsToRelease); |
1409 | 0 |
|
1410 | 0 | mLock.Unlock(); |
1411 | 0 |
|
1412 | 0 | } |
1413 | | |
1414 | | void |
1415 | | CacheFile::AssertOwnsLock() const |
1416 | 0 | { |
1417 | 0 | mLock.AssertCurrentThreadOwns(); |
1418 | 0 | } |
1419 | | |
1420 | | void |
1421 | | CacheFile::ReleaseOutsideLock(RefPtr<nsISupports> aObject) |
1422 | 0 | { |
1423 | 0 | AssertOwnsLock(); |
1424 | 0 |
|
1425 | 0 | mObjsToRelease.AppendElement(std::move(aObject)); |
1426 | 0 | } |
1427 | | |
1428 | | nsresult |
1429 | | CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller, |
1430 | | CacheFileChunkListener *aCallback, |
1431 | | CacheFileChunk **_retval) |
1432 | 0 | { |
1433 | 0 | AssertOwnsLock(); |
1434 | 0 |
|
1435 | 0 | LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]", |
1436 | 0 | this, aIndex, aCaller, aCallback)); |
1437 | 0 |
|
1438 | 0 | MOZ_ASSERT(mReady); |
1439 | 0 | MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile); |
1440 | 0 | MOZ_ASSERT((aCaller == READER && aCallback) || |
1441 | 0 | (aCaller == WRITER && !aCallback) || |
1442 | 0 | (aCaller == PRELOADER && !aCallback)); |
1443 | 0 |
|
1444 | 0 | // Preload chunks from the disk when this is a disk-backed entry and the |
1445 | 0 | // caller is a reader. |
1446 | 0 | bool preload = !mMemoryOnly && (aCaller == READER); |
1447 | 0 |
|
1448 | 0 | nsresult rv; |
1449 | 0 |
|
1450 | 0 | RefPtr<CacheFileChunk> chunk; |
1451 | 0 | if (mChunks.Get(aIndex, getter_AddRefs(chunk))) { |
1452 | 0 | LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]", |
1453 | 0 | chunk.get(), this)); |
1454 | 0 |
|
1455 | 0 | // Preloader calls this method to preload only non-loaded chunks. |
1456 | 0 | MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!"); |
1457 | 0 |
|
1458 | 0 | // We might get failed chunk between releasing the lock in |
1459 | 0 | // CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read |
1460 | 0 | rv = chunk->GetStatus(); |
1461 | 0 | if (NS_FAILED(rv)) { |
1462 | 0 | SetError(rv); |
1463 | 0 | LOG(("CacheFile::GetChunkLocked() - Found failed chunk in mChunks " |
1464 | 0 | "[this=%p]", this)); |
1465 | 0 | return rv; |
1466 | 0 | } |
1467 | 0 |
|
1468 | 0 | if (chunk->IsReady() || aCaller == WRITER) { |
1469 | 0 | chunk.swap(*_retval); |
1470 | 0 | } else { |
1471 | 0 | rv = QueueChunkListener(aIndex, aCallback); |
1472 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1473 | 0 | } |
1474 | 0 |
|
1475 | 0 | if (preload) { |
1476 | 0 | PreloadChunks(aIndex + 1); |
1477 | 0 | } |
1478 | 0 |
|
1479 | 0 | return NS_OK; |
1480 | 0 | } |
1481 | 0 |
|
1482 | 0 | if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) { |
1483 | 0 | LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]", |
1484 | 0 | chunk.get(), this)); |
1485 | 0 |
|
1486 | 0 | // Preloader calls this method to preload only non-loaded chunks. |
1487 | 0 | MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!"); |
1488 | 0 |
|
1489 | 0 | mChunks.Put(aIndex, chunk); |
1490 | 0 | mCachedChunks.Remove(aIndex); |
1491 | 0 | chunk->mFile = this; |
1492 | 0 | chunk->mActiveChunk = true; |
1493 | 0 |
|
1494 | 0 | MOZ_ASSERT(chunk->IsReady()); |
1495 | 0 |
|
1496 | 0 | chunk.swap(*_retval); |
1497 | 0 |
|
1498 | 0 | if (preload) { |
1499 | 0 | PreloadChunks(aIndex + 1); |
1500 | 0 | } |
1501 | 0 |
|
1502 | 0 | return NS_OK; |
1503 | 0 | } |
1504 | 0 |
|
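 | | // Byte offset at which the requested chunk starts. The branches below |
 | | // handle a chunk that already has data on disk (off < mDataSize), a new |
 | | // chunk appended right at the end (off == mDataSize), and a write past |
 | | // the end that first needs the gap filled with zeroes (off > mDataSize). |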
1505 | 0 | int64_t off = aIndex * static_cast<int64_t>(kChunkSize); |
1506 | 0 |
|
1507 | 0 | if (off < mDataSize) { |
1508 | 0 | // We cannot be here if this is a memory-only entry since the chunk must exist |
1509 | 0 | MOZ_ASSERT(!mMemoryOnly); |
1510 | 0 | if (mMemoryOnly) { |
1511 | 0 | // If this ever really happens it is better to fail rather than crash on |
1512 | 0 | // a null handle. |
1513 | 0 | LOG(("CacheFile::GetChunkLocked() - Unexpected state! Offset < mDataSize " |
1514 | 0 | "for memory-only entry. [this=%p, off=%" PRId64 ", mDataSize=%" PRId64 "]", |
1515 | 0 | this, off, mDataSize)); |
1516 | 0 |
|
1517 | 0 | return NS_ERROR_UNEXPECTED; |
1518 | 0 | } |
1519 | 0 |
|
1520 | 0 | chunk = new CacheFileChunk(this, aIndex, aCaller == WRITER); |
1521 | 0 | mChunks.Put(aIndex, chunk); |
1522 | 0 | chunk->mActiveChunk = true; |
1523 | 0 |
|
1524 | 0 | LOG(("CacheFile::GetChunkLocked() - Reading newly created chunk %p from " |
1525 | 0 | "the disk [this=%p]", chunk.get(), this)); |
1526 | 0 |
|
1527 | 0 | // Read the chunk from the disk |
1528 | 0 | rv = chunk->Read(mHandle, std::min(static_cast<uint32_t>(mDataSize - off), |
1529 | 0 | static_cast<uint32_t>(kChunkSize)), |
1530 | 0 | mMetadata->GetHash(aIndex), this); |
1531 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
1532 | 0 | RemoveChunkInternal(chunk, false); |
1533 | 0 | return rv; |
1534 | 0 | } |
1535 | 0 | |
1536 | 0 | if (aCaller == WRITER) { |
1537 | 0 | chunk.swap(*_retval); |
1538 | 0 | } else if (aCaller != PRELOADER) { |
1539 | 0 | rv = QueueChunkListener(aIndex, aCallback); |
1540 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1541 | 0 | } |
1542 | 0 |
|
1543 | 0 | if (preload) { |
1544 | 0 | PreloadChunks(aIndex + 1); |
1545 | 0 | } |
1546 | 0 |
|
1547 | 0 | return NS_OK; |
1548 | 0 | } else if (off == mDataSize) { |
1549 | 0 | if (aCaller == WRITER) { |
1550 | 0 | // this listener is going to write to the chunk |
1551 | 0 | chunk = new CacheFileChunk(this, aIndex, true); |
1552 | 0 | mChunks.Put(aIndex, chunk); |
1553 | 0 | chunk->mActiveChunk = true; |
1554 | 0 |
|
1555 | 0 | LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]", |
1556 | 0 | chunk.get(), this)); |
1557 | 0 |
|
1558 | 0 | chunk->InitNew(); |
1559 | 0 | mMetadata->SetHash(aIndex, chunk->Hash()); |
1560 | 0 |
|
1561 | 0 | if (HaveChunkListeners(aIndex)) { |
1562 | 0 | rv = NotifyChunkListeners(aIndex, NS_OK, chunk); |
1563 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1564 | 0 | } |
1565 | 0 |
|
1566 | 0 | chunk.swap(*_retval); |
1567 | 0 | return NS_OK; |
1568 | 0 | } |
1569 | 0 | } else { |
1570 | 0 | if (aCaller == WRITER) { |
1571 | 0 | // this chunk was requested by writer, but we need to fill the gap first |
1572 | 0 |
|
1573 | 0 | // Fill the last chunk with zeroes if it is incomplete
1574 | 0 | if (mDataSize % kChunkSize) { |
1575 | 0 | rv = PadChunkWithZeroes(mDataSize / kChunkSize); |
1576 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1577 | 0 |
|
1578 | 0 | MOZ_ASSERT(!(mDataSize % kChunkSize)); |
1579 | 0 | } |
1580 | 0 |
|
1581 | 0 | uint32_t startChunk = mDataSize / kChunkSize; |
1582 | 0 |
|
1583 | 0 | if (mMemoryOnly) { |
1584 | 0 | // We need to create all missing CacheFileChunks if this is a memory-only
1585 | 0 | // entry
1586 | 0 | for (uint32_t i = startChunk ; i < aIndex ; i++) { |
1587 | 0 | rv = PadChunkWithZeroes(i); |
1588 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1589 | 0 | } |
1590 | 0 | } else { |
1591 | 0 | // We don't need to create CacheFileChunk for other empty chunks unless |
1592 | 0 | // there is some input stream waiting for this chunk. |
1593 | 0 |
|
1594 | 0 | if (startChunk != aIndex) { |
1595 | 0 | // Make sure the file contains zeroes at the end of the file |
1596 | 0 | rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle, |
1597 | 0 | startChunk * kChunkSize, |
1598 | 0 | aIndex * kChunkSize, |
1599 | 0 | nullptr); |
1600 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1601 | 0 | } |
1602 | 0 |
|
1603 | 0 | for (uint32_t i = startChunk ; i < aIndex ; i++) { |
1604 | 0 | if (HaveChunkListeners(i)) { |
1605 | 0 | rv = PadChunkWithZeroes(i); |
1606 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1607 | 0 | } else { |
1608 | 0 | mMetadata->SetHash(i, kEmptyChunkHash); |
1609 | 0 | mDataSize = (i + 1) * kChunkSize; |
1610 | 0 | } |
1611 | 0 | } |
1612 | 0 | } |
1613 | 0 |
|
1614 | 0 | MOZ_ASSERT(mDataSize == off); |
1615 | 0 | rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk)); |
1616 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1617 | 0 |
|
1618 | 0 | chunk.swap(*_retval); |
1619 | 0 | return NS_OK; |
1620 | 0 | } |
1621 | 0 | } |
1622 | 0 | |
1623 | 0 | // We can be here only if the caller is the reader, since the writer always
1624 | 0 | // creates a new chunk above and the preloader calls this method to preload
1625 | 0 | // only chunks that are not loaded but do exist.
1626 | 0 | MOZ_ASSERT(aCaller == READER, "Unexpected!"); |
1627 | 0 |
|
1628 | 0 | if (mOutput) { |
1629 | 0 | // the chunk doesn't exist but mOutput may create it |
1630 | 0 | rv = QueueChunkListener(aIndex, aCallback); |
1631 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1632 | 0 | } else { |
1633 | 0 | return NS_ERROR_NOT_AVAILABLE; |
1634 | 0 | } |
1635 | 0 | |
1636 | 0 | return NS_OK; |
1637 | 0 | } |
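// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// GetChunkLocked() maps a chunk index to a file offset and reads at most one
// chunk's worth of data. A standalone version of that arithmetic is below;
// the 256 kB chunk size and the sketch:: names are assumptions made for this
// example, the real constant lives in CacheFileChunk.h.
#include <algorithm>
#include <cstdint>

namespace sketch {

constexpr uint32_t kSketchChunkSize = 256 * 1024;

// Offset of the first byte of chunk aIndex within the entry's data.
inline int64_t ChunkOffset(uint32_t aIndex)
{
  return static_cast<int64_t>(aIndex) * kSketchChunkSize;
}

// How many bytes chunk aIndex holds when the entry contains aDataSize bytes;
// mirrors the std::min() passed to chunk->Read() above.
inline uint32_t ChunkReadLength(uint32_t aIndex, int64_t aDataSize)
{
  int64_t off = ChunkOffset(aIndex);
  if (off >= aDataSize) {
    return 0;  // the chunk lies beyond EOF, nothing to read
  }
  return std::min(static_cast<uint32_t>(aDataSize - off), kSketchChunkSize);
}

} // namespace sketch
// ----------------------------------------------------------------------------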
1638 | | |
1639 | | void |
1640 | | CacheFile::PreloadChunks(uint32_t aIndex) |
1641 | 0 | { |
1642 | 0 | AssertOwnsLock(); |
1643 | 0 |
|
1644 | 0 | uint32_t limit = aIndex + mPreloadChunkCount; |
1645 | 0 |
|
1646 | 0 | for (uint32_t i = aIndex; i < limit; ++i) { |
1647 | 0 | int64_t off = i * static_cast<int64_t>(kChunkSize); |
1648 | 0 |
|
1649 | 0 | if (off >= mDataSize) { |
1650 | 0 | // This chunk is beyond EOF. |
1651 | 0 | return; |
1652 | 0 | } |
1653 | 0 | |
1654 | 0 | if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) { |
1655 | 0 | // This chunk is already in memory or is being read right now. |
1656 | 0 | continue; |
1657 | 0 | } |
1658 | 0 | |
1659 | 0 | LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]", |
1660 | 0 | this, i)); |
1661 | 0 |
|
1662 | 0 | RefPtr<CacheFileChunk> chunk; |
1663 | 0 | GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk)); |
1664 | 0 | // We've checked that we don't have this chunk, so no chunk must be |
1665 | 0 | // returned. |
1666 | 0 | MOZ_ASSERT(!chunk); |
1667 | 0 | } |
1668 | 0 | } |
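// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// PreloadChunks() walks the half-open window [aIndex, aIndex + count) and
// stops at the first chunk that starts beyond EOF. The standalone helper
// below shows only that window computation; the name and parameters are
// illustrative.
#include <cstdint>

namespace sketch {

// Number of chunks starting at aIndex that begin before EOF and are
// therefore candidates for preloading.
inline uint32_t PreloadCandidates(uint32_t aIndex, uint32_t aPreloadCount,
                                  int64_t aDataSize, uint32_t aChunkSize)
{
  uint32_t candidates = 0;
  for (uint32_t i = aIndex; i < aIndex + aPreloadCount; ++i) {
    int64_t off = static_cast<int64_t>(i) * aChunkSize;
    if (off >= aDataSize) {
      break;  // same early return as in PreloadChunks()
    }
    ++candidates;
  }
  return candidates;
}

} // namespace sketch
// ----------------------------------------------------------------------------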
1669 | | |
1670 | | bool |
1671 | | CacheFile::ShouldCacheChunk(uint32_t aIndex) |
1672 | 0 | { |
1673 | 0 | AssertOwnsLock(); |
1674 | 0 |
|
1675 | | #ifdef CACHE_CHUNKS |
1676 | | // We cache all chunks. |
1677 | | return true; |
1678 | | #else |
1679 | |
|
1680 | 0 | if (mPreloadChunkCount != 0 && mInputs.Length() == 0 && |
1681 | 0 | mPreloadWithoutInputStreams && aIndex < mPreloadChunkCount) { |
1682 | 0 | // We don't have any input stream yet, but it is likely that some will be |
1683 | 0 | // opened soon. Keep first mPreloadChunkCount chunks in memory. The |
1684 | 0 | // condition is here instead of in MustKeepCachedChunk() since these |
1685 | 0 | // chunks should be preloaded and can be kept in memory as an optimization, |
1686 | 0 | // but they can be released at any time until they are considered as |
1687 | 0 | // preloaded chunks for any input stream. |
1688 | 0 | return true; |
1689 | 0 | } |
1690 | 0 | |
1691 | 0 | // Cache only chunks that we really need to keep. |
1692 | 0 | return MustKeepCachedChunk(aIndex); |
1693 | 0 | #endif |
1694 | 0 | } |
1695 | | |
1696 | | bool |
1697 | | CacheFile::MustKeepCachedChunk(uint32_t aIndex) |
1698 | 0 | { |
1699 | 0 | AssertOwnsLock(); |
1700 | 0 |
|
1701 | 0 | // We must keep the chunk when this is a memory-only entry or we don't have
1702 | 0 | // a handle yet.
1703 | 0 | if (mMemoryOnly || mOpeningFile) { |
1704 | 0 | return true; |
1705 | 0 | } |
1706 | 0 | |
1707 | 0 | if (mPreloadChunkCount == 0) { |
1708 | 0 | // Preloading of chunks is disabled |
1709 | 0 | return false; |
1710 | 0 | } |
1711 | 0 | |
1712 | 0 | // Check whether this chunk should be considered a preloaded chunk for any
1713 | 0 | // existing input stream.
1714 | 0 | |
1715 | 0 | // maxPos is the position of the last byte in the given chunk |
1716 | 0 | int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1; |
1717 | 0 |
|
1718 | 0 | // minPos is the position of the first byte in a chunk that precedes the given |
1719 | 0 | // chunk by mPreloadChunkCount chunks |
1720 | 0 | int64_t minPos; |
1721 | 0 | if (mPreloadChunkCount >= aIndex) { |
1722 | 0 | minPos = 0; |
1723 | 0 | } else { |
1724 | 0 | minPos = static_cast<int64_t>(aIndex - mPreloadChunkCount) * kChunkSize; |
1725 | 0 | } |
1726 | 0 |
|
1727 | 0 | for (uint32_t i = 0; i < mInputs.Length(); ++i) { |
1728 | 0 | int64_t inputPos = mInputs[i]->GetPosition(); |
1729 | 0 | if (inputPos >= minPos && inputPos <= maxPos) { |
1730 | 0 | return true; |
1731 | 0 | } |
1732 | 0 | } |
1733 | 0 |
|
1734 | 0 | return false; |
1735 | 0 | } |
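// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// The [minPos, maxPos] window above keeps chunk aIndex alive whenever an
// input stream's read position lies within mPreloadChunkCount chunks before
// the end of that chunk. A standalone version of that test; the names and
// the 256 kB chunk size mentioned in the example are assumptions.
#include <cstdint>

namespace sketch {

inline bool IsPreloadedForPosition(uint32_t aIndex, uint32_t aPreloadCount,
                                   int64_t aInputPos, uint32_t aChunkSize)
{
  // Position of the last byte in the given chunk.
  int64_t maxPos = static_cast<int64_t>(aIndex + 1) * aChunkSize - 1;

  // Position of the first byte in the chunk aPreloadCount chunks earlier.
  int64_t minPos = (aPreloadCount >= aIndex)
      ? 0
      : static_cast<int64_t>(aIndex - aPreloadCount) * aChunkSize;

  return aInputPos >= minPos && aInputPos <= maxPos;
}

// Example: with 256 kB chunks and aPreloadCount == 4, chunk 10 is kept while
// any input stream reads anywhere inside chunks 6..10.

} // namespace sketch
// ----------------------------------------------------------------------------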
1736 | | |
1737 | | nsresult |
1738 | | CacheFile::DeactivateChunk(CacheFileChunk *aChunk) |
1739 | 0 | { |
1740 | 0 | nsresult rv; |
1741 | 0 |
|
1742 | 0 | // Avoid lock reentrancy by increasing the RefCnt |
1743 | 0 | RefPtr<CacheFileChunk> chunk = aChunk; |
1744 | 0 |
|
1745 | 0 | { |
1746 | 0 | CacheFileAutoLock lock(this); |
1747 | 0 |
|
1748 | 0 | LOG(("CacheFile::DeactivateChunk() [this=%p, chunk=%p, idx=%u]", |
1749 | 0 | this, aChunk, aChunk->Index())); |
1750 | 0 |
|
1751 | 0 | MOZ_ASSERT(mReady); |
1752 | 0 | MOZ_ASSERT((mHandle && !mMemoryOnly && !mOpeningFile) || |
1753 | 0 | (!mHandle && mMemoryOnly && !mOpeningFile) || |
1754 | 0 | (!mHandle && !mMemoryOnly && mOpeningFile)); |
1755 | 0 |
|
1756 | 0 | if (aChunk->mRefCnt != 2) { |
1757 | 0 | LOG(("CacheFile::DeactivateChunk() - Chunk is still used [this=%p, " |
1758 | 0 | "chunk=%p, refcnt=%" PRIuPTR "]", this, aChunk, aChunk->mRefCnt.get())); |
1759 | 0 |
|
1760 | 0 | // somebody got the reference before the lock was acquired |
1761 | 0 | return NS_OK; |
1762 | 0 | } |
1763 | 0 |
|
1764 | 0 | if (aChunk->mDiscardedChunk) { |
1765 | 0 | aChunk->mActiveChunk = false; |
1766 | 0 | ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget()); |
1767 | 0 |
|
1768 | 0 | DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk); |
1769 | 0 | MOZ_ASSERT(removed); |
1770 | 0 | return NS_OK; |
1771 | 0 | } |
1772 | 0 |
|
1773 | | #ifdef DEBUG |
1774 | | { |
1775 | | // We can be here iff the chunk is in the hash table |
1776 | | RefPtr<CacheFileChunk> chunkCheck; |
1777 | | mChunks.Get(chunk->Index(), getter_AddRefs(chunkCheck)); |
1778 | | MOZ_ASSERT(chunkCheck == chunk); |
1779 | | |
1780 | | // We also shouldn't have any queued listener for this chunk |
1781 | | ChunkListeners *listeners; |
1782 | | mChunkListeners.Get(chunk->Index(), &listeners); |
1783 | | MOZ_ASSERT(!listeners); |
1784 | | } |
1785 | | #endif |
1786 | | |
1787 | 0 | if (NS_FAILED(chunk->GetStatus())) { |
1788 | 0 | SetError(chunk->GetStatus()); |
1789 | 0 | } |
1790 | 0 |
|
1791 | 0 | if (NS_FAILED(mStatus)) { |
1792 | 0 | // Don't write any chunk to disk since this entry will be doomed |
1793 | 0 | LOG(("CacheFile::DeactivateChunk() - Releasing chunk because of status " |
1794 | 0 | "[this=%p, chunk=%p, mStatus=0x%08" PRIx32 "]", |
1795 | 0 | this, chunk.get(), static_cast<uint32_t>(mStatus))); |
1796 | 0 |
|
1797 | 0 | RemoveChunkInternal(chunk, false); |
1798 | 0 | return mStatus; |
1799 | 0 | } |
1800 | 0 |
|
1801 | 0 | if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) { |
1802 | 0 | LOG(("CacheFile::DeactivateChunk() - Writing dirty chunk to the disk " |
1803 | 0 | "[this=%p]", this)); |
1804 | 0 |
|
1805 | 0 | mDataIsDirty = true; |
1806 | 0 |
|
1807 | 0 | rv = chunk->Write(mHandle, this); |
1808 | 0 | if (NS_FAILED(rv)) { |
1809 | 0 | LOG(("CacheFile::DeactivateChunk() - CacheFileChunk::Write() failed " |
1810 | 0 | "synchronously. Removing it. [this=%p, chunk=%p, rv=0x%08" PRIx32 "]", |
1811 | 0 | this, chunk.get(), static_cast<uint32_t>(rv))); |
1812 | 0 |
|
1813 | 0 | RemoveChunkInternal(chunk, false); |
1814 | 0 |
|
1815 | 0 | SetError(rv); |
1816 | 0 | return rv; |
1817 | 0 | } |
1818 | 0 |
|
1819 | 0 | // Chunk will be removed in OnChunkWritten if it is still unused |
1820 | 0 |
|
1821 | 0 | // The chunk needs to be released under the lock so that we can rely on
1822 | 0 | // CacheFileChunk::mRefCnt in CacheFile::OnChunkWritten()
1823 | 0 | chunk = nullptr; |
1824 | 0 | return NS_OK; |
1825 | 0 | } |
1826 | 0 | |
1827 | 0 | bool keepChunk = ShouldCacheChunk(aChunk->Index()); |
1828 | 0 | LOG(("CacheFile::DeactivateChunk() - %s unused chunk [this=%p, chunk=%p]", |
1829 | 0 | keepChunk ? "Caching" : "Releasing", this, chunk.get())); |
1830 | 0 |
|
1831 | 0 | RemoveChunkInternal(chunk, keepChunk); |
1832 | 0 |
|
1833 | 0 | if (!mMemoryOnly) |
1834 | 0 | WriteMetadataIfNeededLocked(); |
1835 | 0 | } |
1836 | 0 |
|
1837 | 0 | return NS_OK; |
1838 | 0 | } |
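// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// For an unused chunk with a healthy entry status, DeactivateChunk() picks
// one of three outcomes: write it (dirty, disk-backed entry), keep it in
// mCachedChunks, or release it. The standalone helper below captures that
// order of checks; error paths are ignored and the names are illustrative.

namespace sketch {

enum class ChunkFate { Write, KeepCached, Release };

inline ChunkFate DecideChunkFate(bool aDirty, bool aMemoryOnly,
                                 bool aOpeningFile, bool aShouldCache)
{
  if (aDirty && !aMemoryOnly && !aOpeningFile) {
    return ChunkFate::Write;  // flushed to disk, removed later if still unused
  }
  return aShouldCache ? ChunkFate::KeepCached : ChunkFate::Release;
}

} // namespace sketch
// ----------------------------------------------------------------------------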
1839 | | |
1840 | | void |
1841 | | CacheFile::RemoveChunkInternal(CacheFileChunk *aChunk, bool aCacheChunk) |
1842 | 0 | { |
1843 | 0 | AssertOwnsLock(); |
1844 | 0 |
|
1845 | 0 | aChunk->mActiveChunk = false; |
1846 | 0 | ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget()); |
1847 | 0 |
|
1848 | 0 | if (aCacheChunk) { |
1849 | 0 | mCachedChunks.Put(aChunk->Index(), aChunk); |
1850 | 0 | } |
1851 | 0 |
|
1852 | 0 | mChunks.Remove(aChunk->Index()); |
1853 | 0 | } |
1854 | | |
1855 | | bool |
1856 | | CacheFile::OutputStreamExists(bool aAlternativeData) |
1857 | 0 | { |
1858 | 0 | AssertOwnsLock(); |
1859 | 0 |
|
1860 | 0 | if (!mOutput) { |
1861 | 0 | return false; |
1862 | 0 | } |
1863 | 0 | |
1864 | 0 | return mOutput->IsAlternativeData() == aAlternativeData; |
1865 | 0 | } |
1866 | | |
1867 | | int64_t |
1868 | | CacheFile::BytesFromChunk(uint32_t aIndex, bool aAlternativeData) |
1869 | 0 | { |
1870 | 0 | AssertOwnsLock(); |
1871 | 0 |
|
1872 | 0 | int64_t dataSize; |
1873 | 0 |
|
1874 | 0 | if (mAltDataOffset != -1) { |
1875 | 0 | if (aAlternativeData) { |
1876 | 0 | dataSize = mDataSize; |
1877 | 0 | } else { |
1878 | 0 | dataSize = mAltDataOffset; |
1879 | 0 | } |
1880 | 0 | } else { |
1881 | 0 | MOZ_ASSERT(!aAlternativeData); |
1882 | 0 | dataSize = mDataSize; |
1883 | 0 | } |
1884 | 0 |
|
1885 | 0 | if (!dataSize) { |
1886 | 0 | return 0; |
1887 | 0 | } |
1888 | 0 | |
1889 | 0 | // Index of the last existing chunk. |
1890 | 0 | uint32_t lastChunk = (dataSize - 1) / kChunkSize; |
1891 | 0 | if (aIndex > lastChunk) { |
1892 | 0 | return 0; |
1893 | 0 | } |
1894 | 0 | |
1895 | 0 | // We can use only preloaded chunks for the given stream to calculate |
1896 | 0 | // available bytes if this is an entry stored on disk, since only those |
1897 | 0 | // chunks are guaranteed not to be released. |
1898 | 0 | uint32_t maxPreloadedChunk; |
1899 | 0 | if (mMemoryOnly) { |
1900 | 0 | maxPreloadedChunk = lastChunk; |
1901 | 0 | } else { |
1902 | 0 | maxPreloadedChunk = std::min(aIndex + mPreloadChunkCount, lastChunk); |
1903 | 0 | } |
1904 | 0 |
|
1905 | 0 | uint32_t i; |
1906 | 0 | for (i = aIndex; i <= maxPreloadedChunk; ++i) { |
1907 | 0 | CacheFileChunk * chunk; |
1908 | 0 |
|
1909 | 0 | chunk = mChunks.GetWeak(i); |
1910 | 0 | if (chunk) { |
1911 | 0 | MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize); |
1912 | 0 | if (chunk->IsReady()) { |
1913 | 0 | continue; |
1914 | 0 | } |
1915 | 0 | |
1916 | 0 | // don't search for this chunk in mCachedChunks
1917 | 0 | break; |
1918 | 0 | } |
1919 | 0 | |
1920 | 0 | chunk = mCachedChunks.GetWeak(i); |
1921 | 0 | if (chunk) { |
1922 | 0 | MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize); |
1923 | 0 | continue; |
1924 | 0 | } |
1925 | 0 |
|
1926 | 0 | break; |
1927 | 0 | } |
1928 | 0 |
|
1929 | 0 | // theoretical bytes available in advance
1930 | 0 | int64_t advance = int64_t(i - aIndex) * kChunkSize; |
1931 | 0 | // real bytes till the end of the file |
1932 | 0 | int64_t tail = dataSize - (aIndex * kChunkSize); |
1933 | 0 |
|
1934 | 0 | return std::min(advance, tail); |
1935 | 0 | } |
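// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// BytesFromChunk() returns min(advance, tail): 'advance' counts the whole
// chunks found in memory starting at aIndex, 'tail' is what is actually left
// in the entry. A standalone version of that final computation; the chunk
// size and names in the example are assumptions.
#include <algorithm>
#include <cstdint>

namespace sketch {

// aFirstMissing is the first index >= aIndex that is not available in memory
// (the value of 'i' when the loop above breaks or finishes).
inline int64_t AvailableBytes(uint32_t aIndex, uint32_t aFirstMissing,
                              int64_t aDataSize, uint32_t aChunkSize)
{
  int64_t advance = int64_t(aFirstMissing - aIndex) * aChunkSize;
  int64_t tail = aDataSize - int64_t(aIndex) * aChunkSize;
  return std::min(advance, tail);
}

// Example: 256 kB chunks, aIndex = 2, chunks 2 and 3 in memory (first missing
// index is 4), dataSize = 900 kB -> advance = 512 kB, tail = 900 - 512 =
// 388 kB, so 388 kB are reported as immediately available.

} // namespace sketch
// ----------------------------------------------------------------------------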
1936 | | |
1937 | | nsresult |
1938 | | CacheFile::Truncate(int64_t aOffset) |
1939 | 0 | { |
1940 | 0 | AssertOwnsLock(); |
1941 | 0 |
|
1942 | 0 | LOG(("CacheFile::Truncate() [this=%p, offset=%" PRId64 "]", this, aOffset)); |
1943 | 0 |
|
1944 | 0 | nsresult rv; |
1945 | 0 |
|
1946 | 0 | // If we ever need to truncate on non alt-data boundary, we need to handle |
1947 | 0 | // existing input streams. |
1948 | 0 | MOZ_ASSERT(aOffset == mAltDataOffset, "Truncating normal data not implemented"); |
1949 | 0 | MOZ_ASSERT(mReady); |
1950 | 0 | MOZ_ASSERT(!mOutput); |
1951 | 0 |
|
1952 | 0 | uint32_t lastChunk = 0; |
1953 | 0 | if (mDataSize > 0) { |
1954 | 0 | lastChunk = (mDataSize - 1) / kChunkSize; |
1955 | 0 | } |
1956 | 0 |
|
1957 | 0 | uint32_t newLastChunk = 0; |
1958 | 0 | if (aOffset > 0) { |
1959 | 0 | newLastChunk = (aOffset - 1) / kChunkSize; |
1960 | 0 | } |
1961 | 0 |
|
1962 | 0 | uint32_t bytesInNewLastChunk = aOffset - newLastChunk * kChunkSize; |
1963 | 0 |
|
1964 | 0 | LOG(("CacheFile::Truncate() - lastChunk=%u, newLastChunk=%u, "
1965 | 0 | "bytesInNewLastChunk=%u", lastChunk, newLastChunk, bytesInNewLastChunk)); |
1966 | 0 |
|
1967 | 0 | // Remove all truncated chunks from mCachedChunks |
1968 | 0 | for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) { |
1969 | 0 | uint32_t idx = iter.Key(); |
1970 | 0 |
|
1971 | 0 | if (idx > newLastChunk) { |
1972 | 0 | // This is an unused chunk, simply remove it.
1973 | 0 | LOG(("CacheFile::Truncate() - removing cached chunk [idx=%u]", idx)); |
1974 | 0 | iter.Remove(); |
1975 | 0 | } |
1976 | 0 | } |
1977 | 0 |
|
1978 | 0 | // We need to make sure no input stream holds a reference to a chunk we're |
1979 | 0 | // going to discard. In theory, if alt-data begins at chunk boundary, input |
1980 | 0 | // stream for normal data can get the chunk containing only alt-data via |
1981 | 0 | // EnsureCorrectChunk() call. The input stream won't read the data from such |
1982 | 0 | // chunk, but it will keep the reference until the stream is closed and we |
1983 | 0 | // cannot simply discard this chunk. |
1984 | 0 | int64_t maxInputChunk = -1; |
1985 | 0 | for (uint32_t i = 0; i < mInputs.Length(); ++i) { |
1986 | 0 | int64_t inputChunk = mInputs[i]->GetChunkIdx(); |
1987 | 0 |
|
1988 | 0 | if (maxInputChunk < inputChunk) { |
1989 | 0 | maxInputChunk = inputChunk; |
1990 | 0 | } |
1991 | 0 |
|
1992 | 0 | MOZ_RELEASE_ASSERT(mInputs[i]->GetPosition() <= aOffset); |
1993 | 0 | } |
1994 | 0 |
|
1995 | 0 | MOZ_RELEASE_ASSERT(maxInputChunk <= newLastChunk + 1); |
1996 | 0 | if (maxInputChunk == newLastChunk + 1) { |
1997 | 0 | // Truncating must be done at a chunk boundary
1998 | 0 | MOZ_RELEASE_ASSERT(bytesInNewLastChunk == kChunkSize); |
1999 | 0 | newLastChunk++; |
2000 | 0 | bytesInNewLastChunk = 0; |
2001 | 0 | LOG(("CacheFile::Truncate() - chunk %p is still in use, using " |
2002 | 0 | "newLastChunk=%u and bytesInNewLastChunk=%u", |
2003 | 0 | mChunks.GetWeak(newLastChunk), newLastChunk, bytesInNewLastChunk)); |
2004 | 0 | } |
2005 | 0 |
|
2006 | 0 | // Discard all truncated chunks in mChunks |
2007 | 0 | for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) { |
2008 | 0 | uint32_t idx = iter.Key(); |
2009 | 0 |
|
2010 | 0 | if (idx > newLastChunk) { |
2011 | 0 | RefPtr<CacheFileChunk>& chunk = iter.Data(); |
2012 | 0 | LOG(("CacheFile::Truncate() - discarding chunk [idx=%u, chunk=%p]", |
2013 | 0 | idx, chunk.get())); |
2014 | 0 |
|
2015 | 0 | if (HaveChunkListeners(idx)) { |
2016 | 0 | NotifyChunkListeners(idx, NS_ERROR_NOT_AVAILABLE, chunk); |
2017 | 0 | } |
2018 | 0 |
|
2019 | 0 | chunk->mDiscardedChunk = true; |
2020 | 0 | mDiscardedChunks.AppendElement(chunk); |
2021 | 0 | iter.Remove(); |
2022 | 0 | } |
2023 | 0 | } |
2024 | 0 |
|
2025 | 0 | // Remove hashes of all removed chunks from the metadata |
2026 | 0 | for (uint32_t i = lastChunk; i > newLastChunk; --i) { |
2027 | 0 | mMetadata->RemoveHash(i); |
2028 | 0 | } |
2029 | 0 |
|
2030 | 0 | // Truncate new last chunk |
2031 | 0 | if (bytesInNewLastChunk == kChunkSize) { |
2032 | 0 | LOG(("CacheFile::Truncate() - not truncating last chunk.")); |
2033 | 0 | } else { |
2034 | 0 | RefPtr<CacheFileChunk> chunk; |
2035 | 0 | if (mChunks.Get(newLastChunk, getter_AddRefs(chunk))) { |
2036 | 0 | LOG(("CacheFile::Truncate() - New last chunk %p got from mChunks.", |
2037 | 0 | chunk.get())); |
2038 | 0 | } else if (mCachedChunks.Get(newLastChunk, getter_AddRefs(chunk))) { |
2039 | 0 | LOG(("CacheFile::Truncate() - New last chunk %p got from mCachedChunks.", |
2040 | 0 | chunk.get())); |
2041 | 0 | } else { |
2042 | 0 | // New last chunk isn't loaded but we need to update the hash. |
2043 | 0 | MOZ_ASSERT(!mMemoryOnly); |
2044 | 0 | MOZ_ASSERT(mHandle); |
2045 | 0 |
|
2046 | 0 | rv = GetChunkLocked(newLastChunk, PRELOADER, nullptr, |
2047 | 0 | getter_AddRefs(chunk)); |
2048 | 0 | if (NS_FAILED(rv)) { |
2049 | 0 | return rv; |
2050 | 0 | } |
2051 | 0 | // We've checked that we don't have this chunk, so no chunk must be |
2052 | 0 | // returned. |
2053 | 0 | MOZ_ASSERT(!chunk); |
2054 | 0 |
|
2055 | 0 | if (!mChunks.Get(newLastChunk, getter_AddRefs(chunk))) { |
2056 | 0 | return NS_ERROR_UNEXPECTED; |
2057 | 0 | } |
2058 | 0 | |
2059 | 0 | LOG(("CacheFile::Truncate() - New last chunk %p got from preloader.", |
2060 | 0 | chunk.get())); |
2061 | 0 | } |
2062 | 0 |
|
2063 | 0 | rv = chunk->GetStatus(); |
2064 | 0 | if (NS_FAILED(rv)) { |
2065 | 0 | LOG(("CacheFile::Truncate() - New last chunk is failed [status=0x%08" |
2066 | 0 | PRIx32 "]", static_cast<uint32_t>(rv))); |
2067 | 0 | return rv; |
2068 | 0 | } |
2069 | 0 |
|
2070 | 0 | rv = chunk->Truncate(bytesInNewLastChunk); |
2071 | 0 | if (NS_FAILED(rv)) { |
2072 | 0 | return rv; |
2073 | 0 | } |
2074 | 0 | |
2075 | 0 | // If the chunk is ready, set the new hash now. If it's still being loaded,
2076 | 0 | // CacheFileChunk::Truncate() made the chunk dirty and the hash will be updated
2077 | 0 | // in OnChunkWritten(). |
2078 | 0 | if (chunk->IsReady()) { |
2079 | 0 | mMetadata->SetHash(newLastChunk, chunk->Hash()); |
2080 | 0 | } |
2081 | 0 | } |
2082 | 0 |
|
2083 | 0 | if (mHandle) { |
2084 | 0 | rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle, aOffset, aOffset, nullptr); |
2085 | 0 | if (NS_FAILED(rv)) { |
2086 | 0 | return rv; |
2087 | 0 | } |
2088 | 0 | } |
2089 | 0 | |
2090 | 0 | mDataSize = aOffset; |
2091 | 0 |
|
2092 | 0 | return NS_OK; |
2093 | 0 | } |
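// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// Truncate() needs the index of the last chunk that survives and how many of
// its bytes remain valid. A standalone version of that arithmetic; the names
// and the chunk size used in the example are assumptions.
#include <cstdint>

namespace sketch {

struct TruncatePoint {
  uint32_t lastChunk;        // index of the last surviving chunk
  uint32_t bytesInLastChunk; // valid bytes in that chunk after truncation
};

inline TruncatePoint ComputeTruncatePoint(int64_t aOffset, uint32_t aChunkSize)
{
  TruncatePoint t{0, 0};
  if (aOffset > 0) {
    t.lastChunk = static_cast<uint32_t>((aOffset - 1) / aChunkSize);
  }
  t.bytesInLastChunk =
      static_cast<uint32_t>(aOffset - int64_t(t.lastChunk) * aChunkSize);
  return t;
}

// Example: 256 kB chunks, aOffset = 600 kB -> lastChunk = 2 and
// bytesInLastChunk = 600 - 512 = 88 kB; chunks 3 and above are discarded.

} // namespace sketch
// ----------------------------------------------------------------------------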
2094 | | |
2095 | | static uint32_t |
2096 | | StatusToTelemetryEnum(nsresult aStatus) |
2097 | 0 | { |
2098 | 0 | if (NS_SUCCEEDED(aStatus)) { |
2099 | 0 | return 0; |
2100 | 0 | } |
2101 | 0 | |
2102 | 0 | switch (aStatus) { |
2103 | 0 | case NS_BASE_STREAM_CLOSED: |
2104 | 0 | return 0; // Log this as a success |
2105 | 0 | case NS_ERROR_OUT_OF_MEMORY: |
2106 | 0 | return 2; |
2107 | 0 | case NS_ERROR_FILE_DISK_FULL: |
2108 | 0 | return 3; |
2109 | 0 | case NS_ERROR_FILE_CORRUPTED: |
2110 | 0 | return 4; |
2111 | 0 | case NS_ERROR_FILE_NOT_FOUND: |
2112 | 0 | return 5; |
2113 | 0 | case NS_BINDING_ABORTED: |
2114 | 0 | return 6; |
2115 | 0 | default: |
2116 | 0 | return 1; // other error |
2117 | 0 | } |
2118 | 0 | |
2119 | 0 | MOZ_ASSERT_UNREACHABLE("We should never get here"); |
2120 | 0 | } |
2121 | | |
2122 | | nsresult |
2123 | | CacheFile::RemoveInput(CacheFileInputStream *aInput, nsresult aStatus) |
2124 | 0 | { |
2125 | 0 | CacheFileAutoLock lock(this); |
2126 | 0 |
|
2127 | 0 | LOG(("CacheFile::RemoveInput() [this=%p, input=%p, status=0x%08" PRIx32 "]", this, |
2128 | 0 | aInput, static_cast<uint32_t>(aStatus))); |
2129 | 0 |
|
2130 | 0 | DebugOnly<bool> found; |
2131 | 0 | found = mInputs.RemoveElement(aInput); |
2132 | 0 | MOZ_ASSERT(found); |
2133 | 0 |
|
2134 | 0 | ReleaseOutsideLock(already_AddRefed<nsIInputStream>(static_cast<nsIInputStream*>(aInput))); |
2135 | 0 |
|
2136 | 0 | if (!mMemoryOnly) |
2137 | 0 | WriteMetadataIfNeededLocked(); |
2138 | 0 |
|
2139 | 0 | // If the input didn't read all data, some preloaded chunks might be left
2140 | 0 | // that won't be used anymore.
2141 | 0 | CleanUpCachedChunks(); |
2142 | 0 |
|
2143 | 0 | Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_INPUT_STREAM_STATUS, |
2144 | 0 | StatusToTelemetryEnum(aStatus)); |
2145 | 0 |
|
2146 | 0 | return NS_OK; |
2147 | 0 | } |
2148 | | |
2149 | | nsresult |
2150 | | CacheFile::RemoveOutput(CacheFileOutputStream *aOutput, nsresult aStatus) |
2151 | 0 | { |
2152 | 0 | AssertOwnsLock(); |
2153 | 0 |
|
2154 | 0 | nsresult rv; |
2155 | 0 |
|
2156 | 0 | LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08" PRIx32 "]", this, |
2157 | 0 | aOutput, static_cast<uint32_t>(aStatus))); |
2158 | 0 |
|
2159 | 0 | if (mOutput != aOutput) { |
2160 | 0 | LOG(("CacheFile::RemoveOutput() - This output was already removed, ignoring" |
2161 | 0 | " call [this=%p]", this)); |
2162 | 0 | return NS_OK; |
2163 | 0 | } |
2164 | 0 |
|
2165 | 0 | mOutput = nullptr; |
2166 | 0 |
|
2167 | 0 | // Cancel all queued chunk listeners and update listeners that cannot be satisfied
2168 | 0 | NotifyListenersAboutOutputRemoval(); |
2169 | 0 |
|
2170 | 0 | if (!mMemoryOnly) |
2171 | 0 | WriteMetadataIfNeededLocked(); |
2172 | 0 |
|
2173 | 0 | // Make sure the CacheFile status is set to a failure when the output stream
2174 | 0 | // is closed with a fatal error. This way we correctly, and without any window,
2175 | 0 | // propagate the failure state of this entry to end consumers.
2176 | 0 | if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) && aStatus != NS_BASE_STREAM_CLOSED) { |
2177 | 0 | if (aOutput->IsAlternativeData()) { |
2178 | 0 | MOZ_ASSERT(mAltDataOffset != -1); |
2179 | 0 | // If there is no alt-data input stream truncate only alt-data, otherwise |
2180 | 0 | // doom the entry. |
2181 | 0 | bool altDataInputExists = false; |
2182 | 0 | for (uint32_t i = 0; i < mInputs.Length(); ++i) { |
2183 | 0 | if (mInputs[i]->IsAlternativeData()) { |
2184 | 0 | altDataInputExists = true; |
2185 | 0 | break; |
2186 | 0 | } |
2187 | 0 | } |
2188 | 0 | if (altDataInputExists) { |
2189 | 0 | SetError(aStatus); |
2190 | 0 | } else { |
2191 | 0 | rv = Truncate(mAltDataOffset); |
2192 | 0 | if (NS_FAILED(rv)) { |
2193 | 0 | LOG(("CacheFile::RemoveOutput() - Truncating alt-data failed " |
2194 | 0 | "[rv=0x%08" PRIx32 "]", static_cast<uint32_t>(rv))); |
2195 | 0 | SetError(aStatus); |
2196 | 0 | } else { |
2197 | 0 | SetAltMetadata(nullptr); |
2198 | 0 | mAltDataOffset = -1; |
2199 | 0 | } |
2200 | 0 | } |
2201 | 0 | } else { |
2202 | 0 | SetError(aStatus); |
2203 | 0 | } |
2204 | 0 | } |
2205 | 0 |
|
2206 | 0 | // Notify close listener as the last action |
2207 | 0 | aOutput->NotifyCloseListener(); |
2208 | 0 |
|
2209 | 0 | Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS, |
2210 | 0 | StatusToTelemetryEnum(aStatus)); |
2211 | 0 |
|
2212 | 0 | return NS_OK; |
2213 | 0 | } |
2214 | | |
2215 | | nsresult |
2216 | | CacheFile::NotifyChunkListener(CacheFileChunkListener *aCallback, |
2217 | | nsIEventTarget *aTarget, |
2218 | | nsresult aResult, |
2219 | | uint32_t aChunkIdx, |
2220 | | CacheFileChunk *aChunk) |
2221 | 0 | { |
2222 | 0 | LOG(("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, " |
2223 | 0 | "rv=0x%08" PRIx32 ", idx=%u, chunk=%p]", this, aCallback, aTarget, |
2224 | 0 | static_cast<uint32_t>(aResult), aChunkIdx, aChunk)); |
2225 | 0 |
|
2226 | 0 | nsresult rv; |
2227 | 0 | RefPtr<NotifyChunkListenerEvent> ev; |
2228 | 0 | ev = new NotifyChunkListenerEvent(aCallback, aResult, aChunkIdx, aChunk); |
2229 | 0 | if (aTarget) |
2230 | 0 | rv = aTarget->Dispatch(ev, NS_DISPATCH_NORMAL); |
2231 | 0 | else |
2232 | 0 | rv = NS_DispatchToCurrentThread(ev); |
2233 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
2234 | 0 |
|
2235 | 0 | return NS_OK; |
2236 | 0 | } |
2237 | | |
2238 | | nsresult |
2239 | | CacheFile::QueueChunkListener(uint32_t aIndex, |
2240 | | CacheFileChunkListener *aCallback) |
2241 | 0 | { |
2242 | 0 | LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]", |
2243 | 0 | this, aIndex, aCallback)); |
2244 | 0 |
|
2245 | 0 | AssertOwnsLock(); |
2246 | 0 |
|
2247 | 0 | MOZ_ASSERT(aCallback); |
2248 | 0 |
|
2249 | 0 | ChunkListenerItem *item = new ChunkListenerItem(); |
2250 | 0 | item->mTarget = CacheFileIOManager::IOTarget(); |
2251 | 0 | if (!item->mTarget) { |
2252 | 0 | LOG(("CacheFile::QueueChunkListener() - Cannot get Cache I/O thread! Using " |
2253 | 0 | "main thread for callback.")); |
2254 | 0 | item->mTarget = GetMainThreadEventTarget(); |
2255 | 0 | } |
2256 | 0 | item->mCallback = aCallback; |
2257 | 0 |
|
2258 | 0 | ChunkListeners *listeners; |
2259 | 0 | if (!mChunkListeners.Get(aIndex, &listeners)) { |
2260 | 0 | listeners = new ChunkListeners(); |
2261 | 0 | mChunkListeners.Put(aIndex, listeners); |
2262 | 0 | } |
2263 | 0 |
|
2264 | 0 | listeners->mItems.AppendElement(item); |
2265 | 0 | return NS_OK; |
2266 | 0 | } |
2267 | | |
2268 | | nsresult |
2269 | | CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult, |
2270 | | CacheFileChunk *aChunk) |
2271 | 0 | { |
2272 | 0 | LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08" PRIx32 ", " |
2273 | 0 | "chunk=%p]", this, aIndex, static_cast<uint32_t>(aResult), aChunk)); |
2274 | 0 |
|
2275 | 0 | AssertOwnsLock(); |
2276 | 0 |
|
2277 | 0 | nsresult rv, rv2; |
2278 | 0 |
|
2279 | 0 | ChunkListeners *listeners; |
2280 | 0 | mChunkListeners.Get(aIndex, &listeners); |
2281 | 0 | MOZ_ASSERT(listeners); |
2282 | 0 |
|
2283 | 0 | rv = NS_OK; |
2284 | 0 | for (uint32_t i = 0 ; i < listeners->mItems.Length() ; i++) { |
2285 | 0 | ChunkListenerItem *item = listeners->mItems[i]; |
2286 | 0 | rv2 = NotifyChunkListener(item->mCallback, item->mTarget, aResult, aIndex, |
2287 | 0 | aChunk); |
2288 | 0 | if (NS_FAILED(rv2) && NS_SUCCEEDED(rv)) |
2289 | 0 | rv = rv2; |
2290 | 0 | delete item; |
2291 | 0 | } |
2292 | 0 |
|
2293 | 0 | mChunkListeners.Remove(aIndex); |
2294 | 0 |
|
2295 | 0 | return rv; |
2296 | 0 | } |
2297 | | |
2298 | | bool |
2299 | | CacheFile::HaveChunkListeners(uint32_t aIndex) |
2300 | 0 | { |
2301 | 0 | ChunkListeners *listeners; |
2302 | 0 | mChunkListeners.Get(aIndex, &listeners); |
2303 | 0 | return !!listeners; |
2304 | 0 | } |
2305 | | |
2306 | | void |
2307 | | CacheFile::NotifyListenersAboutOutputRemoval() |
2308 | 0 | { |
2309 | 0 | LOG(("CacheFile::NotifyListenersAboutOutputRemoval() [this=%p]", this)); |
2310 | 0 |
|
2311 | 0 | AssertOwnsLock(); |
2312 | 0 |
|
2313 | 0 | // First fail all chunk listeners that wait for a non-existent chunk
2314 | 0 | for (auto iter = mChunkListeners.Iter(); !iter.Done(); iter.Next()) { |
2315 | 0 | uint32_t idx = iter.Key(); |
2316 | 0 | nsAutoPtr<ChunkListeners>& listeners = iter.Data(); |
2317 | 0 |
|
2318 | 0 | LOG(("CacheFile::NotifyListenersAboutOutputRemoval() - fail " |
2319 | 0 | "[this=%p, idx=%u]", this, idx)); |
2320 | 0 |
|
2321 | 0 | RefPtr<CacheFileChunk> chunk; |
2322 | 0 | mChunks.Get(idx, getter_AddRefs(chunk)); |
2323 | 0 | if (chunk) { |
2324 | 0 | MOZ_ASSERT(!chunk->IsReady()); |
2325 | 0 | continue; |
2326 | 0 | } |
2327 | 0 |
|
2328 | 0 | for (uint32_t i = 0 ; i < listeners->mItems.Length() ; i++) { |
2329 | 0 | ChunkListenerItem *item = listeners->mItems[i]; |
2330 | 0 | NotifyChunkListener(item->mCallback, item->mTarget, |
2331 | 0 | NS_ERROR_NOT_AVAILABLE, idx, nullptr); |
2332 | 0 | delete item; |
2333 | 0 | } |
2334 | 0 |
|
2335 | 0 | iter.Remove(); |
2336 | 0 | } |
2337 | 0 |
|
2338 | 0 | // Fail all update listeners |
2339 | 0 | for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) { |
2340 | 0 | const RefPtr<CacheFileChunk>& chunk = iter.Data(); |
2341 | 0 | LOG(("CacheFile::NotifyListenersAboutOutputRemoval() - fail2 " |
2342 | 0 | "[this=%p, idx=%u]", this, iter.Key())); |
2343 | 0 |
|
2344 | 0 | if (chunk->IsReady()) { |
2345 | 0 | chunk->NotifyUpdateListeners(); |
2346 | 0 | } |
2347 | 0 | } |
2348 | 0 | } |
2349 | | |
2350 | | bool |
2351 | | CacheFile::DataSize(int64_t* aSize) |
2352 | 0 | { |
2353 | 0 | CacheFileAutoLock lock(this); |
2354 | 0 |
|
2355 | 0 | if (OutputStreamExists(false)) { |
2356 | 0 | return false; |
2357 | 0 | } |
2358 | 0 | |
2359 | 0 | if (mAltDataOffset == -1) { |
2360 | 0 | *aSize = mDataSize; |
2361 | 0 | } else { |
2362 | 0 | *aSize = mAltDataOffset; |
2363 | 0 | } |
2364 | 0 |
|
2365 | 0 | return true; |
2366 | 0 | } |
2367 | | |
2368 | | nsresult |
2369 | | CacheFile::GetAltDataSize(int64_t *aSize) |
2370 | 0 | { |
2371 | 0 | CacheFileAutoLock lock(this); |
2372 | 0 | if (mOutput) { |
2373 | 0 | return NS_ERROR_IN_PROGRESS; |
2374 | 0 | } |
2375 | 0 | |
2376 | 0 | if (mAltDataOffset == -1) { |
2377 | 0 | return NS_ERROR_NOT_AVAILABLE; |
2378 | 0 | } |
2379 | 0 | |
2380 | 0 | *aSize = mDataSize - mAltDataOffset; |
2381 | 0 | return NS_OK; |
2382 | 0 | } |
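// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// DataSize() and GetAltDataSize() rely on the layout where the normal data
// occupies [0, mAltDataOffset) and the alt-data occupies the rest, so the
// alt-data size is mDataSize - mAltDataOffset. A standalone view of that
// layout; the struct and function names are illustrative.
#include <cstdint>

namespace sketch {

struct DataLayout {
  int64_t dataSize;      // total bytes (normal data + alt-data)
  int64_t altDataOffset; // -1 when the entry has no alt-data
};

inline int64_t NormalDataSize(const DataLayout& aLayout)
{
  return aLayout.altDataOffset == -1 ? aLayout.dataSize
                                     : aLayout.altDataOffset;
}

inline int64_t AltDataSize(const DataLayout& aLayout)
{
  return aLayout.altDataOffset == -1
      ? 0
      : aLayout.dataSize - aLayout.altDataOffset;
}

} // namespace sketch
// ----------------------------------------------------------------------------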
2383 | | |
2384 | | bool |
2385 | | CacheFile::IsDoomed() |
2386 | 0 | { |
2387 | 0 | CacheFileAutoLock lock(this); |
2388 | 0 |
|
2389 | 0 | if (!mHandle) |
2390 | 0 | return false; |
2391 | 0 | |
2392 | 0 | return mHandle->IsDoomed(); |
2393 | 0 | } |
2394 | | |
2395 | | bool |
2396 | | CacheFile::IsWriteInProgress() |
2397 | 0 | { |
2398 | 0 | // Returns true when there is a potentially unfinished write operation. |
2399 | 0 | // Not using the lock for performance reasons. mMetadata is never released
2400 | 0 | // during the lifetime of CacheFile.
2401 | 0 |
|
2402 | 0 | bool result = false; |
2403 | 0 |
|
2404 | 0 | if (!mMemoryOnly) { |
2405 | 0 | result = mDataIsDirty || |
2406 | 0 | (mMetadata && mMetadata->IsDirty()) || |
2407 | 0 | mWritingMetadata; |
2408 | 0 | } |
2409 | 0 |
|
2410 | 0 | result = result || |
2411 | 0 | mOpeningFile || |
2412 | 0 | mOutput || |
2413 | 0 | mChunks.Count(); |
2414 | 0 |
|
2415 | 0 | return result; |
2416 | 0 | } |
2417 | | |
2418 | | bool |
2419 | | CacheFile::EntryWouldExceedLimit(int64_t aOffset, int64_t aSize, bool aIsAltData) |
2420 | 0 | { |
2421 | 0 | if (mSkipSizeCheck || aSize < 0) { |
2422 | 0 | return false; |
2423 | 0 | } |
2424 | 0 | |
2425 | 0 | int64_t totalSize = aOffset + aSize; |
2426 | 0 | if (aIsAltData) { |
2427 | 0 | totalSize += (mAltDataOffset == -1) ? mDataSize : mAltDataOffset; |
2428 | 0 | } |
2429 | 0 |
|
2430 | 0 | if (CacheObserver::EntryIsTooBig(totalSize, !mMemoryOnly)) { |
2431 | 0 | return true; |
2432 | 0 | } |
2433 | 0 | |
2434 | 0 | return false; |
2435 | 0 | } |
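// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// For alt-data writes the size check adds the space already taken by the
// normal data (mAltDataOffset, or mDataSize when no alt-data exists yet)
// before comparing against the limit. A standalone version of that sum; the
// names are illustrative and the limit comparison is left to the caller.
#include <cstdint>

namespace sketch {

inline int64_t TotalEntrySize(int64_t aOffset, int64_t aSize, bool aIsAltData,
                              int64_t aDataSize, int64_t aAltDataOffset)
{
  int64_t total = aOffset + aSize;
  if (aIsAltData) {
    total += (aAltDataOffset == -1) ? aDataSize : aAltDataOffset;
  }
  return total;  // caller compares this against the configured entry limit
}

} // namespace sketch
// ----------------------------------------------------------------------------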
2436 | | |
2437 | | |
2438 | | bool |
2439 | | CacheFile::IsDirty() |
2440 | 0 | { |
2441 | 0 | return mDataIsDirty || mMetadata->IsDirty(); |
2442 | 0 | } |
2443 | | |
2444 | | void |
2445 | | CacheFile::WriteMetadataIfNeeded() |
2446 | 0 | { |
2447 | 0 | LOG(("CacheFile::WriteMetadataIfNeeded() [this=%p]", this)); |
2448 | 0 |
|
2449 | 0 | CacheFileAutoLock lock(this); |
2450 | 0 |
|
2451 | 0 | if (!mMemoryOnly) |
2452 | 0 | WriteMetadataIfNeededLocked(); |
2453 | 0 | } |
2454 | | |
2455 | | void |
2456 | | CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget) |
2457 | 0 | { |
2458 | 0 | // When aFireAndForget is set to true, we are called from dtor. |
2459 | 0 | // |this| must not be referenced after this method returns! |
2460 | 0 |
|
2461 | 0 | LOG(("CacheFile::WriteMetadataIfNeededLocked() [this=%p]", this)); |
2462 | 0 |
|
2463 | 0 | nsresult rv; |
2464 | 0 |
|
2465 | 0 | AssertOwnsLock(); |
2466 | 0 | MOZ_ASSERT(!mMemoryOnly); |
2467 | 0 |
|
2468 | 0 | if (!mMetadata) { |
2469 | 0 | MOZ_CRASH("Must have metadata here"); |
2470 | 0 | return; |
2471 | 0 | } |
2472 | 0 | |
2473 | 0 | if (NS_FAILED(mStatus)) |
2474 | 0 | return; |
2475 | 0 | |
2476 | 0 | if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() || |
2477 | 0 | mWritingMetadata || mOpeningFile || mKill) |
2478 | 0 | return; |
2479 | 0 | |
2480 | 0 | if (!aFireAndForget) { |
2481 | 0 | // If aFireAndForget is set, we are called from the dtor. Otherwise the write
2482 | 0 | // scheduler holds a hard reference to CacheFile, so we cannot be here.
2483 | 0 | CacheFileIOManager::UnscheduleMetadataWrite(this); |
2484 | 0 | } |
2485 | 0 |
|
2486 | 0 | LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing metadata [this=%p]", |
2487 | 0 | this)); |
2488 | 0 |
|
2489 | 0 | rv = mMetadata->WriteMetadata(mDataSize, aFireAndForget ? nullptr : this); |
2490 | 0 | if (NS_SUCCEEDED(rv)) { |
2491 | 0 | mWritingMetadata = true; |
2492 | 0 | mDataIsDirty = false; |
2493 | 0 | } else { |
2494 | 0 | LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously " |
2495 | 0 | "failed [this=%p]", this)); |
2496 | 0 | // TODO: close streams with error |
2497 | 0 | SetError(rv); |
2498 | 0 | } |
2499 | 0 | } |
2500 | | |
2501 | | void |
2502 | | CacheFile::PostWriteTimer() |
2503 | 0 | { |
2504 | 0 | if (mMemoryOnly) |
2505 | 0 | return; |
2506 | 0 | |
2507 | 0 | LOG(("CacheFile::PostWriteTimer() [this=%p]", this)); |
2508 | 0 |
|
2509 | 0 | CacheFileIOManager::ScheduleMetadataWrite(this); |
2510 | 0 | } |
2511 | | |
2512 | | void |
2513 | | CacheFile::CleanUpCachedChunks() |
2514 | 0 | { |
2515 | 0 | for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) { |
2516 | 0 | uint32_t idx = iter.Key(); |
2517 | 0 | const RefPtr<CacheFileChunk>& chunk = iter.Data(); |
2518 | 0 |
|
2519 | 0 | LOG(("CacheFile::CleanUpCachedChunks() [this=%p, idx=%u, chunk=%p]", this, |
2520 | 0 | idx, chunk.get())); |
2521 | 0 |
|
2522 | 0 | if (MustKeepCachedChunk(idx)) { |
2523 | 0 | LOG(("CacheFile::CleanUpCachedChunks() - Keeping chunk")); |
2524 | 0 | continue; |
2525 | 0 | } |
2526 | 0 |
|
2527 | 0 | LOG(("CacheFile::CleanUpCachedChunks() - Removing chunk")); |
2528 | 0 | iter.Remove(); |
2529 | 0 | } |
2530 | 0 | } |
2531 | | |
2532 | | nsresult |
2533 | | CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx) |
2534 | 0 | { |
2535 | 0 | AssertOwnsLock(); |
2536 | 0 |
|
2537 | 0 | // This method is used to pad the last incomplete chunk with zeroes or to
2538 | 0 | // create a new chunk full of zeroes
2539 | 0 | MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx); |
2540 | 0 |
|
2541 | 0 | nsresult rv; |
2542 | 0 | RefPtr<CacheFileChunk> chunk; |
2543 | 0 | rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk)); |
2544 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
2545 | 0 |
|
2546 | 0 | LOG(("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d" |
2547 | 0 | " [this=%p]", aChunkIdx, chunk->DataSize(), kChunkSize - 1, this)); |
2548 | 0 |
|
2549 | 0 | CacheFileChunkWriteHandle hnd = chunk->GetWriteHandle(kChunkSize); |
2550 | 0 | if (!hnd.Buf()) { |
2551 | 0 | ReleaseOutsideLock(chunk.forget()); |
2552 | 0 | SetError(NS_ERROR_OUT_OF_MEMORY); |
2553 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
2554 | 0 | } |
2555 | 0 | |
2556 | 0 | uint32_t offset = hnd.DataSize(); |
2557 | 0 | memset(hnd.Buf() + offset, 0, kChunkSize - offset); |
2558 | 0 | hnd.UpdateDataSize(offset, kChunkSize - offset); |
2559 | 0 |
|
2560 | 0 | ReleaseOutsideLock(chunk.forget()); |
2561 | 0 |
|
2562 | 0 | return NS_OK; |
2563 | 0 | } |
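// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// The padding above zero-fills everything between the chunk's current data
// size and the end of the chunk buffer, then reports the chunk as full. A
// standalone version of that fill; the buffer layout and names are
// illustrative.
#include <cstdint>
#include <cstring>

namespace sketch {

// Zero the tail of a chunk buffer of aChunkSize bytes whose first aDataSize
// bytes already hold valid data; returns the new data size (== aChunkSize).
inline uint32_t PadWithZeroes(char* aBuf, uint32_t aDataSize,
                              uint32_t aChunkSize)
{
  std::memset(aBuf + aDataSize, 0, aChunkSize - aDataSize);
  return aChunkSize;
}

} // namespace sketch
// ----------------------------------------------------------------------------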
2564 | | |
2565 | | void |
2566 | | CacheFile::SetError(nsresult aStatus) |
2567 | 0 | { |
2568 | 0 | AssertOwnsLock(); |
2569 | 0 |
|
2570 | 0 | if (NS_SUCCEEDED(mStatus)) { |
2571 | 0 | mStatus = aStatus; |
2572 | 0 | if (mHandle) { |
2573 | 0 | CacheFileIOManager::DoomFile(mHandle, nullptr); |
2574 | 0 | } |
2575 | 0 | } |
2576 | 0 | } |
2577 | | |
2578 | | nsresult |
2579 | | CacheFile::InitIndexEntry() |
2580 | 0 | { |
2581 | 0 | MOZ_ASSERT(mHandle); |
2582 | 0 |
|
2583 | 0 | if (mHandle->IsDoomed()) |
2584 | 0 | return NS_OK; |
2585 | 0 | |
2586 | 0 | nsresult rv; |
2587 | 0 |
|
2588 | 0 | rv = CacheFileIOManager::InitIndexEntry( |
2589 | 0 | mHandle, GetOriginAttrsHash(mMetadata->OriginAttributes()), |
2590 | 0 | mMetadata->IsAnonymous(), mPinned); |
2591 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
2592 | 0 |
|
2593 | 0 | uint32_t expTime; |
2594 | 0 | mMetadata->GetExpirationTime(&expTime); |
2595 | 0 |
|
2596 | 0 | uint32_t frecency; |
2597 | 0 | mMetadata->GetFrecency(&frecency); |
2598 | 0 |
|
2599 | 0 | bool hasAltData = mMetadata->GetElement(CacheFileUtils::kAltDataKey) ? true : false; |
2600 | 0 |
|
2601 | 0 | static auto toUint16 = [](const char* s) -> uint16_t { |
2602 | 0 | if (s) { |
2603 | 0 | nsresult rv; |
2604 | 0 | uint64_t n64 = nsDependentCString(s).ToInteger64(&rv); |
2605 | 0 | MOZ_ASSERT(NS_SUCCEEDED(rv)); |
2606 | 0 | return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound ; |
2607 | 0 | } |
2608 | 0 | return kIndexTimeNotAvailable; |
2609 | 0 | }; |
2610 | 0 |
|
2611 | 0 | const char *onStartTimeStr = mMetadata->GetElement("net-response-time-onstart"); |
2612 | 0 | uint16_t onStartTime = toUint16(onStartTimeStr); |
2613 | 0 |
|
2614 | 0 | const char *onStopTimeStr = mMetadata->GetElement("net-response-time-onstop"); |
2615 | 0 | uint16_t onStopTime = toUint16(onStopTimeStr); |
2616 | 0 |
|
2617 | 0 | rv = CacheFileIOManager::UpdateIndexEntry(mHandle, &frecency, &expTime, &hasAltData, &onStartTime, &onStopTime); |
2618 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
2619 | 0 |
|
2620 | 0 | return NS_OK; |
2621 | 0 | } |
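// --- Illustrative sketch, not part of CacheFile.cpp ------------------------
// The toUint16 lambda above clamps the stored onstart/onstop response times
// into the 16-bit range used by the index, with a sentinel for "missing". A
// standalone equivalent using only the standard library; the bound and
// sentinel values chosen here are assumptions for the example, not the real
// kIndexTime* constants.
#include <cstdint>
#include <cstdlib>

namespace sketch {

constexpr uint16_t kSketchTimeNotAvailable = UINT16_MAX;
constexpr uint16_t kSketchTimeOutOfBound = UINT16_MAX - 1;

inline uint16_t ClampResponseTime(const char* aStr)
{
  if (!aStr) {
    return kSketchTimeNotAvailable;  // metadata element missing
  }
  uint64_t n = std::strtoull(aStr, nullptr, 10);
  return n <= kSketchTimeOutOfBound ? static_cast<uint16_t>(n)
                                    : kSketchTimeOutOfBound;
}

} // namespace sketch
// ----------------------------------------------------------------------------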
2622 | | |
2623 | | size_t |
2624 | | CacheFile::SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const |
2625 | 0 | { |
2626 | 0 | CacheFileAutoLock lock(const_cast<CacheFile*>(this)); |
2627 | 0 |
|
2628 | 0 | size_t n = 0; |
2629 | 0 | n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf); |
2630 | 0 | n += mChunks.ShallowSizeOfExcludingThis(mallocSizeOf); |
2631 | 0 | for (auto iter = mChunks.ConstIter(); !iter.Done(); iter.Next()) { |
2632 | 0 | n += iter.Data()->SizeOfIncludingThis(mallocSizeOf); |
2633 | 0 | } |
2634 | 0 | n += mCachedChunks.ShallowSizeOfExcludingThis(mallocSizeOf); |
2635 | 0 | for (auto iter = mCachedChunks.ConstIter(); !iter.Done(); iter.Next()) { |
2636 | 0 | n += iter.Data()->SizeOfIncludingThis(mallocSizeOf); |
2637 | 0 | } |
2638 | 0 | // Ignore metadata if it's still being read. It's not safe to access buffers |
2639 | 0 | // in CacheFileMetadata because they might be reallocated on another thread |
2640 | 0 | // outside CacheFile's lock. |
2641 | 0 | if (mMetadata && mReady) { |
2642 | 0 | n += mMetadata->SizeOfIncludingThis(mallocSizeOf); |
2643 | 0 | } |
2644 | 0 |
|
2645 | 0 | // Input streams are not elsewhere reported. |
2646 | 0 | n += mInputs.ShallowSizeOfExcludingThis(mallocSizeOf); |
2647 | 0 | for (uint32_t i = 0; i < mInputs.Length(); ++i) { |
2648 | 0 | n += mInputs[i]->SizeOfIncludingThis(mallocSizeOf); |
2649 | 0 | } |
2650 | 0 |
|
2651 | 0 | // Output streams are not elsewhere reported. |
2652 | 0 | if (mOutput) { |
2653 | 0 | n += mOutput->SizeOfIncludingThis(mallocSizeOf); |
2654 | 0 | } |
2655 | 0 |
|
2656 | 0 | // The listeners are usually classes reported just above. |
2657 | 0 | n += mChunkListeners.ShallowSizeOfExcludingThis(mallocSizeOf); |
2658 | 0 | n += mObjsToRelease.ShallowSizeOfExcludingThis(mallocSizeOf); |
2659 | 0 |
|
2660 | 0 | // mHandle reported directly from CacheFileIOManager. |
2661 | 0 |
|
2662 | 0 | return n; |
2663 | 0 | } |
2664 | | |
2665 | | size_t |
2666 | | CacheFile::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const |
2667 | 0 | { |
2668 | 0 | return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf); |
2669 | 0 | } |
2670 | | |
2671 | | } // namespace net |
2672 | | } // namespace mozilla |