/src/mozilla-central/image/SurfaceCache.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
4 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | /** |
7 | | * SurfaceCache is a service for caching temporary surfaces in imagelib. |
8 | | */ |
9 | | |
10 | | #include "SurfaceCache.h" |
11 | | |
12 | | #include <algorithm> |
13 | | #include "mozilla/Assertions.h" |
14 | | #include "mozilla/Attributes.h" |
15 | | #include "mozilla/CheckedInt.h" |
16 | | #include "mozilla/DebugOnly.h" |
17 | | #include "mozilla/Likely.h" |
18 | | #include "mozilla/Move.h" |
19 | | #include "mozilla/Pair.h" |
20 | | #include "mozilla/RefPtr.h" |
21 | | #include "mozilla/StaticMutex.h" |
22 | | #include "mozilla/StaticPtr.h" |
23 | | #include "mozilla/Tuple.h" |
24 | | #include "nsIMemoryReporter.h" |
25 | | #include "gfx2DGlue.h" |
26 | | #include "gfxPlatform.h" |
27 | | #include "gfxPrefs.h" |
28 | | #include "imgFrame.h" |
29 | | #include "Image.h" |
30 | | #include "ISurfaceProvider.h" |
31 | | #include "LookupResult.h" |
32 | | #include "nsExpirationTracker.h" |
33 | | #include "nsHashKeys.h" |
34 | | #include "nsRefPtrHashtable.h" |
35 | | #include "nsSize.h" |
36 | | #include "nsTArray.h" |
37 | | #include "prsystem.h" |
38 | | #include "ShutdownTracker.h" |
39 | | |
40 | | using std::max; |
41 | | using std::min; |
42 | | |
43 | | namespace mozilla { |
44 | | |
45 | | using namespace gfx; |
46 | | |
47 | | namespace image { |
48 | | |
49 | | class CachedSurface; |
50 | | class SurfaceCacheImpl; |
51 | | |
52 | | /////////////////////////////////////////////////////////////////////////////// |
53 | | // Static Data |
54 | | /////////////////////////////////////////////////////////////////////////////// |
55 | | |
56 | | // The single surface cache instance. |
57 | | static StaticRefPtr<SurfaceCacheImpl> sInstance; |
58 | | |
59 | | // The mutex protecting the surface cache. |
60 | | static StaticMutex sInstanceMutex; |
61 | | |
62 | | /////////////////////////////////////////////////////////////////////////////// |
63 | | // SurfaceCache Implementation |
64 | | /////////////////////////////////////////////////////////////////////////////// |
65 | | |
66 | | /** |
67 | | * Cost models the cost of storing a surface in the cache. Right now, this is |
68 | | * simply an estimate of the size of the surface in bytes, but in the future it |
69 | | * may be worth taking into account the cost of rematerializing the surface as |
70 | | * well. |
71 | | */ |
72 | | typedef size_t Cost; |
73 | | |
74 | | static Cost |
75 | | ComputeCost(const IntSize& aSize, uint32_t aBytesPerPixel) |
76 | 0 | { |
77 | 0 | MOZ_ASSERT(aBytesPerPixel == 1 || aBytesPerPixel == 4); |
78 | 0 | return aSize.width * aSize.height * aBytesPerPixel; |
79 | 0 | } |
80 | | |
81 | | /** |
82 | | * Since we want to be able to make eviction decisions based on cost, we need to |
83 | | * be able to look up the CachedSurface which has a certain cost as well as the |
84 | | * cost associated with a certain CachedSurface. To make this possible, in data |
85 | | * structures we actually store a CostEntry, which contains a weak pointer to |
86 | | * its associated surface. |
87 | | * |
88 | | * To make usage of the weak pointer safe, SurfaceCacheImpl always calls |
89 | | * StartTracking after a surface is stored in the cache and StopTracking before |
90 | | * it is removed. |
91 | | */ |
92 | | class CostEntry |
93 | | { |
94 | | public: |
95 | | CostEntry(NotNull<CachedSurface*> aSurface, Cost aCost) |
96 | | : mSurface(aSurface) |
97 | | , mCost(aCost) |
98 | 0 | { } |
99 | | |
100 | 0 | NotNull<CachedSurface*> Surface() const { return mSurface; } |
101 | 0 | Cost GetCost() const { return mCost; } |
102 | | |
103 | | bool operator==(const CostEntry& aOther) const |
104 | 0 | { |
105 | 0 | return mSurface == aOther.mSurface && |
106 | 0 | mCost == aOther.mCost; |
107 | 0 | } |
108 | | |
109 | | bool operator<(const CostEntry& aOther) const |
110 | 0 | { |
111 | 0 | return mCost < aOther.mCost || |
112 | 0 | (mCost == aOther.mCost && recordreplay::RecordReplayValue(mSurface < aOther.mSurface)); |
113 | 0 | } |
114 | | |
115 | | private: |
116 | | NotNull<CachedSurface*> mSurface; |
117 | | Cost mCost; |
118 | | }; |
119 | | |
120 | | /** |
121 | | * A CachedSurface associates a surface with a key that uniquely identifies that |
122 | | * surface. |
123 | | */ |
class CachedSurface
{
  // Private: instances are destroyed only via Release() from the
  // threadsafe refcounting declared below.
  ~CachedSurface() { }
public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(CachedSurface)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CachedSurface)

  explicit CachedSurface(NotNull<ISurfaceProvider*> aProvider)
    : mProvider(aProvider)
    , mIsLocked(false)
  { }

  // Returns a drawable handle to the provider's surface. Must not be called
  // on a placeholder; doing so asserts and returns an empty DrawableSurface.
  DrawableSurface GetDrawableSurface() const
  {
    if (MOZ_UNLIKELY(IsPlaceholder())) {
      MOZ_ASSERT_UNREACHABLE("Called GetDrawableSurface() on a placeholder");
      return DrawableSurface();
    }

    return mProvider->Surface();
  }

  // Locks or unlocks this surface. A no-op for placeholders.
  void SetLocked(bool aLocked)
  {
    if (IsPlaceholder()) {
      return; // Can't lock a placeholder.
    }

    // Update both our state and our provider's state. Some surface providers
    // are permanently locked; maintaining our own locking state enables us to
    // respect SetLocked() even when it's meaningless from the provider's
    // perspective.
    mIsLocked = aLocked;
    mProvider->SetLocked(aLocked);
  }

  // True only when we *and* the provider agree the surface is locked;
  // placeholders are never considered locked.
  bool IsLocked() const
  {
    return !IsPlaceholder() && mIsLocked && mProvider->IsLocked();
  }

  // Marks this surface as unsubstitutable: factor-of-2 pruning must not
  // discard it in favour of a differently-sized surface.
  void SetCannotSubstitute() { mProvider->Availability().SetCannotSubstitute(); }
  bool CannotSubstitute() const { return mProvider->Availability().CannotSubstitute(); }

  bool IsPlaceholder() const { return mProvider->Availability().IsPlaceholder(); }
  bool IsDecoded() const { return !IsPlaceholder() && mProvider->IsFinished(); }

  // Key accessors, forwarded from the provider.
  ImageKey GetImageKey() const { return mProvider->GetImageKey(); }
  const SurfaceKey& GetSurfaceKey() const { return mProvider->GetSurfaceKey(); }
  nsExpirationState* GetExpirationState() { return &mExpirationState; }

  // Builds the CostEntry used to track this surface's eviction cost. Uses the
  // provider's logical size, which may differ from actual memory usage for
  // dynamically-generated surfaces.
  CostEntry GetCostEntry()
  {
    return image::CostEntry(WrapNotNull(this), mProvider->LogicalSizeInBytes());
  }

  // A helper type used by SurfaceCacheImpl::CollectSizeOfSurfaces.
  struct MOZ_STACK_CLASS SurfaceMemoryReport
  {
    SurfaceMemoryReport(nsTArray<SurfaceMemoryCounter>& aCounters,
                        MallocSizeOf aMallocSizeOf)
      : mCounters(aCounters)
      , mMallocSizeOf(aMallocSizeOf)
    { }

    // Appends a memory counter for aCachedSurface to the counter array.
    // Placeholders construct a counter but contribute nothing (early return
    // before AppendElement).
    void Add(NotNull<CachedSurface*> aCachedSurface, bool aIsFactor2)
    {
      SurfaceMemoryCounter counter(aCachedSurface->GetSurfaceKey(),
                                   aCachedSurface->IsLocked(),
                                   aCachedSurface->CannotSubstitute(),
                                   aIsFactor2);

      if (aCachedSurface->IsPlaceholder()) {
        return;
      }

      // Record the memory used by the ISurfaceProvider. This may not have a
      // straightforward relationship to the size of the surface that
      // DrawableRef() returns if the surface is generated dynamically. (i.e.,
      // for surfaces with PlaybackType::eAnimated.)
      size_t heap = 0;
      size_t nonHeap = 0;
      size_t handles = 0;
      aCachedSurface->mProvider
        ->AddSizeOfExcludingThis(mMallocSizeOf, heap, nonHeap, handles);
      counter.Values().SetDecodedHeap(heap);
      counter.Values().SetDecodedNonHeap(nonHeap);
      counter.Values().SetExternalHandles(handles);

      mCounters.AppendElement(counter);
    }

  private:
    nsTArray<SurfaceMemoryCounter>& mCounters;
    MallocSizeOf mMallocSizeOf;
  };

private:
  nsExpirationState mExpirationState;  // For the cache's expiration tracker.
  NotNull<RefPtr<ISurfaceProvider>> mProvider;
  bool mIsLocked;                      // Our view of the lock state; see SetLocked().
};
226 | | |
227 | | static int64_t |
228 | 0 | AreaOfIntSize(const IntSize& aSize) { |
229 | 0 | return static_cast<int64_t>(aSize.width) * static_cast<int64_t>(aSize.height); |
230 | 0 | } |
231 | | |
232 | | /** |
233 | | * An ImageSurfaceCache is a per-image surface cache. For correctness we must be |
234 | | * able to remove all surfaces associated with an image when the image is |
235 | | * destroyed or invalidated. Since this will happen frequently, it makes sense |
236 | | * to make it cheap by storing the surfaces for each image separately. |
237 | | * |
238 | | * ImageSurfaceCache also keeps track of whether its associated image is locked |
239 | | * or unlocked. |
240 | | * |
241 | | * The cache may also enter "factor of 2" mode which occurs when the number of |
242 | | * surfaces in the cache exceeds the "image.cache.factor2.threshold-surfaces" |
243 | | * pref plus the number of native sizes of the image. When in "factor of 2" |
244 | | * mode, the cache will strongly favour sizes which are a factor of 2 of the |
245 | | * largest native size. It accomplishes this by suggesting a factor of 2 size |
246 | | * when lookups fail and substituting the nearest factor of 2 surface to the |
247 | | * ideal size as the "best" available (as opposed to subsitution but not found). |
248 | | * This allows us to minimize memory consumption and CPU time spent decoding |
249 | | * when a website requires many variants of the same surface. |
250 | | */ |
class ImageSurfaceCache
{
  // Private: instances are destroyed only via Release() from the
  // threadsafe refcounting declared below.
  ~ImageSurfaceCache() { }
public:
  explicit ImageSurfaceCache(const ImageKey aImageKey)
    : mLocked(false)
    , mFactor2Mode(false)
    , mFactor2Pruned(false)
    , mIsVectorImage(aImageKey->GetType() == imgIContainer::TYPE_VECTOR)
  { }

  MOZ_DECLARE_REFCOUNTED_TYPENAME(ImageSurfaceCache)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageSurfaceCache)

  typedef
    nsRefPtrHashtable<nsGenericHashKey<SurfaceKey>, CachedSurface> SurfaceTable;

  bool IsEmpty() const { return mSurfaces.Count() == 0; }

  // Inserts aSurface keyed by its SurfaceKey. Fallible: returns false if the
  // hashtable entry could not be allocated.
  MOZ_MUST_USE bool Insert(NotNull<CachedSurface*> aSurface)
  {
    MOZ_ASSERT(!mLocked || aSurface->IsPlaceholder() || aSurface->IsLocked(),
               "Inserting an unlocked surface for a locked image");
    return mSurfaces.Put(aSurface->GetSurfaceKey(), aSurface, fallible);
  }

  // Removes aSurface from the table and returns the (now sole) reference.
  // May reset factor-of-2 state if this was the last surface; see
  // AfterMaybeRemove().
  already_AddRefed<CachedSurface> Remove(NotNull<CachedSurface*> aSurface)
  {
    MOZ_ASSERT(mSurfaces.GetWeak(aSurface->GetSurfaceKey()),
               "Should not be removing a surface we don't have");

    RefPtr<CachedSurface> surface;
    mSurfaces.Remove(aSurface->GetSurfaceKey(), getter_AddRefs(surface));
    AfterMaybeRemove();
    return surface.forget();
  }

  // Exact-key lookup. When aForAccess is true (a real consumer lookup rather
  // than internal accounting), a hit pins the surface against factor-of-2
  // pruning, and a miss may switch the cache into factor-of-2 mode.
  already_AddRefed<CachedSurface> Lookup(const SurfaceKey& aSurfaceKey,
                                         bool aForAccess)
  {
    RefPtr<CachedSurface> surface;
    mSurfaces.Get(aSurfaceKey, getter_AddRefs(surface));

    if (aForAccess) {
      if (surface) {
        // We don't want to allow factor of 2 mode pruning to release surfaces
        // for which the callers will accept no substitute.
        surface->SetCannotSubstitute();
      } else if (!mFactor2Mode) {
        // If no exact match is found, and this is for use rather than internal
        // accounting (i.e. insert and removal), we know this will trigger a
        // decode. Make sure we switch now to factor of 2 mode if necessary.
        MaybeSetFactor2Mode();
      }
    }

    return surface.forget();
  }

  /**
   * @returns A tuple containing the best matching CachedSurface if available,
   *          a MatchType describing how the CachedSurface was selected, and
   *          an IntSize which is the size the caller should choose to decode
   *          at should it attempt to do so.
   */
  Tuple<already_AddRefed<CachedSurface>, MatchType, IntSize>
  LookupBestMatch(const SurfaceKey& aIdealKey)
  {
    // Try for an exact match first.
    RefPtr<CachedSurface> exactMatch;
    mSurfaces.Get(aIdealKey, getter_AddRefs(exactMatch));
    if (exactMatch) {
      if (exactMatch->IsDecoded()) {
        return MakeTuple(exactMatch.forget(), MatchType::EXACT, IntSize());
      }
    } else if (!mFactor2Mode) {
      // If no exact match is found, and we are not in factor of 2 mode, then
      // we know that we will trigger a decode because at best we will provide
      // a substitute. Make sure we switch now to factor of 2 mode if necessary.
      MaybeSetFactor2Mode();
    }

    // Try for a best match second, if using compact.
    IntSize suggestedSize = SuggestedSize(aIdealKey.Size());
    if (suggestedSize != aIdealKey.Size()) {
      if (!exactMatch) {
        SurfaceKey compactKey = aIdealKey.CloneWithSize(suggestedSize);
        mSurfaces.Get(compactKey, getter_AddRefs(exactMatch));
        if (exactMatch && exactMatch->IsDecoded()) {
          MOZ_ASSERT(suggestedSize != aIdealKey.Size());
          return MakeTuple(exactMatch.forget(),
                           MatchType::SUBSTITUTE_BECAUSE_BEST,
                           suggestedSize);
        }
      }
    }

    // There's no perfect match, so find the best match we can.
    RefPtr<CachedSurface> bestMatch;
    for (auto iter = ConstIter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
      const SurfaceKey& currentKey = current->GetSurfaceKey();

      // We never match a placeholder.
      if (current->IsPlaceholder()) {
        continue;
      }
      // Matching the playback type and SVG context is required.
      if (currentKey.Playback() != aIdealKey.Playback() ||
          currentKey.SVGContext() != aIdealKey.SVGContext()) {
        continue;
      }
      // Matching the flags is required.
      if (currentKey.Flags() != aIdealKey.Flags()) {
        continue;
      }
      // Anything is better than nothing! (Within the constraints we just
      // checked, of course.)
      if (!bestMatch) {
        bestMatch = current;
        continue;
      }

      MOZ_ASSERT(bestMatch, "Should have a current best match");

      // Always prefer completely decoded surfaces.
      bool bestMatchIsDecoded = bestMatch->IsDecoded();
      if (bestMatchIsDecoded && !current->IsDecoded()) {
        continue;
      }
      if (!bestMatchIsDecoded && current->IsDecoded()) {
        bestMatch = current;
        continue;
      }

      // Both candidates are equally decoded; pick by size heuristic.
      SurfaceKey bestMatchKey = bestMatch->GetSurfaceKey();
      if (CompareArea(aIdealKey.Size(), bestMatchKey.Size(),
                      currentKey.Size())) {
        bestMatch = current;
      }
    }

    // Classify how (and whether) we matched, for the caller's benefit.
    MatchType matchType;
    if (bestMatch) {
      if (!exactMatch) {
        // No exact match, neither ideal nor factor of 2.
        MOZ_ASSERT(suggestedSize != bestMatch->GetSurfaceKey().Size(),
                   "No exact match despite the fact the sizes match!");
        matchType = MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND;
      } else if (exactMatch != bestMatch) {
        // The exact match is still decoding, but we found a substitute.
        matchType = MatchType::SUBSTITUTE_BECAUSE_PENDING;
      } else if (aIdealKey.Size() != bestMatch->GetSurfaceKey().Size()) {
        // The best factor of 2 match is still decoding, but the best we've got.
        MOZ_ASSERT(suggestedSize != aIdealKey.Size());
        MOZ_ASSERT(mFactor2Mode || mIsVectorImage);
        matchType = MatchType::SUBSTITUTE_BECAUSE_BEST;
      } else {
        // The exact match is still decoding, but it's the best we've got.
        matchType = MatchType::EXACT;
      }
    } else {
      if (exactMatch) {
        // We found an "exact match"; it must have been a placeholder.
        MOZ_ASSERT(exactMatch->IsPlaceholder());
        matchType = MatchType::PENDING;
      } else {
        // We couldn't find an exact match *or* a substitute.
        matchType = MatchType::NOT_FOUND;
      }
    }

    return MakeTuple(bestMatch.forget(), matchType, suggestedSize);
  }

  // Switches this cache into factor-of-2 mode if the surface count exceeds
  // the configured threshold (plus the image's native size count) and the
  // image has a usable native size. Must only be called outside the mode.
  void MaybeSetFactor2Mode()
  {
    MOZ_ASSERT(!mFactor2Mode);

    // Typically an image cache will not have too many size-varying surfaces, so
    // if we exceed the given threshold, we should consider using a subset.
    int32_t thresholdSurfaces = gfxPrefs::ImageCacheFactor2ThresholdSurfaces();
    if (thresholdSurfaces < 0 ||
        mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
      return;
    }

    // Determine how many native surfaces this image has. If it is zero, and it
    // is a vector image, then we should impute a single native size. Otherwise,
    // it may be zero because we don't know yet, or the image has an error, or
    // it isn't supported.
    auto first = ConstIter();
    NotNull<CachedSurface*> current = WrapNotNull(first.UserData());
    Image* image = static_cast<Image*>(current->GetImageKey());
    size_t nativeSizes = image->GetNativeSizesLength();
    if (mIsVectorImage) {
      MOZ_ASSERT(nativeSizes == 0);
      nativeSizes = 1;
    } else if (nativeSizes == 0) {
      return;
    }

    // Increase the threshold by the number of native sizes. This ensures that
    // we do not prevent decoding of the image at all its native sizes. It does
    // not guarantee we will provide a surface at that size however (i.e. many
    // other sized surfaces are requested, in addition to the native sizes).
    thresholdSurfaces += nativeSizes;
    if (mSurfaces.Count() <= static_cast<uint32_t>(thresholdSurfaces)) {
      return;
    }

    // Get our native size. While we know the image should be fully decoded,
    // if it is an SVG, it is valid to have a zero size. We can't do compacting
    // in that case because we need to know the width/height ratio to define a
    // candidate set.
    IntSize nativeSize;
    if (NS_FAILED(image->GetWidth(&nativeSize.width)) ||
        NS_FAILED(image->GetHeight(&nativeSize.height)) ||
        nativeSize.IsEmpty()) {
      return;
    }

    // We have a valid size, we can change modes.
    mFactor2Mode = true;
  }

  // In factor-of-2 mode, discards surfaces whose size is not a factor-of-2
  // candidate, provided a decoded factor-of-2 substitute exists and the
  // caller didn't pin the surface (CannotSubstitute). aRemoveCallback is
  // invoked for each discarded surface before it leaves the table.
  template<typename Function>
  void Prune(Function&& aRemoveCallback)
  {
    if (!mFactor2Mode || mFactor2Pruned) {
      return;
    }

    // Attempt to discard any surfaces which are not factor of 2 and the best
    // factor of 2 match exists.
    bool hasNotFactorSize = false;
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> current = WrapNotNull(iter.UserData());
      const SurfaceKey& currentKey = current->GetSurfaceKey();
      const IntSize& currentSize = currentKey.Size();

      // First we check if someone requested this size and would not accept
      // an alternatively sized surface.
      if (current->CannotSubstitute()) {
        continue;
      }

      // Next we find the best factor of 2 size for this surface. If this
      // surface is a factor of 2 size, then we want to keep it.
      IntSize bestSize = SuggestedSize(currentSize);
      if (bestSize == currentSize) {
        continue;
      }

      // Check the cache for a surface with the same parameters except for the
      // size which uses the closest factor of 2 size.
      SurfaceKey compactKey = currentKey.CloneWithSize(bestSize);
      RefPtr<CachedSurface> compactMatch;
      mSurfaces.Get(compactKey, getter_AddRefs(compactMatch));
      if (compactMatch && compactMatch->IsDecoded()) {
        aRemoveCallback(current);
        iter.Remove();
      } else {
        hasNotFactorSize = true;
      }
    }

    // We have no surfaces that are not factor of 2 sized, so we can stop
    // pruning henceforth, because we avoid the insertion of new surfaces that
    // don't match our sizing set (unless the caller won't accept a
    // substitution.)
    if (!hasNotFactorSize) {
      mFactor2Pruned = true;
    }

    // We should never leave factor of 2 mode due to pruning in of itself, but
    // if we discarded surfaces due to the volatile buffers getting released,
    // it is possible.
    AfterMaybeRemove();
  }

  // Returns the size the caller should decode at for the requested aSize.
  // Applies factor-of-2 snapping (SuggestedSizeInternal) and, for vector
  // images, clamps rasterization to the configured maximum area.
  IntSize SuggestedSize(const IntSize& aSize) const
  {
    IntSize suggestedSize = SuggestedSizeInternal(aSize);
    if (mIsVectorImage) {
      // Whether or not we are in factor of 2 mode, vector image rasterization is
      // clamped at a configured maximum if the caller is willing to accept
      // substitutes.
      MOZ_ASSERT(SurfaceCache::IsLegalSize(suggestedSize));

      // If we exceed the maximum, we need to scale the size downwards to fit.
      // It shouldn't get here if it is significantly larger because
      // VectorImage::UseSurfaceCacheForSize should prevent us from requesting
      // a rasterized version of a surface greater than 4x the maximum.
      int32_t maxSizeKB = gfxPrefs::ImageCacheMaxRasterizedSVGThresholdKB();
      // width * height / 256 == (width * height * 4 bytes) / 1024, i.e. the
      // proposed surface's size in KB assuming 4 bytes per pixel.
      int32_t proposedKB = suggestedSize.width * suggestedSize.height / 256;
      if (maxSizeKB >= proposedKB) {
        return suggestedSize;
      }

      // Scale both dimensions by sqrt(max/proposed) to preserve the aspect
      // ratio while bringing the area under the limit.
      double scale = sqrt(double(maxSizeKB) / proposedKB);
      suggestedSize.width = int32_t(scale * suggestedSize.width);
      suggestedSize.height = int32_t(scale * suggestedSize.height);
    }

    return suggestedSize;
  }

  // Core of the factor-of-2 size selection: snaps aSize to the best member
  // of the candidate set {native size * 2^k, native size / 2^k}.
  IntSize SuggestedSizeInternal(const IntSize& aSize) const
  {
    // When not in factor of 2 mode, we can always decode at the given size.
    if (!mFactor2Mode) {
      return aSize;
    }

    // We cannot enter factor of 2 mode unless we have a minimum number of
    // surfaces, and we should have left it if the cache was emptied.
    if (MOZ_UNLIKELY(IsEmpty())) {
      MOZ_ASSERT_UNREACHABLE("Should not be empty and in factor of 2 mode!");
      return aSize;
    }

    // This bit of awkwardness gets the largest native size of the image.
    auto iter = ConstIter();
    NotNull<CachedSurface*> firstSurface = WrapNotNull(iter.UserData());
    Image* image = static_cast<Image*>(firstSurface->GetImageKey());
    IntSize factorSize;
    if (NS_FAILED(image->GetWidth(&factorSize.width)) ||
        NS_FAILED(image->GetHeight(&factorSize.height)) ||
        factorSize.IsEmpty()) {
      // We should not have entered factor of 2 mode without a valid size, and
      // several successfully decoded surfaces. Note that valid vector images
      // may have a default size of 0x0, and those are not yet supported.
      MOZ_ASSERT_UNREACHABLE("Expected valid native size!");
      return aSize;
    }

    if (mIsVectorImage) {
      // Ensure the aspect ratio matches the native size before forcing the
      // caller to accept a factor of 2 size. The difference between the aspect
      // ratios is:
      //
      //     delta = nativeWidth/nativeHeight - desiredWidth/desiredHeight
      //
      //     delta*nativeHeight*desiredHeight = nativeWidth*desiredHeight
      //                                        - desiredWidth*nativeHeight
      //
      // Using the maximum accepted delta as a constant, we can avoid the
      // floating point division and just compare after some integer ops.
      int32_t delta = factorSize.width * aSize.height - aSize.width * factorSize.height;
      int32_t maxDelta = (factorSize.height * aSize.height) >> 4;
      if (delta > maxDelta || delta < -maxDelta) {
        return aSize;
      }

      // If the requested size is bigger than the native size, we actually need
      // to grow the native size instead of shrinking it.
      if (factorSize.width < aSize.width) {
        do {
          IntSize candidate(factorSize.width * 2, factorSize.height * 2);
          if (!SurfaceCache::IsLegalSize(candidate)) {
            break;
          }

          factorSize = candidate;
        } while (factorSize.width < aSize.width);

        return factorSize;
      }

      // Otherwise we can find the best fit as normal.
    }

    // Start with the native size as the best first guess.
    IntSize bestSize = factorSize;
    factorSize.width /= 2;
    factorSize.height /= 2;

    while (!factorSize.IsEmpty()) {
      if (!CompareArea(aSize, bestSize, factorSize)) {
        // This size is not better than the last. Since we proceed from largest
        // to smallest, we know that the next size will not be better if the
        // previous size was rejected. Break early.
        break;
      }

      // The current factor of 2 size is better than the last selected size.
      bestSize = factorSize;
      factorSize.width /= 2;
      factorSize.height /= 2;
    }

    return bestSize;
  }

  // Returns true if aSize is a better match for aIdealSize than aBestSize.
  bool CompareArea(const IntSize& aIdealSize,
                   const IntSize& aBestSize,
                   const IntSize& aSize) const
  {
    // Compare sizes. We use an area-based heuristic here instead of computing a
    // truly optimal answer, since it seems very unlikely to make a difference
    // for realistic sizes.
    int64_t idealArea = AreaOfIntSize(aIdealSize);
    int64_t currentArea = AreaOfIntSize(aSize);
    int64_t bestMatchArea = AreaOfIntSize(aBestSize);

    // If the best match is smaller than the ideal size, prefer bigger sizes.
    if (bestMatchArea < idealArea) {
      if (currentArea > bestMatchArea) {
        return true;
      }
      return false;
    }

    // Otherwise, prefer sizes closer to the ideal size, but still not smaller.
    if (idealArea <= currentArea && currentArea < bestMatchArea) {
      return true;
    }

    // This surface isn't an improvement over the current best match.
    return false;
  }

  // Reports the memory usage of every surface in this cache into aCounters.
  // Surfaces whose backing memory was discarded by the OS (empty
  // DrawableSurface) are removed via aRemoveCallback instead of reported.
  template<typename Function>
  void CollectSizeOfSurfaces(nsTArray<SurfaceMemoryCounter>& aCounters,
                             MallocSizeOf aMallocSizeOf,
                             Function&& aRemoveCallback)
  {
    CachedSurface::SurfaceMemoryReport report(aCounters, aMallocSizeOf);
    for (auto iter = mSurfaces.Iter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());

      // We don't need the drawable surface for ourselves, but adding a surface
      // to the report will trigger this indirectly. If the surface was
      // discarded by the OS because it was in volatile memory, we should remove
      // it from the cache immediately rather than include it in the report.
      DrawableSurface drawableSurface;
      if (!surface->IsPlaceholder()) {
        drawableSurface = surface->GetDrawableSurface();
        if (!drawableSurface) {
          aRemoveCallback(surface);
          iter.Remove();
          continue;
        }
      }

      const IntSize& size = surface->GetSurfaceKey().Size();
      bool factor2Size = false;
      if (mFactor2Mode) {
        factor2Size = (size == SuggestedSize(size));
      }
      report.Add(surface, factor2Size);
    }

    AfterMaybeRemove();
  }

  SurfaceTable::Iterator ConstIter() const
  {
    return mSurfaces.ConstIter();
  }

  void SetLocked(bool aLocked) { mLocked = aLocked; }
  bool IsLocked() const { return mLocked; }

private:
  // Called after any operation that may have removed surfaces.
  void AfterMaybeRemove()
  {
    if (IsEmpty() && mFactor2Mode) {
      // The last surface for this cache was removed. This can happen if the
      // surface was stored in a volatile buffer and got purged, or the surface
      // expired from the cache. If the cache itself lingers for some reason
      // (e.g. in the process of performing a lookup, the cache itself is
      // locked), then we need to reset the factor of 2 state because it
      // requires at least one surface present to get the native size
      // information from the image.
      mFactor2Mode = mFactor2Pruned = false;
    }
  }

  SurfaceTable mSurfaces;

  bool mLocked;

  // True in "factor of 2" mode.
  bool mFactor2Mode;

  // True if all non-factor of 2 surfaces have been removed from the cache. Note
  // that this excludes unsubstitutable sizes.
  bool mFactor2Pruned;

  // True if the surfaces are produced from a vector image. If so, it must match
  // the aspect ratio when using factor of 2 mode.
  bool mIsVectorImage;
};
746 | | |
747 | | /** |
748 | | * SurfaceCacheImpl is responsible for determining which surfaces will be cached |
749 | | * and managing the surface cache data structures. Rather than interact with |
750 | | * SurfaceCacheImpl directly, client code interacts with SurfaceCache, which |
751 | | * maintains high-level invariants and encapsulates the details of the surface |
752 | | * cache's implementation. |
753 | | */ |
754 | | class SurfaceCacheImpl final : public nsIMemoryReporter |
755 | | { |
756 | | public: |
757 | | NS_DECL_ISUPPORTS |
758 | | |
  // Construct the cache implementation with the given expiration time (ms),
  // memory-pressure discard factor (1/N of discardable cost released per
  // notification), and maximum total cost (bytes). Registers for
  // "memory-pressure" observer notifications so the cache can shed surfaces
  // when the system is low on memory.
  SurfaceCacheImpl(uint32_t aSurfaceCacheExpirationTimeMS,
                   uint32_t aSurfaceCacheDiscardFactor,
                   uint32_t aSurfaceCacheSize)
    : mExpirationTracker(aSurfaceCacheExpirationTimeMS)
    , mMemoryPressureObserver(new MemoryPressureObserver)
    , mDiscardFactor(aSurfaceCacheDiscardFactor)
    , mMaxCost(aSurfaceCacheSize)
    , mAvailableCost(aSurfaceCacheSize)  // Starts empty: all cost available.
    , mLockedCost(0)
    , mOverflowCount(0)
  {
    // The observer service may be unavailable (e.g. very early in startup);
    // in that case we simply won't react to memory pressure.
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (os) {
      os->AddObserver(mMemoryPressureObserver, "memory-pressure", false);
    }
  }
775 | | |
private:
  // Tear down observer registration and memory reporting. Private because
  // the object is refcounted (NS_DECL_ISUPPORTS).
  virtual ~SurfaceCacheImpl()
  {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (os) {
      os->RemoveObserver(mMemoryPressureObserver, "memory-pressure");
    }

    UnregisterWeakMemoryReporter(this);
  }

public:
  // Deferred from the constructor: registering a weak memory reporter on a
  // partially-constructed object would be unsafe.
  void InitMemoryReporter() { RegisterWeakMemoryReporter(this); }
789 | | |
  // Insert the surface provided by |aProvider| into the cache, evicting
  // lower-priority surfaces as needed to make room. If |aSetAvailable| is
  // true, the provider's availability is flipped from placeholder to
  // available as part of insertion (see SurfaceAvailable()). Returns
  // FAILURE_ALREADY_PRESENT if a real (non-placeholder) entry with the same
  // keys exists, and FAILURE if the surface cannot be cached at all.
  InsertOutcome Insert(NotNull<ISurfaceProvider*> aProvider,
                       bool aSetAvailable,
                       const StaticMutexAutoLock& aAutoLock)
  {
    // If this is a duplicate surface, refuse to replace the original.
    // XXX(seth): Calling Lookup() and then RemoveEntry() does the lookup
    // twice. We'll make this more efficient in bug 1185137.
    LookupResult result = Lookup(aProvider->GetImageKey(),
                                 aProvider->GetSurfaceKey(),
                                 aAutoLock,
                                 /* aMarkUsed = */ false);
    if (MOZ_UNLIKELY(result)) {
      return InsertOutcome::FAILURE_ALREADY_PRESENT;
    }

    // A pending placeholder is replaced by this (real or placeholder) entry.
    if (result.Type() == MatchType::PENDING) {
      RemoveEntry(aProvider->GetImageKey(), aProvider->GetSurfaceKey(), aAutoLock);
    }

    MOZ_ASSERT(result.Type() == MatchType::NOT_FOUND ||
               result.Type() == MatchType::PENDING,
               "A LookupResult with no surface should be NOT_FOUND or PENDING");

    // If this is bigger than we can hold after discarding everything we can,
    // refuse to cache it.
    Cost cost = aProvider->LogicalSizeInBytes();
    if (MOZ_UNLIKELY(!CanHoldAfterDiscarding(cost))) {
      mOverflowCount++;
      return InsertOutcome::FAILURE;
    }

    // Remove elements in order of cost until we can fit this in the cache. Note
    // that locked surfaces aren't in mCosts, so we never remove them here.
    while (cost > mAvailableCost) {
      MOZ_ASSERT(!mCosts.IsEmpty(),
                 "Removed everything and it still won't fit");
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true, aAutoLock);
    }

    // Locate the appropriate per-image cache. If there's not an existing cache
    // for this image, create it.
    const ImageKey imageKey = aProvider->GetImageKey();
    RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
    if (!cache) {
      cache = new ImageSurfaceCache(imageKey);
      mImageCaches.Put(aProvider->GetImageKey(), cache);
    }

    // If we were asked to mark the cache entry available, do so.
    if (aSetAvailable) {
      aProvider->Availability().SetAvailable();
    }

    auto surface = MakeNotNull<RefPtr<CachedSurface>>(aProvider);

    // We require that locking succeed if the image is locked and we're not
    // inserting a placeholder; the caller may need to know this to handle
    // errors correctly.
    bool mustLock = cache->IsLocked() && !surface->IsPlaceholder();
    if (mustLock) {
      surface->SetLocked(true);
      if (!surface->IsLocked()) {
        return InsertOutcome::FAILURE;
      }
    }

    // Insert.
    MOZ_ASSERT(cost <= mAvailableCost, "Inserting despite too large a cost");
    if (!cache->Insert(surface)) {
      // Roll back the lock we took above so accounting stays balanced.
      if (mustLock) {
        surface->SetLocked(false);
      }
      return InsertOutcome::FAILURE;
    }

    if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
      MOZ_ASSERT(!mustLock);
      // aStopTracking is false: tracking never started, so only undo the
      // per-image cache insertion.
      Remove(surface, /* aStopTracking */ false, aAutoLock);
      return InsertOutcome::FAILURE;
    }

    return InsertOutcome::SUCCESS;
  }
873 | | |
  // Remove |aSurface| from its per-image cache and (if |aStopTracking|) from
  // cost/expiration accounting, notifying the owning image that the surface
  // was discarded. The surface itself is queued on mCachedSurfacesDiscard so
  // it can be freed after the lock is released.
  void Remove(NotNull<CachedSurface*> aSurface,
              bool aStopTracking,
              const StaticMutexAutoLock& aAutoLock)
  {
    ImageKey imageKey = aSurface->GetImageKey();

    RefPtr<ImageSurfaceCache> cache = GetImageCache(imageKey);
    MOZ_ASSERT(cache, "Shouldn't try to remove a surface with no image cache");

    // If the surface was not a placeholder, tell its image that we discarded it.
    if (!aSurface->IsPlaceholder()) {
      static_cast<Image*>(imageKey)->OnSurfaceDiscarded(aSurface->GetSurfaceKey());
    }

    // If we failed during StartTracking, we can skip this step.
    if (aStopTracking) {
      StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
    }

    // Individual surfaces must be freed outside the lock.
    mCachedSurfacesDiscard.AppendElement(cache->Remove(aSurface));

    MaybeRemoveEmptyCache(imageKey, cache);
  }
898 | | |
  // Begin cost/expiration accounting for |aSurface|. Locked surfaces only add
  // to mLockedCost; unlocked surfaces are inserted into the sorted cost array
  // and the expiration tracker. Returns false — leaving the surface fully
  // untracked — if either insertion fails (e.g. OOM or XPCOM shutdown).
  bool StartTracking(NotNull<CachedSurface*> aSurface,
                     const StaticMutexAutoLock& aAutoLock)
  {
    CostEntry costEntry = aSurface->GetCostEntry();
    MOZ_ASSERT(costEntry.GetCost() <= mAvailableCost,
               "Cost too large and the caller didn't catch it");

    if (aSurface->IsLocked()) {
      mLockedCost += costEntry.GetCost();
      MOZ_ASSERT(mLockedCost <= mMaxCost, "Locked more than we can hold?");
    } else {
      if (NS_WARN_IF(!mCosts.InsertElementSorted(costEntry, fallible))) {
        return false;
      }

      // This may fail during XPCOM shutdown, so we need to ensure the object is
      // tracked before calling RemoveObject in StopTracking.
      nsresult rv = mExpirationTracker.AddObjectLocked(aSurface, aAutoLock);
      if (NS_WARN_IF(NS_FAILED(rv))) {
        // Undo the cost insertion so the failure leaves no partial state.
        DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
        MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
        return false;
      }
    }

    // Accounting for this surface is now live; claim its share of the budget.
    mAvailableCost -= costEntry.GetCost();
    return true;
  }
927 | | |
  // Reverse StartTracking: remove |aSurface| from cost/expiration accounting
  // and return its cost to the available budget. |aIsTracked| expresses
  // whether the caller expects the surface to be in the expiration tracker;
  // it is used only for assertions.
  void StopTracking(NotNull<CachedSurface*> aSurface,
                    bool aIsTracked,
                    const StaticMutexAutoLock& aAutoLock)
  {
    CostEntry costEntry = aSurface->GetCostEntry();

    if (aSurface->IsLocked()) {
      MOZ_ASSERT(mLockedCost >= costEntry.GetCost(), "Costs don't balance");
      mLockedCost -= costEntry.GetCost();
      // XXX(seth): It'd be nice to use an O(log n) lookup here. This is O(n).
      MOZ_ASSERT(!mCosts.Contains(costEntry),
                 "Shouldn't have a cost entry for a locked surface");
    } else {
      if (MOZ_LIKELY(aSurface->GetExpirationState()->IsTracked())) {
        MOZ_ASSERT(aIsTracked, "Expiration-tracking a surface unexpectedly!");
        mExpirationTracker.RemoveObjectLocked(aSurface, aAutoLock);
      } else {
        // Our call to AddObject must have failed in StartTracking; most likely
        // we're in XPCOM shutdown right now.
        MOZ_ASSERT(!aIsTracked, "Not expiration-tracking an unlocked surface!");
      }

      DebugOnly<bool> foundInCosts = mCosts.RemoveElementSorted(costEntry);
      MOZ_ASSERT(foundInCosts, "Lost track of costs for this surface");
    }

    mAvailableCost += costEntry.GetCost();
    MOZ_ASSERT(mAvailableCost <= mMaxCost,
               "More available cost than we started with");
  }
958 | | |
  // Look up the surface matching (aImageKey, aSurfaceKey) exactly. Returns
  // NOT_FOUND when no entry exists, PENDING when only a placeholder is
  // present, and EXACT with a drawable surface otherwise. Entries whose
  // underlying buffer the OS reclaimed are evicted here as a side effect.
  // |aMarkUsed| controls whether a hit refreshes the entry's expiration.
  LookupResult Lookup(const ImageKey aImageKey,
                      const SurfaceKey& aSurfaceKey,
                      const StaticMutexAutoLock& aAutoLock,
                      bool aMarkUsed = true)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return LookupResult(MatchType::NOT_FOUND);
    }

    RefPtr<CachedSurface> surface = cache->Lookup(aSurfaceKey, aMarkUsed);
    if (!surface) {
      // Lookup in the per-image cache missed.
      return LookupResult(MatchType::NOT_FOUND);
    }

    if (surface->IsPlaceholder()) {
      return LookupResult(MatchType::PENDING);
    }

    DrawableSurface drawableSurface = surface->GetDrawableSurface();
    if (!drawableSurface) {
      // The surface was released by the operating system. Remove the cache
      // entry as well.
      Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
      return LookupResult(MatchType::NOT_FOUND);
    }

    if (aMarkUsed &&
        !MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
      // MarkUsed failure leaves the surface untracked; remove without
      // stopping tracking again.
      Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
      return LookupResult(MatchType::NOT_FOUND);
    }

    MOZ_ASSERT(surface->GetSurfaceKey() == aSurfaceKey,
               "Lookup() not returning an exact match?");
    return LookupResult(std::move(drawableSurface), MatchType::EXACT);
  }
998 | | |
  // Look up the best available substitute for (aImageKey, aSurfaceKey),
  // possibly returning a surface of a different size. The returned match type
  // describes how good the substitution is; the suggested size (factor of 2
  // mode) is propagated to the caller via the LookupResult.
  LookupResult LookupBestMatch(const ImageKey aImageKey,
                               const SurfaceKey& aSurfaceKey,
                               const StaticMutexAutoLock& aAutoLock)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      // No cached surfaces for this image.
      return LookupResult(MatchType::NOT_FOUND);
    }

    // Repeatedly look up the best match, trying again if the resulting surface
    // has been freed by the operating system, until we can either lock a
    // surface for drawing or there are no matching surfaces left.
    // XXX(seth): This is O(N^2), but N is expected to be very small. If we
    // encounter a performance problem here we can revisit this.

    RefPtr<CachedSurface> surface;
    DrawableSurface drawableSurface;
    MatchType matchType = MatchType::NOT_FOUND;
    IntSize suggestedSize;
    while (true) {
      Tie(surface, matchType, suggestedSize)
        = cache->LookupBestMatch(aSurfaceKey);

      if (!surface) {
        return LookupResult(matchType); // Lookup in the per-image cache missed.
      }

      drawableSurface = surface->GetDrawableSurface();
      if (drawableSurface) {
        break;
      }

      // The surface was released by the operating system. Remove the cache
      // entry as well.
      Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
    }

    // Substituted surfaces must agree with the request in everything except
    // size; exact matches must agree in everything.
    MOZ_ASSERT_IF(matchType == MatchType::EXACT,
                  surface->GetSurfaceKey() == aSurfaceKey);
    MOZ_ASSERT_IF(matchType == MatchType::SUBSTITUTE_BECAUSE_NOT_FOUND ||
                  matchType == MatchType::SUBSTITUTE_BECAUSE_PENDING,
      surface->GetSurfaceKey().SVGContext() == aSurfaceKey.SVGContext() &&
      surface->GetSurfaceKey().Playback() == aSurfaceKey.Playback() &&
      surface->GetSurfaceKey().Flags() == aSurfaceKey.Flags());

    if (matchType == MatchType::EXACT ||
        matchType == MatchType::SUBSTITUTE_BECAUSE_BEST) {
      if (!MarkUsed(WrapNotNull(surface), WrapNotNull(cache), aAutoLock)) {
        // MarkUsed failure leaves the surface untracked; remove it, but we
        // can still return the drawable surface we already obtained.
        Remove(WrapNotNull(surface), /* aStopTracking */ false, aAutoLock);
      }
    }

    return LookupResult(std::move(drawableSurface), matchType, suggestedSize);
  }
1054 | | |
  // True if a surface of cost |aCost| could ever fit in this cache, ignoring
  // what is currently stored. (Compare CanHoldAfterDiscarding(), which also
  // accounts for locked surfaces.)
  bool CanHold(const Cost aCost) const
  {
    return aCost <= mMaxCost;
  }

  // The configured maximum total cost of the cache, in bytes.
  size_t MaximumCapacity() const
  {
    return size_t(mMaxCost);
  }
1064 | | |
  // Promote |aProvider|'s placeholder entry to an available surface by
  // reinserting it with aSetAvailable = true.
  void SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider,
                        const StaticMutexAutoLock& aAutoLock)
  {
    if (!aProvider->Availability().IsPlaceholder()) {
      MOZ_ASSERT_UNREACHABLE("Calling SurfaceAvailable on non-placeholder");
      return;
    }

    // Reinsert the provider, requesting that Insert() mark it available. This
    // may or may not succeed, depending on whether some other decoder has
    // beaten us to the punch and inserted a non-placeholder version of this
    // surface first, but it's fine either way.
    // XXX(seth): This could be implemented more efficiently; we should be able
    // to just update our data structures without reinserting.
    Insert(aProvider, /* aSetAvailable = */ true, aAutoLock);
  }
1081 | | |
  // Mark |aImageKey|'s per-image cache as locked, creating the cache if it
  // doesn't exist yet so the lock state has somewhere to live.
  void LockImage(const ImageKey aImageKey)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      cache = new ImageSurfaceCache(aImageKey);
      mImageCaches.Put(aImageKey, cache);
    }

    cache->SetLocked(true);

    // We don't relock this image's existing surfaces right away; instead, the
    // image should arrange for Lookup() to touch them if they are still useful.
  }

  // Unlock |aImageKey|'s per-image cache and all of its surfaces, returning
  // them to normal expiration-tracked status.
  void UnlockImage(const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache || !cache->IsLocked()) {
      return; // Already unlocked.
    }

    cache->SetLocked(false);
    DoUnlockSurfaces(WrapNotNull(cache), /* aStaticOnly = */ false, aAutoLock);
  }
1106 | | |
  // Unlock the surfaces of a locked image without unlocking the image itself.
  // When animated surfaces aren't discardable (pref off), only static
  // surfaces are unlocked.
  void UnlockEntries(const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache || !cache->IsLocked()) {
      return; // Already unlocked.
    }

    // (Note that we *don't* unlock the per-image cache here; that's the
    // difference between this and UnlockImage.)
    DoUnlockSurfaces(WrapNotNull(cache),
                     /* aStaticOnly = */ !gfxPrefs::ImageMemAnimatedDiscardable(), aAutoLock);
  }
1119 | | |
  // Drop every cached surface for |aImageKey| and discard its per-image
  // cache. Returns the per-image cache (with its surfaces still inside) so
  // the caller can free it after releasing the lock.
  already_AddRefed<ImageSurfaceCache>
  RemoveImage(const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return nullptr; // No cached surfaces for this image, so nothing to do.
    }

    // Discard all of the cached surfaces for this image.
    // XXX(seth): This is O(n^2) since for each item in the cache we are
    // removing an element from the costs array. Since n is expected to be
    // small, performance should be good, but if usage patterns change we should
    // change the data structure used for mCosts.
    for (auto iter = cache->ConstIter(); !iter.Done(); iter.Next()) {
      StopTracking(WrapNotNull(iter.UserData()),
                   /* aIsTracked */ true, aAutoLock);
    }

    // The per-image cache isn't needed anymore, so remove it as well.
    // This implicitly unlocks the image if it was locked.
    mImageCaches.Remove(aImageKey);

    // Since we did not actually remove any of the surfaces from the cache
    // itself, only stopped tracking them, we should free it outside the lock.
    return cache.forget();
  }
1146 | | |
  // Ask |aImageKey|'s per-image cache to prune surfaces it no longer needs
  // (e.g. redundant sizes in factor of 2 mode), updating our accounting for
  // each surface pruned.
  void PruneImage(const ImageKey aImageKey, const StaticMutexAutoLock& aAutoLock)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return; // No cached surfaces for this image, so nothing to do.
    }

    cache->Prune([this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
      StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
      // Individual surfaces must be freed outside the lock.
      mCachedSurfacesDiscard.AppendElement(aSurface);
    });

    MaybeRemoveEmptyCache(aImageKey, cache);
  }
1162 | | |
  // Evict every unlocked surface in the cache.
  void DiscardAll(const StaticMutexAutoLock& aAutoLock)
  {
    // Remove in order of cost because mCosts is an array and the other data
    // structures are all hash tables. Note that locked surfaces are not
    // removed, since they aren't present in mCosts.
    while (!mCosts.IsEmpty()) {
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true, aAutoLock);
    }
  }

  // Respond to a memory-pressure notification by evicting roughly
  // (1 / mDiscardFactor) of the discardable (i.e. unlocked) bytes we hold.
  void DiscardForMemoryPressure(const StaticMutexAutoLock& aAutoLock)
  {
    // Compute our discardable cost. Since locked surfaces aren't discardable,
    // we exclude them.
    const Cost discardableCost = (mMaxCost - mAvailableCost) - mLockedCost;
    MOZ_ASSERT(discardableCost <= mMaxCost, "Discardable cost doesn't add up");

    // Our target is to raise our available cost by (1 / mDiscardFactor) of our
    // discardable cost - in other words, we want to end up with about
    // (discardableCost / mDiscardFactor) fewer bytes stored in the surface
    // cache after we're done.
    const Cost targetCost = mAvailableCost + (discardableCost / mDiscardFactor);

    if (targetCost > mMaxCost - mLockedCost) {
      MOZ_ASSERT_UNREACHABLE("Target cost is more than we can discard");
      DiscardAll(aAutoLock);
      return;
    }

    // Discard surfaces until we've reduced our cost to our target cost.
    while (mAvailableCost < targetCost) {
      MOZ_ASSERT(!mCosts.IsEmpty(), "Removed everything and still not done");
      Remove(mCosts.LastElement().Surface(), /* aStopTracking */ true, aAutoLock);
    }
  }
1198 | | |
  // Transfer ownership of the surfaces queued for discard to |aDiscard| so
  // the caller can release them after dropping the lock.
  void TakeDiscard(nsTArray<RefPtr<CachedSurface>>& aDiscard,
                   const StaticMutexAutoLock& aAutoLock)
  {
    MOZ_ASSERT(aDiscard.IsEmpty());
    aDiscard = std::move(mCachedSurfacesDiscard);
  }

  // Lock an individual surface, moving its cost from the expiration-tracked
  // pool to the locked pool. No-op for placeholders and already-locked
  // surfaces.
  void LockSurface(NotNull<CachedSurface*> aSurface,
                   const StaticMutexAutoLock& aAutoLock)
  {
    if (aSurface->IsPlaceholder() || aSurface->IsLocked()) {
      return;
    }

    StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);

    // Lock the surface. This can fail.
    aSurface->SetLocked(true);
    // Re-track under the (possibly new) locked state. For a locked surface
    // this only bumps mLockedCost, which cannot fail — hence the assert.
    DebugOnly<bool> tracking = StartTracking(aSurface, aAutoLock);
    MOZ_ASSERT(tracking);
  }
1220 | | |
  // nsIMemoryReporter implementation: report coarse, cost-based metrics about
  // the cache. (Precise per-surface reporting happens elsewhere via
  // CollectSizeOfSurfaces.)
  NS_IMETHOD
  CollectReports(nsIHandleReportCallback* aHandleReport,
                 nsISupports* aData,
                 bool aAnonymize) override
  {
    StaticMutexAutoLock lock(sInstanceMutex);

    // We have explicit memory reporting for the surface cache which is more
    // accurate than the cost metrics we report here, but these metrics are
    // still useful to report, since they control the cache's behavior.
    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-estimated-total",
      KIND_OTHER, UNITS_BYTES, (mMaxCost - mAvailableCost),
      "Estimated total memory used by the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-estimated-locked",
      KIND_OTHER, UNITS_BYTES, mLockedCost,
      "Estimated memory used by locked surfaces in the imagelib surface cache.");

    MOZ_COLLECT_REPORT(
      "imagelib-surface-cache-overflow-count",
      KIND_OTHER, UNITS_COUNT, mOverflowCount,
      "Count of how many times the surface cache has hit its capacity and been "
      "unable to insert a new surface.");

    return NS_OK;
  }
1249 | | |
  // Gather detailed memory counters for every surface belonging to
  // |aImageKey|. The callback also lets the per-image cache evict surfaces it
  // discovers to be dead during measurement.
  void CollectSizeOfSurfaces(const ImageKey aImageKey,
                             nsTArray<SurfaceMemoryCounter>& aCounters,
                             MallocSizeOf aMallocSizeOf,
                             const StaticMutexAutoLock& aAutoLock)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return; // No surfaces for this image.
    }

    // Report all surfaces in the per-image cache.
    cache->CollectSizeOfSurfaces(aCounters, aMallocSizeOf,
      [this, &aAutoLock](NotNull<CachedSurface*> aSurface) -> void {
        StopTracking(aSurface, /* aIsTracked */ true, aAutoLock);
        // Individual surfaces must be freed outside the lock.
        mCachedSurfacesDiscard.AppendElement(aSurface);
    });

    MaybeRemoveEmptyCache(aImageKey, cache);
  }
1270 | | |
private:
  // Return the per-image cache for |aImageKey|, or nullptr if none exists.
  already_AddRefed<ImageSurfaceCache> GetImageCache(const ImageKey aImageKey)
  {
    RefPtr<ImageSurfaceCache> imageCache;
    mImageCaches.Get(aImageKey, getter_AddRefs(imageCache));
    return imageCache.forget();
  }
1278 | | |
1279 | | void MaybeRemoveEmptyCache(const ImageKey aImageKey, |
1280 | | ImageSurfaceCache* aCache) |
1281 | 0 | { |
1282 | 0 | // Remove the per-image cache if it's unneeded now. Keep it if the image is |
1283 | 0 | // locked, since the per-image cache is where we store that state. Note that |
1284 | 0 | // we don't push it into mImageCachesDiscard because all of its surfaces |
1285 | 0 | // have been removed, so it is safe to free while holding the lock. |
1286 | 0 | if (aCache->IsEmpty() && !aCache->IsLocked()) { |
1287 | 0 | mImageCaches.Remove(aImageKey); |
1288 | 0 | } |
1289 | 0 | } |
1290 | | |
  // This is similar to CanHold() except that it takes into account the costs of
  // locked surfaces. It's used internally in Insert(), but it's not exposed
  // publicly because we permit multithreaded access to the surface cache, which
  // means that the result would be meaningless: another thread could insert a
  // surface or lock an image at any time.
  bool CanHoldAfterDiscarding(const Cost aCost) const
  {
    // Locked surfaces can never be evicted, so only the unlocked portion of
    // the budget is reclaimable.
    return aCost <= mMaxCost - mLockedCost;
  }
1300 | | |
  // Record that |aSurface| was used: for locked images this (re)locks the
  // surface; otherwise it refreshes the surface's expiration timestamp.
  // Returns false if the expiration tracker could not re-admit the surface,
  // in which case the surface has already been un-tracked and the caller
  // must remove it with aStopTracking = false.
  bool MarkUsed(NotNull<CachedSurface*> aSurface,
                NotNull<ImageSurfaceCache*> aCache,
                const StaticMutexAutoLock& aAutoLock)
  {
    if (aCache->IsLocked()) {
      LockSurface(aSurface, aAutoLock);
      return true;
    }

    nsresult rv = mExpirationTracker.MarkUsedLocked(aSurface, aAutoLock);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // If mark used fails, it is because it failed to reinsert the surface
      // after removing it from the tracker. Thus we need to update our
      // own accounting but otherwise expect it to be untracked.
      StopTracking(aSurface, /* aIsTracked */ false, aAutoLock);
      return false;
    }
    return true;
  }
1320 | | |
  // Unlock every locked, non-placeholder surface in |aCache|, re-entering
  // each into expiration tracking. With |aStaticOnly| set, animated
  // (non-static playback) surfaces keep their locks. Surfaces that cannot be
  // re-tracked are removed from the cache entirely.
  void DoUnlockSurfaces(NotNull<ImageSurfaceCache*> aCache, bool aStaticOnly,
                        const StaticMutexAutoLock& aAutoLock)
  {
    AutoTArray<NotNull<CachedSurface*>, 8> discard;

    // Unlock all the surfaces the per-image cache is holding.
    for (auto iter = aCache->ConstIter(); !iter.Done(); iter.Next()) {
      NotNull<CachedSurface*> surface = WrapNotNull(iter.UserData());
      if (surface->IsPlaceholder() || !surface->IsLocked()) {
        continue;
      }
      if (aStaticOnly && surface->GetSurfaceKey().Playback() != PlaybackType::eStatic) {
        continue;
      }
      StopTracking(surface, /* aIsTracked */ true, aAutoLock);
      surface->SetLocked(false);
      // Deferred so the eviction doesn't mutate the table mid-iteration.
      if (MOZ_UNLIKELY(!StartTracking(surface, aAutoLock))) {
        discard.AppendElement(surface);
      }
    }

    // Discard any that we failed to track.
    for (auto iter = discard.begin(); iter != discard.end(); ++iter) {
      Remove(*iter, /* aStopTracking */ false, aAutoLock);
    }
  }
1347 | | |
  // Remove the cache entry matching exactly (aImageKey, aSurfaceKey), if any.
  void RemoveEntry(const ImageKey aImageKey,
                   const SurfaceKey& aSurfaceKey,
                   const StaticMutexAutoLock& aAutoLock)
  {
    RefPtr<ImageSurfaceCache> cache = GetImageCache(aImageKey);
    if (!cache) {
      return; // No cached surfaces for this image.
    }

    RefPtr<CachedSurface> surface =
      cache->Lookup(aSurfaceKey, /* aForAccess = */ false);
    if (!surface) {
      return; // Lookup in the per-image cache missed.
    }

    Remove(WrapNotNull(surface), /* aStopTracking */ true, aAutoLock);
  }
1365 | | |
  // Expiration tracker for unlocked surfaces: surfaces that go unused for the
  // configured expiration time are evicted via NotifyExpiredLocked(). Uses
  // the surface cache's own static mutex for its locking.
  class SurfaceTracker final :
    public ExpirationTrackerImpl<CachedSurface, 2,
                                 StaticMutex,
                                 StaticMutexAutoLock>
  {
  public:
    explicit SurfaceTracker(uint32_t aSurfaceCacheExpirationTimeMS)
      : ExpirationTrackerImpl<CachedSurface, 2,
                              StaticMutex, StaticMutexAutoLock>(
          aSurfaceCacheExpirationTimeMS, "SurfaceTracker",
          SystemGroup::EventTargetFor(TaskCategory::Other))
    { }

  protected:
    // Called (under the lock) for each surface whose expiration period
    // elapsed without use; evicts it from the cache.
    void NotifyExpiredLocked(CachedSurface* aSurface,
                             const StaticMutexAutoLock& aAutoLock) override
    {
      sInstance->Remove(WrapNotNull(aSurface), /* aStopTracking */ true, aAutoLock);
    }

    // Collect the evicted surfaces while still holding the lock...
    void NotifyHandlerEndLocked(const StaticMutexAutoLock& aAutoLock) override
    {
      sInstance->TakeDiscard(mDiscard, aAutoLock);
    }

    // ...and release them here, after the lock has been dropped. The local
    // array exists solely so the surfaces are destroyed outside the lock.
    void NotifyHandlerEnd() override
    {
      nsTArray<RefPtr<CachedSurface>> discard(std::move(mDiscard));
    }

    StaticMutex& GetMutex() override
    {
      return sInstanceMutex;
    }

    // Surfaces evicted during expiration, pending release outside the lock.
    nsTArray<RefPtr<CachedSurface>> mDiscard;
  };
1403 | | |
  // Observer that reacts to "memory-pressure" notifications by discarding a
  // fraction of the cache (see DiscardForMemoryPressure). Registered in the
  // SurfaceCacheImpl constructor.
  class MemoryPressureObserver final : public nsIObserver
  {
  public:
    NS_DECL_ISUPPORTS

    NS_IMETHOD Observe(nsISupports*,
                       const char* aTopic,
                       const char16_t*) override
    {
      // |discard| outlives the lock scope so the discarded surfaces are
      // actually freed only after the lock is released.
      nsTArray<RefPtr<CachedSurface>> discard;
      {
        StaticMutexAutoLock lock(sInstanceMutex);
        if (sInstance && strcmp(aTopic, "memory-pressure") == 0) {
          sInstance->DiscardForMemoryPressure(lock);
          sInstance->TakeDiscard(discard, lock);
        }
      }
      return NS_OK;
    }

  private:
    virtual ~MemoryPressureObserver() { }
  };
1427 | | |
  // Cost entries for all *unlocked* surfaces, kept sorted; eviction removes
  // from the high-cost end (LastElement).
  nsTArray<CostEntry> mCosts;
  // Per-image caches, keyed by the Image pointer that serves as ImageKey.
  nsRefPtrHashtable<nsPtrHashKey<Image>,
                    ImageSurfaceCache> mImageCaches;
  // Surfaces removed from the cache but not yet freed; drained via
  // TakeDiscard() so destruction happens outside the lock.
  nsTArray<RefPtr<CachedSurface>> mCachedSurfacesDiscard;
  // Evicts unlocked surfaces that go unused for the expiration period.
  SurfaceTracker mExpirationTracker;
  RefPtr<MemoryPressureObserver> mMemoryPressureObserver;
  // On memory pressure, discard 1/mDiscardFactor of the discardable cost.
  const uint32_t mDiscardFactor;
  // Budget invariant: mAvailableCost + (cost of all tracked surfaces,
  // locked and unlocked) == mMaxCost.
  const Cost mMaxCost;
  Cost mAvailableCost;
  Cost mLockedCost;
  // Number of insertions refused because the surface could never fit.
  size_t mOverflowCount;
};
1440 | | |
1441 | | NS_IMPL_ISUPPORTS(SurfaceCacheImpl, nsIMemoryReporter) |
1442 | | NS_IMPL_ISUPPORTS(SurfaceCacheImpl::MemoryPressureObserver, nsIObserver) |
1443 | | |
1444 | | /////////////////////////////////////////////////////////////////////////////// |
1445 | | // Public API |
1446 | | /////////////////////////////////////////////////////////////////////////////// |
1447 | | |
/* static */ void
SurfaceCache::Initialize()
{
  // Create the SurfaceCache singleton, sizing it from preferences and the
  // amount of physical memory. Main-thread only; must be called exactly once
  // before any other SurfaceCache method.

  // Initialize preferences.
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(!sInstance, "Shouldn't initialize more than once");

  // See gfxPrefs for the default values of these preferences.

  // Length of time before an unused surface is removed from the cache, in
  // milliseconds.
  uint32_t surfaceCacheExpirationTimeMS =
    gfxPrefs::ImageMemSurfaceCacheMinExpirationMS();

  // What fraction of the memory used by the surface cache we should discard
  // when we get a memory pressure notification. This value is interpreted as
  // 1/N, so 1 means to discard everything, 2 means to discard about half of the
  // memory we're using, and so forth. We clamp it to avoid division by zero.
  uint32_t surfaceCacheDiscardFactor =
    max(gfxPrefs::ImageMemSurfaceCacheDiscardFactor(), 1u);

  // Maximum size of the surface cache, in kilobytes.
  uint64_t surfaceCacheMaxSizeKB = gfxPrefs::ImageMemSurfaceCacheMaxSizeKB();

  // A knob determining the actual size of the surface cache. Currently the
  // cache is (size of main memory) / (surface cache size factor) KB
  // or (surface cache max size) KB, whichever is smaller. The formula
  // may change in the future, though.
  // For example, a value of 4 would yield a 256MB cache on a 1GB machine.
  // The smallest machines we are likely to run this code on have 256MB
  // of memory, which would yield a 64MB cache on this setting.
  // We clamp this value to avoid division by zero.
  uint32_t surfaceCacheSizeFactor =
    max(gfxPrefs::ImageMemSurfaceCacheSizeFactor(), 1u);

  // Compute the size of the surface cache.
  uint64_t memorySize = PR_GetPhysicalMemorySize();
  if (memorySize == 0) {
    MOZ_ASSERT_UNREACHABLE("PR_GetPhysicalMemorySize not implemented here");
    memorySize = 256 * 1024 * 1024; // Fall back to 256MB.
  }
  uint64_t proposedSize = memorySize / surfaceCacheSizeFactor;
  uint64_t surfaceCacheSizeBytes = min(proposedSize,
                                       surfaceCacheMaxSizeKB * 1024);
  // Cost is tracked in a 32-bit-friendly type; clamp to UINT32_MAX bytes.
  uint32_t finalSurfaceCacheSizeBytes =
    min(surfaceCacheSizeBytes, uint64_t(UINT32_MAX));

  // Create the surface cache singleton with the requested settings. Note that
  // the size is a limit that the cache may not grow beyond, but we do not
  // actually allocate any storage for surfaces at this time.
  sInstance = new SurfaceCacheImpl(surfaceCacheExpirationTimeMS,
                                   surfaceCacheDiscardFactor,
                                   finalSurfaceCacheSizeBytes);
  sInstance->InitMemoryReporter();
}
1503 | | |
/* static */ void
SurfaceCache::Shutdown()
{
  // Take ownership of the singleton while holding the mutex, but let the
  // RefPtr go out of scope *after* the inner block so that the cache (and
  // everything it holds) is destroyed without sInstanceMutex being held.
  RefPtr<SurfaceCacheImpl> cache;
  {
    StaticMutexAutoLock lock(sInstanceMutex);
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(sInstance, "No singleton - was Shutdown() called twice?");
    cache = sInstance.forget();
  }
}
1515 | | |
1516 | | /* static */ LookupResult |
1517 | | SurfaceCache::Lookup(const ImageKey aImageKey, |
1518 | | const SurfaceKey& aSurfaceKey) |
1519 | 0 | { |
1520 | 0 | nsTArray<RefPtr<CachedSurface>> discard; |
1521 | 0 | LookupResult rv(MatchType::NOT_FOUND); |
1522 | 0 |
|
1523 | 0 | { |
1524 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1525 | 0 | if (!sInstance) { |
1526 | 0 | return rv; |
1527 | 0 | } |
1528 | 0 | |
1529 | 0 | rv = sInstance->Lookup(aImageKey, aSurfaceKey, lock); |
1530 | 0 | sInstance->TakeDiscard(discard, lock); |
1531 | 0 | } |
1532 | 0 |
|
1533 | 0 | return rv; |
1534 | 0 | } |
1535 | | |
1536 | | /* static */ LookupResult |
1537 | | SurfaceCache::LookupBestMatch(const ImageKey aImageKey, |
1538 | | const SurfaceKey& aSurfaceKey) |
1539 | 0 | { |
1540 | 0 | nsTArray<RefPtr<CachedSurface>> discard; |
1541 | 0 | LookupResult rv(MatchType::NOT_FOUND); |
1542 | 0 |
|
1543 | 0 | { |
1544 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1545 | 0 | if (!sInstance) { |
1546 | 0 | return rv; |
1547 | 0 | } |
1548 | 0 | |
1549 | 0 | rv = sInstance->LookupBestMatch(aImageKey, aSurfaceKey, lock); |
1550 | 0 | sInstance->TakeDiscard(discard, lock); |
1551 | 0 | } |
1552 | 0 |
|
1553 | 0 | return rv; |
1554 | 0 | } |
1555 | | |
1556 | | /* static */ InsertOutcome |
1557 | | SurfaceCache::Insert(NotNull<ISurfaceProvider*> aProvider) |
1558 | 0 | { |
1559 | 0 | nsTArray<RefPtr<CachedSurface>> discard; |
1560 | 0 | InsertOutcome rv(InsertOutcome::FAILURE); |
1561 | 0 |
|
1562 | 0 | { |
1563 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1564 | 0 | if (!sInstance) { |
1565 | 0 | return rv; |
1566 | 0 | } |
1567 | 0 | |
1568 | 0 | rv = sInstance->Insert(aProvider, /* aSetAvailable = */ false, lock); |
1569 | 0 | sInstance->TakeDiscard(discard, lock); |
1570 | 0 | } |
1571 | 0 |
|
1572 | 0 | return rv; |
1573 | 0 | } |
1574 | | |
1575 | | /* static */ bool |
1576 | | SurfaceCache::CanHold(const IntSize& aSize, uint32_t aBytesPerPixel /* = 4 */) |
1577 | 0 | { |
1578 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1579 | 0 | if (!sInstance) { |
1580 | 0 | return false; |
1581 | 0 | } |
1582 | 0 | |
1583 | 0 | Cost cost = ComputeCost(aSize, aBytesPerPixel); |
1584 | 0 | return sInstance->CanHold(cost); |
1585 | 0 | } |
1586 | | |
1587 | | /* static */ bool |
1588 | | SurfaceCache::CanHold(size_t aSize) |
1589 | 0 | { |
1590 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1591 | 0 | if (!sInstance) { |
1592 | 0 | return false; |
1593 | 0 | } |
1594 | 0 | |
1595 | 0 | return sInstance->CanHold(aSize); |
1596 | 0 | } |
1597 | | |
1598 | | /* static */ void |
1599 | | SurfaceCache::SurfaceAvailable(NotNull<ISurfaceProvider*> aProvider) |
1600 | 0 | { |
1601 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1602 | 0 | if (!sInstance) { |
1603 | 0 | return; |
1604 | 0 | } |
1605 | 0 | |
1606 | 0 | sInstance->SurfaceAvailable(aProvider, lock); |
1607 | 0 | } |
1608 | | |
1609 | | /* static */ void |
1610 | | SurfaceCache::LockImage(const ImageKey aImageKey) |
1611 | 0 | { |
1612 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1613 | 0 | if (sInstance) { |
1614 | 0 | return sInstance->LockImage(aImageKey); |
1615 | 0 | } |
1616 | 0 | } |
1617 | | |
1618 | | /* static */ void |
1619 | | SurfaceCache::UnlockImage(const ImageKey aImageKey) |
1620 | 0 | { |
1621 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1622 | 0 | if (sInstance) { |
1623 | 0 | return sInstance->UnlockImage(aImageKey, lock); |
1624 | 0 | } |
1625 | 0 | } |
1626 | | |
1627 | | /* static */ void |
1628 | | SurfaceCache::UnlockEntries(const ImageKey aImageKey) |
1629 | 0 | { |
1630 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1631 | 0 | if (sInstance) { |
1632 | 0 | return sInstance->UnlockEntries(aImageKey, lock); |
1633 | 0 | } |
1634 | 0 | } |
1635 | | |
1636 | | /* static */ void |
1637 | | SurfaceCache::RemoveImage(const ImageKey aImageKey) |
1638 | 0 | { |
1639 | 0 | RefPtr<ImageSurfaceCache> discard; |
1640 | 0 | { |
1641 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1642 | 0 | if (sInstance) { |
1643 | 0 | discard = sInstance->RemoveImage(aImageKey, lock); |
1644 | 0 | } |
1645 | 0 | } |
1646 | 0 | } |
1647 | | |
1648 | | /* static */ void |
1649 | | SurfaceCache::PruneImage(const ImageKey aImageKey) |
1650 | 0 | { |
1651 | 0 | nsTArray<RefPtr<CachedSurface>> discard; |
1652 | 0 | { |
1653 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1654 | 0 | if (sInstance) { |
1655 | 0 | sInstance->PruneImage(aImageKey, lock); |
1656 | 0 | sInstance->TakeDiscard(discard, lock); |
1657 | 0 | } |
1658 | 0 | } |
1659 | 0 | } |
1660 | | |
1661 | | /* static */ void |
1662 | | SurfaceCache::DiscardAll() |
1663 | 0 | { |
1664 | 0 | nsTArray<RefPtr<CachedSurface>> discard; |
1665 | 0 | { |
1666 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1667 | 0 | if (sInstance) { |
1668 | 0 | sInstance->DiscardAll(lock); |
1669 | 0 | sInstance->TakeDiscard(discard, lock); |
1670 | 0 | } |
1671 | 0 | } |
1672 | 0 | } |
1673 | | |
1674 | | /* static */ void |
1675 | | SurfaceCache::CollectSizeOfSurfaces(const ImageKey aImageKey, |
1676 | | nsTArray<SurfaceMemoryCounter>& aCounters, |
1677 | | MallocSizeOf aMallocSizeOf) |
1678 | 0 | { |
1679 | 0 | nsTArray<RefPtr<CachedSurface>> discard; |
1680 | 0 | { |
1681 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1682 | 0 | if (!sInstance) { |
1683 | 0 | return; |
1684 | 0 | } |
1685 | 0 | |
1686 | 0 | sInstance->CollectSizeOfSurfaces(aImageKey, aCounters, aMallocSizeOf, lock); |
1687 | 0 | sInstance->TakeDiscard(discard, lock); |
1688 | 0 | } |
1689 | 0 | } |
1690 | | |
1691 | | /* static */ size_t |
1692 | | SurfaceCache::MaximumCapacity() |
1693 | 0 | { |
1694 | 0 | StaticMutexAutoLock lock(sInstanceMutex); |
1695 | 0 | if (!sInstance) { |
1696 | 0 | return 0; |
1697 | 0 | } |
1698 | 0 | |
1699 | 0 | return sInstance->MaximumCapacity(); |
1700 | 0 | } |
1701 | | |
1702 | | /* static */ bool |
1703 | | SurfaceCache::IsLegalSize(const IntSize& aSize) |
1704 | 0 | { |
1705 | 0 | // reject over-wide or over-tall images |
1706 | 0 | const int32_t k64KLimit = 0x0000FFFF; |
1707 | 0 | if (MOZ_UNLIKELY(aSize.width > k64KLimit || aSize.height > k64KLimit )) { |
1708 | 0 | NS_WARNING("image too big"); |
1709 | 0 | return false; |
1710 | 0 | } |
1711 | 0 |
|
1712 | 0 | // protect against invalid sizes |
1713 | 0 | if (MOZ_UNLIKELY(aSize.height <= 0 || aSize.width <= 0)) { |
1714 | 0 | return false; |
1715 | 0 | } |
1716 | 0 | |
1717 | 0 | // check to make sure we don't overflow a 32-bit |
1718 | 0 | CheckedInt32 requiredBytes = CheckedInt32(aSize.width) * |
1719 | 0 | CheckedInt32(aSize.height) * 4; |
1720 | 0 | if (MOZ_UNLIKELY(!requiredBytes.isValid())) { |
1721 | 0 | NS_WARNING("width or height too large"); |
1722 | 0 | return false; |
1723 | 0 | } |
1724 | 0 | return true; |
1725 | 0 | } |
1726 | | |
1727 | | } // namespace image |
1728 | | } // namespace mozilla |