/src/skia/src/gpu/GrResourceCache.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright 2014 Google Inc. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license that can be |
5 | | * found in the LICENSE file. |
6 | | */ |
7 | | |
8 | | #include "src/gpu/GrResourceCache.h" |
9 | | #include <atomic> |
10 | | #include <vector> |
11 | | #include "include/gpu/GrDirectContext.h" |
12 | | #include "include/private/GrSingleOwner.h" |
13 | | #include "include/private/SkTo.h" |
14 | | #include "include/utils/SkRandom.h" |
15 | | #include "src/core/SkMessageBus.h" |
16 | | #include "src/core/SkOpts.h" |
17 | | #include "src/core/SkScopeExit.h" |
18 | | #include "src/core/SkTSort.h" |
19 | | #include "src/gpu/GrCaps.h" |
20 | | #include "src/gpu/GrDirectContextPriv.h" |
21 | | #include "src/gpu/GrGpuResourceCacheAccess.h" |
22 | | #include "src/gpu/GrProxyProvider.h" |
23 | | #include "src/gpu/GrTexture.h" |
24 | | #include "src/gpu/GrTextureProxyCacheAccess.h" |
25 | | #include "src/gpu/GrThreadSafeCache.h" |
26 | | #include "src/gpu/GrTracing.h" |
27 | | #include "src/gpu/SkGr.h" |
28 | | |
29 | | DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true); |
30 | | |
31 | | DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true); |
32 | | |
33 | 0 | #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner) |
34 | | |
35 | | ////////////////////////////////////////////////////////////////////////////// |
36 | | |
37 | 2 | GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() { |
38 | 2 | static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1}; |
39 | | |
40 | 2 | int32_t type = nextType.fetch_add(1, std::memory_order_relaxed); |
41 | 2 | if (type > SkTo<int32_t>(UINT16_MAX)) { |
42 | 0 | SK_ABORT("Too many Resource Types"); |
43 | 0 | } |
44 | | |
45 | 2 | return static_cast<ResourceType>(type); |
46 | 2 | } |
47 | | |
48 | 14 | GrUniqueKey::Domain GrUniqueKey::GenerateDomain() { |
49 | 14 | static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1}; |
50 | | |
51 | 14 | int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed); |
52 | 14 | if (domain > SkTo<int32_t>(UINT16_MAX)) { |
53 | 0 | SK_ABORT("Too many GrUniqueKey Domains"); |
54 | 0 | } |
55 | | |
56 | 14 | return static_cast<Domain>(domain); |
57 | 14 | } |
58 | | |
// Hashes the raw 32-bit words of a resource key. Thin wrapper over SkOpts::hash
// so key hashing uses the runtime-selected (possibly SIMD) implementation.
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}
62 | | |
63 | | ////////////////////////////////////////////////////////////////////////////// |
64 | | |
65 | | class GrResourceCache::AutoValidate : ::SkNoncopyable { |
66 | | public: |
67 | 3.30k | AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); } |
68 | 3.30k | ~AutoValidate() { fCache->validate(); } |
69 | | private: |
70 | | GrResourceCache* fCache; |
71 | | }; |
72 | | |
73 | | ////////////////////////////////////////////////////////////////////////////// |
74 | | |
75 | 0 | inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default; |
76 | | |
// Takes note of one pending unref for 'texture'. The caller is expected to
// have ref'ed the texture (see insertDelayedTextureUnref).
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}
79 | | |
80 | 0 | inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) { |
81 | 0 | fTexture = std::exchange(that.fTexture, nullptr); |
82 | 0 | fNumUnrefs = std::exchange(that.fNumUnrefs, 0); |
83 | 0 | } |
84 | | |
85 | | inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=( |
86 | 0 | TextureAwaitingUnref&& that) { |
87 | 0 | fTexture = std::exchange(that.fTexture, nullptr); |
88 | 0 | fNumUnrefs = std::exchange(that.fNumUnrefs, 0); |
89 | 0 | return *this; |
90 | 0 | } |
91 | | |
92 | 0 | inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() { |
93 | 0 | if (fTexture) { |
94 | 0 | for (int i = 0; i < fNumUnrefs; ++i) { |
95 | 0 | fTexture->unref(); |
96 | 0 | } |
97 | 0 | } |
98 | 0 | } |
99 | | |
100 | 0 | inline void GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref::addRef() { ++fNumUnrefs; } |
101 | | |
// Performs one of the pending unrefs on the texture and decrements the count.
// Callers check finished() afterwards to know when the entry can be removed.
inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}
107 | | |
108 | 0 | inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; } |
109 | | |
110 | | ////////////////////////////////////////////////////////////////////////////// |
111 | | |
// Constructs the cache for a single direct context. 'familyID' keys the
// unique-key-invalidation inbox; 'owningContextID' keys the freed-texture
// inbox so only messages intended for this context are received.
GrResourceCache::GrResourceCache(GrSingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fFreedTextureInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
}
123 | | |
// Releases every cached resource before the cache itself goes away; resources
// hold raw back-pointers to this cache, so they must not outlive it.
GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}
127 | | |
// Sets the budgeted-bytes cap and immediately purges if the new, possibly
// smaller, limit is already exceeded.
void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}
132 | | |
// Adds a newly created (non-purgeable, not yet cached) resource to the cache,
// updating byte/count bookkeeping and, if budgeted, the budget totals.
void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    // A brand-new resource is held by its creator, so it can't be in the
    // scratch map yet; it's added there when its last ref goes away.
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
    this->purgeAsNeeded();
}
166 | | |
// Removes a resource from all cache structures (purgeable queue or
// non-purgeable array, budget totals, scratch map, unique-key table).
// Called when a resource is released or abandoned.
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // A resource lives in exactly one of the two containers depending on its
    // purgeable state.
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}
197 | | |
// Abandons every resource without freeing the underlying GPU objects (used
// when the backend context is lost). After this, the cache must be empty.
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    // Abandoning removes each resource from the containers, so loop until
    // both are drained rather than iterating by index.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    // Everything must now be gone and all bookkeeping back to zero.
    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
229 | | |
// Releases (frees) every resource in the cache. Unlike abandonAll(), the
// underlying GPU objects are actually destroyed. Used from the destructor and
// on context release.
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider);    // better have called setProxyProvider
    SkASSERT(fThreadSafeCache);  // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    // release() removes each resource from its container, so drain both
    // containers rather than indexing through them.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    // Everything must now be gone and all bookkeeping back to zero.
    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
270 | | |
271 | 0 | void GrResourceCache::refResource(GrGpuResource* resource) { |
272 | 0 | SkASSERT(resource); |
273 | 0 | SkASSERT(resource->getContext()->priv().getResourceCache() == this); |
274 | 0 | if (resource->cacheAccess().hasRef()) { |
275 | 0 | resource->ref(); |
276 | 0 | } else { |
277 | 0 | this->refAndMakeResourceMRU(resource); |
278 | 0 | } |
279 | 0 | this->validate(); |
280 | 0 | } Unexecuted instantiation: GrResourceCache::refResource(GrGpuResource*) Unexecuted instantiation: GrResourceCache::refResource(GrGpuResource*) |
281 | | |
282 | | class GrResourceCache::AvailableForScratchUse { |
283 | | public: |
284 | 101k | AvailableForScratchUse() { } |
285 | | |
286 | 29.7k | bool operator()(const GrGpuResource* resource) const { |
287 | | // Everything that is in the scratch map should be usable as a |
288 | | // scratch resource. |
289 | 29.7k | return true; |
290 | 29.7k | } |
291 | | }; |
292 | | |
// Looks up a free scratch resource matching 'scratchKey'. On a hit the
// resource is pulled out of the scratch map (it's no longer available to
// others), ref'ed, and made MRU. Returns nullptr on a miss.
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}
304 | | |
// Called just before a resource loses its scratch key; de-lists it from the
// scratch map if it was currently offered there.
void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}
312 | | |
// Strips a resource of its unique key. Losing the key may make the resource
// usable as scratch again, in which case it re-enters the scratch map.
void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}
332 | | |
// Assigns 'newKey' to 'resource', evicting or re-keying any other resource
// that currently owns that key. An invalid 'newKey' means "remove the key".
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}
374 | | |
// Refs a cached resource and moves it to most-recently-used. If it was
// purgeable it transitions back to the non-purgeable array; if it was counted
// as "a flush would make this purgeable", that counter is corrected.
void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // It was only waiting on a command-buffer flush to become purgeable;
        // this new ref cancels that.
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}
395 | | |
// Called by a resource when one of its ref categories (main ref, command
// buffer usage, etc.) drops to zero. Decides, in order: whether the resource
// becomes scratch-available, whether it is now purgeable, and whether it
// should be kept in the purgeable queue, converted to budgeted, or freed.
void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        // Still in use elsewhere; nothing more to do.
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // Not purgeable yet (e.g. pending command buffer work), but a flush
        // would make it so; track that for requestsFlush().
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    // Newly purgeable: move from the non-purgeable array to the purgeable queue.
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    // No reason to keep it: free it now.
    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}
477 | | |
// Called after a resource toggles between budgeted and unbudgeted; updates
// budget totals, the flush-will-make-purgeable counter, and scratch-map
// membership to match the new state.
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // Became budgeted: count it against the budget.
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        // Became unbudgeted: back the bookkeeping out again.
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}
523 | | |
524 | 82.3k | void GrResourceCache::purgeAsNeeded() { |
525 | 82.3k | SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs; |
526 | 82.3k | fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs); |
527 | 82.3k | if (invalidKeyMsgs.count()) { |
528 | 6.17k | SkASSERT(fProxyProvider); |
529 | | |
530 | 65.2k | for (int i = 0; i < invalidKeyMsgs.count(); ++i) { |
531 | 59.0k | if (invalidKeyMsgs[i].inThreadSafeCache()) { |
532 | 2 | fThreadSafeCache->remove(invalidKeyMsgs[i].key()); |
533 | 2 | SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key())); |
534 | 59.0k | } else { |
535 | 59.0k | fProxyProvider->processInvalidUniqueKey( |
536 | 59.0k | invalidKeyMsgs[i].key(), nullptr, |
537 | 59.0k | GrProxyProvider::InvalidateGPUResource::kYes); |
538 | 59.0k | SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key())); |
539 | 59.0k | } |
540 | 59.0k | } |
541 | 6.17k | } |
542 | | |
543 | 82.3k | this->processFreedGpuResources(); |
544 | | |
545 | 82.3k | bool stillOverbudget = this->overBudget(); |
546 | 82.3k | while (stillOverbudget && fPurgeableQueue.count()) { |
547 | 0 | GrGpuResource* resource = fPurgeableQueue.peek(); |
548 | 0 | SkASSERT(resource->resourcePriv().isPurgeable()); |
549 | 0 | resource->cacheAccess().release(); |
550 | 0 | stillOverbudget = this->overBudget(); |
551 | 0 | } |
552 | | |
553 | 82.3k | if (stillOverbudget) { |
554 | 609 | fThreadSafeCache->dropUniqueRefs(this); |
555 | | |
556 | 609 | stillOverbudget = this->overBudget(); |
557 | 609 | while (stillOverbudget && fPurgeableQueue.count()) { |
558 | 0 | GrGpuResource* resource = fPurgeableQueue.peek(); |
559 | 0 | SkASSERT(resource->resourcePriv().isPurgeable()); |
560 | 0 | resource->cacheAccess().release(); |
561 | 0 | stillOverbudget = this->overBudget(); |
562 | 0 | } |
563 | 609 | } |
564 | | |
565 | 82.3k | this->validate(); |
566 | 82.3k | } |
567 | | |
// Purges purgeable resources, optionally restricted to those that became
// purgeable before 'purgeTime' (nullptr means "all") and/or to scratch-only
// resources (those without a unique key).
void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
                                             bool scratchResourcesOnly) {

    if (!scratchResourcesOnly) {
        if (purgeTime) {
            fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
        } else {
            fThreadSafeCache->dropUniqueRefs(nullptr);
        }

        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // Resources were given both LRU timestamps and tagged with a frame number when
                // they first became purgeable. The LRU timestamp won't change again until the
                // resource is made non-purgeable again. So, at this point all the remaining
                // resources in the timestamp-sorted queue will have a frame number >= to this
                // one.
                break;
            }

            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Early out if the very first item is too new to purge to avoid sorting the queue when
        // nothing will be deleted.
        if (purgeTime && fPurgeableQueue.count() &&
            fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
            return;
        }

        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // scratch or not, all later iterations will be too recently used to purge.
                break;
            }
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}
634 | | |
// Tries to ensure at least 'desiredHeadroomBytes' of unused budget, purging
// LRU-first only if the headroom is actually achievable. Returns whether the
// headroom is (or has been made) available. All-or-nothing: if even purging
// everything purgeable wouldn't suffice, nothing is purged.
bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
    AutoValidate av(this);
    if (desiredHeadroomBytes > fMaxBytes) {
        // Can never fit, regardless of purging.
        return false;
    }
    if (this->wouldFit(desiredHeadroomBytes)) {
        // Already enough headroom without purging anything.
        return true;
    }
    fPurgeableQueue.sort();

    // Dry run: walk the LRU-sorted queue and find how many entries we would
    // need to release to reach the target budget.
    size_t projectedBudget = fBudgetedBytes;
    int purgeCnt = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
            projectedBudget -= resource->gpuMemorySize();
        }
        if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
            purgeCnt = i + 1;
            break;
        }
    }
    if (purgeCnt == 0) {
        return false;
    }

    // Success! Release the resources.
    // Copy to array first so we don't mess with the queue.
    std::vector<GrGpuResource*> resources;
    resources.reserve(purgeCnt);
    for (int i = 0; i < purgeCnt; i++) {
        resources.push_back(fPurgeableQueue.at(i));
    }
    for (GrGpuResource* resource : resources) {
        resource->cacheAccess().release();
    }
    return true;
}
673 | | |
674 | 0 | void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) { |
675 | |
|
676 | 0 | const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge); |
677 | 0 | bool stillOverbudget = tmpByteBudget < fBytes; |
678 | |
|
679 | 0 | if (preferScratchResources && bytesToPurge < fPurgeableBytes) { |
680 | | // Sort the queue |
681 | 0 | fPurgeableQueue.sort(); |
682 | | |
683 | | // Make a list of the scratch resources to delete |
684 | 0 | SkTDArray<GrGpuResource*> scratchResources; |
685 | 0 | size_t scratchByteCount = 0; |
686 | 0 | for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) { |
687 | 0 | GrGpuResource* resource = fPurgeableQueue.at(i); |
688 | 0 | SkASSERT(resource->resourcePriv().isPurgeable()); |
689 | 0 | if (!resource->getUniqueKey().isValid()) { |
690 | 0 | *scratchResources.append() = resource; |
691 | 0 | scratchByteCount += resource->gpuMemorySize(); |
692 | 0 | stillOverbudget = tmpByteBudget < fBytes - scratchByteCount; |
693 | 0 | } |
694 | 0 | } |
695 | | |
696 | | // Delete the scratch resources. This must be done as a separate pass |
697 | | // to avoid messing up the sorted order of the queue |
698 | 0 | for (int i = 0; i < scratchResources.count(); i++) { |
699 | 0 | scratchResources.getAt(i)->cacheAccess().release(); |
700 | 0 | } |
701 | 0 | stillOverbudget = tmpByteBudget < fBytes; |
702 | |
|
703 | 0 | this->validate(); |
704 | 0 | } |
705 | | |
706 | | // Purge any remaining resources in LRU order |
707 | 0 | if (stillOverbudget) { |
708 | 0 | const size_t cachedByteCount = fMaxBytes; |
709 | 0 | fMaxBytes = tmpByteBudget; |
710 | 0 | this->purgeAsNeeded(); |
711 | 0 | fMaxBytes = cachedByteCount; |
712 | 0 | } |
713 | 0 | } Unexecuted instantiation: GrResourceCache::purgeUnlockedResources(unsigned long, bool) Unexecuted instantiation: GrResourceCache::purgeUnlockedResources(unsigned long, bool) |
714 | | |
715 | 182k | bool GrResourceCache::requestsFlush() const { |
716 | 182k | return this->overBudget() && !fPurgeableQueue.count() && |
717 | 3.40k | fNumBudgetedResourcesFlushWillMakePurgeable > 0; |
718 | 182k | } |
719 | | |
720 | 0 | void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) { |
721 | 0 | texture->ref(); |
722 | 0 | uint32_t id = texture->uniqueID().asUInt(); |
723 | 0 | if (auto* data = fTexturesAwaitingUnref.find(id)) { |
724 | 0 | data->addRef(); |
725 | 0 | } else { |
726 | 0 | fTexturesAwaitingUnref.set(id, {texture}); |
727 | 0 | } |
728 | 0 | } |
729 | | |
730 | 84.9k | void GrResourceCache::processFreedGpuResources() { |
731 | 84.9k | if (!fTexturesAwaitingUnref.count()) { |
732 | 84.9k | return; |
733 | 84.9k | } |
734 | | |
735 | 0 | SkTArray<GrTextureFreedMessage> msgs; |
736 | 0 | fFreedTextureInbox.poll(&msgs); |
737 | 0 | for (int i = 0; i < msgs.count(); ++i) { |
738 | 0 | SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID); |
739 | 0 | uint32_t id = msgs[i].fTexture->uniqueID().asUInt(); |
740 | 0 | TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id); |
741 | | // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been |
742 | | // empty and we would have returned early above. Thus, any texture from a message should be |
743 | | // in the list of fTexturesAwaitingUnref. |
744 | 0 | SkASSERT(info); |
745 | 0 | info->unref(); |
746 | 0 | if (info->finished()) { |
747 | 0 | fTexturesAwaitingUnref.remove(id); |
748 | 0 | } |
749 | 0 | } |
750 | 0 | } |
751 | | |
752 | 156k | void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) { |
753 | 156k | int index = fNonpurgeableResources.count(); |
754 | 156k | *fNonpurgeableResources.append() = resource; |
755 | 156k | *resource->cacheAccess().accessCacheIndex() = index; |
756 | 156k | } |
757 | | |
758 | 156k | void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) { |
759 | 156k | int* index = resource->cacheAccess().accessCacheIndex(); |
760 | | // Fill the hole we will create in the array with the tail object, adjust its index, and |
761 | | // then pop the array |
762 | 156k | GrGpuResource* tail = *(fNonpurgeableResources.end() - 1); |
763 | 156k | SkASSERT(fNonpurgeableResources[*index] == resource); |
764 | 156k | fNonpurgeableResources[*index] = tail; |
765 | 156k | *tail->cacheAccess().accessCacheIndex() = *index; |
766 | 156k | fNonpurgeableResources.pop(); |
767 | 156k | SkDEBUGCODE(*index = -1); |
768 | 156k | } |
769 | | |
// Returns the next value of the cache's monotonically increasing timestamp, renumbering
// every cached resource first if the counter has wrapped back to zero.
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            // Draining the priority queue yields the purgeable resources in sorted order.
            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            // The non-purgeable array is unordered; sort it by timestamp for the merge below.
            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                // Timestamps are unique across both arrays, so a tie is a bug.
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
831 | | |
832 | 0 | void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const { |
833 | 0 | for (int i = 0; i < fNonpurgeableResources.count(); ++i) { |
834 | 0 | fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump); |
835 | 0 | } |
836 | 0 | for (int i = 0; i < fPurgeableQueue.count(); ++i) { |
837 | 0 | fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump); |
838 | 0 | } |
839 | 0 | } |
840 | | |
841 | | #if GR_CACHE_STATS |
842 | 0 | void GrResourceCache::getStats(Stats* stats) const { |
843 | 0 | stats->reset(); |
844 | |
|
845 | 0 | stats->fTotal = this->getResourceCount(); |
846 | 0 | stats->fNumNonPurgeable = fNonpurgeableResources.count(); |
847 | 0 | stats->fNumPurgeable = fPurgeableQueue.count(); |
848 | |
|
849 | 0 | for (int i = 0; i < fNonpurgeableResources.count(); ++i) { |
850 | 0 | stats->update(fNonpurgeableResources[i]); |
851 | 0 | } |
852 | 0 | for (int i = 0; i < fPurgeableQueue.count(); ++i) { |
853 | 0 | stats->update(fPurgeableQueue.at(i)); |
854 | 0 | } |
855 | 0 | } |
856 | | |
857 | | #if GR_TEST_UTILS |
858 | 0 | void GrResourceCache::dumpStats(SkString* out) const { |
859 | 0 | this->validate(); |
860 | |
|
861 | 0 | Stats stats; |
862 | |
|
863 | 0 | this->getStats(&stats); |
864 | |
|
865 | 0 | float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes; |
866 | |
|
867 | 0 | out->appendf("Budget: %d bytes\n", (int)fMaxBytes); |
868 | 0 | out->appendf("\t\tEntry Count: current %d" |
869 | 0 | " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n", |
870 | 0 | stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable, |
871 | 0 | stats.fScratch, fHighWaterCount); |
872 | 0 | out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n", |
873 | 0 | SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization, |
874 | 0 | SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes)); |
875 | 0 | } |
876 | | |
877 | | void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys, |
878 | 0 | SkTArray<double>* values) const { |
879 | 0 | this->validate(); |
880 | |
|
881 | 0 | Stats stats; |
882 | 0 | this->getStats(&stats); |
883 | |
|
884 | 0 | keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable); |
885 | 0 | } |
886 | | #endif // GR_TEST_UTILS |
887 | | #endif // GR_CACHE_STATS |
888 | | |
889 | | #ifdef SK_DEBUG |
// Debug-only consistency check: recomputes the cache's aggregate bookkeeping
// (byte counts, budgeted counts, purgeable bytes, scratch/unique-key membership)
// from scratch and asserts it matches the incrementally maintained members.
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    // Accumulator that re-derives the cache totals one resource at a time,
    // asserting per-resource invariants along the way.
    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                // Scratch-usable resources must be budgeted, unreferenced, keyless,
                // unwrapped, and present in the scratch map.
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                // Has a scratch key but is not currently usable as scratch: it must be
                // disqualified for one of these reasons, and must not be in the map.
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                // Unique-keyed resources must be findable via the hash and either
                // budgeted or wrapping an external object.
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        // Every entry in the scratch map must still be usable as scratch.
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    // Walk the non-purgeable array: entries must know their own index, be alive,
    // and (except for a resource mid-transition to purgeable) be non-purgeable.
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    // Walk the purgeable queue: entries must be purgeable, alive, and indexed correctly.
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    // The recomputed totals must agree with the incrementally maintained members.
    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}
1010 | | |
1011 | 0 | bool GrResourceCache::isInCache(const GrGpuResource* resource) const { |
1012 | 0 | int index = *resource->cacheAccess().accessCacheIndex(); |
1013 | 0 | if (index < 0) { |
1014 | 0 | return false; |
1015 | 0 | } |
1016 | 0 | if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) { |
1017 | 0 | return true; |
1018 | 0 | } |
1019 | 0 | if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) { |
1020 | 0 | return true; |
1021 | 0 | } |
1022 | 0 | SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache."); |
1023 | 0 | return false; |
1024 | 0 | } |
1025 | | |
1026 | | #endif // SK_DEBUG |
1027 | | |
1028 | | #if GR_TEST_UTILS |
1029 | | |
1030 | 0 | int GrResourceCache::countUniqueKeysWithTag(const char* tag) const { |
1031 | 0 | int count = 0; |
1032 | 0 | fUniqueHash.foreach([&](const GrGpuResource& resource){ |
1033 | 0 | if (0 == strcmp(tag, resource.getUniqueKey().tag())) { |
1034 | 0 | ++count; |
1035 | 0 | } |
1036 | 0 | }); Unexecuted instantiation: GrResourceCache.cpp:GrResourceCache::countUniqueKeysWithTag(char const*) const::$_2::operator()(GrGpuResource const&) const Unexecuted instantiation: GrResourceCache.cpp:GrResourceCache::countUniqueKeysWithTag(char const*) const::$_95::operator()(GrGpuResource const&) const |
1037 | 0 | return count; |
1038 | 0 | } |
1039 | | |
// Test-only hook: forces the cache's timestamp counter to a specific value,
// e.g. to exercise the wrap-around renumbering path in getNextTimestamp().
void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
    fTimestamp = newTimestamp;
}
1043 | | |
1044 | | #endif // GR_TEST_UTILS |