Coverage Report

Created: 2024-09-14 07:19

/src/skia/src/gpu/ganesh/GrGpuResource.h
 Line|  Count|Source
    1|       |/*
    2|       | * Copyright 2014 Google Inc.
    3|       | *
    4|       | * Use of this source code is governed by a BSD-style license that can be
    5|       | * found in the LICENSE file.
    6|       | */
    7|       |
    8|       |#ifndef GrGpuResource_DEFINED
    9|       |#define GrGpuResource_DEFINED
   10|       |
   11|       |#include "include/core/SkString.h"
   12|       |#include "include/core/SkTypes.h"
   13|       |#include "include/private/base/SkNoncopyable.h"
   14|       |#include "include/private/base/SkTo.h"
   15|       |#include "include/private/gpu/ganesh/GrTypesPriv.h"
   16|       |#include "src/gpu/GpuTypesPriv.h"
   17|       |#include "src/gpu/ResourceKey.h"
   18|       |
   19|       |#include <atomic>
   20|       |#include <cstddef>
   21|       |#include <cstdint>
   22|       |#include <string>
   23|       |#include <string_view>
   24|       |
   25|       |class GrDirectContext;
   26|       |class GrGpu;
   27|       |class GrResourceCache;
   28|       |class GrSurface;
   29|       |class SkTraceMemoryDump;
   30|       |
   31|       |namespace skgpu {
   32|       |enum class Budgeted : bool;
   33|       |}
   34|       |
   35|       |/**
   36|       | * Base class for GrGpuResource. Provides the hooks for resources to interact with the cache.
   37|       | * Separated out as a base class to isolate the ref-counting behavior and provide friendship without
   38|       | * exposing all of GrGpuResource.
   39|       | *
   40|       | * PRIOR to the last ref being removed DERIVED::notifyARefCntWillBeZero() will be called
   41|       | * (static polymorphism using CRTP). It is legal for additional refs to be added
   42|       | * during this time. AFTER the ref count reaches zero DERIVED::notifyARefCntIsZero() will be
   43|       | * called.
   44|       | */
   45|       |template <typename DERIVED> class GrIORef : public SkNoncopyable {
   46|       |public:
   47|      0|    bool unique() const { return fRefCnt == 1; }
   48|       |
   49|   789k|    void ref() const {
   50|       |        // Only the cache should be able to add the first ref to a resource.
   51|   789k|        SkASSERT(this->getRefCnt() > 0);
   52|       |        // No barrier required.
   53|   789k|        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
   54|   789k|    }
   55|       |
   56|       |    // This enum is used to notify the GrResourceCache which type of ref just dropped to zero.
   57|       |    enum class LastRemovedRef {
   58|       |        kMainRef,            // This refers to fRefCnt
   59|       |        kCommandBufferUsage, // This refers to fCommandBufferUsageCnt
   60|       |    };
   61|       |
   62|  1.16M|    void unref() const {
   63|  1.16M|        SkASSERT(this->getRefCnt() > 0);
   64|  1.16M|        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
   65|   361k|            this->notifyWillBeZero(LastRemovedRef::kMainRef);
   66|   361k|        }
   67|  1.16M|    }
   68|       |
   69|      0|    void refCommandBuffer() const {
   70|       |        // No barrier required.
   71|      0|        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
   72|      0|    }
   73|       |
   74|      0|    void unrefCommandBuffer() const {
   75|      0|        SkASSERT(!this->hasNoCommandBufferUsages());
   76|      0|        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel)) {
   77|      0|            this->notifyWillBeZero(LastRemovedRef::kCommandBufferUsage);
   78|      0|        }
   79|      0|    }
     |       |    Unexecuted instantiation: GrIORef<GrGpuResource>::unrefCommandBuffer() const
   80|       |
   81|       |#if defined(GPU_TEST_UTILS)
   82|      0|    int32_t testingOnly_getRefCnt() const { return this->getRefCnt(); }
   83|       |#endif
   84|       |
   85|       |protected:
   86|   203k|    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}
   87|       |
   88|  2.30M|    bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
   89|  1.64M|    bool internalHasNoCommandBufferUsages() const {
   90|  1.64M|        return SkToBool(this->hasNoCommandBufferUsages());
   91|  1.64M|    }
   92|       |
   93|       |    // Privileged method that allows going from ref count = 0 to ref count = 1.
   94|   167k|    void addInitialRef() const {
   95|   167k|        SkASSERT(fRefCnt >= 0);
   96|       |        // No barrier required.
   97|   167k|        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
   98|   167k|    }
   99|       |
  100|       |private:
  101|   361k|    void notifyWillBeZero(LastRemovedRef removedRef) const {
  102|   361k|        static_cast<const DERIVED*>(this)->notifyARefCntIsZero(removedRef);
  103|   361k|    }
  104|       |
  105|  2.30M|    int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }
  106|       |
  107|  1.64M|    bool hasNoCommandBufferUsages() const {
  108|  1.64M|        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
  109|       |            // The acquire barrier is only really needed if we return true.  It
  110|       |            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
  111|       |            // until previous owners are all totally done calling removeCommandBufferUsage().
  112|  1.64M|            return true;
  113|  1.64M|        }
  114|      0|        return false;
  115|  1.64M|    }
  116|       |
  117|       |    mutable std::atomic<int32_t> fRefCnt;
  118|       |    mutable std::atomic<int32_t> fCommandBufferUsageCnt;
  119|       |
  120|       |    using INHERITED = SkNoncopyable;
  121|       |};
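
Note on the GrIORef listing above: the class uses the CRTP so that the templated base can hand the "last ref went away" event back to the derived type, incrementing with relaxed atomics and decrementing with acquire-release ordering. The stand-alone sketch below (ToyIORef and ToyResource are invented names, not Skia API) illustrates that notification flow under those assumptions:

    #include <atomic>
    #include <cstdio>

    // Toy version of the CRTP notification used by GrIORef: the base class owns the
    // atomic count and calls back into DERIVED when the last reference is dropped.
    template <typename DERIVED> class ToyIORef {
    public:
        void ref() const {
            // Increments never need to order other memory accesses: relaxed is enough.
            fRefCnt.fetch_add(+1, std::memory_order_relaxed);
        }
        void unref() const {
            // acq_rel: the "release" publishes this owner's writes, the "acquire"
            // lets the thread that sees the count hit zero observe them.
            if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
                static_cast<const DERIVED*>(this)->notifyARefCntIsZero();
            }
        }
    protected:
        ToyIORef() : fRefCnt(1) {}  // like GrIORef, objects are born with one ref
    private:
        mutable std::atomic<int32_t> fRefCnt;
    };

    class ToyResource : public ToyIORef<ToyResource> {
    public:
        void notifyARefCntIsZero() const { std::printf("last ref dropped\n"); }
    };

    int main() {
        ToyResource r;   // ref count starts at 1
        r.ref();         // 2
        r.unref();       // 1
        r.unref();       // 0 -> prints "last ref dropped"
    }

In the real class the count also starts at 1, and only privileged code (addInitialRef(), reached through the cache and ProxyAccess) may take it back from 0 to 1; the sketch keeps just the counting and callback mechanics.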
  122|       |
  123|       |/**
  124|       | * Base class for objects that can be kept in the GrResourceCache.
  125|       | */
  126|       |class GrGpuResource : public GrIORef<GrGpuResource> {
  127|       |public:
  128|       |    /**
  129|       |     * Tests whether an object has been abandoned or released. All objects will
  130|       |     * be in this state after their creating GrContext is destroyed or has had
  131|       |     * contextLost called. It's up to the client to test wasDestroyed() before
  132|       |     * attempting to use an object if it holds refs on objects across
  133|       |     * ~GrContext, freeResources with the force flag, or contextLost.
  134|       |     *
  135|       |     * @return true if the object has been released or abandoned,
  136|       |     *         false otherwise.
  137|       |     */
  138|  1.16M|    bool wasDestroyed() const { return nullptr == fGpu; }
  139|       |
  140|       |    /**
  141|       |     * Retrieves the context that owns the object. Note that it is possible for
  142|       |     * this to return NULL. When objects have been release()ed or abandon()ed
  143|       |     * they no longer have an owning context. Destroying a GrDirectContext
  144|       |     * automatically releases all its resources.
  145|       |     */
  146|       |    const GrDirectContext* getContext() const;
  147|       |    GrDirectContext* getContext();
  148|       |
  149|       |    /**
  150|       |     * Retrieves the amount of GPU memory used by this resource in bytes. It is
  151|       |     * approximate since we aren't aware of additional padding or copies made
  152|       |     * by the driver.
  153|       |     *
  154|       |     * @return the amount of GPU memory used in bytes
  155|       |     */
  156|   937k|    size_t gpuMemorySize() const {
  157|   937k|        if (kInvalidGpuMemorySize == fGpuMemorySize) {
  158|   203k|            fGpuMemorySize = this->onGpuMemorySize();
  159|   203k|            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
  160|   203k|        }
  161|   937k|        return fGpuMemorySize;
  162|   937k|    }
  163|       |
  164|       |    class UniqueID {
  165|       |    public:
  166|       |        UniqueID() = default;
  167|       |
  168|   203k|        explicit UniqueID(uint32_t id) : fID(id) {}
  169|       |
  170|  20.5k|        uint32_t asUInt() const { return fID; }
  171|       |
  172|      0|        bool operator==(const UniqueID& other) const { return fID == other.fID; }
  173|      0|        bool operator!=(const UniqueID& other) const { return !(*this == other); }
  174|       |
  175|      0|        void makeInvalid() { fID = SK_InvalidUniqueID; }
  176|      0|        bool isInvalid() const { return fID == SK_InvalidUniqueID; }
  177|       |
  178|       |    protected:
  179|       |        uint32_t fID = SK_InvalidUniqueID;
  180|       |    };
  181|       |
  182|       |    /**
  183|       |     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
  184|       |     * not change when the content of the GrGpuResource object changes. This will never return
  185|       |     * 0.
  186|       |     */
  187|  20.5k|    UniqueID uniqueID() const { return fUniqueID; }
  188|       |
  189|       |    /** Returns the current unique key for the resource. It will be invalid if the resource has no
  190|       |        associated unique key. */
  191|  2.26M|    const skgpu::UniqueKey& getUniqueKey() const { return fUniqueKey; }
  192|       |
  193|  20.5k|    std::string getLabel() const { return fLabel; }
  194|       |
  195|  44.2k|    void setLabel(std::string_view label) {
  196|  44.2k|        fLabel = label;
  197|  44.2k|        this->onSetLabel();
  198|  44.2k|    }
  199|       |
  200|       |    /**
  201|       |     * Internal-only helper class used for manipulations of the resource by the cache.
  202|       |     */
  203|       |    class CacheAccess;
  204|       |    inline CacheAccess cacheAccess();
  205|       |    inline const CacheAccess cacheAccess() const;  // NOLINT(readability-const-return-type)
  206|       |
  207|       |    /**
  208|       |     * Internal-only helper class used for manipulations of the resource by GrSurfaceProxy.
  209|       |     */
  210|       |    class ProxyAccess;
  211|       |    inline ProxyAccess proxyAccess();
  212|       |
  213|       |    /**
  214|       |     * Internal-only helper class used for manipulations of the resource by internal code.
  215|       |     */
  216|       |    class ResourcePriv;
  217|       |    inline ResourcePriv resourcePriv();
  218|       |    inline const ResourcePriv resourcePriv() const;  // NOLINT(readability-const-return-type)
  219|       |
  220|       |    /**
  221|       |     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
  222|       |     * Typically, subclasses should not need to override this, and should only
  223|       |     * need to override setMemoryBacking.
  224|       |     **/
  225|       |    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
  226|       |
  227|       |    /**
  228|       |     * Describes the type of gpu resource that is represented by the implementing
  229|       |     * class (e.g. texture, buffer object, stencil).  This data is used for diagnostic
  230|       |     * purposes by dumpMemoryStatistics().
  231|       |     *
  232|       |     * The value returned is expected to be long lived and will not be copied by the caller.
  233|       |     */
  234|       |    virtual const char* getResourceType() const = 0;
  235|       |
  236|       |    static uint32_t CreateUniqueID();
  237|       |
  238|       |#if defined(GPU_TEST_UTILS)
  239|      0|    virtual const GrSurface* asSurface() const { return nullptr; }
  240|       |#endif
  241|       |
  242|       |protected:
  243|       |    // This must be called by every non-wrapped GrGpuResource. It should be called once the object is
  244|       |    // fully initialized (i.e. only from the constructors of the final class).
  245|       |    void registerWithCache(skgpu::Budgeted);
  246|       |
  247|       |    // This must be called by every GrGpuResource that references any wrapped backend objects. It
  248|       |    // should be called once the object is fully initialized (i.e. only from the constructors of the
  249|       |    // final class).
  250|       |    void registerWithCacheWrapped(GrWrapCacheable);
  251|       |
  252|       |    GrGpuResource(GrGpu*, std::string_view label);
  253|       |    virtual ~GrGpuResource();
  254|       |
  255|   191k|    GrGpu* getGpu() const { return fGpu; }
  256|       |
  257|       |    /** Overridden to free GPU resources in the backend API. */
  258|   231k|    virtual void onRelease() { }
  259|       |    /** Overridden to abandon any internal handles, ptrs, etc. to backend API resources.
  260|       |        This may be called when the underlying 3D context is no longer valid and so no
  261|       |        backend API calls should be made. */
  262|      0|    virtual void onAbandon() { }
  263|       |
  264|       |    /**
  265|       |     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
  266|       |     **/
  267|      0|    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}
  268|       |
  269|       |    /**
  270|       |     * Returns a string that uniquely identifies this resource.
  271|       |     */
  272|       |    SkString getResourceName() const;
  273|       |
  274|       |    /**
  275|       |     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
  276|       |     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
  277|       |     * to customize various inputs.
  278|       |     */
  279|       |    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
  280|       |                                  const char* type, size_t size) const;
  281|       |
  282|       |
  283|       |private:
  284|       |    bool isPurgeable() const;
  285|       |    bool hasRef() const;
  286|       |    bool hasNoCommandBufferUsages() const;
  287|       |
  288|       |    /**
  289|       |     * Called by registerWithCache() if the resource is available to be used as scratch.
  290|       |     * Resource subclasses should override this if the instances should be recycled as scratch
  291|       |     * resources and populate the scratchKey with the key.
  292|       |     * By default resources are not recycled as scratch.
  293|       |     **/
  294|      0|    virtual void computeScratchKey(skgpu::ScratchKey*) const {}
  295|       |
  296|       |    /**
  297|       |     * Removes references to objects in the underlying 3D API without freeing them.
  298|       |     * Called by CacheAccess.
  299|       |     */
  300|       |    void abandon();
  301|       |
  302|       |    /**
  303|       |     * Frees the object in the underlying 3D API. Called by CacheAccess.
  304|       |     */
  305|       |    void release();
  306|       |
  307|       |    virtual size_t onGpuMemorySize() const = 0;
  308|       |
  309|       |    virtual void onSetLabel() = 0;
  310|       |
  311|       |    // See comments in CacheAccess and ResourcePriv.
  312|       |    void setUniqueKey(const skgpu::UniqueKey&);
  313|       |    void removeUniqueKey();
  314|       |    void notifyARefCntIsZero(LastRemovedRef removedRef) const;
  315|       |    void removeScratchKey();
  316|       |    void makeBudgeted();
  317|       |    void makeUnbudgeted();
  318|       |
  319|       |#ifdef SK_DEBUG
  320|       |    friend class GrGpu;  // for assert in GrGpu to access getGpu
  321|       |#endif
  322|       |
  323|       |    // An index into a heap when this resource is purgeable or an array when not. This is maintained
  324|       |    // by the cache.
  325|       |    int fCacheArrayIndex;
  326|       |    // This value reflects how recently this resource was accessed in the cache. This is maintained
  327|       |    // by the cache.
  328|       |    uint32_t fTimestamp;
  329|       |    skgpu::StdSteadyClock::time_point fTimeWhenBecamePurgeable;
  330|       |
  331|       |    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
  332|       |    skgpu::ScratchKey fScratchKey;
  333|       |    skgpu::UniqueKey fUniqueKey;
  334|       |
  335|       |    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
  336|       |    // is destroyed. Those calls will set this to NULL.
  337|       |    GrGpu* fGpu;
  338|       |    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;
  339|       |
  340|       |    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
  341|       |    bool fRefsWrappedObjects = false;
  342|       |    const UniqueID fUniqueID;
  343|       |    std::string fLabel;
  344|       |
  345|       |    using INHERITED = GrIORef<GrGpuResource>;
  346|       |    friend class GrIORef<GrGpuResource>; // to access notifyRefCntWillBeZero and
  347|       |                                         // notifyARefCntIsZero.
  348|       |};
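
Note on the GrGpuResource listing above: concrete resources derive from it, register with the cache once fully constructed, and implement the pure virtuals onGpuMemorySize(), onSetLabel(), and getResourceType(), optionally overriding onRelease() and onAbandon(). The outline below is a hypothetical sketch, not Skia code: MyBufferResource is an invented name, it assumes the declarations in this header, and it will not build outside the Skia tree.

    // Hypothetical subclass sketch; assumes the declarations from
    // /src/skia/src/gpu/ganesh/GrGpuResource.h listed above.
    class MyBufferResource : public GrGpuResource {
    public:
        MyBufferResource(GrGpu* gpu, size_t sizeInBytes, skgpu::Budgeted budgeted)
                : GrGpuResource(gpu, /*label=*/"MyBufferResource")
                , fSizeInBytes(sizeInBytes) {
            // Must be called once the object is fully initialized, from the final class.
            this->registerWithCache(budgeted);
        }

        // Used by dumpMemoryStatistics() for diagnostics; the string must be long lived.
        const char* getResourceType() const override { return "Buffer"; }

    private:
        // Reported lazily via gpuMemorySize(); must not return kInvalidGpuMemorySize.
        size_t onGpuMemorySize() const override { return fSizeInBytes; }

        // Re-apply the debug label to the backend object when setLabel() is called.
        void onSetLabel() override { /* pass the label to the backend API, if supported */ }

        // Free the backend object while the 3D API is still valid.
        void onRelease() override { /* destroy the backend buffer */ GrGpuResource::onRelease(); }

        // Drop backend handles without making 3D API calls (the context is already lost).
        void onAbandon() override { /* forget the backend buffer */ GrGpuResource::onAbandon(); }

        size_t fSizeInBytes;
    };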
  349|       |
  350|       |class GrGpuResource::ProxyAccess {
  351|       |private:
  352|      0|    ProxyAccess(GrGpuResource* resource) : fResource(resource) {}
  353|       |
  354|       |    /** Proxies are allowed to take a resource from no refs to one ref. */
  355|       |    void ref(GrResourceCache* cache);
  356|       |
  357|       |    // No taking addresses of this type.
  358|       |    const CacheAccess* operator&() const = delete;
  359|       |    CacheAccess* operator&() = delete;
  360|       |
  361|       |    GrGpuResource* fResource;
  362|       |
  363|       |    friend class GrGpuResource;
  364|       |    friend class GrSurfaceProxy;
  365|       |};
  366|       |
  367|      0|inline GrGpuResource::ProxyAccess GrGpuResource::proxyAccess() { return ProxyAccess(this); }
  368|       |
  369|       |#endif
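
Note on the accessor classes (CacheAccess, ProxyAccess, ResourcePriv): they all follow the same limited-access idiom, a small nested wrapper whose operations are private and whose friend list names the only callers allowed to use them. A self-contained sketch of that idiom with invented names (Widget, PrivilegedAccess, TrustedCaller; none of these are Skia types):

    #include <cstdio>

    // Generic sketch of the limited-access idiom used by GrGpuResource's
    // CacheAccess, ProxyAccess, and ResourcePriv helpers.
    class Widget {
    public:
        class PrivilegedAccess;            // nested accessor, defined below
        inline PrivilegedAccess priv();    // the only way to obtain one

    private:
        void dangerousReset() { std::printf("reset\n"); }  // not part of the public API
        // Nested classes can reach the enclosing class's private members (C++11+),
        // so PrivilegedAccess needs no extra friendship here.
    };

    class Widget::PrivilegedAccess {
    private:
        explicit PrivilegedAccess(Widget* w) : fWidget(w) {}

        // Privileged operation forwarded to the wrapped object; private, so only
        // the friends listed below can call it.
        void reset() { fWidget->dangerousReset(); }

        // No taking addresses of this type (mirrors the deleted operator& above).
        PrivilegedAccess* operator&() = delete;

        Widget* fWidget;

        friend class Widget;         // Widget constructs the accessor in priv()
        friend class TrustedCaller;  // the only outside class allowed to use it
    };

    inline Widget::PrivilegedAccess Widget::priv() { return PrivilegedAccess(this); }

    // A trusted caller gets the privileged operation; everyone else only sees
    // Widget's public interface.
    class TrustedCaller {
    public:
        static void Reset(Widget* w) { w->priv().reset(); }
    };

    int main() {
        Widget w;
        TrustedCaller::Reset(&w);  // prints "reset"
        // w.priv().reset();       // would not compile here: reset() is accessible
        //                         // only to friends of PrivilegedAccess
    }

The same shape appears above: ProxyAccess's constructor, ref(), and data are private, and only GrGpuResource and GrSurfaceProxy are named as friends, so proxies alone may take a resource from zero refs back to one.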