Coverage Report

Created: 2024-05-20 07:14

/src/skia/src/gpu/ganesh/GrOpFlushState.h

 Line|  Count|Source
    1|       |/*
    2|       | * Copyright 2015 Google Inc.
    3|       | *
    4|       | * Use of this source code is governed by a BSD-style license that can be
    5|       | * found in the LICENSE file.
    6|       | */
    7|       |#ifndef GrOpFlushState_DEFINED
    8|       |#define GrOpFlushState_DEFINED
    9|       |
   10|       |#include "include/core/SkRefCnt.h"
   11|       |#include "include/private/base/SkAssert.h"
   12|       |#include "include/private/base/SkDebug.h"
   13|       |#include "include/private/base/SkTArray.h"
   14|       |#include "include/private/gpu/ganesh/GrTypesPriv.h"
   15|       |#include "src/base/SkArenaAlloc.h"
   16|       |#include "src/base/SkArenaAllocList.h"
   17|       |#include "src/gpu/AtlasTypes.h"
   18|       |#include "src/gpu/ganesh/GrAppliedClip.h"
   19|       |#include "src/gpu/ganesh/GrBuffer.h"
   20|       |#include "src/gpu/ganesh/GrBufferAllocPool.h"
   21|       |#include "src/gpu/ganesh/GrDeferredUpload.h"
   22|       |#include "src/gpu/ganesh/GrDrawIndirectCommand.h"
   23|       |#include "src/gpu/ganesh/GrDstProxyView.h"
   24|       |#include "src/gpu/ganesh/GrGeometryProcessor.h"
   25|       |#include "src/gpu/ganesh/GrMeshDrawTarget.h"
   26|       |#include "src/gpu/ganesh/GrOpsRenderPass.h"
   27|       |#include "src/gpu/ganesh/GrPipeline.h"
   28|       |#include "src/gpu/ganesh/GrProgramInfo.h"
   29|       |#include "src/gpu/ganesh/GrScissorState.h"
   30|       |#include "src/gpu/ganesh/GrSurfaceProxyView.h"
   31|       |
   32|       |#include <cstddef>
   33|       |#include <cstdint>
   34|       |#include <utility>
   35|       |
   36|       |class GrAtlasManager;
   37|       |class GrCaps;
   38|       |class GrGpu;
   39|       |class GrOp;
   40|       |class GrRenderTargetProxy;
   41|       |class GrResourceProvider;
   42|       |class GrSurfaceProxy;
   43|       |class GrThreadSafeCache;
   44|       |enum class GrXferBarrierFlags;
   45|       |struct GrSimpleMesh;
   46|       |struct GrUserStencilSettings;
   47|       |struct SkIRect;
   48|       |struct SkRect;
   49|       |
   50|       |namespace skgpu::ganesh {
   51|       |class SmallPathAtlasMgr;
   52|       |}
   53|       |namespace sktext::gpu {
   54|       |class StrikeCache;
   55|       |}
   56|       |
   57|       |/** Tracks the state across all the GrOps (really just the GrDrawOps) in an OpsTask flush. */
   58|       |class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawTarget {
   59|       |public:
   60|       |    // vertexSpace and indexSpace may either be null or an allocation of size
   61|       |    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
   62|       |    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
   63|       |    GrOpFlushState(GrGpu*, GrResourceProvider*, skgpu::TokenTracker*,
   64|       |                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);
   65|       |
   66|  18.4k|    ~GrOpFlushState() final { this->reset(); }
   67|       |
   68|       |    /** This is called after each op has a chance to prepare its draws and before the draws are
   69|       |        executed. */
   70|       |    void preExecuteDraws();
   71|       |
   72|       |    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
   73|       |        surface needs to be prepared for being sampled in a draw after the upload, the caller
   74|       |        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
   75|       |        when doing inline uploads to reset the image layout back to sampled. */
   76|       |    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);
   77|       |
   78|       |    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
   79|       |    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
   80|       |                                             const GrPipeline*, const GrUserStencilSettings*);
   81|       |
   82|  93.0k|    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
   83|   175k|    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }
   84|       |
   85|   303k|    GrGpu* gpu() { return fGpu; }
   86|       |
   87|       |    void reset();
   88|       |
   89|       |    /** Additional data required on a per-op basis when executing GrOps. */
   90|       |    struct OpArgs {
   91|       |        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
   92|       |        explicit OpArgs(GrOp* op, const GrSurfaceProxyView& surfaceView, bool usesMSAASurface,
   93|       |                        GrAppliedClip* appliedClip, const GrDstProxyView& dstProxyView,
   94|       |                        GrXferBarrierFlags renderPassXferBarriers, GrLoadOp colorLoadOp)
   95|       |                : fOp(op)
   96|       |                , fSurfaceView(surfaceView)
   97|       |                , fRenderTargetProxy(surfaceView.asRenderTargetProxy())
   98|       |                , fUsesMSAASurface(usesMSAASurface)
   99|       |                , fAppliedClip(appliedClip)
  100|       |                , fDstProxyView(dstProxyView)
  101|       |                , fRenderPassXferBarriers(renderPassXferBarriers)
  102|   236k|                , fColorLoadOp(colorLoadOp) {
  103|   236k|            SkASSERT(surfaceView.asRenderTargetProxy());
  104|   236k|        }
  105|       |
  106|  1.84k|        GrOp* op() { return fOp; }
  107|   118k|        const GrSurfaceProxyView& writeView() const { return fSurfaceView; }
  108|      0|        GrRenderTargetProxy* rtProxy() const { return fRenderTargetProxy; }
  109|       |        // True if the op under consideration belongs to an opsTask that renders to an MSAA buffer.
  110|   116k|        bool usesMSAASurface() const { return fUsesMSAASurface; }
  111|   191k|        GrAppliedClip* appliedClip() { return fAppliedClip; }
  112|  70.2k|        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
  113|   116k|        const GrDstProxyView& dstProxyView() const { return fDstProxyView; }
  114|   116k|        GrXferBarrierFlags renderPassBarriers() const { return fRenderPassXferBarriers; }
  115|   116k|        GrLoadOp colorLoadOp() const { return fColorLoadOp; }
  116|       |
  117|       |#ifdef SK_DEBUG
  118|      0|        void validate() const {
  119|      0|            SkASSERT(fOp);
  120|      0|            SkASSERT(fSurfaceView);
  121|      0|        }
  122|       |#endif
  123|       |
  124|       |    private:
  125|       |        GrOp*                         fOp;
  126|       |        const GrSurfaceProxyView&     fSurfaceView;
  127|       |        GrRenderTargetProxy*          fRenderTargetProxy;
  128|       |        bool                          fUsesMSAASurface;
  129|       |        GrAppliedClip*                fAppliedClip;
  130|       |        GrDstProxyView                fDstProxyView;   // TODO: do we still need the dst proxy here?
  131|       |        GrXferBarrierFlags            fRenderPassXferBarriers;
  132|       |        GrLoadOp                      fColorLoadOp;
  133|       |    };
  134|       |
  135|   472k|    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }
  136|       |
  137|   654k|    const OpArgs& drawOpArgs() const {
  138|   654k|        SkASSERT(fOpArgs);
  139|   654k|        SkDEBUGCODE(fOpArgs->validate());
  140|   654k|        return *fOpArgs;
  141|   654k|    }
  142|       |
  143|   175k|    void setSampledProxyArray(skia_private::TArray<GrSurfaceProxy*, true>* sampledProxies) {
  144|   175k|        fSampledProxies = sampledProxies;
  145|   175k|    }
  146|       |
  147|  1.84k|    skia_private::TArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
  148|  1.84k|        return fSampledProxies;
  149|  1.84k|    }
  150|       |
  151|       |    /** Overrides of GrDeferredUploadTarget. */
  152|       |
  153|  5.94k|    const skgpu::TokenTracker* tokenTracker() final { return fTokenTracker; }
  154|       |    skgpu::AtlasToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
  155|       |    skgpu::AtlasToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
  156|       |
  157|       |    /** Overrides of GrMeshDrawTarget. */
  158|       |    void recordDraw(const GrGeometryProcessor*,
  159|       |                    const GrSimpleMesh[],
  160|       |                    int meshCnt,
  161|       |                    const GrSurfaceProxy* const primProcProxies[],
  162|       |                    GrPrimitiveType) final;
  163|       |    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
  164|       |                          int* startVertex) final;
  165|       |    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
  166|       |    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
  167|       |                                 sk_sp<const GrBuffer>*, int* startVertex,
  168|       |                                 int* actualVertexCount) final;
  169|       |    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
  170|       |                                    sk_sp<const GrBuffer>*, int* startIndex,
  171|       |                                    int* actualIndexCount) final;
  172|       |    GrDrawIndirectWriter makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
  173|      0|                                               size_t* offset) override {
  174|      0|        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
  175|      0|    }
  176|       |    GrDrawIndexedIndirectWriter makeDrawIndexedIndirectSpace(int drawCount,
  177|       |                                                             sk_sp<const GrBuffer>* buffer,
  178|      0|                                                             size_t* offset) override {
  179|      0|        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
  180|      0|    }
  181|       |    void putBackIndices(int indexCount) final;
  182|       |    void putBackVertices(int vertices, size_t vertexStride) final;
  183|      0|    void putBackIndirectDraws(int drawCount) final { fDrawIndirectPool.putBack(drawCount); }
  184|      0|    void putBackIndexedIndirectDraws(int drawCount) final {
  185|      0|        fDrawIndirectPool.putBackIndexed(drawCount);
  186|      0|    }
  187|   118k|    const GrSurfaceProxyView& writeView() const final { return this->drawOpArgs().writeView(); }
  188|      0|    GrRenderTargetProxy* rtProxy() const final { return this->drawOpArgs().rtProxy(); }
  189|   116k|    bool usesMSAASurface() const final { return this->drawOpArgs().usesMSAASurface(); }
  190|  70.2k|    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
  191|      0|    const GrAppliedHardClip& appliedHardClip() const {
  192|      0|        return (fOpArgs->appliedClip()) ?
  193|      0|                fOpArgs->appliedClip()->hardClip() : GrAppliedHardClip::Disabled();
  194|      0|    }
  195|       |    GrAppliedClip detachAppliedClip() final;
  196|   116k|    const GrDstProxyView& dstProxyView() const final {
  197|   116k|        return this->drawOpArgs().dstProxyView();
  198|   116k|    }
  199|       |
  200|   116k|    GrXferBarrierFlags renderPassBarriers() const final {
  201|   116k|        return this->drawOpArgs().renderPassBarriers();
  202|   116k|    }
  203|       |
  204|   116k|    GrLoadOp colorLoadOp() const final {
  205|   116k|        return this->drawOpArgs().colorLoadOp();
  206|   116k|    }
  207|       |
  208|  5.29k|    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
  209|       |    const GrCaps& caps() const final;
  210|       |    GrThreadSafeCache* threadSafeCache() const final;
  211|  27.8k|    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }
  212|       |
  213|       |    sktext::gpu::StrikeCache* strikeCache() const final;
  214|       |
  215|       |    // At this point we know we're flushing so full access to the GrAtlasManager and
  216|       |    // SmallPathAtlasMgr is required (and permissible).
  217|       |    GrAtlasManager* atlasManager() const final;
  218|       |#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
  219|       |    skgpu::ganesh::SmallPathAtlasMgr* smallPathAtlasManager() const final;
  220|       |#endif
  221|       |
  222|       |    /** GrMeshDrawTarget override. */
  223|   209k|    SkArenaAlloc* allocator() override { return &fArena; }
  224|       |
  225|       |    // This is a convenience method that binds the given pipeline, and then, if our applied clip has
  226|       |    // a scissor, sets the scissor rect from the applied clip.
  227|   113k|    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
  228|   113k|        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
  229|   113k|                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
  230|   113k|        this->bindPipeline(programInfo, drawBounds);
  231|   113k|        if (programInfo.pipeline().isScissorTestEnabled()) {
  232|  70.2k|            this->setScissorRect(this->appliedClip()->scissorState().rect());
  233|  70.2k|        }
  234|   113k|    }
  235|       |
  236|       |    // This is a convenience method for when the primitive processor has exactly one texture. It
  237|       |    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
  238|       |    void bindTextures(const GrGeometryProcessor& geomProc,
  239|       |                      const GrSurfaceProxy& singleGeomProcTexture,
  240|  17.6k|                      const GrPipeline& pipeline) {
  241|  17.6k|        SkASSERT(geomProc.numTextureSamplers() == 1);
  242|  17.6k|        const GrSurfaceProxy* ptr = &singleGeomProcTexture;
  243|  17.6k|        this->bindTextures(geomProc, &ptr, pipeline);
  244|  17.6k|    }
  245|       |
  246|       |    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
  247|       |    void drawMesh(const GrSimpleMesh& mesh);
  248|       |
  249|       |    // Pass-through methods to GrOpsRenderPass.
  250|   113k|    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
  251|   113k|        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
  252|   113k|    }
  253|  70.2k|    void setScissorRect(const SkIRect& scissorRect) {
  254|  70.2k|        fOpsRenderPass->setScissorRect(scissorRect);
  255|  70.2k|    }
  256|       |    void bindTextures(const GrGeometryProcessor& geomProc,
  257|       |                      const GrSurfaceProxy* const geomProcTextures[],
  258|   113k|                      const GrPipeline& pipeline) {
  259|   113k|        fOpsRenderPass->bindTextures(geomProc, geomProcTextures, pipeline);
  260|   113k|    }
  261|       |    void bindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
  262|       |                     sk_sp<const GrBuffer> vertexBuffer,
  263|   162k|                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
  264|   162k|        fOpsRenderPass->bindBuffers(std::move(indexBuffer), std::move(instanceBuffer),
  265|   162k|                                    std::move(vertexBuffer), primitiveRestart);
  266|   162k|    }
  267|  8.58k|    void draw(int vertexCount, int baseVertex) {
  268|  8.58k|        fOpsRenderPass->draw(vertexCount, baseVertex);
  269|  8.58k|    }
  270|       |    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
  271|  56.8k|                     int baseVertex) {
  272|  56.8k|        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
  273|  56.8k|                                    baseVertex);
  274|  56.8k|    }
  275|      0|    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
  276|      0|        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
  277|      0|    }
  278|       |    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
  279|      0|                              int baseVertex) {
  280|      0|        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
  281|      0|                                             baseVertex);
  282|      0|    }
  283|      0|    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
  284|      0|        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
  285|      0|    }
  286|      0|    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
  287|      0|        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
  288|      0|    }
  289|       |    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
  290|       |                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
  291|  5.80k|                          int baseVertex) {
  292|  5.80k|        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
  293|  5.80k|                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
  294|  5.80k|                                         baseVertex);
  295|  5.80k|    }
  296|       |
  297|       |private:
  298|       |    struct InlineUpload {
  299|       |        InlineUpload(GrDeferredTextureUploadFn&& upload, skgpu::AtlasToken token)
  300|      0|                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
  301|       |        GrDeferredTextureUploadFn fUpload;
  302|       |        skgpu::AtlasToken fUploadBeforeToken;
  303|       |    };
  304|       |
  305|       |    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
  306|       |    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
  307|       |    // that share a geometry processor into a Draw is that it allows the Gpu object to setup
  308|       |    // the shared state once and then issue draws for each mesh.
  309|       |    struct Draw {
  310|       |        ~Draw();
  311|       |        // The geometry processor is always forced to be in an arena allocation. This object does
  312|       |        // not need to manage its lifetime.
  313|       |        const GrGeometryProcessor* fGeometryProcessor = nullptr;
  314|       |        // Must have GrGeometryProcessor::numTextureSamplers() entries. Can be null if no samplers.
  315|       |        const GrSurfaceProxy* const* fGeomProcProxies = nullptr;
  316|       |        const GrSimpleMesh* fMeshes = nullptr;
  317|       |        const GrOp* fOp = nullptr;
  318|       |        int fMeshCnt = 0;
  319|       |        GrPrimitiveType fPrimitiveType;
  320|       |    };
  321|       |
  322|       |    // Storage for ops' pipelines, draws, and inline uploads.
  323|       |    SkArenaAllocWithReset fArena{sizeof(GrPipeline) * 100};
  324|       |
  325|       |    // Store vertex and index data on behalf of ops that are flushed.
  326|       |    GrVertexBufferAllocPool fVertexPool;
  327|       |    GrIndexBufferAllocPool fIndexPool;
  328|       |    GrDrawIndirectBufferAllocPool fDrawIndirectPool;
  329|       |
  330|       |    // Data stored on behalf of the ops being flushed.
  331|       |    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
  332|       |    SkArenaAllocList<InlineUpload> fInlineUploads;
  333|       |    SkArenaAllocList<Draw> fDraws;
  334|       |
  335|       |    // All draws we store have an implicit draw token. This is the draw token for the first draw
  336|       |    // in fDraws.
  337|       |    skgpu::AtlasToken fBaseDrawToken = skgpu::AtlasToken::InvalidToken();
  338|       |
  339|       |    // Info about the op that is currently preparing or executing using the flush state, or null
  340|       |    // if an op is not currently preparing or executing.
  341|       |    OpArgs* fOpArgs = nullptr;
  342|       |
  343|       |    // This field is only transiently set during flush. Each OpsTask will set it to point to an
  344|       |    // array of proxies it uses before calling onPrepare and onExecute.
  345|       |    skia_private::TArray<GrSurfaceProxy*, true>* fSampledProxies;
  346|       |
  347|       |    GrGpu* fGpu;
  348|       |    GrResourceProvider* fResourceProvider;
  349|       |    skgpu::TokenTracker* fTokenTracker;
  350|       |    GrOpsRenderPass* fOpsRenderPass = nullptr;
  351|       |
  352|       |    // Variables that are used to track where we are in lists as ops are executed
  353|       |    SkArenaAllocList<Draw>::Iter fCurrDraw;
  354|       |    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
  355|       |};
  356|       |
  357|       |#endif
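
Notes

The GrMeshDrawTarget overrides at lines 157-186 are how ops obtain geometry
storage during the prepare stage: vertex and index data is written into
pool-backed space (fVertexPool/fIndexPool) rather than into buffers the op
owns. A minimal sketch of that pattern, assuming a hypothetical MyOp with a
hypothetical fRect member and the GrMeshDrawOp::onPrepareDraws hook; only the
target calls are taken from this header:

#include "include/core/SkRect.h"
#include "src/gpu/ganesh/GrMeshDrawTarget.h"

void MyOp::onPrepareDraws(GrMeshDrawTarget* target) {
    sk_sp<const GrBuffer> vertexBuffer;
    int baseVertex = 0;
    // Request CPU-writable space for four corner positions. GrOpFlushState
    // services this from fVertexPool; the pointer is only valid during the
    // prepare stage.
    auto* positions = static_cast<SkPoint*>(
            target->makeVertexSpace(sizeof(SkPoint), 4, &vertexBuffer, &baseVertex));
    if (!positions) {
        return;  // Pool allocation failed; record no draw.
    }
    fRect.toQuad(positions);  // Write the rect's four corners into the pool.
    // A recorded mesh would reference vertexBuffer/baseVertex; any unused
    // vertices could be returned with putBackVertices(count, sizeof(SkPoint)).
}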
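The convenience methods at lines 225-247 bundle the common execute-stage
sequence: bind the pipeline (and the scissor rect, when the applied clip has
one), bind textures, then let drawMesh() choose the matching bindBuffers() and
draw*() calls. A sketch of an op's execute step using them; MyOp and its
fProgramInfo/fGeomProcTexture/fMesh members are hypothetical, and the
GrProgramInfo::geomProc()/pipeline() accessors are assumed:

void MyOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
    // Binds the pipeline; if the pipeline has the scissor test enabled, this
    // also forwards the applied clip's scissor rect (see lines 227-234).
    flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
    // Single-texture overload (lines 238-244): binds the one geometry-
    // processor texture plus any FP textures on the pipeline.
    flushState->bindTextures(fProgramInfo->geomProc(), *fGeomProcTexture,
                             fProgramInfo->pipeline());
    // Emits the appropriate bindBuffers()/draw*() calls for the mesh.
    flushState->drawMesh(*fMesh);
}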
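setOpArgs() (line 135) and drawOpArgs() (lines 137-141) imply a strict
bracketing protocol: fOpArgs is only non-null while one op is preparing or
executing, so the SkASSERT in drawOpArgs() catches any access outside that
window. A sketch of the caller side, assuming a flush loop shaped like
OpsTask's and a GrOp::execute(GrOpFlushState*, const SkRect&) entry point:

// op, surfaceView, appliedClip, etc. are stand-ins for values the flushing
// code would already hold for the current op.
GrOpFlushState::OpArgs opArgs(op, surfaceView, usesMSAASurface, appliedClip,
                              dstProxyView, renderPassXferBarriers, colorLoadOp);
flushState->setOpArgs(&opArgs);
op->execute(flushState, chainBounds);  // The op calls the draw methods above.
flushState->setOpArgs(nullptr);        // Clear so stale access trips the assert.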