Coverage Report

Created: 2024-09-14 07:19

/src/skia/src/gpu/graphite/Device.cpp
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright 2021 Google LLC
3
 *
4
 * Use of this source code is governed by a BSD-style license that can be
5
 * found in the LICENSE file.
6
 */
7
8
#include "src/gpu/graphite/Device.h"
9
10
#include "include/gpu/graphite/Recorder.h"
11
#include "include/gpu/graphite/Recording.h"
12
#include "include/gpu/graphite/Surface.h"
13
#include "src/gpu/AtlasTypes.h"
14
#include "src/gpu/BlurUtils.h"
15
#include "src/gpu/SkBackingFit.h"
16
#include "src/gpu/graphite/AtlasProvider.h"
17
#include "src/gpu/graphite/Buffer.h"
18
#include "src/gpu/graphite/Caps.h"
19
#include "src/gpu/graphite/CommandBuffer.h"
20
#include "src/gpu/graphite/ContextOptionsPriv.h"
21
#include "src/gpu/graphite/ContextPriv.h"
22
#include "src/gpu/graphite/ContextUtils.h"
23
#include "src/gpu/graphite/DrawContext.h"
24
#include "src/gpu/graphite/DrawList.h"
25
#include "src/gpu/graphite/DrawParams.h"
26
#include "src/gpu/graphite/Image_Graphite.h"
27
#include "src/gpu/graphite/Log.h"
28
#include "src/gpu/graphite/PathAtlas.h"
29
#include "src/gpu/graphite/RasterPathAtlas.h"
30
#include "src/gpu/graphite/RecorderPriv.h"
31
#include "src/gpu/graphite/Renderer.h"
32
#include "src/gpu/graphite/RendererProvider.h"
33
#include "src/gpu/graphite/ResourceTypes.h"
34
#include "src/gpu/graphite/SharedContext.h"
35
#include "src/gpu/graphite/SpecialImage_Graphite.h"
36
#include "src/gpu/graphite/Surface_Graphite.h"
37
#include "src/gpu/graphite/TextureProxy.h"
38
#include "src/gpu/graphite/TextureUtils.h"
39
#include "src/gpu/graphite/geom/BoundsManager.h"
40
#include "src/gpu/graphite/geom/Geometry.h"
41
#include "src/gpu/graphite/geom/IntersectionTree.h"
42
#include "src/gpu/graphite/geom/Shape.h"
43
#include "src/gpu/graphite/geom/Transform_graphite.h"
44
#include "src/gpu/graphite/text/TextAtlasManager.h"
45
46
#include "include/core/SkColorSpace.h"
47
#include "include/core/SkPath.h"
48
#include "include/core/SkPathEffect.h"
49
#include "include/core/SkStrokeRec.h"
50
51
#include "src/core/SkBlenderBase.h"
52
#include "src/core/SkBlurMaskFilterImpl.h"
53
#include "src/core/SkColorSpacePriv.h"
54
#include "src/core/SkConvertPixels.h"
55
#include "src/core/SkImageFilterTypes.h"
56
#include "src/core/SkImageInfoPriv.h"
57
#include "src/core/SkImagePriv.h"
58
#include "src/core/SkMatrixPriv.h"
59
#include "src/core/SkPaintPriv.h"
60
#include "src/core/SkRRectPriv.h"
61
#include "src/core/SkSpecialImage.h"
62
#include "src/core/SkStrikeCache.h"
63
#include "src/core/SkTraceEvent.h"
64
#include "src/core/SkVerticesPriv.h"
65
#include "src/gpu/TiledTextureUtils.h"
66
#include "src/text/GlyphRun.h"
67
#include "src/text/gpu/GlyphVector.h"
68
#include "src/text/gpu/SlugImpl.h"
69
#include "src/text/gpu/SubRunContainer.h"
70
#include "src/text/gpu/TextBlobRedrawCoordinator.h"
71
#include "src/text/gpu/VertexFiller.h"
72
73
#include <functional>
74
#include <tuple>
75
#include <unordered_map>
76
#include <vector>
77
78
using RescaleGamma       = SkImage::RescaleGamma;
79
using RescaleMode        = SkImage::RescaleMode;
80
using ReadPixelsCallback = SkImage::ReadPixelsCallback;
81
using ReadPixelsContext  = SkImage::ReadPixelsContext;
82
83
#if defined(GPU_TEST_UTILS)
84
int gOverrideMaxTextureSizeGraphite = 0;
85
// Allows tests to check how many tiles were drawn on the most recent call to
86
// Device::drawAsTiledImageRect. This is an atomic because we can write to it from
87
// multiple threads during "normal" operations. However, the tests that actually
88
// read from it are done single-threaded.
89
std::atomic<int> gNumTilesDrawnGraphite{0};
90
#endif
91
92
namespace skgpu::graphite {
93
94
0
#define ASSERT_SINGLE_OWNER SkASSERT(fRecorder); SKGPU_ASSERT_SINGLE_OWNER(fRecorder->singleOwner())
95
96
namespace {
97
98
0
const SkStrokeRec& DefaultFillStyle() {
99
0
    static const SkStrokeRec kFillStyle(SkStrokeRec::kFill_InitStyle);
100
0
    return kFillStyle;
101
0
}
102
103
0
bool blender_depends_on_dst(const SkBlender* blender, bool srcIsTransparent) {
104
0
    std::optional<SkBlendMode> bm = blender ? as_BB(blender)->asBlendMode() : SkBlendMode::kSrcOver;
105
0
    if (!bm.has_value()) {
106
0
        return true;
107
0
    }
108
0
    if (bm.value() == SkBlendMode::kSrc || bm.value() == SkBlendMode::kClear) {
109
        // src and clear blending never depends on dst
110
0
        return false;
111
0
    }
112
0
    if (bm.value() == SkBlendMode::kSrcOver) {
113
        // src-over depends on dst if src is transparent (a != 1)
114
0
        return srcIsTransparent;
115
0
    }
116
    // TODO: Are their other modes that don't depend on dst that can be trivially detected?
117
0
    return true;
118
0
}
119
120
bool paint_depends_on_dst(SkColor4f color,
121
                          const SkShader* shader,
122
                          const SkColorFilter* colorFilter,
123
                          const SkBlender* finalBlender,
124
0
                          const SkBlender* primitiveBlender) {
125
0
    const bool srcIsTransparent = !color.isOpaque() || (shader && !shader->isOpaque()) ||
126
0
                                  (colorFilter && !colorFilter->isAlphaUnchanged());
127
128
0
    if (primitiveBlender && blender_depends_on_dst(primitiveBlender, srcIsTransparent)) {
129
0
        return true;
130
0
    }
131
132
0
    return blender_depends_on_dst(finalBlender, srcIsTransparent);
133
0
}
134
135
0
bool paint_depends_on_dst(const PaintParams& paintParams) {
136
0
    return paint_depends_on_dst(paintParams.color(),
137
0
                                paintParams.shader(),
138
0
                                paintParams.colorFilter(),
139
0
                                paintParams.finalBlender(),
140
0
                                paintParams.primitiveBlender());
141
0
}
142
143
0
bool paint_depends_on_dst(const SkPaint& paint) {
144
    // CAUTION: getMaskFilter is intentionally ignored here.
145
0
    SkASSERT(!paint.getImageFilter());  // no paints in SkDevice should have an image filter
146
0
    return paint_depends_on_dst(paint.getColor4f(),
147
0
                                paint.getShader(),
148
0
                                paint.getColorFilter(),
149
0
                                paint.getBlender(),
150
0
                                /*primitiveBlender=*/nullptr);
151
0
}
Unexecuted instantiation: Device.cpp:skgpu::graphite::(anonymous namespace)::paint_depends_on_dst(SkPaint const&)
Unexecuted instantiation: Device.cpp:skgpu::graphite::(anonymous namespace)::paint_depends_on_dst(SkPaint const&)
152
153
/** If the paint can be reduced to a solid flood-fill, determine the correct color to fill with. */
154
std::optional<SkColor4f> extract_paint_color(const SkPaint& paint,
155
0
                                             const SkColorInfo& dstColorInfo) {
156
0
    SkASSERT(!paint_depends_on_dst(paint));
157
0
    if (paint.getShader()) {
158
0
        return std::nullopt;
159
0
    }
160
161
0
    SkColor4f dstPaintColor = PaintParams::Color4fPrepForDst(paint.getColor4f(), dstColorInfo);
162
163
0
    if (SkColorFilter* filter = paint.getColorFilter()) {
164
0
        SkColorSpace* dstCS = dstColorInfo.colorSpace();
165
0
        return filter->filterColor4f(dstPaintColor, dstCS, dstCS);
166
0
    }
167
0
    return dstPaintColor;
168
0
}
Unexecuted instantiation: Device.cpp:skgpu::graphite::(anonymous namespace)::extract_paint_color(SkPaint const&, SkColorInfo const&)
Unexecuted instantiation: Device.cpp:skgpu::graphite::(anonymous namespace)::extract_paint_color(SkPaint const&, SkColorInfo const&)
169
170
// Returns a local rect that has been adjusted such that when it's rasterized with `localToDevice`
171
// it will be pixel aligned. If this adjustment is not possible (due to transform type or precision)
172
// then this returns the original local rect unmodified.
173
//
174
// If `strokeWidth` is null, it's assumed to be a filled rectangle. If it's not null, on input it
175
// should hold the stroke width (or 0 for a hairline). After this returns, the stroke width may
176
// have been adjusted so that outer and inner stroked edges are pixel aligned (in which case the
177
// underlying rectangle geometry probably won't be pixel aligned).
178
//
179
// A best effort is made to align the stroke edges when there's a non-uniform scale factor that
180
// prevents exactly aligning both X and Y axes.
181
Rect snap_rect_to_pixels(const Transform& localToDevice,
182
                         const Rect& rect,
183
0
                         float* strokeWidth=nullptr) {
184
0
    if (localToDevice.type() > Transform::Type::kRectStaysRect) {
185
0
        return rect;
186
0
    }
187
188
0
    Rect snappedDeviceRect;
189
0
    if (!strokeWidth) {
190
        // Just a fill, use round() to emulate non-AA rasterization (vs. roundOut() to get the
191
        // covering bounds). This matches how ClipStack treats clipRects with PixelSnapping::kYes.
192
0
        snappedDeviceRect = localToDevice.mapRect(rect).round();
193
0
    } else if (strokeWidth) {
194
0
        if (*strokeWidth == 0.f) {
195
            // Hairline case needs to be outset by 1/2 device pixels *before* rounding, and then
196
            // inset by 1/2px to get the base shape while leaving the stroke width as 0.
197
0
            snappedDeviceRect = localToDevice.mapRect(rect);
198
0
            snappedDeviceRect.outset(0.5f).round().inset(0.5f);
199
0
        } else {
200
            // For regular strokes, outset by the stroke radius *before* mapping to device space,
201
            // and then round.
202
0
            snappedDeviceRect = localToDevice.mapRect(rect.makeOutset(0.5f*(*strokeWidth))).round();
203
204
            // devScales.x() holds scale factor affecting device-space X axis (so max of |m00| or
205
            // |m01|) and y() holds the device Y axis scale (max of |m10| or |m11|).
206
0
            skvx::float2 devScales = max(abs(skvx::float2(localToDevice.matrix().rc(0,0),
207
0
                                                          localToDevice.matrix().rc(1,0))),
208
0
                                         abs(skvx::float2(localToDevice.matrix().rc(0,1),
209
0
                                                          localToDevice.matrix().rc(1,1))));
210
0
            skvx::float2 devStrokeWidth = max(round(*strokeWidth * devScales), 1.f);
211
212
            // Prioritize the axis that has the largest device-space radius (any error from a
213
            // non-uniform scale factor will go into the inner edge of the opposite axis).
214
            // During animating scale factors, preserving the large axis leads to better behavior.
215
0
            if (devStrokeWidth.x() > devStrokeWidth.y()) {
216
0
                *strokeWidth = devStrokeWidth.x() / devScales.x();
217
0
            } else {
218
0
                *strokeWidth = devStrokeWidth.y() / devScales.y();
219
0
            }
220
221
0
            snappedDeviceRect.inset(0.5f * devScales * (*strokeWidth));
222
0
        }
223
0
    }
224
225
    // Map back to local space so that it can be drawn with appropriate coord interpolation.
226
0
    Rect snappedLocalRect = localToDevice.inverseMapRect(snappedDeviceRect);
227
    // If the transform has an extreme scale factor or large translation, it's possible for floating
228
    // point precision to round `snappedLocalRect` in such a way that re-transforming it by the
229
    // local-to-device matrix no longer matches the expected device bounds.
230
0
    if (snappedDeviceRect.nearlyEquals(localToDevice.mapRect(snappedLocalRect))) {
231
0
        return snappedLocalRect;
232
0
    } else {
233
        // In this case we will just return the original geometry and the pixels will show
234
        // fractional coverage.
235
0
        return rect;
236
0
    }
237
0
}
238
239
// If possible, snaps `dstRect` such that its device-space transformation lands on pixel bounds,
240
// and then updates `srcRect` to match the original src-to-dst coordinate mapping.
241
void snap_src_and_dst_rect_to_pixels(const Transform& localToDevice,
242
                                     SkRect* srcRect,
243
0
                                     SkRect* dstRect) {
244
0
    if (localToDevice.type() > Transform::Type::kRectStaysRect) {
245
0
        return;
246
0
    }
247
248
    // Assume snapping will succeed and always update 'src' to match; in the event snapping
249
    // returns the original dst rect, then the recalculated src rect is a no-op.
250
0
    SkMatrix dstToSrc = SkMatrix::RectToRect(*dstRect, *srcRect);
251
0
    *dstRect = snap_rect_to_pixels(localToDevice, *dstRect).asSkRect();
252
0
    *srcRect = dstToSrc.mapRect(*dstRect);
253
0
}
254
255
// Returns the inner bounds of `geometry` that is known to have full coverage. This does not worry
256
// about identifying draws that are equivalent pixel aligned and thus entirely full coverage, as
257
// that should have been caught earlier and used a coverage-less renderer from the beginning.
258
//
259
// An empty Rect is returned if there is no available inner bounds, or if it's not worth performing.
260
0
Rect get_inner_bounds(const Geometry& geometry, const Transform& localToDevice) {
261
0
    auto applyAAInset = [&](Rect rect) {
262
        // If the aa inset is too large, rect becomes empty and the inner bounds draw is
263
        // automatically skipped
264
0
        float aaInset = localToDevice.localAARadius(rect);
265
0
        rect.inset(aaInset);
266
        // Only add a second draw if it will have a reasonable number of covered pixels; otherwise
267
        // we are just adding draws to sort and pipelines to switch around.
268
0
        static constexpr float kInnerFillArea = 64*64;
269
        // Approximate the device-space area based on the minimum scale factor of the transform.
270
0
        float scaleFactor = sk_ieee_float_divide(1.f, aaInset);
271
0
        return scaleFactor*rect.area() >= kInnerFillArea ? rect : Rect::InfiniteInverted();
272
0
    };
273
274
0
    if (geometry.isEdgeAAQuad()) {
275
0
        const EdgeAAQuad& quad = geometry.edgeAAQuad();
276
0
        if (quad.isRect()) {
277
0
            return applyAAInset(quad.bounds());
278
0
        }
279
        // else currently we don't have a function to calculate the largest interior axis aligned
280
        // bounding box of a quadrilateral so skip the inner fill draw.
281
0
    } else if (geometry.isShape()) {
282
0
        const Shape& shape = geometry.shape();
283
0
        if (shape.isRect()) {
284
0
            return applyAAInset(shape.rect());
285
0
        } else if (shape.isRRect()) {
286
0
            return applyAAInset(SkRRectPriv::InnerBounds(shape.rrect()));
287
0
        }
288
0
    }
289
290
0
    return Rect::InfiniteInverted();
291
0
}
292
293
0
SkIRect rect_to_pixelbounds(const Rect& r) {
294
0
    return r.makeRoundOut().asSkIRect();
295
0
}
296
297
0
bool is_simple_shape(const Shape& shape, SkStrokeRec::Style type) {
298
    // We send regular filled and hairline [round] rectangles, stroked/hairline lines, and stroked
299
    // [r]rects with circular corners to a single Renderer that does not trigger MSAA.
300
    // Per-edge AA quadrilaterals also use the same Renderer but those are not "Shapes".
301
    // These shapes and quads may also be combined with a second non-AA inner fill. This fill step
302
    // is also directly used for flooding the clip
303
0
    return (shape.isEmpty() && shape.inverted()) ||
304
0
           (!shape.inverted() && type != SkStrokeRec::kStrokeAndFill_Style &&
305
0
            (shape.isRect() ||
306
0
             (shape.isLine() && type != SkStrokeRec::kFill_Style) ||
307
0
             (shape.isRRect() && (type != SkStrokeRec::kStroke_Style ||
308
0
                                  SkRRectPriv::AllCornersCircular(shape.rrect())))));
309
0
}
310
311
0
bool use_compute_atlas_when_available(PathRendererStrategy strategy) {
312
0
    return strategy == PathRendererStrategy::kComputeAnalyticAA ||
313
0
           strategy == PathRendererStrategy::kComputeMSAA16 ||
314
0
           strategy == PathRendererStrategy::kComputeMSAA8 ||
315
0
           strategy == PathRendererStrategy::kDefault;
316
0
}
317
318
} // anonymous namespace
319
320
/**
321
 * IntersectionTreeSet controls multiple IntersectionTrees to organize all add rectangles into
322
 * disjoint sets. For a given CompressedPaintersOrder and bounds, it returns the smallest
323
 * DisjointStencilIndex that guarantees the bounds are disjoint from all other draws that use the
324
 * same painters order and stencil index.
325
 */
326
class Device::IntersectionTreeSet {
327
public:
328
0
    IntersectionTreeSet() = default;
329
330
0
    DisjointStencilIndex add(CompressedPaintersOrder drawOrder, Rect rect) {
331
0
        auto& trees = fTrees[drawOrder];
332
0
        DisjointStencilIndex stencil = DrawOrder::kUnassigned.next();
333
0
        for (auto&& tree : trees) {
334
0
            if (tree->add(rect)) {
335
0
                return stencil;
336
0
            }
337
0
            stencil = stencil.next(); // advance to the next tree's index
338
0
        }
339
340
        // If here, no existing intersection tree can hold the rect so add a new one
341
0
        IntersectionTree* newTree = this->makeTree();
342
0
        SkAssertResult(newTree->add(rect));
343
0
        trees.push_back(newTree);
344
0
        return stencil;
345
0
    }
Unexecuted instantiation: skgpu::graphite::Device::IntersectionTreeSet::add(skgpu::graphite::MonotonicValue<skgpu::graphite::CompressedPaintersOrderSequence>, skgpu::graphite::Rect)
Unexecuted instantiation: skgpu::graphite::Device::IntersectionTreeSet::add(skgpu::graphite::MonotonicValue<skgpu::graphite::CompressedPaintersOrderSequence>, skgpu::graphite::Rect)
346
347
0
    void reset() {
348
0
        fTrees.clear();
349
0
        fTreeStore.reset();
350
0
    }
351
352
private:
353
    struct Hash {
354
0
        size_t operator()(const CompressedPaintersOrder& o) const noexcept { return o.bits(); }
355
    };
356
357
0
    IntersectionTree* makeTree() {
358
0
        return fTreeStore.make<IntersectionTree>();
359
0
    }
360
361
    // Each compressed painters order defines a barrier around draws so each order's set of draws
362
    // are independent, even if they may intersect. Within each order, the list of trees holds the
363
    // IntersectionTrees representing each disjoint set.
364
    // TODO: This organization of trees is logically convenient but may need to be optimized based
365
    // on real world data (e.g. how sparse is the map, how long is each vector of trees,...)
366
    std::unordered_map<CompressedPaintersOrder, std::vector<IntersectionTree*>, Hash> fTrees;
367
    SkSTArenaAllocWithReset<4 * sizeof(IntersectionTree)> fTreeStore;
368
};
369
370
sk_sp<Device> Device::Make(Recorder* recorder,
371
                           const SkImageInfo& ii,
372
                           skgpu::Budgeted budgeted,
373
                           Mipmapped mipmapped,
374
                           SkBackingFit backingFit,
375
                           const SkSurfaceProps& props,
376
                           LoadOp initialLoadOp,
377
                           std::string_view label,
378
0
                           bool registerWithRecorder) {
379
0
    SkASSERT(!(mipmapped == Mipmapped::kYes && backingFit == SkBackingFit::kApprox));
380
0
    if (!recorder) {
381
0
        return nullptr;
382
0
    }
383
384
0
    const Caps* caps = recorder->priv().caps();
385
0
    SkISize backingDimensions = backingFit == SkBackingFit::kApprox ? GetApproxSize(ii.dimensions())
386
0
                                                                    : ii.dimensions();
387
0
    auto textureInfo = caps->getDefaultSampledTextureInfo(ii.colorType(),
388
0
                                                          mipmapped,
389
0
                                                          recorder->priv().isProtected(),
390
0
                                                          Renderable::kYes);
391
392
0
    return Make(recorder,
393
0
                TextureProxy::Make(caps, recorder->priv().resourceProvider(),
394
0
                                   backingDimensions, textureInfo, std::move(label), budgeted),
395
0
                ii.dimensions(),
396
0
                ii.colorInfo(),
397
0
                props,
398
0
                initialLoadOp,
399
0
                registerWithRecorder);
400
0
}
Unexecuted instantiation: skgpu::graphite::Device::Make(skgpu::graphite::Recorder*, SkImageInfo const&, skgpu::Budgeted, skgpu::Mipmapped, SkBackingFit, SkSurfaceProps const&, skgpu::graphite::LoadOp, std::__1::basic_string_view<char, std::__1::char_traits<char> >, bool)
Unexecuted instantiation: skgpu::graphite::Device::Make(skgpu::graphite::Recorder*, SkImageInfo const&, skgpu::Budgeted, skgpu::Mipmapped, SkBackingFit, SkSurfaceProps const&, skgpu::graphite::LoadOp, std::__1::basic_string_view<char, std::__1::char_traits<char> >, bool)
401
402
sk_sp<Device> Device::Make(Recorder* recorder,
403
                           sk_sp<TextureProxy> target,
404
                           SkISize deviceSize,
405
                           const SkColorInfo& colorInfo,
406
                           const SkSurfaceProps& props,
407
                           LoadOp initialLoadOp,
408
0
                           bool registerWithRecorder) {
409
0
    if (!recorder) {
410
0
        return nullptr;
411
0
    }
412
413
0
    sk_sp<DrawContext> dc = DrawContext::Make(recorder->priv().caps(),
414
0
                                              std::move(target),
415
0
                                              deviceSize,
416
0
                                              colorInfo,
417
0
                                              props);
418
0
    if (!dc) {
419
0
        return nullptr;
420
0
    } else if (initialLoadOp == LoadOp::kClear) {
421
0
        dc->clear(SkColors::kTransparent);
422
0
    } else if (initialLoadOp == LoadOp::kDiscard) {
423
0
        dc->discard();
424
0
    } // else kLoad is the default initial op for a DrawContext
425
426
0
    sk_sp<Device> device{new Device(recorder, std::move(dc))};
427
0
    if (registerWithRecorder) {
428
        // We don't register the device with the recorder until after the constructor has returned.
429
0
        recorder->registerDevice(device);
430
0
    } else {
431
        // Since it's not registered, it should go out of scope before nextRecordingID() changes
432
        // from what is saved to fScopedRecordingID.
433
0
        SkDEBUGCODE(device->fScopedRecordingID = recorder->priv().nextRecordingID();)
434
0
    }
435
0
    return device;
436
0
}
Unexecuted instantiation: skgpu::graphite::Device::Make(skgpu::graphite::Recorder*, sk_sp<skgpu::graphite::TextureProxy>, SkISize, SkColorInfo const&, SkSurfaceProps const&, skgpu::graphite::LoadOp, bool)
Unexecuted instantiation: skgpu::graphite::Device::Make(skgpu::graphite::Recorder*, sk_sp<skgpu::graphite::TextureProxy>, SkISize, SkColorInfo const&, SkSurfaceProps const&, skgpu::graphite::LoadOp, bool)
437
438
// These default tuning numbers for the HybridBoundsManager were chosen from looking at performance
439
// and accuracy curves produced by the BoundsManagerBench for random draw bounding boxes. This
440
// config will use brute force for the first 64 draw calls to the Device and then switch to a grid
441
// that is dynamically sized to produce cells that are 16x16, up to a grid that's 32x32 cells.
442
// This seemed like a sweet spot balancing accuracy for low-draw count surfaces and overhead for
443
// high-draw count and high-resolution surfaces. With the 32x32 grid limit, cell size will increase
444
// above 16px when the surface dimension goes above 512px.
445
// TODO: These could be exposed as context options or surface options, and we may want to have
446
// different strategies in place for a base device vs. a layer's device.
447
static constexpr int kGridCellSize = 16;
448
static constexpr int kMaxBruteForceN = 64;
449
static constexpr int kMaxGridSize = 32;
450
451
Device::Device(Recorder* recorder, sk_sp<DrawContext> dc)
452
        : SkDevice(dc->imageInfo(), dc->surfaceProps())
453
        , fRecorder(recorder)
454
        , fDC(std::move(dc))
455
        , fClip(this)
456
        , fColorDepthBoundsManager(std::make_unique<HybridBoundsManager>(
457
                  fDC->imageInfo().dimensions(), kGridCellSize, kMaxBruteForceN, kMaxGridSize))
458
        , fDisjointStencilSet(std::make_unique<IntersectionTreeSet>())
459
        , fCachedLocalToDevice(SkM44())
460
        , fCurrentDepth(DrawOrder::kClearDepth)
461
        , fSubRunControl(recorder->priv().caps()->getSubRunControl(
462
0
                fDC->surfaceProps().isUseDeviceIndependentFonts())) {
463
0
    SkASSERT(SkToBool(fDC) && SkToBool(fRecorder));
464
0
    if (fRecorder->priv().caps()->defaultMSAASamplesCount() > 1) {
465
0
        if (fRecorder->priv().caps()->msaaRenderToSingleSampledSupport()) {
466
0
            fMSAASupported = true;
467
0
        } else {
468
0
            TextureInfo msaaTexInfo =
469
0
                   fRecorder->priv().caps()->getDefaultMSAATextureInfo(fDC->target()->textureInfo(),
470
0
                                                                       Discardable::kYes);
471
0
            fMSAASupported = msaaTexInfo.isValid();
472
0
        }
473
0
    }
474
0
}
Unexecuted instantiation: skgpu::graphite::Device::Device(skgpu::graphite::Recorder*, sk_sp<skgpu::graphite::DrawContext>)
Unexecuted instantiation: skgpu::graphite::Device::Device(skgpu::graphite::Recorder*, sk_sp<skgpu::graphite::DrawContext>)
475
476
0
Device::~Device() {
477
    // The Device should have been marked immutable before it's destroyed, or the Recorder was the
478
    // last holder of a reference to it and de-registered the device as part of its cleanup.
479
    // However, if the Device was not registered with the recorder (i.e. a scratch device) we don't
480
    // require that its recorder be adandoned. Scratch devices must either have been marked
481
    // immutable or be destroyed before the recorder has been snapped.
482
0
    SkASSERT(!fRecorder || fScopedRecordingID != 0);
483
#if defined(SK_DEBUG)
484
0
    if (fScopedRecordingID != 0 && fRecorder) {
485
0
        SkASSERT(fScopedRecordingID == fRecorder->priv().nextRecordingID());
486
0
    }
487
    // else it wasn't a scratch device, or it was a scratch device that was marked immutable so its
488
    // lifetime was validated when setImmutable() was called.
489
#endif
490
0
}
Unexecuted instantiation: skgpu::graphite::Device::~Device()
Unexecuted instantiation: skgpu::graphite::Device::~Device()
491
492
0
void Device::setImmutable() {
493
0
    if (fRecorder) {
494
        // Push any pending work to the Recorder now. setImmutable() is only called by the
495
        // destructor of a client-owned Surface, or explicitly in layer/filtering workflows. In
496
        // both cases this is restricted to the Recorder's thread. This is in contrast to ~Device(),
497
        // which might be called from another thread if it was linked to an Image used in multiple
498
        // recorders.
499
0
        this->flushPendingWorkToRecorder();
500
0
        fRecorder->deregisterDevice(this);
501
        // Abandoning the recorder ensures that there are no further operations that can be recorded
502
        // and is relied on by Image::notifyInUse() to detect when it can unlink from a Device.
503
0
        this->abandonRecorder();
504
0
    }
505
0
}
506
507
0
const Transform& Device::localToDeviceTransform() {
508
0
    if (this->checkLocalToDeviceDirty()) {
509
0
        fCachedLocalToDevice = Transform{this->localToDevice44()};
510
0
    }
511
0
    return fCachedLocalToDevice;
512
0
}
513
514
0
SkStrikeDeviceInfo Device::strikeDeviceInfo() const {
515
0
    return {this->surfaceProps(), this->scalerContextFlags(), &fSubRunControl};
516
0
}
517
518
0
sk_sp<SkDevice> Device::createDevice(const CreateInfo& info, const SkPaint*) {
519
    // TODO: Inspect the paint and create info to determine if there's anything that has to be
520
    // modified to support inline subpasses.
521
0
    SkSurfaceProps props =
522
0
        this->surfaceProps().cloneWithPixelGeometry(info.fPixelGeometry);
523
524
    // Skia's convention is to only clear a device if it is non-opaque.
525
0
    LoadOp initialLoadOp = info.fInfo.isOpaque() ? LoadOp::kDiscard : LoadOp::kClear;
526
527
0
    std::string label = this->target()->label();
528
0
    if (label.empty()) {
529
0
        label = "ChildDevice";
530
0
    } else {
531
0
        label += "_ChildDevice";
532
0
    }
533
534
0
    return Make(fRecorder,
535
0
                info.fInfo,
536
0
                skgpu::Budgeted::kYes,
537
0
                Mipmapped::kNo,
538
0
                SkBackingFit::kApprox,
539
0
                props,
540
0
                initialLoadOp,
541
0
                label);
542
0
}
543
544
0
sk_sp<SkSurface> Device::makeSurface(const SkImageInfo& ii, const SkSurfaceProps& props) {
545
0
    return SkSurfaces::RenderTarget(fRecorder, ii, Mipmapped::kNo, &props);
546
0
}
547
548
sk_sp<Image> Device::makeImageCopy(const SkIRect& subset,
549
                                   Budgeted budgeted,
550
                                   Mipmapped mipmapped,
551
0
                                   SkBackingFit backingFit) {
552
0
    ASSERT_SINGLE_OWNER
553
0
    this->flushPendingWorkToRecorder();
554
555
0
    const SkColorInfo& colorInfo = this->imageInfo().colorInfo();
556
0
    TextureProxyView srcView = this->readSurfaceView();
557
0
    if (!srcView) {
558
        // readSurfaceView() returns an empty view when the target is not texturable. Create an
559
        // equivalent view for the blitting operation.
560
0
        Swizzle readSwizzle = fRecorder->priv().caps()->getReadSwizzle(
561
0
                colorInfo.colorType(), this->target()->textureInfo());
562
0
        srcView = {sk_ref_sp(this->target()), readSwizzle};
563
0
    }
564
0
    std::string label = this->target()->label();
565
0
    if (label.empty()) {
566
0
        label = "CopyDeviceTexture";
567
0
    } else {
568
0
        label += "_DeviceCopy";
569
0
    }
570
571
0
    return Image::Copy(fRecorder, srcView, colorInfo, subset, budgeted, mipmapped, backingFit,
572
0
                       label);
573
0
}
Unexecuted instantiation: skgpu::graphite::Device::makeImageCopy(SkIRect const&, skgpu::Budgeted, skgpu::Mipmapped, SkBackingFit)
Unexecuted instantiation: skgpu::graphite::Device::makeImageCopy(SkIRect const&, skgpu::Budgeted, skgpu::Mipmapped, SkBackingFit)
574
575
0
// Testing-only synchronous pixel readback. Snaps and inserts all pending recorded work so the
// read observes every draw issued so far, then reads from this Device's target into 'pm'.
// Returns false outside of GPU_TEST_UTILS builds, when no Context is available, or when any
// snap/insert/read step fails.
bool Device::onReadPixels(const SkPixmap& pm, int srcX, int srcY) {
#if defined(GPU_TEST_UTILS)
    // This testing-only function should only be called before the Device has detached from its
    // Recorder, since it's accessed via the test-held Surface.
    ASSERT_SINGLE_OWNER
    if (Context* context = fRecorder->priv().context()) {
        // Add all previous commands generated to the command buffer.
        // If the client snaps later they'll only get post-read commands in their Recording,
        // but since they're doing a readPixels in the middle that shouldn't be unexpected.
        std::unique_ptr<Recording> recording = fRecorder->snap();
        if (!recording) {
            return false;
        }
        InsertRecordingInfo info;
        info.fRecording = recording.get();
        if (!context->insertRecording(info)) {
            return false;
        }
        // Synchronously read back from the draw context's target now that all prior work is
        // queued on the Context.
        return context->priv().readPixels(pm, fDC->target(), this->imageInfo(), srcX, srcY);
    }
#endif
    // We have no access to a context to do a read pixels here.
    return false;
}
599
600
0
// Writes the CPU pixels in 'src' into this Device's target at (x, y). Uses a direct GPU upload
// when the backend supports writing to the target's texture format; otherwise falls back to
// wrapping 'src' in a texture image and drawing it with kSrc blending. Returns false when the
// color/alpha types are unusable or the destination rect is empty.
bool Device::onWritePixels(const SkPixmap& src, int x, int y) {
    ASSERT_SINGLE_OWNER
    // TODO: we may need to share this in a more central place to handle uploads
    // to backend textures

    const TextureProxy* target = fDC->target();

    // TODO: add mipmap support for createBackendTexture

    if (src.colorType() == kUnknown_SkColorType) {
        return false;
    }

    // If one alpha type is unknown and the other isn't, it's too underspecified.
    if ((src.alphaType() == kUnknown_SkAlphaType) !=
        (this->imageInfo().alphaType() == kUnknown_SkAlphaType)) {
        return false;
    }

    // TODO: canvas2DFastPath?

    if (!fRecorder->priv().caps()->supportsWritePixels(target->textureInfo())) {
        // Fallback: lift the CPU pixels into a texture image and draw it with kSrc blending,
        // which replaces the destination exactly like a direct write would.
        auto image = SkImages::RasterFromPixmap(src, nullptr, nullptr);
        image = SkImages::TextureFromImage(fRecorder, image.get());
        if (!image) {
            return false;
        }

        SkPaint paint;
        paint.setBlendMode(SkBlendMode::kSrc);
        // kNearest sampling because a writePixels must be a 1:1 pixel copy, never filtered.
        this->drawImageRect(image.get(),
                            /*src=*/nullptr,
                            SkRect::MakeXYWH(x, y, src.width(), src.height()),
                            SkFilterMode::kNearest,
                            paint,
                            SkCanvas::kFast_SrcRectConstraint);
        return true;
    }

    // TODO: check for flips and either handle here or pass info to UploadTask

    // Determine rect to copy; a fully-lazy target has no known dimensions to clip against yet.
    SkIRect dstRect = SkIRect::MakePtSize({x, y}, src.dimensions());
    if (!target->isFullyLazy() && !dstRect.intersect(SkIRect::MakeSize(target->dimensions()))) {
        return false;
    }

    // Set up copy location; offset the source address by however much the dst rect was clipped.
    const void* addr = src.addr(dstRect.fLeft - x, dstRect.fTop - y);
    std::vector<MipLevel> levels;
    levels.push_back({addr, src.rowBytes()});

    // The writePixels() still respects painter's order, so flush everything to tasks before this
    // recording the upload for the pixel data.
    this->internalFlush();
    // The new upload will be executed before any new draws are recorded and also ensures that
    // the next call to flushDeviceToRecorder() will produce a non-null DrawTask. If this Device's
    // target is mipmapped, mipmap generation tasks will be added automatically at that point.
    return fDC->recordUpload(fRecorder, fDC->refTarget(), src.info().colorInfo(),
                             this->imageInfo().colorInfo(), levels, dstRect, nullptr);
}
661
662
663
///////////////////////////////////////////////////////////////////////////////
664
665
0
bool Device::isClipAntiAliased() const {
666
    // All clips are AA'ed unless it's wide-open, empty, or a device-rect with integer coordinates
667
0
    ClipStack::ClipState type = fClip.clipState();
668
0
    if (type == ClipStack::ClipState::kWideOpen || type == ClipStack::ClipState::kEmpty) {
669
0
        return false;
670
0
    } else if (type == ClipStack::ClipState::kDeviceRect) {
671
0
        const ClipStack::Element rect = *fClip.begin();
672
0
        SkASSERT(rect.fShape.isRect() && rect.fLocalToDevice.type() == Transform::Type::kIdentity);
673
0
        return rect.fShape.rect() != rect.fShape.rect().makeRoundOut();
674
0
    } else {
675
0
        return true;
676
0
    }
677
0
}
Unexecuted instantiation: skgpu::graphite::Device::isClipAntiAliased() const
Unexecuted instantiation: skgpu::graphite::Device::isClipAntiAliased() const
678
679
0
// Returns the conservative device-space bounds of the current clip, expanded outward to whole
// integer pixel bounds.
SkIRect Device::devClipBounds() const {
    const auto conservativeClip = fClip.conservativeBounds();
    return rect_to_pixelbounds(conservativeClip);
}
682
683
// TODO: This is easy enough to support, but do we still need this API in Skia at all?
684
0
// TODO: This is easy enough to support, but do we still need this API in Skia at all?
// Approximates the current clip stack as an SkRegion for Android Framework interop. Starts from
// the conservative device clip bounds and replays each clip element's op (intersect/difference)
// against it, so element order is significant.
void Device::android_utils_clipAsRgn(SkRegion* region) const {
    SkIRect bounds = this->devClipBounds();
    // Assume wide open and then perform intersect/difference operations reducing the region
    region->setRect(bounds);
    const SkRegion deviceBounds(bounds);
    for (const ClipStack::Element& e : fClip) {
        SkRegion tmp;
        if (e.fShape.isRect() && e.fLocalToDevice.type() == Transform::Type::kIdentity) {
            // Identity-transform rects can go straight to integer pixel bounds.
            tmp.setRect(rect_to_pixelbounds(e.fShape.rect()));
        } else {
            // General shapes are converted to a device-space path and scan-converted, clipped
            // to the overall device bounds.
            SkPath tmpPath = e.fShape.asPath();
            tmpPath.transform(e.fLocalToDevice);
            tmp.setPath(tmpPath, deviceBounds);
        }

        // e.fOp is a SkClipOp, which maps onto the corresponding SkRegion::Op values.
        region->op(tmp, (SkRegion::Op) e.fOp);
    }
}
702
703
0
void Device::clipRect(const SkRect& rect, SkClipOp op, bool aa) {
704
0
    SkASSERT(op == SkClipOp::kIntersect || op == SkClipOp::kDifference);
705
0
    auto snapping = aa ? ClipStack::PixelSnapping::kNo : ClipStack::PixelSnapping::kYes;
706
0
    fClip.clipShape(this->localToDeviceTransform(), Shape{rect}, op, snapping);
707
0
}
Unexecuted instantiation: skgpu::graphite::Device::clipRect(SkRect const&, SkClipOp, bool)
Unexecuted instantiation: skgpu::graphite::Device::clipRect(SkRect const&, SkClipOp, bool)
708
709
0
void Device::clipRRect(const SkRRect& rrect, SkClipOp op, bool aa) {
710
0
    SkASSERT(op == SkClipOp::kIntersect || op == SkClipOp::kDifference);
711
0
    auto snapping = aa ? ClipStack::PixelSnapping::kNo : ClipStack::PixelSnapping::kYes;
712
0
    fClip.clipShape(this->localToDeviceTransform(), Shape{rrect}, op, snapping);
713
0
}
Unexecuted instantiation: skgpu::graphite::Device::clipRRect(SkRRect const&, SkClipOp, bool)
Unexecuted instantiation: skgpu::graphite::Device::clipRRect(SkRRect const&, SkClipOp, bool)
714
715
0
void Device::clipPath(const SkPath& path, SkClipOp op, bool aa) {
716
0
    SkASSERT(op == SkClipOp::kIntersect || op == SkClipOp::kDifference);
717
    // TODO: Ensure all path inspection is handled here or in SkCanvas, and that non-AA rects as
718
    // paths are routed appropriately.
719
    // TODO: Must also detect paths that are lines so the clip stack can be set to empty
720
0
    fClip.clipShape(this->localToDeviceTransform(), Shape{path}, op);
721
0
}
Unexecuted instantiation: skgpu::graphite::Device::clipPath(SkPath const&, SkClipOp, bool)
Unexecuted instantiation: skgpu::graphite::Device::clipPath(SkPath const&, SkClipOp, bool)
722
723
0
// Forwards a shader-based clip (coverage-as-alpha) to the clip stack, which tracks it alongside
// the geometric clip elements.
void Device::onClipShader(sk_sp<SkShader> shader) {
    fClip.clipShader(std::move(shader));
}
726
727
// TODO: Is clipRegion() on the deprecation chopping block. If not it should be...
728
0
void Device::clipRegion(const SkRegion& globalRgn, SkClipOp op) {
729
0
    SkASSERT(op == SkClipOp::kIntersect || op == SkClipOp::kDifference);
730
731
0
    Transform globalToDevice{this->globalToDevice()};
732
733
0
    if (globalRgn.isEmpty()) {
734
0
        fClip.clipShape(globalToDevice, Shape{}, op);
735
0
    } else if (globalRgn.isRect()) {
736
0
        fClip.clipShape(globalToDevice, Shape{SkRect::Make(globalRgn.getBounds())}, op,
737
0
                        ClipStack::PixelSnapping::kYes);
738
0
    } else {
739
        // TODO: Can we just iterate the region and do non-AA rects for each chunk?
740
0
        SkPath path;
741
0
        globalRgn.getBoundaryPath(&path);
742
0
        fClip.clipShape(globalToDevice, Shape{path}, op);
743
0
    }
744
0
}
Unexecuted instantiation: skgpu::graphite::Device::clipRegion(SkRegion const&, SkClipOp)
Unexecuted instantiation: skgpu::graphite::Device::clipRegion(SkRegion const&, SkClipOp)
745
746
0
// Intentionally a no-op; 'rect' is deliberately unused (see rationale below).
void Device::replaceClip(const SkIRect& rect) {
    // ReplaceClip() is currently not intended to be supported in Graphite since it's only used
    // for emulating legacy clip ops in Android Framework, and apps/devices that require that
    // should not use Graphite. However, if it needs to be supported, we could probably implement
    // it by:
    //  1. Flush all pending clip element depth draws.
    //  2. Draw a fullscreen rect to the depth attachment using a Z value greater than what's
    //     been used so far.
    //  3. Make sure all future "unclipped" draws use this Z value instead of 0 so they aren't
    //     sorted before the depth reset.
    //  4. Make sure all prior elements are inactive so they can't affect subsequent draws.
    //
    // For now, just ignore it.
}
760
761
///////////////////////////////////////////////////////////////////////////////
762
763
0
// Fills the entire clip with 'paint'. When the clip is wide open and the paint doesn't read the
// destination, this is optimized to either a fullscreen clear (solid color) or a discard of all
// prior draws followed by the flood draw. Otherwise the fill is drawn as an inverse-filled empty
// shape, which covers exactly the clipped area.
void Device::drawPaint(const SkPaint& paint) {
    ASSERT_SINGLE_OWNER
    // We never want to do a fullscreen clear on a fully-lazy render target, because the device size
    // may be smaller than the final surface we draw to, in which case we don't want to fill the
    // entire final surface.
    if (this->isClipWideOpen() && !fDC->target()->isFullyLazy()) {
        if (!paint_depends_on_dst(paint)) {
            if (std::optional<SkColor4f> color = extract_paint_color(paint, fDC->colorInfo())) {
                // do fullscreen clear
                fDC->clear(*color);
                return;
            } else {
                // This paint does not depend on the destination and covers the entire surface, so
                // discard everything previously recorded and proceed with the draw.
                fDC->discard();
            }
        }
    }

    Shape inverseFill; // defaults to empty
    inverseFill.setInverted(true);
    // An empty shape with an inverse fill completely floods the clip
    SkASSERT(inverseFill.isEmpty() && inverseFill.inverted());

    // Path effects are ignored for drawPaint per the SkCanvas contract.
    this->drawGeometry(this->localToDeviceTransform(),
                       Geometry(inverseFill),
                       paint,
                       DefaultFillStyle(),
                       DrawFlags::kIgnorePathEffect);
}
793
794
0
void Device::drawRect(const SkRect& r, const SkPaint& paint) {
795
0
    Rect rectToDraw(r);
796
0
    SkStrokeRec style(paint);
797
0
    if (!paint.isAntiAlias()) {
798
        // Graphite assumes everything is anti-aliased. In the case of axis-aligned non-aa requested
799
        // rectangles, we snap the local geometry to land on pixel boundaries to emulate non-aa.
800
0
        if (style.isFillStyle()) {
801
0
            rectToDraw = snap_rect_to_pixels(this->localToDeviceTransform(), rectToDraw);
802
0
        } else {
803
0
            const bool strokeAndFill = style.getStyle() == SkStrokeRec::kStrokeAndFill_Style;
804
0
            float strokeWidth = style.getWidth();
805
0
            rectToDraw = snap_rect_to_pixels(this->localToDeviceTransform(),
806
0
                                             rectToDraw, &strokeWidth);
807
0
            style.setStrokeStyle(strokeWidth, strokeAndFill);
808
0
        }
809
0
    }
810
0
    this->drawGeometry(this->localToDeviceTransform(), Geometry(Shape(rectToDraw)), paint, style);
811
0
}
812
813
void Device::drawVertices(const SkVertices* vertices, sk_sp<SkBlender> blender,
814
0
                          const SkPaint& paint, bool skipColorXform)  {
815
  // TODO - Add GPU handling of skipColorXform once Graphite has its color system more fleshed out.
816
0
    this->drawGeometry(this->localToDeviceTransform(),
817
0
                       Geometry(sk_ref_sp(vertices)),
818
0
                       paint,
819
0
                       DefaultFillStyle(),
820
0
                       DrawFlags::kIgnorePathEffect,
821
0
                       std::move(blender),
822
0
                       skipColorXform);
823
0
}
824
825
bool Device::drawAsTiledImageRect(SkCanvas* canvas,
826
                                  const SkImage* image,
827
                                  const SkRect* src,
828
                                  const SkRect& dst,
829
                                  const SkSamplingOptions& sampling,
830
                                  const SkPaint& paint,
831
0
                                  SkCanvas::SrcRectConstraint constraint) {
832
0
    auto recorder = canvas->recorder();
833
0
    if (!recorder) {
834
0
        return false;
835
0
    }
836
0
    SkASSERT(src);
837
838
    // For Graphite this is a pretty loose heuristic. The Recorder-local cache size (relative
839
    // to the large image's size) is used as a proxy for how conservative we should be when
840
    // allocating tiles. Since the tiles will actually be owned by the client (via an
841
    // ImageProvider) they won't actually add any memory pressure directly to Graphite.
842
0
    size_t cacheSize = recorder->priv().getResourceCacheLimit();
843
0
    size_t maxTextureSize = recorder->priv().caps()->maxTextureSize();
844
845
0
#if defined(GPU_TEST_UTILS)
846
0
    if (gOverrideMaxTextureSizeGraphite) {
847
0
        maxTextureSize = gOverrideMaxTextureSizeGraphite;
848
0
    }
849
0
    gNumTilesDrawnGraphite.store(0, std::memory_order_relaxed);
850
0
#endif
851
852
    // DrawAsTiledImageRect produces per-edge AA quads, which do not participate in non-AA pixel
853
    // snapping emulation. To match an un-tiled drawImageRect, round the src and dst geometry
854
    // before any tiling occurs.
855
0
    SkRect finalSrc = *src;
856
0
    SkRect finalDst = dst;
857
0
    if (!paint.isAntiAlias()) {
858
0
        snap_src_and_dst_rect_to_pixels(this->localToDeviceTransform(),
859
0
                                        &finalSrc, &finalDst);
860
0
    }
861
862
0
    [[maybe_unused]] auto [wasTiled, numTiles] =
863
0
            skgpu::TiledTextureUtils::DrawAsTiledImageRect(canvas,
864
0
                                                           image,
865
0
                                                           finalSrc,
866
0
                                                           finalDst,
867
0
                                                           SkCanvas::kAll_QuadAAFlags,
868
0
                                                           sampling,
869
0
                                                           &paint,
870
0
                                                           constraint,
871
0
                                                           cacheSize,
872
0
                                                           maxTextureSize);
873
0
#if defined(GPU_TEST_UTILS)
874
0
    gNumTilesDrawnGraphite.store(numTiles, std::memory_order_relaxed);
875
0
#endif
876
0
    return wasTiled;
877
0
}
Unexecuted instantiation: skgpu::graphite::Device::drawAsTiledImageRect(SkCanvas*, SkImage const*, SkRect const*, SkRect const&, SkSamplingOptions const&, SkPaint const&, SkCanvas::SrcRectConstraint)
Unexecuted instantiation: skgpu::graphite::Device::drawAsTiledImageRect(SkCanvas*, SkImage const*, SkRect const*, SkRect const&, SkSamplingOptions const&, SkPaint const&, SkCanvas::SrcRectConstraint)
878
879
0
void Device::drawOval(const SkRect& oval, const SkPaint& paint) {
880
0
    if (paint.getPathEffect()) {
881
        // Dashing requires that the oval path starts on the right side and travels clockwise. This
882
        // is the default for the SkPath::Oval constructor, as used by SkBitmapDevice.
883
0
        this->drawGeometry(this->localToDeviceTransform(), Geometry(Shape(SkPath::Oval(oval))),
884
0
                           paint, SkStrokeRec(paint));
885
0
    } else {
886
        // TODO: This has wasted effort from the SkCanvas level since it instead converts rrects
887
        // that happen to be ovals into this, only for us to go right back to rrect.
888
0
        this->drawRRect(SkRRect::MakeOval(oval), paint);
889
0
    }
890
0
}
891
892
0
// Draws a rounded rectangle. AA rrects are drawn as-is; for non-AA requests only the straight
// edges are snapped to pixel bounds (matching drawRect(rr.bounds())) while the corners remain
// anti-aliased, since fully aliased corners cannot be emulated by snapping.
void Device::drawRRect(const SkRRect& rr, const SkPaint& paint) {
    Shape rrectToDraw;
    SkStrokeRec style(paint);

    if (paint.isAntiAlias()) {
        rrectToDraw.setRRect(rr);
    } else {
        // Snap the horizontal and vertical edges of the rounded rectangle to pixel edges to match
        // the behavior of drawRect(rr.bounds()), to partially emulate non-AA rendering while
        // preserving the anti-aliasing of the curved corners.
        Rect snappedBounds;
        if (style.isFillStyle()) {
            snappedBounds = snap_rect_to_pixels(this->localToDeviceTransform(), rr.rect());
        } else {
            // Stroked styles also snap the stroke width; keep the stroke-and-fill distinction.
            const bool strokeAndFill = style.getStyle() == SkStrokeRec::kStrokeAndFill_Style;
            float strokeWidth = style.getWidth();
            snappedBounds = snap_rect_to_pixels(this->localToDeviceTransform(),
                                                rr.rect(), &strokeWidth);
            style.setStrokeStyle(strokeWidth, strokeAndFill);
        }

        // Rebuild the rrect on the snapped bounds with the original corner radii.
        SkRRect snappedRRect;
        snappedRRect.setRectRadii(snappedBounds.asSkRect(), rr.radii().data());
        rrectToDraw.setRRect(snappedRRect);
    }

    this->drawGeometry(this->localToDeviceTransform(), Geometry(rrectToDraw), paint, style);
}
920
921
0
// Draws a path, first detecting simple geometry (oval, rrect, closed/filled rect) so those can
// use their cheaper dedicated draw paths. Inverse-filled paths and paths with path effects skip
// the detection and go straight to the general path renderer. 'pathIsMutable' is unused here.
void Device::drawPath(const SkPath& path, const SkPaint& paint, bool pathIsMutable) {
    // Alternatively, we could move this analysis to SkCanvas. Also, we could consider applying the
    // path effect, being careful about starting point and direction.
    if (!paint.getPathEffect() && !path.isInverseFillType()) {
        if (SkRect oval; path.isOval(&oval)) {
            this->drawOval(oval, paint);
            return;
        }
        if (SkRRect rrect; path.isRRect(&rrect)) {
            this->drawRRect(rrect, paint);
            return;
        }
        // For rects, if the path is not explicitly closed and the paint style is stroked then it
        // represents a rectangle with only 3 sides rasterized (and with any caps). If it's filled
        // or is closed+stroked, then the path renders identically to the rectangle.
        bool isClosed = false;
        if (SkRect rect; path.isRect(&rect, &isClosed) &&
            (paint.getStyle() == SkPaint::kFill_Style || isClosed)) {
            this->drawRect(rect, paint);
            return;
        }
    }
    this->drawGeometry(this->localToDeviceTransform(), Geometry(Shape(path)),
                       paint, SkStrokeRec(paint));
}
946
947
// Draws points, line segments, or a polygon outline as stroked segments.
// kPoints draws each point as a zero-length segment (caps produce the dot); kLines strokes
// points pairwise; kPolygon strokes consecutive points [i, i+1].
// 'count' is the number of entries in 'points'; zero is a no-op.
void Device::drawPoints(SkCanvas::PointMode mode, size_t count,
                        const SkPoint* points, const SkPaint& paint) {
    if (count == 0) {
        // Nothing to draw. This also guards the unsigned 'count--' below: for kLines/kPolygon a
        // zero count would wrap around to SIZE_MAX and read far past the end of 'points'.
        return;
    }
    SkStrokeRec stroke(paint, SkPaint::kStroke_Style);
    size_t next = 0;
    if (mode == SkCanvas::kPoints_PointMode) {
        // Treat kPoints mode as stroking zero-length path segments, which produce caps so that
        // both hairlines and round vs. square geometry are handled entirely on the GPU.
        // TODO: SkCanvas should probably do the butt to square cap correction.
        if (paint.getStrokeCap() == SkPaint::kButt_Cap) {
            stroke.setStrokeParams(SkPaint::kSquare_Cap,
                                   paint.getStrokeJoin(),
                                   paint.getStrokeMiter());
        }
    } else {
        // Each segment spans [i, i+1]; the final point starts no segment of its own.
        next = 1;
        count--;
    }

    // kLines consumes points pairwise; kPoints and kPolygon advance one point at a time.
    size_t inc = mode == SkCanvas::kLines_PointMode ? 2 : 1;
    for (size_t i = 0; i < count; i += inc) {
        this->drawGeometry(this->localToDeviceTransform(),
                           Geometry(Shape(points[i], points[i + next])),
                           paint, stroke);
    }
}
972
973
void Device::drawEdgeAAQuad(const SkRect& rect,
974
                            const SkPoint clip[4],
975
                            SkCanvas::QuadAAFlags aaFlags,
976
                            const SkColor4f& color,
977
0
                            SkBlendMode mode) {
978
0
    SkPaint solidColorPaint;
979
0
    solidColorPaint.setColor4f(color, /*colorSpace=*/nullptr);
980
0
    solidColorPaint.setBlendMode(mode);
981
982
    // NOTE: We do not snap edge AA quads that are fully non-AA because we need their edges to seam
983
    // with quads that have mixed edge flags (so both need to match the GPU rasterization, not our
984
    // CPU rounding).
985
0
    auto flags = SkEnumBitMask<EdgeAAQuad::Flags>(static_cast<EdgeAAQuad::Flags>(aaFlags));
986
0
    EdgeAAQuad quad = clip ? EdgeAAQuad(clip, flags) : EdgeAAQuad(rect, flags);
987
0
    this->drawGeometry(this->localToDeviceTransform(),
988
0
                       Geometry(quad),
989
0
                       solidColorPaint,
990
0
                       DefaultFillStyle(),
991
0
                       DrawFlags::kIgnorePathEffect);
992
0
}
993
994
// Draws a batch of images as edge-AA quads. Each entry may carry its own dst clip quad (from
// 'dstClips'), an extra pre-view matrix (from 'preViewMatrices'), a per-entry alpha, and
// per-edge AA flags. Entries are drawn one at a time via drawGeometry(); on any image
// conversion failure the remainder of the set is abandoned.
void Device::drawEdgeAAImageSet(const SkCanvas::ImageSetEntry set[], int count,
                                const SkPoint dstClips[], const SkMatrix preViewMatrices[],
                                const SkSamplingOptions& sampling, const SkPaint& paint,
                                SkCanvas::SrcRectConstraint constraint) {
    SkASSERT(count > 0);

    SkPaint paintWithShader(paint);
    int dstClipIndex = 0;
    for (int i = 0; i < count; ++i) {
        // If the entry is clipped by 'dstClips', that must be provided
        SkASSERT(!set[i].fHasClip || dstClips);
        // Similarly, if it has an extra transform, those must be provided
        SkASSERT(set[i].fMatrixIndex < 0 || preViewMatrices);

        auto [ imageToDraw, newSampling ] =
                skgpu::graphite::GetGraphiteBacked(this->recorder(), set[i].fImage.get(), sampling);
        if (!imageToDraw) {
            // Attribute the failure to this function; drawImageRect() is only one of several
            // callers that funnel into this entry point.
            SKGPU_LOG_W("Device::drawEdgeAAImageSet: Creation of Graphite-backed image failed");
            return;
        }

        // TODO: Produce an image shading paint key and data directly without having to reconstruct
        // the equivalent SkPaint for each entry. Reuse the key and data between entries if possible
        paintWithShader.setShader(paint.refShader());
        paintWithShader.setAlphaf(paint.getAlphaf() * set[i].fAlpha);
        SkRect dst = SkModifyPaintAndDstForDrawImageRect(
                    imageToDraw.get(), newSampling, set[i].fSrcRect, set[i].fDstRect,
                    constraint == SkCanvas::kStrict_SrcRectConstraint,
                    &paintWithShader);
        if (dst.isEmpty()) {
            return;
        }

        // NOTE: See drawEdgeAAQuad for details, we do not snap non-AA quads.
        auto flags =
                SkEnumBitMask<EdgeAAQuad::Flags>(static_cast<EdgeAAQuad::Flags>(set[i].fAAFlags));
        EdgeAAQuad quad = set[i].fHasClip ? EdgeAAQuad(dstClips + dstClipIndex, flags)
                                          : EdgeAAQuad(dst, flags);

        // TODO: Calling drawGeometry() for each entry re-evaluates the clip stack every time, which
        // is consistent with Ganesh's behavior. It also matches the behavior if edge-AA images were
        // submitted one at a time by SkiaRenderer (a nice client simplification). However, we
        // should explore the performance trade off with doing one bulk evaluation for the whole set
        if (set[i].fMatrixIndex < 0) {
            this->drawGeometry(this->localToDeviceTransform(),
                               Geometry(quad),
                               paintWithShader,
                               DefaultFillStyle(),
                               DrawFlags::kIgnorePathEffect);
        } else {
            // Apply the entry's extra transform between the view matrix and the quad geometry.
            SkM44 xtraTransform(preViewMatrices[set[i].fMatrixIndex]);
            this->drawGeometry(this->localToDeviceTransform().concat(xtraTransform),
                               Geometry(quad),
                               paintWithShader,
                               DefaultFillStyle(),
                               DrawFlags::kIgnorePathEffect);
        }

        // Clipped entries consume 4 points from 'dstClips'; unclipped entries consume none.
        dstClipIndex += 4 * set[i].fHasClip;
    }
}
1055
1056
void Device::drawImageRect(const SkImage* image, const SkRect* src, const SkRect& dst,
1057
                           const SkSamplingOptions& sampling, const SkPaint& paint,
1058
0
                           SkCanvas::SrcRectConstraint constraint) {
1059
0
    SkCanvas::ImageSetEntry single{sk_ref_sp(image),
1060
0
                                   src ? *src : SkRect::Make(image->bounds()),
1061
0
                                   dst,
1062
0
                                   /*alpha=*/1.f,
1063
0
                                   SkCanvas::kAll_QuadAAFlags};
1064
    // While this delegates to drawEdgeAAImageSet() for the image shading logic, semantically a
1065
    // drawImageRect()'s non-AA behavior should match that of drawRect() so we snap dst (and update
1066
    // src to match) if needed before hand.
1067
0
    if (!paint.isAntiAlias()) {
1068
0
        snap_src_and_dst_rect_to_pixels(this->localToDeviceTransform(),
1069
0
                                        &single.fSrcRect, &single.fDstRect);
1070
0
    }
1071
0
    this->drawEdgeAAImageSet(&single, 1, nullptr, nullptr, sampling, paint, constraint);
1072
0
}
1073
1074
0
// Returns a delegate that routes atlas sub-run draws back into this Device's drawAtlasSubRun().
// NOTE(review): the lambda captures 'this' by reference; this assumes the delegate is consumed
// before the Device is destroyed (e.g. within a single glyph-run draw) — confirm at call sites.
sktext::gpu::AtlasDrawDelegate Device::atlasDelegate() {
    return [&](const sktext::gpu::AtlasSubRun* subRun,
               SkPoint drawOrigin,
               const SkPaint& paint,
               sk_sp<SkRefCnt> subRunStorage,
               sktext::gpu::RendererData rendererData) {
        this->drawAtlasSubRun(subRun, drawOrigin, paint, std::move(subRunStorage), rendererData);
    };
}
1083
1084
// Draws a glyph run list by handing it to the Recorder's text blob cache, which converts it to
// atlas sub-runs and calls back into this Device via atlasDelegate().
void Device::onDrawGlyphRunList(SkCanvas* canvas,
                                const sktext::GlyphRunList& glyphRunList,
                                const SkPaint& paint) {
    ASSERT_SINGLE_OWNER
    fRecorder->priv().textBlobCache()->drawGlyphRunList(canvas,
                                                        this->localToDevice(),
                                                        glyphRunList,
                                                        paint,
                                                        this->strikeDeviceInfo(),
                                                        this->atlasDelegate());
}
1095
1096
// Draws the glyphs of an atlas sub-run. Glyphs are uploaded to the text atlas in chunks; when
// the atlas fills up mid-run, all tracked Devices are flushed to free atlas space and the loop
// resumes from the first glyph that didn't fit. 'subRunStorage' keeps the sub-run data alive for
// the recorded draws.
void Device::drawAtlasSubRun(const sktext::gpu::AtlasSubRun* subRun,
                             SkPoint drawOrigin,
                             const SkPaint& paint,
                             sk_sp<SkRefCnt> subRunStorage,
                             sktext::gpu::RendererData rendererData) {
    ASSERT_SINGLE_OWNER

    const int subRunEnd = subRun->glyphCount();
    // Delegate used by the sub-run to (re-)insert a range of glyphs into the Graphite atlas.
    auto regenerateDelegate = [&](sktext::gpu::GlyphVector* glyphs,
                                  int begin,
                                  int end,
                                  skgpu::MaskFormat maskFormat,
                                  int padding) {
        return glyphs->regenerateAtlasForGraphite(begin, end, maskFormat, padding, fRecorder);
    };
    for (int subRunCursor = 0; subRunCursor < subRunEnd;) {
        // For the remainder of the run, add any atlas uploads to the Recorder's TextAtlasManager
        auto[ok, glyphsRegenerated] = subRun->regenerateAtlas(subRunCursor, subRunEnd,
                                                              regenerateDelegate);
        // There was a problem allocating the glyph in the atlas. Bail.
        if (!ok) {
            return;
        }
        if (glyphsRegenerated) {
            auto [bounds, localToDevice] = subRun->vertexFiller().boundsAndDeviceMatrix(
                                                   this->localToDeviceTransform(), drawOrigin);
            SkPaint subRunPaint = paint;
            // For color emoji, shaders don't affect the final color
            if (subRun->maskFormat() == skgpu::MaskFormat::kARGB) {
                subRunPaint.setShader(nullptr);
            }

            bool useGammaCorrectDistanceTable =
                    this->imageInfo().colorSpace() &&
                    this->imageInfo().colorSpace()->gammaIsLinear();
            // Draw only the glyphs regenerated this iteration: [subRunCursor, +glyphsRegenerated).
            this->drawGeometry(localToDevice,
                               Geometry(SubRunData(subRun,
                                                   subRunStorage,
                                                   bounds,
                                                   this->localToDeviceTransform().inverse(),
                                                   subRunCursor,
                                                   glyphsRegenerated,
                                                   SkPaintPriv::ComputeLuminanceColor(subRunPaint),
                                                   useGammaCorrectDistanceTable,
                                                   this->surfaceProps().pixelGeometry(),
                                                   fRecorder,
                                                   rendererData)),
                               subRunPaint,
                               DefaultFillStyle(),
                               DrawFlags::kIgnorePathEffect,
                               SkBlender::Mode(SkBlendMode::kDstIn));
        }
        subRunCursor += glyphsRegenerated;

        if (subRunCursor < subRunEnd) {
            // Flush if not all the glyphs are handled because the atlas is out of space.
            // We flush every Device because the glyphs that are being flushed/referenced are not
            // necessarily specific to this Device. This addresses both multiple SkSurfaces within
            // a Recorder, and nested layers.
            TRACE_EVENT_INSTANT0("skia.gpu", "Glyph atlas full", TRACE_EVENT_SCOPE_NAME_THREAD);
            fRecorder->priv().flushTrackedDevices();
        }
    }
}
1160
1161
// Records a single logical draw: resolves CPU-side paint complexity (path effects, perspective
// fallbacks), picks a Renderer or PathAtlas, computes clip and ordering state, and finally records
// one or more draws into the DrawContext. Stroke-and-fill styles are emulated with two draws that
// share a depth value.
void Device::drawGeometry(const Transform& localToDevice,
                          const Geometry& geometry,
                          const SkPaint& paint,
                          const SkStrokeRec& style,
                          SkEnumBitMask<DrawFlags> flags,
                          sk_sp<SkBlender> primitiveBlender,
                          bool skipColorXform) {
    ASSERT_SINGLE_OWNER

    if (!localToDevice.valid()) {
        // If the transform is not invertible or not finite then drawing isn't well defined.
        SKGPU_LOG_W("Skipping draw with non-invertible/non-finite transform.");
        return;
    }

    // Heavy weight paint options like path effects, mask filters, and stroke-and-fill style are
    // applied on the CPU by generating a new shape and recursing on drawGeometry with updated flags
    if (!(flags & DrawFlags::kIgnorePathEffect) && paint.getPathEffect()) {
        // Apply the path effect before anything else, which if we are applying here, means that we
        // are dealing with a Shape. drawVertices (and a SkVertices geometry) should pass in
        // kIgnorePathEffect per SkCanvas spec. Text geometry also should pass in kIgnorePathEffect
        // because the path effect is applied per glyph by the SkStrikeSpec already.
        SkASSERT(geometry.isShape());

        // TODO: If asADash() returns true and the base path matches the dashing fast path, then
        // that should be detected now as well. Maybe add dashPath to Device so canvas can handle it
        SkStrokeRec newStyle = style;
        float maxScaleFactor = localToDevice.maxScaleFactor();
        if (localToDevice.type() == Transform::Type::kPerspective) {
            // Under perspective the scale varies across the shape; sample the scale factor at all
            // four corners of the local bounds and take the maximum for the res scale.
            auto bounds = geometry.bounds();
            float tl = std::get<1>(localToDevice.scaleFactors({bounds.left(), bounds.top()}));
            float tr = std::get<1>(localToDevice.scaleFactors({bounds.right(), bounds.top()}));
            float br = std::get<1>(localToDevice.scaleFactors({bounds.right(), bounds.bot()}));
            float bl = std::get<1>(localToDevice.scaleFactors({bounds.left(), bounds.bot()}));
            maxScaleFactor = std::max(std::max(tl, tr), std::max(bl, br));
        }
        newStyle.setResScale(maxScaleFactor);
        SkPath dst;
        if (paint.getPathEffect()->filterPath(&dst, geometry.shape().asPath(), &newStyle,
                                              nullptr, localToDevice)) {
            dst.setIsVolatile(true);
            // Recurse using the path and new style, while disabling downstream path effect handling
            this->drawGeometry(localToDevice, Geometry(Shape(dst)), paint, newStyle,
                               flags | DrawFlags::kIgnorePathEffect, std::move(primitiveBlender),
                               skipColorXform);
            return;
        } else {
            SKGPU_LOG_W("Path effect failed to apply, drawing original path.");
            this->drawGeometry(localToDevice, geometry, paint, style,
                               flags | DrawFlags::kIgnorePathEffect, std::move(primitiveBlender),
                               skipColorXform);
            return;
        }
    }

    // TODO: The tessellating and atlas path renderers haven't implemented perspective yet, so
    // transform to device space so we draw something approximately correct (barring local coord
    // issues).
    if (geometry.isShape() && localToDevice.type() == Transform::Type::kPerspective &&
        !is_simple_shape(geometry.shape(), style.getStyle())) {
        SkPath devicePath = geometry.shape().asPath();
        devicePath.transform(localToDevice.matrix().asM33());
        devicePath.setIsVolatile(true);
        // Recurse with an identity transform since the path is now in device space.
        this->drawGeometry(Transform::Identity(), Geometry(Shape(devicePath)), paint, style, flags,
                           std::move(primitiveBlender), skipColorXform);
        return;
    }

    // TODO: Manually snap pixels for rects, rrects, and lines if paint is non-AA (ideally also
    // consider snapping stroke width and/or adjusting geometry for hairlines). This pixel snapping
    // math should be consistent with how non-AA clip [r]rects are handled.

    // If we got here, then path effects should have been handled and the style should be fill or
    // stroke/hairline. Stroke-and-fill is not handled by DrawContext, but is emulated here by
    // drawing twice--one stroke and one fill--using the same depth value.
    SkASSERT(!SkToBool(paint.getPathEffect()) || (flags & DrawFlags::kIgnorePathEffect));

    // TODO: Some renderer decisions could depend on the clip (see PathAtlas::addShape for
    // one workaround) so we should figure out how to remove this circular dependency.

    // We assume that we will receive a renderer, or a PathAtlas. If it's a PathAtlas,
    // then we assume that the renderer chosen in PathAtlas::addShape() will have
    // single-channel coverage, require AA bounds outsetting, and have a single renderStep.
    auto [renderer, pathAtlas] =
            this->chooseRenderer(localToDevice, geometry, style, /*requireMSAA=*/false);
    if (!renderer && !pathAtlas) {
        SKGPU_LOG_W("Skipping draw with no supported renderer or PathAtlas.");
        return;
    }

    // Calculate the clipped bounds of the draw and determine the clip elements that affect the
    // draw without updating the clip stack.
    const bool outsetBoundsForAA = renderer ? renderer->outsetBoundsForAA() : true;
    ClipStack::ElementList clipElements;
    const Clip clip =
            fClip.visitClipStackForDraw(localToDevice, geometry, style, outsetBoundsForAA,
                                        &clipElements);
    if (clip.isClippedOut()) {
        // Clipped out, so don't record anything.
        return;
    }

    // Figure out what dst color requirements we have, if any.
    DstReadRequirement dstReadReq = DstReadRequirement::kNone;
    const SkBlenderBase* blender = as_BB(paint.getBlender());
    const std::optional<SkBlendMode> blendMode = blender ? blender->asBlendMode()
                                                         : SkBlendMode::kSrcOver;
    Coverage rendererCoverage = renderer ? renderer->coverage()
                                         : Coverage::kSingleChannel;
    if ((clip.shader() || !clip.analyticClip().isEmpty()) && rendererCoverage == Coverage::kNone) {
        // Must upgrade to single channel coverage if there is a clip shader or analytic clip;
        // but preserve LCD coverage if the Renderer uses that.
        rendererCoverage = Coverage::kSingleChannel;
    }
    dstReadReq = GetDstReadRequirement(fRecorder->priv().caps(), blendMode, rendererCoverage);

    // A primitive blender should be ignored if there is no primitive color to blend against.
    // Additionally, if a renderer emits a primitive color, then a null primitive blender should
    // be interpreted as SrcOver blending mode.
    if (!renderer || !renderer->emitsPrimitiveColor()) {
        primitiveBlender = nullptr;
    } else if (!SkToBool(primitiveBlender)) {
        primitiveBlender = SkBlender::Mode(SkBlendMode::kSrcOver);
    }

    PaintParams shading{paint,
                        std::move(primitiveBlender),
                        clip.analyticClip(),
                        sk_ref_sp(clip.shader()),
                        dstReadReq,
                        skipColorXform};
    const bool dependsOnDst = paint_depends_on_dst(shading);

    // Some shapes and styles combine multiple draws so the total render step count is split between
    // the main renderer and possibly a secondaryRenderer.
    SkStrokeRec::Style styleType = style.getStyle();
    const Renderer* secondaryRenderer = nullptr;
    Rect innerFillBounds = Rect::InfiniteInverted();
    if (renderer) {
        if (styleType == SkStrokeRec::kStrokeAndFill_Style) {
            // `renderer` covers the fill, `secondaryRenderer` covers the stroke
            secondaryRenderer = fRecorder->priv().rendererProvider()->tessellatedStrokes();
        } else if (style.isFillStyle() && renderer->useNonAAInnerFill() && !dependsOnDst) {
            // `renderer` opts into drawing a non-AA inner fill
            innerFillBounds = get_inner_bounds(geometry, localToDevice);
            if (!innerFillBounds.isEmptyNegativeOrNaN()) {
                secondaryRenderer = fRecorder->priv().rendererProvider()->nonAABounds();
            }
        }
    }
    const int numNewRenderSteps = (renderer ? renderer->numRenderSteps() : 1) +
                                  (secondaryRenderer ? secondaryRenderer->numRenderSteps() : 0);

    // Decide if we have any reason to flush pending work. We want to flush before updating the clip
    // state or making any permanent changes to a path atlas, since otherwise clip operations and/or
    // atlas entries for the current draw will be flushed.
    const bool needsFlush = this->needsFlushBeforeDraw(numNewRenderSteps, dstReadReq);
    if (needsFlush) {
        if (pathAtlas != nullptr) {
            // We need to flush work for all devices associated with the current Recorder.
            // Otherwise we may end up with outstanding draws that depend on past atlas state.
            fRecorder->priv().flushTrackedDevices();
        } else {
            this->flushPendingWorkToRecorder();
        }
    }

    // If an atlas path renderer was chosen we need to insert the shape into the atlas and schedule
    // it to be drawn.
    std::optional<PathAtlas::MaskAndOrigin> atlasMask;  // only used if `pathAtlas != nullptr`
    if (pathAtlas != nullptr) {
        std::tie(renderer, atlasMask) = pathAtlas->addShape(clip.transformedShapeBounds(),
                                                            geometry.shape(),
                                                            localToDevice,
                                                            style);

        // If there was no space in the atlas and we haven't flushed already, then flush pending
        // work to clear up space in the atlas. If we had already flushed once (which would have
        // cleared the atlas) then the atlas is too small for this shape.
        if (!atlasMask && !needsFlush) {
            // We need to flush work for all devices associated with the current Recorder.
            // Otherwise we may end up with outstanding draws that depend on past atlas state.
            fRecorder->priv().flushTrackedDevices();

            // Try inserting the shape again.
            std::tie(renderer, atlasMask) = pathAtlas->addShape(clip.transformedShapeBounds(),
                                                                geometry.shape(),
                                                                localToDevice,
                                                                style);
        }

        if (!atlasMask) {
            SKGPU_LOG_E("Failed to add shape to atlas!");
            // TODO(b/285195175): This can happen if the atlas is not large enough or a compatible
            // atlas texture cannot be created. Handle the first case in `chooseRenderer` and make
            // sure that the atlas path renderer is not chosen if the path is larger than the atlas
            // texture.
            return;
        }
        // Since addShape() was successful we should have a valid Renderer now.
        SkASSERT(renderer && renderer->numRenderSteps() == 1 && !renderer->emitsPrimitiveColor());
    }

#if defined(SK_DEBUG)
    // Renderers and their component RenderSteps have flexibility in defining their
    // DepthStencilSettings. However, the clipping and ordering managed between Device and ClipStack
    // requires that only GREATER or GEQUAL depth tests are used for draws recorded through the
    // client-facing, painters-order-oriented API. We assert here vs. in Renderer's constructor to
    // allow internal-oriented Renderers that are never selected for a "regular" draw call to have
    // more flexibility in their settings.
    SkASSERT(renderer);
    for (const RenderStep* step : renderer->steps()) {
        auto dss = step->depthStencilSettings();
        SkASSERT((!step->performsShading() || dss.fDepthTestEnabled) &&
                 (!dss.fDepthTestEnabled ||
                  dss.fDepthCompareOp == CompareOp::kGreater ||
                  dss.fDepthCompareOp == CompareOp::kGEqual));
    }
#endif

    // Update the clip stack after issuing a flush (if it was needed). A draw will be recorded after
    // this point.
    DrawOrder order(fCurrentDepth.next());
    CompressedPaintersOrder clipOrder = fClip.updateClipStateForDraw(
            clip, clipElements, fColorDepthBoundsManager.get(), order.depth());

    // A draw's order always depends on the clips that must be drawn before it
    order.dependsOnPaintersOrder(clipOrder);
    // If a draw is not opaque, it must be drawn after the most recent draw it intersects with in
    // order to blend correctly.
    if (rendererCoverage != Coverage::kNone || dependsOnDst) {
        CompressedPaintersOrder prevDraw =
            fColorDepthBoundsManager->getMostRecentDraw(clip.drawBounds());
        order.dependsOnPaintersOrder(prevDraw);
    }

    // Now that the base paint order and draw bounds are finalized, if the Renderer relies on the
    // stencil attachment, we compute a secondary sorting field to allow disjoint draws to reorder
    // the RenderSteps across draws instead of in sequence for each draw.
    if (renderer->depthStencilFlags() & DepthStencilFlags::kStencil) {
        DisjointStencilIndex setIndex = fDisjointStencilSet->add(order.paintOrder(),
                                                                 clip.drawBounds());
        order.dependsOnStencil(setIndex);
    }

    // TODO(b/330864257): This is an extra traversal of all paint effects, that can be avoided when
    // the paint key itself is determined inside this function.
    shading.notifyImagesInUse(fRecorder, fDC.get());

    // If an atlas path renderer was chosen, then record a single CoverageMaskShape draw.
    // The shape will be scheduled to be rendered or uploaded into the atlas during the
    // next invocation of flushPendingWorkToRecorder().
    if (pathAtlas != nullptr) {
        // Record the draw as a fill since stroking is handled by the atlas render/upload.
        SkASSERT(atlasMask.has_value());
        auto [mask, origin] = *atlasMask;
        fDC->recordDraw(renderer, Transform::Translate(origin.fX, origin.fY), Geometry(mask),
                        clip, order, &shading, nullptr);
    } else {
        if (styleType == SkStrokeRec::kStroke_Style ||
            styleType == SkStrokeRec::kHairline_Style ||
            styleType == SkStrokeRec::kStrokeAndFill_Style) {
            // For stroke-and-fill, 'renderer' is used for the fill and we always use the
            // TessellatedStrokes renderer; for stroke and hairline, 'renderer' is used.
            StrokeStyle stroke(style.getWidth(), style.getMiter(), style.getJoin(), style.getCap());
            fDC->recordDraw(styleType == SkStrokeRec::kStrokeAndFill_Style
                                   ? fRecorder->priv().rendererProvider()->tessellatedStrokes()
                                   : renderer,
                            localToDevice, geometry, clip, order, &shading, &stroke);
        }
        if (styleType == SkStrokeRec::kFill_Style ||
            styleType == SkStrokeRec::kStrokeAndFill_Style) {
            // Possibly record an additional draw using the non-AA bounds renderer to fill the
            // interior with a renderer that can disable blending entirely.
            if (!innerFillBounds.isEmptyNegativeOrNaN()) {
                SkASSERT(!dependsOnDst && renderer->useNonAAInnerFill());
                DrawOrder orderWithoutCoverage{order.depth()};
                orderWithoutCoverage.dependsOnPaintersOrder(clipOrder);
                fDC->recordDraw(fRecorder->priv().rendererProvider()->nonAABounds(),
                                localToDevice, Geometry(Shape(innerFillBounds)),
                                clip, orderWithoutCoverage, &shading, nullptr);
                // Force the coverage draw to come after the non-AA draw in order to benefit from
                // early depth testing.
                order.dependsOnPaintersOrder(orderWithoutCoverage.paintOrder());
            }
            fDC->recordDraw(renderer, localToDevice, geometry, clip, order, &shading, nullptr);
        }
    }

    // Post-draw book keeping (bounds manager, depth tracking, etc.)
    fColorDepthBoundsManager->recordDraw(clip.drawBounds(), order.paintOrder());
    fCurrentDepth = order.depth();

    // TODO(b/238758897): When we enable layer elision that depends on draws not overlapping, we
    // can use the `getMostRecentDraw()` query to determine that, although that will mean querying
    // even if the draw does not depend on dst (so should be only be used when the Device is an
    // elision candidate).
}
Unexecuted instantiation: skgpu::graphite::Device::drawGeometry(skgpu::graphite::Transform const&, skgpu::graphite::Geometry const&, SkPaint const&, SkStrokeRec const&, SkEnumBitMask<skgpu::graphite::Device::DrawFlags>, sk_sp<SkBlender>, bool)
Unexecuted instantiation: skgpu::graphite::Device::drawGeometry(skgpu::graphite::Transform const&, skgpu::graphite::Geometry const&, SkPaint const&, SkStrokeRec const&, SkEnumBitMask<skgpu::graphite::Device::DrawFlags>, sk_sp<SkBlender>, bool)
1459
1460
// Records a depth-only draw for a deferred clip shape. Clip draws carry no paint (null
// PaintParams) and are always filled (null StrokeStyle); the resulting depth writes restrict
// subsequent draws via the depth test.
void Device::drawClipShape(const Transform& localToDevice,
                           const Shape& shape,
                           const Clip& clip,
                           DrawOrder order) {
    // A clip draw's state is almost fully defined by the ClipStack. The only thing we need
    // to account for is selecting a Renderer and tracking the stencil buffer usage.
    Geometry geometry{shape};
    auto [renderer, pathAtlas] = this->chooseRenderer(localToDevice,
                                                      geometry,
                                                      DefaultFillStyle(),
                                                      /*requireMSAA=*/true);
    if (!renderer) {
        SKGPU_LOG_W("Skipping clip with no supported path renderer.");
        return;
    } else if (renderer->depthStencilFlags() & DepthStencilFlags::kStencil) {
        // Stencil-based renderers get a disjoint-set index so RenderSteps of non-overlapping
        // draws can be reordered relative to each other.
        DisjointStencilIndex setIndex = fDisjointStencilSet->add(order.paintOrder(),
                                                                 clip.drawBounds());
        order.dependsOnStencil(setIndex);
    }

    // This call represents one of the deferred clip shapes that's already pessimistically counted
    // in needsFlushBeforeDraw(), so the DrawContext should have room to add it.
    SkASSERT(fDC->pendingRenderSteps() + renderer->numRenderSteps() < DrawList::kMaxRenderSteps);

    // Anti-aliased clipping requires the renderer to use MSAA to modify the depth per sample, so
    // analytic coverage renderers cannot be used.
    SkASSERT(renderer->coverage() == Coverage::kNone && renderer->requiresMSAA());
    SkASSERT(pathAtlas == nullptr);

    // Clips draws are depth-only (null PaintParams), and filled (null StrokeStyle).
    // TODO: Remove this CPU-transform once perspective is supported for all path renderers
    if (localToDevice.type() == Transform::Type::kPerspective) {
        SkPath devicePath = geometry.shape().asPath();
        devicePath.transform(localToDevice.matrix().asM33());
        fDC->recordDraw(renderer, Transform::Identity(), Geometry(Shape(devicePath)), clip, order,
                        nullptr, nullptr);
    } else {
        fDC->recordDraw(renderer, localToDevice, geometry, clip, order, nullptr, nullptr);
    }
    // This ensures that draws recorded after this clip shape has been popped off the stack will
    // be unaffected by the Z value the clip shape wrote to the depth attachment.
    if (order.depth() > fCurrentDepth) {
        fCurrentDepth = order.depth();
    }
}
Unexecuted instantiation: skgpu::graphite::Device::drawClipShape(skgpu::graphite::Transform const&, skgpu::graphite::Shape const&, skgpu::graphite::Clip const&, skgpu::graphite::DrawOrder)
Unexecuted instantiation: skgpu::graphite::Device::drawClipShape(skgpu::graphite::Transform const&, skgpu::graphite::Shape const&, skgpu::graphite::Clip const&, skgpu::graphite::DrawOrder)
1505
1506
// TODO: Currently all Renderers are always defined, but with config options and caps that may not
1507
// be the case, in which case chooseRenderer() will have to go through compatible choices.
1508
// Selects the Renderer (and possibly a PathAtlas) used to draw `geometry` with `style`.
// Exactly one of the returned pointers is expected to be meaningful for path shapes: a non-null
// PathAtlas means the atlas will pick the actual Renderer later in PathAtlas::addShape().
// `requireMSAA` is set for clip draws, which must modify depth per sample and therefore cannot
// use analytic-coverage or atlas-based techniques.
std::pair<const Renderer*, PathAtlas*> Device::chooseRenderer(const Transform& localToDevice,
                                                              const Geometry& geometry,
                                                              const SkStrokeRec& style,
                                                              bool requireMSAA) const {
    const RendererProvider* renderers = fRecorder->priv().rendererProvider();
    SkASSERT(renderers);
    SkStrokeRec::Style type = style.getStyle();

    // Non-shape geometries map 1:1 to dedicated renderers; handle them first.
    if (geometry.isSubRun()) {
        SkASSERT(!requireMSAA);
        sktext::gpu::RendererData rendererData = geometry.subRunData().rendererData();
        if (!rendererData.isSDF) {
            return {renderers->bitmapText(rendererData.isLCD, rendererData.maskFormat), nullptr};
        }
        // Even though the SkPaint can request subpixel rendering, we still need to match
        // this with the pixel geometry.
        bool useLCD = rendererData.isLCD &&
                      geometry.subRunData().pixelGeometry() != kUnknown_SkPixelGeometry;
        return {renderers->sdfText(useLCD), nullptr};
    } else if (geometry.isVertices()) {
        SkVerticesPriv info(geometry.vertices()->priv());
        return {renderers->vertices(info.mode(), info.hasColors(), info.hasTexCoords()), nullptr};
    } else if (geometry.isCoverageMaskShape()) {
        // drawCoverageMask() passes in CoverageMaskShapes that reference a provided texture.
        // The CoverageMask renderer can also be chosen later on if the shape is assigned to
        // to be rendered into the PathAtlas, in which case the 2nd return value is non-null.
        return {renderers->coverageMask(), nullptr};
    } else if (geometry.isEdgeAAQuad()) {
        SkASSERT(!requireMSAA && style.isFillStyle());
        // handled by specialized system, simplified from rects and round rects
        const EdgeAAQuad& quad = geometry.edgeAAQuad();
        if (quad.isRect() && quad.edgeFlags() == EdgeAAQuad::Flags::kNone) {
            // For non-AA rectangular quads, it can always use a coverage-less renderer; there's no
            // need to check for pixel alignment to avoid popping if MSAA is turned on because quad
            // tile edges will seam with each in either mode.
            return {renderers->nonAABounds(), nullptr};
        } else {
            return {renderers->perEdgeAAQuad(), nullptr};
        }
    } else if (geometry.isAnalyticBlur()) {
        return {renderers->analyticBlur(), nullptr};
    } else if (!geometry.isShape()) {
        // We must account for new Geometry types with specific Renderers
        return {nullptr, nullptr};
    }

    const Shape& shape = geometry.shape();
    // We can't use this renderer if we require MSAA for an effect (i.e. clipping or stroke+fill).
    if (!requireMSAA && is_simple_shape(shape, type)) {
        // For pixel-aligned rects, use the the non-AA bounds renderer to avoid triggering any
        // dst-read requirement due to src blending.
        bool pixelAlignedRect = false;
        if (shape.isRect() && style.isFillStyle() &&
            localToDevice.type() <= Transform::Type::kRectStaysRect) {
            Rect devRect = localToDevice.mapRect(shape.rect());
            pixelAlignedRect = devRect.nearlyEquals(devRect.makeRound());
        }

        if (shape.isEmpty() || pixelAlignedRect) {
            // An empty shape is only reachable here for inverse fills (it still covers area).
            SkASSERT(!shape.isEmpty() || shape.inverted());
            return {renderers->nonAABounds(), nullptr};
        } else {
            return {renderers->analyticRRect(), nullptr};
        }
    }

    // Path rendering options. For now the strategy is very simple and not optimal:
    // I. Use tessellation if MSAA is required for an effect.
    // II: otherwise:
    //    1. Always use compute AA if supported unless it was excluded by ContextOptions or the
    //       compute renderer cannot render the shape efficiently yet (based on the result of
    //       `isSuitableForAtlasing`).
    //    2. Fall back to CPU raster AA if hardware MSAA is disabled or it was explicitly requested
    //       via ContextOptions.
    //    3. Otherwise use tessellation.
#if defined(GPU_TEST_UTILS)
    PathRendererStrategy strategy = fRecorder->priv().caps()->requestedPathRendererStrategy();
#else
    PathRendererStrategy strategy = PathRendererStrategy::kDefault;
#endif

    PathAtlas* pathAtlas = nullptr;
    AtlasProvider* atlasProvider = fRecorder->priv().atlasProvider();

    // Prefer compute atlas draws if supported. This currently implicitly filters out clip draws as
    // they require MSAA. Eventually we may want to route clip shapes to the atlas as well but not
    // if hardware MSAA is required.
    std::optional<Rect> drawBounds;
    if (atlasProvider->isAvailable(AtlasProvider::PathAtlasFlags::kCompute) &&
        use_compute_atlas_when_available(strategy)) {
        PathAtlas* atlas = fDC->getComputePathAtlas(fRecorder);
        SkASSERT(atlas);

        // Don't use the compute renderer if it can't handle the shape efficiently.
        //
        // Use the conservative clip bounds for a rough estimate of the mask size (this avoids
        // having to evaluate the entire clip stack before choosing the renderer as it will have to
        // get evaluated again if we fall back to a different renderer).
        drawBounds = localToDevice.mapRect(shape.bounds());
        if (atlas->isSuitableForAtlasing(*drawBounds, fClip.conservativeBounds())) {
            pathAtlas = atlas;
        }
    }

    // Fall back to CPU rendered paths when multisampling is disabled and the compute atlas is not
    // available.
    // TODO: enable other uses of the software path renderer
    if (!pathAtlas && atlasProvider->isAvailable(AtlasProvider::PathAtlasFlags::kRaster) &&
        (strategy == PathRendererStrategy::kRasterAA ||
         (strategy == PathRendererStrategy::kDefault && !fMSAASupported))) {
        // NOTE: RasterPathAtlas doesn't implement `PathAtlas::isSuitableForAtlasing` as it doesn't
        // reject paths (unlike ComputePathAtlas).
        pathAtlas = atlasProvider->getRasterPathAtlas();
    }

    if (!requireMSAA && pathAtlas) {
        // If we got here it means that we should draw with an atlas renderer if we can and avoid
        // resorting to one of the tessellating techniques.
        return {nullptr, pathAtlas};
    }

    // If we got here, it requires tessellated path rendering or an MSAA technique applied to a
    // simple shape (so we interpret them as paths to reduce the number of pipelines we need).

    // TODO: All shapes that select a tessellating path renderer need to be "pre-chopped" if they
    // are large enough to exceed the fixed count tessellation limits. Fills are pre-chopped to the
    // viewport bounds, strokes and stroke-and-fills are pre-chopped to the viewport bounds outset
    // by the stroke radius (hence taking the whole style and not just its type).

    if (type == SkStrokeRec::kStroke_Style ||
        type == SkStrokeRec::kHairline_Style) {
        // Unlike in Ganesh, the HW stroke tessellator can work with arbitrary paints since the
        // depth test prevents double-blending when there is transparency, thus we can HW stroke
        // any path regardless of its paint.
        // TODO: We treat inverse-filled strokes as regular strokes. We could handle them by
        // stenciling first with the HW stroke tessellator and then covering their bounds, but
        // inverse-filled strokes are not well-specified in our public canvas behavior so we may be
        // able to remove it.
        return {renderers->tessellatedStrokes(), nullptr};
    }

    // 'type' could be kStrokeAndFill, but in that case chooseRenderer() is meant to return the
    // fill renderer since tessellatedStrokes() will always be used for the stroke pass.
    if (shape.convex() && !shape.inverted()) {
        // TODO: Ganesh doesn't have a curve+middle-out triangles option for convex paths, but it
        // would be pretty trivial to spin up.
        return {renderers->convexTessellatedWedges(), nullptr};
    } else {
        if (!drawBounds.has_value()) {
            drawBounds = localToDevice.mapRect(shape.bounds());
        }
        drawBounds->intersect(fClip.conservativeBounds());
        const bool preferWedges =
                // If the draw bounds don't intersect with the clip stack's conservative bounds,
                // we'll be drawing a very small area at most, accounting for coverage, so just
                // stick with drawing wedges in that case.
                drawBounds->isEmptyNegativeOrNaN() ||

                // TODO: Combine this heuristic with what is used in PathStencilCoverOp to choose
                // between wedges curves consistently in Graphite and Ganesh.
                (shape.isPath() && shape.path().countVerbs() < 50) ||
                drawBounds->area() <= (256 * 256);

        if (preferWedges) {
            return {renderers->stencilTessellatedWedges(shape.fillType()), nullptr};
        } else {
            return {renderers->stencilTessellatedCurvesAndTris(shape.fillType()), nullptr};
        }
    }
}
Unexecuted instantiation: skgpu::graphite::Device::chooseRenderer(skgpu::graphite::Transform const&, skgpu::graphite::Geometry const&, SkStrokeRec const&, bool) const
Unexecuted instantiation: skgpu::graphite::Device::chooseRenderer(skgpu::graphite::Transform const&, skgpu::graphite::Geometry const&, SkStrokeRec const&, bool) const
1678
1679
0
sk_sp<Task> Device::lastDrawTask() const {
    // Only scratch devices retain their most recent draw task, so querying it from a
    // non-scratch device is a usage error.
    SkASSERT(this->isScratchDevice());
    sk_sp<Task> task = fLastTask;
    return task;
}
Unexecuted instantiation: skgpu::graphite::Device::lastDrawTask() const
Unexecuted instantiation: skgpu::graphite::Device::lastDrawTask() const
1683
1684
0
// Flushes all pending recorded work (uploads, clip draws, draws) into a DrawTask
// and hands it to the Recorder's root task list (non-scratch devices) or stores
// it in fLastTask (scratch devices). Guarded against re-entrant calls via
// fIsFlushing; see the TODO below for why recursion can happen.
void Device::flushPendingWorkToRecorder() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    // If this is a scratch device being flushed, it should only be flushing into the expected
    // next recording from when the Device was first created.
    SkASSERT(fRecorder);
    SkASSERT(fScopedRecordingID == 0 || fScopedRecordingID == fRecorder->priv().nextRecordingID());

    // TODO(b/330864257):  flushPendingWorkToRecorder() can be recursively called if this Device
    // recorded a picture shader draw and during a flush (triggered by snap or automatically from
    // reaching limits), the picture shader will be rendered to a new device. If that picture drawn
    // to the temporary device fills up an atlas it can trigger the global
    // recorder->flushTrackedDevices(), which will then encounter this device that is already in
    // the midst of flushing. To avoid crashing we only actually flush the first time this is called
    // and set a bit to early-out on any recursive calls.
    // This is not an ideal solution since the temporary Device's flush-the-world may have reset
    // atlas entries that the current Device's flushed draws will reference. But at this stage it's
    // not possible to split the already recorded draws into a before-list and an after-list that
    // can reference the old and new contents of the atlas. While avoiding the crash, this may cause
    // incorrect accesses to a shared atlas. Once paint data is extracted at draw time, picture
    // shaders will be resolved outside of flushes and then this will be fixed automatically.
    if (fIsFlushing) {
        return;
    } else {
        fIsFlushing = true;
    }

    this->internalFlush();
    sk_sp<Task> drawTask = fDC->snapDrawTask(fRecorder);
    if (this->isScratchDevice()) {
        // TODO(b/323887221): Once shared atlas resources are less brittle, scratch devices won't
        // flush to the recorder at all and will only store the snapped task here.
        fLastTask = drawTask;
    } else {
        // Non-scratch devices do not need to point back to the last snapped task since they are
        // always added to the root task list.
        // TODO: It is currently possible for scratch devices to be flushed and instantiated before
        // their work is finished, meaning they will produce additional tasks to be included in
        // a follow-up Recording: https://chat.google.com/room/AAAA2HlH94I/YU0XdFqX2Uw.
        // However, in this case they no longer appear scratch because the first Recording
        // instantiated the targets. When scratch devices are not actually registered with the
        // Recorder and are only included when they are drawn (e.g. restored), we should be able to
        // assert that `fLastTask` is null.
        fLastTask = nullptr;
    }

    if (drawTask) {
        fRecorder->priv().add(std::move(drawTask));

        // TODO(b/297344089): This always regenerates mipmaps on the draw target when it's drawn to.
        // This could be wasteful if we draw to a target multiple times before reading from it with
        // downscaling.
        if (fDC->target()->mipmapped() == Mipmapped::kYes) {
            if (!GenerateMipmaps(fRecorder, fDC->refTarget(), fDC->colorInfo())) {
                SKGPU_LOG_W("Device::flushPendingWorkToRecorder: Failed to generate mipmaps");
            }
        }
    }

    fIsFlushing = false;
}
Unexecuted instantiation: skgpu::graphite::Device::flushPendingWorkToRecorder()
Unexecuted instantiation: skgpu::graphite::Device::flushPendingWorkToRecorder()
1745
1746
0
// Records all deferred per-flush work into the DrawContext's task list and then
// resets the Device's draw-ordering state. The ordering here matters: uploads
// must be recorded before the draws that reference them, and clip draws are
// deferred until flush so their Z values are final.
void Device::internalFlush() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ASSERT_SINGLE_OWNER

    // Push any pending uploads from the atlas provider that pending draws reference.
    fRecorder->priv().atlasProvider()->recordUploads(fDC.get());

    // Clip shapes are depth-only draws, but aren't recorded in the DrawContext until a flush in
    // order to determine the Z values for each element.
    fClip.recordDeferredClipDraws();

    // Flush all pending items to the internal task list and reset Device tracking state
    fDC->flush(fRecorder);

    // Reset per-flush draw-ordering state so the next batch of draws starts clean.
    fColorDepthBoundsManager->reset();
    fDisjointStencilSet->reset();
    fCurrentDepth = DrawOrder::kClearDepth;

    // Any cleanup in the AtlasProvider
    fRecorder->priv().atlasProvider()->compact(/*forceCompact=*/false);
}
Unexecuted instantiation: skgpu::graphite::Device::internalFlush()
Unexecuted instantiation: skgpu::graphite::Device::internalFlush()
1767
1768
0
bool Device::needsFlushBeforeDraw(int numNewRenderSteps, DstReadRequirement dstReadReq) const {
1769
    // Must also account for the elements in the clip stack that might need to be recorded.
1770
0
    numNewRenderSteps += fClip.maxDeferredClipDraws() * Renderer::kMaxRenderSteps;
1771
0
    return // Need flush if we don't have room to record into the current list.
1772
0
           (DrawList::kMaxRenderSteps - fDC->pendingRenderSteps()) < numNewRenderSteps ||
1773
           // Need flush if this draw needs to copy the dst surface for reading.
1774
0
           dstReadReq == DstReadRequirement::kTextureCopy;
1775
0
}
1776
1777
void Device::drawSpecial(SkSpecialImage* special,
1778
                         const SkMatrix& localToDevice,
1779
                         const SkSamplingOptions& sampling,
1780
                         const SkPaint& paint,
1781
0
                         SkCanvas::SrcRectConstraint constraint) {
1782
0
    SkASSERT(!paint.getMaskFilter() && !paint.getImageFilter());
1783
1784
0
    sk_sp<SkImage> img = special->asImage();
1785
0
    if (!img || !as_IB(img)->isGraphiteBacked()) {
1786
0
        SKGPU_LOG_W("Couldn't get Graphite-backed special image as image");
1787
0
        return;
1788
0
    }
1789
1790
0
    SkPaint paintWithShader(paint);
1791
0
    SkRect dst = SkModifyPaintAndDstForDrawImageRect(
1792
0
            img.get(),
1793
0
            sampling,
1794
0
            /*src=*/SkRect::Make(special->subset()),
1795
0
            /*dst=*/SkRect::MakeIWH(special->width(), special->height()),
1796
0
            /*strictSrcSubset=*/constraint == SkCanvas::kStrict_SrcRectConstraint,
1797
0
            &paintWithShader);
1798
0
    if (dst.isEmpty()) {
1799
0
        return;
1800
0
    }
1801
1802
    // The image filtering and layer code paths often rely on the paint being non-AA to avoid
1803
    // coverage operations. To stay consistent with the other backends, we use an edge AA "quad"
1804
    // whose flags match the paint's AA request.
1805
0
    EdgeAAQuad::Flags aaFlags = paint.isAntiAlias() ? EdgeAAQuad::Flags::kAll
1806
0
                                                    : EdgeAAQuad::Flags::kNone;
1807
0
    this->drawGeometry(Transform(SkM44(localToDevice)),
1808
0
                       Geometry(EdgeAAQuad(dst, aaFlags)),
1809
0
                       paintWithShader,
1810
0
                       DefaultFillStyle(),
1811
0
                       DrawFlags::kIgnorePathEffect);
1812
0
}
Unexecuted instantiation: skgpu::graphite::Device::drawSpecial(SkSpecialImage*, SkMatrix const&, SkSamplingOptions const&, SkPaint const&, SkCanvas::SrcRectConstraint)
Unexecuted instantiation: skgpu::graphite::Device::drawSpecial(SkSpecialImage*, SkMatrix const&, SkSamplingOptions const&, SkPaint const&, SkCanvas::SrcRectConstraint)
1813
1814
// Draws 'mask' as a coverage mask: the image's texture supplies coverage that is
// combined with 'paint', positioned by 'localToDevice'. Note that 'sampling' is
// not consumed here; per the NOTE below, CoverageMaskRenderStep controls the
// final sampling options.
void Device::drawCoverageMask(const SkSpecialImage* mask,
                              const SkMatrix& localToDevice,
                              const SkSamplingOptions& sampling,
                              const SkPaint& paint) {
    // Capture the mask's subset origin and dimensions for the CoverageMaskShape.
    CoverageMaskShape::MaskInfo maskInfo{/*fTextureOrigin=*/{SkTo<uint16_t>(mask->subset().fLeft),
                                                             SkTo<uint16_t>(mask->subset().fTop)},
                                         /*fMaskSize=*/{SkTo<uint16_t>(mask->width()),
                                                        SkTo<uint16_t>(mask->height())}};

    auto maskProxyView = AsView(mask->asImage());
    if (!maskProxyView) {
        SKGPU_LOG_W("Couldn't get Graphite-backed special image as texture proxy view");
        return;
    }

    // Every other "Image" draw reaches the underlying texture via AddToKey/NotifyInUse, which
    // handles notifying the image and either flushing the linked surface or attaching draw tasks
    // from a scratch device to the current draw context. In this case, 'mask' is very likely to
    // be linked to a scratch device, but we must perform the same notifyInUse manually here because
    // the texture is consumed by the RenderStep and not part of the PaintParams.
    static_cast<Image_Base*>(mask->asImage().get())->notifyInUse(fRecorder, fDC.get());

    // 'mask' logically has 0 coverage outside of its pixels, which is equivalent to kDecal tiling.
    // However, since we draw geometry tightly fitting 'mask', we can use the better-supported
    // kClamp tiling and behave effectively the same way.
    TextureDataBlock::SampledTexture sampledMask{maskProxyView.refProxy(),
                                                 {SkFilterMode::kLinear, SkTileMode::kClamp}};
    // Ensure this is kept alive; normally textures are kept alive by the PipelineDataGatherer for
    // image shaders, or by the PathAtlas. This is a unique circumstance.
    // NOTE: CoverageMaskRenderStep controls the final sampling options; this texture data block
    // serves only to keep the mask alive so the sampling passed to add() doesn't matter.
    fRecorder->priv().textureDataCache()->insert(TextureDataBlock(sampledMask));

    // CoverageMaskShape() wraps a Shape when it's used as a PathAtlas, but in this case the
    // original shape has been long lost, so just use a Rect that bounds the image.
    CoverageMaskShape maskShape{Shape{Rect::WH((float)mask->width(), (float)mask->height())},
                                maskProxyView.proxy(),
                                // Use the active local-to-device transform for this since it
                                // determines the local coords for evaluating the skpaint, whereas
                                // the provided 'localToDevice' just places the coverage mask.
                                this->localToDeviceTransform().inverse(),
                                maskInfo};

    this->drawGeometry(Transform(SkM44(localToDevice)),
                       Geometry(maskShape),
                       paint,
                       DefaultFillStyle(),
                       DrawFlags::kIgnorePathEffect);
}
1863
1864
0
// Creating a special image from a raster SkBitmap is not supported by this
// device; always returns nullptr.
sk_sp<SkSpecialImage> Device::makeSpecial(const SkBitmap&) {
    return nullptr;
}
1867
1868
0
// Creating a special image directly from an SkImage is not supported by this
// device; always returns nullptr.
sk_sp<SkSpecialImage> Device::makeSpecial(const SkImage*) {
    return nullptr;
}
1871
1872
0
// Snaps the device's current contents as a special image, either by copying the
// requested subset or by wrapping the device's surface directly in an image view.
sk_sp<SkSpecialImage> Device::snapSpecial(const SkIRect& subset, bool forceCopy) {
    // NOTE: snapSpecial() can be called even after the device has been marked immutable (null
    // recorder), but in those cases it should not be a copy and just returns the image view.
    TextureProxyView surfaceView = this->readSurfaceView();
    const bool mustCopy = forceCopy || !surfaceView || surfaceView.proxy()->isFullyLazy();

    sk_sp<Image> image;
    SkIRect imageSubset;
    if (mustCopy) {
        // Copy into a new image; the special image then covers the whole copy.
        image = this->makeImageCopy(subset, Budgeted::kYes, Mipmapped::kNo, SkBackingFit::kApprox);
        imageSubset = SkIRect::MakeSize(subset.size());
    } else {
        // TODO(b/323886870): For now snapSpecial() force adds the pending work to the recorder's
        // root task list. Once shared atlas management is solved and DrawTasks can be nested in a
        // graph then this can go away in favor of auto-flushing through the image's linked device.
        if (fRecorder) {
            this->flushPendingWorkToRecorder();
        }
        image = Image::WrapDevice(sk_ref_sp(this));
        imageSubset = subset;
    }

    if (!image) {
        return nullptr;
    }

    // For non-copying "snapSpecial", the semantics are returning an image view of the surface data,
    // and relying on higher-level draw and restore logic for the contents to make sense.
    return SkSpecialImages::MakeGraphite(
            fRecorder, imageSubset, std::move(image), this->surfaceProps());
}
1901
1902
// Image filters drawn through this device are evaluated by the Graphite-backed
// skif backend, which shares this device's recorder.
sk_sp<skif::Backend> Device::createImageFilteringBackend(const SkSurfaceProps& surfaceProps,
                                                         SkColorType colorType) const {
    return skif::MakeGraphiteBackend(fRecorder, surfaceProps, colorType);
}
1906
1907
0
// The texture proxy that draws are recorded into, provided by the DrawContext.
TextureProxy* Device::target() { return fDC->target(); }
1908
1909
0
// A view of the device's target suitable for reading back, provided by the DrawContext.
TextureProxyView Device::readSurfaceView() const { return fDC->readSurfaceView(); }
1910
1911
0
bool Device::isScratchDevice() const {
1912
    // Scratch device status is inferred from whether or not the Device's target is instantiated.
1913
    // By default devices start out un-instantiated unless they are wrapping an existing backend
1914
    // texture (definitely not a scratch scenario), or Surface explicitly instantiates the target
1915
    // before returning to the client (not a scratch scenario).
1916
    //
1917
    // Scratch device targets are instantiated during the prepareResources() phase of
1918
    // Recorder::snap(). Truly scratch devices that have gone out of scope as intended will have
1919
    // already been destroyed at this point. Scratch devices that become longer-lived (linked to
1920
    // a client-owned object) automatically transition to non-scratch usage.
1921
0
    return !fDC->target()->isInstantiated() && !fDC->target()->isLazy();
1922
0
}
1923
1924
// Converts a glyph run list into a retained Slug via the shared GPU text
// implementation, capturing this device's current transform and strike info.
sk_sp<sktext::gpu::Slug> Device::convertGlyphRunListToSlug(const sktext::GlyphRunList& glyphRunList,
                                                           const SkPaint& paint) {
    return sktext::gpu::SlugImpl::Make(this->localToDevice(),
                                       glyphRunList,
                                       paint,
                                       this->strikeDeviceInfo(),
                                       SkStrikeCache::GlobalStrikeCache());
}
1932
1933
0
// Replays a previously recorded Slug onto this device. The cast assumes slugs
// reaching a Graphite device were created as SlugImpl (see convertGlyphRunListToSlug).
void Device::drawSlug(SkCanvas* canvas, const sktext::gpu::Slug* slug, const SkPaint& paint) {
    const auto* impl = static_cast<const sktext::gpu::SlugImpl*>(slug);
    impl->subRuns()->draw(canvas, impl->origin(), paint, impl, this->atlasDelegate());
}
1937
1938
0
bool Device::drawBlurredRRect(const SkRRect& rrect, const SkPaint& paint, float deviceSigma) {
1939
0
    SkStrokeRec style(paint);
1940
0
    if (skgpu::BlurIsEffectivelyIdentity(deviceSigma)) {
1941
0
        this->drawGeometry(this->localToDeviceTransform(),
1942
0
                           Geometry(rrect.isRect() ? Shape(rrect.rect()) : Shape(rrect)),
1943
0
                           paint,
1944
0
                           style);
1945
0
        return true;
1946
0
    }
1947
1948
0
    std::optional<AnalyticBlurMask> analyticBlur = AnalyticBlurMask::Make(
1949
0
            this->recorder(), this->localToDeviceTransform(), deviceSigma, rrect);
1950
0
    if (!analyticBlur) {
1951
0
        return false;
1952
0
    }
1953
1954
0
    this->drawGeometry(this->localToDeviceTransform(), Geometry(*analyticBlur), paint, style);
1955
0
    return true;
1956
0
}
1957
1958
} // namespace skgpu::graphite