/src/skia/src/gpu/graphite/Context.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright 2021 Google LLC |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license that can be |
5 | | * found in the LICENSE file. |
6 | | */ |
7 | | |
8 | | #include "include/gpu/graphite/Context.h" |
9 | | |
10 | | #include "include/core/SkColorSpace.h" |
11 | | #include "include/core/SkPathTypes.h" |
12 | | #include "include/core/SkTraceMemoryDump.h" |
13 | | #include "include/effects/SkRuntimeEffect.h" |
14 | | #include "include/gpu/graphite/BackendTexture.h" |
15 | | #include "include/gpu/graphite/Recorder.h" |
16 | | #include "include/gpu/graphite/Recording.h" |
17 | | #include "include/gpu/graphite/Surface.h" |
18 | | #include "include/gpu/graphite/TextureInfo.h" |
19 | | #include "src/base/SkRectMemcpy.h" |
20 | | #include "src/core/SkAutoPixmapStorage.h" |
21 | | #include "src/core/SkConvertPixels.h" |
22 | | #include "src/core/SkTraceEvent.h" |
23 | | #include "src/core/SkYUVMath.h" |
24 | | #include "src/gpu/RefCntedCallback.h" |
25 | | #include "src/gpu/graphite/AtlasProvider.h" |
26 | | #include "src/gpu/graphite/BufferManager.h" |
27 | | #include "src/gpu/graphite/Caps.h" |
28 | | #include "src/gpu/graphite/ClientMappedBufferManager.h" |
29 | | #include "src/gpu/graphite/CommandBuffer.h" |
30 | | #include "src/gpu/graphite/ContextPriv.h" |
31 | | #include "src/gpu/graphite/DrawAtlas.h" |
32 | | #include "src/gpu/graphite/GlobalCache.h" |
33 | | #include "src/gpu/graphite/GraphicsPipeline.h" |
34 | | #include "src/gpu/graphite/GraphicsPipelineDesc.h" |
35 | | #include "src/gpu/graphite/Image_Graphite.h" |
36 | | #include "src/gpu/graphite/KeyContext.h" |
37 | | #include "src/gpu/graphite/Log.h" |
38 | | #include "src/gpu/graphite/QueueManager.h" |
39 | | #include "src/gpu/graphite/RecorderPriv.h" |
40 | | #include "src/gpu/graphite/RecordingPriv.h" |
41 | | #include "src/gpu/graphite/Renderer.h" |
42 | | #include "src/gpu/graphite/RendererProvider.h" |
43 | | #include "src/gpu/graphite/ResourceProvider.h" |
44 | | #include "src/gpu/graphite/RuntimeEffectDictionary.h" |
45 | | #include "src/gpu/graphite/ShaderCodeDictionary.h" |
46 | | #include "src/gpu/graphite/SharedContext.h" |
47 | | #include "src/gpu/graphite/Surface_Graphite.h" |
48 | | #include "src/gpu/graphite/TextureProxyView.h" |
49 | | #include "src/gpu/graphite/TextureUtils.h" |
50 | | #include "src/gpu/graphite/task/CopyTask.h" |
51 | | #include "src/gpu/graphite/task/SynchronizeToCpuTask.h" |
52 | | #include "src/gpu/graphite/task/UploadTask.h" |
53 | | |
54 | | #include "src/image/SkSurface_Base.h" |
55 | | |
56 | | #if defined(GRAPHITE_TEST_UTILS) |
57 | | #include "include/private/gpu/graphite/ContextOptionsPriv.h" |
58 | | #if defined(SK_DAWN) |
59 | | #include "src/gpu/graphite/dawn/DawnSharedContext.h" |
60 | | #include "webgpu/webgpu_cpp.h" // NO_G3_REWRITE |
61 | | #endif |
62 | | #endif |
63 | | |
64 | | namespace skgpu::graphite { |
65 | | |
66 | 0 | #define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner()) |
67 | | |
68 | 0 | Context::ContextID Context::ContextID::Next() { |
69 | 0 | static std::atomic<uint32_t> nextID{1}; |
70 | 0 | uint32_t id; |
71 | 0 | do { |
72 | 0 | id = nextID.fetch_add(1, std::memory_order_relaxed); |
73 | 0 | } while (id == SK_InvalidUniqueID); |
74 | 0 | return ContextID(id); |
75 | 0 | } |
76 | | |
77 | | //-------------------------------------------------------------------------------------------------- |
78 | | Context::Context(sk_sp<SharedContext> sharedContext, |
79 | | std::unique_ptr<QueueManager> queueManager, |
80 | | const ContextOptions& options) |
81 | | : fSharedContext(std::move(sharedContext)) |
82 | | , fQueueManager(std::move(queueManager)) |
83 | 0 | , fContextID(ContextID::Next()) { |
84 | | // We have to create this outside the initializer list because we need to pass in the Context's |
85 | | // SingleOwner object and it is declared last |
86 | 0 | fResourceProvider = fSharedContext->makeResourceProvider(&fSingleOwner, |
87 | 0 | SK_InvalidGenID, |
88 | 0 | options.fGpuBudgetInBytes); |
89 | 0 | fMappedBufferManager = std::make_unique<ClientMappedBufferManager>(this->contextID()); |
90 | 0 | #if defined(GRAPHITE_TEST_UTILS) |
91 | 0 | if (options.fOptionsPriv) { |
92 | 0 | fStoreContextRefInRecorder = options.fOptionsPriv->fStoreContextRefInRecorder; |
93 | 0 | } |
94 | 0 | #endif |
95 | 0 | } |
96 | | |
97 | 0 | Context::~Context() { |
98 | 0 | #if defined(GRAPHITE_TEST_UTILS) |
99 | 0 | ASSERT_SINGLE_OWNER |
100 | 0 | for (auto& recorder : fTrackedRecorders) { |
101 | 0 | recorder->priv().setContext(nullptr); |
102 | 0 | } |
103 | 0 | #endif |
104 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::~Context() Unexecuted instantiation: skgpu::graphite::Context::~Context() |
105 | | |
106 | 0 | bool Context::finishInitialization() { |
107 | 0 | SkASSERT(!fSharedContext->rendererProvider()); // Can only initialize once |
108 | |
|
109 | 0 | StaticBufferManager bufferManager{fResourceProvider.get(), fSharedContext->caps()}; |
110 | 0 | std::unique_ptr<RendererProvider> renderers{ |
111 | 0 | new RendererProvider(fSharedContext->caps(), &bufferManager)}; |
112 | |
|
113 | 0 | auto result = bufferManager.finalize(this, fQueueManager.get(), fSharedContext->globalCache()); |
114 | 0 | if (result == StaticBufferManager::FinishResult::kFailure) { |
115 | | // If something went wrong filling out the static vertex buffers, any Renderer that would |
116 | | // use it will draw incorrectly, so it's better to fail the Context creation. |
117 | 0 | return false; |
118 | 0 | } |
119 | 0 | if (result == StaticBufferManager::FinishResult::kSuccess && |
120 | 0 | !fQueueManager->submitToGpu()) { |
121 | 0 | SKGPU_LOG_W("Failed to submit initial command buffer for Context creation.\n"); |
122 | 0 | return false; |
123 | 0 | } // else result was kNoWork so skip submitting to the GPU |
124 | 0 | fSharedContext->setRendererProvider(std::move(renderers)); |
125 | 0 | return true; |
126 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::finishInitialization() Unexecuted instantiation: skgpu::graphite::Context::finishInitialization() |
127 | | |
128 | 0 | BackendApi Context::backend() const { return fSharedContext->backend(); } |
129 | | |
130 | 0 | std::unique_ptr<Recorder> Context::makeRecorder(const RecorderOptions& options) { |
131 | 0 | ASSERT_SINGLE_OWNER |
132 | |
|
133 | 0 | auto recorder = std::unique_ptr<Recorder>(new Recorder(fSharedContext, options)); |
134 | 0 | #if defined(GRAPHITE_TEST_UTILS) |
135 | 0 | if (fStoreContextRefInRecorder) { |
136 | 0 | recorder->priv().setContext(this); |
137 | 0 | } |
138 | 0 | #endif |
139 | 0 | return recorder; |
140 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::makeRecorder(skgpu::graphite::RecorderOptions const&) Unexecuted instantiation: skgpu::graphite::Context::makeRecorder(skgpu::graphite::RecorderOptions const&) |
141 | | |
142 | 0 | bool Context::insertRecording(const InsertRecordingInfo& info) { |
143 | 0 | ASSERT_SINGLE_OWNER |
144 | |
|
145 | 0 | return fQueueManager->addRecording(info, this); |
146 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::insertRecording(skgpu::graphite::InsertRecordingInfo const&) Unexecuted instantiation: skgpu::graphite::Context::insertRecording(skgpu::graphite::InsertRecordingInfo const&) |
147 | | |
148 | 0 | bool Context::submit(SyncToCpu syncToCpu) { |
149 | 0 | ASSERT_SINGLE_OWNER |
150 | |
|
151 | 0 | if (syncToCpu == SyncToCpu::kYes && !fSharedContext->caps()->allowCpuSync()) { |
152 | 0 | SKGPU_LOG_E("SyncToCpu::kYes not supported with ContextOptions::fNeverYieldToWebGPU. " |
153 | 0 | "The parameter is ignored and no synchronization will occur."); |
154 | 0 | syncToCpu = SyncToCpu::kNo; |
155 | 0 | } |
156 | 0 | bool success = fQueueManager->submitToGpu(); |
157 | 0 | this->checkForFinishedWork(syncToCpu); |
158 | 0 | return success; |
159 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::submit(skgpu::graphite::SyncToCpu) Unexecuted instantiation: skgpu::graphite::Context::submit(skgpu::graphite::SyncToCpu) |
160 | | |
161 | 0 | bool Context::hasUnfinishedGpuWork() const { return fQueueManager->hasUnfinishedGpuWork(); } |
162 | | |
163 | | void Context::asyncRescaleAndReadPixels(const SkImage* image, |
164 | | const SkImageInfo& dstImageInfo, |
165 | | const SkIRect& srcRect, |
166 | | SkImage::RescaleGamma rescaleGamma, |
167 | | SkImage::RescaleMode rescaleMode, |
168 | | SkImage::ReadPixelsCallback callback, |
169 | 0 | SkImage::ReadPixelsContext callbackContext) { |
170 | 0 | if (!image || !as_IB(image)->isGraphiteBacked()) { |
171 | 0 | callback(callbackContext, nullptr); |
172 | 0 | return; |
173 | 0 | } |
174 | | // TODO(b/238756380): YUVA read not supported right now |
175 | 0 | if (as_IB(image)->isYUVA()) { |
176 | 0 | callback(callbackContext, nullptr); |
177 | 0 | return; |
178 | 0 | } |
179 | | |
180 | 0 | if (!SkIRect::MakeSize(image->imageInfo().dimensions()).contains(srcRect)) { |
181 | 0 | callback(callbackContext, nullptr); |
182 | 0 | return; |
183 | 0 | } |
184 | | |
185 | 0 | if (srcRect.size() == dstImageInfo.bounds().size()) { |
186 | | // No need for rescale |
187 | 0 | auto graphiteImage = reinterpret_cast<const skgpu::graphite::Image*>(image); |
188 | 0 | const TextureProxyView& proxyView = graphiteImage->textureProxyView(); |
189 | 0 | return this->asyncReadPixels(proxyView.proxy(), |
190 | 0 | image->imageInfo(), |
191 | 0 | dstImageInfo.colorInfo(), |
192 | 0 | srcRect, |
193 | 0 | callback, |
194 | 0 | callbackContext); |
195 | 0 | } |
196 | | |
197 | | // Make a recorder to record drawing commands into |
198 | 0 | std::unique_ptr<Recorder> recorder = this->makeRecorder(); |
199 | |
|
200 | 0 | sk_sp<SkImage> scaledImage = RescaleImage(recorder.get(), |
201 | 0 | image, |
202 | 0 | srcRect, |
203 | 0 | dstImageInfo, |
204 | 0 | rescaleGamma, |
205 | 0 | rescaleMode); |
206 | 0 | if (!scaledImage) { |
207 | 0 | callback(callbackContext, nullptr); |
208 | 0 | return; |
209 | 0 | } |
210 | | |
211 | | // Add draw commands to queue before starting the transfer |
212 | 0 | std::unique_ptr<Recording> recording = recorder->snap(); |
213 | 0 | if (!recording) { |
214 | 0 | callback(callbackContext, nullptr); |
215 | 0 | return; |
216 | 0 | } |
217 | 0 | InsertRecordingInfo recordingInfo; |
218 | 0 | recordingInfo.fRecording = recording.get(); |
219 | 0 | if (!this->insertRecording(recordingInfo)) { |
220 | 0 | callback(callbackContext, nullptr); |
221 | 0 | return; |
222 | 0 | } |
223 | | |
224 | 0 | SkASSERT(scaledImage->imageInfo() == dstImageInfo); |
225 | |
|
226 | 0 | auto scaledGraphiteImage = reinterpret_cast<const skgpu::graphite::Image*>(scaledImage.get()); |
227 | 0 | const TextureProxyView& scaledProxyView = scaledGraphiteImage->textureProxyView(); |
228 | |
|
229 | 0 | this->asyncReadPixels(scaledProxyView.proxy(), |
230 | 0 | dstImageInfo, |
231 | 0 | dstImageInfo.colorInfo(), |
232 | 0 | dstImageInfo.bounds(), |
233 | 0 | callback, |
234 | 0 | callbackContext); |
235 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::asyncRescaleAndReadPixels(SkImage const*, SkImageInfo const&, SkIRect const&, SkImage::RescaleGamma, SkImage::RescaleMode, void (*)(void*, std::__1::unique_ptr<SkImage::AsyncReadResult const, std::__1::default_delete<SkImage::AsyncReadResult const> >), void*) Unexecuted instantiation: skgpu::graphite::Context::asyncRescaleAndReadPixels(SkImage const*, SkImageInfo const&, SkIRect const&, SkImage::RescaleGamma, SkImage::RescaleMode, void (*)(void*, std::__1::unique_ptr<SkImage::AsyncReadResult const, std::__1::default_delete<SkImage::AsyncReadResult const> >), void*) |
236 | | |
237 | | void Context::asyncRescaleAndReadPixels(const SkSurface* surface, |
238 | | const SkImageInfo& dstImageInfo, |
239 | | const SkIRect& srcRect, |
240 | | SkImage::RescaleGamma rescaleGamma, |
241 | | SkImage::RescaleMode rescaleMode, |
242 | | SkImage::ReadPixelsCallback callback, |
243 | 0 | SkImage::ReadPixelsContext callbackContext) { |
244 | 0 | if (!static_cast<const SkSurface_Base*>(surface)->isGraphiteBacked()) { |
245 | 0 | callback(callbackContext, nullptr); |
246 | 0 | return; |
247 | 0 | } |
248 | | |
249 | 0 | sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(surface)); |
250 | 0 | this->asyncRescaleAndReadPixels(surfaceImage.get(), |
251 | 0 | dstImageInfo, |
252 | 0 | srcRect, |
253 | 0 | rescaleGamma, |
254 | 0 | rescaleMode, |
255 | 0 | callback, |
256 | 0 | callbackContext); |
257 | 0 | } |
258 | | |
259 | | void Context::asyncReadPixels(const TextureProxy* proxy, |
260 | | const SkImageInfo& srcImageInfo, |
261 | | const SkColorInfo& dstColorInfo, |
262 | | const SkIRect& srcRect, |
263 | | SkImage::ReadPixelsCallback callback, |
264 | 0 | SkImage::ReadPixelsContext callbackContext) { |
265 | 0 | TRACE_EVENT2("skia.gpu", TRACE_FUNC, "width", srcRect.width(), "height", srcRect.height()); |
266 | |
|
267 | 0 | if (!proxy || proxy->textureInfo().isProtected() == Protected::kYes) { |
268 | 0 | callback(callbackContext, nullptr); |
269 | 0 | return; |
270 | 0 | } |
271 | | |
272 | 0 | if (!SkImageInfoIsValid(srcImageInfo) || !SkColorInfoIsValid(dstColorInfo)) { |
273 | 0 | callback(callbackContext, nullptr); |
274 | 0 | return; |
275 | 0 | } |
276 | | |
277 | 0 | if (!SkIRect::MakeSize(srcImageInfo.dimensions()).contains(srcRect)) { |
278 | 0 | callback(callbackContext, nullptr); |
279 | 0 | return; |
280 | 0 | } |
281 | | |
282 | 0 | const Caps* caps = fSharedContext->caps(); |
283 | 0 | if (!caps->supportsReadPixels(proxy->textureInfo())) { |
284 | 0 | if (!caps->isTexturable(proxy->textureInfo())) { |
285 | 0 | callback(callbackContext, nullptr); |
286 | 0 | return; |
287 | 0 | } |
288 | | |
289 | 0 | auto recorder = this->makeRecorder(); |
290 | |
|
291 | 0 | auto surface = SkSurfaces::RenderTarget(recorder.get(), |
292 | 0 | srcImageInfo.makeDimensions(srcRect.size())); |
293 | 0 | if (!surface) { |
294 | 0 | surface = SkSurfaces::RenderTarget(recorder.get(), |
295 | 0 | SkImageInfo::Make(srcRect.size(), dstColorInfo)); |
296 | 0 | if (!surface) { |
297 | 0 | callback(callbackContext, nullptr); |
298 | 0 | return; |
299 | 0 | } |
300 | 0 | } |
301 | | |
302 | 0 | auto swizzle = caps->getReadSwizzle(srcImageInfo.colorType(), proxy->textureInfo()); |
303 | 0 | TextureProxyView view(sk_ref_sp(proxy), swizzle); |
304 | 0 | auto srcImage = sk_make_sp<Image>(view, srcImageInfo.colorInfo()); |
305 | |
|
306 | 0 | SkPaint paint; |
307 | 0 | paint.setBlendMode(SkBlendMode::kSrc); |
308 | 0 | surface->getCanvas()->drawImage(srcImage, |
309 | 0 | -srcRect.x(), -srcRect.y(), |
310 | 0 | SkFilterMode::kNearest, |
311 | 0 | &paint); |
312 | |
|
313 | 0 | auto recording = recorder->snap(); |
314 | 0 | InsertRecordingInfo recordingInfo; |
315 | 0 | recordingInfo.fRecording = recording.get(); |
316 | 0 | this->insertRecording(recordingInfo); |
317 | |
|
318 | 0 | this->asyncReadPixels(static_cast<Surface*>(surface.get())->readSurfaceView().proxy(), |
319 | 0 | surface->imageInfo(), |
320 | 0 | dstColorInfo, |
321 | 0 | SkIRect::MakeSize(srcRect.size()), |
322 | 0 | callback, |
323 | 0 | callbackContext); |
324 | |
|
325 | 0 | return; |
326 | 0 | } |
327 | | |
328 | 0 | PixelTransferResult transferResult = this->transferPixels(proxy, srcImageInfo, |
329 | 0 | dstColorInfo, srcRect); |
330 | |
|
331 | 0 | if (!transferResult.fTransferBuffer) { |
332 | | // TODO: try to do a synchronous readPixels instead |
333 | 0 | callback(callbackContext, nullptr); |
334 | 0 | return; |
335 | 0 | } |
336 | | |
337 | 0 | this->finalizeAsyncReadPixels({&transferResult, 1}, callback, callbackContext); |
338 | 0 | } |
339 | | |
340 | | void Context::asyncRescaleAndReadPixelsYUV420(const SkImage* image, |
341 | | SkYUVColorSpace yuvColorSpace, |
342 | | sk_sp<SkColorSpace> dstColorSpace, |
343 | | const SkIRect& srcRect, |
344 | | const SkISize& dstSize, |
345 | | SkImage::RescaleGamma rescaleGamma, |
346 | | SkImage::RescaleMode rescaleMode, |
347 | | SkImage::ReadPixelsCallback callback, |
348 | 0 | SkImage::ReadPixelsContext callbackContext) { |
349 | 0 | this->asyncRescaleAndReadPixelsYUV420Impl(image, |
350 | 0 | yuvColorSpace, |
351 | 0 | /*readAlpha=*/false, |
352 | 0 | dstColorSpace, |
353 | 0 | srcRect, |
354 | 0 | dstSize, |
355 | 0 | rescaleGamma, |
356 | 0 | rescaleMode, |
357 | 0 | callback, |
358 | 0 | callbackContext); |
359 | 0 | } |
360 | | |
361 | | void Context::asyncRescaleAndReadPixelsYUV420(const SkSurface* surface, |
362 | | SkYUVColorSpace yuvColorSpace, |
363 | | sk_sp<SkColorSpace> dstColorSpace, |
364 | | const SkIRect& srcRect, |
365 | | const SkISize& dstSize, |
366 | | SkImage::RescaleGamma rescaleGamma, |
367 | | SkImage::RescaleMode rescaleMode, |
368 | | SkImage::ReadPixelsCallback callback, |
369 | 0 | SkImage::ReadPixelsContext callbackContext) { |
370 | 0 | if (!static_cast<const SkSurface_Base*>(surface)->isGraphiteBacked()) { |
371 | 0 | callback(callbackContext, nullptr); |
372 | 0 | return; |
373 | 0 | } |
374 | | |
375 | 0 | sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(surface)); |
376 | 0 | this->asyncRescaleAndReadPixelsYUV420(surfaceImage.get(), |
377 | 0 | yuvColorSpace, |
378 | 0 | dstColorSpace, |
379 | 0 | srcRect, |
380 | 0 | dstSize, |
381 | 0 | rescaleGamma, |
382 | 0 | rescaleMode, |
383 | 0 | callback, |
384 | 0 | callbackContext); |
385 | 0 | } |
386 | | |
387 | | void Context::asyncRescaleAndReadPixelsYUVA420(const SkImage* image, |
388 | | SkYUVColorSpace yuvColorSpace, |
389 | | sk_sp<SkColorSpace> dstColorSpace, |
390 | | const SkIRect& srcRect, |
391 | | const SkISize& dstSize, |
392 | | SkImage::RescaleGamma rescaleGamma, |
393 | | SkImage::RescaleMode rescaleMode, |
394 | | SkImage::ReadPixelsCallback callback, |
395 | 0 | SkImage::ReadPixelsContext callbackContext) { |
396 | 0 | this->asyncRescaleAndReadPixelsYUV420Impl(image, |
397 | 0 | yuvColorSpace, |
398 | 0 | /*readAlpha=*/true, |
399 | 0 | dstColorSpace, |
400 | 0 | srcRect, |
401 | 0 | dstSize, |
402 | 0 | rescaleGamma, |
403 | 0 | rescaleMode, |
404 | 0 | callback, |
405 | 0 | callbackContext); |
406 | 0 | } |
407 | | |
408 | | void Context::asyncRescaleAndReadPixelsYUVA420(const SkSurface* surface, |
409 | | SkYUVColorSpace yuvColorSpace, |
410 | | sk_sp<SkColorSpace> dstColorSpace, |
411 | | const SkIRect& srcRect, |
412 | | const SkISize& dstSize, |
413 | | SkImage::RescaleGamma rescaleGamma, |
414 | | SkImage::RescaleMode rescaleMode, |
415 | | SkImage::ReadPixelsCallback callback, |
416 | 0 | SkImage::ReadPixelsContext callbackContext) { |
417 | 0 | if (!static_cast<const SkSurface_Base*>(surface)->isGraphiteBacked()) { |
418 | 0 | callback(callbackContext, nullptr); |
419 | 0 | return; |
420 | 0 | } |
421 | | |
422 | 0 | sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(surface)); |
423 | 0 | this->asyncRescaleAndReadPixelsYUVA420(surfaceImage.get(), |
424 | 0 | yuvColorSpace, |
425 | 0 | dstColorSpace, |
426 | 0 | srcRect, |
427 | 0 | dstSize, |
428 | 0 | rescaleGamma, |
429 | 0 | rescaleMode, |
430 | 0 | callback, |
431 | 0 | callbackContext); |
432 | 0 | } |
433 | | |
434 | | void Context::asyncRescaleAndReadPixelsYUV420Impl(const SkImage* image, |
435 | | SkYUVColorSpace yuvColorSpace, |
436 | | bool readAlpha, |
437 | | sk_sp<SkColorSpace> dstColorSpace, |
438 | | const SkIRect& srcRect, |
439 | | const SkISize& dstSize, |
440 | | SkImage::RescaleGamma rescaleGamma, |
441 | | SkImage::RescaleMode rescaleMode, |
442 | | SkImage::ReadPixelsCallback callback, |
443 | 0 | SkImage::ReadPixelsContext callbackContext) { |
444 | 0 | if (!image || !as_IB(image)->isGraphiteBacked()) { |
445 | 0 | callback(callbackContext, nullptr); |
446 | 0 | return; |
447 | 0 | } |
448 | | |
449 | 0 | const SkImageInfo& srcImageInfo = image->imageInfo(); |
450 | 0 | if (!SkIRect::MakeSize(srcImageInfo.dimensions()).contains(srcRect)) { |
451 | 0 | callback(callbackContext, nullptr); |
452 | 0 | return; |
453 | 0 | } |
454 | | |
455 | | // Make a recorder to record drawing commands into |
456 | 0 | std::unique_ptr<Recorder> recorder = this->makeRecorder(); |
457 | |
|
458 | 0 | if (srcRect.size() == dstSize && |
459 | 0 | SkColorSpace::Equals(srcImageInfo.colorInfo().colorSpace(), |
460 | 0 | dstColorSpace.get())) { |
461 | | // No need for rescale |
462 | 0 | return this->asyncReadPixelsYUV420(recorder.get(), |
463 | 0 | image, |
464 | 0 | yuvColorSpace, |
465 | 0 | readAlpha, |
466 | 0 | srcRect, |
467 | 0 | callback, |
468 | 0 | callbackContext); |
469 | 0 | } |
470 | | |
471 | 0 | SkImageInfo dstImageInfo = SkImageInfo::Make(dstSize, |
472 | 0 | kRGBA_8888_SkColorType, |
473 | 0 | srcImageInfo.colorInfo().alphaType(), |
474 | 0 | dstColorSpace); |
475 | 0 | sk_sp<SkImage> scaledImage = RescaleImage(recorder.get(), |
476 | 0 | image, |
477 | 0 | srcRect, |
478 | 0 | dstImageInfo, |
479 | 0 | rescaleGamma, |
480 | 0 | rescaleMode); |
481 | 0 | if (!scaledImage) { |
482 | 0 | callback(callbackContext, nullptr); |
483 | 0 | return; |
484 | 0 | } |
485 | | |
486 | 0 | this->asyncReadPixelsYUV420(recorder.get(), |
487 | 0 | scaledImage.get(), |
488 | 0 | yuvColorSpace, |
489 | 0 | readAlpha, |
490 | 0 | SkIRect::MakeSize(dstSize), |
491 | 0 | callback, |
492 | 0 | callbackContext); |
493 | 0 | } |
494 | | |
495 | | void Context::asyncReadPixelsYUV420(Recorder* recorder, |
496 | | const SkImage* srcImage, |
497 | | SkYUVColorSpace yuvColorSpace, |
498 | | bool readAlpha, |
499 | | const SkIRect& srcRect, |
500 | | SkImage::ReadPixelsCallback callback, |
501 | 0 | SkImage::ReadPixelsContext callbackContext) { |
502 | 0 | TRACE_EVENT2("skia.gpu", TRACE_FUNC, "width", srcRect.width(), "height", srcRect.height()); |
503 | | |
504 | | // Make three or four Surfaces to draw the YUV[A] planes into |
505 | 0 | SkImageInfo yaInfo = SkImageInfo::MakeA8(srcRect.size()); |
506 | 0 | sk_sp<SkSurface> ySurface = Surface::Make(recorder, yaInfo, "AsyncReadPixelsYPlane", |
507 | 0 | Budgeted::kNo); |
508 | 0 | sk_sp<SkSurface> aSurface; |
509 | 0 | if (readAlpha) { |
510 | 0 | aSurface = Surface::Make(recorder, yaInfo, "AsyncReadPixelsAPlane", Budgeted::kNo); |
511 | 0 | } |
512 | |
|
513 | 0 | SkImageInfo uvInfo = yaInfo.makeWH(yaInfo.width()/2, yaInfo.height()/2); |
514 | 0 | sk_sp<SkSurface> uSurface = Surface::Make(recorder, uvInfo, "AsyncReadPixelsUPlane", |
515 | 0 | Budgeted::kNo); |
516 | 0 | sk_sp<SkSurface> vSurface = Surface::Make(recorder, uvInfo, "AsyncReadPixelsVPlane", |
517 | 0 | Budgeted::kNo); |
518 | |
|
519 | 0 | if (!ySurface || !uSurface || !vSurface || (readAlpha && !aSurface)) { |
520 | 0 | callback(callbackContext, nullptr); |
521 | 0 | return; |
522 | 0 | } |
523 | | |
524 | | // Set up draws and transfers |
525 | | // TODO: Use one transfer buffer for all three planes to reduce map/unmap cost? |
526 | 0 | auto drawPlane = [](SkSurface* dstSurface, |
527 | 0 | const SkImage* srcImage, |
528 | 0 | float rgb2yuv[20], |
529 | 0 | const SkMatrix& texMatrix) { |
530 | | // Render the plane defined by rgb2yuv from srcImage into dstSurface |
531 | 0 | SkPaint paint; |
532 | 0 | const SkSamplingOptions sampling(SkFilterMode::kLinear, SkMipmapMode::kNone); |
533 | 0 | sk_sp<SkShader> imgShader = srcImage->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, |
534 | 0 | sampling, texMatrix); |
535 | 0 | paint.setShader(std::move(imgShader)); |
536 | |
|
537 | 0 | if (rgb2yuv) { |
538 | 0 | sk_sp<SkColorFilter> matrixFilter = SkColorFilters::Matrix(rgb2yuv); |
539 | 0 | paint.setColorFilter(std::move(matrixFilter)); |
540 | 0 | } |
541 | |
|
542 | 0 | SkCanvas* canvas = dstSurface->getCanvas(); |
543 | 0 | canvas->drawPaint(paint); |
544 | 0 | }; |
545 | |
|
546 | 0 | auto copyPlane = [this](SkSurface* surface) { |
547 | | // Transfer result from dstSurface |
548 | 0 | auto graphiteSurface = reinterpret_cast<const skgpu::graphite::Surface*>(surface); |
549 | 0 | TextureProxyView proxyView = graphiteSurface->readSurfaceView(); |
550 | |
|
551 | 0 | auto srcImageInfo = surface->imageInfo(); |
552 | 0 | auto dstColorInfo = srcImageInfo.colorInfo().makeColorType(kAlpha_8_SkColorType); |
553 | 0 | return this->transferPixels(proxyView.proxy(), |
554 | 0 | srcImageInfo, |
555 | 0 | dstColorInfo, |
556 | 0 | SkIRect::MakeWH(surface->width(), surface->height())); |
557 | 0 | }; |
558 | |
|
559 | 0 | float baseM[20]; |
560 | 0 | SkColorMatrix_RGB2YUV(yuvColorSpace, baseM); |
561 | 0 | SkMatrix texMatrix = SkMatrix::Translate(srcRect.fLeft, srcRect.fTop); |
562 | | |
563 | | // This matrix generates (r,g,b,a) = (0, 0, 0, y) |
564 | 0 | float yM[20]; |
565 | 0 | std::fill_n(yM, 15, 0.f); |
566 | 0 | std::copy_n(baseM + 0, 5, yM + 15); |
567 | 0 | drawPlane(ySurface.get(), srcImage, yM, texMatrix); |
568 | 0 | if (readAlpha) { |
569 | | // No matrix, straight copy of alpha channel |
570 | 0 | SkASSERT(baseM[15] == 0 && |
571 | 0 | baseM[16] == 0 && |
572 | 0 | baseM[17] == 0 && |
573 | 0 | baseM[18] == 1 && |
574 | 0 | baseM[19] == 0); |
575 | 0 | drawPlane(aSurface.get(), srcImage, nullptr, texMatrix); |
576 | 0 | } |
577 | |
|
578 | 0 | texMatrix.preScale(0.5f, 0.5f); |
579 | | // This matrix generates (r,g,b,a) = (0, 0, 0, u) |
580 | 0 | float uM[20]; |
581 | 0 | std::fill_n(uM, 15, 0.f); |
582 | 0 | std::copy_n(baseM + 5, 5, uM + 15); |
583 | 0 | drawPlane(uSurface.get(), srcImage, uM, texMatrix); |
584 | | |
585 | | // This matrix generates (r,g,b,a) = (0, 0, 0, v) |
586 | 0 | float vM[20]; |
587 | 0 | std::fill_n(vM, 15, 0.f); |
588 | 0 | std::copy_n(baseM + 10, 5, vM + 15); |
589 | 0 | drawPlane(vSurface.get(), srcImage, vM, texMatrix); |
590 | | |
591 | | // Add draw commands to queue |
592 | 0 | std::unique_ptr<Recording> recording = recorder->snap(); |
593 | 0 | if (!recording) { |
594 | 0 | callback(callbackContext, nullptr); |
595 | 0 | return; |
596 | 0 | } |
597 | 0 | InsertRecordingInfo recordingInfo; |
598 | 0 | recordingInfo.fRecording = recording.get(); |
599 | 0 | if (!this->insertRecording(recordingInfo)) { |
600 | 0 | callback(callbackContext, nullptr); |
601 | 0 | return; |
602 | 0 | } |
603 | | |
604 | | // Now set up transfers |
605 | 0 | PixelTransferResult transfers[4]; |
606 | 0 | transfers[0] = copyPlane(ySurface.get()); |
607 | 0 | if (!transfers[0].fTransferBuffer) { |
608 | 0 | callback(callbackContext, nullptr); |
609 | 0 | return; |
610 | 0 | } |
611 | 0 | transfers[1] = copyPlane(uSurface.get()); |
612 | 0 | if (!transfers[1].fTransferBuffer) { |
613 | 0 | callback(callbackContext, nullptr); |
614 | 0 | return; |
615 | 0 | } |
616 | 0 | transfers[2] = copyPlane(vSurface.get()); |
617 | 0 | if (!transfers[2].fTransferBuffer) { |
618 | 0 | callback(callbackContext, nullptr); |
619 | 0 | return; |
620 | 0 | } |
621 | 0 | if (readAlpha) { |
622 | 0 | transfers[3] = copyPlane(aSurface.get()); |
623 | 0 | if (!transfers[3].fTransferBuffer) { |
624 | 0 | callback(callbackContext, nullptr); |
625 | 0 | return; |
626 | 0 | } |
627 | 0 | } |
628 | | |
629 | 0 | this->finalizeAsyncReadPixels({transfers, readAlpha ? 4 : 3}, callback, callbackContext); |
630 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::asyncReadPixelsYUV420(skgpu::graphite::Recorder*, SkImage const*, SkYUVColorSpace, bool, SkIRect const&, void (*)(void*, std::__1::unique_ptr<SkImage::AsyncReadResult const, std::__1::default_delete<SkImage::AsyncReadResult const> >), void*) Unexecuted instantiation: skgpu::graphite::Context::asyncReadPixelsYUV420(skgpu::graphite::Recorder*, SkImage const*, SkYUVColorSpace, bool, SkIRect const&, void (*)(void*, std::__1::unique_ptr<SkImage::AsyncReadResult const, std::__1::default_delete<SkImage::AsyncReadResult const> >), void*) |
631 | | |
632 | | void Context::finalizeAsyncReadPixels(SkSpan<PixelTransferResult> transferResults, |
633 | | SkImage::ReadPixelsCallback callback, |
634 | 0 | SkImage::ReadPixelsContext callbackContext) { |
635 | | // Set up FinishContext and add transfer commands to queue |
636 | 0 | struct AsyncReadFinishContext { |
637 | 0 | SkImage::ReadPixelsCallback* fClientCallback; |
638 | 0 | SkImage::ReadPixelsContext fClientContext; |
639 | 0 | ClientMappedBufferManager* fMappedBufferManager; |
640 | 0 | std::array<PixelTransferResult, 4> fTransferResults; |
641 | 0 | }; |
642 | |
|
643 | 0 | auto finishContext = std::make_unique<AsyncReadFinishContext>(); |
644 | 0 | finishContext->fClientCallback = callback; |
645 | 0 | finishContext->fClientContext = callbackContext; |
646 | 0 | finishContext->fMappedBufferManager = fMappedBufferManager.get(); |
647 | |
|
648 | 0 | SkASSERT(transferResults.size() <= std::size(finishContext->fTransferResults)); |
649 | 0 | skia_private::STArray<4, sk_sp<Buffer>> buffersToAsyncMap; |
650 | 0 | for (size_t i = 0; i < transferResults.size(); ++i) { |
651 | 0 | finishContext->fTransferResults[i] = std::move(transferResults[i]); |
652 | 0 | if (fSharedContext->caps()->bufferMapsAreAsync()) { |
653 | 0 | buffersToAsyncMap.push_back(finishContext->fTransferResults[i].fTransferBuffer); |
654 | 0 | } |
655 | 0 | } |
656 | |
|
657 | 0 | InsertFinishInfo info; |
658 | 0 | info.fFinishedContext = finishContext.release(); |
659 | 0 | info.fFinishedProc = [](GpuFinishedContext c, CallbackResult status) { |
660 | 0 | std::unique_ptr<const AsyncReadFinishContext> context( |
661 | 0 | reinterpret_cast<const AsyncReadFinishContext*>(c)); |
662 | 0 | using AsyncReadResult = skgpu::TAsyncReadResult<Buffer, ContextID, PixelTransferResult>; |
663 | |
|
664 | 0 | ClientMappedBufferManager* manager = context->fMappedBufferManager; |
665 | 0 | std::unique_ptr<AsyncReadResult> result; |
666 | 0 | if (status == CallbackResult::kSuccess) { |
667 | 0 | result = std::make_unique<AsyncReadResult>(manager->ownerID()); |
668 | 0 | } |
669 | 0 | for (const auto& r : context->fTransferResults) { |
670 | 0 | if (!r.fTransferBuffer) { |
671 | 0 | break; |
672 | 0 | } |
673 | 0 | if (result && !result->addTransferResult(r, r.fSize, r.fRowBytes, manager)) { |
674 | 0 | result.reset(); |
675 | 0 | } |
676 | | // If we didn't get this buffer into the mapped buffer manager then make sure it gets |
677 | | // unmapped if it has a pending or completed async map. |
678 | 0 | if (!result && r.fTransferBuffer->isUnmappable()) { |
679 | 0 | r.fTransferBuffer->unmap(); |
680 | 0 | } |
681 | 0 | } |
682 | 0 | (*context->fClientCallback)(context->fClientContext, std::move(result)); |
683 | 0 | }; Unexecuted instantiation: Context.cpp:skgpu::graphite::Context::finalizeAsyncReadPixels(SkSpan<skgpu::graphite::Context::PixelTransferResult>, void (*)(void*, std::__1::unique_ptr<SkImage::AsyncReadResult const, std::__1::default_delete<SkImage::AsyncReadResult const> >), void*)::$_0::operator()(void*, skgpu::CallbackResult) const Unexecuted instantiation: Context.cpp:skgpu::graphite::Context::finalizeAsyncReadPixels(SkSpan<skgpu::graphite::Context::PixelTransferResult>, void (*)(void*, std::__1::unique_ptr<SkImage::AsyncReadResult const, std::__1::default_delete<SkImage::AsyncReadResult const> >), void*)::$_1::operator()(void*, skgpu::CallbackResult) const |
684 | | |
685 | | // If addFinishInfo() fails, it invokes the finish callback automatically, which handles all the |
686 | | // required clean up for us, just log an error message. The buffers will never be mapped and |
687 | | // thus don't need an unmap. |
688 | 0 | if (!fQueueManager->addFinishInfo(info, fResourceProvider.get(), buffersToAsyncMap)) { |
689 | 0 | SKGPU_LOG_E("Failed to register finish callbacks for asyncReadPixels."); |
690 | 0 | return; |
691 | 0 | } |
692 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::finalizeAsyncReadPixels(SkSpan<skgpu::graphite::Context::PixelTransferResult>, void (*)(void*, std::__1::unique_ptr<SkImage::AsyncReadResult const, std::__1::default_delete<SkImage::AsyncReadResult const> >), void*) Unexecuted instantiation: skgpu::graphite::Context::finalizeAsyncReadPixels(SkSpan<skgpu::graphite::Context::PixelTransferResult>, void (*)(void*, std::__1::unique_ptr<SkImage::AsyncReadResult const, std::__1::default_delete<SkImage::AsyncReadResult const> >), void*) |
693 | | |
// Schedules an asynchronous GPU->CPU transfer of 'srcRect' from 'proxy' into a
// host-visible transfer buffer. On success the returned PixelTransferResult holds
// the buffer, the transferred dimensions, the row bytes of the final (possibly
// converted) data, and — when a CPU-side conversion is needed — a converter the
// caller runs after mapping the buffer. Returns an empty result ({}) on failure.
Context::PixelTransferResult Context::transferPixels(const TextureProxy* proxy,
                                                     const SkImageInfo& srcImageInfo,
                                                     const SkColorInfo& dstColorInfo,
                                                     const SkIRect& srcRect) {
    SkASSERT(srcImageInfo.bounds().contains(srcRect));

    const Caps* caps = fSharedContext->caps();
    // Ask the backend which color type it can actually read this texture as, and
    // whether the data comes back as packed 24-bit RGB (three bytes, no alpha).
    SkColorType supportedColorType;
    bool isRGB888Format;
    std::tie(supportedColorType, isRGB888Format) =
            caps->supportedReadPixelsColorType(srcImageInfo.colorType(),
                                               proxy->textureInfo(),
                                               dstColorInfo.colorType());
    if (supportedColorType == kUnknown_SkColorType) {
        return {};
    }

    // Fail if read color type does not have all of dstCT's color channels and those missing color
    // channels are in the src.
    uint32_t dstChannels = SkColorTypeChannelFlags(dstColorInfo.colorType());
    uint32_t legalReadChannels = SkColorTypeChannelFlags(supportedColorType);
    uint32_t srcChannels = SkColorTypeChannelFlags(srcImageInfo.colorType());
    if ((~legalReadChannels & dstChannels) & srcChannels) {
        return {};
    }

    // Packed RGB888 is 3 bytes/pixel on the wire even though no SkColorType matches it.
    int bpp = isRGB888Format ? 3 : SkColorTypeBytesPerPixel(supportedColorType);
    size_t rowBytes = caps->getAlignedTextureDataRowBytes(bpp * srcRect.width());
    size_t size = SkAlignTo(rowBytes * srcRect.height(), caps->requiredTransferBufferAlignment());
    sk_sp<Buffer> buffer = fResourceProvider->findOrCreateBuffer(
            size, BufferType::kXferGpuToCpu, AccessPattern::kHostVisible, "TransferToCpu");
    if (!buffer) {
        return {};
    }

    // Set up copy task. Since we always use a new buffer the offset can be 0 and we don't need to
    // worry about aligning it to the required transfer buffer alignment.
    sk_sp<CopyTextureToBufferTask> copyTask = CopyTextureToBufferTask::Make(sk_ref_sp(proxy),
                                                                            srcRect,
                                                                            buffer,
                                                                            /*bufferOffset=*/0,
                                                                            rowBytes);
    if (!copyTask || !fQueueManager->addTask(copyTask.get(), this)) {
        return {};
    }
    // The sync task is added after the copy so the CPU can observe the copy's
    // results once the buffer is mapped.
    sk_sp<SynchronizeToCpuTask> syncTask = SynchronizeToCpuTask::Make(buffer);
    if (!syncTask || !fQueueManager->addTask(syncTask.get(), this)) {
        return {};
    }

    PixelTransferResult result;
    result.fTransferBuffer = std::move(buffer);
    result.fSize = srcRect.size();
    // A converter is required when the color infos differ, or when the raw data is
    // packed RGB888 (which must be expanded to 4 bytes/pixel before SkConvertPixels
    // can handle it).
    if (srcImageInfo.colorInfo() != dstColorInfo || isRGB888Format) {
        SkISize dims = srcRect.size();
        SkImageInfo srcInfo = SkImageInfo::Make(dims, srcImageInfo.colorInfo());
        SkImageInfo dstInfo = SkImageInfo::Make(dims, dstColorInfo);
        result.fRowBytes = dstInfo.minRowBytes();
        // NOTE: captures by value; 'rowBytes' is the GPU-aligned row pitch of the
        // transfer buffer, which generally differs from dstInfo.minRowBytes().
        result.fPixelConverter = [dstInfo, srcInfo, rowBytes, isRGB888Format](
                void* dst, const void* src) {
            SkAutoPixmapStorage temp;
            size_t srcRowBytes = rowBytes;
            if (isRGB888Format) {
                // Expand each packed 3-byte RGB pixel into a 4-byte pixel with an
                // opaque alpha byte, staged in a temporary pixmap. Assumes srcInfo's
                // color type is 4 bytes/pixel here — TODO(review): confirm the
                // RGB888 read path always pairs with a 32-bit SkColorType.
                temp.alloc(srcInfo);
                size_t tRowBytes = temp.rowBytes();
                auto* sRow = reinterpret_cast<const char*>(src);
                auto* tRow = reinterpret_cast<char*>(temp.writable_addr());
                for (int y = 0; y < srcInfo.height(); ++y, sRow += srcRowBytes, tRow += tRowBytes) {
                    for (int x = 0; x < srcInfo.width(); ++x) {
                        auto s = sRow + x*3;
                        auto t = tRow + x*sizeof(uint32_t);
                        memcpy(t, s, 3);
                        t[3] = static_cast<char>(0xFF);  // opaque alpha
                    }
                }
                // Convert from the expanded staging pixmap instead of the raw buffer.
                src = temp.addr();
                srcRowBytes = tRowBytes;
            }
            SkAssertResult(SkConvertPixels(dstInfo, dst, dstInfo.minRowBytes(),
                                           srcInfo, src, srcRowBytes));
        };
    } else {
        // No conversion needed; the mapped buffer can be read directly at the
        // aligned row pitch.
        result.fRowBytes = rowBytes;
    }

    return result;
}
781 | | |
// Polls the GPU for completed submissions (optionally blocking until all work is
// done when syncToCpu is kYes) and then dispatches any client-mapped-buffer
// callbacks that became ready as a result.
void Context::checkForFinishedWork(SyncToCpu syncToCpu) {
    ASSERT_SINGLE_OWNER

    fQueueManager->checkForFinishedWork(syncToCpu);
    // Process after the queue check so newly-finished transfers are picked up.
    fMappedBufferManager->process();
}
788 | | |
// Non-blocking variant of checkForFinishedWork: poll for finished GPU work
// without waiting for outstanding submissions.
void Context::checkAsyncWorkCompletion() {
    this->checkForFinishedWork(SyncToCpu::kNo);
}
792 | | |
793 | 0 | void Context::deleteBackendTexture(const BackendTexture& texture) { |
794 | 0 | ASSERT_SINGLE_OWNER |
795 | |
|
796 | 0 | if (!texture.isValid() || texture.backend() != this->backend()) { |
797 | 0 | return; |
798 | 0 | } |
799 | 0 | fResourceProvider->deleteBackendTexture(texture); |
800 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::deleteBackendTexture(skgpu::graphite::BackendTexture const&) Unexecuted instantiation: skgpu::graphite::Context::deleteBackendTexture(skgpu::graphite::BackendTexture const&) |
801 | | |
// Releases all GPU resources held by the cache that are safe to free.
void Context::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    // First reap finished work so resources tied to completed submissions become
    // purgeable before the free pass runs.
    this->checkAsyncWorkCompletion();

    fResourceProvider->freeGpuResources();
}
809 | | |
810 | 0 | void Context::performDeferredCleanup(std::chrono::milliseconds msNotUsed) { |
811 | 0 | ASSERT_SINGLE_OWNER |
812 | |
|
813 | 0 | this->checkAsyncWorkCompletion(); |
814 | |
|
815 | 0 | auto purgeTime = skgpu::StdSteadyClock::now() - msNotUsed; |
816 | 0 | fResourceProvider->purgeResourcesNotUsedSince(purgeTime); |
817 | 0 | } Unexecuted instantiation: skgpu::graphite::Context::performDeferredCleanup(std::__1::chrono::duration<long long, std::__1::ratio<1l, 1000l> >) Unexecuted instantiation: skgpu::graphite::Context::performDeferredCleanup(std::__1::chrono::duration<long long, std::__1::ratio<1l, 1000l> >) |
818 | | |
// Returns the number of bytes currently counted against the resource-cache budget.
size_t Context::currentBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentBudgetedBytes();
}
823 | | |
// Returns the resource cache's byte budget (its size limit).
size_t Context::maxBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheLimit();
}
828 | | |
// Enumerates GPU memory usage into the client-provided SkTraceMemoryDump.
void Context::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceProvider->dumpMemoryStatistics(traceMemoryDump);
    // TODO: What is the graphite equivalent for the text blob cache and how do we print out its
    // used bytes here (see Ganesh implementation).
}
835 | | |
// Reports whether the underlying GPU device has been lost (per the shared context).
bool Context::isDeviceLost() const {
    return fSharedContext->isDeviceLost();
}
839 | | |
// Returns the backend's maximum supported texture dimension (per Caps).
int Context::maxTextureSize() const {
    return fSharedContext->caps()->maxTextureSize();
}
843 | | |
// Reports whether this context was created with protected-content support.
bool Context::supportsProtectedContent() const {
    return fSharedContext->isProtected() == Protected::kYes;
}
847 | | |
848 | | /////////////////////////////////////////////////////////////////////////////////// |
849 | | |
850 | | #if defined(GRAPHITE_TEST_UTILS) |
// Test-only synchronous readback: reads a (srcX, srcY, pm.width(), pm.height())
// rect from 'textureProxy' into 'pm' by driving the async-read path to completion.
// Returns false if the async read fails to produce a result.
bool ContextPriv::readPixels(const SkPixmap& pm,
                             const TextureProxy* textureProxy,
                             const SkImageInfo& srcImageInfo,
                             int srcX, int srcY) {
    auto rect = SkIRect::MakeXYWH(srcX, srcY, pm.width(), pm.height());
    // Stack-allocated completion state; safe because we block below until the
    // callback has fired.
    struct AsyncContext {
        bool fCalled = false;
        std::unique_ptr<const SkImage::AsyncReadResult> fResult;
    } asyncContext;
    fContext->asyncReadPixels(textureProxy, srcImageInfo, pm.info().colorInfo(), rect,
                              [](void* c, std::unique_ptr<const SkImage::AsyncReadResult> result) {
                                  auto context = static_cast<AsyncContext*>(c);
                                  context->fResult = std::move(result);
                                  context->fCalled = true;
                              },
                              &asyncContext);

    if (fContext->fSharedContext->caps()->allowCpuSync()) {
        // Backend supports blocking submits: one synchronous submit completes the read.
        fContext->submit(SyncToCpu::kYes);
    } else {
        fContext->submit(SyncToCpu::kNo);
        if (fContext->fSharedContext->backend() == BackendApi::kDawn) {
            // Dawn cannot block; busy-wait, ticking the device so its callbacks run.
            // Without SK_DAWN this loop body is empty — presumably that configuration
            // is unreachable here; TODO(review): confirm.
            while (!asyncContext.fCalled) {
#if defined(SK_DAWN)
                auto dawnContext = static_cast<DawnSharedContext*>(fContext->fSharedContext.get());
                dawnContext->device().Tick();
                fContext->checkAsyncWorkCompletion();
#endif
            }
        } else {
            SK_ABORT("Only Dawn supports non-synching contexts.");
        }
    }
    SkASSERT(asyncContext.fCalled);
    if (!asyncContext.fResult) {
        return false;
    }
    // Copy plane 0 of the async result into the caller's pixmap, converting from
    // the result's row pitch to the pixmap's.
    SkRectMemcpy(pm.writable_addr(), pm.rowBytes(), asyncContext.fResult->data(0),
                 asyncContext.fResult->rowBytes(0), pm.info().minRowBytes(),
                 pm.height());
    return true;
}
893 | | |
894 | 0 | void ContextPriv::deregisterRecorder(const Recorder* recorder) { |
895 | 0 | SKGPU_ASSERT_SINGLE_OWNER(fContext->singleOwner()) |
896 | 0 | for (auto it = fContext->fTrackedRecorders.begin(); |
897 | 0 | it != fContext->fTrackedRecorders.end(); |
898 | 0 | it++) { |
899 | 0 | if (*it == recorder) { |
900 | 0 | fContext->fTrackedRecorders.erase(it); |
901 | 0 | return; |
902 | 0 | } |
903 | 0 | } |
904 | 0 | } Unexecuted instantiation: skgpu::graphite::ContextPriv::deregisterRecorder(skgpu::graphite::Recorder const*) Unexecuted instantiation: skgpu::graphite::ContextPriv::deregisterRecorder(skgpu::graphite::Recorder const*) |
905 | | |
906 | 0 | bool ContextPriv::supportsPathRendererStrategy(PathRendererStrategy strategy) { |
907 | 0 | AtlasProvider::PathAtlasFlagsBitMask pathAtlasFlags = |
908 | 0 | AtlasProvider::QueryPathAtlasSupport(this->caps()); |
909 | 0 | switch (strategy) { |
910 | 0 | case PathRendererStrategy::kDefault: |
911 | 0 | return true; |
912 | 0 | case PathRendererStrategy::kComputeAnalyticAA: |
913 | 0 | case PathRendererStrategy::kComputeMSAA16: |
914 | 0 | case PathRendererStrategy::kComputeMSAA8: |
915 | 0 | return SkToBool(pathAtlasFlags & AtlasProvider::PathAtlasFlags::kCompute); |
916 | 0 | case PathRendererStrategy::kRasterAA: |
917 | 0 | return SkToBool(pathAtlasFlags & AtlasProvider::PathAtlasFlags::kRaster); |
918 | 0 | case PathRendererStrategy::kTessellation: |
919 | 0 | return true; |
920 | 0 | } |
921 | | |
922 | 0 | return false; |
923 | 0 | } |
924 | | |
925 | | #endif |
926 | | |
927 | | /////////////////////////////////////////////////////////////////////////////////// |
928 | | |
929 | | std::unique_ptr<Context> ContextCtorAccessor::MakeContext( |
930 | | sk_sp<SharedContext> sharedContext, |
931 | | std::unique_ptr<QueueManager> queueManager, |
932 | 0 | const ContextOptions& options) { |
933 | 0 | auto context = std::unique_ptr<Context>(new Context(std::move(sharedContext), |
934 | 0 | std::move(queueManager), |
935 | 0 | options)); |
936 | 0 | if (context && context->finishInitialization()) { |
937 | 0 | return context; |
938 | 0 | } else { |
939 | 0 | return nullptr; |
940 | 0 | } |
941 | 0 | } |
942 | | |
943 | | } // namespace skgpu::graphite |