/src/skia/src/gpu/ganesh/vk/GrVkOpsRenderPass.cpp
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"

#include "include/core/SkDrawable.h"
#include "include/core/SkRect.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/ganesh/vk/GrBackendDrawableInfo.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrPipeline.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/effects/GrTextureEffect.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImage.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkSemaphore.h"
#include "src/gpu/ganesh/vk/GrVkTexture.h"

using namespace skia_private;

/////////////////////////////////////////////////////////////////////////////
void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}
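
// Note: GrLoadOp::kDiscard maps to VK_ATTACHMENT_LOAD_OP_DONT_CARE (and
// GrStoreOp::kDiscard to VK_ATTACHMENT_STORE_OP_DONT_CARE); core Vulkan has no
// separate "discard" op, so "don't care" is the closest match.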

GrVkOpsRenderPass::GrVkOpsRenderPass(GrVkGpu* gpu) : fGpu(gpu) {}

void GrVkOpsRenderPass::setAttachmentLayouts(LoadFromResolve loadFromResolve) {
    bool withStencil = fCurrentRenderPass->hasStencilAttachment();
    bool withResolve = fCurrentRenderPass->hasResolveAttachment();

    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We need to use the GENERAL layout in this case since we'll be using texture barriers
        // with an input attachment.
        VkAccessFlags dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                  VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu, VK_IMAGE_LAYOUT_GENERAL, dstAccess, dstStages, false);
    } else {
        // Change the layout of our render target so it can be used as the color attachment.
        // TODO: If we know that we will never be blending or loading the attachment we could
        // drop the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
        fFramebuffer->colorAttachment()->setImageLayout(
                fGpu,
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                false);
    }

    if (withResolve) {
        GrVkImage* resolveAttachment = fFramebuffer->resolveAttachment();
        SkASSERT(resolveAttachment);
        if (loadFromResolve == LoadFromResolve::kLoad) {
            // We need input access to do the shader read and color read access to do the
            // attachment load.
            VkAccessFlags dstAccess =
                    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
            VkPipelineStageFlags dstStages = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
            resolveAttachment->setImageLayout(fGpu,
                                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                              dstAccess,
                                              dstStages,
                                              false);
        } else {
            resolveAttachment->setImageLayout(
                    fGpu,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                    false);
        }
    }

    // If we are using a stencil attachment we also need to update its layout.
    if (withStencil) {
        auto* vkStencil = fFramebuffer->stencilAttachment();
        SkASSERT(vkStencil);

        // We need the write and read access bits since we may load and store the stencil.
        // The initial load happens in the VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we
        // wait there.
        vkStencil->setImageLayout(fGpu,
                                  VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                  VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                  false);
    }
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements hold for the y and height components.
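//
// A hedged worked example, assuming a 64x64 granularity and a 1000-pixel-wide attachment:
// srcBounds spanning x = [70, 300) widens to [64, 320), since 64 is the largest multiple of 64
// not above 70 and 320 is the smallest not below 300. If rounding the right edge up would pass
// maxWidth, we clamp to [0, maxWidth) instead, which is why "equal to the width of the entire
// attachment" is also allowed.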
void adjust_bounds_to_granularity(SkIRect* dstBounds,
                                  const SkIRect& srcBounds,
                                  const VkExtent2D& granularity,
                                  int maxWidth,
                                  int maxHeight) {
    // Adjust width.
    if ((0 != granularity.width && 1 != granularity.width)) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height.
    if ((0 != granularity.height && 1 != granularity.height)) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}

bool GrVkOpsRenderPass::beginRenderPass(const VkClearValue& clearColor,
                                        LoadFromResolve loadFromResolve) {
    this->setAttachmentLayouts(loadFromResolve);

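    // When loading from the resolve attachment, the first subpass is the resolve load draw,
    // which we record inline on the primary command buffer; the secondary command buffer, if
    // any, only takes over in the main subpass.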
    bool firstSubpassUsesSecondaryCB =
            loadFromResolve != LoadFromResolve::kLoad && SkToBool(fCurrentSecondaryCommandBuffer);

    bool useFullBounds = fCurrentRenderPass->hasResolveAttachment() &&
                         fGpu->vkCaps().mustLoadFullImageWithDiscardableMSAA();

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();

    auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
            fOrigin,
            dimensions.height(), useFullBounds ? SkIRect::MakeSize(dimensions) : fBounds);

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = fCurrentRenderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds,
                                     nativeBounds,
                                     granularity,
                                     dimensions.width(),
                                     dimensions.height());
    } else {
        adjustedBounds = nativeBounds;
    }

    if (!fGpu->beginRenderPass(fCurrentRenderPass, fFramebuffer, &clearColor, fRenderTarget,
                               adjustedBounds, firstSubpassUsesSecondaryCB)) {
        if (fCurrentSecondaryCommandBuffer) {
            fCurrentSecondaryCommandBuffer->end(fGpu);
        }
        fCurrentRenderPass = nullptr;
        return false;
    }

    if (loadFromResolve == LoadFromResolve::kLoad) {
        this->loadResolveIntoMSAA(adjustedBounds);
    }

    return true;
}

bool GrVkOpsRenderPass::init(const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                             const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                             const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    VkAttachmentLoadOp loadOp;
    VkAttachmentStoreOp storeOp;
    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkColorOps(loadOp, storeOp);

    get_vk_load_store_ops(resolveInfo.fLoadOp, resolveInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkResolveOps(loadOp, storeOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp, &loadOp, &storeOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(loadOp, storeOp);

    GrVkResourceProvider::CompatibleRPHandle rpHandle = fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());
    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return false;
    }

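    // On drivers that prefer primary command buffers we record draws directly into the primary
    // buffer; otherwise we record into a secondary command buffer that gets executed inside the
    // render pass.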
    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers()) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return false;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

    VkClearValue vkClearColor;
    vkClearColor.color.float32[0] = colorInfo.fClearColor[0];
    vkClearColor.color.float32[1] = colorInfo.fClearColor[1];
    vkClearColor.color.float32[2] = colorInfo.fClearColor[2];
    vkClearColor.color.float32[3] = colorInfo.fClearColor[3];

    return this->beginRenderPass(vkClearColor, fLoadFromResolve);
}

bool GrVkOpsRenderPass::initWrapped() {
    SkASSERT(fFramebuffer->isExternal());
    fCurrentRenderPass = fFramebuffer->externalRenderPass();
    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->ref();

    fCurrentSecondaryCommandBuffer = fFramebuffer->externalCommandBuffer();
    if (!fCurrentSecondaryCommandBuffer) {
        return false;
    }
    return true;
}

GrVkOpsRenderPass::~GrVkOpsRenderPass() {
    this->reset();
}

GrGpu* GrVkOpsRenderPass::gpu() { return fGpu; }

GrVkCommandBuffer* GrVkOpsRenderPass::currentCommandBuffer() {
    if (fCurrentSecondaryCommandBuffer) {
        return fCurrentSecondaryCommandBuffer.get();
    }
    // We checked this when we set up the GrVkOpsRenderPass, and it should not have changed while
    // we are still using this object.
    SkASSERT(fGpu->currentCommandBuffer());
    return fGpu->currentCommandBuffer();
}

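// When loading from the resolve attachment, the render pass is presumed to have been built with
// an extra first subpass that draws the resolve data into the MSAA color attachment;
// loadMSAAFromResolve records that draw, after which we advance to the main subpass.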
void GrVkOpsRenderPass::loadResolveIntoMSAA(const SkIRect& nativeBounds) {
    fGpu->loadMSAAFromResolve(this->currentCommandBuffer(), *fCurrentRenderPass,
                              fFramebuffer->colorAttachment(), fFramebuffer->resolveAttachment(),
                              nativeBounds);
    fGpu->currentCommandBuffer()->nexSubpass(fGpu, SkToBool(fCurrentSecondaryCommandBuffer));

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an input
    // attachment. However, when we switched to the main subpass it will transition the layout
    // internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our tracking
    // of the layout to match the new layout.
    SkASSERT(fFramebuffer->resolveAttachment());
    fFramebuffer->resolveAttachment()->updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}

void GrVkOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    // We don't want to actually submit the secondary command buffer if it is wrapped.
    if (this->wrapsSecondaryCommandBuffer()) {
        // We pass ownership of the GrVkSecondaryCommandBuffer to the external framebuffer since
        // its lifetime matches how long we need to keep the GrManagedResources on the
        // GrVkSecondaryCommandBuffer alive.
        fFramebuffer->returnExternalGrSecondaryCommandBuffer(
                std::move(fCurrentSecondaryCommandBuffer));
        return;
    }

    if (fCurrentSecondaryCommandBuffer) {
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
}

bool GrVkOpsRenderPass::set(GrRenderTarget* rt,
                            sk_sp<GrVkFramebuffer> framebuffer,
                            GrSurfaceOrigin origin,
                            const SkIRect& bounds,
                            const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                            const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
                            const GrOpsRenderPass::LoadAndStoreInfo& resolveInfo,
                            GrVkRenderPass::SelfDependencyFlags selfDepFlags,
                            GrVkRenderPass::LoadFromResolve loadFromResolve,
                            const TArray<GrSurfaceProxy*, true>& sampledProxies) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fGpu == rt->getContext()->priv().getGpu());

#ifdef SK_DEBUG
    fIsActive = true;
#endif

    // We check once here that the GrVkGpu has a valid current command buffer instead of checking
    // each time we access it. If the command buffer is valid here, it should stay valid for the
    // duration of the render pass, since nothing should trigger a submit while this render pass
    // is active.
    if (!fGpu->currentCommandBuffer()) {
        return false;
    }

    this->INHERITED::set(rt, origin);

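    // Move every texture sampled by this render pass to SHADER_READ_ONLY_OPTIMAL up front; once
    // we are inside the render pass we can no longer record the barriers a layout transition
    // would require.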
    for (int i = 0; i < sampledProxies.size(); ++i) {
        if (sampledProxies[i]->isInstantiated()) {
            SkASSERT(sampledProxies[i]->asTextureProxy());
            GrVkTexture* vkTex = static_cast<GrVkTexture*>(sampledProxies[i]->peekTexture());
            SkASSERT(vkTex);
            GrVkImage* texture = vkTex->textureImage();
            SkASSERT(texture);
            texture->setImageLayout(
                    fGpu, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false);
        }
    }

    SkASSERT(framebuffer);
    fFramebuffer = std::move(framebuffer);

    SkASSERT(bounds.isEmpty() ||
             SkIRect::MakeSize(fFramebuffer->colorAttachment()->dimensions()).contains(bounds));
    fBounds = bounds;

    fSelfDependencyFlags = selfDepFlags;
    fLoadFromResolve = loadFromResolve;

    if (this->wrapsSecondaryCommandBuffer()) {
        return this->initWrapped();
    }

    return this->init(colorInfo, resolveInfo, stencilInfo);
}

void GrVkOpsRenderPass::reset() {
    if (fCurrentSecondaryCommandBuffer) {
        // The active GrVkCommandPool on the GrVkGpu should still be the same pool we got the
        // secondary command buffer from since we haven't submitted any work yet.
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer.release()->recycle(fGpu->cmdPool());
    }
    if (fCurrentRenderPass) {
        fCurrentRenderPass->unref();
        fCurrentRenderPass = nullptr;
    }
    fCurrentCBIsEmpty = true;

    fRenderTarget = nullptr;
    fFramebuffer.reset();

    fSelfDependencyFlags = GrVkRenderPass::SelfDependencyFlags::kNone;

    fLoadFromResolve = LoadFromResolve::kNo;
    fOverridePipelinesForResolveLoad = false;

#ifdef SK_DEBUG
    fIsActive = false;
#endif
}

bool GrVkOpsRenderPass::wrapsSecondaryCommandBuffer() const {
    return fFramebuffer->isExternal();
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

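    // Presumably the top stencil bit is the one reserved for clip masking here, so with an
    // 8-bit stencil buffer insideStencilMask clears to 0x80 while the outside case clears
    // everything to zero.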
    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary.
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setXYWH(0, 0, sb->width(), sb->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, sb->height() - scissor.rect().fBottom,
                       scissor.rect().fRight, sb->height() - scissor.rect().fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(fCurrentRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0;  // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkClearColorValue vkColor = {{color[0], color[1], color[2], color[3]}};

    // If we end up in a situation where we are calling clear without a scissor, then in general
    // it means we missed an opportunity higher up the stack to set the load op to be a clear.
    // However, there are situations where higher up we couldn't discard the previous ops and set
    // a clear load op (e.g. if we needed to execute a wait op). Thus we also have the empty check
    // here.
    // TODO: Make the waitOp a RenderTask instead so we can clear out the OpsTask for a clear. We
    // can then reenable this assert assuming we can't get messed up by a waitOp.
    //SkASSERT(!fCurrentCBIsEmpty || scissor);

    auto dimensions = fFramebuffer->colorAttachment()->dimensions();
    // We always do a sub-rect clear with clearAttachments since we are inside a render pass.
    VkClearRect clearRect;
    // Flip rect if necessary.
    SkIRect vkRect;
    if (!scissor.enabled()) {
        vkRect.setSize(dimensions);
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = scissor.rect();
    } else {
        vkRect.setLTRB(scissor.rect().fLeft, dimensions.height() - scissor.rect().fBottom,
                       scissor.rect().fRight, dimensions.height() - scissor.rect().fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    this->currentCommandBuffer()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::addAdditionalRenderPass(bool mustUseSecondaryCommandBuffer) {
    SkASSERT(!this->wrapsSecondaryCommandBuffer());

    bool withResolve = fFramebuffer->resolveAttachment();
    bool withStencil = fFramebuffer->stencilAttachment();

    // If we have a resolve attachment we must do a resolve load in the new render pass since we
    // broke up the original one. GrProgramInfos were made without any knowledge that the render
    // pass may be split up. Thus they may try to make VkPipelines that only use one subpass. We
    // need to override that to make sure they are compatible with the extra load subpass.
    fOverridePipelinesForResolveLoad |=
            withResolve && fCurrentRenderPass->loadFromResolve() != LoadFromResolve::kLoad;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkResolveOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);
    LoadFromResolve loadFromResolve = LoadFromResolve::kNo;
    if (withResolve) {
        vkColorOps = {VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE};
        loadFromResolve = LoadFromResolve::kLoad;
    }
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    SkASSERT(fCurrentRenderPass);
    fCurrentRenderPass->unref();
    fCurrentRenderPass = nullptr;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    auto fb = vkRT->getFramebuffer(withResolve, withStencil, fSelfDependencyFlags, loadFromResolve);
    if (!fb) {
        return;
    }
    fFramebuffer = sk_ref_sp(fb);

    SkASSERT(fFramebuffer);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            fFramebuffer->compatibleRenderPassHandle();
    SkASSERT(rpHandle.isValid());

    fCurrentRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                 vkColorOps,
                                                                 vkResolveOps,
                                                                 vkStencilOps);

    if (!fCurrentRenderPass) {
        return;
    }

    if (!fGpu->vkCaps().preferPrimaryOverSecondaryCommandBuffers() ||
        mustUseSecondaryCommandBuffer) {
        SkASSERT(fGpu->cmdPool());
        fCurrentSecondaryCommandBuffer = fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu);
        if (!fCurrentSecondaryCommandBuffer) {
            fCurrentRenderPass = nullptr;
            return;
        }
        fCurrentSecondaryCommandBuffer->begin(fGpu, fFramebuffer.get(), fCurrentRenderPass);
    }

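    // The clear value is unused here (the new pass loads or discards its attachments rather
    // than clearing them), but beginRenderPass() takes one, so pass zeroes.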
    VkClearValue vkClearColor;
    memset(&vkClearColor, 0, sizeof(VkClearValue));

    this->beginRenderPass(vkClearColor, loadFromResolve);
}

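// Vulkan disallows transfer (upload) commands while a render pass is active, so an inline
// upload has to end the current pass, perform the upload, and then start an additional pass
// that reloads the attachment contents.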
void GrVkOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    if (fCurrentSecondaryCommandBuffer) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
        fGpu->submitSecondaryCommandBuffer(std::move(fCurrentSecondaryCommandBuffer));
    }
    fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);

    // We pass in true here to signal that after the upload we need to set the upload textures
    // layout back to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
    state->doUpload(upload, true);

    this->addAdditionalRenderPass(false);
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onEnd() {
    if (fCurrentSecondaryCommandBuffer && !this->wrapsSecondaryCommandBuffer()) {
        fCurrentSecondaryCommandBuffer->end(fGpu);
    }
}

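// fCurrentPipelineBounds caches the intersection of the op's conservative draw bounds with the
// render target bounds; every scissor set while this pipeline is bound is clamped to it (see
// onSetScissorRect below).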
bool GrVkOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return false;
    }

    SkRect rtRect = SkRect::Make(fBounds);
    if (rtRect.intersect(drawBounds)) {
        rtRect.roundOut(&fCurrentPipelineBounds);
    } else {
        fCurrentPipelineBounds.setEmpty();
    }

    GrVkCommandBuffer* currentCB = this->currentCommandBuffer();
    SkASSERT(fCurrentRenderPass);

    VkRenderPass compatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    fCurrentPipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo, compatibleRenderPass, fOverridePipelinesForResolveLoad);
    if (!fCurrentPipelineState) {
        return false;
    }

    fCurrentPipelineState->bindPipeline(fGpu, currentCB);

    // Both the 'programInfo' and this renderPass have an origin. Since they come from the
    // same place (i.e., the target renderTargetProxy) they had best agree.
    SkASSERT(programInfo.origin() == fOrigin);

    auto colorAttachment = fFramebuffer->colorAttachment();
    if (!fCurrentPipelineState->setAndBindUniforms(fGpu, colorAttachment->dimensions(), programInfo,
                                                   currentCB)) {
        return false;
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrVkPipeline::SetDynamicScissorRectState(
                fGpu, currentCB, colorAttachment->dimensions(), fOrigin,
                fCurrentPipelineBounds);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, currentCB, colorAttachment->dimensions());
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, currentCB,
                                               programInfo.pipeline().writeSwizzle(),
                                               programInfo.pipeline().getXferProcessor());

    return true;
}

void GrVkOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkIRect combinedScissorRect;
    if (!combinedScissorRect.intersect(fCurrentPipelineBounds, scissor)) {
        combinedScissorRect = SkIRect::MakeEmpty();
    }
    GrVkPipeline::SetDynamicScissorRectState(fGpu, this->currentCommandBuffer(),
                                             fFramebuffer->colorAttachment()->dimensions(),
                                             fOrigin, combinedScissorRect);
}

#ifdef SK_DEBUG
void check_sampled_texture(GrTexture* tex, GrAttachment* colorAttachment, GrVkGpu* gpu) {
    SkASSERT(!tex->isProtected() || (colorAttachment->isProtected() && gpu->protectedContext()));
    auto vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
    SkASSERT(vkTex->currentLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
#endif

bool GrVkOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                       const GrSurfaceProxy* const geomProcTextures[],
                                       const GrPipeline& pipeline) {
#ifdef SK_DEBUG
    SkASSERT(fCurrentPipelineState);
    auto colorAttachment = fFramebuffer->colorAttachment();
    for (int i = 0; i < geomProc.numTextureSamplers(); ++i) {
        check_sampled_texture(geomProcTextures[i]->peekTexture(), colorAttachment, fGpu);
    }
    pipeline.visitTextureEffects([&](const GrTextureEffect& te) {
        check_sampled_texture(te.texture(), colorAttachment, fGpu);
    });
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        check_sampled_texture(dstTexture, colorAttachment, fGpu);
    }
#endif
    if (!fCurrentPipelineState->setAndBindTextures(fGpu, geomProc, pipeline, geomProcTextures,
                                                   this->currentCommandBuffer())) {
        return false;
    }
    if (fSelfDependencyFlags == SelfDependencyFlags::kForInputAttachment) {
        // We bind the color attachment as an input attachment.
        auto ds = fFramebuffer->colorAttachment()->inputDescSetForBlending(fGpu);
        if (!ds) {
            return false;
        }
        return fCurrentPipelineState->setAndBindInputAttachment(fGpu, std::move(ds),
                                                                this->currentCommandBuffer());
    }
    return true;
}

void GrVkOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                      sk_sp<const GrBuffer> instanceBuffer,
                                      sk_sp<const GrBuffer> vertexBuffer,
                                      GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    SkASSERT(!fGpu->caps()->usePrimitiveRestart());  // Ignore primitiveRestart parameter.

    GrVkCommandBuffer* currCmdBuf = this->currentCommandBuffer();
    SkASSERT(currCmdBuf);

    // There is no need to put any memory barriers to make sure host writes have finished here.
    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
    // an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;
    if (vertexBuffer) {
        SkDEBUGCODE(auto* gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer.get()));
        SkASSERT(!gpuVertexBuffer->isCpuBuffer());
        SkASSERT(!gpuVertexBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(vertexBuffer));
    }
    if (instanceBuffer) {
        SkDEBUGCODE(auto* gpuInstanceBuffer =
                            static_cast<const GrGpuBuffer*>(instanceBuffer.get()));
        SkASSERT(!gpuInstanceBuffer->isCpuBuffer());
        SkASSERT(!gpuInstanceBuffer->isMapped());
        currCmdBuf->bindInputBuffer(fGpu, binding++, std::move(instanceBuffer));
    }
    if (indexBuffer) {
        SkDEBUGCODE(auto* gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer.get()));
        SkASSERT(!gpuIndexBuffer->isCpuBuffer());
        SkASSERT(!gpuIndexBuffer->isMapped());
        currCmdBuf->bindIndexBuffer(fGpu, std::move(indexBuffer));
    }
}

void GrVkOpsRenderPass::onDrawInstanced(int instanceCount,
                                        int baseInstance,
                                        int vertexCount, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedInstanced(int indexCount, int baseIndex, int instanceCount,
                                               int baseInstance, int baseVertex) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    SkASSERT(fCurrentPipelineState);
    this->currentCommandBuffer()->drawIndexed(fGpu, indexCount, instanceCount,
                                              baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                       int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);

    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndirectCommand);
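    // Split the draws into chunks the driver can accept. As a hedged example, if maxDrawCount
    // were 1 (presumably a driver without native multi-draw support), a drawCount of 3 issues
    // three separate indirect draws, each advancing the buffer offset by one stride.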
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

void GrVkOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset,
                                              int drawCount) {
    SkASSERT(!drawIndirectBuffer->isCpuBuffer());
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }
    const GrVkCaps& caps = fGpu->vkCaps();
    SkASSERT(caps.nativeDrawIndirectSupport());
    SkASSERT(fCurrentPipelineState);
    const uint32_t maxDrawCount = caps.maxDrawIndirectDrawCount();
    uint32_t remainingDraws = drawCount;
    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
    while (remainingDraws >= 1) {
        uint32_t currDrawCount = std::min(remainingDraws, maxDrawCount);
        this->currentCommandBuffer()->drawIndexedIndirect(
                fGpu, sk_ref_sp(drawIndirectBuffer), offset, currDrawCount, stride);
        remainingDraws -= currDrawCount;
        offset += stride * currDrawCount;
        fGpu->stats()->incNumDraws();
    }
    fCurrentCBIsEmpty = false;
}

////////////////////////////////////////////////////////////////////////////////

void GrVkOpsRenderPass::onExecuteDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    if (!fCurrentRenderPass) {
        SkASSERT(fGpu->isDeviceLost());
        return;
    }

    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

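    // The drawable records its content into a secondary command buffer that we hand it below,
    // so if we are currently recording inline into the primary buffer we must break the pass
    // and start one that uses a secondary command buffer.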
    if (!fCurrentSecondaryCommandBuffer) {
        fGpu->endRenderPass(fRenderTarget, fOrigin, fBounds);
        this->addAdditionalRenderPass(true);
        // We may have failed to start a new render pass.
        if (!fCurrentRenderPass) {
            SkASSERT(fGpu->isDeviceLost());
            return;
        }
    }
    SkASSERT(fCurrentSecondaryCommandBuffer);

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = fCurrentSecondaryCommandBuffer->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = fCurrentRenderPass->vkRenderPass();
    SkAssertResult(fCurrentRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = fFramebuffer->colorAttachment()->imageFormat();
    vkInfo.fDrawBounds = &bounds;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    vkInfo.fFromSwapchainOrAndroidWindow =
            fFramebuffer->colorAttachment()->vkImageInfo().fPartOfSwapchainOrAndroidWindow;
#endif  // SK_BUILD_FOR_ANDROID_FRAMEWORK

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be invalid.
    this->currentCommandBuffer()->invalidateState();
    // Also assume that the drawable produced output.
    fCurrentCBIsEmpty = false;

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));
}