/src/skia/src/gpu/graphite/vk/VulkanCommandBuffer.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2022 Google LLC |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license that can be |
5 | | * found in the LICENSE file. |
6 | | */ |
7 | | |
8 | | #include "src/gpu/graphite/vk/VulkanCommandBuffer.h" |
9 | | |
10 | | #include "include/gpu/MutableTextureState.h" |
11 | | #include "include/gpu/graphite/BackendSemaphore.h" |
12 | | #include "include/gpu/graphite/vk/VulkanGraphiteTypes.h" |
13 | | #include "include/gpu/vk/VulkanMutableTextureState.h" |
14 | | #include "include/private/base/SkTArray.h" |
15 | | #include "src/gpu/DataUtils.h" |
16 | | #include "src/gpu/graphite/ContextUtils.h" |
17 | | #include "src/gpu/graphite/DescriptorData.h" |
18 | | #include "src/gpu/graphite/Log.h" |
19 | | #include "src/gpu/graphite/RenderPassDesc.h" |
20 | | #include "src/gpu/graphite/Surface_Graphite.h" |
21 | | #include "src/gpu/graphite/TextureProxy.h" |
22 | | #include "src/gpu/graphite/UniformManager.h" |
23 | | #include "src/gpu/graphite/vk/VulkanBuffer.h" |
24 | | #include "src/gpu/graphite/vk/VulkanDescriptorSet.h" |
25 | | #include "src/gpu/graphite/vk/VulkanFramebuffer.h" |
26 | | #include "src/gpu/graphite/vk/VulkanGraphiteUtilsPriv.h" |
27 | | #include "src/gpu/graphite/vk/VulkanRenderPass.h" |
28 | | #include "src/gpu/graphite/vk/VulkanSampler.h" |
29 | | #include "src/gpu/graphite/vk/VulkanSharedContext.h" |
30 | | #include "src/gpu/graphite/vk/VulkanTexture.h" |
31 | | #include "src/gpu/vk/VulkanUtilsPriv.h" |
32 | | |
33 | | using namespace skia_private; |
34 | | |
35 | | namespace skgpu::graphite { |
36 | | |
37 | | class VulkanDescriptorSet; |
38 | | |
39 | | std::unique_ptr<VulkanCommandBuffer> VulkanCommandBuffer::Make( |
40 | | const VulkanSharedContext* sharedContext, |
41 | 0 | VulkanResourceProvider* resourceProvider) { |
42 | | // Create VkCommandPool |
43 | 0 | VkCommandPoolCreateFlags cmdPoolCreateFlags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT; |
44 | 0 | if (sharedContext->isProtected() == Protected::kYes) { |
45 | 0 | cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT; |
46 | 0 | } |
47 | |
48 | 0 | const VkCommandPoolCreateInfo cmdPoolInfo = { |
49 | 0 | VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType |
50 | 0 | nullptr, // pNext |
51 | 0 | cmdPoolCreateFlags, // CmdPoolCreateFlags |
52 | 0 | sharedContext->queueIndex(), // queueFamilyIndex |
53 | 0 | }; |
54 | 0 | VkResult result; |
55 | 0 | VkCommandPool pool; |
56 | 0 | VULKAN_CALL_RESULT(sharedContext, |
57 | 0 | result, |
58 | 0 | CreateCommandPool(sharedContext->device(), &cmdPoolInfo, nullptr, &pool)); |
59 | 0 | if (result != VK_SUCCESS) { |
60 | 0 | return nullptr; |
61 | 0 | } |
62 | | |
63 | 0 | const VkCommandBufferAllocateInfo cmdInfo = { |
64 | 0 | VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType |
65 | 0 | nullptr, // pNext |
66 | 0 | pool, // commandPool |
67 | 0 | VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level |
68 | 0 | 1 // bufferCount |
69 | 0 | }; |
70 | |
71 | 0 | VkCommandBuffer primaryCmdBuffer; |
72 | 0 | VULKAN_CALL_RESULT( |
73 | 0 | sharedContext, |
74 | 0 | result, |
75 | 0 | AllocateCommandBuffers(sharedContext->device(), &cmdInfo, &primaryCmdBuffer)); |
76 | 0 | if (result != VK_SUCCESS) { |
77 | 0 | VULKAN_CALL(sharedContext->interface(), |
78 | 0 | DestroyCommandPool(sharedContext->device(), pool, nullptr)); |
79 | 0 | return nullptr; |
80 | 0 | } |
81 | | |
82 | 0 | return std::unique_ptr<VulkanCommandBuffer>(new VulkanCommandBuffer(pool, |
83 | 0 | primaryCmdBuffer, |
84 | 0 | sharedContext, |
85 | 0 | resourceProvider)); |
86 | 0 | }
87 | | |
88 | | VulkanCommandBuffer::VulkanCommandBuffer(VkCommandPool pool, |
89 | | VkCommandBuffer primaryCommandBuffer, |
90 | | const VulkanSharedContext* sharedContext, |
91 | | VulkanResourceProvider* resourceProvider) |
92 | | : fPool(pool) |
93 | | , fPrimaryCommandBuffer(primaryCommandBuffer) |
94 | | , fSharedContext(sharedContext) |
95 | 0 | , fResourceProvider(resourceProvider) { |
96 | | // When making a new command buffer, we automatically begin the command buffer |
97 | 0 | this->begin(); |
98 | 0 | } |
99 | | |
100 | 0 | VulkanCommandBuffer::~VulkanCommandBuffer() { |
101 | 0 | if (fActive) { |
102 | | // Need to end command buffer before deleting it |
103 | 0 | VULKAN_CALL(fSharedContext->interface(), EndCommandBuffer(fPrimaryCommandBuffer)); |
104 | 0 | fActive = false; |
105 | 0 | } |
106 | |
107 | 0 | if (VK_NULL_HANDLE != fSubmitFence) { |
108 | 0 | VULKAN_CALL(fSharedContext->interface(), |
109 | 0 | DestroyFence(fSharedContext->device(), fSubmitFence, nullptr)); |
110 | 0 | } |
111 | | // This should delete any command buffers as well. |
112 | 0 | VULKAN_CALL(fSharedContext->interface(), |
113 | 0 | DestroyCommandPool(fSharedContext->device(), fPool, nullptr)); |
114 | 0 | } |
115 | | |
116 | 0 | void VulkanCommandBuffer::onResetCommandBuffer() { |
117 | 0 | SkASSERT(!fActive); |
118 | 0 | VULKAN_CALL_ERRCHECK(fSharedContext, ResetCommandPool(fSharedContext->device(), fPool, 0)); |
119 | 0 | fActiveGraphicsPipeline = nullptr; |
120 | 0 | fBindUniformBuffers = true; |
121 | 0 | fBoundIndexBuffer = VK_NULL_HANDLE; |
122 | 0 | fBoundIndexBufferOffset = 0; |
123 | 0 | fBoundIndirectBuffer = VK_NULL_HANDLE; |
124 | 0 | fBoundIndirectBufferOffset = 0; |
125 | 0 | fTextureSamplerDescSetToBind = VK_NULL_HANDLE; |
126 | 0 | fNumTextureSamplers = 0; |
127 | 0 | fUniformBuffersToBind.fill({}); |
128 | 0 | for (int i = 0; i < 4; ++i) { |
129 | 0 | fCachedBlendConstant[i] = -1.0; |
130 | 0 | } |
131 | 0 | for (auto& boundInputBuffer : fBoundInputBuffers) { |
132 | 0 | boundInputBuffer = VK_NULL_HANDLE; |
133 | 0 | } |
134 | 0 | for (auto& boundInputOffset : fBoundInputBufferOffsets) { |
135 | 0 | boundInputOffset = 0; |
136 | 0 | } |
137 | 0 | }
138 | | |
139 | 0 | bool VulkanCommandBuffer::setNewCommandBufferResources() { |
140 | 0 | this->begin(); |
141 | 0 | return true; |
142 | 0 | } |
143 | | |
144 | 0 | void VulkanCommandBuffer::begin() { |
145 | 0 | SkASSERT(!fActive); |
146 | 0 | VkCommandBufferBeginInfo cmdBufferBeginInfo; |
147 | 0 | memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo)); |
148 | 0 | cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; |
149 | 0 | cmdBufferBeginInfo.pNext = nullptr; |
150 | 0 | cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; |
151 | 0 | cmdBufferBeginInfo.pInheritanceInfo = nullptr; |
152 | |
153 | 0 | VULKAN_CALL_ERRCHECK(fSharedContext, |
154 | 0 | BeginCommandBuffer(fPrimaryCommandBuffer, &cmdBufferBeginInfo)); |
155 | 0 | fActive = true; |
156 | 0 | }
157 | | |
158 | 0 | void VulkanCommandBuffer::end() { |
159 | 0 | SkASSERT(fActive); |
160 | 0 | SkASSERT(!fActiveRenderPass); |
161 | |
162 | 0 | this->submitPipelineBarriers(); |
163 | |
164 | 0 | VULKAN_CALL_ERRCHECK(fSharedContext, EndCommandBuffer(fPrimaryCommandBuffer)); |
165 | |
166 | 0 | fActive = false; |
167 | 0 | }
168 | | |
169 | | void VulkanCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores, |
170 | 0 | const BackendSemaphore* waitSemaphores) { |
171 | 0 | if (!waitSemaphores) { |
172 | 0 | SkASSERT(numWaitSemaphores == 0); |
173 | 0 | return; |
174 | 0 | } |
175 | | |
176 | 0 | for (size_t i = 0; i < numWaitSemaphores; ++i) { |
177 | 0 | auto& semaphore = waitSemaphores[i]; |
178 | 0 | if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) { |
179 | 0 | fWaitSemaphores.push_back(BackendSemaphores::GetVkSemaphore(semaphore)); |
180 | 0 | } |
181 | 0 | } |
182 | 0 | }
183 | | |
184 | | void VulkanCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores, |
185 | 0 | const BackendSemaphore* signalSemaphores) { |
186 | 0 | if (!signalSemaphores) { |
187 | 0 | SkASSERT(numSignalSemaphores == 0); |
188 | 0 | return; |
189 | 0 | } |
190 | | |
191 | 0 | for (size_t i = 0; i < numSignalSemaphores; ++i) { |
192 | 0 | auto& semaphore = signalSemaphores[i]; |
193 | 0 | if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) { |
194 | 0 | fSignalSemaphores.push_back(BackendSemaphores::GetVkSemaphore(semaphore)); |
195 | 0 | } |
196 | 0 | } |
197 | 0 | }
198 | | |
199 | | void VulkanCommandBuffer::prepareSurfaceForStateUpdate(SkSurface* targetSurface, |
200 | 0 | const MutableTextureState* newState) { |
201 | 0 | TextureProxy* textureProxy = static_cast<Surface*>(targetSurface)->backingTextureProxy(); |
202 | 0 | VulkanTexture* texture = static_cast<VulkanTexture*>(textureProxy->texture()); |
203 | | |
204 | | // Even though internally we use these helpers for getting src access flags and stages, they |
205 | | // can also be used for general dst flags since we don't know exactly what the client |
206 | | // plans on using the image for. |
207 | 0 | VkImageLayout newLayout = skgpu::MutableTextureStates::GetVkImageLayout(newState); |
208 | 0 | if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) { |
209 | 0 | newLayout = texture->currentLayout(); |
210 | 0 | } |
211 | 0 | VkPipelineStageFlags dstStage = VulkanTexture::LayoutToPipelineSrcStageFlags(newLayout); |
212 | 0 | VkAccessFlags dstAccess = VulkanTexture::LayoutToSrcAccessMask(newLayout); |
213 | |
214 | 0 | uint32_t currentQueueFamilyIndex = texture->currentQueueFamilyIndex(); |
215 | 0 | uint32_t newQueueFamilyIndex = skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState); |
216 | 0 | auto isSpecialQueue = [](uint32_t queueFamilyIndex) { |
217 | 0 | return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL || |
218 | 0 | queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT; |
219 | 0 | }; |
220 | 0 | if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) { |
221 | | // It is illegal to have both the new and old queue be special queue families (i.e. external |
222 | | // or foreign). |
223 | 0 | return; |
224 | 0 | } |
225 | | |
226 | 0 | texture->setImageLayoutAndQueueIndex(this, |
227 | 0 | newLayout, |
228 | 0 | dstAccess, |
229 | 0 | dstStage, |
230 | 0 | false, |
231 | 0 | newQueueFamilyIndex); |
232 | 0 | } |
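Editor's note: the queue-family handling above ultimately becomes a Vulkan queue-family ownership transfer barrier. The sketch below is illustrative only (the helper name, layouts, and access masks are assumptions, not Skia API); it shows the structure of such a transfer and why at most one side may be a "special" family.

    // Hedged sketch: releasing an image from the graphics queue family to an external
    // consumer. Only one of src/dst may be VK_QUEUE_FAMILY_EXTERNAL or _FOREIGN_EXT,
    // which is what the early return above guards against.
    VkImageMemoryBarrier makeReleaseToExternalBarrier(VkImage image,
                                                      uint32_t graphicsQueueFamilyIndex,
                                                      VkImageLayout currentLayout,
                                                      VkImageLayout requestedLayout) {
        VkImageMemoryBarrier barrier = {};
        barrier.sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcAccessMask       = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;  // placeholder
        barrier.dstAccessMask       = 0;  // the external owner acquires with its own barrier
        barrier.oldLayout           = currentLayout;
        barrier.newLayout           = requestedLayout;
        barrier.srcQueueFamilyIndex = graphicsQueueFamilyIndex;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL;
        barrier.image               = image;
        barrier.subresourceRange    = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
        return barrier;
    }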
233 | | |
234 | | static VkResult submit_to_queue(const VulkanSharedContext* sharedContext, |
235 | | VkQueue queue, |
236 | | VkFence fence, |
237 | | uint32_t waitCount, |
238 | | const VkSemaphore* waitSemaphores, |
239 | | const VkPipelineStageFlags* waitStages, |
240 | | uint32_t commandBufferCount, |
241 | | const VkCommandBuffer* commandBuffers, |
242 | | uint32_t signalCount, |
243 | | const VkSemaphore* signalSemaphores, |
244 | 0 | Protected protectedContext) { |
245 | 0 | VkProtectedSubmitInfo protectedSubmitInfo; |
246 | 0 | if (protectedContext == Protected::kYes) { |
247 | 0 | memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo)); |
248 | 0 | protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO; |
249 | 0 | protectedSubmitInfo.pNext = nullptr; |
250 | 0 | protectedSubmitInfo.protectedSubmit = VK_TRUE; |
251 | 0 | } |
252 | |
253 | 0 | VkSubmitInfo submitInfo; |
254 | 0 | memset(&submitInfo, 0, sizeof(VkSubmitInfo)); |
255 | 0 | submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; |
256 | 0 | submitInfo.pNext = protectedContext == Protected::kYes ? &protectedSubmitInfo : nullptr; |
257 | 0 | submitInfo.waitSemaphoreCount = waitCount; |
258 | 0 | submitInfo.pWaitSemaphores = waitSemaphores; |
259 | 0 | submitInfo.pWaitDstStageMask = waitStages; |
260 | 0 | submitInfo.commandBufferCount = commandBufferCount; |
261 | 0 | submitInfo.pCommandBuffers = commandBuffers; |
262 | 0 | submitInfo.signalSemaphoreCount = signalCount; |
263 | 0 | submitInfo.pSignalSemaphores = signalSemaphores; |
264 | 0 | VkResult result; |
265 | 0 | VULKAN_CALL_RESULT(sharedContext, result, QueueSubmit(queue, 1, &submitInfo, fence)); |
266 | 0 | return result; |
267 | 0 | }
268 | | |
269 | 0 | bool VulkanCommandBuffer::submit(VkQueue queue) { |
270 | 0 | this->end(); |
271 | |
272 | 0 | auto device = fSharedContext->device(); |
273 | 0 | VkResult err; |
274 | |
275 | 0 | if (fSubmitFence == VK_NULL_HANDLE) { |
276 | 0 | VkFenceCreateInfo fenceInfo; |
277 | 0 | memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo)); |
278 | 0 | fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; |
279 | 0 | VULKAN_CALL_RESULT( |
280 | 0 | fSharedContext, err, CreateFence(device, &fenceInfo, nullptr, &fSubmitFence)); |
281 | 0 | if (err) { |
282 | 0 | fSubmitFence = VK_NULL_HANDLE; |
283 | 0 | return false; |
284 | 0 | } |
285 | 0 | } else { |
286 | | // This cannot return DEVICE_LOST so we assert we succeeded. |
287 | 0 | VULKAN_CALL_RESULT(fSharedContext, err, ResetFences(device, 1, &fSubmitFence)); |
288 | 0 | SkASSERT(err == VK_SUCCESS); |
289 | 0 | } |
290 | | |
291 | 0 | SkASSERT(fSubmitFence != VK_NULL_HANDLE); |
292 | 0 | int waitCount = fWaitSemaphores.size(); |
293 | 0 | TArray<VkPipelineStageFlags> vkWaitStages(waitCount); |
294 | 0 | for (int i = 0; i < waitCount; ++i) { |
295 | 0 | vkWaitStages.push_back(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | |
296 | 0 | VK_PIPELINE_STAGE_TRANSFER_BIT); |
297 | 0 | } |
298 | |
299 | 0 | VkResult submitResult = submit_to_queue(fSharedContext, |
300 | 0 | queue, |
301 | 0 | fSubmitFence, |
302 | 0 | waitCount, |
303 | 0 | fWaitSemaphores.data(), |
304 | 0 | vkWaitStages.data(), |
305 | 0 | /*commandBufferCount*/ 1, |
306 | 0 | &fPrimaryCommandBuffer, |
307 | 0 | fSignalSemaphores.size(), |
308 | 0 | fSignalSemaphores.data(), |
309 | 0 | fSharedContext->isProtected()); |
310 | 0 | fWaitSemaphores.clear(); |
311 | 0 | fSignalSemaphores.clear(); |
312 | 0 | if (submitResult != VK_SUCCESS) { |
313 | | // If we failed to submit because of a device lost, we still need to wait for the fence to |
314 | | // signal before deleting. However, there is an ARM bug (b/359822580) where the driver early |
315 | | // outs on the fence wait if in a device lost state and thus we can't wait on it. Instead, |
316 | | // we just wait on the queue to finish. We're already in a state that's going to cause us to |
317 | | // restart the whole device, so waiting on the queue shouldn't have any performance impact. |
318 | 0 | if (submitResult == VK_ERROR_DEVICE_LOST) { |
319 | 0 | VULKAN_CALL(fSharedContext->interface(), QueueWaitIdle(queue)); |
320 | 0 | } else { |
321 | 0 | SkASSERT(submitResult == VK_ERROR_OUT_OF_HOST_MEMORY || |
322 | 0 | submitResult == VK_ERROR_OUT_OF_DEVICE_MEMORY); |
323 | 0 | } |
324 | |
325 | 0 | VULKAN_CALL(fSharedContext->interface(), DestroyFence(device, fSubmitFence, nullptr)); |
326 | 0 | fSubmitFence = VK_NULL_HANDLE; |
327 | 0 | return false; |
328 | 0 | } |
329 | 0 | return true; |
330 | 0 | }
331 | | |
332 | 0 | bool VulkanCommandBuffer::isFinished() { |
333 | 0 | SkASSERT(!fActive); |
334 | 0 | if (VK_NULL_HANDLE == fSubmitFence) { |
335 | 0 | return true; |
336 | 0 | } |
337 | | |
338 | 0 | VkResult err; |
339 | 0 | VULKAN_CALL_RESULT_NOCHECK(fSharedContext->interface(), err, |
340 | 0 | GetFenceStatus(fSharedContext->device(), fSubmitFence)); |
341 | 0 | switch (err) { |
342 | 0 | case VK_SUCCESS: |
343 | 0 | case VK_ERROR_DEVICE_LOST: |
344 | 0 | return true; |
345 | | |
346 | 0 | case VK_NOT_READY: |
347 | 0 | return false; |
348 | | |
349 | 0 | default: |
350 | 0 | SKGPU_LOG_F("Error calling vkGetFenceStatus. Error: %d", err); |
351 | 0 | SK_ABORT("Got an invalid fence status"); |
352 | 0 | return false; |
353 | 0 | } |
354 | 0 | }
355 | | |
356 | 0 | void VulkanCommandBuffer::waitUntilFinished() { |
357 | 0 | if (fSubmitFence == VK_NULL_HANDLE) { |
358 | 0 | return; |
359 | 0 | } |
360 | 0 | VULKAN_CALL_ERRCHECK(fSharedContext, |
361 | 0 | WaitForFences(fSharedContext->device(), |
362 | 0 | 1, |
363 | 0 | &fSubmitFence, |
364 | 0 | /*waitAll=*/true, |
365 | 0 | /*timeout=*/UINT64_MAX)); |
366 | 0 | }
367 | | |
368 | 0 | void VulkanCommandBuffer::updateIntrinsicUniforms(SkIRect viewport) { |
369 | 0 | SkASSERT(fActive && !fActiveRenderPass); |
370 | | |
371 | | // The SkSL has declared these as a top-level interface block, which will use std140 in Vulkan. |
372 | | // If we switch to supporting push constants here, it would be std430 instead. |
373 | 0 | UniformManager intrinsicValues{Layout::kStd140}; |
374 | 0 | CollectIntrinsicUniforms(fSharedContext->caps(), viewport, fReplayTranslation, fDstCopyOffset, |
375 | 0 | &intrinsicValues); |
376 | 0 | SkSpan<const char> bytes = intrinsicValues.finish(); |
377 | 0 | SkASSERT(bytes.size_bytes() == VulkanResourceProvider::kIntrinsicConstantSize); |
378 | |
379 | 0 | sk_sp<Buffer> intrinsicUniformBuffer = fResourceProvider->refIntrinsicConstantBuffer(); |
380 | 0 | const VulkanBuffer* intrinsicVulkanBuffer = |
381 | 0 | static_cast<VulkanBuffer*>(intrinsicUniformBuffer.get()); |
382 | 0 | SkASSERT(intrinsicVulkanBuffer && intrinsicVulkanBuffer->size() >= bytes.size_bytes()); |
383 | |
384 | 0 | fUniformBuffersToBind[VulkanGraphicsPipeline::kIntrinsicUniformBufferIndex] = { |
385 | 0 | intrinsicUniformBuffer.get(), |
386 | 0 | /*offset=*/0, |
387 | 0 | SkTo<uint32_t>(bytes.size_bytes()) |
388 | 0 | }; |
389 | |
390 | 0 | this->updateBuffer(intrinsicVulkanBuffer, bytes.data(), bytes.size_bytes()); |
391 | | |
392 | | // Ensure the buffer update is completed and made visible before reading |
393 | 0 | intrinsicVulkanBuffer->setBufferAccess(this, VK_ACCESS_UNIFORM_READ_BIT, |
394 | 0 | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT); |
395 | 0 | this->trackResource(std::move(intrinsicUniformBuffer)); |
396 | 0 | }
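Editor's note: a hedged illustration of why the std140-vs-std430 remark above matters for the CPU-side packing done by UniformManager. The member list is hypothetical, not the real intrinsic uniform block; the point is only the array-stride difference between the two layouts.

    // In std140 each element of a scalar array is padded to a 16-byte stride; std430
    // (what push constants would use) keeps the element's natural 4-byte stride.
    //   SkSL side (hypothetical): uniform Intrinsics { float4 viewport; float coeffs[4]; };
    struct IntrinsicsStd140Mirror {
        float viewport[4];       // offset 0, 16 bytes
        float coeffs[4][4];      // offsets 16, 32, 48, 64: each float padded out to 16 bytes
    };                           // sizeof == 80
    struct IntrinsicsStd430Mirror {
        float viewport[4];       // offset 0, 16 bytes
        float coeffs[4];         // offset 16: tightly packed
    };                           // sizeof == 32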
397 | | |
398 | | bool VulkanCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc, |
399 | | SkIRect renderPassBounds, |
400 | | const Texture* colorTexture, |
401 | | const Texture* resolveTexture, |
402 | | const Texture* depthStencilTexture, |
403 | | SkIRect viewport, |
404 | 0 | const DrawPassList& drawPasses) { |
405 | 0 | for (const auto& drawPass : drawPasses) { |
406 | | // Our current implementation of setting texture image layouts does not allow layout changes |
407 | | // once we have already begun a render pass, so prior to any other commands, set the layout |
408 | | // of all sampled textures from the drawpass so they can be sampled from the shader. |
409 | 0 | const skia_private::TArray<sk_sp<TextureProxy>>& sampledTextureProxies = |
410 | 0 | drawPass->sampledTextures(); |
411 | 0 | for (const sk_sp<TextureProxy>& textureProxy : sampledTextureProxies) { |
412 | 0 | VulkanTexture* vulkanTexture = const_cast<VulkanTexture*>( |
413 | 0 | static_cast<const VulkanTexture*>( |
414 | 0 | textureProxy->texture())); |
415 | 0 | vulkanTexture->setImageLayout(this, |
416 | 0 | VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, |
417 | 0 | VK_ACCESS_SHADER_READ_BIT, |
418 | 0 | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, |
419 | 0 | false); |
420 | 0 | this->submitPipelineBarriers(); |
421 | 0 | } |
422 | 0 | } |
423 | |
424 | 0 | this->updateIntrinsicUniforms(viewport); |
425 | 0 | this->setViewport(viewport); |
426 | |
427 | 0 | if (!this->beginRenderPass(renderPassDesc, |
428 | 0 | renderPassBounds, |
429 | 0 | colorTexture, |
430 | 0 | resolveTexture, |
431 | 0 | depthStencilTexture)) { |
432 | 0 | return false; |
433 | 0 | } |
434 | | |
435 | 0 | for (const auto& drawPass : drawPasses) { |
436 | 0 | this->addDrawPass(drawPass.get()); |
437 | 0 | } |
438 | |
439 | 0 | this->endRenderPass(); |
440 | 0 | return true; |
441 | 0 | } |
442 | | |
443 | 0 | bool VulkanCommandBuffer::updateLoadMSAAVertexBuffer() { |
444 | 0 | const Buffer* vertexBuffer = fResourceProvider->loadMSAAVertexBuffer(); |
445 | 0 | if (!vertexBuffer) { |
446 | 0 | return false; |
447 | 0 | } |
448 | 0 | const VulkanBuffer* vulkanVertexBuffer = static_cast<const VulkanBuffer*>(vertexBuffer); |
449 | 0 | SkASSERT(vulkanVertexBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); |
450 | | |
451 | | // Determine vertices in NDC. TODO: When only wanting to draw a portion of the resolve |
452 | | // texture, these values will need to be dynamically determined. For now, simply span the |
453 | | // range of NDC since we want to reference the entire resolve texture. |
454 | 0 | static constexpr float kVertices[8] = { 1.f, 1.f, |
455 | 0 | 1.f, -1.f, |
456 | 0 | -1.f, 1.f, |
457 | 0 | -1.f, -1.f }; |
458 | 0 | this->updateBuffer(vulkanVertexBuffer, |
459 | 0 | &kVertices, |
460 | 0 | VulkanResourceProvider::kLoadMSAAVertexBufferSize); |
461 | | |
462 | | // Ensure the buffer update is completed and made visible before reading |
463 | 0 | vulkanVertexBuffer->setBufferAccess(this, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, |
464 | 0 | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT); |
465 | |
466 | 0 | return true; |
467 | 0 | }
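Editor's note: a short worked example of how the four NDC vertices above are consumed. loadMSAAFromResolve() below draws them as PrimitiveType::kTriangleStrip with a vertex count of 4, so they expand to two triangles covering the full clip-space square.

    // v0=( 1, 1), v1=( 1,-1), v2=(-1, 1), v3=(-1,-1)
    // strip triangles: {v0, v1, v2} and {v1, v2, v3}, which together cover the whole
    // [-1,1] x [-1,1] range, i.e. every pixel of the resolve attachment gets a fragment.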
468 | | |
469 | | bool VulkanCommandBuffer::updateAndBindLoadMSAAInputAttachment(const VulkanTexture& resolveTexture) |
470 | 0 | { |
471 | | // Fetch a descriptor set that contains one input attachment |
472 | 0 | STArray<1, DescriptorData> inputDescriptors = |
473 | 0 | {VulkanGraphicsPipeline::kInputAttachmentDescriptor}; |
474 | 0 | sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet( |
475 | 0 | SkSpan<DescriptorData>{&inputDescriptors.front(), inputDescriptors.size()}); |
476 | 0 | if (!set) { |
477 | 0 | return false; |
478 | 0 | } |
479 | | |
480 | 0 | VkDescriptorImageInfo textureInfo; |
481 | 0 | memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo)); |
482 | 0 | textureInfo.sampler = VK_NULL_HANDLE; |
483 | 0 | textureInfo.imageView = |
484 | 0 | resolveTexture.getImageView(VulkanImageView::Usage::kAttachment)->imageView(); |
485 | 0 | textureInfo.imageLayout = resolveTexture.currentLayout(); |
486 | |
487 | 0 | VkWriteDescriptorSet writeInfo; |
488 | 0 | memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet)); |
489 | 0 | writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; |
490 | 0 | writeInfo.pNext = nullptr; |
491 | 0 | writeInfo.dstSet = *set->descriptorSet(); |
492 | 0 | writeInfo.dstBinding = VulkanGraphicsPipeline::kInputAttachmentBindingIndex; |
493 | 0 | writeInfo.dstArrayElement = 0; |
494 | 0 | writeInfo.descriptorCount = 1; |
495 | 0 | writeInfo.descriptorType = DsTypeEnumToVkDs(DescriptorType::kInputAttachment); |
496 | 0 | writeInfo.pImageInfo = &textureInfo; |
497 | 0 | writeInfo.pBufferInfo = nullptr; |
498 | 0 | writeInfo.pTexelBufferView = nullptr; |
499 | |
500 | 0 | VULKAN_CALL(fSharedContext->interface(), |
501 | 0 | UpdateDescriptorSets(fSharedContext->device(), |
502 | 0 | /*descriptorWriteCount=*/1, |
503 | 0 | &writeInfo, |
504 | 0 | /*descriptorCopyCount=*/0, |
505 | 0 | /*pDescriptorCopies=*/nullptr)); |
506 | |
507 | 0 | VULKAN_CALL(fSharedContext->interface(), |
508 | 0 | CmdBindDescriptorSets(fPrimaryCommandBuffer, |
509 | 0 | VK_PIPELINE_BIND_POINT_GRAPHICS, |
510 | 0 | fActiveGraphicsPipeline->layout(), |
511 | 0 | VulkanGraphicsPipeline::kInputAttachmentDescSetIndex, |
512 | 0 | /*setCount=*/1, |
513 | 0 | set->descriptorSet(), |
514 | 0 | /*dynamicOffsetCount=*/0, |
515 | 0 | /*dynamicOffsets=*/nullptr)); |
516 | |
517 | 0 | this->trackResource(std::move(set)); |
518 | 0 | return true; |
519 | 0 | } |
520 | | |
521 | | bool VulkanCommandBuffer::loadMSAAFromResolve(const RenderPassDesc& renderPassDesc, |
522 | | VulkanTexture& resolveTexture, |
523 | 0 | SkISize dstDimensions) { |
524 | 0 | sk_sp<VulkanGraphicsPipeline> loadPipeline = |
525 | 0 | fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc); |
526 | 0 | if (!loadPipeline) { |
527 | 0 | SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment"); |
528 | 0 | return false; |
529 | 0 | } |
530 | | |
531 | 0 | this->bindGraphicsPipeline(loadPipeline.get()); |
532 | | // Make sure we do not attempt to bind uniform or texture/sampler descriptors because we do |
533 | | // not use them for loading MSAA from resolve. |
534 | 0 | fBindUniformBuffers = false; |
535 | 0 | fBindTextureSamplers = false; |
536 | |
537 | 0 | this->setScissor(/*left=*/0, /*top=*/0, dstDimensions.width(), dstDimensions.height()); |
538 | |
539 | 0 | if (!this->updateAndBindLoadMSAAInputAttachment(resolveTexture)) { |
540 | 0 | SKGPU_LOG_E("Unable to update and bind an input attachment descriptor for loading MSAA " |
541 | 0 | "from resolve"); |
542 | 0 | return false; |
543 | 0 | } |
544 | | |
545 | 0 | SkASSERT(fResourceProvider->loadMSAAVertexBuffer()); |
546 | 0 | this->bindVertexBuffers(fResourceProvider->loadMSAAVertexBuffer(), |
547 | 0 | /*vertexOffset=*/0, |
548 | 0 | /*instanceBuffer=*/nullptr, |
549 | 0 | /*instanceOffset=*/0); |
550 | |
551 | 0 | this->draw(PrimitiveType::kTriangleStrip, /*baseVertex=*/0, /*vertexCount=*/4); |
552 | 0 | this->nextSubpass(); |
553 | | |
554 | | // If we loaded the resolve attachment, then we would have set the image layout to be |
555 | | // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an |
556 | | // input attachment. However, when we switch to the main subpass it will transition the |
557 | | // layout internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our |
558 | | // tracking of the layout to match the new layout. |
559 | 0 | resolveTexture.updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); |
560 | | |
561 | | // After using a distinct descriptor set layout for loading MSAA from resolve, we will need to |
562 | | // (re-)bind any descriptor sets. |
563 | 0 | fBindUniformBuffers = true; |
564 | 0 | fBindTextureSamplers = true; |
565 | 0 | return true; |
566 | 0 | }
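Editor's note: a minimal sketch of the two-subpass structure that the layout comments above rely on. This is not taken from this file (the real render pass is assembled by VulkanRenderPass); the attachment indices are assumptions for illustration.

    // Attachment 0 = MSAA color attachment, attachment 1 = single-sample resolve texture.
    VkAttachmentReference msaaColorRef   = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkAttachmentReference resolveAsInput = {1, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
    VkAttachmentReference resolveAsDst   = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};

    // Subpass 0: the full-screen "load" draw reads the resolve texture as an input
    // attachment and writes it into the MSAA color attachment.
    VkSubpassDescription loadSubpass = {};
    loadSubpass.pipelineBindPoint    = VK_PIPELINE_BIND_POINT_GRAPHICS;
    loadSubpass.inputAttachmentCount = 1;
    loadSubpass.pInputAttachments    = &resolveAsInput;
    loadSubpass.colorAttachmentCount = 1;
    loadSubpass.pColorAttachments    = &msaaColorRef;

    // Subpass 1: the main draws; at the end the MSAA attachment resolves back into
    // attachment 1, whose layout is then COLOR_ATTACHMENT_OPTIMAL, hence the
    // updateImageLayout() call above.
    VkSubpassDescription mainSubpass = {};
    mainSubpass.pipelineBindPoint    = VK_PIPELINE_BIND_POINT_GRAPHICS;
    mainSubpass.colorAttachmentCount = 1;
    mainSubpass.pColorAttachments    = &msaaColorRef;
    mainSubpass.pResolveAttachments  = &resolveAsDst;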
567 | | |
568 | | namespace { |
569 | | void setup_texture_layouts(VulkanCommandBuffer* cmdBuf, |
570 | | VulkanTexture* colorTexture, |
571 | | VulkanTexture* resolveTexture, |
572 | | VulkanTexture* depthStencilTexture, |
573 | 0 | bool loadMSAAFromResolve) { |
574 | 0 | if (colorTexture) { |
575 | 0 | colorTexture->setImageLayout(cmdBuf, |
576 | 0 | VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, |
577 | 0 | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | |
578 | 0 | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, |
579 | 0 | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, |
580 | 0 | /*byRegion=*/false); |
581 | 0 | if (resolveTexture) { |
582 | 0 | if (loadMSAAFromResolve) { |
583 | | // When loading MSAA from resolve, the texture is used in the first subpass as an |
584 | | // input attachment. Subsequent subpass(es) need the resolve texture to provide read |
585 | | // access to the color attachment (for use cases such as blending), so add access |
586 | | // and pipeline stage flags for both usages. |
587 | 0 | resolveTexture->setImageLayout(cmdBuf, |
588 | 0 | VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, |
589 | 0 | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | |
590 | 0 | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, |
591 | 0 | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | |
592 | 0 | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, |
593 | 0 | /*byRegion=*/false); |
594 | 0 | } else { |
595 | 0 | resolveTexture->setImageLayout(cmdBuf, |
596 | 0 | VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, |
597 | 0 | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | |
598 | 0 | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, |
599 | 0 | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, |
600 | 0 | /*byRegion=*/false); |
601 | 0 | } |
602 | 0 | } |
603 | 0 | } |
604 | 0 | if (depthStencilTexture) { |
605 | 0 | depthStencilTexture->setImageLayout(cmdBuf, |
606 | 0 | VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, |
607 | 0 | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, |
608 | 0 | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | |
609 | 0 | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, |
610 | 0 | /*byRegion=*/false); |
611 | 0 | } |
612 | 0 | } |
613 | | |
614 | | void track_attachments(VulkanCommandBuffer* cmdBuf, |
615 | | VulkanTexture* colorTexture, |
616 | | VulkanTexture* resolveTexture, |
617 | 0 | VulkanTexture* depthStencilTexture) { |
618 | 0 | if (colorTexture) { |
619 | 0 | cmdBuf->trackResource(sk_ref_sp(colorTexture)); |
620 | 0 | } |
621 | 0 | if (resolveTexture){ |
622 | 0 | cmdBuf->trackResource(sk_ref_sp(resolveTexture)); |
623 | 0 | } |
624 | 0 | if (depthStencilTexture) { |
625 | 0 | cmdBuf->trackResource(sk_ref_sp(depthStencilTexture)); |
626 | 0 | } |
627 | 0 | } |
628 | | |
629 | | void gather_attachment_views(skia_private::TArray<VkImageView>& attachmentViews, |
630 | | VulkanTexture* colorTexture, |
631 | | VulkanTexture* resolveTexture, |
632 | 0 | VulkanTexture* depthStencilTexture) { |
633 | 0 | if (colorTexture) { |
634 | 0 | VkImageView& colorAttachmentView = attachmentViews.push_back(); |
635 | 0 | colorAttachmentView = |
636 | 0 | colorTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView(); |
637 | |
638 | 0 | if (resolveTexture) { |
639 | 0 | VkImageView& resolveView = attachmentViews.push_back(); |
640 | 0 | resolveView = |
641 | 0 | resolveTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView(); |
642 | 0 | } |
643 | 0 | } |
644 | |
645 | 0 | if (depthStencilTexture) { |
646 | 0 | VkImageView& stencilView = attachmentViews.push_back(); |
647 | 0 | stencilView = |
648 | 0 | depthStencilTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView(); |
649 | 0 | } |
650 | 0 | } |
651 | | |
652 | | void gather_clear_values( |
653 | | STArray<VulkanRenderPass::kMaxExpectedAttachmentCount, VkClearValue>& clearValues, |
654 | | const RenderPassDesc& renderPassDesc, |
655 | | VulkanTexture* colorTexture, |
656 | | VulkanTexture* depthStencilTexture, |
657 | 0 | int depthStencilAttachmentIdx) { |
658 | 0 | clearValues.push_back_n(VulkanRenderPass::kMaxExpectedAttachmentCount); |
659 | 0 | if (colorTexture) { |
660 | 0 | VkClearValue& colorAttachmentClear = |
661 | 0 | clearValues.at(VulkanRenderPass::kColorAttachmentIdx); |
662 | 0 | memset(&colorAttachmentClear, 0, sizeof(VkClearValue)); |
663 | 0 | colorAttachmentClear.color = {{renderPassDesc.fClearColor[0], |
664 | 0 | renderPassDesc.fClearColor[1], |
665 | 0 | renderPassDesc.fClearColor[2], |
666 | 0 | renderPassDesc.fClearColor[3]}}; |
667 | 0 | } |
668 | | // Resolve texture does not have a clear value |
669 | 0 | if (depthStencilTexture) { |
670 | 0 | VkClearValue& depthStencilAttachmentClear = clearValues.at(depthStencilAttachmentIdx); |
671 | 0 | memset(&depthStencilAttachmentClear, 0, sizeof(VkClearValue)); |
672 | 0 | depthStencilAttachmentClear.depthStencil = {renderPassDesc.fClearDepth, |
673 | 0 | renderPassDesc.fClearStencil}; |
674 | 0 | } |
675 | 0 | } |
676 | | |
677 | | // The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple |
678 | | // of the granularity. The width must also be a multiple of the granularity or equal to the width |
679 | | // of the entire attachment. Similar requirements apply to the y and height components. |
680 | | VkRect2D get_render_area(const SkIRect& srcBounds, |
681 | | const VkExtent2D& granularity, |
682 | | int maxWidth, |
683 | 0 | int maxHeight) { |
684 | 0 | SkIRect dstBounds; |
685 | | // Adjust Width |
686 | 0 | if (granularity.width == 0 || granularity.width == 1) { |
687 | 0 | dstBounds.fLeft = srcBounds.fLeft; |
688 | 0 | dstBounds.fRight = srcBounds.fRight; |
689 | 0 | } else { |
690 | | // Start with the right side of rect so we know if we end up going past the maxWidth. |
691 | 0 | int rightAdj = srcBounds.fRight % granularity.width; |
692 | 0 | if (rightAdj != 0) { |
693 | 0 | rightAdj = granularity.width - rightAdj; |
694 | 0 | } |
695 | 0 | dstBounds.fRight = srcBounds.fRight + rightAdj; |
696 | 0 | if (dstBounds.fRight > maxWidth) { |
697 | 0 | dstBounds.fRight = maxWidth; |
698 | 0 | dstBounds.fLeft = 0; |
699 | 0 | } else { |
700 | 0 | dstBounds.fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width; |
701 | 0 | } |
702 | 0 | } |
703 | |
704 | 0 | if (granularity.height == 0 || granularity.height == 1) { |
705 | 0 | dstBounds.fTop = srcBounds.fTop; |
706 | 0 | dstBounds.fBottom = srcBounds.fBottom; |
707 | 0 | } else { |
708 | | // Start with the bottom side of rect so we know if we end up going past the maxHeight. |
709 | 0 | int bottomAdj = srcBounds.fBottom % granularity.height; |
710 | 0 | if (bottomAdj != 0) { |
711 | 0 | bottomAdj = granularity.height - bottomAdj; |
712 | 0 | } |
713 | 0 | dstBounds.fBottom = srcBounds.fBottom + bottomAdj; |
714 | 0 | if (dstBounds.fBottom > maxHeight) { |
715 | 0 | dstBounds.fBottom = maxHeight; |
716 | 0 | dstBounds.fTop = 0; |
717 | 0 | } else { |
718 | 0 | dstBounds.fTop = srcBounds.fTop - srcBounds.fTop % granularity.height; |
719 | 0 | } |
720 | 0 | } |
721 | |
722 | 0 | VkRect2D renderArea; |
723 | 0 | renderArea.offset = { dstBounds.fLeft , dstBounds.fTop }; |
724 | 0 | renderArea.extent = { (uint32_t)dstBounds.width(), (uint32_t)dstBounds.height() }; |
725 | 0 | return renderArea; |
726 | 0 | } |
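Editor's note: a worked example of the rounding performed above, using illustrative values.

    // srcBounds = {fLeft=10, fTop=0, fRight=70, fBottom=40}, granularity = {32, 32},
    // maxWidth = 128, maxHeight = 40:
    //   rightAdj  = 32 - (70 % 32) = 26  ->  fRight  = 96 (<= maxWidth), fLeft = 10 - (10 % 32) = 0
    //   bottomAdj = 32 - (40 % 32) = 24  ->  fBottom = 64 > maxHeight, so fBottom = 40, fTop = 0
    // renderArea = { offset = {0, 0}, extent = {96, 40} }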
727 | | |
728 | | } // anonymous namespace |
729 | | |
730 | | bool VulkanCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc, |
731 | | SkIRect renderPassBounds, |
732 | | const Texture* colorTexture, |
733 | | const Texture* resolveTexture, |
734 | 0 | const Texture* depthStencilTexture) { |
735 | | // TODO: Check that Textures match RenderPassDesc |
736 | 0 | VulkanTexture* vulkanColorTexture = |
737 | 0 | const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(colorTexture)); |
738 | 0 | VulkanTexture* vulkanResolveTexture = |
739 | 0 | const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(resolveTexture)); |
740 | 0 | VulkanTexture* vulkanDepthStencilTexture = |
741 | 0 | const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(depthStencilTexture)); |
742 | |
743 | 0 | SkASSERT(resolveTexture ? renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore |
744 | 0 | : true); |
745 | | |
746 | | // Determine if we need to load MSAA from resolve, and if so, make certain that key conditions |
747 | | // are met before proceeding. |
748 | 0 | bool loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() && |
749 | 0 | renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad; |
750 | 0 | if (loadMSAAFromResolve && (!vulkanResolveTexture || !vulkanColorTexture || |
751 | 0 | !vulkanResolveTexture->supportsInputAttachmentUsage())) { |
752 | 0 | SKGPU_LOG_E("Cannot begin render pass. In order to load MSAA from resolve, the color " |
753 | 0 | "attachment must have input attachment usage and both the color and resolve " |
754 | 0 | "attachments must be valid."); |
755 | 0 | return false; |
756 | 0 | } |
757 | | |
758 | 0 | track_attachments(this, vulkanColorTexture, vulkanResolveTexture, vulkanDepthStencilTexture); |
759 | | |
760 | | // Before beginning a renderpass, set all textures to the appropriate image layout. |
761 | 0 | setup_texture_layouts(this, |
762 | 0 | vulkanColorTexture, |
763 | 0 | vulkanResolveTexture, |
764 | 0 | vulkanDepthStencilTexture, |
765 | 0 | loadMSAAFromResolve); |
766 | |
767 | 0 | static constexpr int kMaxNumAttachments = 3; |
768 | | // Gather attachment views needed for framebuffer creation. |
769 | 0 | skia_private::TArray<VkImageView> attachmentViews; |
770 | 0 | gather_attachment_views( |
771 | 0 | attachmentViews, vulkanColorTexture, vulkanResolveTexture, vulkanDepthStencilTexture); |
772 | | |
773 | | // Gather clear values needed for RenderPassBeginInfo. Indexed by attachment number. |
774 | 0 | STArray<kMaxNumAttachments, VkClearValue> clearValues; |
775 | | // The depth/stencil attachment can be at attachment index 1 or 2 depending on whether there is |
776 | | // a resolve texture attachment for this renderpass. |
777 | 0 | int depthStencilAttachmentIndex = resolveTexture ? 2 : 1; |
778 | 0 | gather_clear_values(clearValues, |
779 | 0 | renderPassDesc, |
780 | 0 | vulkanColorTexture, |
781 | 0 | vulkanDepthStencilTexture, |
782 | 0 | depthStencilAttachmentIndex); |
783 | |
784 | 0 | sk_sp<VulkanRenderPass> vulkanRenderPass = |
785 | 0 | fResourceProvider->findOrCreateRenderPass(renderPassDesc, /*compatibleOnly=*/false); |
786 | 0 | if (!vulkanRenderPass) { |
787 | 0 | SKGPU_LOG_W("Could not create Vulkan RenderPass"); |
788 | 0 | return false; |
789 | 0 | } |
790 | 0 | this->submitPipelineBarriers(); |
791 | 0 | this->trackResource(vulkanRenderPass); |
792 | |
793 | 0 | int frameBufferWidth = 0; |
794 | 0 | int frameBufferHeight = 0; |
795 | 0 | if (colorTexture) { |
796 | 0 | frameBufferWidth = colorTexture->dimensions().width(); |
797 | 0 | frameBufferHeight = colorTexture->dimensions().height(); |
798 | 0 | } else if (depthStencilTexture) { |
799 | 0 | frameBufferWidth = depthStencilTexture->dimensions().width(); |
800 | 0 | frameBufferHeight = depthStencilTexture->dimensions().height(); |
801 | 0 | } |
802 | 0 | sk_sp<VulkanFramebuffer> framebuffer = fResourceProvider->createFramebuffer(fSharedContext, |
803 | 0 | attachmentViews, |
804 | 0 | *vulkanRenderPass, |
805 | 0 | frameBufferWidth, |
806 | 0 | frameBufferHeight); |
807 | 0 | if (!framebuffer) { |
808 | 0 | SKGPU_LOG_W("Could not create Vulkan Framebuffer"); |
809 | 0 | return false; |
810 | 0 | } |
811 | | |
812 | 0 | VkExtent2D granularity; |
813 | | // Get granularity for this render pass |
814 | 0 | VULKAN_CALL(fSharedContext->interface(), |
815 | 0 | GetRenderAreaGranularity(fSharedContext->device(), |
816 | 0 | vulkanRenderPass->renderPass(), |
817 | 0 | &granularity)); |
818 | 0 | VkRect2D renderArea = get_render_area(renderPassBounds, |
819 | 0 | granularity, |
820 | 0 | frameBufferWidth, |
821 | 0 | frameBufferHeight); |
822 | |
823 | 0 | VkRenderPassBeginInfo beginInfo; |
824 | 0 | memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo)); |
825 | 0 | beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; |
826 | 0 | beginInfo.pNext = nullptr; |
827 | 0 | beginInfo.renderPass = vulkanRenderPass->renderPass(); |
828 | 0 | beginInfo.framebuffer = framebuffer->framebuffer(); |
829 | 0 | beginInfo.renderArea = renderArea; |
830 | 0 | beginInfo.clearValueCount = clearValues.size(); |
831 | 0 | beginInfo.pClearValues = clearValues.begin(); |
832 | | |
833 | | // If loading MSAA from resolve, we need to update and bind a vertex buffer w/ NDC. This entails |
834 | | // taking care of some necessary preparations that must be performed while there is not an |
835 | | // active renderpass. |
836 | 0 | if (loadMSAAFromResolve) { |
837 | | // We manually load the contents of the resolve texture into the MSAA attachment as a draw, |
838 | | // so the MSAA attachment's load op should be LoadOp::kDiscard. |
839 | 0 | SkASSERT(renderPassDesc.fColorAttachment.fLoadOp == LoadOp::kDiscard); |
840 | 0 | SkASSERT(!fActiveRenderPass); |
841 | 0 | SkASSERT(resolveTexture); |
842 | |
843 | 0 | if (!this->updateLoadMSAAVertexBuffer()) { |
844 | 0 | SKGPU_LOG_E("Failed to update vertex buffer for loading MSAA from resolve"); |
845 | 0 | return false; |
846 | 0 | } |
847 | 0 | } |
848 | | |
849 | | // Submit pipeline barriers to ensure any image layout transitions are recorded prior to |
850 | | // beginning the render pass. |
851 | 0 | this->submitPipelineBarriers(); |
852 | | // TODO: If we add support for secondary command buffers, dynamically determine subpass contents |
853 | 0 | VULKAN_CALL(fSharedContext->interface(), |
854 | 0 | CmdBeginRenderPass(fPrimaryCommandBuffer, |
855 | 0 | &beginInfo, |
856 | 0 | VK_SUBPASS_CONTENTS_INLINE)); |
857 | 0 | fActiveRenderPass = true; |
858 | |
859 | 0 | if (loadMSAAFromResolve && !this->loadMSAAFromResolve(renderPassDesc, |
860 | 0 | *vulkanResolveTexture, |
861 | 0 | vulkanColorTexture->dimensions())) { |
862 | 0 | SKGPU_LOG_E("Failed to load MSAA from resolve"); |
863 | 0 | this->endRenderPass(); |
864 | 0 | return false; |
865 | 0 | } |
866 | | |
867 | | // Once we have an active render pass, the command buffer should hold on to a frame buffer ref. |
868 | 0 | this->trackResource(std::move(framebuffer)); |
869 | 0 | return true; |
870 | 0 | }
871 | | |
872 | 0 | void VulkanCommandBuffer::endRenderPass() { |
873 | 0 | SkASSERT(fActive); |
874 | 0 | VULKAN_CALL(fSharedContext->interface(), CmdEndRenderPass(fPrimaryCommandBuffer)); |
875 | 0 | fActiveRenderPass = false; |
876 | 0 | }
877 | | |
878 | 0 | void VulkanCommandBuffer::addDrawPass(const DrawPass* drawPass) { |
879 | 0 | drawPass->addResourceRefs(this); |
880 | 0 | for (auto [type, cmdPtr] : drawPass->commands()) { |
881 | 0 | switch (type) { |
882 | 0 | case DrawPassCommands::Type::kBindGraphicsPipeline: { |
883 | 0 | auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr); |
884 | 0 | this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex)); |
885 | 0 | break; |
886 | 0 | } |
887 | 0 | case DrawPassCommands::Type::kSetBlendConstants: { |
888 | 0 | auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr); |
889 | 0 | this->setBlendConstants(sbc->fBlendConstants); |
890 | 0 | break; |
891 | 0 | } |
892 | 0 | case DrawPassCommands::Type::kBindUniformBuffer: { |
893 | 0 | auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr); |
894 | 0 | this->recordBufferBindingInfo(bub->fInfo, bub->fSlot); |
895 | 0 | break; |
896 | 0 | } |
897 | 0 | case DrawPassCommands::Type::kBindDrawBuffers: { |
898 | 0 | auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr); |
899 | 0 | this->bindDrawBuffers( |
900 | 0 | bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect); |
901 | 0 | break; |
902 | 0 | } |
903 | 0 | case DrawPassCommands::Type::kBindTexturesAndSamplers: { |
904 | 0 | auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr); |
905 | 0 | this->recordTextureAndSamplerDescSet(*drawPass, *bts); |
906 | 0 | break; |
907 | 0 | } |
908 | 0 | case DrawPassCommands::Type::kSetScissor: { |
909 | 0 | auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr); |
910 | 0 | const SkIRect& rect = ss->fScissor; |
911 | 0 | this->setScissor(rect.fLeft, rect.fTop, rect.width(), rect.height()); |
912 | 0 | break; |
913 | 0 | } |
914 | 0 | case DrawPassCommands::Type::kDraw: { |
915 | 0 | auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr); |
916 | 0 | this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount); |
917 | 0 | break; |
918 | 0 | } |
919 | 0 | case DrawPassCommands::Type::kDrawIndexed: { |
920 | 0 | auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr); |
921 | 0 | this->drawIndexed( |
922 | 0 | draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex); |
923 | 0 | break; |
924 | 0 | } |
925 | 0 | case DrawPassCommands::Type::kDrawInstanced: { |
926 | 0 | auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr); |
927 | 0 | this->drawInstanced(draw->fType, |
928 | 0 | draw->fBaseVertex, |
929 | 0 | draw->fVertexCount, |
930 | 0 | draw->fBaseInstance, |
931 | 0 | draw->fInstanceCount); |
932 | 0 | break; |
933 | 0 | } |
934 | 0 | case DrawPassCommands::Type::kDrawIndexedInstanced: { |
935 | 0 | auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr); |
936 | 0 | this->drawIndexedInstanced(draw->fType, |
937 | 0 | draw->fBaseIndex, |
938 | 0 | draw->fIndexCount, |
939 | 0 | draw->fBaseVertex, |
940 | 0 | draw->fBaseInstance, |
941 | 0 | draw->fInstanceCount); |
942 | 0 | break; |
943 | 0 | } |
944 | 0 | case DrawPassCommands::Type::kDrawIndirect: { |
945 | 0 | auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr); |
946 | 0 | this->drawIndirect(draw->fType); |
947 | 0 | break; |
948 | 0 | } |
949 | 0 | case DrawPassCommands::Type::kDrawIndexedIndirect: { |
950 | 0 | auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr); |
951 | 0 | this->drawIndexedIndirect(draw->fType); |
952 | 0 | break; |
953 | 0 | } |
954 | 0 | } |
955 | 0 | } |
956 | 0 | } |
957 | | |
958 | 0 | void VulkanCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) { |
959 | 0 | fActiveGraphicsPipeline = static_cast<const VulkanGraphicsPipeline*>(graphicsPipeline); |
960 | 0 | SkASSERT(fActiveRenderPass); |
961 | 0 | VULKAN_CALL(fSharedContext->interface(), CmdBindPipeline(fPrimaryCommandBuffer, |
962 | 0 | VK_PIPELINE_BIND_POINT_GRAPHICS, |
963 | 0 | fActiveGraphicsPipeline->pipeline())); |
964 | | // TODO(b/293924877): Compare pipeline layouts. If 2 pipelines have the same pipeline layout, |
965 | | // then descriptor sets do not need to be re-bound. For now, simply force a re-binding of |
966 | | // descriptor sets with any new bindGraphicsPipeline DrawPassCommand. |
967 | 0 | fBindUniformBuffers = true; |
968 | 0 | }
969 | | |
970 | 0 | void VulkanCommandBuffer::setBlendConstants(float* blendConstants) { |
971 | 0 | SkASSERT(fActive); |
972 | 0 | if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) { |
973 | 0 | VULKAN_CALL(fSharedContext->interface(), |
974 | 0 | CmdSetBlendConstants(fPrimaryCommandBuffer, blendConstants)); |
975 | 0 | memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float)); |
976 | 0 | } |
977 | 0 | }
978 | | |
979 | 0 | void VulkanCommandBuffer::recordBufferBindingInfo(const BindBufferInfo& info, UniformSlot slot) { |
980 | 0 | unsigned int bufferIndex = 0; |
981 | 0 | switch (slot) { |
982 | 0 | case UniformSlot::kRenderStep: |
983 | 0 | bufferIndex = VulkanGraphicsPipeline::kRenderStepUniformBufferIndex; |
984 | 0 | break; |
985 | 0 | case UniformSlot::kPaint: |
986 | 0 | bufferIndex = VulkanGraphicsPipeline::kPaintUniformBufferIndex; |
987 | 0 | break; |
988 | 0 | case UniformSlot::kGradient: |
989 | 0 | bufferIndex = VulkanGraphicsPipeline::kGradientBufferIndex; |
990 | 0 | break; |
991 | 0 | default: |
992 | 0 | SkASSERT(false); |
993 | 0 | } |
994 | | |
995 | 0 | fUniformBuffersToBind[bufferIndex] = info; |
996 | 0 | fBindUniformBuffers = true; |
997 | 0 | }
998 | | |
999 | 0 | void VulkanCommandBuffer::syncDescriptorSets() { |
1000 | 0 | if (fBindUniformBuffers) { |
1001 | 0 | this->bindUniformBuffers(); |
1002 | | // Changes to descriptor sets in lower slot numbers disrupt later set bindings. Currently, |
1003 | | // the descriptor set which houses uniform buffers is at a lower slot than the texture / |
1004 | | // sampler set, so rebinding uniform buffers necessitates re-binding any texture/samplers. |
1005 | 0 | fBindTextureSamplers = true; |
1006 | 0 | } |
1007 | 0 | if (fBindTextureSamplers) { |
1008 | 0 | this->bindTextureSamplers(); |
1009 | 0 | } |
1010 | 0 | } |
1011 | | |
1012 | 0 | void VulkanCommandBuffer::bindUniformBuffers() { |
1013 | 0 | fBindUniformBuffers = false; |
1014 | | |
1015 | | // We always bind at least one uniform buffer descriptor for intrinsic uniforms, but can bind |
1016 | | // up to three (one for render step uniforms, one for paint uniforms). |
1017 | 0 | STArray<VulkanGraphicsPipeline::kNumUniformBuffers, DescriptorData> descriptors; |
1018 | 0 | descriptors.push_back(VulkanGraphicsPipeline::kIntrinsicUniformBufferDescriptor); |
1019 | |
1020 | 0 | DescriptorType uniformBufferType = fSharedContext->caps()->storageBufferSupport() |
1021 | 0 | ? DescriptorType::kStorageBuffer |
1022 | 0 | : DescriptorType::kUniformBuffer; |
1023 | 0 | if (fActiveGraphicsPipeline->hasStepUniforms() && |
1024 | 0 | fUniformBuffersToBind[VulkanGraphicsPipeline::kRenderStepUniformBufferIndex].fBuffer) { |
1025 | 0 | descriptors.push_back({ |
1026 | 0 | uniformBufferType, |
1027 | 0 | /*count=*/1, |
1028 | 0 | VulkanGraphicsPipeline::kRenderStepUniformBufferIndex, |
1029 | 0 | PipelineStageFlags::kVertexShader | PipelineStageFlags::kFragmentShader}); |
1030 | 0 | } |
1031 | 0 | if (fActiveGraphicsPipeline->hasPaintUniforms() && |
1032 | 0 | fUniformBuffersToBind[VulkanGraphicsPipeline::kPaintUniformBufferIndex].fBuffer) { |
1033 | 0 | descriptors.push_back({ |
1034 | 0 | uniformBufferType, |
1035 | 0 | /*count=*/1, |
1036 | 0 | VulkanGraphicsPipeline::kPaintUniformBufferIndex, |
1037 | 0 | PipelineStageFlags::kFragmentShader}); |
1038 | 0 | } |
1039 | 0 | if (fActiveGraphicsPipeline->hasGradientBuffer() && |
1040 | 0 | fUniformBuffersToBind[VulkanGraphicsPipeline::kGradientBufferIndex].fBuffer) { |
1041 | 0 | SkASSERT(fSharedContext->caps()->gradientBufferSupport() && |
1042 | 0 | fSharedContext->caps()->storageBufferSupport()); |
1043 | 0 | descriptors.push_back({ |
1044 | 0 | DescriptorType::kStorageBuffer, |
1045 | 0 | /*count=*/1, |
1046 | 0 | VulkanGraphicsPipeline::kGradientBufferIndex, |
1047 | 0 | PipelineStageFlags::kFragmentShader}); |
1048 | 0 | } |
1049 | |
1050 | 0 | sk_sp<VulkanDescriptorSet> descSet = fResourceProvider->findOrCreateUniformBuffersDescriptorSet( |
1051 | 0 | descriptors, fUniformBuffersToBind); |
1052 | 0 | if (!descSet) { |
1053 | 0 | SKGPU_LOG_E("Unable to find or create uniform descriptor set"); |
1054 | 0 | return; |
1055 | 0 | } |
1056 | 0 | skia_private::AutoSTMalloc<VulkanGraphicsPipeline::kNumUniformBuffers, uint32_t> |
1057 | 0 | dynamicOffsets(descriptors.size()); |
1058 | 0 | for (int i = 0; i < descriptors.size(); i++) { |
1059 | 0 | int descriptorBindingIndex = descriptors[i].fBindingIndex; |
1060 | 0 | SkASSERT(static_cast<unsigned long>(descriptorBindingIndex) < fUniformBuffersToBind.size()); |
1061 | 0 | const auto& bindInfo = fUniformBuffersToBind[descriptorBindingIndex]; |
1062 | 0 | dynamicOffsets[i] = bindInfo.fOffset; |
1063 | 0 | } |
1064 | |
1065 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1066 | 0 | CmdBindDescriptorSets(fPrimaryCommandBuffer, |
1067 | 0 | VK_PIPELINE_BIND_POINT_GRAPHICS, |
1068 | 0 | fActiveGraphicsPipeline->layout(), |
1069 | 0 | VulkanGraphicsPipeline::kUniformBufferDescSetIndex, |
1070 | 0 | /*setCount=*/1, |
1071 | 0 | descSet->descriptorSet(), |
1072 | 0 | descriptors.size(), |
1073 | 0 | dynamicOffsets.get())); |
1074 | 0 | this->trackResource(std::move(descSet)); |
1075 | 0 | }
1076 | | |
1077 | | void VulkanCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices, |
1078 | | const BindBufferInfo& instances, |
1079 | | const BindBufferInfo& indices, |
1080 | 0 | const BindBufferInfo& indirect) { |
1081 | 0 | this->bindVertexBuffers(vertices.fBuffer, |
1082 | 0 | vertices.fOffset, |
1083 | 0 | instances.fBuffer, |
1084 | 0 | instances.fOffset); |
1085 | 0 | this->bindIndexBuffer(indices.fBuffer, indices.fOffset); |
1086 | 0 | this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset); |
1087 | 0 | } |
1088 | | |
1089 | | void VulkanCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer, |
1090 | | size_t vertexOffset, |
1091 | | const Buffer* instanceBuffer, |
1092 | 0 | size_t instanceOffset) { |
1093 | 0 | this->bindInputBuffer(vertexBuffer, vertexOffset, |
1094 | 0 | VulkanGraphicsPipeline::kVertexBufferIndex); |
1095 | 0 | this->bindInputBuffer(instanceBuffer, instanceOffset, |
1096 | 0 | VulkanGraphicsPipeline::kInstanceBufferIndex); |
1097 | 0 | } |
1098 | | |
1099 | | void VulkanCommandBuffer::bindInputBuffer(const Buffer* buffer, VkDeviceSize offset, |
1100 | 0 | uint32_t binding) { |
1101 | 0 | if (buffer) { |
1102 | 0 | VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(buffer)->vkBuffer(); |
1103 | 0 | SkASSERT(vkBuffer != VK_NULL_HANDLE); |
1104 | 0 | if (vkBuffer != fBoundInputBuffers[binding] || |
1105 | 0 | offset != fBoundInputBufferOffsets[binding]) { |
1106 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1107 | 0 | CmdBindVertexBuffers(fPrimaryCommandBuffer, |
1108 | 0 | binding, |
1109 | 0 | /*bindingCount=*/1, |
1110 | 0 | &vkBuffer, |
1111 | 0 | &offset)); |
1112 | 0 | fBoundInputBuffers[binding] = vkBuffer; |
1113 | 0 | fBoundInputBufferOffsets[binding] = offset; |
1114 | 0 | this->trackResource(sk_ref_sp(buffer)); |
1115 | 0 | } |
1116 | 0 | } |
1117 | 0 | }
1118 | | |
1119 | 0 | void VulkanCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) { |
1120 | 0 | if (indexBuffer) { |
1121 | 0 | VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(indexBuffer)->vkBuffer(); |
1122 | 0 | SkASSERT(vkBuffer != VK_NULL_HANDLE); |
1123 | 0 | if (vkBuffer != fBoundIndexBuffer || offset != fBoundIndexBufferOffset) { |
1124 | 0 | VULKAN_CALL(fSharedContext->interface(), CmdBindIndexBuffer(fPrimaryCommandBuffer, |
1125 | 0 | vkBuffer, |
1126 | 0 | offset, |
1127 | 0 | VK_INDEX_TYPE_UINT16)); |
1128 | 0 | fBoundIndexBuffer = vkBuffer; |
1129 | 0 | fBoundIndexBufferOffset = offset; |
1130 | 0 | this->trackResource(sk_ref_sp(indexBuffer)); |
1131 | 0 | } |
1132 | 0 | } else { |
1133 | 0 | fBoundIndexBuffer = VK_NULL_HANDLE; |
1134 | 0 | fBoundIndexBufferOffset = 0; |
1135 | 0 | } |
1136 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::bindIndexBuffer(skgpu::graphite::Buffer const*, unsigned long) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::bindIndexBuffer(skgpu::graphite::Buffer const*, unsigned long) |
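The index type is hard-coded to VK_INDEX_TYPE_UINT16, so whatever fills the bound index buffer must write 16-bit indices and can address at most 65536 distinct vertices per draw; vkCmdDrawIndexed then interprets baseIndex and indexCount in units of those 16-bit values. A tiny illustrative snippet (the data itself is made up):

#include <cstdint>
#include <vector>

// Index data matching VK_INDEX_TYPE_UINT16: two triangles forming a quad.
// The byte size uploaded to the index buffer is indices.size() * sizeof(uint16_t).
std::vector<uint16_t> makeQuadIndices() {
    return {0, 1, 2,  2, 1, 3};
}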
1137 | | |
1138 | 0 | void VulkanCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) { |
1139 | | // Indirect buffers are not bound via the command buffer, but specified in the draw cmd. |
1140 | 0 | if (indirectBuffer) { |
1141 | 0 | fBoundIndirectBuffer = static_cast<const VulkanBuffer*>(indirectBuffer)->vkBuffer(); |
1142 | 0 | fBoundIndirectBufferOffset = offset; |
1143 | 0 | this->trackResource(sk_ref_sp(indirectBuffer)); |
1144 | 0 | } else { |
1145 | 0 | fBoundIndirectBuffer = VK_NULL_HANDLE; |
1146 | 0 | fBoundIndirectBufferOffset = 0; |
1147 | 0 | } |
1148 | 0 | } |
1149 | | |
1150 | | void VulkanCommandBuffer::recordTextureAndSamplerDescSet( |
1151 | 0 | const DrawPass& drawPass, const DrawPassCommands::BindTexturesAndSamplers& command) { |
1152 | 0 | if (command.fNumTexSamplers == 0) { |
1153 | 0 | fNumTextureSamplers = 0; |
1154 | 0 | fTextureSamplerDescSetToBind = VK_NULL_HANDLE; |
1155 | 0 | fBindTextureSamplers = false; |
1156 | 0 | return; |
1157 | 0 | } |
1158 | | |
1159 | | // Query resource provider to obtain a descriptor set for the texture/samplers |
1160 | 0 | TArray<DescriptorData> descriptors(command.fNumTexSamplers); |
1161 | 0 | for (int i = 0; i < command.fNumTexSamplers; i++) { |
1162 | 0 | auto sampler = static_cast<const VulkanSampler*>( |
1163 | 0 | drawPass.getSampler(command.fSamplerIndices[i])); |
1164 | |
|
1165 | 0 | const Sampler* immutableSampler = (sampler && sampler->ycbcrConversion()) ? sampler |
1166 | 0 | : nullptr; |
1167 | 0 | descriptors.push_back({DescriptorType::kCombinedTextureSampler, |
1168 | 0 | /*count=*/1, |
1169 | 0 | /*bindingIdx=*/i, |
1170 | 0 | PipelineStageFlags::kFragmentShader, |
1171 | 0 | immutableSampler}); |
1172 | 0 | } |
1173 | 0 | sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet( |
1174 | 0 | SkSpan<DescriptorData>{&descriptors.front(), descriptors.size()}); |
1175 | |
|
1176 | 0 | if (!set) { |
1177 | 0 | SKGPU_LOG_E("Unable to find or create descriptor set"); |
1178 | 0 | fNumTextureSamplers = 0; |
1179 | 0 | fTextureSamplerDescSetToBind = VK_NULL_HANDLE; |
1180 | 0 | fBindTextureSamplers = false; |
1181 | 0 | return; |
1182 | 0 | } |
1183 | | // Populate the descriptor set with texture/sampler descriptors |
1184 | 0 | TArray<VkWriteDescriptorSet> writeDescriptorSets(command.fNumTexSamplers); |
1185 | 0 | TArray<VkDescriptorImageInfo> descriptorImageInfos(command.fNumTexSamplers); |
1186 | 0 | for (int i = 0; i < command.fNumTexSamplers; ++i) { |
1187 | 0 | auto texture = const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>( |
1188 | 0 | drawPass.getTexture(command.fTextureIndices[i]))); |
1189 | 0 | auto sampler = static_cast<const VulkanSampler*>( |
1190 | 0 | drawPass.getSampler(command.fSamplerIndices[i])); |
1191 | 0 | if (!texture || !sampler) { |
1192 | | // TODO(b/294198324): Investigate the root cause for null texture or samplers on |
1193 | | // Ubuntu QuadP400 GPU |
1194 | 0 | SKGPU_LOG_E("Texture and sampler must not be null"); |
1195 | 0 | fNumTextureSamplers = 0; |
1196 | 0 | fTextureSamplerDescSetToBind = VK_NULL_HANDLE; |
1197 | 0 | fBindTextureSamplers = false; |
1198 | 0 | return; |
1199 | 0 | } |
1200 | | |
1201 | 0 | VkDescriptorImageInfo& textureInfo = descriptorImageInfos.push_back(); |
1202 | 0 | memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo)); |
1203 | 0 | textureInfo.sampler = sampler->ycbcrConversion() ? VK_NULL_HANDLE : sampler->vkSampler(); |
1204 | 0 | textureInfo.imageView = |
1205 | 0 | texture->getImageView(VulkanImageView::Usage::kShaderInput)->imageView(); |
1206 | 0 | textureInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; |
1207 | |
|
1208 | 0 | VkWriteDescriptorSet& writeInfo = writeDescriptorSets.push_back(); |
1209 | 0 | memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet)); |
1210 | 0 | writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; |
1211 | 0 | writeInfo.pNext = nullptr; |
1212 | 0 | writeInfo.dstSet = *set->descriptorSet(); |
1213 | 0 | writeInfo.dstBinding = i; |
1214 | 0 | writeInfo.dstArrayElement = 0; |
1215 | 0 | writeInfo.descriptorCount = 1; |
1216 | 0 | writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; |
1217 | 0 | writeInfo.pImageInfo = &textureInfo; |
1218 | 0 | writeInfo.pBufferInfo = nullptr; |
1219 | 0 | writeInfo.pTexelBufferView = nullptr; |
1220 | 0 | } |
1221 | | |
1222 | 0 | VULKAN_CALL(fSharedContext->interface(), UpdateDescriptorSets(fSharedContext->device(), |
1223 | 0 | command.fNumTexSamplers, |
1224 | 0 | &writeDescriptorSets[0], |
1225 | 0 | /*descriptorCopyCount=*/0, |
1226 | 0 | /*pDescriptorCopies=*/nullptr)); |
1227 | | |
1229 | |     // Defer binding the updated descriptor set until draw time. This avoids binding and then
1230 | |     // potentially having to re-bind when earlier descriptor sets change while the draw pass
1231 | |     // commands are still being processed.
1231 | 0 | fTextureSamplerDescSetToBind = *set->descriptorSet(); |
1232 | 0 | fBindTextureSamplers = true; |
1233 | 0 | fNumTextureSamplers = command.fNumTexSamplers; |
1234 | 0 | this->trackResource(std::move(set)); |
1235 | 0 | } |
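Each VkWriteDescriptorSet built above targets a VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER binding, so the set's layout has to declare one such binding per texture/sampler pair, visible to the fragment stage (matching the PipelineStageFlags::kFragmentShader in the DescriptorData). A sketch of equivalent layout bindings, assuming the resource provider builds something along these lines; a binding that uses an immutable (e.g. YCbCr) sampler would instead point pImmutableSamplers at that sampler:

#include <vulkan/vulkan.h>

#include <vector>

// One combined image/sampler binding per texture, fragment-stage visible.
std::vector<VkDescriptorSetLayoutBinding> makeTextureSamplerBindings(uint32_t count) {
    std::vector<VkDescriptorSetLayoutBinding> bindings(count);
    for (uint32_t i = 0; i < count; ++i) {
        bindings[i] = { /*binding=*/i,
                        VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                        /*descriptorCount=*/1,
                        VK_SHADER_STAGE_FRAGMENT_BIT,
                        /*pImmutableSamplers=*/nullptr };
    }
    return bindings;
}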
1236 | | |
1237 | 0 | void VulkanCommandBuffer::bindTextureSamplers() { |
1238 | 0 | fBindTextureSamplers = false; |
1239 | 0 | if (fTextureSamplerDescSetToBind != VK_NULL_HANDLE && |
1240 | 0 | fActiveGraphicsPipeline->numFragTexturesAndSamplers() == fNumTextureSamplers) { |
1241 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1242 | 0 | CmdBindDescriptorSets(fPrimaryCommandBuffer, |
1243 | 0 | VK_PIPELINE_BIND_POINT_GRAPHICS, |
1244 | 0 | fActiveGraphicsPipeline->layout(), |
1245 | 0 | VulkanGraphicsPipeline::kTextureBindDescSetIndex, |
1246 | 0 | /*setCount=*/1, |
1247 | 0 | &fTextureSamplerDescSetToBind, |
1248 | 0 | /*dynamicOffsetCount=*/0, |
1249 | 0 | /*dynamicOffsets=*/nullptr)); |
1250 | 0 | } |
1251 | 0 | } |
1252 | | |
1253 | | void VulkanCommandBuffer::setScissor(unsigned int left, unsigned int top, unsigned int width, |
1254 | 0 | unsigned int height) { |
1255 | 0 | VkRect2D scissor = { |
1256 | 0 | {(int32_t)left, (int32_t)top}, |
1257 | 0 | {width, height} |
1258 | 0 | }; |
1259 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1260 | 0 | CmdSetScissor(fPrimaryCommandBuffer, |
1261 | 0 | /*firstScissor=*/0, |
1262 | 0 | /*scissorCount=*/1, |
1263 | 0 | &scissor)); |
1264 | 0 | } |
1265 | | |
1266 | | void VulkanCommandBuffer::draw(PrimitiveType, |
1267 | | unsigned int baseVertex, |
1268 | 0 | unsigned int vertexCount) { |
1269 | 0 | SkASSERT(fActiveRenderPass); |
1270 | 0 | this->syncDescriptorSets(); |
1271 | | // TODO: set primitive type via dynamic state if available |
1272 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1273 | 0 | CmdDraw(fPrimaryCommandBuffer, |
1274 | 0 | vertexCount, |
1275 | 0 | /*instanceCount=*/1, |
1276 | 0 | baseVertex, |
1277 | 0 | /*firstInstance=*/0)); |
1278 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::draw(skgpu::graphite::PrimitiveType, unsigned int, unsigned int) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::draw(skgpu::graphite::PrimitiveType, unsigned int, unsigned int) |
1279 | | |
1280 | | void VulkanCommandBuffer::drawIndexed(PrimitiveType, |
1281 | | unsigned int baseIndex, |
1282 | | unsigned int indexCount, |
1283 | 0 | unsigned int baseVertex) { |
1284 | 0 | SkASSERT(fActiveRenderPass); |
1285 | 0 | this->syncDescriptorSets(); |
1286 | | // TODO: set primitive type via dynamic state if available |
1287 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1288 | 0 | CmdDrawIndexed(fPrimaryCommandBuffer, |
1289 | 0 | indexCount, |
1290 | 0 | /*instanceCount=*/1, |
1291 | 0 | baseIndex, |
1292 | 0 | baseVertex, |
1293 | 0 | /*firstInstance=*/0)); |
1294 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawIndexed(skgpu::graphite::PrimitiveType, unsigned int, unsigned int, unsigned int) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawIndexed(skgpu::graphite::PrimitiveType, unsigned int, unsigned int, unsigned int) |
1295 | | |
1296 | | void VulkanCommandBuffer::drawInstanced(PrimitiveType, |
1297 | | unsigned int baseVertex, |
1298 | | unsigned int vertexCount, |
1299 | | unsigned int baseInstance, |
1300 | 0 | unsigned int instanceCount) { |
1301 | 0 | SkASSERT(fActiveRenderPass); |
1302 | 0 | this->syncDescriptorSets(); |
1303 | | // TODO: set primitive type via dynamic state if available |
1304 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1305 | 0 | CmdDraw(fPrimaryCommandBuffer, |
1306 | 0 | vertexCount, |
1307 | 0 | instanceCount, |
1308 | 0 | baseVertex, |
1309 | 0 | baseInstance)); |
1310 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawInstanced(skgpu::graphite::PrimitiveType, unsigned int, unsigned int, unsigned int, unsigned int) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawInstanced(skgpu::graphite::PrimitiveType, unsigned int, unsigned int, unsigned int, unsigned int) |
1311 | | |
1312 | | void VulkanCommandBuffer::drawIndexedInstanced(PrimitiveType, |
1313 | | unsigned int baseIndex, |
1314 | | unsigned int indexCount, |
1315 | | unsigned int baseVertex, |
1316 | | unsigned int baseInstance, |
1317 | 0 | unsigned int instanceCount) { |
1318 | 0 | SkASSERT(fActiveRenderPass); |
1319 | 0 | this->syncDescriptorSets(); |
1320 | | // TODO: set primitive type via dynamic state if available |
1321 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1322 | 0 | CmdDrawIndexed(fPrimaryCommandBuffer, |
1323 | 0 | indexCount, |
1324 | 0 | instanceCount, |
1325 | 0 | baseIndex, |
1326 | 0 | baseVertex, |
1327 | 0 | baseInstance)); |
1328 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawIndexedInstanced(skgpu::graphite::PrimitiveType, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawIndexedInstanced(skgpu::graphite::PrimitiveType, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int) |
1329 | | |
1330 | 0 | void VulkanCommandBuffer::drawIndirect(PrimitiveType) { |
1331 | 0 | SkASSERT(fActiveRenderPass); |
1332 | 0 | this->syncDescriptorSets(); |
1333 | | // TODO: set primitive type via dynamic state if available |
1334 | |     // Currently we only support a single indirect draw operation at a time, so the
1335 | |     // stride between commands is irrelevant.
1336 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1337 | 0 | CmdDrawIndirect(fPrimaryCommandBuffer, |
1338 | 0 | fBoundIndirectBuffer, |
1339 | 0 | fBoundIndirectBufferOffset, |
1340 | 0 | /*drawCount=*/1, |
1341 | 0 | /*stride=*/0)); |
1342 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawIndirect(skgpu::graphite::PrimitiveType) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawIndirect(skgpu::graphite::PrimitiveType) |
1343 | | |
1344 | 0 | void VulkanCommandBuffer::drawIndexedIndirect(PrimitiveType) { |
1345 | 0 | SkASSERT(fActiveRenderPass); |
1346 | 0 | this->syncDescriptorSets(); |
1347 | | // TODO: set primitive type via dynamic state if available |
1348 | |     // Currently we only support a single indirect draw operation at a time, so the
1349 | |     // stride between commands is irrelevant.
1350 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1351 | 0 | CmdDrawIndexedIndirect(fPrimaryCommandBuffer, |
1352 | 0 | fBoundIndirectBuffer, |
1353 | 0 | fBoundIndirectBufferOffset, |
1354 | 0 | /*drawCount=*/1, |
1355 | 0 | /*stride=*/0)); |
1356 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawIndexedIndirect(skgpu::graphite::PrimitiveType) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::drawIndexedIndirect(skgpu::graphite::PrimitiveType) |
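Both indirect draws pass drawCount = 1, so the buffer registered by bindIndirectBuffer() only needs to hold a single command structure at fBoundIndirectBufferOffset. The layouts Vulkan reads from that memory are fixed by the spec; for illustration, this is what a CPU-side writer would place there before the buffer is uploaded (the buffer creation and upload path are out of scope here):

#include <vulkan/vulkan.h>

// What vkCmdDrawIndirect reads at the bound offset.
VkDrawIndirectCommand makeDrawCommand(uint32_t vertexCount, uint32_t instanceCount) {
    return { vertexCount, instanceCount, /*firstVertex=*/0, /*firstInstance=*/0 };
}

// What vkCmdDrawIndexedIndirect reads at the bound offset.
VkDrawIndexedIndirectCommand makeDrawIndexedCommand(uint32_t indexCount, uint32_t instanceCount) {
    return { indexCount, instanceCount, /*firstIndex=*/0, /*vertexOffset=*/0, /*firstInstance=*/0 };
}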
1357 | | |
1358 | 0 | bool VulkanCommandBuffer::onAddComputePass(DispatchGroupSpan) { return false; } |
1359 | | |
1360 | | bool VulkanCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer, |
1361 | | size_t srcOffset, |
1362 | | const Buffer* dstBuffer, |
1363 | | size_t dstOffset, |
1364 | 0 | size_t size) { |
1365 | 0 | auto vkSrcBuffer = static_cast<const VulkanBuffer*>(srcBuffer); |
1366 | 0 | auto vkDstBuffer = static_cast<const VulkanBuffer*>(dstBuffer); |
1367 | |
|
1368 | 0 | SkASSERT(vkSrcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT); |
1369 | 0 | SkASSERT(vkDstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT); |
1370 | |
|
1371 | 0 | VkBufferCopy region; |
1372 | 0 | memset(®ion, 0, sizeof(VkBufferCopy)); |
1373 | 0 | region.srcOffset = srcOffset; |
1374 | 0 | region.dstOffset = dstOffset; |
1375 | 0 | region.size = size; |
1376 | |
|
1377 | 0 | this->submitPipelineBarriers(); |
1378 | |
|
1379 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1380 | 0 | CmdCopyBuffer(fPrimaryCommandBuffer, |
1381 | 0 | vkSrcBuffer->vkBuffer(), |
1382 | 0 | vkDstBuffer->vkBuffer(), |
1383 | 0 | /*regionCount=*/1, |
1384 | 0 | ®ion)); |
1385 | |
|
1386 | 0 | return true; |
1387 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::onCopyBufferToBuffer(skgpu::graphite::Buffer const*, unsigned long, skgpu::graphite::Buffer const*, unsigned long, unsigned long) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::onCopyBufferToBuffer(skgpu::graphite::Buffer const*, unsigned long, skgpu::graphite::Buffer const*, unsigned long, unsigned long) |
1388 | | |
1389 | | bool VulkanCommandBuffer::onCopyTextureToBuffer(const Texture* texture, |
1390 | | SkIRect srcRect, |
1391 | | const Buffer* buffer, |
1392 | | size_t bufferOffset, |
1393 | 0 | size_t bufferRowBytes) { |
1394 | 0 | const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(texture); |
1395 | 0 | auto dstBuffer = static_cast<const VulkanBuffer*>(buffer); |
1396 | 0 | SkASSERT(dstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT); |
1397 | | |
1398 | | // Obtain the VkFormat of the source texture so we can determine bytes per block. |
1399 | 0 | VulkanTextureInfo srcTextureInfo; |
1400 | 0 | SkAssertResult(TextureInfos::GetVulkanTextureInfo(texture->textureInfo(), &srcTextureInfo)); |
1401 | 0 | size_t bytesPerBlock = VkFormatBytesPerBlock(srcTextureInfo.fFormat); |
1402 | | |
1403 | | // Set up copy region |
1404 | 0 | VkBufferImageCopy region; |
1405 | 0 | memset(®ion, 0, sizeof(VkBufferImageCopy)); |
1406 | 0 | region.bufferOffset = bufferOffset; |
1407 | | // Vulkan expects bufferRowLength in texels, not bytes. |
1408 | 0 | region.bufferRowLength = (uint32_t)(bufferRowBytes/bytesPerBlock); |
1409 | 0 | region.bufferImageHeight = 0; // Tightly packed |
1410 | 0 | region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0, 0, 1 }; |
1411 | 0 | region.imageOffset = { srcRect.left(), srcRect.top(), /*z=*/0 }; |
1412 | 0 | region.imageExtent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), /*depth=*/1 }; |
1413 | | |
1414 | |     // Cast away const on the source texture so we can transition its layout for the copy from it.
1415 | 0 | const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this, |
1416 | 0 | VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
1417 | 0 | VK_ACCESS_TRANSFER_READ_BIT, |
1418 | 0 | VK_PIPELINE_STAGE_TRANSFER_BIT, |
1419 | 0 | false); |
1420 | | // Set current access mask for buffer |
1421 | 0 | const_cast<VulkanBuffer*>(dstBuffer)->setBufferAccess(this, |
1422 | 0 | VK_ACCESS_TRANSFER_WRITE_BIT, |
1423 | 0 | VK_PIPELINE_STAGE_TRANSFER_BIT); |
1424 | |
|
1425 | 0 | this->submitPipelineBarriers(); |
1426 | |
|
1427 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1428 | 0 | CmdCopyImageToBuffer(fPrimaryCommandBuffer, |
1429 | 0 | srcTexture->vkImage(), |
1430 | 0 | VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
1431 | 0 | dstBuffer->vkBuffer(), |
1432 | 0 | /*regionCount=*/1, |
1433 | 0 | ®ion)); |
1434 | 0 | return true; |
1435 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::onCopyTextureToBuffer(skgpu::graphite::Texture const*, SkIRect, skgpu::graphite::Buffer const*, unsigned long, unsigned long) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::onCopyTextureToBuffer(skgpu::graphite::Texture const*, SkIRect, skgpu::graphite::Buffer const*, unsigned long, unsigned long) |
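A worked example of the bufferRowLength conversion above: the caller's row stride in bytes is divided by the format's bytes per block (for uncompressed formats, one block is one texel) to get a width in texels, and leaving bufferImageHeight at 0 tells Vulkan the rows are tightly packed to imageExtent.height. Reading back a 200x100 VK_FORMAT_R8G8B8A8_UNORM region into a buffer whose rows are padded to 1024 bytes:

#include <cstddef>
#include <cstdint>

// R8G8B8A8 is 4 bytes per texel, so a 1024-byte row stride is 256 texels wide,
// even though only the first 200 texels of each row carry image data.
constexpr size_t   kBytesPerTexel   = 4;     // VK_FORMAT_R8G8B8A8_UNORM
constexpr size_t   kBufferRowBytes  = 1024;  // caller-chosen row stride
constexpr uint32_t kBufferRowLength = kBufferRowBytes / kBytesPerTexel;
static_assert(kBufferRowLength == 256, "1024-byte rows hold 256 RGBA8 texels");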
1436 | | |
1437 | | bool VulkanCommandBuffer::onCopyBufferToTexture(const Buffer* buffer, |
1438 | | const Texture* texture, |
1439 | | const BufferTextureCopyData* copyData, |
1440 | 0 | int count) { |
1441 | 0 | auto srcBuffer = static_cast<const VulkanBuffer*>(buffer); |
1442 | 0 | SkASSERT(srcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT); |
1443 | 0 | const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(texture); |
1444 | | |
1445 | | // Obtain the VkFormat of the destination texture so we can determine bytes per block. |
1446 | 0 | VulkanTextureInfo dstTextureInfo; |
1447 | 0 | SkAssertResult(TextureInfos::GetVulkanTextureInfo(dstTexture->textureInfo(), &dstTextureInfo)); |
1448 | 0 | size_t bytesPerBlock = VkFormatBytesPerBlock(dstTextureInfo.fFormat); |
1449 | 0 | SkISize oneBlockDims = CompressedDimensions(dstTexture->textureInfo().compressionType(), |
1450 | 0 | {1, 1}); |
1451 | | |
1452 | | // Set up copy regions. |
1453 | 0 | TArray<VkBufferImageCopy> regions(count); |
1454 | 0 | for (int i = 0; i < count; ++i) { |
1455 | 0 | VkBufferImageCopy& region = regions.push_back(); |
1456 | 0 | memset(®ion, 0, sizeof(VkBufferImageCopy)); |
1457 | 0 | region.bufferOffset = copyData[i].fBufferOffset; |
1458 | |         // copyData provides the row length in bytes, but Vulkan expects bufferRowLength in texels.
1459 | |         // For compressed formats this is the number of logical pixels, not the number of blocks.
1460 | 0 | region.bufferRowLength = |
1461 | 0 | (uint32_t)((copyData[i].fBufferRowBytes/bytesPerBlock) * oneBlockDims.fWidth); |
1462 | 0 | region.bufferImageHeight = 0; // Tightly packed |
1463 | 0 | region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, copyData[i].fMipLevel, 0, 1 }; |
1464 | 0 | region.imageOffset = { copyData[i].fRect.left(), |
1465 | 0 | copyData[i].fRect.top(), |
1466 | 0 | /*z=*/0 }; |
1467 | 0 | region.imageExtent = { (uint32_t)copyData[i].fRect.width(), |
1468 | 0 | (uint32_t)copyData[i].fRect.height(), |
1469 | 0 | /*depth=*/1 }; |
1470 | 0 | } |
1471 | | |
1472 | |     // Cast away const on the destination texture so we can transition its layout for the copy into it.
1473 | 0 | const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this, |
1474 | 0 | VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
1475 | 0 | VK_ACCESS_TRANSFER_WRITE_BIT, |
1476 | 0 | VK_PIPELINE_STAGE_TRANSFER_BIT, |
1477 | 0 | false); |
1478 | |
|
1479 | 0 | this->submitPipelineBarriers(); |
1480 | |
|
1481 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1482 | 0 | CmdCopyBufferToImage(fPrimaryCommandBuffer, |
1483 | 0 | srcBuffer->vkBuffer(), |
1484 | 0 | dstTexture->vkImage(), |
1485 | 0 | VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
1486 | 0 | regions.size(), |
1487 | 0 | regions.begin())); |
1488 | 0 | return true; |
1489 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::onCopyBufferToTexture(skgpu::graphite::Buffer const*, skgpu::graphite::Texture const*, skgpu::graphite::BufferTextureCopyData const*, int) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::onCopyBufferToTexture(skgpu::graphite::Buffer const*, skgpu::graphite::Texture const*, skgpu::graphite::BufferTextureCopyData const*, int) |
1490 | | |
1491 | | bool VulkanCommandBuffer::onCopyTextureToTexture(const Texture* src, |
1492 | | SkIRect srcRect, |
1493 | | const Texture* dst, |
1494 | | SkIPoint dstPoint, |
1495 | 0 | int mipLevel) { |
1496 | 0 | const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(src); |
1497 | 0 | const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(dst); |
1498 | |
|
1499 | 0 | VkImageCopy copyRegion; |
1500 | 0 | memset(©Region, 0, sizeof(VkImageCopy)); |
1501 | 0 | copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
1502 | 0 | copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 }; |
1503 | 0 | copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, (uint32_t)mipLevel, 0, 1 }; |
1504 | 0 | copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 }; |
1505 | 0 | copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 }; |
1506 | | |
1507 | |     // Cast away const on the src texture so we can transition its layout for the copy from it.
1508 | 0 | const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this, |
1509 | 0 | VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
1510 | 0 | VK_ACCESS_TRANSFER_READ_BIT, |
1511 | 0 | VK_PIPELINE_STAGE_TRANSFER_BIT, |
1512 | 0 | false); |
1513 | |     // Cast away const on the destination texture so we can transition its layout for the copy into it.
1514 | 0 | const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this, |
1515 | 0 | VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
1516 | 0 | VK_ACCESS_TRANSFER_WRITE_BIT, |
1517 | 0 | VK_PIPELINE_STAGE_TRANSFER_BIT, |
1518 | 0 | false); |
1519 | |
|
1520 | 0 | this->submitPipelineBarriers(); |
1521 | |
|
1522 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1523 | 0 | CmdCopyImage(fPrimaryCommandBuffer, |
1524 | 0 | srcTexture->vkImage(), |
1525 | 0 | VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
1526 | 0 | dstTexture->vkImage(), |
1527 | 0 | VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
1528 | 0 | /*regionCount=*/1, |
1529 | 0 | ©Region)); |
1530 | |
|
1531 | 0 | return true; |
1532 | 0 | } |
1533 | | |
1534 | 0 | bool VulkanCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) { |
1535 | 0 | static_cast<const VulkanBuffer*>(buffer)->setBufferAccess(this, |
1536 | 0 | VK_ACCESS_HOST_READ_BIT, |
1537 | 0 | VK_PIPELINE_STAGE_HOST_BIT); |
1538 | |
|
1539 | 0 | *outDidResultInWork = true; |
1540 | 0 | return true; |
1541 | 0 | } |
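The barrier recorded here (VK_ACCESS_HOST_READ_BIT at VK_PIPELINE_STAGE_HOST_BIT) makes prior GPU writes to the buffer visible to host reads, but the CPU side still has to wait for the submission to finish and, for non-HOST_COHERENT memory, invalidate its mapped range before reading. A generic Vulkan-level sketch of that caller-side sequence, not Graphite's own readback path:

#include <vulkan/vulkan.h>

#include <cstdint>

void readBackAfterSubmit(VkDevice device, VkFence submitFence,
                         VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size) {
    // 1) Wait for the submission that contained the HOST_READ barrier to finish.
    vkWaitForFences(device, 1, &submitFence, VK_TRUE, UINT64_MAX);

    // 2) Map the range (or reuse a persistent mapping).
    void* ptr = nullptr;
    vkMapMemory(device, memory, offset, size, /*flags=*/0, &ptr);

    // 3) For non-HOST_COHERENT memory, pull the device writes into host-visible caches.
    const VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr,
                                       memory, offset, size};
    vkInvalidateMappedMemoryRanges(device, 1, &range);

    // ... read the data through ptr ...
    vkUnmapMemory(device, memory);
}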
1542 | | |
1543 | 0 | bool VulkanCommandBuffer::onClearBuffer(const Buffer*, size_t offset, size_t size) { |
1544 | 0 | return false; |
1545 | 0 | } |
1546 | | |
1547 | | void VulkanCommandBuffer::addBufferMemoryBarrier(const Resource* resource, |
1548 | | VkPipelineStageFlags srcStageMask, |
1549 | | VkPipelineStageFlags dstStageMask, |
1550 | 0 | VkBufferMemoryBarrier* barrier) { |
1551 | 0 | SkASSERT(resource); |
1552 | 0 | this->pipelineBarrier(resource, |
1553 | 0 | srcStageMask, |
1554 | 0 | dstStageMask, |
1555 | 0 | /*byRegion=*/false, |
1556 | 0 | kBufferMemory_BarrierType, |
1557 | 0 | barrier); |
1558 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::addBufferMemoryBarrier(skgpu::graphite::Resource const*, unsigned int, unsigned int, VkBufferMemoryBarrier*) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::addBufferMemoryBarrier(skgpu::graphite::Resource const*, unsigned int, unsigned int, VkBufferMemoryBarrier*) |
1559 | | |
1560 | | void VulkanCommandBuffer::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask, |
1561 | | VkPipelineStageFlags dstStageMask, |
1562 | 0 | VkBufferMemoryBarrier* barrier) { |
1563 | |     // We don't pass a resource to the command buffer here. The command buffer would only use
1564 | |     // it to hold a ref, but every place where we add a buffer memory barrier we also record
1565 | |     // some other command that uses the buffer on the command buffer. Those other commands
1566 | |     // already cause the command buffer to hold a ref to the buffer.
1567 | 0 | this->pipelineBarrier(/*resource=*/nullptr, |
1568 | 0 | srcStageMask, |
1569 | 0 | dstStageMask, |
1570 | 0 | /*byRegion=*/false, |
1571 | 0 | kBufferMemory_BarrierType, |
1572 | 0 | barrier); |
1573 | 0 | } |
1574 | | |
1575 | | void VulkanCommandBuffer::addImageMemoryBarrier(const Resource* resource, |
1576 | | VkPipelineStageFlags srcStageMask, |
1577 | | VkPipelineStageFlags dstStageMask, |
1578 | | bool byRegion, |
1579 | 0 | VkImageMemoryBarrier* barrier) { |
1580 | 0 | SkASSERT(resource); |
1581 | 0 | this->pipelineBarrier(resource, |
1582 | 0 | srcStageMask, |
1583 | 0 | dstStageMask, |
1584 | 0 | byRegion, |
1585 | 0 | kImageMemory_BarrierType, |
1586 | 0 | barrier); |
1587 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::addImageMemoryBarrier(skgpu::graphite::Resource const*, unsigned int, unsigned int, bool, VkImageMemoryBarrier*) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::addImageMemoryBarrier(skgpu::graphite::Resource const*, unsigned int, unsigned int, bool, VkImageMemoryBarrier*) |
1588 | | |
1589 | | void VulkanCommandBuffer::pipelineBarrier(const Resource* resource, |
1590 | | VkPipelineStageFlags srcStageMask, |
1591 | | VkPipelineStageFlags dstStageMask, |
1592 | | bool byRegion, |
1593 | | BarrierType barrierType, |
1594 | 0 | void* barrier) { |
1595 | | // TODO: Do we need to handle wrapped command buffers? |
1596 | | // SkASSERT(!this->isWrapped()); |
1597 | 0 | SkASSERT(fActive); |
1598 | | #ifdef SK_DEBUG |
1599 | | // For images we can have barriers inside of render passes but they require us to add more |
1600 | | // support in subpasses which need self dependencies to have barriers inside them. Also, we can |
1601 | | // never have buffer barriers inside of a render pass. For now we will just assert that we are |
1602 | | // not in a render pass. |
1603 | | bool isValidSubpassBarrier = false; |
1604 | 0 | if (barrierType == kImageMemory_BarrierType) { |
1605 | 0 | VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier); |
1606 | 0 | isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) && |
1607 | 0 | (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) && |
1608 | 0 | (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) && |
1609 | 0 | byRegion; |
1610 | 0 | } |
1611 | 0 | SkASSERT(!fActiveRenderPass || isValidSubpassBarrier); |
1612 | | #endif |
1613 | |
|
1614 | 0 | if (barrierType == kBufferMemory_BarrierType) { |
1615 | 0 | const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier); |
1616 | 0 | fBufferBarriers.push_back(*barrierPtr); |
1617 | 0 | } else { |
1618 | 0 | SkASSERT(barrierType == kImageMemory_BarrierType); |
1619 | 0 | const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier); |
1620 | | // We need to check if we are adding a pipeline barrier that covers part of the same |
1621 | | // subresource range as a barrier that is already in current batch. If it does, then we must |
1622 | | // submit the first batch because the vulkan spec does not define a specific ordering for |
1623 | | // barriers submitted in the same batch. |
1624 | | // TODO: Look if we can gain anything by merging barriers together instead of submitting |
1625 | | // the old ones. |
1626 | 0 | for (int i = 0; i < fImageBarriers.size(); ++i) { |
1627 | 0 | VkImageMemoryBarrier& currentBarrier = fImageBarriers[i]; |
1628 | 0 | if (barrierPtr->image == currentBarrier.image) { |
1629 | 0 | const VkImageSubresourceRange newRange = barrierPtr->subresourceRange; |
1630 | 0 | const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange; |
1631 | 0 | SkASSERT(newRange.aspectMask == oldRange.aspectMask); |
1632 | 0 | SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer); |
1633 | 0 | SkASSERT(newRange.layerCount == oldRange.layerCount); |
1634 | 0 | uint32_t newStart = newRange.baseMipLevel; |
1635 | 0 | uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1; |
1636 | 0 | uint32_t oldStart = oldRange.baseMipLevel; |
1637 | 0 | uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1; |
1638 | 0 | if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) { |
1639 | 0 | this->submitPipelineBarriers(); |
1640 | 0 | break; |
1641 | 0 | } |
1642 | 0 | } |
1643 | 0 | } |
1644 | 0 | fImageBarriers.push_back(*barrierPtr); |
1645 | 0 | } |
1646 | 0 | fBarriersByRegion |= byRegion; |
1647 | 0 | fSrcStageMask = fSrcStageMask | srcStageMask; |
1648 | 0 | fDstStageMask = fDstStageMask | dstStageMask; |
1649 | |
|
1650 | 0 | if (resource) { |
1651 | 0 | this->trackResource(sk_ref_sp(resource)); |
1652 | 0 | } |
1653 | 0 | if (fActiveRenderPass) { |
1654 | 0 | this->submitPipelineBarriers(true); |
1655 | 0 | } |
1656 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::pipelineBarrier(skgpu::graphite::Resource const*, unsigned int, unsigned int, bool, skgpu::graphite::VulkanCommandBuffer::BarrierType, void*) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::pipelineBarrier(skgpu::graphite::Resource const*, unsigned int, unsigned int, bool, skgpu::graphite::VulkanCommandBuffer::BarrierType, void*) |
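The mip-overlap test above is a closed-interval intersection: the ranges [newStart, newEnd] and [oldStart, oldEnd] overlap exactly when the larger of the two starts is <= the smaller of the two ends. For example, a pending barrier on mips 0..3 and a new one on mips 2..5 give max(2, 0) = 2 <= min(5, 3) = 3, so the pending batch is flushed first; a new barrier on mips 4..5 gives 4 <= 3, which is false, so it can join the same batch. Restated as a self-contained helper:

#include <algorithm>
#include <cstdint>

// True when the mip ranges [aBase, aBase + aCount) and [bBase, bBase + bCount)
// share at least one level. Counts are assumed to be non-zero.
bool mipRangesOverlap(uint32_t aBase, uint32_t aCount, uint32_t bBase, uint32_t bCount) {
    const uint32_t aLast = aBase + aCount - 1;
    const uint32_t bLast = bBase + bCount - 1;
    return std::max(aBase, bBase) <= std::min(aLast, bLast);
}

// mipRangesOverlap(0, 4, 2, 4) -> true  (levels 2 and 3 are shared)
// mipRangesOverlap(0, 4, 4, 2) -> false (disjoint ranges)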
1657 | | |
1658 | 0 | void VulkanCommandBuffer::submitPipelineBarriers(bool forSelfDependency) { |
1659 | 0 | SkASSERT(fActive); |
1660 | | |
1661 | | // TODO: Do we need to handle SecondaryCommandBuffers as well? |
1662 | | |
1663 | | // Currently we never submit a pipeline barrier without at least one buffer or image barrier. |
1664 | 0 | if (!fBufferBarriers.empty() || !fImageBarriers.empty()) { |
1665 | | // For images we can have barriers inside of render passes but they require us to add more |
1666 | | // support in subpasses which need self dependencies to have barriers inside them. Also, we |
1667 | | // can never have buffer barriers inside of a render pass. For now we will just assert that |
1668 | | // we are not in a render pass. |
1669 | 0 | SkASSERT(!fActiveRenderPass || forSelfDependency); |
1670 | | // TODO: Do we need to handle wrapped CommandBuffers? |
1671 | | // SkASSERT(!this->isWrapped()); |
1672 | 0 | SkASSERT(fSrcStageMask && fDstStageMask); |
1673 | |
|
1674 | 0 | VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0; |
1675 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1676 | 0 | CmdPipelineBarrier(fPrimaryCommandBuffer, fSrcStageMask, fDstStageMask, |
1677 | 0 | dependencyFlags, |
1678 | 0 | /*memoryBarrierCount=*/0, /*pMemoryBarrier=*/nullptr, |
1679 | 0 | fBufferBarriers.size(), fBufferBarriers.begin(), |
1680 | 0 | fImageBarriers.size(), fImageBarriers.begin())); |
1681 | 0 | fBufferBarriers.clear(); |
1682 | 0 | fImageBarriers.clear(); |
1683 | 0 | fBarriersByRegion = false; |
1684 | 0 | fSrcStageMask = 0; |
1685 | 0 | fDstStageMask = 0; |
1686 | 0 | } |
1687 | 0 | SkASSERT(fBufferBarriers.empty()); |
1688 | 0 | SkASSERT(fImageBarriers.empty()); |
1689 | 0 | SkASSERT(!fBarriersByRegion); |
1690 | 0 | SkASSERT(!fSrcStageMask); |
1691 | 0 | SkASSERT(!fDstStageMask); |
1692 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::submitPipelineBarriers(bool) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::submitPipelineBarriers(bool) |
1693 | | |
1694 | | void VulkanCommandBuffer::updateBuffer(const VulkanBuffer* buffer, |
1695 | | const void* data, |
1696 | | size_t dataSize, |
1697 | 0 | size_t dstOffset) { |
1698 | | // vkCmdUpdateBuffer can only be called outside of a render pass. |
1699 | 0 | SkASSERT(fActive && !fActiveRenderPass); |
1700 | 0 | if (!buffer || buffer->vkBuffer() == VK_NULL_HANDLE) { |
1701 | 0 |         SKGPU_LOG_W("VulkanCommandBuffer::updateBuffer requires a valid VulkanBuffer pointer backed "
1702 | 0 |                     "by a valid VkBuffer handle");
1703 | 0 | return; |
1704 | 0 | } |
1705 | | |
1706 | |     // Per the spec, vkCmdUpdateBuffer is treated as a "transfer" operation for the purposes of
1707 | | // synchronization barriers. Ensure this write operation occurs after any previous read |
1708 | | // operations and without clobbering any other write operations on the same memory in the cache. |
1709 | 0 | buffer->setBufferAccess(this, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT); |
1710 | 0 | this->submitPipelineBarriers(); |
1711 | |
|
1712 | 0 | VULKAN_CALL(fSharedContext->interface(), CmdUpdateBuffer(fPrimaryCommandBuffer, |
1713 | 0 | buffer->vkBuffer(), |
1714 | 0 | dstOffset, |
1715 | 0 | dataSize, |
1716 | 0 | data)); |
1717 | 0 | } Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::updateBuffer(skgpu::graphite::VulkanBuffer const*, void const*, unsigned long, unsigned long) Unexecuted instantiation: skgpu::graphite::VulkanCommandBuffer::updateBuffer(skgpu::graphite::VulkanBuffer const*, void const*, unsigned long, unsigned long) |
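Beyond the outside-a-render-pass rule asserted above, vkCmdUpdateBuffer has spec-level limits the caller must respect: dstOffset and dataSize must be multiples of 4 and dataSize may be at most 65536 bytes; larger or unaligned payloads need a staging buffer plus vkCmdCopyBuffer instead. A caller-side guard sketch (the constants come from the Vulkan spec; the helper itself is illustrative):

#include <vulkan/vulkan.h>

// Returns true when a payload may legally go through vkCmdUpdateBuffer;
// anything else should fall back to a staging buffer and vkCmdCopyBuffer.
bool fitsInlineBufferUpdate(VkDeviceSize dstOffset, VkDeviceSize dataSize) {
    constexpr VkDeviceSize kMaxInlineUpdateSize = 65536;  // spec limit for vkCmdUpdateBuffer
    return dataSize > 0 &&
           dataSize <= kMaxInlineUpdateSize &&
           (dataSize % 4) == 0 &&
           (dstOffset % 4) == 0;
}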
1718 | | |
1719 | 0 | void VulkanCommandBuffer::nextSubpass() { |
1720 | | // TODO: Use VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS if we add secondary cmd buffers |
1721 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1722 | 0 | CmdNextSubpass(fPrimaryCommandBuffer, VK_SUBPASS_CONTENTS_INLINE)); |
1723 | 0 | } |
1724 | | |
1725 | 0 | void VulkanCommandBuffer::setViewport(SkIRect viewport) { |
1726 | 0 | VkViewport vkViewport = { |
1727 | 0 | (float) viewport.fLeft, |
1728 | 0 | (float) viewport.fTop, |
1729 | 0 | (float) viewport.width(), |
1730 | 0 | (float) viewport.height(), |
1731 | 0 | 0.0f, // minDepth |
1732 | 0 | 1.0f, // maxDepth |
1733 | 0 | }; |
1734 | 0 | VULKAN_CALL(fSharedContext->interface(), |
1735 | 0 | CmdSetViewport(fPrimaryCommandBuffer, |
1736 | 0 | /*firstViewport=*/0, |
1737 | 0 | /*viewportCount=*/1, |
1738 | 0 | &vkViewport)); |
1739 | 0 | } |
1740 | | |
1741 | | } // namespace skgpu::graphite |