/src/skia/src/gpu/ganesh/vk/GrVkCommandBuffer.cpp
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"

#include "include/core/SkRect.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/vk/GrVkBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkImage.h"
#include "src/gpu/ganesh/vk/GrVkImageView.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkPipelineState.h"
#include "src/gpu/ganesh/vk/GrVkRenderPass.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

using namespace skia_private;

void GrVkCommandBuffer::invalidateState() {
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    fBoundIndexBuffer = VK_NULL_HANDLE;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be non-negative to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
}

void GrVkCommandBuffer::freeGPUData(const GrGpu* gpu, VkCommandPool cmdPool) const {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive);
    SkASSERT(fTrackedResources.empty());
    SkASSERT(fTrackedRecycledResources.empty());
    SkASSERT(fTrackedGpuBuffers.empty());
    SkASSERT(fTrackedGpuSurfaces.empty());
    SkASSERT(cmdPool != VK_NULL_HANDLE);
    SkASSERT(!this->isWrapped());

    const GrVkGpu* vkGpu = (const GrVkGpu*)gpu;
    GR_VK_CALL(vkGpu->vkInterface(), FreeCommandBuffers(vkGpu->device(), cmdPool, 1, &fCmdBuffer));

    this->onFreeGPUData(vkGpu);
}

void GrVkCommandBuffer::releaseResources() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive || this->isWrapped());
    fTrackedResources.clear();
    fTrackedRecycledResources.clear();

    fTrackedGpuBuffers.clear();
    fTrackedGpuSurfaces.clear();

    this->invalidateState();

    this->onReleaseResources();
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrManagedResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
#ifdef SK_DEBUG
    // For images we can have barriers inside of render passes, but they require us to add more
    // support in subpasses, which need self dependencies to have barriers inside them. Also, we
    // can never have buffer barriers inside of a render pass. For now we will just assert that
    // we are not in a render pass.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier already in the current batch. If so, we must submit
        // the current batch first, because the Vulkan spec does not define a specific ordering
        // for barriers submitted in the same batch.
        // TODO: Look at whether we can gain anything by merging barriers together instead of
        // submitting the old ones.
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
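                // The closed mip ranges [newStart, newEnd] and [oldStart, oldEnd] overlap
                // exactly when the larger of the two starts is <= the smaller of the two ends.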
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers(gpu);
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    fHasWork = true;
    if (resource) {
        this->addResource(resource);
    }
    if (fActiveRenderPass) {
        this->submitPipelineBarriers(gpu, true);
    }
}

void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency) {
    SkASSERT(fIsActive);

    // Currently we never submit a pipeline barrier without at least one memory barrier.
    if (!fBufferBarriers.empty() || !fImageBarriers.empty()) {
        // For images we can have barriers inside of render passes, but they require us to add
        // more support in subpasses, which need self dependencies to have barriers inside them.
        // Also, we can never have buffer barriers inside of a render pass. For now we will just
        // assert that we are not in a render pass.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        // TODO(https://crbug.com/1469231): The linked bug references a crash report from calling
        // CmdPipelineBarrier. The checks below were added to ensure that we are passing in buffer
        // counts >= 0, and in the case of >0, that the buffers are non-null. Evaluate whether this
        // change leads to a reduction in crash instances. If not, the issue may lie within the
        // driver itself and these checks can be removed.
        if (!fBufferBarriers.empty() && fBufferBarriers.begin() == nullptr) {
            fBufferBarriers.clear();  // Sets the size to 0
        }
        if (!fImageBarriers.empty() && fImageBarriers.begin() == nullptr) {
            fImageBarriers.clear();  // Sets the size to 0
        }

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
                fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
                fBufferBarriers.size(), fBufferBarriers.begin(),
                fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(fBufferBarriers.empty());
    SkASSERT(fImageBarriers.empty());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}

void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        sk_sp<const GrBuffer> buffer) {
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    SkASSERT(binding < kMaxInputBuffers);
    // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundInputBuffers[binding]) {
        VkDeviceSize offset = 0;
        GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
                                                            binding,
                                                            1,
                                                            &vkBuffer,
                                                            &offset));
        fBoundInputBuffers[binding] = vkBuffer;
        this->addGrBuffer(std::move(buffer));
    }
}

void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer) {
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundIndexBuffer) {
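        // Ganesh index data is uint16_t throughout, so the index type is hard-coded below.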
        GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
                                                          vkBuffer, /*offset=*/0,
                                                          VK_INDEX_TYPE_UINT16));
        fBoundIndexBuffer = vkBuffer;
        this->addGrBuffer(std::move(buffer));
    }
}

void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);

    this->addingWork(gpu);

#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
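    // Workaround: per this caps bit, some drivers appear to clobber the bound command buffer
    // state when vkCmdClearAttachments is recorded, so drop our cached state and re-bind
    // everything on next use.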
    if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
        this->invalidateState();
    }
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
    this->addResource(std::move(pipeline));
}

void GrVkCommandBuffer::pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
                                      VkShaderStageFlags stageFlags, uint32_t offset,
                                      uint32_t size, const void* values) {
    SkASSERT(fIsActive);
    // offset and size must be a multiple of 4
    SkASSERT(!SkToBool(offset & 0x3));
    SkASSERT(!SkToBool(size & 0x3));
    GR_VK_CALL(gpu->vkInterface(), CmdPushConstants(fCmdBuffer,
                                                    layout,
                                                    stageFlags,
                                                    offset,
                                                    size,
                                                    values));
}

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}

void GrVkCommandBuffer::drawIndirect(const GrVkGpu* gpu,
                                     sk_sp<const GrBuffer> indirectBuffer,
                                     VkDeviceSize offset,
                                     uint32_t drawCount,
                                     uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndirect(fCmdBuffer,
                                                   vkBuffer,
                                                   offset,
                                                   drawCount,
                                                   stride));
    this->addGrBuffer(std::move(indirectBuffer));
}

void GrVkCommandBuffer::drawIndexedIndirect(const GrVkGpu* gpu,
                                            sk_sp<const GrBuffer> indirectBuffer,
                                            VkDeviceSize offset,
                                            uint32_t drawCount,
                                            uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexedIndirect(fCmdBuffer,
                                                          vkBuffer,
                                                          offset,
                                                          drawCount,
                                                          stride));
    this->addGrBuffer(std::move(indirectBuffer));
}

void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (0 != memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (0 != memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}

////////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(GrVkGpu* gpu,
                                                           VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer);
}

void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
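    // Each recording is submitted exactly once before the buffer is reset, so ONE_TIME_SUBMIT
    // lets the driver skip any bookkeeping needed to support re-submission.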
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu, bool abandoningBuffer) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    // If we are in the process of abandoning the context then the GrResourceCache will have freed
    // all resources before destroying the GrVkGpu. When we destroy the GrVkGpu we call end on the
    // command buffer to keep all our state tracking consistent. However, the Vulkan validation
    // layers complain about calling end on a command buffer that contains resources that have
    // already been deleted. The Vulkan API does not require a command buffer to be ended before it
    // is deleted, so we just skip the Vulkan API calls and update our own state tracking.
    if (!abandoningBuffer) {
        this->submitPipelineBarriers(gpu);

        GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    }
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

bool GrVkPrimaryCommandBuffer::beginRenderPass(GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               sk_sp<const GrVkFramebuffer> framebuffer,
                                               const VkClearValue clearValues[],
                                               const GrSurface* target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    SkASSERT(framebuffer);

    this->addingWork(gpu);

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

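    // A render pass whose contents come from secondary command buffers must be begun with
    // SECONDARY_COMMAND_BUFFERS contents; commands recorded directly here require INLINE.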
    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    this->addResource(std::move(framebuffer));
    this->addGrSurface(sk_ref_sp(target));
    return true;
}

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkPrimaryCommandBuffer::nexSubpass(GrVkGpu* gpu, bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;
    GR_VK_CALL(gpu->vkInterface(), CmdNextSubpass(fCmdBuffer, contents));
}

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    // The Vulkan spec allows secondary command buffers to be executed on a primary command buffer
    // only if the command pools they were each allocated from were created for the same queue
    // family. However, we currently always create them from the same pool.
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    fSecondaryCommandBuffers.push_back(std::move(buffer));
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}

static bool submit_to_queue(GrVkGpu* gpu,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            GrProtected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == GrProtected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }
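
    // For protected contexts the VkProtectedSubmitInfo is chained into the submit info below via
    // pNext, which marks the entire batch as a protected submission.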
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, QueueSubmit(queue, 1, &submitInfo, fence));
    return result == VK_SUCCESS;
}

bool GrVkPrimaryCommandBuffer::submitToQueue(
        GrVkGpu* gpu,
        VkQueue queue,
        TArray<GrVkSemaphore::Resource*>& signalSemaphores,
        TArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        GR_VK_CALL_RESULT(gpu, err, CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST so we assert we succeeded.
        GR_VK_CALL_RESULT(gpu, err, ResetFences(gpu->device(), 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

    int signalCount = signalSemaphores.size();
    int waitCount = waitSemaphores.size();

    bool submitted = false;

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores, so we can simply submit it to the
        // queue with no worries.
        submitted = submit_to_queue(
                gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
                GrProtected(gpu->protectedContext()));
    } else {
        TArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        TArray<VkSemaphore> vkWaitSems(waitCount);
        TArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                // We only block the fragment stage since client provided resources are not used
                // before the fragment stage. This allows the driver to begin vertex work while
                // waiting on the semaphore. We also add in the transfer stage for uses of clients
                // calling read or write pixels.
                vkWaitStages.push_back(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                       VK_PIPELINE_STAGE_TRANSFER_BIT);
            }
        }
        submitted = submit_to_queue(gpu, queue, fSubmitFence, vkWaitSems.size(),
                                    vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
                                    vkSignalSems.size(), vkSignalSems.begin(),
                                    GrProtected(gpu->protectedContext()));
        if (submitted) {
            for (int i = 0; i < signalCount; ++i) {
                signalSemaphores[i]->markAsSignaled();
            }
            for (int i = 0; i < waitCount; ++i) {
                waitSemaphores[i]->markAsWaited();
            }
        }
    }

    if (!submitted) {
        // Destroy the fence, or else we will try to wait forever for it to finish.
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}

void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
    if (fSubmitFence == VK_NULL_HANDLE) {
        return;
    }
    GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
}

bool GrVkPrimaryCommandBuffer::finished(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    GR_VK_CALL_RESULT_NOCHECK(gpu, err, GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
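        // A lost device is reported as finished: its fence will never signal, so returning true
        // here keeps callers from waiting forever on work that can no longer complete.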
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}

void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<skgpu::RefCntedCallback> finishedProc) {
    fFinishedProcs.push_back(std::move(finishedProc));
}

void GrVkPrimaryCommandBuffer::onReleaseResources() {
    for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
        fSecondaryCommandBuffers[i]->releaseResources();
    }
    this->callFinishedProcs();
}

void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {
    for (int i = 0; i < fSecondaryCommandBuffers.size(); ++i) {
        fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
    }
    fSecondaryCommandBuffers.clear();
}

void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrManagedResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrManagedResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkImage& srcImage,
                                         const GrVkImage& dstImage,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    this->blitImage(gpu,
                    srcImage.resource(),
                    srcImage.image(),
                    srcImage.currentLayout(),
                    dstImage.resource(),
                    dstImage.image(),
                    dstImage.currentLayout(),
                    blitRegionCount,
                    blitRegions,
                    filter);
}

void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(dstBuffer.get());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        vkBuffer->vkBuffer(),
                                                        copyRegionCount,
                                                        copyRegions));
    this->addResource(srcImage->resource());
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 VkBuffer srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer,
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
    this->addResource(dstImage->resource());
}

void GrVkPrimaryCommandBuffer::fillBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> buffer,
                                          VkDeviceSize offset,
                                          VkDeviceSize size,
                                          uint32_t data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);

    const GrVkBuffer* bufferVk = static_cast<GrVkBuffer*>(buffer.get());

    GR_VK_CALL(gpu->vkInterface(), CmdFillBuffer(fCmdBuffer,
                                                 bufferVk->vkBuffer(),
                                                 offset,
                                                 size,
                                                 data));
    this->addGrBuffer(std::move(buffer));
}

void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          sk_sp<GrGpuBuffer> srcBuffer,
                                          sk_sp<GrGpuBuffer> dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
#ifdef SK_DEBUG
    for (uint32_t i = 0; i < regionCount; ++i) {
        const VkBufferCopy& region = regions[i];
        SkASSERT(region.size > 0);
        SkASSERT(region.srcOffset < srcBuffer->size());
        SkASSERT(region.dstOffset < dstBuffer->size());
        SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
        SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
    }
#endif

    const GrVkBuffer* srcVk = static_cast<GrVkBuffer*>(srcBuffer.get());
    const GrVkBuffer* dstVk = static_cast<GrVkBuffer*>(dstBuffer.get());

    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
                                                 srcVk->vkBuffer(),
                                                 dstVk->vkBuffer(),
                                                 regionCount,
                                                 regions));
    this->addGrBuffer(std::move(srcBuffer));
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            sk_sp<GrVkBuffer> dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
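    // Vulkan limits vkCmdUpdateBuffer to 65536 bytes per call; anything larger needs a different
    // path (e.g. a staging buffer plus CmdCopyBuffer).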
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));  // four byte aligned
    this->addingWork(gpu);
    GR_VK_CALL(
            gpu->vkInterface(),
            CmdUpdateBuffer(
                    fCmdBuffer, dstBuffer->vkBuffer(), dstOffset, dataSize, (const uint32_t*)data));
    this->addGrBuffer(std::move(dstBuffer));
}

void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addingWork(gpu);
    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(!fActiveRenderPass);
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
    SkASSERT(fSecondaryCommandBuffers.empty());
}

////////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
    SkASSERT(cmdPool);
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer, /*externalRenderPass=*/nullptr);
}

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(
        VkCommandBuffer cmdBuffer, const GrVkRenderPass* externalRenderPass) {
    return new GrVkSecondaryCommandBuffer(cmdBuffer, externalRenderPass);
}

void GrVkSecondaryCommandBuffer::begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
    SkASSERT(!fIsActive);
    SkASSERT(!this->isWrapped());
    SkASSERT(compatibleRenderPass);
    fActiveRenderPass = compatibleRenderPass;

    VkCommandBufferInheritanceInfo inheritanceInfo;
    memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
    inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritanceInfo.pNext = nullptr;
    inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
    inheritanceInfo.subpass = 0;  // Currently only using 1 subpass for each render pass
    inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
    inheritanceInfo.occlusionQueryEnable = false;
    inheritanceInfo.queryFlags = 0;
    inheritanceInfo.pipelineStatistics = 0;

    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
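    // This buffer is only ever recorded for execution inside a render pass, so it must be begun
    // with RENDER_PASS_CONTINUE_BIT and the inheritance info filled out above.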
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
                               VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));

    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!this->isWrapped());
    GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fHasWork = false;
    fIsActive = false;
}

void GrVkSecondaryCommandBuffer::recycle(GrVkCommandPool* cmdPool) {
    if (this->isWrapped()) {
        delete this;
    } else {
        cmdPool->recycleSecondaryCommandBuffer(this);
    }
}