/src/skia/src/gpu/graphite/vk/VulkanResourceProvider.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright 2022 Google LLC |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license that can be |
5 | | * found in the LICENSE file. |
6 | | */ |
7 | | |
8 | | #include "src/gpu/graphite/vk/VulkanResourceProvider.h" |
9 | | |
10 | | #include "include/core/SkSpan.h" |
11 | | #include "include/gpu/MutableTextureState.h" |
12 | | #include "include/gpu/graphite/BackendTexture.h" |
13 | | #include "include/gpu/graphite/vk/VulkanGraphiteTypes.h" |
14 | | #include "include/gpu/vk/VulkanMutableTextureState.h" |
15 | | #include "src/gpu/graphite/Buffer.h" |
16 | | #include "src/gpu/graphite/ComputePipeline.h" |
17 | | #include "src/gpu/graphite/GraphicsPipeline.h" |
18 | | #include "src/gpu/graphite/RenderPassDesc.h" |
19 | | #include "src/gpu/graphite/Sampler.h" |
20 | | #include "src/gpu/graphite/Texture.h" |
21 | | #include "src/gpu/graphite/vk/VulkanBuffer.h" |
22 | | #include "src/gpu/graphite/vk/VulkanCommandBuffer.h" |
23 | | #include "src/gpu/graphite/vk/VulkanDescriptorPool.h" |
24 | | #include "src/gpu/graphite/vk/VulkanDescriptorSet.h" |
25 | | #include "src/gpu/graphite/vk/VulkanFramebuffer.h" |
26 | | #include "src/gpu/graphite/vk/VulkanGraphicsPipeline.h" |
27 | | #include "src/gpu/graphite/vk/VulkanGraphiteTypesPriv.h" |
28 | | #include "src/gpu/graphite/vk/VulkanRenderPass.h" |
29 | | #include "src/gpu/graphite/vk/VulkanSampler.h" |
30 | | #include "src/gpu/graphite/vk/VulkanSharedContext.h" |
31 | | #include "src/gpu/graphite/vk/VulkanTexture.h" |
32 | | #include "src/gpu/graphite/vk/VulkanYcbcrConversion.h" |
33 | | #include "src/gpu/vk/VulkanMemory.h" |
34 | | #include "src/sksl/SkSLCompiler.h" |
35 | | |
36 | | #ifdef SK_BUILD_FOR_ANDROID |
37 | | #include "src/gpu/vk/VulkanUtilsPriv.h" |
38 | | #include <android/hardware_buffer.h> |
39 | | #endif |
40 | | |
41 | | namespace skgpu::graphite { |
42 | | |
43 | | constexpr int kMaxNumberOfCachedBufferDescSets = 1024; |
44 | | |
45 | | VulkanResourceProvider::VulkanResourceProvider(SharedContext* sharedContext, |
46 | | SingleOwner* singleOwner, |
47 | | uint32_t recorderID, |
48 | | size_t resourceBudget, |
49 | | sk_sp<Buffer> intrinsicConstantUniformBuffer, |
50 | | sk_sp<Buffer> loadMSAAVertexBuffer) |
51 | | : ResourceProvider(sharedContext, singleOwner, recorderID, resourceBudget) |
52 | | , fIntrinsicUniformBuffer(std::move(intrinsicConstantUniformBuffer)) |
53 | | , fLoadMSAAVertexBuffer(std::move(loadMSAAVertexBuffer)) |
54 | 0 | , fUniformBufferDescSetCache(kMaxNumberOfCachedBufferDescSets) {} |
55 | | |
56 | 0 | VulkanResourceProvider::~VulkanResourceProvider() { |
57 | 0 | if (fPipelineCache != VK_NULL_HANDLE) { |
58 | 0 | VULKAN_CALL(this->vulkanSharedContext()->interface(), |
59 | 0 | DestroyPipelineCache(this->vulkanSharedContext()->device(), |
60 | 0 | fPipelineCache, |
61 | 0 | nullptr)); |
62 | 0 | } |
63 | 0 | if (fMSAALoadVertShaderModule != VK_NULL_HANDLE) { |
64 | 0 | VULKAN_CALL(this->vulkanSharedContext()->interface(), |
65 | 0 | DestroyShaderModule(this->vulkanSharedContext()->device(), |
66 | 0 | fMSAALoadVertShaderModule, |
67 | 0 | nullptr)); |
68 | 0 | } |
69 | 0 | if (fMSAALoadFragShaderModule != VK_NULL_HANDLE) { |
70 | 0 | VULKAN_CALL(this->vulkanSharedContext()->interface(), |
71 | 0 | DestroyShaderModule(this->vulkanSharedContext()->device(), |
72 | 0 | fMSAALoadFragShaderModule, |
73 | 0 | nullptr)); |
74 | 0 | } |
75 | 0 | if (fMSAALoadPipelineLayout != VK_NULL_HANDLE) { |
76 | 0 | VULKAN_CALL(this->vulkanSharedContext()->interface(), |
77 | 0 | DestroyPipelineLayout(this->vulkanSharedContext()->device(), |
78 | 0 | fMSAALoadPipelineLayout, |
79 | 0 | nullptr)); |
80 | 0 | } |
81 | 0 | } |
82 | | |
83 | 0 | const VulkanSharedContext* VulkanResourceProvider::vulkanSharedContext() const { |
84 | 0 | return static_cast<const VulkanSharedContext*>(fSharedContext); |
85 | 0 | } |
86 | | |
87 | 0 | sk_sp<Texture> VulkanResourceProvider::onCreateWrappedTexture(const BackendTexture& texture) { |
88 | 0 | sk_sp<VulkanYcbcrConversion> ycbcrConversion; |
89 | 0 | if (TextureInfos::GetVulkanYcbcrConversionInfo(texture.info()).isValid()) { |
90 | 0 | ycbcrConversion = this->findOrCreateCompatibleYcbcrConversion( |
91 | 0 | TextureInfos::GetVulkanYcbcrConversionInfo(texture.info())); |
92 | 0 | if (!ycbcrConversion) { |
93 | 0 | return nullptr; |
94 | 0 | } |
95 | 0 | } |
96 | | |
97 | 0 | return VulkanTexture::MakeWrapped(this->vulkanSharedContext(), |
98 | 0 | texture.dimensions(), |
99 | 0 | texture.info(), |
100 | 0 | BackendTextures::GetMutableState(texture), |
101 | 0 | BackendTextures::GetVkImage(texture), |
102 | 0 | /*alloc=*/{} /*Skia does not own wrapped texture memory*/, |
103 | 0 | std::move(ycbcrConversion)); |
104 | 0 | } |
105 | | |
106 | 0 | sk_sp<Buffer> VulkanResourceProvider::refIntrinsicConstantBuffer() const { |
107 | 0 | return fIntrinsicUniformBuffer; |
108 | 0 | } |
109 | | |
110 | 0 | const Buffer* VulkanResourceProvider::loadMSAAVertexBuffer() const { |
111 | 0 | return fLoadMSAAVertexBuffer.get(); |
112 | 0 | } |
113 | | |
114 | | sk_sp<GraphicsPipeline> VulkanResourceProvider::createGraphicsPipeline( |
115 | | const RuntimeEffectDictionary* runtimeDict, |
116 | | const GraphicsPipelineDesc& pipelineDesc, |
117 | 0 | const RenderPassDesc& renderPassDesc) { |
118 | 0 | return VulkanGraphicsPipeline::Make(this, |
119 | 0 | runtimeDict, |
120 | 0 | pipelineDesc, |
121 | 0 | renderPassDesc); |
122 | 0 | } |
123 | | |
124 | 0 | sk_sp<ComputePipeline> VulkanResourceProvider::createComputePipeline(const ComputePipelineDesc&) { |
125 | 0 | return nullptr; |
126 | 0 | } |
127 | | |
128 | | sk_sp<Texture> VulkanResourceProvider::createTexture(SkISize size, |
129 | | const TextureInfo& info, |
130 | 0 | skgpu::Budgeted budgeted) { |
131 | 0 | sk_sp<VulkanYcbcrConversion> ycbcrConversion; |
132 | 0 | if (TextureInfos::GetVulkanYcbcrConversionInfo(info).isValid()) { |
133 | 0 | ycbcrConversion = this->findOrCreateCompatibleYcbcrConversion( |
134 | 0 | TextureInfos::GetVulkanYcbcrConversionInfo(info)); |
135 | 0 | if (!ycbcrConversion) { |
136 | 0 | return nullptr; |
137 | 0 | } |
138 | 0 | } |
139 | | |
140 | 0 | return VulkanTexture::Make(this->vulkanSharedContext(), |
141 | 0 | size, |
142 | 0 | info, |
143 | 0 | budgeted, |
144 | 0 | std::move(ycbcrConversion)); |
145 | 0 | } |
146 | | |
147 | | sk_sp<Buffer> VulkanResourceProvider::createBuffer(size_t size, |
148 | | BufferType type, |
149 | 0 | AccessPattern accessPattern) { |
150 | 0 | return VulkanBuffer::Make(this->vulkanSharedContext(), size, type, accessPattern); |
151 | 0 | } |
152 | | |
153 | 0 | sk_sp<Sampler> VulkanResourceProvider::createSampler(const SamplerDesc& samplerDesc) { |
154 | 0 | sk_sp<VulkanYcbcrConversion> ycbcrConversion = nullptr; |
155 | | |
156 | | // Non-zero conversion information means the sampler utilizes a ycbcr conversion. |
157 | 0 | bool usesYcbcrConversion = (samplerDesc.desc() >> SamplerDesc::kImmutableSamplerInfoShift) != 0; |
158 | 0 | if (usesYcbcrConversion) { |
159 | 0 | GraphiteResourceKey ycbcrKey = VulkanYcbcrConversion::GetKeyFromSamplerDesc(samplerDesc); |
160 | 0 | if (Resource* resource = fResourceCache->findAndRefResource(ycbcrKey, |
161 | 0 | skgpu::Budgeted::kYes)) { |
162 | 0 | ycbcrConversion = |
163 | 0 | sk_sp<VulkanYcbcrConversion>(static_cast<VulkanYcbcrConversion*>(resource)); |
164 | 0 | } else { |
165 | 0 | ycbcrConversion = VulkanYcbcrConversion::Make( |
166 | 0 | this->vulkanSharedContext(), |
167 | 0 | static_cast<uint32_t>( |
168 | 0 | samplerDesc.desc() >> SamplerDesc::kImmutableSamplerInfoShift), |
169 | 0 | (uint64_t)(samplerDesc.externalFormatMSBs()) << 32 | samplerDesc.format()); |
170 | 0 | SkASSERT(ycbcrConversion); |
171 |  |  |
172 | 0 | ycbcrConversion->setKey(ycbcrKey); |
173 | 0 | fResourceCache->insertResource(ycbcrConversion.get()); |
174 | 0 | } |
175 | 0 | } |
176 |  |  |
177 | 0 | return VulkanSampler::Make(this->vulkanSharedContext(), |
178 | 0 | samplerDesc, |
179 | 0 |                                std::move(ycbcrConversion)); |
180 | 0 | } |
Unexecuted instantiation: skgpu::graphite::VulkanResourceProvider::createSampler(skgpu::graphite::SamplerDesc const&)
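An aside on the mechanism above: a YCbCr conversion must be baked into a sampler when the sampler is created, and such a sampler can then only appear as an immutable sampler in a descriptor set layout, which is why createSampler resolves and caches the conversion before building the sampler. A minimal sketch of the raw Vulkan calls the VulkanYcbcrConversion/VulkanSampler wrappers are assumed to reduce to (the device handle and the format/model/range values are illustrative, not necessarily what Skia picks):

    // Create the conversion once; the GraphiteResourceKey lookup above exists to
    // share one conversion among samplers with identical parameters.
    VkSamplerYcbcrConversionCreateInfo conversionInfo = {};
    conversionInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
    conversionInfo.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM; // VK_FORMAT_UNDEFINED for external formats
    conversionInfo.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709;
    conversionInfo.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW;
    conversionInfo.chromaFilter = VK_FILTER_LINEAR;
    VkSamplerYcbcrConversion conversion;
    vkCreateSamplerYcbcrConversion(device, &conversionInfo, nullptr, &conversion);

    // Chain the conversion into the sampler. With a conversion attached, the spec
    // requires clamp-to-edge addressing, no anisotropy, and (unless the format
    // supports separate reconstruction filters) min/mag filters equal to chromaFilter.
    VkSamplerYcbcrConversionInfo chainedConversion = {};
    chainedConversion.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
    chainedConversion.conversion = conversion;

    VkSamplerCreateInfo samplerInfo = {};
    samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    samplerInfo.pNext = &chainedConversion;
    samplerInfo.magFilter = VK_FILTER_LINEAR;
    samplerInfo.minFilter = VK_FILTER_LINEAR;
    samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    VkSampler sampler;
    vkCreateSampler(device, &samplerInfo, nullptr, &sampler);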
181 | | |
182 | | BackendTexture VulkanResourceProvider::onCreateBackendTexture(SkISize dimensions, |
183 | 0 | const TextureInfo& info) { |
184 | 0 | VulkanTextureInfo vkTexInfo; |
185 | 0 | if (!TextureInfos::GetVulkanTextureInfo(info, &vkTexInfo)) { |
186 | 0 | return {}; |
187 | 0 | } |
188 | 0 | VulkanTexture::CreatedImageInfo createdTextureInfo; |
189 | 0 | if (!VulkanTexture::MakeVkImage(this->vulkanSharedContext(), dimensions, info, |
190 | 0 | &createdTextureInfo)) { |
191 | 0 | return {}; |
192 | 0 | } |
193 | 0 | return BackendTextures::MakeVulkan( |
194 | 0 | dimensions, |
195 | 0 | vkTexInfo, |
196 | 0 | skgpu::MutableTextureStates::GetVkImageLayout(createdTextureInfo.fMutableState.get()), |
197 | 0 | skgpu::MutableTextureStates::GetVkQueueFamilyIndex( |
198 | 0 | createdTextureInfo.fMutableState.get()), |
199 | 0 | createdTextureInfo.fImage, |
200 | 0 | createdTextureInfo.fMemoryAlloc); |
201 | 0 | } |
202 | | |
203 | | namespace { |
204 | 0 | GraphiteResourceKey build_desc_set_key(const SkSpan<DescriptorData>& requestedDescriptors) { |
205 | 0 | static const ResourceType kType = GraphiteResourceKey::GenerateResourceType(); |
206 | | |
207 |  |     // The number of uint32s needed for a key can depend on whether we use immutable samplers or not. |
208 |  |     // So, accumulate the key data in a first pass to determine that quantity, then simply copy it |
209 |  |     // into the builder afterwards. |
210 | 0 | skia_private::TArray<uint32_t> keyData (requestedDescriptors.size() + 1); |
211 |  |  |
212 | 0 | keyData.push_back(requestedDescriptors.size()); |
213 | 0 | for (const DescriptorData& desc : requestedDescriptors) { |
214 | 0 | keyData.push_back(static_cast<uint8_t>(desc.fType) << 24 | |
215 | 0 | desc.fBindingIndex << 16 | |
216 | 0 | static_cast<uint16_t>(desc.fCount)); |
217 | 0 | if (desc.fImmutableSampler) { |
218 | 0 | const VulkanSampler* sampler = |
219 | 0 | static_cast<const VulkanSampler*>(desc.fImmutableSampler); |
220 | 0 | SkASSERT(sampler); |
221 | 0 | keyData.push_back_n(sampler->samplerDesc().asSpan().size(), |
222 | 0 | sampler->samplerDesc().asSpan().data()); |
223 | 0 | } |
224 | 0 | } |
225 |  |  |
226 | 0 | GraphiteResourceKey key; |
227 | 0 | GraphiteResourceKey::Builder builder(&key, kType, keyData.size(), Shareable::kNo); |
228 |  |  |
229 | 0 | for (int i = 0; i < keyData.size(); i++) { |
230 | 0 | builder[i] = keyData[i]; |
231 | 0 | } |
232 |  |  |
233 | 0 | builder.finish(); |
234 | 0 | return key; |
235 | 0 | } |
Unexecuted instantiation: VulkanResourceProvider.cpp:skgpu::graphite::(anonymous namespace)::build_desc_set_key(SkSpan<skgpu::graphite::DescriptorData> const&)
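Each descriptor contributes one packed word to the key, with immutable samplers appending their own SamplerDesc words afterwards (which is why the size is counted before the builder is created). A worked example of the packing in the loop above, using hypothetical values (fType enum value 2, binding index 1, count 3):

    // (type << 24) | (bindingIndex << 16) | count, as computed in the loop above
    uint32_t word = (2u << 24) | (1u << 16) | 3u;   // == 0x02010003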
236 | | |
237 | | sk_sp<VulkanDescriptorSet> add_new_desc_set_to_cache(const VulkanSharedContext* context, |
238 | | const sk_sp<VulkanDescriptorPool>& pool, |
239 | | const GraphiteResourceKey& descSetKey, |
240 | 0 | ResourceCache* resourceCache) { |
241 | 0 | sk_sp<VulkanDescriptorSet> descSet = VulkanDescriptorSet::Make(context, pool); |
242 | 0 | if (!descSet) { |
243 | 0 | return nullptr; |
244 | 0 | } |
245 | 0 | descSet->setKey(descSetKey); |
246 | 0 | resourceCache->insertResource(descSet.get()); |
247 |  |  |
248 | 0 | return descSet; |
249 | 0 | } |
250 | | } // anonymous namespace |
251 | | |
252 | | sk_sp<VulkanDescriptorSet> VulkanResourceProvider::findOrCreateDescriptorSet( |
253 | 0 | SkSpan<DescriptorData> requestedDescriptors) { |
254 | 0 | if (requestedDescriptors.empty()) { |
255 | 0 | return nullptr; |
256 | 0 | } |
257 | | // Search for available descriptor sets by assembling a key based upon the set's structure. |
258 | 0 | GraphiteResourceKey key = build_desc_set_key(requestedDescriptors); |
259 | 0 | if (auto descSet = fResourceCache->findAndRefResource(key, skgpu::Budgeted::kYes)) { |
260 | | // A non-null resource pointer indicates we have found an available descriptor set. |
261 | 0 | return sk_sp<VulkanDescriptorSet>(static_cast<VulkanDescriptorSet*>(descSet)); |
262 | 0 | } |
263 | | |
264 | | |
265 |  |     // If we did not find an existing available desc set, allocate sets with the appropriate layout |
266 | | // and add them to the cache. |
267 | 0 | VkDescriptorSetLayout layout; |
268 | 0 | const VulkanSharedContext* context = this->vulkanSharedContext(); |
269 | 0 | DescriptorDataToVkDescSetLayout(context, requestedDescriptors, &layout); |
270 | 0 | if (!layout) { |
271 | 0 | return nullptr; |
272 | 0 | } |
273 | 0 | auto pool = VulkanDescriptorPool::Make(context, requestedDescriptors, layout); |
274 | 0 | if (!pool) { |
275 | 0 | VULKAN_CALL(context->interface(), DestroyDescriptorSetLayout(context->device(), |
276 | 0 | layout, |
277 | 0 | nullptr)); |
278 | 0 | return nullptr; |
279 | 0 | } |
280 | | |
281 | | // Start with allocating one descriptor set. If one cannot be successfully created, then we can |
282 | | // return early before attempting to allocate more. Storing a ptr to the first set also |
283 | | // allows us to return that later without having to perform a find operation on the cache once |
284 | | // all the sets are added. |
285 | 0 | auto firstDescSet = |
286 | 0 | add_new_desc_set_to_cache(context, pool, key, fResourceCache.get()); |
287 | 0 | if (!firstDescSet) { |
288 | 0 | return nullptr; |
289 | 0 | } |
290 | | |
291 | | // Continue to allocate & cache the maximum number of sets so they can be easily accessed as |
292 | | // they're needed. |
293 | 0 |     for (int i = 1; i < VulkanDescriptorPool::kMaxNumSets; i++) { |
294 | 0 | auto descSet = |
295 | 0 | add_new_desc_set_to_cache(context, pool, key, fResourceCache.get()); |
296 | 0 | if (!descSet) { |
297 | 0 |             SKGPU_LOG_W("Descriptor set allocation %d of %d was unsuccessful; no more sets will be " |
298 | 0 |                         "allocated from this pool.", i, VulkanDescriptorPool::kMaxNumSets); |
299 | 0 | break; |
300 | 0 | } |
301 | 0 | } |
302 |  |  |
303 | 0 | return firstDescSet; |
304 | 0 | } |
305 | | |
306 | | namespace { |
307 | | |
308 | | VulkanResourceProvider::UniformBindGroupKey make_ubo_bind_group_key( |
309 | | SkSpan<DescriptorData> requestedDescriptors, |
310 | 0 | SkSpan<BindBufferInfo> bindUniformBufferInfo) { |
311 | 0 | VulkanResourceProvider::UniformBindGroupKey uniqueKey; |
312 | 0 | { |
313 | | // Each entry in the bind group needs 2 uint32_t in the key: |
314 | | // - buffer's unique ID: 32 bits. |
315 | | // - buffer's binding size: 32 bits. |
316 |  |         // We need a total of 4 entries in the uniform buffer bind group. |
317 | | // Unused entries will be assigned zero values. |
318 | 0 | VulkanResourceProvider::UniformBindGroupKey::Builder builder(&uniqueKey); |
319 |  |  |
320 | 0 | for (uint32_t i = 0; i < VulkanGraphicsPipeline::kNumUniformBuffers; ++i) { |
321 | 0 | builder[2 * i] = 0; |
322 | 0 | builder[2 * i + 1] = 0; |
323 | 0 | } |
324 |  |  |
325 | 0 | for (uint32_t i = 0; i < requestedDescriptors.size(); ++i) { |
326 | 0 | int descriptorBindingIndex = requestedDescriptors[i].fBindingIndex; |
327 | 0 | SkASSERT(SkTo<unsigned long>(descriptorBindingIndex) < bindUniformBufferInfo.size()); |
328 | 0 | SkASSERT(SkTo<unsigned long>(descriptorBindingIndex) < |
329 | 0 | VulkanGraphicsPipeline::kNumUniformBuffers); |
330 | 0 | const auto& bindInfo = bindUniformBufferInfo[descriptorBindingIndex]; |
331 | 0 | const VulkanBuffer* boundBuffer = static_cast<const VulkanBuffer*>(bindInfo.fBuffer); |
332 | 0 | SkASSERT(boundBuffer); |
333 | 0 | builder[2 * descriptorBindingIndex] = boundBuffer->uniqueID().asUInt(); |
334 | 0 | builder[2 * descriptorBindingIndex + 1] = bindInfo.fSize; |
335 | 0 | } |
336 |  |  |
337 | 0 | builder.finish(); |
338 | 0 | } |
339 |  |  |
340 | 0 | return uniqueKey; |
341 | 0 | } |
Unexecuted instantiation: VulkanResourceProvider.cpp:skgpu::graphite::(anonymous namespace)::make_ubo_bind_group_key(SkSpan<skgpu::graphite::DescriptorData>, SkSpan<skgpu::graphite::BindBufferInfo>)
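The resulting key is a fixed grid of kNumUniformBuffers * 2 words indexed by binding number rather than by iteration order, so two descriptor spans that bind the same buffers at the same sizes produce the same key regardless of ordering:

    // key[2*b]     == uniqueID of the buffer bound at binding b (0 if unused)
    // key[2*b + 1] == bound size in bytes at binding b          (0 if unused)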
342 | | |
343 | | void update_uniform_descriptor_set(SkSpan<DescriptorData> requestedDescriptors, |
344 | | SkSpan<BindBufferInfo> bindUniformBufferInfo, |
345 | | VkDescriptorSet descSet, |
346 | 0 | const VulkanSharedContext* sharedContext) { |
347 | 0 | for (size_t i = 0; i < requestedDescriptors.size(); i++) { |
348 | 0 | int descriptorBindingIndex = requestedDescriptors[i].fBindingIndex; |
349 | 0 | SkASSERT(SkTo<unsigned long>(descriptorBindingIndex) < bindUniformBufferInfo.size()); |
350 | 0 | const auto& bindInfo = bindUniformBufferInfo[descriptorBindingIndex]; |
351 | 0 | if (bindInfo.fBuffer) { |
352 | | #if defined(SK_DEBUG) |
353 | | static uint64_t maxBufferRange = |
354 | 0 | sharedContext->caps()->storageBufferSupport() |
355 | 0 | ? sharedContext->vulkanCaps().maxStorageBufferRange() |
356 | 0 | : sharedContext->vulkanCaps().maxUniformBufferRange(); |
357 | 0 | SkASSERT(bindInfo.fSize <= maxBufferRange); |
358 | | #endif |
359 | 0 | VkDescriptorBufferInfo bufferInfo; |
360 | 0 | memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo)); |
361 | 0 | auto vulkanBuffer = static_cast<const VulkanBuffer*>(bindInfo.fBuffer); |
362 | 0 | bufferInfo.buffer = vulkanBuffer->vkBuffer(); |
363 | 0 | bufferInfo.offset = 0; // We always use dynamic ubos so we set the base offset to 0 |
364 | 0 | bufferInfo.range = bindInfo.fSize; |
365 |  |  |
366 | 0 | VkWriteDescriptorSet writeInfo; |
367 | 0 | memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet)); |
368 | 0 | writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; |
369 | 0 | writeInfo.pNext = nullptr; |
370 | 0 | writeInfo.dstSet = descSet; |
371 | 0 | writeInfo.dstBinding = descriptorBindingIndex; |
372 | 0 | writeInfo.dstArrayElement = 0; |
373 | 0 | writeInfo.descriptorCount = requestedDescriptors[i].fCount; |
374 | 0 | writeInfo.descriptorType = DsTypeEnumToVkDs(requestedDescriptors[i].fType); |
375 | 0 | writeInfo.pImageInfo = nullptr; |
376 | 0 | writeInfo.pBufferInfo = &bufferInfo; |
377 | 0 | writeInfo.pTexelBufferView = nullptr; |
378 | | |
379 | | // TODO(b/293925059): Migrate to updating all the uniform descriptors with one driver |
380 | | // call. Calling UpdateDescriptorSets once to encapsulate updates to all uniform |
381 | | // descriptors would be ideal, but that led to issues with draws where all the UBOs |
382 | | // within that set would unexpectedly be assigned the same offset. Updating them one at |
383 | | // a time within this loop works in the meantime but is suboptimal. |
384 | 0 | VULKAN_CALL(sharedContext->interface(), |
385 | 0 | UpdateDescriptorSets(sharedContext->device(), |
386 | 0 | /*descriptorWriteCount=*/1, |
387 | 0 | &writeInfo, |
388 | 0 | /*descriptorCopyCount=*/0, |
389 | 0 | /*pDescriptorCopies=*/nullptr)); |
390 | 0 | } |
391 | 0 | } |
392 | 0 | } |
Unexecuted instantiation: VulkanResourceProvider.cpp:skgpu::graphite::(anonymous namespace)::update_uniform_descriptor_set(SkSpan<skgpu::graphite::DescriptorData>, SkSpan<skgpu::graphite::BindBufferInfo>, VkDescriptorSet_T*, skgpu::graphite::VulkanSharedContext const*)
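Regarding the TODO above: vkUpdateDescriptorSets accepts an array of writes, but each VkWriteDescriptorSet must point at a VkDescriptorBufferInfo that remains distinct and alive until the call returns. A naive batching of the loop above that kept reusing one stack-local bufferInfo would leave every write aliasing the same struct, which matches the reported symptom of all UBOs receiving the same offset. A minimal sketch of the correctly batched shape, with hypothetical parameters (this is not the fix Skia landed, just the standard pattern):

    #include <vector>
    #include <vulkan/vulkan_core.h>

    // Hypothetical free function; 'buffers' and 'sizes' are parallel arrays indexed
    // by binding number, mirroring bindUniformBufferInfo above.
    void update_all_uniform_descriptors(VkDevice device, VkDescriptorSet descSet,
                                        const VkBuffer* buffers, const uint32_t* sizes,
                                        uint32_t bindingCount) {
        // Sized up front so each pBufferInfo pointer below stays stable.
        std::vector<VkDescriptorBufferInfo> bufferInfos(bindingCount);
        std::vector<VkWriteDescriptorSet> writes;
        writes.reserve(bindingCount);
        for (uint32_t b = 0; b < bindingCount; ++b) {
            if (buffers[b] == VK_NULL_HANDLE) {
                continue;
            }
            bufferInfos[b] = {buffers[b], /*offset=*/0, /*range=*/sizes[b]};
            VkWriteDescriptorSet write = {};
            write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            write.dstSet = descSet;
            write.dstBinding = b;
            write.descriptorCount = 1;
            write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
            write.pBufferInfo = &bufferInfos[b];  // distinct struct per write, not a reused local
            writes.push_back(write);
        }
        if (!writes.empty()) {
            vkUpdateDescriptorSets(device, (uint32_t)writes.size(), writes.data(),
                                   /*descriptorCopyCount=*/0, /*pDescriptorCopies=*/nullptr);
        }
    }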
393 | | |
394 | | } // anonymous namespace |
395 | | |
396 | | sk_sp<VulkanDescriptorSet> VulkanResourceProvider::findOrCreateUniformBuffersDescriptorSet( |
397 | | SkSpan<DescriptorData> requestedDescriptors, |
398 | 0 | SkSpan<BindBufferInfo> bindUniformBufferInfo) { |
399 | 0 | SkASSERT(requestedDescriptors.size() <= VulkanGraphicsPipeline::kNumUniformBuffers); |
400 |  |  |
401 | 0 | auto key = make_ubo_bind_group_key(requestedDescriptors, bindUniformBufferInfo); |
402 | 0 | auto* existingDescSet = fUniformBufferDescSetCache.find(key); |
403 | 0 | if (existingDescSet) { |
404 | 0 | return *existingDescSet; |
405 | 0 | } |
406 | 0 | sk_sp<VulkanDescriptorSet> newDS = this->findOrCreateDescriptorSet(requestedDescriptors); |
407 | 0 | if (!newDS) { |
408 | 0 | return nullptr; |
409 | 0 | } |
410 | | |
411 | 0 | update_uniform_descriptor_set(requestedDescriptors, |
412 | 0 | bindUniformBufferInfo, |
413 | 0 | *newDS->descriptorSet(), |
414 | 0 | this->vulkanSharedContext()); |
415 | 0 | return *fUniformBufferDescSetCache.insert(key, newDS); |
416 | 0 | } |
Unexecuted instantiation: skgpu::graphite::VulkanResourceProvider::findOrCreateUniformBuffersDescriptorSet(SkSpan<skgpu::graphite::DescriptorData>, SkSpan<skgpu::graphite::BindBufferInfo>)
417 | | |
418 | | |
419 | | sk_sp<VulkanRenderPass> VulkanResourceProvider::findOrCreateRenderPassWithKnownKey( |
420 | | const RenderPassDesc& renderPassDesc, |
421 | | bool compatibleOnly, |
422 | 0 | const GraphiteResourceKey& rpKey) { |
423 | 0 | if (Resource* resource = |
424 | 0 | fResourceCache->findAndRefResource(rpKey, skgpu::Budgeted::kYes)) { |
425 | 0 | return sk_sp<VulkanRenderPass>(static_cast<VulkanRenderPass*>(resource)); |
426 | 0 | } |
427 | | |
428 | 0 | sk_sp<VulkanRenderPass> renderPass = |
429 | 0 | VulkanRenderPass::MakeRenderPass(this->vulkanSharedContext(), |
430 | 0 | renderPassDesc, |
431 | 0 | compatibleOnly); |
432 | 0 | if (!renderPass) { |
433 | 0 | return nullptr; |
434 | 0 | } |
435 | | |
436 | 0 | renderPass->setKey(rpKey); |
437 | 0 | fResourceCache->insertResource(renderPass.get()); |
438 |  |  |
439 | 0 | return renderPass; |
440 | 0 | } |
441 | | |
442 | | sk_sp<VulkanRenderPass> VulkanResourceProvider::findOrCreateRenderPass( |
443 | 0 | const RenderPassDesc& renderPassDesc, bool compatibleOnly) { |
444 | 0 | GraphiteResourceKey rpKey = VulkanRenderPass::MakeRenderPassKey(renderPassDesc, compatibleOnly); |
445 |  |  |
446 | 0 | return this->findOrCreateRenderPassWithKnownKey(renderPassDesc, compatibleOnly, rpKey); |
447 | 0 | } |
448 | | |
449 | 0 | VkPipelineCache VulkanResourceProvider::pipelineCache() { |
450 | 0 | if (fPipelineCache == VK_NULL_HANDLE) { |
451 | 0 | VkPipelineCacheCreateInfo createInfo; |
452 | 0 | memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo)); |
453 | 0 | createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; |
454 | 0 | createInfo.pNext = nullptr; |
455 | 0 | createInfo.flags = 0; |
456 | 0 | createInfo.initialDataSize = 0; |
457 | 0 | createInfo.pInitialData = nullptr; |
458 | 0 | VkResult result; |
459 | 0 | VULKAN_CALL_RESULT(this->vulkanSharedContext(), |
460 | 0 | result, |
461 | 0 | CreatePipelineCache(this->vulkanSharedContext()->device(), |
462 | 0 | &createInfo, |
463 | 0 | nullptr, |
464 | 0 | &fPipelineCache)); |
465 | 0 | if (VK_SUCCESS != result) { |
466 | 0 | fPipelineCache = VK_NULL_HANDLE; |
467 | 0 | } |
468 | 0 | } |
469 | 0 | return fPipelineCache; |
470 | 0 | } |
Unexecuted instantiation: skgpu::graphite::VulkanResourceProvider::pipelineCache()
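The cache above is created empty on every run (initialDataSize is 0). If on-disk persistence were desired, the standard Vulkan recipe is to serialize the cache at teardown and feed the blob back through pInitialData at the next creation. A sketch using only core API, with file I/O elided and <vector> assumed:

    size_t blobSize = 0;
    vkGetPipelineCacheData(device, pipelineCache, &blobSize, nullptr);     // query size
    std::vector<uint8_t> blob(blobSize);
    vkGetPipelineCacheData(device, pipelineCache, &blobSize, blob.data()); // fill blob
    // ...persist blob, then on the next run point createInfo.pInitialData /
    // createInfo.initialDataSize at the saved bytes.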
471 | | |
472 | | sk_sp<VulkanFramebuffer> VulkanResourceProvider::createFramebuffer( |
473 | | const VulkanSharedContext* context, |
474 | | const skia_private::TArray<VkImageView>& attachmentViews, |
475 | | const VulkanRenderPass& renderPass, |
476 | | const int width, |
477 | 0 | const int height) { |
478 | | // TODO: Consider caching these in the future. If we pursue that, it may make more sense to |
479 | | // use a compatible renderpass rather than a full one to make each frame buffer more versatile. |
480 | 0 | VkFramebufferCreateInfo framebufferInfo; |
481 | 0 | memset(&framebufferInfo, 0, sizeof(VkFramebufferCreateInfo)); |
482 | 0 | framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; |
483 | 0 | framebufferInfo.pNext = nullptr; |
484 | 0 | framebufferInfo.flags = 0; |
485 | 0 | framebufferInfo.renderPass = renderPass.renderPass(); |
486 | 0 | framebufferInfo.attachmentCount = attachmentViews.size(); |
487 | 0 | framebufferInfo.pAttachments = attachmentViews.begin(); |
488 | 0 | framebufferInfo.width = width; |
489 | 0 | framebufferInfo.height = height; |
490 | 0 | framebufferInfo.layers = 1; |
491 | 0 | return VulkanFramebuffer::Make(context, framebufferInfo); |
492 | 0 | } |
493 | | |
494 | 0 | void VulkanResourceProvider::onDeleteBackendTexture(const BackendTexture& texture) { |
495 | 0 | SkASSERT(texture.isValid()); |
496 | 0 | SkASSERT(texture.backend() == BackendApi::kVulkan); |
497 |  |  |
498 | 0 | VULKAN_CALL(this->vulkanSharedContext()->interface(), |
499 | 0 | DestroyImage(this->vulkanSharedContext()->device(), |
500 | 0 | BackendTextures::GetVkImage(texture), |
501 | 0 | /*VkAllocationCallbacks=*/nullptr)); |
502 |  |  |
503 | 0 | VulkanAlloc alloc = BackendTextures::GetMemoryAlloc(texture); |
504 | | // Free the image memory used for the BackendTexture's VkImage. |
505 | | // |
506 |  |     // How we do this depends on how the image was allocated (via the memory allocator or with a |
507 |  |     // direct call to the Vulkan driver). If the VulkanAlloc's fBackendMemory is nonzero, then the |
508 |  |     // allocator was used. Otherwise, a direct driver call was used and we should free the |
509 |  |     // VkDeviceMemory (fMemory). |
510 | 0 | if (alloc.fBackendMemory) { |
511 | 0 | skgpu::VulkanMemory::FreeImageMemory(this->vulkanSharedContext()->memoryAllocator(), alloc); |
512 | 0 | } else { |
513 | 0 | SkASSERT(alloc.fMemory != VK_NULL_HANDLE); |
514 | 0 | VULKAN_CALL(this->vulkanSharedContext()->interface(), |
515 | 0 | FreeMemory(this->vulkanSharedContext()->device(), alloc.fMemory, nullptr)); |
516 | 0 | } |
517 | 0 | } |
Unexecuted instantiation: skgpu::graphite::VulkanResourceProvider::onDeleteBackendTexture(skgpu::graphite::BackendTexture const&)
518 | | |
519 | | sk_sp<VulkanYcbcrConversion> VulkanResourceProvider::findOrCreateCompatibleYcbcrConversion( |
520 | 0 | const VulkanYcbcrConversionInfo& ycbcrInfo) const { |
521 | 0 | if (!ycbcrInfo.isValid()) { |
522 | 0 | return nullptr; |
523 | 0 | } |
524 | 0 | GraphiteResourceKey ycbcrConversionKey = |
525 | 0 | VulkanYcbcrConversion::MakeYcbcrConversionKey(this->vulkanSharedContext(), ycbcrInfo); |
526 |  |  |
527 | 0 | if (Resource* resource = fResourceCache->findAndRefResource(ycbcrConversionKey, |
528 | 0 | skgpu::Budgeted::kYes)) { |
529 | 0 | return sk_sp<VulkanYcbcrConversion>(static_cast<VulkanYcbcrConversion*>(resource)); |
530 | 0 | } |
531 | | |
532 | 0 | auto ycbcrConversion = VulkanYcbcrConversion::Make(this->vulkanSharedContext(), ycbcrInfo); |
533 | 0 | if (!ycbcrConversion) { |
534 | 0 | return nullptr; |
535 | 0 | } |
536 | | |
537 | 0 | ycbcrConversion->setKey(ycbcrConversionKey); |
538 | 0 | fResourceCache->insertResource(ycbcrConversion.get()); |
539 |  |  |
540 | 0 | return ycbcrConversion; |
541 | 0 | } |
542 | | |
543 | | sk_sp<VulkanGraphicsPipeline> VulkanResourceProvider::findOrCreateLoadMSAAPipeline( |
544 | 0 | const RenderPassDesc& renderPassDesc) { |
545 |  |  |
546 | 0 | if (!renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() || |
547 | 0 | !renderPassDesc.fColorAttachment.fTextureInfo.isValid()) { |
548 | 0 | SKGPU_LOG_E("Loading MSAA from resolve texture requires valid color & resolve attachment"); |
549 | 0 | return nullptr; |
550 | 0 | } |
551 | | |
552 | | // Check to see if we already have a suitable pipeline that we can use. |
553 | 0 | GraphiteResourceKey renderPassKey = |
554 | 0 | VulkanRenderPass::MakeRenderPassKey(renderPassDesc, /*compatibleOnly=*/true); |
555 | 0 | for (int i = 0; i < fLoadMSAAPipelines.size(); i++) { |
556 | 0 | if (renderPassKey == fLoadMSAAPipelines.at(i).first) { |
557 | 0 | return fLoadMSAAPipelines.at(i).second; |
558 | 0 | } |
559 | 0 | } |
560 | | |
561 | | // If any of the load MSAA pipeline creation structures are null then we need to initialize |
562 | | // those before proceeding. If the creation of one of them fails, all are assigned to null, so |
563 | | // we only need to check one of the structures. |
564 | 0 | if (fMSAALoadVertShaderModule == VK_NULL_HANDLE) { |
565 | 0 | SkASSERT(fMSAALoadFragShaderModule == VK_NULL_HANDLE && |
566 | 0 | fMSAALoadPipelineLayout == VK_NULL_HANDLE); |
567 | 0 | if (!VulkanGraphicsPipeline::InitializeMSAALoadPipelineStructs( |
568 | 0 | this->vulkanSharedContext(), |
569 | 0 | &fMSAALoadVertShaderModule, |
570 | 0 | &fMSAALoadFragShaderModule, |
571 | 0 | &fMSAALoadShaderStageInfo[0], |
572 | 0 | &fMSAALoadPipelineLayout)) { |
573 | 0 | SKGPU_LOG_E("Failed to initialize MSAA load pipeline creation structure(s)"); |
574 | 0 | return nullptr; |
575 | 0 | } |
576 | 0 | } |
577 | | |
578 | 0 | sk_sp<VulkanRenderPass> compatibleRenderPass = |
579 | 0 | this->findOrCreateRenderPassWithKnownKey(renderPassDesc, |
580 | 0 | /*compatibleOnly=*/true, |
581 | 0 | renderPassKey); |
582 | 0 | if (!compatibleRenderPass) { |
583 | 0 | SKGPU_LOG_E("Failed to make compatible render pass for loading MSAA"); |
584 | 0 | } |
585 |  |  |
586 | 0 | sk_sp<VulkanGraphicsPipeline> pipeline = VulkanGraphicsPipeline::MakeLoadMSAAPipeline( |
587 | 0 | this->vulkanSharedContext(), |
588 | 0 | fMSAALoadVertShaderModule, |
589 | 0 | fMSAALoadFragShaderModule, |
590 | 0 | &fMSAALoadShaderStageInfo[0], |
591 | 0 | fMSAALoadPipelineLayout, |
592 | 0 | compatibleRenderPass, |
593 | 0 | this->pipelineCache(), |
594 | 0 | renderPassDesc.fColorAttachment.fTextureInfo); |
595 |  |  |
596 | 0 | if (!pipeline) { |
597 | 0 | SKGPU_LOG_E("Failed to create MSAA load pipeline"); |
598 | 0 | return nullptr; |
599 | 0 | } |
600 | | |
601 | 0 | fLoadMSAAPipelines.push_back(std::make_pair(renderPassKey, pipeline)); |
602 | 0 | return pipeline; |
603 | 0 | } |
Unexecuted instantiation: skgpu::graphite::VulkanResourceProvider::findOrCreateLoadMSAAPipeline(skgpu::graphite::RenderPassDesc const&)
604 | | |
605 | | #ifdef SK_BUILD_FOR_ANDROID |
606 | | |
607 | | BackendTexture VulkanResourceProvider::onCreateBackendTexture(AHardwareBuffer* hardwareBuffer, |
608 | | bool isRenderable, |
609 | | bool isProtectedContent, |
610 | | SkISize dimensions, |
611 | | bool fromAndroidWindow) const { |
612 | | |
613 | | const VulkanSharedContext* vkContext = this->vulkanSharedContext(); |
614 | | VkDevice device = vkContext->device(); |
615 | | const VulkanCaps& vkCaps = vkContext->vulkanCaps(); |
616 | | |
617 | | VkAndroidHardwareBufferFormatPropertiesANDROID hwbFormatProps; |
618 | | VkAndroidHardwareBufferPropertiesANDROID hwbProps; |
619 | | if (!skgpu::GetAHardwareBufferProperties( |
620 | | &hwbFormatProps, &hwbProps, vkContext->interface(), hardwareBuffer, device)) { |
621 | | return {}; |
622 | | } |
623 | | |
624 | | bool importAsExternalFormat = hwbFormatProps.format == VK_FORMAT_UNDEFINED; |
625 | | |
626 |  |     // Start to assemble the VulkanTextureInfo, which is needed later on to create the VkImage but |
627 |  |     // also lets us query VulkanCaps for certain format feature support sooner. |
628 | | // TODO: Allow client to pass in tiling mode. For external formats, this is required to be |
629 |  |     // optimal. For AHBs that have a known Vulkan format, we can query VulkanCaps to determine if |
630 | | // optimal is a valid decision given the format features. |
631 | | VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL; |
632 | | VkImageCreateFlags imgCreateflags = isProtectedContent ? VK_IMAGE_CREATE_PROTECTED_BIT : 0; |
633 | | VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT; |
634 |  |     // When importing as an external format, the image usage can only be VK_IMAGE_USAGE_SAMPLED_BIT. |
635 | | if (!importAsExternalFormat) { |
636 | | usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; |
637 | | if (isRenderable) { |
638 | | // Renderable attachments can be used as input attachments if we are loading from MSAA. |
639 | | usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; |
640 | | } |
641 | | } |
642 | | VulkanTextureInfo vkTexInfo { VK_SAMPLE_COUNT_1_BIT, |
643 | | Mipmapped::kNo, |
644 | | imgCreateflags, |
645 | | hwbFormatProps.format, |
646 | | tiling, |
647 | | usageFlags, |
648 | | VK_SHARING_MODE_EXCLUSIVE, |
649 | | VK_IMAGE_ASPECT_COLOR_BIT, |
650 | | VulkanYcbcrConversionInfo() }; |
651 | | |
652 | | if (isRenderable && (importAsExternalFormat || !vkCaps.isRenderable(vkTexInfo))) { |
653 | | SKGPU_LOG_W("Renderable texture requested from an AHardwareBuffer which uses a VkFormat " |
654 | | "that Skia cannot render to (VkFormat: %d).\n", hwbFormatProps.format); |
655 | | return {}; |
656 | | } |
657 | | |
658 | | if (!importAsExternalFormat && (!vkCaps.isTransferSrc(vkTexInfo) || |
659 | | !vkCaps.isTransferDst(vkTexInfo) || |
660 | | !vkCaps.isTexturable(vkTexInfo))) { |
661 | | if (isRenderable) { |
662 | | SKGPU_LOG_W("VkFormat %d is either unfamiliar to Skia or doesn't support the necessary" |
663 |  |                         " format features. Because a renderable texture was requested, we cannot " |
664 | | "fall back to importing with an external format.\n", hwbFormatProps.format); |
665 | | return {}; |
666 | | } |
667 | | // If the VkFormat does not support the features we need, then import as an external format. |
668 | | importAsExternalFormat = true; |
669 | | // If we use VkExternalFormatANDROID with an externalFormat != 0, then format must = |
670 | | // VK_FORMAT_UNDEFINED. |
671 | | vkTexInfo.fFormat = VK_FORMAT_UNDEFINED; |
672 | | vkTexInfo.fImageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT; |
673 | | } |
674 | | |
675 | | VulkanYcbcrConversionInfo ycbcrInfo; |
676 | | VkExternalFormatANDROID externalFormat; |
677 | | externalFormat.sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID; |
678 | | externalFormat.pNext = nullptr; |
679 | | externalFormat.externalFormat = 0; // If this is zero it is as if we aren't using this struct. |
680 | | if (importAsExternalFormat) { |
681 | | GetYcbcrConversionInfoFromFormatProps(&ycbcrInfo, hwbFormatProps); |
682 | | if (!ycbcrInfo.isValid()) { |
683 |  |             SKGPU_LOG_W("Failed to create valid YCbCr conversion information from hardware buffer " |
684 | | "format properties.\n"); |
685 | | return {}; |
686 | | } |
687 | | vkTexInfo.fYcbcrConversionInfo = ycbcrInfo; |
688 | | externalFormat.externalFormat = hwbFormatProps.externalFormat; |
689 | | } |
690 | | const VkExternalMemoryImageCreateInfo externalMemoryImageInfo{ |
691 | | VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, // sType |
692 | | &externalFormat, // pNext |
693 | | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, // handleTypes |
694 | | }; |
695 | | |
696 | | SkASSERT(!(vkTexInfo.fFlags & VK_IMAGE_CREATE_PROTECTED_BIT) || |
697 | | fSharedContext->isProtected() == Protected::kYes); |
698 | | |
699 | | const VkImageCreateInfo imageCreateInfo = { |
700 | | VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType |
701 | | &externalMemoryImageInfo, // pNext |
702 | | vkTexInfo.fFlags, // VkImageCreateFlags |
703 | | VK_IMAGE_TYPE_2D, // VkImageType |
704 | | vkTexInfo.fFormat, // VkFormat |
705 | | { (uint32_t)dimensions.fWidth, (uint32_t)dimensions.fHeight, 1 }, // VkExtent3D |
706 | | 1, // mipLevels |
707 | | 1, // arrayLayers |
708 | | VK_SAMPLE_COUNT_1_BIT, // samples |
709 | | vkTexInfo.fImageTiling, // VkImageTiling |
710 | | vkTexInfo.fImageUsageFlags, // VkImageUsageFlags |
711 | | vkTexInfo.fSharingMode, // VkSharingMode |
712 | | 0, // queueFamilyCount |
713 | | nullptr, // pQueueFamilyIndices |
714 | | VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout |
715 | | }; |
716 | | |
717 | | VkResult result; |
718 | | VkImage image; |
719 | | result = VULKAN_CALL(vkContext->interface(), |
720 | | CreateImage(device, &imageCreateInfo, nullptr, &image)); |
721 | | if (result != VK_SUCCESS) { |
722 | | return {}; |
723 | | } |
724 | | |
725 | | const VkPhysicalDeviceMemoryProperties2& phyDevMemProps = |
726 | | vkContext->vulkanCaps().physicalDeviceMemoryProperties2(); |
727 | | VulkanAlloc alloc; |
728 | | if (!AllocateAndBindImageMemory(&alloc, image, phyDevMemProps, hwbProps, hardwareBuffer, |
729 | | vkContext->interface(), device)) { |
730 | | VULKAN_CALL(vkContext->interface(), DestroyImage(device, image, nullptr)); |
731 | | return {}; |
732 | | } |
733 | | |
734 | | return BackendTextures::MakeVulkan(dimensions, |
735 | | vkTexInfo, |
736 | | VK_IMAGE_LAYOUT_UNDEFINED, |
737 | | VK_QUEUE_FAMILY_FOREIGN_EXT, |
738 | | image, |
739 | | alloc); |
740 | | } |
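AllocateAndBindImageMemory is assumed to perform the standard AHardwareBuffer import: a dedicated allocation with the import info chained in, bound at offset zero. Roughly, with memory-type selection simplified to the lowest set bit of hwbProps.memoryTypeBits:

    VkImportAndroidHardwareBufferInfoANDROID importInfo = {};
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID;
    importInfo.buffer = hardwareBuffer;

    VkMemoryDedicatedAllocateInfo dedicatedInfo = {};
    dedicatedInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
    dedicatedInfo.pNext = &importInfo;
    dedicatedInfo.image = image;   // imported AHB memory must be a dedicated allocation

    VkMemoryAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.pNext = &dedicatedInfo;
    allocInfo.allocationSize = hwbProps.allocationSize;
    allocInfo.memoryTypeIndex = (uint32_t)__builtin_ctz(hwbProps.memoryTypeBits);

    VkDeviceMemory memory;
    if (vkAllocateMemory(device, &allocInfo, nullptr, &memory) == VK_SUCCESS) {
        vkBindImageMemory(device, image, memory, /*memoryOffset=*/0);
    }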
741 | | |
742 | | #endif // SK_BUILD_FOR_ANDROID |
743 | | |
744 | | } // namespace skgpu::graphite |