/src/skia/tools/gpu/vk/VkTestUtils.cpp
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/VulkanInterface.h"
#include "tools/gpu/vk/VkTestMemoryAllocator.h"
#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
#if defined _WIN32
#define SK_GPU_TOOLS_VK_LIBRARY_NAME vulkan-1.dll
#elif defined SK_BUILD_FOR_MAC
#define SK_GPU_TOOLS_VK_LIBRARY_NAME libvk_swiftshader.dylib
#else
#define SK_GPU_TOOLS_VK_LIBRARY_NAME libvulkan.so
#define SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP libvulkan.so.1
#endif
#endif

#define STRINGIFY2(S) #S
#define STRINGIFY(S) STRINGIFY2(S)
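// STRINGIFY uses the standard two-level macro idiom: the extra STRINGIFY2
// indirection forces the argument to be macro-expanded before it is
// stringized, so STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME) yields
// "libvulkan.so" rather than "SK_GPU_TOOLS_VK_LIBRARY_NAME".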
27 | | |
28 | | #include <algorithm> |
29 | | |
30 | | #if defined(__GLIBC__) |
31 | | #include <execinfo.h> |
32 | | #endif |
33 | | #include "include/gpu/vk/VulkanBackendContext.h" |
34 | | #include "include/gpu/vk/VulkanExtensions.h" |
35 | | #include "src/base/SkAutoMalloc.h" |
36 | | #include "tools/library/LoadDynamicLibrary.h" |
37 | | |
38 | | #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS) |
39 | | #include <sanitizer/lsan_interface.h> |
40 | | #endif |
41 | | |
42 | | using namespace skia_private; |
43 | | |
44 | | namespace sk_gpu_test { |
45 | | |
46 | 24 | bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc) { |
47 | 24 | static void* vkLib = nullptr; |
48 | 24 | static PFN_vkGetInstanceProcAddr localInstProc = nullptr; |
49 | 24 | if (!vkLib) { |
50 | 24 | vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME)); |
51 | 24 | if (!vkLib) { |
52 | | // vulkaninfo tries to load the library from two places, so we do as well |
53 | | // https://github.com/KhronosGroup/Vulkan-Tools/blob/078d44e4664b7efa0b6c96ebced1995c4425d57a/vulkaninfo/vulkaninfo.h#L249 |
54 | 24 | #ifdef SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP |
55 | 24 | vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP)); |
56 | 24 | if (!vkLib) { |
57 | 24 | return false; |
58 | 24 | } |
59 | | #else |
60 | | return false; |
61 | | #endif |
62 | 24 | } |
63 | 0 | localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib, |
64 | 0 | "vkGetInstanceProcAddr"); |
65 | 0 | } |
66 | 0 | if (!localInstProc) { |
67 | 0 | return false; |
68 | 0 | } |
69 | 0 | *instProc = localInstProc; |
70 | 0 | return true; |
71 | 0 | } |
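
// Minimal usage sketch (illustrative only, not part of this file): callers
// bootstrap everything from the single entry point this returns.
//
//     PFN_vkGetInstanceProcAddr instProc = nullptr;
//     if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc)) {
//         return;  // no usable Vulkan loader on this machine
//     }
//     // instProc can now be handed to CreateVkBackendContext() below.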

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

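// Vulkan packs versions as (major << 22) | (minor << 12) | patch, so the low
// 12 bits hold the patch number; shifting right then left by 12 simply zeroes
// them out.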
static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using, so that it has all the API entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}
108 | | |
109 | | static void print_backtrace() { |
110 | | #if defined(__GLIBC__) |
111 | | void* stack[64]; |
112 | | int count = backtrace(stack, std::size(stack)); |
113 | | backtrace_symbols_fd(stack, count, 2); |
114 | | #else |
115 | | // Please add implementations for other platforms. |
116 | | #endif |
117 | | } |
118 | | |
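// Per VK_EXT_debug_report, the callback's return value tells the layer
// whether to abort the Vulkan call that triggered it: VK_TRUE aborts,
// VK_FALSE lets it continue. Hence errors return VK_TRUE below while
// warnings and info fall through and return VK_FALSE.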
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
        VkDebugReportFlagsEXT flags,
        VkDebugReportObjectTypeEXT objectType,
        uint64_t object,
        size_t location,
        int32_t messageCode,
        const char* pLayerPrefix,
        const char* pMessage,
        void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
        if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
            strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
            return VK_FALSE;
        }
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
        if (strstr(pMessage, "VUID-vkCmdDraw-None-02686") ||
            strstr(pMessage, "VUID-vkCmdDrawIndexed-None-02686")) {
            return VK_FALSE;
        }
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
        SkDEBUGFAIL("Vulkan debug layer error");
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

#define ACQUIRE_VK_INST_PROC_LOCAL(name, instance)                             \
    PFN_vk##name grVk##name =                                                  \
            reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name)); \
    do {                                                                       \
        if (grVk##name == nullptr) {                                           \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);  \
            return false;                                                      \
        }                                                                      \
    } while (0)
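
// For example, ACQUIRE_VK_INST_PROC_LOCAL(DestroyInstance, inst) expands to
// roughly:
//     PFN_vkDestroyInstance grVkDestroyInstance =
//             reinterpret_cast<PFN_vkDestroyInstance>(getInstProc(inst, "vkDestroyInstance"));
//     do { if (grVkDestroyInstance == nullptr) { /* log, return false */ } } while (0)
// i.e. it declares a local function pointer and bails out of the enclosing
// function if the loader cannot resolve it.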

// Returns true if the named extension is one we are willing to enable (an allowlist).
static bool should_include_extension(const char* extensionName) {
    const char* kValidExtensions[] = {
        VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME,
        VK_EXT_DEVICE_FAULT_EXTENSION_NAME,
        VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME,
        VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
        VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME,
        VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
        VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
        VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
        VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
        VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
        VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
        VK_KHR_MAINTENANCE1_EXTENSION_NAME,
        VK_KHR_MAINTENANCE2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE3_EXTENSION_NAME,
        VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
        VK_KHR_SURFACE_EXTENSION_NAME,
        VK_KHR_SWAPCHAIN_EXTENSION_NAME,
        // Below are all platform-specific extensions. The name macros, like the ones used
        // above, are defined in platform-specific Vulkan headers. We currently don't include
        // those headers, as they are a bit of a pain (e.g. the Windows header requires
        // including <windows.h>, which causes all sorts of annoyances/problems). So instead we
        // just list the strings these macros are defined to. This shouldn't cause any long-term
        // issues, as the chances of the strings behind the name macros changing are next to
        // zero.
        "VK_KHR_win32_surface", // VK_KHR_WIN32_SURFACE_EXTENSION_NAME
        "VK_KHR_xcb_surface", // VK_KHR_XCB_SURFACE_EXTENSION_NAME,
        "VK_ANDROID_external_memory_android_hardware_buffer",
                // VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
        "VK_KHR_android_surface", // VK_KHR_ANDROID_SURFACE_EXTENSION_NAME,
    };

    for (size_t i = 0; i < std::size(kValidExtensions); ++i) {
        if (!strcmp(extensionName, kValidExtensions[i])) {
            return true;
        }
    }
    return false;
}

static bool init_instance_extensions_and_layers(PFN_vkGetInstanceProcAddr getInstProc,
                                                uint32_t specVersion,
                                                TArray<VkExtensionProperties>* instanceExtensions,
                                                TArray<VkLayerProperties>* instanceLayers) {
    if (getInstProc == nullptr) {
        return false;
    }

    ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE);
    ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE);

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = grVkEnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = grVkEnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
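    // (Each enumeration below uses Vulkan's standard two-call idiom: first call
    // with a null array to learn the count, allocate, then call again to fill.)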
    {
        uint32_t extensionCount = 0;
        res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            if (should_include_extension(extensions[i].extensionName)) {
                instanceExtensions->push_back() = extensions[i];
            }
        }
        delete[] extensions;
    }

    // via explicitly enabled layers
    layerCount = instanceLayers->size();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                       &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                       &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            if (should_include_extension(extensions[i].extensionName)) {
                instanceExtensions->push_back() = extensions[i];
            }
        }
        delete[] extensions;
    }

    return true;
}

#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

static bool init_device_extensions_and_layers(const skgpu::VulkanGetProc& getProc,
                                              uint32_t specVersion, VkInstance inst,
                                              VkPhysicalDevice physDev,
                                              TArray<VkExtensionProperties>* deviceExtensions,
                                              TArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            if (should_include_extension(extensions[i].extensionName)) {
                deviceExtensions->push_back() = extensions[i];
            }
        }
        delete[] extensions;
    }

    // via explicitly enabled layers
    layerCount = deviceLayers->size();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            if (should_include_extension(extensions[i].extensionName)) {
                deviceExtensions->push_back() = extensions[i];
            }
        }
        delete[] extensions;
    }

    return true;
}

#define ACQUIRE_VK_INST_PROC_NOCHECK(name, instance) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name))

#define ACQUIRE_VK_INST_PROC(name, instance)                                       \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name));     \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            if (inst != VK_NULL_HANDLE) {                                          \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            }                                                                      \
            return false;                                                          \
        }                                                                          \
    } while (0)

#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device)                                    \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            if (inst != VK_NULL_HANDLE) {                                          \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            }                                                                      \
            return false;                                                          \
        }                                                                          \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device)                              \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            return false;                                                          \
        }                                                                          \
    } while (0)

static bool destroy_instance(PFN_vkGetInstanceProcAddr getInstProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_INST_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_INST_PROC_LOCAL(DestroyInstance, inst);
    grVkDestroyInstance(inst, nullptr);
    return true;
}

static bool setup_features(const skgpu::VulkanGetProc& getProc, VkInstance inst,
                           VkPhysicalDevice physDev, uint32_t physDeviceVersion,
                           skgpu::VulkanExtensions* extensions, VkPhysicalDeviceFeatures2* features,
                           bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.
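    // Chain-building pattern: tailPNext always points at the pNext field at the
    // end of the features chain. Each extension struct is heap-allocated,
    // appended by writing it through *tailPNext, and then tailPNext advances to
    // the new struct's own pNext. FreeVulkanFeaturesStructs() later walks this
    // chain and frees each allocation.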
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
                (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                        sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}

bool CreateVkBackendContext(PFN_vkGetInstanceProcAddr getInstProc,
                            skgpu::VulkanBackendContext* ctx,
                            skgpu::VulkanExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            const CanPresentFn& canPresent,
                            bool isProtected) {
    VkResult err;

    ACQUIRE_VK_INST_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 the apiVersion must also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set apiVersion to the highest API version
        // we may use in Skia (technically it can be arbitrary). So for now we set it to 1.1,
        // since that is the highest Vulkan version we use.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = std::min(instanceVersion, apiVersion);

    STArray<2, VkPhysicalDevice> physDevs;
    VkDevice device;
    VkInstance inst = VK_NULL_HANDLE;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    TArray<VkLayerProperties> instanceLayers;
    TArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getInstProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    TArray<const char*> instanceLayerNames;
    TArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.size(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.size(); ++i) {
        instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.size(),      // enabledLayerCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.size(),  // enabledExtensionCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_INST_PROC(CreateInstance, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

    ACQUIRE_VK_INST_PROC(GetDeviceProcAddr, inst);
    auto getProc = [getInstProc, grVkGetDeviceProcAddr](const char* proc_name,
                                                        VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return grVkGetDeviceProcAddr(device, proc_name);
        }
        return getInstProc(instance, proc_name);
    };
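
    // getProc prefers vkGetDeviceProcAddr once a VkDevice exists: device-level
    // entry points fetched that way can skip the loader's per-call dispatch,
    // while instance-level (or pre-device) lookups fall back to
    // vkGetInstanceProcAddr.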

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.size() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Allocate enough storage for all available physical devices. We should be able to just ask
    // for the first one, but a bug in RenderDoc (https://github.com/baldurk/renderdoc/issues/2766)
    // will smash the stack if we do that.
    physDevs.resize(gpuCount);
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, physDevs.data());
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // We just use the first physical device.
    // TODO: find best match for our needs
    VkPhysicalDevice physDev = physDevs.front();

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    uint32_t physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics, since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    TArray<VkLayerProperties> deviceLayers;
    TArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    TArray<const char*> deviceLayerNames;
    TArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.size(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    for (int i = 0; i < deviceExtensions.size(); ++i) {
        deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.size(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.size(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of pEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // Disable robustBufferAccess: it looks like it would slow things down, and we can't depend
    // on it being available on all platforms.
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for a swapchain queue.
    // If one is needed, the client will need its own setup code.
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.size(),          // enabledLayerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.size(),      // enabledExtensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // pEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,  // sType
            nullptr,                                // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,   // flags
            graphicsQueueIndex,                     // queueFamilyIndex
            0                                       // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    skgpu::VulkanInterface interface = skgpu::VulkanInterface(
            getProc, inst, device, instanceVersion, physDeviceVersion, extensions);
    SkASSERT(interface.validate(instanceVersion, physDeviceVersion, extensions));

    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = VkTestMemoryAllocator::Make(
            inst, physDev, device, physDeviceVersion, extensions, &interface);

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fProtectedContext = skgpu::Protected(isProtected);
    ctx->fMemoryAllocator = memoryAllocator;

    return true;
}
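
// End-to-end usage sketch (illustrative only; the control flow is what the
// functions above expect, but the surrounding harness code is hypothetical):
//
//     PFN_vkGetInstanceProcAddr instProc = nullptr;
//     skgpu::VulkanBackendContext backendContext;
//     skgpu::VulkanExtensions extensions;
//     VkPhysicalDeviceFeatures2 features;
//     VkDebugReportCallbackEXT debugCallback = VK_NULL_HANDLE;
//     if (LoadVkLibraryAndGetProcAddrFuncs(&instProc) &&
//         CreateVkBackendContext(instProc, &backendContext, &extensions, &features,
//                                &debugCallback, /*presentQueueIndexPtr=*/nullptr,
//                                /*canPresent=*/CanPresentFn(), /*isProtected=*/false)) {
//         // ... use backendContext to make a GrDirectContext, run tests, etc. ...
//         FreeVulkanFeaturesStructs(&features);
//     }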

void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };
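
    // Each struct hanging off features->pNext was allocated with sk_malloc_throw()
    // in setup_features(), so sk_free() below is the matching release.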
881 | |
|
882 | 0 | void* pNext = features->pNext; |
883 | 0 | while (pNext) { |
884 | 0 | void* current = pNext; |
885 | 0 | pNext = static_cast<CommonVulkanHeader*>(current)->pNext; |
886 | 0 | sk_free(current); |
887 | 0 | } |
888 | 0 | } |
889 | | |
890 | | } // namespace sk_gpu_test |
891 | | |
892 | | #endif |