Coverage Report

Created: 2024-05-20 07:14

/src/skia/tools/gpu/vk/VkTestUtils.cpp
Line
Count
Source
1
/*
2
 * Copyright 2017 Google Inc.
3
 *
4
 * Use of this source code is governed by a BSD-style license that can be
5
 * found in the LICENSE file.
6
 */
7
8
#include "src/gpu/vk/VulkanInterface.h"
9
#include "tools/gpu/vk/VkTestMemoryAllocator.h"
10
#include "tools/gpu/vk/VkTestUtils.h"
11
12
#ifdef SK_VULKAN
13
14
#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
15
    #if defined _WIN32
16
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME vulkan-1.dll
17
    #elif defined SK_BUILD_FOR_MAC
18
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME libvk_swiftshader.dylib
19
    #else
20
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME        libvulkan.so
21
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP libvulkan.so.1
22
    #endif
23
#endif
24
25
48
#define STRINGIFY2(S) #S
26
48
#define STRINGIFY(S) STRINGIFY2(S)
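The two-step stringification above matters because the library name is itself a macro; a comment-only illustration (an editorial sketch, not part of the measured source):

// With the Linux default above, STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME) expands to
// "libvulkan.so", whereas STRINGIFY2(SK_GPU_TOOLS_VK_LIBRARY_NAME) would stringize the
// unexpanded macro name and produce "SK_GPU_TOOLS_VK_LIBRARY_NAME".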
27
28
#include <algorithm>
29
30
#if defined(__GLIBC__)
31
#include <execinfo.h>
32
#endif
33
#include "include/gpu/vk/GrVkBackendContext.h"
34
#include "include/gpu/vk/VulkanBackendContext.h"
35
#include "include/gpu/vk/VulkanExtensions.h"
36
#include "src/base/SkAutoMalloc.h"
37
#include "src/ports/SkOSLibrary.h"
38
39
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
40
#include <sanitizer/lsan_interface.h>
41
#endif
42
43
using namespace skia_private;
44
45
namespace sk_gpu_test {
46
47
24
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc) {
48
24
    static void* vkLib = nullptr;
49
24
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
50
24
    if (!vkLib) {
51
24
        vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME));
52
24
        if (!vkLib) {
53
            // vulkaninfo tries to load the library from two places, so we do as well
54
            // https://github.com/KhronosGroup/Vulkan-Tools/blob/078d44e4664b7efa0b6c96ebced1995c4425d57a/vulkaninfo/vulkaninfo.h#L249
55
24
#ifdef SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP
56
24
            vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP));
57
24
            if (!vkLib) {
58
24
                return false;
59
24
            }
60
#else
61
            return false;
62
#endif
63
24
        }
64
0
        localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
65
0
                                                                          "vkGetInstanceProcAddr");
66
0
    }
67
0
    if (!localInstProc) {
68
0
        return false;
69
0
    }
70
0
    *instProc = localInstProc;
71
0
    return true;
72
0
}
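A minimal caller-side sketch of how a harness might use the helper above (assumed usage pattern, not taken from this report):

// Hypothetical helper; assumes SK_VULKAN and the includes listed at the top of the file.
static PFN_vkGetInstanceProcAddr try_load_vulkan_loader() {
    PFN_vkGetInstanceProcAddr instProc = nullptr;
    if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc)) {
        return nullptr;  // neither the primary library nor the .so.1 fallback could be loaded
    }
    return instProc;     // suitable for passing to CreateVkBackendContext() below
}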
73
74
////////////////////////////////////////////////////////////////////////////////
75
// Helper code to set up Vulkan context objects
76
77
#ifdef SK_ENABLE_VK_LAYERS
78
const char* kDebugLayerNames[] = {
79
    // single merged layer
80
    "VK_LAYER_KHRONOS_validation",
81
    // not included in standard_validation
82
    //"VK_LAYER_LUNARG_api_dump",
83
    //"VK_LAYER_LUNARG_vktrace",
84
    //"VK_LAYER_LUNARG_screenshot",
85
};
86
87
static uint32_t remove_patch_version(uint32_t specVersion) {
88
    return (specVersion >> 12) << 12;
89
}
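A quick sanity check of the bit math, assuming the standard VK_MAKE_VERSION packing (major in bits 22 and up, minor in bits 12-21, patch in the low 12 bits):

// Clearing the low 12 bits keeps major.minor and zeroes only the patch component.
static_assert(((VK_MAKE_VERSION(1, 1, 108) >> 12) << 12) == VK_MAKE_VERSION(1, 1, 0),
              "remove_patch_version drops only the patch number");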
90
91
// Returns the index into the layers array for the layer we want. Returns -1 if not supported.
92
static int should_include_debug_layer(const char* layerName,
93
                                       uint32_t layerCount, VkLayerProperties* layers,
94
                                       uint32_t version) {
95
    for (uint32_t i = 0; i < layerCount; ++i) {
96
        if (!strcmp(layerName, layers[i].layerName)) {
97
            // Since the layers intercept the vulkan calls and forward them on, we need to make sure
98
            // layer was written against a version that isn't older than the version of Vulkan we're
99
            // using so that it has all the api entry points.
100
            if (version <= remove_patch_version(layers[i].specVersion)) {
101
                return i;
102
            }
103
            return -1;
104
        }
105
106
    }
107
    return -1;
108
}
109
110
static void print_backtrace() {
111
#if defined(__GLIBC__)
112
    void* stack[64];
113
    int count = backtrace(stack, std::size(stack));
114
    backtrace_symbols_fd(stack, count, 2);
115
#else
116
    // Please add implementations for other platforms.
117
#endif
118
}
119
120
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
121
    VkDebugReportFlagsEXT       flags,
122
    VkDebugReportObjectTypeEXT  objectType,
123
    uint64_t                    object,
124
    size_t                      location,
125
    int32_t                     messageCode,
126
    const char*                 pLayerPrefix,
127
    const char*                 pMessage,
128
    void*                       pUserData) {
129
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
130
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
131
        if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
132
            strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
133
            return VK_FALSE;
134
        }
135
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
136
        if (strstr(pMessage, "VUID-vkCmdDraw-None-02686") ||
137
            strstr(pMessage, "VUID-vkCmdDrawIndexed-None-02686")) {
138
            return VK_FALSE;
139
        }
140
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
141
        print_backtrace();
142
        SkDEBUGFAIL("Vulkan debug layer error");
143
        return VK_TRUE; // skip further layers
144
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
145
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
146
        print_backtrace();
147
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
148
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
149
        print_backtrace();
150
    } else {
151
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
152
    }
153
    return VK_FALSE;
154
}
155
#endif
156
157
#define ACQUIRE_VK_INST_PROC_LOCAL(name, instance)                                 \
158
0
    PFN_vk##name grVk##name =                                                      \
159
0
        reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name));         \
160
0
    do {                                                                           \
161
0
        if (grVk##name == nullptr) {                                               \
162
0
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
163
0
            return false;                                                          \
164
0
        }                                                                          \
165
0
    } while (0)
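For reference, a sketch of what one invocation of this macro expands to, reconstructed from the definition above (illustrative only):

// ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE) becomes roughly:
//   PFN_vkEnumerateInstanceLayerProperties grVkEnumerateInstanceLayerProperties =
//       reinterpret_cast<PFN_vkEnumerateInstanceLayerProperties>(
//           getInstProc(VK_NULL_HANDLE, "vkEnumerateInstanceLayerProperties"));
//   do {
//       if (grVkEnumerateInstanceLayerProperties == nullptr) {
//           SkDebugf("Function ptr for vk%s could not be acquired\n",
//                    "EnumerateInstanceLayerProperties");
//           return false;
//       }
//   } while (0);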
166
167
static bool init_instance_extensions_and_layers(PFN_vkGetInstanceProcAddr getInstProc,
168
                                                uint32_t specVersion,
169
                                                TArray<VkExtensionProperties>* instanceExtensions,
170
0
                                                TArray<VkLayerProperties>* instanceLayers) {
171
0
    if (getInstProc == nullptr) {
172
0
        return false;
173
0
    }
174
175
0
    ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE);
176
0
    ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE);
177
178
0
    VkResult res;
179
0
    uint32_t layerCount = 0;
180
#ifdef SK_ENABLE_VK_LAYERS
181
    // instance layers
182
    res = grVkEnumerateInstanceLayerProperties(&layerCount, nullptr);
183
    if (VK_SUCCESS != res) {
184
        return false;
185
    }
186
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
187
    res = grVkEnumerateInstanceLayerProperties(&layerCount, layers);
188
    if (VK_SUCCESS != res) {
189
        delete[] layers;
190
        return false;
191
    }
192
193
    uint32_t nonPatchVersion = remove_patch_version(specVersion);
194
    for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
195
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
196
                                             nonPatchVersion);
197
        if (idx != -1) {
198
            instanceLayers->push_back() = layers[idx];
199
        }
200
    }
201
    delete[] layers;
202
#endif
203
204
    // instance extensions
205
    // via Vulkan implementation and implicitly enabled layers
206
0
    {
207
0
        uint32_t extensionCount = 0;
208
0
        res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
209
0
        if (VK_SUCCESS != res) {
210
0
            return false;
211
0
        }
212
0
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
213
0
        res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
214
0
        if (VK_SUCCESS != res) {
215
0
            delete[] extensions;
216
0
            return false;
217
0
        }
218
0
        for (uint32_t i = 0; i < extensionCount; ++i) {
219
0
            instanceExtensions->push_back() = extensions[i];
220
0
        }
221
0
        delete [] extensions;
222
0
    }
223
224
    // via explicitly enabled layers
225
0
    layerCount = instanceLayers->size();
226
0
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
227
0
        uint32_t extensionCount = 0;
228
0
        res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
229
0
                                                       &extensionCount, nullptr);
230
0
        if (VK_SUCCESS != res) {
231
0
            return false;
232
0
        }
233
0
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
234
0
        res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
235
0
                                                       &extensionCount, extensions);
236
0
        if (VK_SUCCESS != res) {
237
0
            delete[] extensions;
238
0
            return false;
239
0
        }
240
0
        for (uint32_t i = 0; i < extensionCount; ++i) {
241
0
            instanceExtensions->push_back() = extensions[i];
242
0
        }
243
0
        delete[] extensions;
244
0
    }
245
246
0
    return true;
247
0
}
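The enumerate-count-then-fill blocks above use raw new[]/delete[]; a leaner variant of the same two-call idiom, sketched with std::vector (an editorial alternative, not how this file is written; assumes <vector> and the Vulkan headers):

// Hypothetical standalone helper using the same count-then-fill enumeration idiom with RAII storage.
static bool enumerate_instance_extensions(PFN_vkEnumerateInstanceExtensionProperties enumerate,
                                          std::vector<VkExtensionProperties>* out) {
    uint32_t count = 0;
    if (enumerate(nullptr, &count, nullptr) != VK_SUCCESS) {
        return false;
    }
    out->resize(count);
    return enumerate(nullptr, &count, out->data()) == VK_SUCCESS;
}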
248
249
0
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
250
251
static bool init_device_extensions_and_layers(const skgpu::VulkanGetProc& getProc,
252
                                              uint32_t specVersion, VkInstance inst,
253
                                              VkPhysicalDevice physDev,
254
                                              TArray<VkExtensionProperties>* deviceExtensions,
255
0
                                              TArray<VkLayerProperties>* deviceLayers) {
256
0
    if (getProc == nullptr) {
257
0
        return false;
258
0
    }
259
260
0
    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
261
0
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);
262
263
0
    if (!EnumerateDeviceExtensionProperties ||
264
0
        !EnumerateDeviceLayerProperties) {
265
0
        return false;
266
0
    }
267
268
0
    VkResult res;
269
    // device layers
270
0
    uint32_t layerCount = 0;
271
#ifdef SK_ENABLE_VK_LAYERS
272
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
273
    if (VK_SUCCESS != res) {
274
        return false;
275
    }
276
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
277
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
278
    if (VK_SUCCESS != res) {
279
        delete[] layers;
280
        return false;
281
    }
282
283
    uint32_t nonPatchVersion = remove_patch_version(specVersion);
284
    for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
285
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
286
                                             nonPatchVersion);
287
        if (idx != -1) {
288
            deviceLayers->push_back() = layers[idx];
289
        }
290
    }
291
    delete[] layers;
292
#endif
293
294
    // device extensions
295
    // via Vulkan implementation and implicitly enabled layers
296
0
    {
297
0
        uint32_t extensionCount = 0;
298
0
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
299
0
        if (VK_SUCCESS != res) {
300
0
            return false;
301
0
        }
302
0
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
303
0
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
304
0
        if (VK_SUCCESS != res) {
305
0
            delete[] extensions;
306
0
            return false;
307
0
        }
308
0
        for (uint32_t i = 0; i < extensionCount; ++i) {
309
0
            deviceExtensions->push_back() = extensions[i];
310
0
        }
311
0
        delete[] extensions;
312
0
    }
313
314
    // via explicitly enabled layers
315
0
    layerCount = deviceLayers->size();
316
0
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
317
0
        uint32_t extensionCount = 0;
318
0
        res = EnumerateDeviceExtensionProperties(physDev,
319
0
            (*deviceLayers)[layerIndex].layerName,
320
0
            &extensionCount, nullptr);
321
0
        if (VK_SUCCESS != res) {
322
0
            return false;
323
0
        }
324
0
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
325
0
        res = EnumerateDeviceExtensionProperties(physDev,
326
0
            (*deviceLayers)[layerIndex].layerName,
327
0
            &extensionCount, extensions);
328
0
        if (VK_SUCCESS != res) {
329
0
            delete[] extensions;
330
0
            return false;
331
0
        }
332
0
        for (uint32_t i = 0; i < extensionCount; ++i) {
333
0
            deviceExtensions->push_back() = extensions[i];
334
0
        }
335
0
        delete[] extensions;
336
0
    }
337
338
0
    return true;
339
0
}
340
341
#define ACQUIRE_VK_INST_PROC_NOCHECK(name, instance) \
342
0
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name))
343
344
#define ACQUIRE_VK_INST_PROC(name, instance) \
345
0
    PFN_vk##name grVk##name =                                                          \
346
0
        reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name));             \
347
0
    do {                                                                               \
348
0
        if (grVk##name == nullptr) {                                                   \
349
0
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);          \
350
0
            if (inst != VK_NULL_HANDLE) {                                              \
351
0
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
352
0
            }                                                                          \
353
0
            return false;                                                              \
354
0
        }                                                                              \
355
0
    } while (0)
356
357
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
358
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))
359
360
#define ACQUIRE_VK_PROC(name, instance, device)                                        \
361
0
    PFN_vk##name grVk##name =                                                          \
362
0
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device));     \
363
0
    do {                                                                               \
364
0
        if (grVk##name == nullptr) {                                                   \
365
0
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);          \
366
0
            if (inst != VK_NULL_HANDLE) {                                              \
367
0
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
368
0
            }                                                                          \
369
0
            return false;                                                              \
370
0
        }                                                                              \
371
0
    } while (0)
372
373
#define ACQUIRE_VK_PROC_LOCAL(name, instance, device)                              \
374
0
    PFN_vk##name grVk##name =                                                      \
375
0
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
376
0
    do {                                                                           \
377
0
        if (grVk##name == nullptr) {                                               \
378
0
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
379
0
            return false;                                                          \
380
0
        }                                                                          \
381
0
    } while (0)
382
383
static bool destroy_instance(PFN_vkGetInstanceProcAddr getInstProc, VkInstance inst,
384
                             VkDebugReportCallbackEXT* debugCallback,
385
0
                             bool hasDebugExtension) {
386
0
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
387
0
        ACQUIRE_VK_INST_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst);
388
0
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
389
0
        *debugCallback = VK_NULL_HANDLE;
390
0
    }
391
0
    ACQUIRE_VK_INST_PROC_LOCAL(DestroyInstance, inst);
392
0
    grVkDestroyInstance(inst, nullptr);
393
0
    return true;
394
0
}
395
396
static bool setup_features(const skgpu::VulkanGetProc& getProc, VkInstance inst,
397
                           VkPhysicalDevice physDev, uint32_t physDeviceVersion,
398
                           skgpu::VulkanExtensions* extensions, VkPhysicalDeviceFeatures2* features,
399
0
                           bool isProtected) {
400
0
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
401
0
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));
402
403
    // Setup all extension feature structs we may want to use.
404
0
    void** tailPNext = &features->pNext;
405
406
    // If |isProtected| is given, attach that first
407
0
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
408
0
    if (isProtected) {
409
0
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
410
0
        protectedMemoryFeatures =
411
0
          (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
412
0
              sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
413
0
        protectedMemoryFeatures->sType =
414
0
          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
415
0
        protectedMemoryFeatures->pNext = nullptr;
416
0
        *tailPNext = protectedMemoryFeatures;
417
0
        tailPNext = &protectedMemoryFeatures->pNext;
418
0
    }
419
420
0
    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
421
0
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
422
0
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
423
0
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
424
0
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
425
0
        blend->pNext = nullptr;
426
0
        *tailPNext = blend;
427
0
        tailPNext = &blend->pNext;
428
0
    }
429
430
0
    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
431
0
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
432
0
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
433
0
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
434
0
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
435
0
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
436
0
        ycbcrFeature->pNext = nullptr;
437
0
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
438
0
        *tailPNext = ycbcrFeature;
439
0
        tailPNext = &ycbcrFeature->pNext;
440
0
    }
441
442
0
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
443
0
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
444
0
        grVkGetPhysicalDeviceFeatures2(physDev, features);
445
0
    } else {
446
0
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
447
0
                                          1));
448
0
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
449
0
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
450
0
    }
451
452
0
    if (isProtected) {
453
0
        if (!protectedMemoryFeatures->protectedMemory) {
454
0
            return false;
455
0
        }
456
0
    }
457
0
    return true;
458
    // If we want to disable any extension features do so here.
459
0
}
Unexecuted instantiation: VkTestUtils.cpp:sk_gpu_test::setup_features(std::__1::function<void (*(char const*, VkInstance_T*, VkDevice_T*))()> const&, VkInstance_T*, VkPhysicalDevice_T*, unsigned int, skgpu::VulkanExtensions*, VkPhysicalDeviceFeatures2*, bool)
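A caller-side sketch (assumed, not from this report) of inspecting the feature chain that setup_features() assembles, using the VkBaseInStructure helper type from vulkan_core.h:

// Walk the pNext chain to see whether the Ycbcr-conversion feature struct was attached.
static bool chain_reports_ycbcr(const VkPhysicalDeviceFeatures2* features) {
    for (const void* p = features->pNext; p != nullptr;
         p = static_cast<const VkBaseInStructure*>(p)->pNext) {
        const auto* base = static_cast<const VkBaseInStructure*>(p);
        if (base->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES) {
            const auto* ycbcr =
                    reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(base);
            return ycbcr->samplerYcbcrConversion == VK_TRUE;
        }
    }
    return false;
}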
460
461
// TODO: remove once GrVkBackendContext is deprecated (skbug.com/309785258)
462
void ConvertBackendContext(const skgpu::VulkanBackendContext& newStyle,
463
0
                           GrVkBackendContext* oldStyle) {
464
0
    oldStyle->fInstance = newStyle.fInstance;
465
0
    oldStyle->fPhysicalDevice = newStyle.fPhysicalDevice;
466
0
    oldStyle->fDevice = newStyle.fDevice;
467
0
    oldStyle->fQueue = newStyle.fQueue;
468
0
    oldStyle->fGraphicsQueueIndex = newStyle.fGraphicsQueueIndex;
469
0
    oldStyle->fMaxAPIVersion = newStyle.fMaxAPIVersion;
470
0
    oldStyle->fVkExtensions = newStyle.fVkExtensions;
471
0
    oldStyle->fDeviceFeatures = newStyle.fDeviceFeatures;
472
0
    oldStyle->fDeviceFeatures2 = newStyle.fDeviceFeatures2;
473
0
    oldStyle->fMemoryAllocator = newStyle.fMemoryAllocator;
474
0
    oldStyle->fGetProc = newStyle.fGetProc;
475
0
    oldStyle->fProtectedContext = newStyle.fProtectedContext;
476
0
}
477
478
// TODO: remove once GrVkBackendContext is deprecated (skbug.com/309785258)
479
bool CreateVkBackendContext(PFN_vkGetInstanceProcAddr getInstProc,
480
                            GrVkBackendContext* ctx,
481
                            skgpu::VulkanExtensions* extensions,
482
                            VkPhysicalDeviceFeatures2* features,
483
                            VkDebugReportCallbackEXT* debugCallback,
484
                            uint32_t* presentQueueIndexPtr,
485
                            const CanPresentFn& canPresent,
486
0
                            bool isProtected) {
487
0
    skgpu::VulkanBackendContext skgpuCtx;
488
0
    if (!CreateVkBackendContext(getInstProc,
489
0
                                &skgpuCtx,
490
0
                                extensions,
491
0
                                features,
492
0
                                debugCallback,
493
0
                                presentQueueIndexPtr,
494
0
                                canPresent,
495
0
                                isProtected)) {
496
0
        return false;
497
0
    }
498
499
0
    SkASSERT(skgpuCtx.fProtectedContext == skgpu::Protected(isProtected));
500
501
0
    ConvertBackendContext(skgpuCtx, ctx);
502
0
    return true;
503
0
}
Unexecuted instantiation: sk_gpu_test::CreateVkBackendContext(void (*(*)(VkInstance_T*, char const*))(), GrVkBackendContext*, skgpu::VulkanExtensions*, VkPhysicalDeviceFeatures2*, VkDebugReportCallbackEXT_T**, unsigned int*, std::__1::function<bool (VkInstance_T*, VkPhysicalDevice_T*, unsigned int)> const&, bool)
504
505
bool CreateVkBackendContext(PFN_vkGetInstanceProcAddr getInstProc,
506
                            skgpu::VulkanBackendContext* ctx,
507
                            skgpu::VulkanExtensions* extensions,
508
                            VkPhysicalDeviceFeatures2* features,
509
                            VkDebugReportCallbackEXT* debugCallback,
510
                            uint32_t* presentQueueIndexPtr,
511
                            const CanPresentFn& canPresent,
512
0
                            bool isProtected) {
513
0
    VkResult err;
514
515
0
    ACQUIRE_VK_INST_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE);
516
0
    uint32_t instanceVersion = 0;
517
0
    if (!grVkEnumerateInstanceVersion) {
518
0
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
519
0
    } else {
520
0
        err = grVkEnumerateInstanceVersion(&instanceVersion);
521
0
        if (err) {
522
0
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
523
0
            return false;
524
0
        }
525
0
    }
526
0
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
527
0
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
528
0
        SkDebugf("protected requires vk instance version 1.1\n");
529
0
        return false;
530
0
    }
531
532
0
    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
533
0
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
534
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
535
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
536
        // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
537
        // since that is the highest vulkan version.
538
0
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
539
0
    }
540
541
0
    instanceVersion = std::min(instanceVersion, apiVersion);
542
543
0
    STArray<2, VkPhysicalDevice> physDevs;
544
0
    VkDevice device;
545
0
    VkInstance inst = VK_NULL_HANDLE;
546
547
0
    const VkApplicationInfo app_info = {
548
0
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
549
0
        nullptr,                            // pNext
550
0
        "vktest",                           // pApplicationName
551
0
        0,                                  // applicationVersion
552
0
        "vktest",                           // pEngineName
553
0
        0,                                  // engineVersion
554
0
        apiVersion,                         // apiVersion
555
0
    };
556
557
0
    TArray<VkLayerProperties> instanceLayers;
558
0
    TArray<VkExtensionProperties> instanceExtensions;
559
560
0
    if (!init_instance_extensions_and_layers(getInstProc, instanceVersion,
561
0
                                             &instanceExtensions,
562
0
                                             &instanceLayers)) {
563
0
        return false;
564
0
    }
565
566
0
    TArray<const char*> instanceLayerNames;
567
0
    TArray<const char*> instanceExtensionNames;
568
0
    for (int i = 0; i < instanceLayers.size(); ++i) {
569
0
        instanceLayerNames.push_back(instanceLayers[i].layerName);
570
0
    }
571
0
    for (int i = 0; i < instanceExtensions.size(); ++i) {
572
0
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6) != 0) {
573
0
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
574
0
        }
575
0
    }
576
577
0
    const VkInstanceCreateInfo instance_create = {
578
0
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
579
0
        nullptr,                                   // pNext
580
0
        0,                                         // flags
581
0
        &app_info,                                 // pApplicationInfo
582
0
        (uint32_t) instanceLayerNames.size(),      // enabledLayerNameCount
583
0
        instanceLayerNames.begin(),                // ppEnabledLayerNames
584
0
        (uint32_t) instanceExtensionNames.size(),  // enabledExtensionNameCount
585
0
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
586
0
    };
587
588
0
    bool hasDebugExtension = false;
589
590
0
    ACQUIRE_VK_INST_PROC(CreateInstance, VK_NULL_HANDLE);
591
0
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
592
0
    if (err < 0) {
593
0
        SkDebugf("vkCreateInstance failed: %d\n", err);
594
0
        return false;
595
0
    }
596
597
0
    ACQUIRE_VK_INST_PROC(GetDeviceProcAddr, inst);
598
0
    auto getProc = [getInstProc, grVkGetDeviceProcAddr](const char* proc_name,
599
0
                                                        VkInstance instance, VkDevice device) {
600
0
        if (device != VK_NULL_HANDLE) {
601
0
            return grVkGetDeviceProcAddr(device, proc_name);
602
0
        }
603
0
        return getInstProc(instance, proc_name);
604
0
    };
Unexecuted instantiation: VkTestUtils.cpp:sk_gpu_test::CreateVkBackendContext(void (*(*)(VkInstance_T*, char const*))(), skgpu::VulkanBackendContext*, skgpu::VulkanExtensions*, VkPhysicalDeviceFeatures2*, VkDebugReportCallbackEXT_T**, unsigned int*, std::__1::function<bool (VkInstance_T*, VkPhysicalDevice_T*, unsigned int)> const&, bool)::$_0::operator()(char const*, VkInstance_T*, VkDevice_T*) const
Unexecuted instantiation: VkTestUtils.cpp:sk_gpu_test::CreateVkBackendContext(void (*(*)(VkInstance_T*, char const*))(), skgpu::VulkanBackendContext*, skgpu::VulkanExtensions*, VkPhysicalDeviceFeatures2*, VkDebugReportCallbackEXT_T**, unsigned int*, std::__1::function<bool (VkInstance_T*, VkPhysicalDevice_T*, unsigned int)> const&, bool)::$_1::operator()(char const*, VkInstance_T*, VkDevice_T*) const
605
606
#ifdef SK_ENABLE_VK_LAYERS
607
    *debugCallback = VK_NULL_HANDLE;
608
    for (int i = 0; i < instanceExtensionNames.size() && !hasDebugExtension; ++i) {
609
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
610
            hasDebugExtension = true;
611
        }
612
    }
613
    if (hasDebugExtension) {
614
        // Setup callback creation information
615
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
616
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
617
        callbackCreateInfo.pNext = nullptr;
618
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
619
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
620
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
621
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
622
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
623
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
624
        callbackCreateInfo.pUserData = nullptr;
625
626
        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
627
        // Register the callback
628
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
629
    }
630
#endif
631
632
0
    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
633
0
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
634
0
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
635
0
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
636
0
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
637
0
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
638
0
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
639
0
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
640
641
0
    uint32_t gpuCount;
642
0
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
643
0
    if (err) {
644
0
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
645
0
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
646
0
        return false;
647
0
    }
648
0
    if (!gpuCount) {
649
0
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
650
0
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
651
0
        return false;
652
0
    }
653
    // Allocate enough storage for all available physical devices. We should be able to just ask for
654
    // the first one, but a bug in RenderDoc (https://github.com/baldurk/renderdoc/issues/2766)
655
    // will smash the stack if we do that.
656
0
    physDevs.resize(gpuCount);
657
0
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, physDevs.data());
658
0
    if (err) {
659
0
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
660
0
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
661
0
        return false;
662
0
    }
663
    // We just use the first physical device.
664
    // TODO: find best match for our needs
665
0
    VkPhysicalDevice physDev = physDevs.front();
666
667
0
    VkPhysicalDeviceProperties physDeviceProperties;
668
0
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
669
0
    uint32_t physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);
670
671
0
    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
672
0
        SkDebugf("protected requires vk physical device version 1.1\n");
673
0
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
674
0
        return false;
675
0
    }
676
677
    // query to get the initial queue props size
678
0
    uint32_t queueCount;
679
0
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
680
0
    if (!queueCount) {
681
0
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
682
0
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
683
0
        return false;
684
0
    }
685
686
0
    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
687
    // now get the actual queue props
688
0
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
689
690
0
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
691
692
    // iterate to find the graphics queue
693
0
    uint32_t graphicsQueueIndex = queueCount;
694
0
    for (uint32_t i = 0; i < queueCount; i++) {
695
0
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
696
0
            graphicsQueueIndex = i;
697
0
            break;
698
0
        }
699
0
    }
700
0
    if (graphicsQueueIndex == queueCount) {
701
0
        SkDebugf("Could not find any supported graphics queues.\n");
702
0
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
703
0
        return false;
704
0
    }
705
706
    // iterate to find the present queue, if needed
707
0
    uint32_t presentQueueIndex = queueCount;
708
0
    if (presentQueueIndexPtr && canPresent) {
709
0
        for (uint32_t i = 0; i < queueCount; i++) {
710
0
            if (canPresent(inst, physDev, i)) {
711
0
                presentQueueIndex = i;
712
0
                break;
713
0
            }
714
0
        }
715
0
        if (presentQueueIndex == queueCount) {
716
0
            SkDebugf("Could not find any supported present queues.\n");
717
0
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
718
0
            return false;
719
0
        }
720
0
        *presentQueueIndexPtr = presentQueueIndex;
721
0
    } else {
722
        // Just setting this so we end up making a single queue for graphics since there was no
723
        // request for a present queue.
724
0
        presentQueueIndex = graphicsQueueIndex;
725
0
    }
726
727
0
    TArray<VkLayerProperties> deviceLayers;
728
0
    TArray<VkExtensionProperties> deviceExtensions;
729
0
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
730
0
                                           inst, physDev,
731
0
                                           &deviceExtensions,
732
0
                                           &deviceLayers)) {
733
0
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
734
0
        return false;
735
0
    }
736
737
0
    TArray<const char*> deviceLayerNames;
738
0
    TArray<const char*> deviceExtensionNames;
739
0
    for (int i = 0; i < deviceLayers.size(); ++i) {
740
0
        deviceLayerNames.push_back(deviceLayers[i].layerName);
741
0
    }
742
743
    // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
744
    // extensions. So see if we have the KHR version and if so don't push back the EXT version in
745
    // the next loop.
746
0
    bool hasKHRBufferDeviceAddress = false;
747
0
    for (int i = 0; i < deviceExtensions.size(); ++i) {
748
0
        if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
749
0
            hasKHRBufferDeviceAddress = true;
750
0
            break;
751
0
        }
752
0
    }
753
754
0
    for (int i = 0; i < deviceExtensions.size(); ++i) {
755
        // Don't use experimental extensions since they typically don't work with debug layers and
756
        // often are missing dependency requirements for other extensions. Additionally, these are
757
        // often left behind in the driver even after they've been promoted to real extensions.
758
0
        if (0 != strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
759
0
            0 != strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
760
761
            // There are some extensions that are not supported by the debug layers which result in
762
            // many warnings even though we don't actually use them. It's easiest to just
763
            // avoid enabling those.
764
0
            if (0 == strcmp(deviceExtensions[i].extensionName, "VK_EXT_provoking_vertex")     ||
765
0
                0 == strcmp(deviceExtensions[i].extensionName, "VK_EXT_shader_object")        ||
766
0
                0 == strcmp(deviceExtensions[i].extensionName, "VK_KHR_dynamic_rendering")    ||
767
0
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_acquire_winrt_display") ||
768
0
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_cuda_kernel_launch")    ||
769
0
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_low_latency")           ||
770
0
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_present_barrier")) {
771
0
                continue;
772
0
            }
773
774
0
            if (!hasKHRBufferDeviceAddress ||
775
0
                0 != strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
776
0
                deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
777
0
            }
778
0
        }
779
0
    }
780
781
0
    extensions->init(getProc, inst, physDev,
782
0
                     (uint32_t) instanceExtensionNames.size(),
783
0
                     instanceExtensionNames.begin(),
784
0
                     (uint32_t) deviceExtensionNames.size(),
785
0
                     deviceExtensionNames.begin());
786
787
0
    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
788
0
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
789
0
    features->pNext = nullptr;
790
791
0
    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
792
0
    void* pointerToFeatures = nullptr;
793
0
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
794
0
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
795
0
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
796
0
                            isProtected)) {
797
0
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
798
0
            return false;
799
0
        }
800
801
        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
802
        // the device creation will use that instead of the ppEnabledFeatures.
803
0
        pointerToFeatures = features;
804
0
    } else {
805
0
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
806
0
    }
807
808
    // this looks like it would slow things down,
809
    // and we can't depend on it on all platforms
810
0
    deviceFeatures->robustBufferAccess = VK_FALSE;
811
812
0
    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
813
0
    float queuePriorities[1] = { 0.0 };
814
    // Here we assume no need for swapchain queue
815
    // If one is needed, the client will need its own setup code
816
0
    const VkDeviceQueueCreateInfo queueInfo[2] = {
817
0
        {
818
0
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
819
0
            nullptr,                                    // pNext
820
0
            flags,                                      // VkDeviceQueueCreateFlags
821
0
            graphicsQueueIndex,                         // queueFamilyIndex
822
0
            1,                                          // queueCount
823
0
            queuePriorities,                            // pQueuePriorities
824
825
0
        },
826
0
        {
827
0
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
828
0
            nullptr,                                    // pNext
829
0
            0,                                          // VkDeviceQueueCreateFlags
830
0
            presentQueueIndex,                          // queueFamilyIndex
831
0
            1,                                          // queueCount
832
0
            queuePriorities,                            // pQueuePriorities
833
0
        }
834
0
    };
835
0
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
836
837
0
    const VkDeviceCreateInfo deviceInfo = {
838
0
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
839
0
        pointerToFeatures,                           // pNext
840
0
        0,                                           // VkDeviceCreateFlags
841
0
        queueInfoCount,                              // queueCreateInfoCount
842
0
        queueInfo,                                   // pQueueCreateInfos
843
0
        (uint32_t) deviceLayerNames.size(),          // layerCount
844
0
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
845
0
        (uint32_t) deviceExtensionNames.size(),      // extensionCount
846
0
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
847
0
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
848
0
    };
849
850
0
    {
851
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
852
        // skia:8712
853
        __lsan::ScopedDisabler lsanDisabler;
854
#endif
855
0
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
856
0
    }
857
0
    if (err) {
858
0
        SkDebugf("CreateDevice failed: %d\n", err);
859
0
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
860
0
        return false;
861
0
    }
862
863
0
    VkQueue queue;
864
0
    if (isProtected) {
865
0
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
866
0
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
867
0
        VkDeviceQueueInfo2 queue_info2 = {
868
0
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,          // sType
869
0
            nullptr,                                        // pNext
870
0
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,           // flags
871
0
            graphicsQueueIndex,                             // queueFamilyIndex
872
0
            0                                               // queueIndex
873
0
        };
874
0
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
875
0
    } else {
876
0
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
877
0
    }
878
879
0
    skgpu::VulkanInterface interface = skgpu::VulkanInterface(
880
0
            getProc, inst, device, instanceVersion, physDeviceVersion, extensions);
881
0
    SkASSERT(interface.validate(instanceVersion, physDeviceVersion, extensions));
882
883
0
    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = VkTestMemoryAllocator::Make(
884
0
            inst, physDev, device, physDeviceVersion, extensions, &interface);
885
886
0
    ctx->fInstance = inst;
887
0
    ctx->fPhysicalDevice = physDev;
888
0
    ctx->fDevice = device;
889
0
    ctx->fQueue = queue;
890
0
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
891
0
    ctx->fMaxAPIVersion = apiVersion;
892
0
    ctx->fVkExtensions = extensions;
893
0
    ctx->fDeviceFeatures2 = features;
894
0
    ctx->fGetProc = getProc;
895
0
    ctx->fProtectedContext = skgpu::Protected(isProtected);
896
0
    ctx->fMemoryAllocator = memoryAllocator;
897
898
0
    return true;
899
0
}
Unexecuted instantiation: sk_gpu_test::CreateVkBackendContext(void (*(*)(VkInstance_T*, char const*))(), skgpu::VulkanBackendContext*, skgpu::VulkanExtensions*, VkPhysicalDeviceFeatures2*, VkDebugReportCallbackEXT_T**, unsigned int*, std::__1::function<bool (VkInstance_T*, VkPhysicalDevice_T*, unsigned int)> const&, bool)
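An end-to-end sketch of the assumed calling pattern for this entry point (hypothetical harness code; error handling trimmed, and the downstream Skia factory is only indicated in a comment):

static void run_vulkan_test_setup() {
    PFN_vkGetInstanceProcAddr instProc = nullptr;
    if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc)) {
        return;  // no usable Vulkan loader on this machine
    }
    skgpu::VulkanBackendContext backendContext;
    skgpu::VulkanExtensions extensions;
    VkPhysicalDeviceFeatures2 features = {};
    VkDebugReportCallbackEXT debugCallback = VK_NULL_HANDLE;
    if (sk_gpu_test::CreateVkBackendContext(instProc, &backendContext, &extensions, &features,
                                            &debugCallback, /*presentQueueIndexPtr=*/nullptr,
                                            /*canPresent=*/nullptr, /*isProtected=*/false)) {
        // Hand backendContext to Skia's Vulkan context factory, run the test, then tear down.
    }
    sk_gpu_test::FreeVulkanFeaturesStructs(&features);
}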
900
901
0
void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
902
    // All Vulkan structs that could be part of the features chain will start with the
903
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
904
    // so we can get access to the pNext for the next struct.
905
0
    struct CommonVulkanHeader {
906
0
        VkStructureType sType;
907
0
        void*           pNext;
908
0
    };
909
910
0
    void* pNext = features->pNext;
911
0
    while (pNext) {
912
0
        void* current = pNext;
913
0
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
914
0
        sk_free(current);
915
0
    }
916
0
}
917
918
}  // namespace sk_gpu_test
919
920
#endif