/src/vulkan-loader/loader/loader.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * |
3 | | * Copyright (c) 2014-2023 The Khronos Group Inc. |
4 | | * Copyright (c) 2014-2023 Valve Corporation |
5 | | * Copyright (c) 2014-2023 LunarG, Inc. |
6 | | * Copyright (C) 2015 Google Inc. |
7 | | * Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. |
8 | | * Copyright (c) 2023-2023 RasterGrid Kft. |
9 | | * |
10 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
11 | | * you may not use this file except in compliance with the License. |
12 | | * You may obtain a copy of the License at |
13 | | * |
14 | | * http://www.apache.org/licenses/LICENSE-2.0 |
15 | | * |
16 | | * Unless required by applicable law or agreed to in writing, software |
17 | | * distributed under the License is distributed on an "AS IS" BASIS, |
18 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
19 | | * See the License for the specific language governing permissions and |
20 | | * limitations under the License. |
21 | | *
22 | | * |
23 | | * Author: Jon Ashburn <jon@lunarg.com> |
24 | | * Author: Courtney Goeltzenleuchter <courtney@LunarG.com> |
25 | | * Author: Mark Young <marky@lunarg.com> |
26 | | * Author: Lenny Komow <lenny@lunarg.com> |
27 | | * Author: Charles Giessen <charles@lunarg.com> |
28 | | * |
29 | | */ |
30 | | |
31 | | #include "loader.h" |
32 | | |
33 | | #include <errno.h> |
34 | | #include <inttypes.h> |
35 | | #include <stdio.h> |
36 | | #include <stdlib.h> |
37 | | #include <stdarg.h> |
38 | | #include <stdbool.h> |
39 | | #include <string.h> |
40 | | #include <stddef.h> |
41 | | |
42 | | #if defined(__APPLE__) |
43 | | #include <CoreFoundation/CoreFoundation.h> |
44 | | #include <sys/param.h> |
45 | | #endif |
46 | | |
47 | | #include <sys/types.h> |
48 | | #if defined(_WIN32) |
49 | | #include "dirent_on_windows.h" |
50 | | #elif COMMON_UNIX_PLATFORMS |
51 | | #include <dirent.h> |
52 | | #else |
53 | | #warning dirent.h not available on this platform |
54 | | #endif // _WIN32 |
55 | | |
56 | | #include "allocation.h" |
57 | | #include "stack_allocation.h" |
58 | | #include "cJSON.h" |
59 | | #include "debug_utils.h" |
60 | | #include "loader_environment.h" |
61 | | #include "loader_json.h" |
62 | | #include "log.h" |
63 | | #include "unknown_function_handling.h" |
64 | | #include "vk_loader_platform.h" |
65 | | #include "wsi.h" |
66 | | |
67 | | #if defined(WIN32) |
68 | | #include "loader_windows.h" |
69 | | #endif |
70 | | #if defined(LOADER_ENABLE_LINUX_SORT) |
71 | | // This header is currently only used when sorting Linux devices, so don't include it otherwise. |
72 | | #include "loader_linux.h" |
73 | | #endif // LOADER_ENABLE_LINUX_SORT |
74 | | |
75 | | // Generated file containing all the extension data |
76 | | #include "vk_loader_extensions.c" |
77 | | |
// Global loader state; zero-initialized at process load.
struct loader_struct loader = {0};

// Record of one activated layer. Field semantics follow their names; the env-var
// fields appear to relate to implicit-layer enable/disable variables —
// NOTE(review): confirm against the code that populates this struct.
struct activated_layer_info {
    char *name;
    char *manifest;
    char *library;
    bool is_implicit;
    enum loader_layer_enabled_by_what enabled_by_what;
    char *disable_env;
    char *enable_name_env;
    char *enable_value_env;
};

// thread safety lock for accessing global data structures such as "loader"
// all entrypoints on the instance chain need to be locked except GPA
// additionally CreateDevice and DestroyDevice needs to be locked
loader_platform_thread_mutex loader_lock;
loader_platform_thread_mutex loader_preload_icd_lock;
loader_platform_thread_mutex loader_global_instance_list_lock;

// A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything
// other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change
// functionality, but the fact that the libraries already been loaded causes any call that needs to load ICD libraries to speed up
// significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and
// vkCreateInstance.
struct loader_icd_tramp_list preloaded_icds;

// controls whether loader_platform_close_library() closes the libraries or not - controlled by an environment
// variables - this is just the definition of the variable, usage is in vk_loader_platform.h
bool loader_disable_dynamic_library_unloading;

// One-time initialization guard for the loader's global setup.
LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);
110 | | |
111 | | // Creates loader_api_version struct that contains the major and minor fields, setting patch to 0 |
112 | 42.2k | loader_api_version loader_make_version(uint32_t version) { |
113 | 42.2k | loader_api_version out_version; |
114 | 42.2k | out_version.major = VK_API_VERSION_MAJOR(version); |
115 | 42.2k | out_version.minor = VK_API_VERSION_MINOR(version); |
116 | 42.2k | out_version.patch = 0; |
117 | 42.2k | return out_version; |
118 | 42.2k | } |
119 | | |
120 | | // Creates loader_api_version struct containing the major, minor, and patch fields |
121 | 6.86k | loader_api_version loader_make_full_version(uint32_t version) { |
122 | 6.86k | loader_api_version out_version; |
123 | 6.86k | out_version.major = VK_API_VERSION_MAJOR(version); |
124 | 6.86k | out_version.minor = VK_API_VERSION_MINOR(version); |
125 | 6.86k | out_version.patch = VK_API_VERSION_PATCH(version); |
126 | 6.86k | return out_version; |
127 | 6.86k | } |
128 | | |
129 | 27.5k | loader_api_version loader_combine_version(uint32_t major, uint32_t minor, uint32_t patch) { |
130 | 27.5k | loader_api_version out_version; |
131 | 27.5k | out_version.major = (uint16_t)major; |
132 | 27.5k | out_version.minor = (uint16_t)minor; |
133 | 27.5k | out_version.patch = (uint16_t)patch; |
134 | 27.5k | return out_version; |
135 | 27.5k | } |
136 | | |
137 | | // Helper macros for determining if a version is valid or not |
138 | 41.1k | bool loader_check_version_meets_required(loader_api_version required, loader_api_version version) { |
139 | | // major version is satisfied |
140 | 41.1k | return (version.major > required.major) || |
141 | | // major version is equal, minor version is patch version is greater to minimum minor |
142 | 41.1k | (version.major == required.major && version.minor > required.minor) || |
143 | | // major and minor version are equal, patch version is greater or equal to minimum patch |
144 | 41.1k | (version.major == required.major && version.minor == required.minor && version.patch >= required.patch); |
145 | 41.1k | } |
146 | | |
147 | 0 | const char *get_enabled_by_what_str(enum loader_layer_enabled_by_what enabled_by_what) { |
148 | 0 | switch (enabled_by_what) { |
149 | 0 | default: |
150 | 0 | assert(true && "Shouldn't reach this"); |
151 | 0 | return "Unknown"; |
152 | 0 | case (ENABLED_BY_WHAT_UNSET): |
153 | 0 | assert(true && "Shouldn't reach this"); |
154 | 0 | return "Unknown"; |
155 | 0 | case (ENABLED_BY_WHAT_LOADER_SETTINGS_FILE): |
156 | 0 | return "Loader Settings File (Vulkan Configurator)"; |
157 | 0 | case (ENABLED_BY_WHAT_IMPLICIT_LAYER): |
158 | 0 | return "Implicit Layer"; |
159 | 0 | case (ENABLED_BY_WHAT_VK_INSTANCE_LAYERS): |
160 | 0 | return "Environment Variable VK_INSTANCE_LAYERS"; |
161 | 0 | case (ENABLED_BY_WHAT_VK_LOADER_LAYERS_ENABLE): |
162 | 0 | return "Environment Variable VK_LOADER_LAYERS_ENABLE"; |
163 | 0 | case (ENABLED_BY_WHAT_IN_APPLICATION_API): |
164 | 0 | return "By the Application"; |
165 | 0 | case (ENABLED_BY_WHAT_META_LAYER): |
166 | 0 | return "Meta Layer (Vulkan Configurator)"; |
167 | 0 | } |
168 | 0 | } |
169 | | |
// Wrapper around opendir so that the dirent_on_windows gets the instance it needs
// while linux opendir & readdir does not
DIR *loader_opendir(const struct loader_instance *instance, const char *name) {
#if defined(_WIN32)
    // The Windows dirent shim takes the instance's allocation callbacks (or NULL when no instance).
    return opendir(instance ? &instance->alloc_callbacks : NULL, name);
#elif COMMON_UNIX_PLATFORMS
    (void)instance;  // POSIX opendir has no allocator parameter
    return opendir(name);
#else
#warning dirent.h - opendir not available on this platform
#endif  // _WIN32
}
// Wrapper around closedir, mirroring loader_opendir: the Windows dirent shim
// needs the instance allocation callbacks to free its bookkeeping.
int loader_closedir(const struct loader_instance *instance, DIR *dir) {
#if defined(_WIN32)
    return closedir(instance ? &instance->alloc_callbacks : NULL, dir);
#elif COMMON_UNIX_PLATFORMS
    (void)instance;  // POSIX closedir has no allocator parameter
    return closedir(dir);
#else
#warning dirent.h - closedir not available on this platform
#endif  // _WIN32
}
192 | | |
// Returns true when len >= 5 and the first five characters at `path` are ".json".
// Callers position `path` at the candidate suffix of a filename.
bool is_json(const char *path, size_t len) {
    // Anything shorter than ".json" cannot match.
    return (len >= 5) && (0 == strncmp(path, ".json", 5));
}
199 | | |
200 | | // Handle error from to library loading |
201 | | void loader_handle_load_library_error(const struct loader_instance *inst, const char *filename, |
202 | 51 | enum loader_layer_library_status *lib_status) { |
203 | 51 | const char *error_message = loader_platform_open_library_error(filename); |
204 | | // If the error is due to incompatible architecture (eg 32 bit vs 64 bit), report it with INFO level |
205 | | // Discussed in Github issue 262 & 644 |
206 | | // "wrong ELF class" is a linux error, " with error 193" is a windows error |
207 | 51 | VkFlags err_flag = VULKAN_LOADER_ERROR_BIT; |
208 | 51 | if (strstr(error_message, "wrong ELF class:") != NULL || strstr(error_message, " with error 193") != NULL) { |
209 | 2 | err_flag = VULKAN_LOADER_INFO_BIT; |
210 | 2 | if (NULL != lib_status) { |
211 | 2 | *lib_status = LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE; |
212 | 2 | } |
213 | 2 | } |
214 | | // Check if the error is due to lack of memory |
215 | | // "with error 8" is the windows error code for OOM cases, aka ERROR_NOT_ENOUGH_MEMORY |
216 | | // Linux doesn't have such a nice error message - only if there are reported issues should this be called |
217 | 49 | else if (strstr(error_message, " with error 8") != NULL) { |
218 | 1 | if (NULL != lib_status) { |
219 | 1 | *lib_status = LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY; |
220 | 1 | } |
221 | 48 | } else if (NULL != lib_status) { |
222 | 48 | *lib_status = LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD; |
223 | 48 | } |
224 | 51 | loader_log(inst, err_flag, 0, "%s", error_message); |
225 | 51 | } |
226 | | |
227 | 0 | VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) { |
228 | 0 | struct loader_instance *inst = loader_get_instance(instance); |
229 | 0 | if (!inst) { |
230 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkSetInstanceDispatch: Can not retrieve Instance dispatch table."); |
231 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
232 | 0 | } |
233 | 0 | loader_set_dispatch(object, inst->disp); |
234 | 0 | return VK_SUCCESS; |
235 | 0 | } |
236 | | |
237 | 0 | VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) { |
238 | 0 | struct loader_device *dev; |
239 | 0 | struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev); |
240 | |
|
241 | 0 | if (NULL == icd_term || NULL == dev) { |
242 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
243 | 0 | } |
244 | 0 | loader_set_dispatch(object, &dev->loader_dispatch); |
245 | 0 | return VK_SUCCESS; |
246 | 0 | } |
247 | | |
// Frees every heap allocation owned by a loader_layer_properties and zeroes the
// struct so the slot can be safely reused. Safe on partially-initialized
// properties because the loader's free functions accept NULL members.
void loader_free_layer_properties(const struct loader_instance *inst, struct loader_layer_properties *layer_properties) {
    loader_instance_heap_free(inst, layer_properties->manifest_file_name);
    loader_instance_heap_free(inst, layer_properties->lib_name);
    loader_instance_heap_free(inst, layer_properties->functions.str_gipa);
    loader_instance_heap_free(inst, layer_properties->functions.str_gdpa);
    loader_instance_heap_free(inst, layer_properties->functions.str_negotiate_interface);
    loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->instance_extension_list);
    // Device extensions carry a per-extension entrypoint string list that must be freed first.
    if (layer_properties->device_extension_list.capacity > 0 && NULL != layer_properties->device_extension_list.list) {
        for (uint32_t i = 0; i < layer_properties->device_extension_list.count; i++) {
            free_string_list(inst, &layer_properties->device_extension_list.list[i].entrypoints);
        }
    }
    loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->device_extension_list);
    loader_instance_heap_free(inst, layer_properties->disable_env_var.name);
    loader_instance_heap_free(inst, layer_properties->disable_env_var.value);
    loader_instance_heap_free(inst, layer_properties->enable_env_var.name);
    loader_instance_heap_free(inst, layer_properties->enable_env_var.value);
    free_string_list(inst, &layer_properties->component_layer_names);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_extension_properties);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_layer_properties);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_version);
    free_string_list(inst, &layer_properties->override_paths);
    free_string_list(inst, &layer_properties->blacklist_layer_names);
    free_string_list(inst, &layer_properties->app_key_paths);

    // Make sure to clear out the removed layer, in case new layers are added in the previous location
    memset(layer_properties, 0, sizeof(struct loader_layer_properties));
}
276 | | |
277 | 0 | VkResult loader_init_library_list(struct loader_layer_list *instance_layers, loader_platform_dl_handle **libs) { |
278 | 0 | if (instance_layers->count > 0) { |
279 | 0 | *libs = loader_calloc(NULL, sizeof(loader_platform_dl_handle) * instance_layers->count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
280 | 0 | if (*libs == NULL) { |
281 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
282 | 0 | } |
283 | 0 | } |
284 | 0 | return VK_SUCCESS; |
285 | 0 | } |
286 | | |
287 | 1.34M | VkResult loader_copy_to_new_str(const struct loader_instance *inst, const char *source_str, char **dest_str) { |
288 | 1.34M | assert(source_str && dest_str); |
289 | 1.34M | size_t str_len = strlen(source_str) + 1; |
290 | 1.34M | *dest_str = loader_instance_heap_calloc(inst, str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
291 | 1.34M | if (NULL == *dest_str) return VK_ERROR_OUT_OF_HOST_MEMORY; |
292 | 1.34M | loader_strncpy(*dest_str, str_len, source_str, str_len); |
293 | 1.34M | (*dest_str)[str_len - 1] = 0; |
294 | 1.34M | return VK_SUCCESS; |
295 | 1.34M | } |
296 | | |
297 | 17.0k | VkResult create_string_list(const struct loader_instance *inst, uint32_t allocated_count, struct loader_string_list *string_list) { |
298 | 17.0k | assert(string_list); |
299 | 17.0k | string_list->list = loader_instance_heap_calloc(inst, sizeof(char *) * allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
300 | 17.0k | if (NULL == string_list->list) { |
301 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
302 | 0 | } |
303 | 17.0k | string_list->allocated_count = allocated_count; |
304 | 17.0k | string_list->count = 0; |
305 | 17.0k | return VK_SUCCESS; |
306 | 17.0k | } |
307 | | |
// Appends str to string_list, growing the backing array as needed.
// Ownership of str transfers to the list; on any failure str is freed here so
// the caller never has to clean it up.
// NOTE(review): on realloc failure the old list pointer is overwritten with NULL,
// which loses the previous allocation unless loader_instance_heap_realloc frees
// it on failure — confirm that contract.
VkResult append_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, char *str) {
    assert(string_list && str);
    if (string_list->allocated_count == 0) {
        // First append: start with room for 32 entries.
        string_list->allocated_count = 32;
        string_list->list =
            loader_instance_heap_calloc(inst, sizeof(char *) * string_list->allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == string_list->list) {
            loader_instance_heap_free(inst, str);  // Must clean up in case of failure
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
    } else if (string_list->count + 1 > string_list->allocated_count) {
        // Full: double the capacity.
        uint32_t new_allocated_count = string_list->allocated_count * 2;
        string_list->list = loader_instance_heap_realloc(inst, string_list->list, sizeof(char *) * string_list->allocated_count,
                                                         sizeof(char *) * new_allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == string_list->list) {
            loader_instance_heap_free(inst, str);  // Must clean up in case of failure
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        string_list->allocated_count *= 2;
    }
    string_list->list[string_list->count++] = str;
    return VK_SUCCESS;
}
331 | | |
332 | | VkResult copy_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, const char *str, |
333 | 8.00k | size_t str_len) { |
334 | 8.00k | assert(string_list && str); |
335 | 8.00k | char *new_str = loader_instance_heap_calloc(inst, sizeof(char *) * str_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
336 | 8.00k | if (NULL == new_str) { |
337 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
338 | 0 | } |
339 | 8.00k | loader_strncpy(new_str, sizeof(char *) * str_len + 1, str, str_len); |
340 | 8.00k | new_str[str_len] = '\0'; |
341 | 8.00k | return append_str_to_string_list(inst, string_list, new_str); |
342 | 8.00k | } |
343 | | |
344 | 5.92M | void free_string_list(const struct loader_instance *inst, struct loader_string_list *string_list) { |
345 | 5.92M | assert(string_list); |
346 | 5.92M | if (string_list->list) { |
347 | 559k | for (uint32_t i = 0; i < string_list->count; i++) { |
348 | 534k | loader_instance_heap_free(inst, string_list->list[i]); |
349 | 534k | string_list->list[i] = NULL; |
350 | 534k | } |
351 | 24.6k | loader_instance_heap_free(inst, string_list->list); |
352 | 24.6k | } |
353 | 5.92M | memset(string_list, 0, sizeof(struct loader_string_list)); |
354 | 5.92M | } |
355 | | |
// Given string of three part form "maj.min.pat" convert to a vulkan version number.
// Also can understand four part form "variant.major.minor.patch" if provided.
// Missing components default to 0; a NULL input yields version 0. The input
// string is modified in place by the tokenizer.
uint32_t loader_parse_version_string(char *vers_str) {
    uint32_t variant = 0, major = 0, minor = 0, patch = 0;
    char *vers_tok;
    char *context = NULL;
    if (!vers_str) {
        return 0;
    }

    // Delimiters cover '.', quotes, and line endings so JSON-quoted values parse too.
    vers_tok = thread_safe_strtok(vers_str, ".\"\n\r", &context);
    if (NULL != vers_tok) {
        major = (uint16_t)atoi(vers_tok);
        vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
        if (NULL != vers_tok) {
            minor = (uint16_t)atoi(vers_tok);
            vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
            if (NULL != vers_tok) {
                patch = (uint16_t)atoi(vers_tok);
                vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
                // check that we are using a 4 part version string
                if (NULL != vers_tok) {
                    // if we are, move the values over into the correct place
                    variant = major;
                    major = minor;
                    minor = patch;
                    patch = (uint16_t)atoi(vers_tok);
                }
            }
        }
    }

    return VK_MAKE_API_VERSION(variant, major, minor, patch);
}
390 | | |
391 | 4.30M | bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) { |
392 | 4.30M | return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false; |
393 | 4.30M | } |
394 | | |
395 | | // Search the given ext_array for an extension matching the given vk_ext_prop |
396 | | bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count, |
397 | 0 | const VkExtensionProperties *ext_array) { |
398 | 0 | for (uint32_t i = 0; i < count; i++) { |
399 | 0 | if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true; |
400 | 0 | } |
401 | 0 | return false; |
402 | 0 | } |
403 | | |
404 | | // Search the given ext_list for an extension matching the given vk_ext_prop |
405 | 84.7k | bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) { |
406 | 1.86M | for (uint32_t i = 0; i < ext_list->count; i++) { |
407 | 1.82M | if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true; |
408 | 1.82M | } |
409 | 49.3k | return false; |
410 | 84.7k | } |
411 | | |
412 | | // Search the given ext_list for a device extension matching the given ext_prop |
413 | 103k | bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) { |
414 | 2.56M | for (uint32_t i = 0; i < ext_list->count; i++) { |
415 | 2.48M | if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true; |
416 | 2.48M | } |
417 | 81.8k | return false; |
418 | 103k | } |
419 | | |
// Appends layer_property to layer_list by value, growing the list as needed.
// Ownership of layer_property's heap members transfers into the list: on success
// the source struct is zeroed; on failure its members are freed here. Capacity
// doubling is sufficient because entries are appended one at a time.
VkResult loader_append_layer_property(const struct loader_instance *inst, struct loader_layer_list *layer_list,
                                      struct loader_layer_properties *layer_property) {
    VkResult res = VK_SUCCESS;
    if (layer_list->capacity == 0) {
        res = loader_init_generic_list(inst, (struct loader_generic_list *)layer_list, sizeof(struct loader_layer_properties));
        if (VK_SUCCESS != res) {
            goto out;
        }
    }

    // Ensure enough room to add an entry
    if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_append_layer_property: realloc failed for layer list");
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        layer_list->list = new_ptr;
        layer_list->capacity *= 2;
    }
    memcpy(&layer_list->list[layer_list->count], layer_property, sizeof(struct loader_layer_properties));
    layer_list->count++;
    // Zero the source so its heap pointers are not double-freed by the caller.
    memset(layer_property, 0, sizeof(struct loader_layer_properties));
out:
    if (res != VK_SUCCESS) {
        loader_free_layer_properties(inst, layer_property);
    }
    return res;
}
451 | | |
452 | | // Search the given layer list for a layer property matching the given layer name |
453 | 29.9k | struct loader_layer_properties *loader_find_layer_property(const char *name, const struct loader_layer_list *layer_list) { |
454 | 51.9M | for (uint32_t i = 0; i < layer_list->count; i++) { |
455 | 51.9M | const VkLayerProperties *item = &layer_list->list[i].info; |
456 | 51.9M | if (strcmp(name, item->layerName) == 0) return &layer_list->list[i]; |
457 | 51.9M | } |
458 | 9.36k | return NULL; |
459 | 29.9k | } |
460 | | |
461 | | struct loader_layer_properties *loader_find_pointer_layer_property(const char *name, |
462 | 0 | const struct loader_pointer_layer_list *layer_list) { |
463 | 0 | for (uint32_t i = 0; i < layer_list->count; i++) { |
464 | 0 | const VkLayerProperties *item = &layer_list->list[i]->info; |
465 | 0 | if (strcmp(name, item->layerName) == 0) return layer_list->list[i]; |
466 | 0 | } |
467 | 0 | return NULL; |
468 | 0 | } |
469 | | |
// Search the given layer list for a layer matching the given layer name
bool loader_find_layer_name_in_list(const char *name, const struct loader_pointer_layer_list *layer_list) {
    // A NULL list trivially contains no layers.
    return (NULL != layer_list) && (NULL != loader_find_pointer_layer_property(name, layer_list));
}
480 | | |
481 | | // Search the given meta-layer's component list for a layer matching the given layer name |
482 | | bool loader_find_layer_name_in_meta_layer(const struct loader_instance *inst, const char *layer_name, |
483 | 0 | struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) { |
484 | 0 | for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->component_layer_names.count; comp_layer++) { |
485 | 0 | if (!strcmp(meta_layer_props->component_layer_names.list[comp_layer], layer_name)) { |
486 | 0 | return true; |
487 | 0 | } |
488 | 0 | struct loader_layer_properties *comp_layer_props = |
489 | 0 | loader_find_layer_property(meta_layer_props->component_layer_names.list[comp_layer], layer_list); |
490 | 0 | if (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { |
491 | 0 | return loader_find_layer_name_in_meta_layer(inst, layer_name, layer_list, comp_layer_props); |
492 | 0 | } |
493 | 0 | } |
494 | 0 | return false; |
495 | 0 | } |
496 | | |
497 | | // Search the override layer's blacklist for a layer matching the given layer name |
498 | 108k | bool loader_find_layer_name_in_blacklist(const char *layer_name, struct loader_layer_properties *meta_layer_props) { |
499 | 108k | for (uint32_t black_layer = 0; black_layer < meta_layer_props->blacklist_layer_names.count; ++black_layer) { |
500 | 0 | if (!strcmp(meta_layer_props->blacklist_layer_names.list[black_layer], layer_name)) { |
501 | 0 | return true; |
502 | 0 | } |
503 | 0 | } |
504 | 108k | return false; |
505 | 108k | } |
506 | | |
// Remove all layer properties entries from the list: close any layer libraries
// still loaded, free each entry's heap members, release the backing array, and
// zero the list struct. Safe to call with a NULL list.
TEST_FUNCTION_EXPORT void loader_delete_layer_list_and_properties(const struct loader_instance *inst,
                                                                  struct loader_layer_list *layer_list) {
    uint32_t i;
    if (!layer_list) return;

    for (i = 0; i < layer_list->count; i++) {
        if (layer_list->list[i].lib_handle) {
            // A layer library is still open; close it before freeing its properties.
            loader_platform_close_library(layer_list->list[i].lib_handle);
            loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Unloading layer library %s",
                       layer_list->list[i].lib_name);
            layer_list->list[i].lib_handle = NULL;
        }
        loader_free_layer_properties(inst, &(layer_list->list[i]));
    }
    layer_list->count = 0;

    if (layer_list->capacity > 0) {
        layer_list->capacity = 0;
        loader_instance_heap_free(inst, layer_list->list);
    }
    memset(layer_list, 0, sizeof(struct loader_layer_list));
}
530 | | |
531 | | void loader_remove_layer_in_list(const struct loader_instance *inst, struct loader_layer_list *layer_list, |
532 | 10.7k | uint32_t layer_to_remove) { |
533 | 10.7k | if (layer_list == NULL || layer_to_remove >= layer_list->count) { |
534 | 1 | return; |
535 | 1 | } |
536 | 10.7k | loader_free_layer_properties(inst, &(layer_list->list[layer_to_remove])); |
537 | | |
538 | | // Remove the current invalid meta-layer from the layer list. Use memmove since we are |
539 | | // overlapping the source and destination addresses. |
540 | 10.7k | if (layer_to_remove + 1 <= layer_list->count) { |
541 | 10.7k | memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1], |
542 | 10.7k | sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove)); |
543 | 10.7k | } |
544 | | // Decrement the count (because we now have one less) and decrement the loop index since we need to |
545 | | // re-check this index. |
546 | 10.7k | layer_list->count--; |
547 | 10.7k | } |
548 | | |
// Remove all layers in the layer list that are blacklisted by the override layer.
// NOTE: This should only be called if an override layer is found and not expired.
void loader_remove_layers_in_blacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    struct loader_layer_properties *override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
    if (NULL == override_prop) {
        return;
    }

    // Signed index because removal decrements j to re-check the shifted slot.
    for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
        // NOTE(review): this copies the whole properties struct by value each
        // iteration; the copy keeps cur_layer_name valid across the removal below.
        struct loader_layer_properties cur_layer_prop = layer_list->list[j];
        const char *cur_layer_name = &cur_layer_prop.info.layerName[0];

        // Skip the override layer itself.
        if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) {
            continue;
        }

        // If found in the override layer's blacklist, remove it
        if (loader_find_layer_name_in_blacklist(cur_layer_name, override_prop)) {
            loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
                       "loader_remove_layers_in_blacklist: Override layer is active and layer %s is in the blacklist inside of it. "
                       "Removing that layer from current layer list.",
                       cur_layer_name);
            loader_remove_layer_in_list(inst, layer_list, j);
            j--;

            // Re-do the query for the override layer, since removal shifted the array.
            override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
        }
    }
}
580 | | |
// Remove all layers in the layer list that are not found inside any implicit meta-layers.
// Two passes: first mark every layer's `keep` flag (non-explicit layers always kept;
// explicit layers kept only when some meta-layer's component list references them),
// then sweep out unmarked entries.
void loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    int32_t i;
    int32_t j;
    int32_t layer_count = (int32_t)(layer_list->count);

    for (i = 0; i < layer_count; i++) {
        layer_list->list[i].keep = false;
    }

    for (i = 0; i < layer_count; i++) {
        struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];

        // Implicit (non-explicit) layers are always kept.
        if (0 == (cur_layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
            cur_layer_prop->keep = true;
            continue;
        }
        for (j = 0; j < layer_count; j++) {
            struct loader_layer_properties *layer_to_check = &layer_list->list[j];

            if (i == j) {
                continue;
            }

            if (layer_to_check->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
                // For all layers found in this meta layer, we want to keep them as well.
                if (loader_find_layer_name_in_meta_layer(inst, cur_layer_prop->info.layerName, layer_list, layer_to_check)) {
                    cur_layer_prop->keep = true;
                }
            }
        }
    }

    // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be
    // dynamically updated if we delete a layer property in the list).
    for (i = 0; i < (int32_t)(layer_list->count); i++) {
        struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
        if (!cur_layer_prop->keep) {
            loader_log(
                inst, VULKAN_LOADER_DEBUG_BIT, 0,
                "loader_remove_layers_not_in_implicit_meta_layers : Implicit meta-layers are active, and layer %s is not list "
                "inside of any. So removing layer from current layer list.",
                cur_layer_prop->info.layerName);
            loader_remove_layer_in_list(inst, layer_list, i);
            i--;
        }
    }
}
629 | | |
630 | | VkResult loader_add_instance_extensions(const struct loader_instance *inst, |
631 | | const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name, |
632 | 0 | struct loader_extension_list *ext_list) { |
633 | 0 | uint32_t i, count = 0; |
634 | 0 | VkExtensionProperties *ext_props; |
635 | 0 | VkResult res = VK_SUCCESS; |
636 | |
|
637 | 0 | if (!fp_get_props) { |
638 | | // No EnumerateInstanceExtensionProperties defined |
639 | 0 | goto out; |
640 | 0 | } |
641 | | |
642 | | // Make sure we never call ourself by accident, this should never happen outside of error paths |
643 | 0 | if (fp_get_props == vkEnumerateInstanceExtensionProperties) { |
644 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
645 | 0 | "loader_add_instance_extensions: %s's vkEnumerateInstanceExtensionProperties points to the loader, this would " |
646 | 0 | "lead to infinite recursion.", |
647 | 0 | lib_name); |
648 | 0 | goto out; |
649 | 0 | } |
650 | | |
651 | 0 | res = fp_get_props(NULL, &count, NULL); |
652 | 0 | if (res != VK_SUCCESS) { |
653 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
654 | 0 | "loader_add_instance_extensions: Error getting Instance extension count from %s", lib_name); |
655 | 0 | goto out; |
656 | 0 | } |
657 | | |
658 | 0 | if (count == 0) { |
659 | | // No ExtensionProperties to report |
660 | 0 | goto out; |
661 | 0 | } |
662 | | |
663 | 0 | ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); |
664 | 0 | if (NULL == ext_props) { |
665 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
666 | 0 | goto out; |
667 | 0 | } |
668 | | |
669 | 0 | res = fp_get_props(NULL, &count, ext_props); |
670 | 0 | if (res != VK_SUCCESS) { |
671 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_instance_extensions: Error getting Instance extensions from %s", |
672 | 0 | lib_name); |
673 | 0 | goto out; |
674 | 0 | } |
675 | | |
676 | 0 | for (i = 0; i < count; i++) { |
677 | 0 | bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]); |
678 | 0 | if (!ext_unsupported) { |
679 | 0 | res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); |
680 | 0 | if (res != VK_SUCCESS) { |
681 | 0 | goto out; |
682 | 0 | } |
683 | 0 | } |
684 | 0 | } |
685 | | |
686 | 0 | out: |
687 | 0 | return res; |
688 | 0 | } |
689 | | |
690 | | VkResult loader_add_device_extensions(const struct loader_instance *inst, |
691 | | PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties, |
692 | | VkPhysicalDevice physical_device, const char *lib_name, |
693 | 0 | struct loader_extension_list *ext_list) { |
694 | 0 | uint32_t i = 0, count = 0; |
695 | 0 | VkResult res = VK_SUCCESS; |
696 | 0 | VkExtensionProperties *ext_props = NULL; |
697 | |
|
698 | 0 | res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL); |
699 | 0 | if (res != VK_SUCCESS) { |
700 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
701 | 0 | "loader_add_device_extensions: Error getting physical device extension info count from library %s", lib_name); |
702 | 0 | return res; |
703 | 0 | } |
704 | 0 | if (count > 0) { |
705 | 0 | ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); |
706 | 0 | if (!ext_props) { |
707 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
708 | 0 | "loader_add_device_extensions: Failed to allocate space for device extension properties from library %s.", |
709 | 0 | lib_name); |
710 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
711 | 0 | } |
712 | 0 | res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props); |
713 | 0 | if (res != VK_SUCCESS) { |
714 | 0 | return res; |
715 | 0 | } |
716 | 0 | for (i = 0; i < count; i++) { |
717 | 0 | res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); |
718 | 0 | if (res != VK_SUCCESS) { |
719 | 0 | return res; |
720 | 0 | } |
721 | 0 | } |
722 | 0 | } |
723 | | |
724 | 0 | return VK_SUCCESS; |
725 | 0 | } |
726 | | |
727 | 11.5k | VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) { |
728 | 11.5k | size_t capacity = 32 * element_size; |
729 | 11.5k | list_info->count = 0; |
730 | 11.5k | list_info->capacity = 0; |
731 | 11.5k | list_info->list = loader_instance_heap_calloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
732 | 11.5k | if (list_info->list == NULL) { |
733 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list"); |
734 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
735 | 0 | } |
736 | 11.5k | list_info->capacity = capacity; |
737 | 11.5k | return VK_SUCCESS; |
738 | 11.5k | } |
739 | | |
740 | 0 | VkResult loader_resize_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info) { |
741 | 0 | list_info->list = loader_instance_heap_realloc(inst, list_info->list, list_info->capacity, list_info->capacity * 2, |
742 | 0 | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
743 | 0 | if (list_info->list == NULL) { |
744 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_resize_generic_list: Failed to allocate space for generic list"); |
745 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
746 | 0 | } |
747 | 0 | list_info->capacity = list_info->capacity * 2; |
748 | 0 | return VK_SUCCESS; |
749 | 0 | } |
750 | | |
751 | 2.95M | void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) { |
752 | 2.95M | loader_instance_heap_free(inst, list->list); |
753 | 2.95M | memset(list, 0, sizeof(struct loader_generic_list)); |
754 | 2.95M | } |
755 | | |
// Find (or create) a free slot in the used-object list, mark it in use, record the
// caller's allocation callbacks in it, and return its index through free_index.
// The list is created lazily on first use and doubled when every slot is occupied.
// Returns VK_SUCCESS, or VK_ERROR_OUT_OF_HOST_MEMORY if (re)allocation fails.
VkResult loader_get_next_available_entry(const struct loader_instance *inst, struct loader_used_object_list *list_info,
                                         uint32_t *free_index, const VkAllocationCallbacks *pAllocator) {
    // First use: allocate the initial (zeroed) backing storage.
    if (NULL == list_info->list) {
        VkResult res =
            loader_init_generic_list(inst, (struct loader_generic_list *)list_info, sizeof(struct loader_used_object_status));
        if (VK_SUCCESS != res) {
            return res;
        }
    }
    // Linear scan for the first slot not currently in use.
    for (uint32_t i = 0; i < list_info->capacity / sizeof(struct loader_used_object_status); i++) {
        if (list_info->list[i].status == VK_FALSE) {
            list_info->list[i].status = VK_TRUE;
            if (pAllocator) {
                // Keep a copy of the callbacks so the matching release path can use
                // the same allocator the object was created with.
                list_info->list[i].allocation_callbacks = *pAllocator;
            } else {
                memset(&list_info->list[i].allocation_callbacks, 0, sizeof(VkAllocationCallbacks));
            }
            *free_index = i;
            return VK_SUCCESS;
        }
    }
    // No free space, must resize

    size_t old_capacity = list_info->capacity;
    VkResult res = loader_resize_generic_list(inst, (struct loader_generic_list *)list_info);
    if (VK_SUCCESS != res) {
        return res;
    }
    // The resize doubled the capacity, so the new half starts at byte offset
    // old_capacity, i.e. element index old_capacity / element-size.
    uint32_t new_index = (uint32_t)(old_capacity / sizeof(struct loader_used_object_status));
    // Zero out the newly allocated back half of list.
    memset(&list_info->list[new_index], 0, old_capacity);
    list_info->list[new_index].status = VK_TRUE;
    if (pAllocator) {
        list_info->list[new_index].allocation_callbacks = *pAllocator;
    } else {
        memset(&list_info->list[new_index].allocation_callbacks, 0, sizeof(VkAllocationCallbacks));
    }
    *free_index = new_index;
    return VK_SUCCESS;
}
796 | | |
797 | 0 | void loader_release_object_from_list(struct loader_used_object_list *list_info, uint32_t index_to_free) { |
798 | 0 | if (list_info->list && list_info->capacity > index_to_free * sizeof(struct loader_used_object_status)) { |
799 | 0 | list_info->list[index_to_free].status = VK_FALSE; |
800 | 0 | memset(&list_info->list[index_to_free].allocation_callbacks, 0, sizeof(VkAllocationCallbacks)); |
801 | 0 | } |
802 | 0 | } |
803 | | |
804 | | // Append non-duplicate extension properties defined in props to the given ext_list. |
805 | | // Return - Vk_SUCCESS on success |
806 | | VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list, |
807 | 66.2k | uint32_t prop_list_count, const VkExtensionProperties *props) { |
808 | 66.2k | if (ext_list->list == NULL || ext_list->capacity == 0) { |
809 | 2.28k | VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties)); |
810 | 2.28k | if (VK_SUCCESS != res) { |
811 | 0 | return res; |
812 | 0 | } |
813 | 2.28k | } |
814 | | |
815 | 132k | for (uint32_t i = 0; i < prop_list_count; i++) { |
816 | 66.2k | const VkExtensionProperties *cur_ext = &props[i]; |
817 | | |
818 | | // look for duplicates |
819 | 66.2k | if (has_vk_extension_property(cur_ext, ext_list)) { |
820 | 30.6k | continue; |
821 | 30.6k | } |
822 | | |
823 | | // add to list at end |
824 | | // check for enough capacity |
825 | 35.6k | if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) { |
826 | 697 | void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2, |
827 | 697 | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
828 | 697 | if (new_ptr == NULL) { |
829 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
830 | 0 | "loader_add_to_ext_list: Failed to reallocate space for extension list"); |
831 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
832 | 0 | } |
833 | 697 | ext_list->list = new_ptr; |
834 | | |
835 | | // double capacity |
836 | 697 | ext_list->capacity *= 2; |
837 | 697 | } |
838 | | |
839 | 35.6k | memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties)); |
840 | 35.6k | ext_list->count++; |
841 | 35.6k | } |
842 | 66.2k | return VK_SUCCESS; |
843 | 66.2k | } |
844 | | |
// Append one extension property defined in props with entrypoints defined in entries to the given
// ext_list. Do not append if a duplicate.
// If this is a duplicate, this function free's the passed in entries - as in it takes ownership over that list (if it is not
// NULL) Return - Vk_SUCCESS on success
VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list,
                                    const VkExtensionProperties *props, struct loader_string_list *entrys) {
    VkResult res = VK_SUCCESS;
    // Ownership tracking for `entrys`: freed on every exit path except when it is
    // stored into the new list entry below.
    bool should_free_entrys = true;
    // Lazily allocate the backing storage on first use.
    if (ext_list->list == NULL || ext_list->capacity == 0) {
        res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props));
        if (VK_SUCCESS != res) {
            goto out;
        }
    }

    // look for duplicates
    if (has_vk_dev_ext_property(props, ext_list)) {
        goto out;
    }

    uint32_t idx = ext_list->count;
    // add to list at end
    // check for enough capacity
    if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);

        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list");
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        ext_list->list = new_ptr;

        // double capacity
        ext_list->capacity *= 2;
    }

    memcpy(&ext_list->list[idx].props, props, sizeof(*props));
    if (entrys) {
        // Shallow-copy the string list into the entry; the entry now owns the strings,
        // so they must not be freed on the way out.
        ext_list->list[idx].entrypoints = *entrys;
        should_free_entrys = false;
    }
    ext_list->count++;
out:
    if (NULL != entrys && should_free_entrys) {
        free_string_list(inst, entrys);
    }
    return res;
}
896 | | |
897 | | // Create storage for pointers to loader_layer_properties |
898 | 0 | bool loader_init_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list) { |
899 | 0 | list->capacity = 32 * sizeof(void *); |
900 | 0 | list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
901 | 0 | if (list->list == NULL) { |
902 | 0 | return false; |
903 | 0 | } |
904 | 0 | list->count = 0; |
905 | 0 | return true; |
906 | 0 | } |
907 | | |
908 | | // Search the given array of layer names for an entry matching the given VkLayerProperties |
909 | | bool loader_names_array_has_layer_property(const VkLayerProperties *vk_layer_prop, uint32_t layer_info_count, |
910 | 0 | struct activated_layer_info *layer_info) { |
911 | 0 | for (uint32_t i = 0; i < layer_info_count; i++) { |
912 | 0 | if (strcmp(vk_layer_prop->layerName, layer_info[i].name) == 0) { |
913 | 0 | return true; |
914 | 0 | } |
915 | 0 | } |
916 | 0 | return false; |
917 | 0 | } |
918 | | |
919 | 14.9k | void loader_destroy_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *layer_list) { |
920 | 14.9k | loader_instance_heap_free(inst, layer_list->list); |
921 | 14.9k | memset(layer_list, 0, sizeof(struct loader_pointer_layer_list)); |
922 | 14.9k | } |
923 | | |
924 | | // Append layer properties defined in prop_list to the given layer_info list |
925 | | VkResult loader_add_layer_properties_to_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list, |
926 | 0 | struct loader_layer_properties *props) { |
927 | 0 | if (list->list == NULL || list->capacity == 0) { |
928 | 0 | if (!loader_init_pointer_layer_list(inst, list)) { |
929 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
930 | 0 | } |
931 | 0 | } |
932 | | |
933 | | // Check for enough capacity |
934 | 0 | if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) { |
935 | 0 | size_t new_capacity = list->capacity * 2; |
936 | 0 | void *new_ptr = |
937 | 0 | loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
938 | 0 | if (NULL == new_ptr) { |
939 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
940 | 0 | "loader_add_layer_properties_to_list: Realloc failed for when attempting to add new layer"); |
941 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
942 | 0 | } |
943 | 0 | list->list = new_ptr; |
944 | 0 | list->capacity = new_capacity; |
945 | 0 | } |
946 | 0 | list->list[list->count++] = props; |
947 | |
|
948 | 0 | return VK_SUCCESS; |
949 | 0 | } |
950 | | |
951 | | // Determine if the provided explicit layer should be available by querying the appropriate environmental variables. |
952 | | bool loader_layer_is_available(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
953 | 1.22M | const struct loader_layer_properties *prop) { |
954 | 1.22M | bool available = true; |
955 | 1.22M | bool is_implicit = (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)); |
956 | 1.22M | bool disabled_by_type = |
957 | 1.22M | (is_implicit) ? (filters->disable_filter.disable_all_implicit) : (filters->disable_filter.disable_all_explicit); |
958 | 1.22M | if ((filters->disable_filter.disable_all || disabled_by_type || |
959 | 1.22M | check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) && |
960 | 1.22M | !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) { |
961 | 0 | available = false; |
962 | 0 | } |
963 | 1.22M | if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) { |
964 | 344 | available = true; |
965 | 1.22M | } else if (!available) { |
966 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
967 | 0 | "Layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName, |
968 | 0 | VK_LAYERS_DISABLE_ENV_VAR); |
969 | 0 | } |
970 | | |
971 | 1.22M | return available; |
972 | 1.22M | } |
973 | | |
974 | | // Search the given search_list for any layers in the props list. Add these to the |
975 | | // output layer_list. |
976 | | VkResult loader_add_layer_names_to_list(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
977 | | struct loader_pointer_layer_list *output_list, |
978 | | struct loader_pointer_layer_list *expanded_output_list, uint32_t name_count, |
979 | 0 | const char *const *names, const struct loader_layer_list *source_list) { |
980 | 0 | VkResult err = VK_SUCCESS; |
981 | |
|
982 | 0 | for (uint32_t i = 0; i < name_count; i++) { |
983 | 0 | const char *source_name = names[i]; |
984 | |
|
985 | 0 | struct loader_layer_properties *layer_prop = loader_find_layer_property(source_name, source_list); |
986 | 0 | if (NULL == layer_prop) { |
987 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
988 | 0 | "loader_add_layer_names_to_list: Unable to find layer \"%s\"", source_name); |
989 | 0 | err = VK_ERROR_LAYER_NOT_PRESENT; |
990 | 0 | continue; |
991 | 0 | } |
992 | | |
993 | | // Make sure the layer isn't already in the output_list, skip adding it if it is. |
994 | 0 | if (loader_find_layer_name_in_list(source_name, output_list)) { |
995 | 0 | continue; |
996 | 0 | } |
997 | | |
998 | 0 | if (!loader_layer_is_available(inst, filters, layer_prop)) { |
999 | 0 | continue; |
1000 | 0 | } |
1001 | | |
1002 | | // If not a meta-layer, simply add it. |
1003 | 0 | if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { |
1004 | 0 | layer_prop->enabled_by_what = ENABLED_BY_WHAT_IN_APPLICATION_API; |
1005 | 0 | err = loader_add_layer_properties_to_list(inst, output_list, layer_prop); |
1006 | 0 | if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err; |
1007 | 0 | err = loader_add_layer_properties_to_list(inst, expanded_output_list, layer_prop); |
1008 | 0 | if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err; |
1009 | 0 | } else { |
1010 | 0 | err = loader_add_meta_layer(inst, filters, layer_prop, output_list, expanded_output_list, source_list, NULL); |
1011 | 0 | if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err; |
1012 | 0 | } |
1013 | 0 | } |
1014 | | |
1015 | 0 | return err; |
1016 | 0 | } |
1017 | | |
// Determine if the provided implicit layer should be enabled by querying the appropriate environmental variables.
// For an implicit layer, at least a disable environment variable is required.
// Precedence (as implemented below): enable filter > disable filter >
// disable_environment > enable_environment, with the override meta-layer able to
// force-enable a component at the end.
bool loader_implicit_layer_is_enabled(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
                                      const struct loader_layer_properties *prop) {
    bool enable = false;
    bool forced_disabled = false;
    bool forced_enabled = false;

    // Disable filters apply unless the allow filter also matches this layer's name.
    if ((filters->disable_filter.disable_all || filters->disable_filter.disable_all_implicit ||
         check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
        !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
        forced_disabled = true;
    }
    if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
        forced_enabled = true;
    }

    // If no enable_environment variable is specified, this implicit layer is always enabled by default.
    if (NULL == prop->enable_env_var.name) {
        enable = true;
    } else {
        // Enable only when the env var is set AND its value matches the manifest's value.
        char *env_value = loader_getenv(prop->enable_env_var.name, inst);
        if (env_value && !strcmp(prop->enable_env_var.value, env_value)) {
            enable = true;
        }

        // Otherwise, only enable this layer if the enable environment variable is defined
        loader_free_getenv(env_value, inst);
    }

    if (forced_enabled) {
        // Only report a message that we've forced on a layer if it wouldn't have been enabled
        // normally.
        if (!enable) {
            enable = true;
            loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                       "Implicit layer \"%s\" forced enabled due to env var \'%s\'.", prop->info.layerName,
                       VK_LAYERS_ENABLE_ENV_VAR);
        }
    } else if (enable && forced_disabled) {
        enable = false;
        // Report a message that we've forced off a layer if it would have been enabled normally.
        // Note: this path returns immediately, so neither the disable_environment check
        // nor the override-layer check below can re-enable a filter-disabled layer.
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "Implicit layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
                   VK_LAYERS_DISABLE_ENV_VAR);
        return enable;
    }

    // The disable_environment has priority over everything else. If it is defined, the layer is always
    // disabled.
    if (NULL != prop->disable_env_var.name) {
        // Any value at all (even empty) disables the layer; only presence is checked.
        char *env_value = loader_getenv(prop->disable_env_var.name, inst);
        if (NULL != env_value) {
            enable = false;
        }
        loader_free_getenv(env_value, inst);
    } else if ((prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == 0) {
        // Implicit layers are required by the spec to ship a disable_environment;
        // warn (but still proceed) when the manifest omits it.
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "Implicit layer \"%s\" missing disabled environment variable!", prop->info.layerName);
    }

    // Enable this layer if it is included in the override layer
    if (inst != NULL && inst->override_layer_present) {
        struct loader_layer_properties *override = NULL;
        for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) {
            if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
                override = &inst->instance_layer_list.list[i];
                break;
            }
        }
        if (override != NULL) {
            for (uint32_t i = 0; i < override->component_layer_names.count; ++i) {
                if (strcmp(override->component_layer_names.list[i], prop->info.layerName) == 0) {
                    enable = true;
                    break;
                }
            }
        }
    }

    return enable;
}
1100 | | |
1101 | | // Check the individual implicit layer for the enable/disable environment variable settings. Only add it after |
1102 | | // every check has passed indicating it should be used, including making sure a layer of the same name hasn't already been |
1103 | | // added. |
1104 | | VkResult loader_add_implicit_layer(const struct loader_instance *inst, struct loader_layer_properties *prop, |
1105 | | const struct loader_envvar_all_filters *filters, struct loader_pointer_layer_list *target_list, |
1106 | | struct loader_pointer_layer_list *expanded_target_list, |
1107 | 0 | const struct loader_layer_list *source_list) { |
1108 | 0 | VkResult result = VK_SUCCESS; |
1109 | 0 | if (loader_implicit_layer_is_enabled(inst, filters, prop)) { |
1110 | 0 | if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { |
1111 | | // Make sure the layer isn't already in the output_list, skip adding it if it is. |
1112 | 0 | if (loader_find_layer_name_in_list(&prop->info.layerName[0], target_list)) { |
1113 | 0 | return result; |
1114 | 0 | } |
1115 | 0 | prop->enabled_by_what = ENABLED_BY_WHAT_IMPLICIT_LAYER; |
1116 | 0 | result = loader_add_layer_properties_to_list(inst, target_list, prop); |
1117 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1118 | 0 | if (NULL != expanded_target_list) { |
1119 | 0 | result = loader_add_layer_properties_to_list(inst, expanded_target_list, prop); |
1120 | 0 | } |
1121 | 0 | } else { |
1122 | 0 | result = loader_add_meta_layer(inst, filters, prop, target_list, expanded_target_list, source_list, NULL); |
1123 | 0 | } |
1124 | 0 | } |
1125 | 0 | return result; |
1126 | 0 | } |
1127 | | |
1128 | | // Add the component layers of a meta-layer to the active list of layers |
1129 | | VkResult loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
1130 | | struct loader_layer_properties *prop, struct loader_pointer_layer_list *target_list, |
1131 | | struct loader_pointer_layer_list *expanded_target_list, const struct loader_layer_list *source_list, |
1132 | 0 | bool *out_found_all_component_layers) { |
1133 | 0 | VkResult result = VK_SUCCESS; |
1134 | 0 | bool found_all_component_layers = true; |
1135 | | |
1136 | | // We need to add all the individual component layers |
1137 | 0 | loader_api_version meta_layer_api_version = loader_make_version(prop->info.specVersion); |
1138 | 0 | for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) { |
1139 | 0 | struct loader_layer_properties *search_prop = |
1140 | 0 | loader_find_layer_property(prop->component_layer_names.list[comp_layer], source_list); |
1141 | 0 | if (search_prop != NULL) { |
1142 | 0 | loader_api_version search_prop_version = loader_make_version(prop->info.specVersion); |
1143 | 0 | if (!loader_check_version_meets_required(meta_layer_api_version, search_prop_version)) { |
1144 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1145 | 0 | "Meta-layer \"%s\" API version %u.%u, component layer \"%s\" version %u.%u, may have " |
1146 | 0 | "incompatibilities (Policy #LLP_LAYER_8)!", |
1147 | 0 | prop->info.layerName, meta_layer_api_version.major, meta_layer_api_version.minor, |
1148 | 0 | search_prop->info.layerName, search_prop_version.major, search_prop_version.minor); |
1149 | 0 | } |
1150 | |
|
1151 | 0 | if (!loader_layer_is_available(inst, filters, search_prop)) { |
1152 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1153 | 0 | "Meta Layer \"%s\" component layer \"%s\" disabled.", prop->info.layerName, search_prop->info.layerName); |
1154 | 0 | continue; |
1155 | 0 | } |
1156 | | |
1157 | | // If the component layer is itself an implicit layer, we need to do the implicit layer enable |
1158 | | // checks |
1159 | 0 | if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { |
1160 | 0 | search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER; |
1161 | 0 | result = loader_add_implicit_layer(inst, search_prop, filters, target_list, expanded_target_list, source_list); |
1162 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1163 | 0 | } else { |
1164 | 0 | if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { |
1165 | 0 | bool found_layers_in_component_meta_layer = true; |
1166 | 0 | search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER; |
1167 | 0 | result = loader_add_meta_layer(inst, filters, search_prop, target_list, expanded_target_list, source_list, |
1168 | 0 | &found_layers_in_component_meta_layer); |
1169 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1170 | 0 | if (!found_layers_in_component_meta_layer) found_all_component_layers = false; |
1171 | 0 | } else if (!loader_find_layer_name_in_list(&search_prop->info.layerName[0], target_list)) { |
1172 | | // Make sure the layer isn't already in the output_list, skip adding it if it is. |
1173 | 0 | search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER; |
1174 | 0 | result = loader_add_layer_properties_to_list(inst, target_list, search_prop); |
1175 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1176 | 0 | if (NULL != expanded_target_list) { |
1177 | 0 | result = loader_add_layer_properties_to_list(inst, expanded_target_list, search_prop); |
1178 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1179 | 0 | } |
1180 | 0 | } |
1181 | 0 | } |
1182 | 0 | } else { |
1183 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1184 | 0 | "Failed to find layer name \"%s\" component layer \"%s\" to activate (Policy #LLP_LAYER_7)", |
1185 | 0 | prop->component_layer_names.list[comp_layer], prop->component_layer_names.list[comp_layer]); |
1186 | 0 | found_all_component_layers = false; |
1187 | 0 | } |
1188 | 0 | } |
1189 | | |
1190 | | // Add this layer to the overall target list (not the expanded one) |
1191 | 0 | if (found_all_component_layers) { |
1192 | 0 | prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER; |
1193 | 0 | result = loader_add_layer_properties_to_list(inst, target_list, prop); |
1194 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1195 | | // Write the result to out_found_all_component_layers in case this function is being recursed |
1196 | 0 | if (out_found_all_component_layers) *out_found_all_component_layers = found_all_component_layers; |
1197 | 0 | } |
1198 | | |
1199 | 0 | return result; |
1200 | 0 | } |
1201 | | |
1202 | 0 | VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) { |
1203 | 0 | for (uint32_t i = 0; i < list->count; i++) { |
1204 | 0 | if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i]; |
1205 | 0 | } |
1206 | 0 | return NULL; |
1207 | 0 | } |
1208 | | |
1209 | 0 | VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) { |
1210 | 0 | for (uint32_t i = 0; i < list->count; i++) { |
1211 | 0 | if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props; |
1212 | 0 | } |
1213 | 0 | return NULL; |
1214 | 0 | } |
1215 | | |
1216 | | // For Instance extensions implemented within the loader (i.e. DEBUG_REPORT |
1217 | | // the extension must provide two entry points for the loader to use: |
1218 | | // - "trampoline" entry point - this is the address returned by GetProcAddr |
1219 | | // and will always do what's necessary to support a |
1220 | | // global call. |
1221 | | // - "terminator" function - this function will be put at the end of the |
1222 | | // instance chain and will contain the necessary logic |
1223 | | // to call / process the extension for the appropriate |
1224 | | // ICDs that are available. |
1225 | | // There is no generic mechanism for including these functions, the references |
1226 | | // must be placed into the appropriate loader entry points. |
1227 | | // GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr |
1228 | | // requests |
1229 | | // loader_coalesce_extensions(void) - add extension records to the list of global |
1230 | | // extension available to the app. |
1231 | | // instance_disp - add function pointer for terminator function |
1232 | | // to this array. |
1233 | | // The extension itself should be in a separate file that will be linked directly |
1234 | | // with the loader. |
// Build the list of instance extensions the loader will advertise to the app:
// the union of every scanned ICD's instance extensions (optionally filtered to
// those the loader recognizes), plus the loader's own debug extensions and the
// loader-implemented VK_KHR_portability_enumeration and
// VK_LUNARG_direct_driver_loading extensions.
// Returns VK_SUCCESS, or an error from list allocation / extension enumeration.
VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
                                                   struct loader_extension_list *inst_exts) {
    struct loader_extension_list icd_exts;
    VkResult res = VK_SUCCESS;
    char *env_value;
    bool filter_extensions = true;

    // Check if a user wants to disable the instance extension filtering behavior
    env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
    if (NULL != env_value && atoi(env_value) != 0) {
        filter_extensions = false;
    }
    loader_free_getenv(env_value, inst);

    // traverse scanned icd list adding non-duplicate extensions to the list
    for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
        res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
        if (VK_SUCCESS != res) {
            goto out;
        }
        res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties,
                                             icd_tramp_list->scanned_list[i].lib_name, &icd_exts);
        if (VK_SUCCESS == res) {
            if (filter_extensions) {
                // Remove any extensions not recognized by the loader.
                // j is signed so that `--j` below can rewind to re-check the
                // element that was shifted into the removed slot.
                for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) {
                    // See if the extension is in the list of supported extensions
                    bool found = false;
                    for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) {
                        if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) {
                            found = true;
                            break;
                        }
                    }

                    // If it isn't in the list, remove it by shifting every later
                    // element down one slot (preserves relative order).
                    if (!found) {
                        for (uint32_t k = j + 1; k < icd_exts.count; k++) {
                            icd_exts.list[k - 1] = icd_exts.list[k];
                        }
                        --icd_exts.count;
                        --j;  // revisit index j, it now holds the shifted-down element
                    }
                }
            }

            res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list);
        }
        // Per-ICD scratch list is freed every iteration, even on failure.
        loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
        if (VK_SUCCESS != res) {
            goto out;
        }
    };

    // Traverse loader's extensions, adding non-duplicate extensions to the list
    res = add_debug_extensions_to_ext_list(inst, inst_exts);
    if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
        goto out;
    }
    const VkExtensionProperties portability_enumeration_extension_info[] = {
        {VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION}};

    // Add VK_KHR_portability_enumeration (implemented by the loader itself)
    res = loader_add_to_ext_list(inst, inst_exts, sizeof(portability_enumeration_extension_info) / sizeof(VkExtensionProperties),
                                 portability_enumeration_extension_info);
    if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
        goto out;
    }

    const VkExtensionProperties direct_driver_loading_extension_info[] = {
        {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME, VK_LUNARG_DIRECT_DRIVER_LOADING_SPEC_VERSION}};

    // Add VK_LUNARG_direct_driver_loading (implemented by the loader itself)
    res = loader_add_to_ext_list(inst, inst_exts, sizeof(direct_driver_loading_extension_info) / sizeof(VkExtensionProperties),
                                 direct_driver_loading_extension_info);
    if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
        goto out;
    }

out:
    return res;
}
1317 | | |
1318 | 0 | struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev) { |
1319 | 0 | VkLayerDispatchTable *dispatch_table_device = loader_get_dispatch(device); |
1320 | 0 | if (NULL == dispatch_table_device) { |
1321 | 0 | *found_dev = NULL; |
1322 | 0 | return NULL; |
1323 | 0 | } |
1324 | 0 | loader_platform_thread_lock_mutex(&loader_global_instance_list_lock); |
1325 | 0 | *found_dev = NULL; |
1326 | |
|
1327 | 0 | for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { |
1328 | 0 | for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { |
1329 | 0 | for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next) { |
1330 | | // Value comparison of device prevents object wrapping by layers |
1331 | 0 | if (loader_get_dispatch(dev->icd_device) == dispatch_table_device || |
1332 | 0 | (dev->chain_device != VK_NULL_HANDLE && loader_get_dispatch(dev->chain_device) == dispatch_table_device)) { |
1333 | 0 | *found_dev = dev; |
1334 | 0 | loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock); |
1335 | 0 | return icd_term; |
1336 | 0 | } |
1337 | 0 | } |
1338 | 0 | } |
1339 | 0 | } |
1340 | 0 | loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock); |
1341 | 0 | return NULL; |
1342 | 0 | } |
1343 | | |
1344 | 0 | void loader_destroy_logical_device(struct loader_device *dev, const VkAllocationCallbacks *pAllocator) { |
1345 | 0 | if (pAllocator) { |
1346 | 0 | dev->alloc_callbacks = *pAllocator; |
1347 | 0 | } |
1348 | 0 | loader_device_heap_free(dev, dev); |
1349 | 0 | } |
1350 | | |
1351 | 0 | struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator) { |
1352 | 0 | struct loader_device *new_dev; |
1353 | 0 | new_dev = loader_calloc(pAllocator, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); |
1354 | |
|
1355 | 0 | if (!new_dev) { |
1356 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_logical_device: Failed to alloc struct loader_device"); |
1357 | 0 | return NULL; |
1358 | 0 | } |
1359 | | |
1360 | 0 | new_dev->loader_dispatch.core_dispatch.magic = DEVICE_DISP_TABLE_MAGIC_NUMBER; |
1361 | |
|
1362 | 0 | if (pAllocator) { |
1363 | 0 | new_dev->alloc_callbacks = *pAllocator; |
1364 | 0 | } |
1365 | |
|
1366 | 0 | return new_dev; |
1367 | 0 | } |
1368 | | |
1369 | 0 | void loader_add_logical_device(struct loader_icd_term *icd_term, struct loader_device *dev) { |
1370 | 0 | dev->next = icd_term->logical_device_list; |
1371 | 0 | icd_term->logical_device_list = dev; |
1372 | 0 | } |
1373 | | |
1374 | | void loader_remove_logical_device(struct loader_icd_term *icd_term, struct loader_device *found_dev, |
1375 | 0 | const VkAllocationCallbacks *pAllocator) { |
1376 | 0 | struct loader_device *dev, *prev_dev; |
1377 | |
|
1378 | 0 | if (!icd_term || !found_dev) return; |
1379 | | |
1380 | 0 | prev_dev = NULL; |
1381 | 0 | dev = icd_term->logical_device_list; |
1382 | 0 | while (dev && dev != found_dev) { |
1383 | 0 | prev_dev = dev; |
1384 | 0 | dev = dev->next; |
1385 | 0 | } |
1386 | |
|
1387 | 0 | if (prev_dev) |
1388 | 0 | prev_dev->next = found_dev->next; |
1389 | 0 | else |
1390 | 0 | icd_term->logical_device_list = found_dev->next; |
1391 | 0 | loader_destroy_logical_device(found_dev, pAllocator); |
1392 | 0 | } |
1393 | | |
1394 | 0 | const VkAllocationCallbacks *ignore_null_callback(const VkAllocationCallbacks *callbacks) { |
1395 | 0 | return NULL != callbacks->pfnAllocation && NULL != callbacks->pfnFree && NULL != callbacks->pfnReallocation && |
1396 | 0 | NULL != callbacks->pfnInternalAllocation && NULL != callbacks->pfnInternalFree |
1397 | 0 | ? callbacks |
1398 | 0 | : NULL; |
1399 | 0 | } |
1400 | | |
1401 | | // Try to close any open objects on the loader_icd_term - this must be done before destroying the instance |
void loader_icd_close_objects(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term) {
    // Each of the three loops below follows the same pattern: the per-ICD list
    // holds the ICD's own handle for slot i, while the instance-level list for
    // the same slot records whether the object is still live (status == VK_TRUE)
    // and the allocation callbacks the app supplied at creation time. Only
    // destroy when the slot is in range on both sides, the object is live, and
    // the ICD provides the destroy entry point.
    for (uint32_t i = 0; i < icd_term->surface_list.capacity / sizeof(VkSurfaceKHR); i++) {
        if (ptr_inst->surfaces_list.capacity > i * sizeof(struct loader_used_object_status) &&
            ptr_inst->surfaces_list.list[i].status == VK_TRUE && NULL != icd_term->surface_list.list &&
            icd_term->surface_list.list[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) {
            icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, icd_term->surface_list.list[i],
                                                 ignore_null_callback(&(ptr_inst->surfaces_list.list[i].allocation_callbacks)));
            // Clear the slot so a later pass cannot double-destroy.
            icd_term->surface_list.list[i] = (VkSurfaceKHR)(uintptr_t)NULL;
        }
    }
    // Same pattern for VkDebugUtilsMessengerEXT objects.
    for (uint32_t i = 0; i < icd_term->debug_utils_messenger_list.capacity / sizeof(VkDebugUtilsMessengerEXT); i++) {
        if (ptr_inst->debug_utils_messengers_list.capacity > i * sizeof(struct loader_used_object_status) &&
            ptr_inst->debug_utils_messengers_list.list[i].status == VK_TRUE && NULL != icd_term->debug_utils_messenger_list.list &&
            icd_term->debug_utils_messenger_list.list[i] && NULL != icd_term->dispatch.DestroyDebugUtilsMessengerEXT) {
            icd_term->dispatch.DestroyDebugUtilsMessengerEXT(
                icd_term->instance, icd_term->debug_utils_messenger_list.list[i],
                ignore_null_callback(&(ptr_inst->debug_utils_messengers_list.list[i].allocation_callbacks)));
            icd_term->debug_utils_messenger_list.list[i] = (VkDebugUtilsMessengerEXT)(uintptr_t)NULL;
        }
    }
    // Same pattern for VkDebugReportCallbackEXT objects.
    for (uint32_t i = 0; i < icd_term->debug_report_callback_list.capacity / sizeof(VkDebugReportCallbackEXT); i++) {
        if (ptr_inst->debug_report_callbacks_list.capacity > i * sizeof(struct loader_used_object_status) &&
            ptr_inst->debug_report_callbacks_list.list[i].status == VK_TRUE && NULL != icd_term->debug_report_callback_list.list &&
            icd_term->debug_report_callback_list.list[i] && NULL != icd_term->dispatch.DestroyDebugReportCallbackEXT) {
            icd_term->dispatch.DestroyDebugReportCallbackEXT(
                icd_term->instance, icd_term->debug_report_callback_list.list[i],
                ignore_null_callback(&(ptr_inst->debug_report_callbacks_list.list[i].allocation_callbacks)));
            icd_term->debug_report_callback_list.list[i] = (VkDebugReportCallbackEXT)(uintptr_t)NULL;
        }
    }
}
1433 | | // Free resources allocated inside the loader_icd_term |
1434 | | void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term, |
1435 | 0 | const VkAllocationCallbacks *pAllocator) { |
1436 | 0 | ptr_inst->icd_terms_count--; |
1437 | 0 | for (struct loader_device *dev = icd_term->logical_device_list; dev;) { |
1438 | 0 | struct loader_device *next_dev = dev->next; |
1439 | 0 | loader_destroy_logical_device(dev, pAllocator); |
1440 | 0 | dev = next_dev; |
1441 | 0 | } |
1442 | |
|
1443 | 0 | loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->surface_list); |
1444 | 0 | loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->debug_utils_messenger_list); |
1445 | 0 | loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->debug_report_callback_list); |
1446 | |
|
1447 | 0 | loader_instance_heap_free(ptr_inst, icd_term); |
1448 | 0 | } |
1449 | | |
1450 | 0 | struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) { |
1451 | 0 | struct loader_icd_term *icd_term; |
1452 | |
|
1453 | 0 | icd_term = loader_instance_heap_calloc(ptr_inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1454 | 0 | if (!icd_term) { |
1455 | 0 | return NULL; |
1456 | 0 | } |
1457 | | |
1458 | 0 | icd_term->scanned_icd = scanned_icd; |
1459 | 0 | icd_term->this_instance = ptr_inst; |
1460 | | |
1461 | | // Prepend to the list |
1462 | 0 | icd_term->next = ptr_inst->icd_terms; |
1463 | 0 | ptr_inst->icd_terms = icd_term; |
1464 | 0 | ptr_inst->icd_terms_count++; |
1465 | |
|
1466 | 0 | return icd_term; |
1467 | 0 | } |
1468 | | // Closes the library handle in the scanned ICD, free the lib_name string, and zeros out all data |
1469 | 0 | void loader_unload_scanned_icd(struct loader_instance *inst, struct loader_scanned_icd *scanned_icd) { |
1470 | 0 | if (NULL == scanned_icd) { |
1471 | 0 | return; |
1472 | 0 | } |
1473 | 0 | if (scanned_icd->handle) { |
1474 | 0 | loader_platform_close_library(scanned_icd->handle); |
1475 | 0 | scanned_icd->handle = NULL; |
1476 | 0 | } |
1477 | 0 | loader_instance_heap_free(inst, scanned_icd->lib_name); |
1478 | 0 | memset(scanned_icd, 0, sizeof(struct loader_scanned_icd)); |
1479 | 0 | } |
1480 | | |
1481 | | // Determine the ICD interface version to use. |
1482 | | // @param icd |
1483 | | // @param pVersion Output parameter indicating which version to use or 0 if |
1484 | | // the negotiation API is not supported by the ICD |
1485 | | // @return bool indicating true if the selected interface version is supported |
1486 | | // by the loader, false indicates the version is not supported |
1487 | 0 | bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) { |
1488 | 0 | if (fp_negotiate_icd_version == NULL) { |
1489 | | // ICD does not support the negotiation API, it supports version 0 or 1 |
1490 | | // calling code must determine if it is version 0 or 1 |
1491 | 0 | *pVersion = 0; |
1492 | 0 | } else { |
1493 | | // ICD supports the negotiation API, so call it with the loader's |
1494 | | // latest version supported |
1495 | 0 | *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION; |
1496 | 0 | VkResult result = fp_negotiate_icd_version(pVersion); |
1497 | |
|
1498 | 0 | if (result == VK_ERROR_INCOMPATIBLE_DRIVER) { |
1499 | | // ICD no longer supports the loader's latest interface version so |
1500 | | // fail loading the ICD |
1501 | 0 | return false; |
1502 | 0 | } |
1503 | 0 | } |
1504 | | |
1505 | | #if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0 |
1506 | | if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) { |
1507 | | // Loader no longer supports the ICD's latest interface version so fail |
1508 | | // loading the ICD |
1509 | | return false; |
1510 | | } |
1511 | | #endif |
1512 | 0 | return true; |
1513 | 0 | } |
1514 | | |
1515 | 8.45k | void loader_clear_scanned_icd_list(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) { |
1516 | 8.45k | if (0 != icd_tramp_list->capacity && icd_tramp_list->scanned_list) { |
1517 | 980 | for (uint32_t i = 0; i < icd_tramp_list->count; i++) { |
1518 | 0 | if (icd_tramp_list->scanned_list[i].handle) { |
1519 | 0 | loader_platform_close_library(icd_tramp_list->scanned_list[i].handle); |
1520 | 0 | icd_tramp_list->scanned_list[i].handle = NULL; |
1521 | 0 | } |
1522 | 0 | loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name); |
1523 | 0 | } |
1524 | 980 | loader_instance_heap_free(inst, icd_tramp_list->scanned_list); |
1525 | 980 | } |
1526 | 8.45k | memset(icd_tramp_list, 0, sizeof(struct loader_icd_tramp_list)); |
1527 | 8.45k | } |
1528 | | |
1529 | 980 | VkResult loader_init_scanned_icd_list(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) { |
1530 | 980 | VkResult res = VK_SUCCESS; |
1531 | 980 | loader_clear_scanned_icd_list(inst, icd_tramp_list); |
1532 | 980 | icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd); |
1533 | 980 | icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1534 | 980 | if (NULL == icd_tramp_list->scanned_list) { |
1535 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1536 | 0 | "loader_init_scanned_icd_list: Realloc failed for layer list when attempting to add new layer"); |
1537 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
1538 | 0 | } |
1539 | 980 | return res; |
1540 | 980 | } |
1541 | | |
// Validate and register one driver supplied through the
// VK_LUNARG_direct_driver_loading extension. The driver is probed entirely
// through its pfnGetInstanceProcAddr; on success a new entry (with no library
// handle or lib_name) is appended to icd_tramp_list.
// Returns VK_SUCCESS, VK_ERROR_INITIALIZATION_FAILED when the driver is
// unusable (logged and skipped by the caller), or VK_ERROR_OUT_OF_HOST_MEMORY.
VkResult loader_add_direct_driver(const struct loader_instance *inst, uint32_t index,
                                  const VkDirectDriverLoadingInfoLUNARG *pDriver, struct loader_icd_tramp_list *icd_tramp_list) {
    // Assume pDriver is valid, since there is no real way to check it. Calling code should make sure the pointer to the array
    // of VkDirectDriverLoadingInfoLUNARG structures is non-null.
    if (NULL == pDriver->pfnGetInstanceProcAddr) {
        loader_log(
            inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
            "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d contains a NULL pointer for the "
            "pfnGetInstanceProcAddr member, skipping.",
            index);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    PFN_vkGetInstanceProcAddr fp_get_proc_addr = pDriver->pfnGetInstanceProcAddr;
    PFN_vkCreateInstance fp_create_inst = NULL;
    PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
    PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
    PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
#endif
    struct loader_scanned_icd *new_scanned_icd;
    uint32_t interface_version = 0;

    // Try to get the negotiate ICD interface version function
    fp_negotiate_icd_version = (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)pDriver->pfnGetInstanceProcAddr(
        NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");

    if (NULL == fp_negotiate_icd_version) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_add_direct_driver: Could not get 'vk_icdNegotiateLoaderICDInterfaceVersion' from "
                   "VkDirectDriverLoadingInfoLUNARG structure at "
                   "index %d, skipping.",
                   index);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_version)) {
        loader_log(
            inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
            "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
            "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading "
            "extension, skipping.",
            index, interface_version);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Direct driver loading requires loader/driver interface version 7 or newer.
    if (interface_version < 7) {
        loader_log(
            inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
            "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
            "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading "
            "extension, skipping.",
            index, interface_version);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // vkCreateInstance and vkEnumerateInstanceExtensionProperties are mandatory;
    // fail the driver if either is missing.
    fp_create_inst = (PFN_vkCreateInstance)pDriver->pfnGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (NULL == fp_create_inst) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_add_direct_driver: Could not get 'vkCreateInstance' from VkDirectDriverLoadingInfoLUNARG structure at "
                   "index %d, skipping.",
                   index);
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    fp_get_inst_ext_props =
        (PFN_vkEnumerateInstanceExtensionProperties)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");
    if (NULL == fp_get_inst_ext_props) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_add_direct_driver: Could not get 'vkEnumerateInstanceExtensionProperties' from "
                   "VkDirectDriverLoadingInfoLUNARG structure at index %d, skipping.",
                   index);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Optional entry point; may legitimately be NULL.
    fp_get_phys_dev_proc_addr =
        (PFN_vk_icdGetPhysicalDeviceProcAddr)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version
    // 7 or greater, otherwise fallback to loading it from the platform dynamic linker
    fp_enum_dxgi_adapter_phys_devs =
        (PFN_vk_icdEnumerateAdapterPhysicalDevices)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
#endif

    // check for enough capacity
    if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
                                                     icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_direct_driver: Realloc failed on icd library list for ICD index %u", index);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        icd_tramp_list->scanned_list = new_ptr;

        // double capacity
        icd_tramp_list->capacity *= 2;
    }

    // Driver must be 1.1 to support version 7
    uint32_t api_version = VK_API_VERSION_1_1;
    PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
        (PFN_vkEnumerateInstanceVersion)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");

    if (icd_enumerate_instance_version) {
        VkResult res = icd_enumerate_instance_version(&api_version);
        if (res != VK_SUCCESS) {
            return res;
        }
    }

    // Fill in the new entry. No library handle or lib_name: the driver is
    // provided directly by the application, not loaded from disk.
    new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
    new_scanned_icd->handle = NULL;
    new_scanned_icd->api_version = api_version;
    new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
    new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
    new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
    new_scanned_icd->CreateInstance = fp_create_inst;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
#endif
    new_scanned_icd->interface_version = interface_version;

    new_scanned_icd->lib_name = NULL;
    icd_tramp_list->count++;

    loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
               "loader_add_direct_driver: Adding driver found in index %d of "
               "VkDirectDriverLoadingListLUNARG::pDrivers structure. pfnGetInstanceProcAddr was set to %p",
               index, pDriver->pfnGetInstanceProcAddr);

    return VK_SUCCESS;
}
1675 | | |
1676 | | // Search through VkInstanceCreateInfo's pNext chain for any drivers from the direct driver loading extension and load them. |
// Search through VkInstanceCreateInfo's pNext chain for any drivers from the direct driver loading extension and load them.
// Sets *direct_driver_loading_exclusive_mode to true when the app requested
// VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG, telling the caller to skip
// the system/environment driver search. Most misuse cases log a warning and
// return VK_SUCCESS rather than failing instance creation.
VkResult loader_scan_for_direct_drivers(const struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
                                        struct loader_icd_tramp_list *icd_tramp_list, bool *direct_driver_loading_exclusive_mode) {
    if (NULL == pCreateInfo) {
        // Don't do this logic unless we are being called from vkCreateInstance, when pCreateInfo will be non-null
        return VK_SUCCESS;
    }
    bool direct_driver_loading_enabled = false;
    // Try to see if VK_LUNARG_direct_driver_loading is enabled and if we are using it exclusively
    // Skip this step if inst is NULL, aka when this function is being called before instance creation
    if (inst != NULL && pCreateInfo->ppEnabledExtensionNames && pCreateInfo->enabledExtensionCount > 0) {
        // Look through the enabled extension list, make sure VK_LUNARG_direct_driver_loading is present
        for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
            if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME) == 0) {
                direct_driver_loading_enabled = true;
                break;
            }
        }
    }
    const VkDirectDriverLoadingListLUNARG *ddl_list = NULL;
    // Find the VkDirectDriverLoadingListLUNARG struct in the pNext chain of vkInstanceCreateInfo.
    // memcpy into a local VkBaseInStructure avoids misaligned/strict-aliasing
    // access to the app-provided chain nodes.
    const void *pNext = pCreateInfo->pNext;
    while (pNext) {
        VkBaseInStructure out_structure = {0};
        memcpy(&out_structure, pNext, sizeof(VkBaseInStructure));
        if (out_structure.sType == VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG) {
            ddl_list = (VkDirectDriverLoadingListLUNARG *)pNext;
            break;
        }
        pNext = out_structure.pNext;
    }
    if (NULL == ddl_list) {
        if (direct_driver_loading_enabled) {
            // Extension enabled but no struct provided: warn, then behave as if
            // the extension were not used.
            loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                       "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension was enabled but the "
                       "pNext chain of "
                       "VkInstanceCreateInfo did not contain the "
                       "VkDirectDriverLoadingListLUNARG structure.");
        }
        // Always want to exit early if there was no VkDirectDriverLoadingListLUNARG in the pNext chain
        return VK_SUCCESS;
    }

    if (!direct_driver_loading_enabled) {
        // Struct provided without enabling the extension: warn and ignore it.
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_scan_for_direct_drivers: The pNext chain of VkInstanceCreateInfo contained the "
                   "VkDirectDriverLoadingListLUNARG structure, but the VK_LUNARG_direct_driver_loading extension was "
                   "not enabled.");
        return VK_SUCCESS;
    }
    // If we are using exclusive mode, skip looking for any more drivers from system or environment variables
    if (ddl_list->mode == VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG) {
        *direct_driver_loading_exclusive_mode = true;
        loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension is active and specified "
                   "VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG, skipping system and environment "
                   "variable driver search mechanisms.");
    }
    if (NULL == ddl_list->pDrivers) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
                   "VkInstanceCreateInfo has a NULL pDrivers member.");
        return VK_SUCCESS;
    }
    if (ddl_list->driverCount == 0) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
                   "VkInstanceCreateInfo has a non-null pDrivers member but a driverCount member with a value "
                   "of zero.");
        return VK_SUCCESS;
    }
    // Go through all VkDirectDriverLoadingInfoLUNARG entries and add each driver
    // Because icd_tramp's are prepended, this will result in the drivers appearing at the end
    for (uint32_t i = 0; i < ddl_list->driverCount; i++) {
        VkResult res = loader_add_direct_driver(inst, i, &ddl_list->pDrivers[i], icd_tramp_list);
        // Only out-of-memory aborts the scan; other per-driver failures were
        // already logged and that driver is simply skipped.
        if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
            return res;
        }
    }

    return VK_SUCCESS;
}
1758 | | |
1759 | | VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list, |
1760 | 51 | const char *filename, uint32_t api_version, enum loader_layer_library_status *lib_status) { |
1761 | 51 | loader_platform_dl_handle handle = NULL; |
1762 | 51 | PFN_vkCreateInstance fp_create_inst = NULL; |
1763 | 51 | PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL; |
1764 | 51 | PFN_vkGetInstanceProcAddr fp_get_proc_addr = NULL; |
1765 | 51 | PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL; |
1766 | 51 | PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL; |
1767 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
1768 | | PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL; |
1769 | | #endif |
1770 | 51 | struct loader_scanned_icd *new_scanned_icd = NULL; |
1771 | 51 | uint32_t interface_vers; |
1772 | 51 | VkResult res = VK_SUCCESS; |
1773 | | |
1774 | | // This shouldn't happen, but the check is necessary because dlopen returns a handle to the main program when |
1775 | | // filename is NULL |
1776 | 51 | if (filename == NULL) { |
1777 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: A NULL filename was used, skipping this ICD"); |
1778 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1779 | 0 | goto out; |
1780 | 0 | } |
1781 | | |
1782 | | // TODO implement smarter opening/closing of libraries. For now this |
1783 | | // function leaves libraries open and the scanned_icd_clear closes them |
1784 | | #if defined(__Fuchsia__) |
1785 | | handle = loader_platform_open_driver(filename); |
1786 | | #else |
1787 | 51 | handle = loader_platform_open_library(filename); |
1788 | 51 | #endif |
1789 | 51 | if (NULL == handle) { |
1790 | 51 | loader_handle_load_library_error(inst, filename, lib_status); |
1791 | 51 | if (lib_status && *lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) { |
1792 | 1 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
1793 | 50 | } else { |
1794 | 50 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1795 | 50 | } |
1796 | 51 | goto out; |
1797 | 51 | } |
1798 | | |
1799 | | // Try to load the driver's exported vk_icdNegotiateLoaderICDInterfaceVersion |
1800 | 0 | fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion"); |
1801 | | |
1802 | | // If it isn't exported, we are dealing with either a v0, v1, or a v7 and up driver |
1803 | 0 | if (NULL == fp_negotiate_icd_version) { |
1804 | | // Try to load the driver's exported vk_icdGetInstanceProcAddr - if this is a v7 or up driver, we can use it to get |
1805 | | // the driver's vk_icdNegotiateLoaderICDInterfaceVersion function |
1806 | 0 | fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr"); |
1807 | | |
1808 | | // If we successfully loaded vk_icdGetInstanceProcAddr, try to get vk_icdNegotiateLoaderICDInterfaceVersion |
1809 | 0 | if (fp_get_proc_addr) { |
1810 | 0 | fp_negotiate_icd_version = |
1811 | 0 | (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)fp_get_proc_addr(NULL, "vk_icdNegotiateLoaderICDInterfaceVersion"); |
1812 | 0 | } |
1813 | 0 | } |
1814 | | |
1815 | | // Try to negotiate the Loader and Driver Interface Versions |
1816 | | // loader_get_icd_interface_version will check if fp_negotiate_icd_version is NULL, so we don't have to. |
1817 | | // If it *is* NULL, that means this driver uses interface version 0 or 1 |
1818 | 0 | if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) { |
1819 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1820 | 0 | "loader_scanned_icd_add: ICD %s doesn't support interface version compatible with loader, skip this ICD.", |
1821 | 0 | filename); |
1822 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1823 | 0 | goto out; |
1824 | 0 | } |
1825 | | |
1826 | | // If we didn't already query vk_icdGetInstanceProcAddr, try now |
1827 | 0 | if (NULL == fp_get_proc_addr) { |
1828 | 0 | fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr"); |
1829 | 0 | } |
1830 | | |
1831 | | // If vk_icdGetInstanceProcAddr is NULL, this ICD is using version 0 and so we should respond accordingly. |
1832 | 0 | if (NULL == fp_get_proc_addr) { |
1833 | | // Exporting vk_icdNegotiateLoaderICDInterfaceVersion but not vk_icdGetInstanceProcAddr violates Version 2's |
1834 | | // requirements, as for Version 2 to be supported Version 1 must also be supported |
1835 | 0 | if (interface_vers != 0) { |
1836 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1837 | 0 | "loader_scanned_icd_add: ICD %s reports an interface version of %d but doesn't export " |
1838 | 0 | "vk_icdGetInstanceProcAddr, skip this ICD.", |
1839 | 0 | filename, interface_vers); |
1840 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1841 | 0 | goto out; |
1842 | 0 | } |
1843 | | // Use deprecated interface from version 0 |
1844 | 0 | fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr"); |
1845 | 0 | if (NULL == fp_get_proc_addr) { |
1846 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1847 | 0 | "loader_scanned_icd_add: Attempt to retrieve either \'vkGetInstanceProcAddr\' or " |
1848 | 0 | "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.", |
1849 | 0 | filename); |
1850 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1851 | 0 | goto out; |
1852 | 0 | } else { |
1853 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
1854 | 0 | "loader_scanned_icd_add: Using deprecated ICD interface of \'vkGetInstanceProcAddr\' instead of " |
1855 | 0 | "\'vk_icdGetInstanceProcAddr\' for ICD %s", |
1856 | 0 | filename); |
1857 | 0 | } |
1858 | 0 | fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance"); |
1859 | 0 | if (NULL == fp_create_inst) { |
1860 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1861 | 0 | "loader_scanned_icd_add: Failed querying \'vkCreateInstance\' via dlsym/LoadLibrary for ICD %s", filename); |
1862 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1863 | 0 | goto out; |
1864 | 0 | } |
1865 | 0 | fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties"); |
1866 | 0 | if (NULL == fp_get_inst_ext_props) { |
1867 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1868 | 0 | "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via dlsym/LoadLibrary " |
1869 | 0 | "for ICD %s", |
1870 | 0 | filename); |
1871 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1872 | 0 | goto out; |
1873 | 0 | } |
1874 | 0 | } else { |
1875 | | // vk_icdGetInstanceProcAddr was successfully found, we can assume the version is at least one |
1876 | | // If vk_icdNegotiateLoaderICDInterfaceVersion was also found, interface_vers must be 2 or greater, so this check is |
1877 | | // fine |
1878 | 0 | if (interface_vers == 0) { |
1879 | 0 | interface_vers = 1; |
1880 | 0 | } |
1881 | |
|
1882 | 0 | fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance"); |
1883 | 0 | if (NULL == fp_create_inst) { |
1884 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1885 | 0 | "loader_scanned_icd_add: Could not get \'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\' for ICD %s", |
1886 | 0 | filename); |
1887 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1888 | 0 | goto out; |
1889 | 0 | } |
1890 | 0 | fp_get_inst_ext_props = |
1891 | 0 | (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties"); |
1892 | 0 | if (NULL == fp_get_inst_ext_props) { |
1893 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1894 | 0 | "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via " |
1895 | 0 | "\'vk_icdGetInstanceProcAddr\' for ICD %s", |
1896 | 0 | filename); |
1897 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
1898 | 0 | goto out; |
1899 | 0 | } |
1900 | | // Query "vk_icdGetPhysicalDeviceProcAddr" with vk_icdGetInstanceProcAddr if the library reports interface version 7 or |
1901 | | // greater, otherwise fallback to loading it from the platform dynamic linker |
1902 | 0 | if (interface_vers >= 7) { |
1903 | 0 | fp_get_phys_dev_proc_addr = |
1904 | 0 | (PFN_vk_icdGetPhysicalDeviceProcAddr)fp_get_proc_addr(NULL, "vk_icdGetPhysicalDeviceProcAddr"); |
1905 | 0 | } |
1906 | 0 | if (NULL == fp_get_phys_dev_proc_addr && interface_vers >= 3) { |
1907 | 0 | fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr"); |
1908 | 0 | } |
1909 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
1910 | | // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version |
1911 | | // 7 or greater, otherwise fallback to loading it from the platform dynamic linker |
1912 | | if (interface_vers >= 7) { |
1913 | | fp_enum_dxgi_adapter_phys_devs = |
1914 | | (PFN_vk_icdEnumerateAdapterPhysicalDevices)fp_get_proc_addr(NULL, "vk_icdEnumerateAdapterPhysicalDevices"); |
1915 | | } |
1916 | | if (NULL == fp_enum_dxgi_adapter_phys_devs && interface_vers >= 6) { |
1917 | | fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices"); |
1918 | | } |
1919 | | #endif |
1920 | 0 | } |
1921 | | |
1922 | | // check for enough capacity |
1923 | 0 | if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) { |
1924 | 0 | void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity, |
1925 | 0 | icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1926 | 0 | if (NULL == new_ptr) { |
1927 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
1928 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s", |
1929 | 0 | filename); |
1930 | 0 | goto out; |
1931 | 0 | } |
1932 | 0 | icd_tramp_list->scanned_list = new_ptr; |
1933 | | |
1934 | | // double capacity |
1935 | 0 | icd_tramp_list->capacity *= 2; |
1936 | 0 | } |
1937 | | |
1938 | 0 | loader_api_version api_version_struct = loader_make_version(api_version); |
1939 | 0 | if (interface_vers <= 4 && loader_check_version_meets_required(LOADER_VERSION_1_1_0, api_version_struct)) { |
1940 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
1941 | 0 | "loader_scanned_icd_add: Driver %s supports Vulkan %u.%u, but only supports loader interface version %u." |
1942 | 0 | " Interface version 5 or newer required to support this version of Vulkan (Policy #LDP_DRIVER_7)", |
1943 | 0 | filename, api_version_struct.major, api_version_struct.minor, interface_vers); |
1944 | 0 | } |
1945 | |
|
1946 | 0 | new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]); |
1947 | 0 | new_scanned_icd->handle = handle; |
1948 | 0 | new_scanned_icd->api_version = api_version; |
1949 | 0 | new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr; |
1950 | 0 | new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr; |
1951 | 0 | new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props; |
1952 | 0 | new_scanned_icd->CreateInstance = fp_create_inst; |
1953 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
1954 | | new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs; |
1955 | | #endif |
1956 | 0 | new_scanned_icd->interface_version = interface_vers; |
1957 | |
|
1958 | 0 | res = loader_copy_to_new_str(inst, filename, &new_scanned_icd->lib_name); |
1959 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
1960 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename); |
1961 | 0 | goto out; |
1962 | 0 | } |
1963 | 0 | icd_tramp_list->count++; |
1964 | |
|
1965 | 51 | out: |
1966 | 51 | if (res != VK_SUCCESS) { |
1967 | 51 | if (NULL != handle) { |
1968 | 0 | loader_platform_close_library(handle); |
1969 | 0 | } |
1970 | 51 | } |
1971 | | |
1972 | 51 | return res; |
1973 | 0 | } |
1974 | | |
#if defined(_WIN32)
// One-time loader initialization, invoked through InitOnceExecuteOnce on Windows.
// The three parameters are required by the INIT_ONCE callback signature but unused.
BOOL __stdcall loader_initialize(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *Context) {
    (void)InitOnce;
    (void)Parameter;
    (void)Context;
#else
// One-time loader initialization, run from the shared-library constructor below.
void loader_initialize(void) {
    // Create the global locks guarding instance creation, ICD preloading, and the
    // global instance list before any other loader entry point can run.
    loader_platform_thread_create_mutex(&loader_lock);
    loader_platform_thread_create_mutex(&loader_preload_icd_lock);
    loader_platform_thread_create_mutex(&loader_global_instance_list_lock);
    init_global_loader_settings();
#endif

    // initialize logging
    loader_init_global_debug_level();
#if defined(_WIN32)
    windows_initialization();
#endif

    loader_api_version version = loader_make_full_version(VK_HEADER_VERSION_COMPLETE);
    loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "Vulkan Loader Version %d.%d.%d", version.major, version.minor, version.patch);

#if defined(GIT_BRANCH_NAME) && defined(GIT_TAG_INFO)
    loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "[Vulkan Loader Git - Tag: " GIT_BRANCH_NAME ", Branch/Commit: " GIT_TAG_INFO "]");
#endif

    // Opt-in env var that keeps driver/layer libraries mapped for the process lifetime.
    // strncmp with length 2 accepts only the exact string "1" (it also compares the NUL).
    char *loader_disable_dynamic_library_unloading_env_var = loader_getenv("VK_LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING", NULL);
    if (loader_disable_dynamic_library_unloading_env_var &&
        0 == strncmp(loader_disable_dynamic_library_unloading_env_var, "1", 2)) {
        loader_disable_dynamic_library_unloading = true;
        loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: library unloading is disabled");
    } else {
        loader_disable_dynamic_library_unloading = false;
    }
    loader_free_getenv(loader_disable_dynamic_library_unloading_env_var, NULL);
#if defined(LOADER_USE_UNSAFE_FILE_SEARCH)
    loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: unsafe searching is enabled");
#endif
#if defined(_WIN32)
    return TRUE;
#endif
}
2017 | | |
// Tear down process-wide loader state created by loader_initialize().
void loader_release(void) {
    // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance.
    loader_unload_preloaded_icds();

    // Release the global settings, then the mutexes created in loader_initialize.
    teardown_global_loader_settings();
    loader_platform_thread_delete_mutex(&loader_lock);
    loader_platform_thread_delete_mutex(&loader_preload_icd_lock);
    loader_platform_thread_delete_mutex(&loader_global_instance_list_lock);
}
2028 | | |
2029 | | // Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later |
2030 | 0 | void loader_preload_icds(void) { |
2031 | 0 | loader_platform_thread_lock_mutex(&loader_preload_icd_lock); |
2032 | | |
2033 | | // Already preloaded, skip loading again. |
2034 | 0 | if (preloaded_icds.scanned_list != NULL) { |
2035 | 0 | loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); |
2036 | 0 | return; |
2037 | 0 | } |
2038 | | |
2039 | 0 | VkResult result = loader_icd_scan(NULL, &preloaded_icds, NULL, NULL); |
2040 | 0 | if (result != VK_SUCCESS) { |
2041 | 0 | loader_clear_scanned_icd_list(NULL, &preloaded_icds); |
2042 | 0 | } |
2043 | 0 | loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); |
2044 | 0 | } |
2045 | | |
// Release the ICD libraries that were preloaded by loader_preload_icds().
void loader_unload_preloaded_icds(void) {
    // Serialize against concurrent preload/unload of the shared preloaded_icds list.
    loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
    loader_clear_scanned_icd_list(NULL, &preloaded_icds);
    loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
}
2052 | | |
2053 | | #if !defined(_WIN32) |
// Run one-time loader setup automatically when this shared library is loaded (non-Windows).
__attribute__((constructor)) void loader_init_library(void) { loader_initialize(); }

// Release global loader state automatically when this shared library is unloaded (non-Windows).
__attribute__((destructor)) void loader_free_library(void) { loader_release(); }
2057 | | #endif |
2058 | | |
2059 | | // Get next file or dirname given a string list or registry key path |
2060 | | // |
2061 | | // \returns |
2062 | | // A pointer to first char in the next path. |
2063 | | // The next path (or NULL) in the list is returned in next_path. |
2064 | | // Note: input string is modified in some cases. PASS IN A COPY! |
2065 | 234k | char *loader_get_next_path(char *path) { |
2066 | 234k | uint32_t len; |
2067 | 234k | char *next; |
2068 | | |
2069 | 234k | if (path == NULL) return NULL; |
2070 | 234k | next = strchr(path, PATH_SEPARATOR); |
2071 | 234k | if (next == NULL) { |
2072 | 55.5k | len = (uint32_t)strlen(path); |
2073 | 55.5k | next = path + len; |
2074 | 178k | } else { |
2075 | 178k | *next = '\0'; |
2076 | 178k | next++; |
2077 | 178k | } |
2078 | | |
2079 | 234k | return next; |
2080 | 234k | } |
2081 | | |
2082 | | /* Processes a json manifest's library_path and the location of the json manifest to create the path of the library |
2083 | | * The output is stored in out_fullpath by allocating a string - so its the caller's responsibility to free it |
2084 | | * The output is the combination of the base path of manifest_file_path concatenated with library path |
2085 | | * If library_path is an absolute path, we do not prepend the base path of manifest_file_path |
2086 | | * |
2087 | | * This function takes ownership of library_path - caller does not need to worry about freeing it. |
2088 | | */ |
2089 | | VkResult combine_manifest_directory_and_library_path(const struct loader_instance *inst, char *library_path, |
2090 | 8.61k | const char *manifest_file_path, char **out_fullpath) { |
2091 | 8.61k | assert(library_path && manifest_file_path && out_fullpath); |
2092 | 8.61k | if (loader_platform_is_path_absolute(library_path)) { |
2093 | 1.02k | *out_fullpath = library_path; |
2094 | 1.02k | return VK_SUCCESS; |
2095 | 1.02k | } |
2096 | 7.58k | VkResult res = VK_SUCCESS; |
2097 | | |
2098 | 7.58k | size_t library_path_len = strlen(library_path); |
2099 | 7.58k | size_t manifest_file_path_str_len = strlen(manifest_file_path); |
2100 | 7.58k | bool library_path_contains_directory_symbol = false; |
2101 | 274M | for (size_t i = 0; i < library_path_len; i++) { |
2102 | 274M | if (library_path[i] == DIRECTORY_SYMBOL) { |
2103 | 1.64k | library_path_contains_directory_symbol = true; |
2104 | 1.64k | break; |
2105 | 1.64k | } |
2106 | 274M | } |
2107 | | // Means that the library_path is neither absolute nor relative - thus we should not modify it at all |
2108 | 7.58k | if (!library_path_contains_directory_symbol) { |
2109 | 5.94k | *out_fullpath = library_path; |
2110 | 5.94k | return VK_SUCCESS; |
2111 | 5.94k | } |
2112 | | // must include both a directory symbol and the null terminator |
2113 | 1.64k | size_t new_str_len = library_path_len + manifest_file_path_str_len + 1 + 1; |
2114 | | |
2115 | 1.64k | *out_fullpath = loader_instance_heap_calloc(inst, new_str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
2116 | 1.64k | if (NULL == *out_fullpath) { |
2117 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
2118 | 0 | goto out; |
2119 | 0 | } |
2120 | 1.64k | size_t cur_loc_in_out_fullpath = 0; |
2121 | | // look for the last occurrence of DIRECTORY_SYMBOL in manifest_file_path |
2122 | 1.64k | size_t last_directory_symbol = 0; |
2123 | 1.64k | bool found_directory_symbol = false; |
2124 | 96.3k | for (size_t i = 0; i < manifest_file_path_str_len; i++) { |
2125 | 94.6k | if (manifest_file_path[i] == DIRECTORY_SYMBOL) { |
2126 | 9.84k | last_directory_symbol = i + 1; // we want to include the symbol |
2127 | 9.84k | found_directory_symbol = true; |
2128 | | // dont break because we want to find the last occurrence |
2129 | 9.84k | } |
2130 | 94.6k | } |
2131 | | // Add manifest_file_path up to the last directory symbol |
2132 | 1.64k | if (found_directory_symbol) { |
2133 | 1.64k | loader_strncpy(*out_fullpath, new_str_len, manifest_file_path, last_directory_symbol); |
2134 | 1.64k | cur_loc_in_out_fullpath += last_directory_symbol; |
2135 | 1.64k | } |
2136 | 1.64k | loader_strncpy(&(*out_fullpath)[cur_loc_in_out_fullpath], new_str_len - cur_loc_in_out_fullpath, library_path, |
2137 | 1.64k | library_path_len); |
2138 | 1.64k | cur_loc_in_out_fullpath += library_path_len + 1; |
2139 | 1.64k | (*out_fullpath)[cur_loc_in_out_fullpath] = '\0'; |
2140 | | |
2141 | 1.64k | out: |
2142 | 1.64k | loader_instance_heap_free(inst, library_path); |
2143 | | |
2144 | 1.64k | return res; |
2145 | 1.64k | } |
2146 | | |
2147 | | // Given a filename (file) and a list of paths (in_dirs), try to find an existing |
2148 | | // file in the paths. If filename already is a path then no searching in the given paths. |
2149 | | // |
2150 | | // @return - A string in out_fullpath of either the full path or file. |
2151 | 26.5k | void loader_get_fullpath(const char *file, const char *in_dirs, size_t out_size, char *out_fullpath) { |
2152 | 26.5k | if (!loader_platform_is_path(file) && *in_dirs) { |
2153 | 26.3k | size_t dirs_copy_len = strlen(in_dirs) + 1; |
2154 | 26.3k | char *dirs_copy = loader_stack_alloc(dirs_copy_len); |
2155 | 26.3k | loader_strncpy(dirs_copy, dirs_copy_len, in_dirs, dirs_copy_len); |
2156 | | |
2157 | | // find if file exists after prepending paths in given list |
2158 | | // for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) { |
2159 | 26.3k | char *dir = dirs_copy; |
2160 | 26.3k | char *next_dir = loader_get_next_path(dir); |
2161 | 26.4k | while (*dir && next_dir) { |
2162 | 26.3k | int path_concat_ret = snprintf(out_fullpath, out_size, "%s%c%s", dir, DIRECTORY_SYMBOL, file); |
2163 | 26.3k | if (path_concat_ret < 0) { |
2164 | 0 | continue; |
2165 | 0 | } |
2166 | 26.3k | if (loader_platform_file_exists(out_fullpath)) { |
2167 | 26.1k | return; |
2168 | 26.1k | } |
2169 | 161 | dir = next_dir; |
2170 | 161 | next_dir = loader_get_next_path(dir); |
2171 | 161 | } |
2172 | 26.3k | } |
2173 | | |
2174 | 437 | (void)snprintf(out_fullpath, out_size, "%s", file); |
2175 | 437 | } |
2176 | | |
// Verify that all component layers in a meta-layer are valid.
// This function is potentially recursive, so already_checked_meta_layers (an array of
// length instance_layers->count) records which meta-layers have already been visited,
// preventing a stack overflow when meta-layers list each other as component layers.
// Pass NULL for already_checked_meta_layers on the initial call; it is stack-allocated
// and zeroed on first entry.
// Returns true only if every component layer exists, meets the meta-layer's API version,
// is not the meta-layer itself, and introduces no recursive meta-layer cycle.
bool verify_meta_layer_component_layers(const struct loader_instance *inst, size_t prop_index,
                                        struct loader_layer_list *instance_layers, bool *already_checked_meta_layers) {
    struct loader_layer_properties *prop = &instance_layers->list[prop_index];
    loader_api_version meta_layer_version = loader_make_version(prop->info.specVersion);

    if (NULL == already_checked_meta_layers) {
        already_checked_meta_layers = loader_stack_alloc(sizeof(bool) * instance_layers->count);
        if (already_checked_meta_layers == NULL) {
            return false;
        }
        memset(already_checked_meta_layers, 0, sizeof(bool) * instance_layers->count);
    }

    // Mark this meta layer as 'already checked', indicating which layers have already been recursed.
    already_checked_meta_layers[prop_index] = true;

    for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
        struct loader_layer_properties *comp_prop =
            loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
        if (comp_prop == NULL) {
            loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                       "verify_meta_layer_component_layers: Meta-layer %s can't find component layer %s at index %d."
                       " Skipping this layer.",
                       prop->info.layerName, prop->component_layer_names.list[comp_layer], comp_layer);

            return false;
        }

        // Check the version of each component layer; it must be at least the meta-layer's MAJOR.MINOR.
        loader_api_version comp_prop_version = loader_make_version(comp_prop->info.specVersion);
        if (!loader_check_version_meets_required(meta_layer_version, comp_prop_version)) {
            loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                       "verify_meta_layer_component_layers: Meta-layer uses API version %d.%d, but component "
                       "layer %d has API version %d.%d that is lower. Skipping this layer.",
                       meta_layer_version.major, meta_layer_version.minor, comp_layer, comp_prop_version.major,
                       comp_prop_version.minor);

            return false;
        }

        // Make sure the meta-layer isn't listing its own name as a component.
        if (!strcmp(prop->info.layerName, prop->component_layer_names.list[comp_layer])) {
            loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                       "verify_meta_layer_component_layers: Meta-layer %s lists itself in its component layer "
                       "list at index %d. Skipping this layer.",
                       prop->info.layerName, comp_layer);

            return false;
        }
        if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
            // INT32_MAX acts as a "not found" sentinel for the index search below.
            // NOTE(review): comp_prop_index is a size_t; a list with >= INT32_MAX entries
            // would collide with the sentinel - presumably unreachable in practice, confirm.
            size_t comp_prop_index = INT32_MAX;
            // Find the index of this component meta-layer so its "already checked" flag
            // can be consulted. No break, so the LAST name match wins.
            // NOTE(review): loader_find_layer_property above presumably returns the first
            // match; with duplicate layer names these could disagree - confirm intended.
            for (uint32_t i = 0; i < instance_layers->count; i++) {
                if (strcmp(comp_prop->info.layerName, instance_layers->list[i].info.layerName) == 0) {
                    comp_prop_index = i;
                }
            }
            if (comp_prop_index != INT32_MAX && already_checked_meta_layers[comp_prop_index]) {
                loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                           "verify_meta_layer_component_layers: Recursive depedency between Meta-layer %s and Meta-layer %s. "
                           "Skipping this layer.",
                           instance_layers->list[prop_index].info.layerName, comp_prop->info.layerName);
                return false;
            }

            loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
                       "verify_meta_layer_component_layers: Adding meta-layer %s which also contains meta-layer %s",
                       prop->info.layerName, comp_prop->info.layerName);

            // Recurse: a meta-layer used as a component must itself be fully valid.
            if (!verify_meta_layer_component_layers(inst, comp_prop_index, instance_layers, already_checked_meta_layers)) {
                loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                           "Meta-layer %s component layer %s can not find all component layers."
                           " Skipping this layer.",
                           prop->info.layerName, prop->component_layer_names.list[comp_layer]);
                return false;
            }
        }
    }
    // Didn't exit early, so that means every component layer passed all checks.
    loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
               "Meta-layer \"%s\" all %d component layers appear to be valid.", prop->info.layerName,
               prop->component_layer_names.count);

    // If layer logging is on, list the internals included in the meta-layer.
    for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "  [%d] %s", comp_layer, prop->component_layer_names.list[comp_layer]);
    }
    return true;
}
2270 | | |
2271 | | // Add any instance and device extensions from component layers to this layer |
2272 | | // list, so that anyone querying extensions will only need to look at the meta-layer |
2273 | | bool update_meta_layer_extensions_from_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop, |
2274 | 8.76k | struct loader_layer_list *instance_layers) { |
2275 | 8.76k | VkResult res = VK_SUCCESS; |
2276 | 14.6k | for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) { |
2277 | 5.84k | struct loader_layer_properties *comp_prop = |
2278 | 5.84k | loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers); |
2279 | | |
2280 | 5.84k | if (NULL != comp_prop->instance_extension_list.list) { |
2281 | 19.5k | for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) { |
2282 | 18.5k | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding instance extension %s", |
2283 | 18.5k | prop->info.layerName, prop->component_layer_names.list[comp_layer], |
2284 | 18.5k | comp_prop->instance_extension_list.list[ext].extensionName); |
2285 | | |
2286 | 18.5k | if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) { |
2287 | 13.7k | res = loader_add_to_ext_list(inst, &prop->instance_extension_list, 1, |
2288 | 13.7k | &comp_prop->instance_extension_list.list[ext]); |
2289 | 13.7k | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
2290 | 0 | return res; |
2291 | 0 | } |
2292 | 13.7k | } |
2293 | 18.5k | } |
2294 | 1.04k | } |
2295 | 5.84k | if (NULL != comp_prop->device_extension_list.list) { |
2296 | 35.0k | for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) { |
2297 | 33.3k | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding device extension %s", |
2298 | 33.3k | prop->info.layerName, prop->component_layer_names.list[comp_layer], |
2299 | 33.3k | comp_prop->device_extension_list.list[ext].props.extensionName); |
2300 | | |
2301 | 33.3k | if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) { |
2302 | 32.1k | loader_add_to_dev_ext_list(inst, &prop->device_extension_list, |
2303 | 32.1k | &comp_prop->device_extension_list.list[ext].props, NULL); |
2304 | 32.1k | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
2305 | 0 | return res; |
2306 | 0 | } |
2307 | 32.1k | } |
2308 | 33.3k | } |
2309 | 1.67k | } |
2310 | 5.84k | } |
2311 | 8.76k | return res; |
2312 | 8.76k | } |
2313 | | |
2314 | | // Verify that all meta-layers in a layer list are valid. |
2315 | | VkResult verify_all_meta_layers(struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
2316 | 6.78k | struct loader_layer_list *instance_layers, bool *override_layer_present) { |
2317 | 6.78k | VkResult res = VK_SUCCESS; |
2318 | 6.78k | *override_layer_present = false; |
2319 | 1.23M | for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) { |
2320 | 1.23M | struct loader_layer_properties *prop = &instance_layers->list[i]; |
2321 | | |
2322 | | // If this is a meta-layer, make sure it is valid |
2323 | 1.23M | if (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { |
2324 | 15.3k | if (verify_meta_layer_component_layers(inst, i, instance_layers, NULL)) { |
2325 | | // If any meta layer is valid, update its extension list to include the extensions from its component layers. |
2326 | 8.76k | res = update_meta_layer_extensions_from_component_layers(inst, prop, instance_layers); |
2327 | 8.76k | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
2328 | 0 | return res; |
2329 | 0 | } |
2330 | 8.76k | if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop)) { |
2331 | 218 | *override_layer_present = true; |
2332 | 218 | } |
2333 | 8.76k | } else { |
2334 | 6.60k | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
2335 | 6.60k | "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName); |
2336 | | |
2337 | 6.60k | loader_remove_layer_in_list(inst, instance_layers, i); |
2338 | 6.60k | i--; |
2339 | 6.60k | } |
2340 | 15.3k | } |
2341 | 1.23M | } |
2342 | 6.78k | return res; |
2343 | 6.78k | } |
2344 | | |
// If the path of the current executable matches any app_key_path of the override layers,
// remove all other override layers. Otherwise, if no matching app_key was found, remove
// all but the first global override layer (the one with no app_key_paths).
void remove_all_non_valid_override_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
    if (instance_layers == NULL) {
        return;
    }

    // Path of the current executable; matched against each override layer's app_keys.
    char cur_path[1024];
    char *ret = loader_platform_executable_path(cur_path, 1024);
    if (NULL == ret) {
        return;
    }
    // Find out if there is an override layer with the same app_key_path as the path to the current executable.
    // If more than one is found, remove the duplicates and use the first layer.
    // Remove any layers which aren't global and do not have the same app_key_path as the path to the current executable.
    bool found_active_override_layer = false;
    int global_layer_index = -1;  // index of the first app_key-less (global) override layer, -1 until seen
    for (uint32_t i = 0; i < instance_layers->count; i++) {
        struct loader_layer_properties *props = &instance_layers->list[i];
        if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
            if (props->app_key_paths.count > 0) {  // not the global layer
                for (uint32_t j = 0; j < props->app_key_paths.count; j++) {
                    if (strcmp(props->app_key_paths.list[j], cur_path) == 0) {
                        if (!found_active_override_layer) {
                            found_active_override_layer = true;
                        } else {
                            loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                                       "remove_all_non_valid_override_layers: Multiple override layers where the same path in "
                                       "app_keys "
                                       "was found. Using the first layer found");

                            // Remove duplicate active override layers that have the same app_key_path.
                            // NOTE(review): after this removal, `props` still points at slot i whose
                            // contents have shifted, yet the j-loop keeps reading props->app_key_paths
                            // from it - confirm this is intended.
                            loader_remove_layer_in_list(inst, instance_layers, i);
                            i--;
                        }
                    }
                }
                if (!found_active_override_layer) {
                    loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                               "--Override layer found but not used because app \'%s\' is not in \'app_keys\' list!", cur_path);

                    // Remove non-global override layers that don't have an app_key that matches cur_path.
                    loader_remove_layer_in_list(inst, instance_layers, i);
                    i--;
                }
            } else {
                // Global override layer (no app_keys): keep only the first one encountered.
                if (global_layer_index == -1) {
                    global_layer_index = i;
                } else {
                    loader_log(
                        inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                        "remove_all_non_valid_override_layers: Multiple global override layers found. Using the first global "
                        "layer found");
                    loader_remove_layer_in_list(inst, instance_layers, i);
                    i--;
                }
            }
        }
    }
    // Remove the global layer if a layer with the same app_key_path as the current executable was found.
    if (found_active_override_layer && global_layer_index >= 0) {
        loader_remove_layer_in_list(inst, instance_layers, global_layer_index);
    }
    // Should be at most 1 override layer in the list now.
    if (found_active_override_layer) {
        loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the override layer for app key %s", cur_path);
    } else if (global_layer_index >= 0) {
        loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the global override layer");
    }
}
2415 | | |
2416 | | /* The following are required in the "layer" object: |
2417 | | * "name" |
2418 | | * "type" |
2419 | | * (for non-meta layers) "library_path" |
2420 | | * (for meta layers) "component_layers" |
2421 | | * "api_version" |
2422 | | * "implementation_version" |
2423 | | * "description" |
2424 | | * (for implicit layers) "disable_environment" |
2425 | | */ |
2426 | | |
2427 | | VkResult loader_read_layer_json(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, |
2428 | 1.28M | cJSON *layer_node, loader_api_version version, bool is_implicit, char *filename) { |
2429 | 1.28M | assert(layer_instance_list); |
2430 | 1.28M | char *library_path = NULL; |
2431 | 1.28M | VkResult result = VK_SUCCESS; |
2432 | 1.28M | struct loader_layer_properties props = {0}; |
2433 | | |
2434 | 1.28M | result = loader_copy_to_new_str(inst, filename, &props.manifest_file_name); |
2435 | 1.28M | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) { |
2436 | 0 | goto out; |
2437 | 0 | } |
2438 | | |
2439 | | // Parse name |
2440 | | |
2441 | 1.28M | result = loader_parse_json_string_to_existing_str(layer_node, "name", VK_MAX_EXTENSION_NAME_SIZE, props.info.layerName); |
2442 | 1.28M | if (VK_ERROR_INITIALIZATION_FAILED == result) { |
2443 | 44.6k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2444 | 44.6k | "Layer located at %s didn't find required layer value \"name\" in manifest JSON file, skipping this layer", |
2445 | 44.6k | filename); |
2446 | 44.6k | goto out; |
2447 | 44.6k | } |
2448 | | |
2449 | | // Check if this layer's name matches the override layer name, set is_override to true if so. |
2450 | 1.24M | if (!strcmp(props.info.layerName, VK_OVERRIDE_LAYER_NAME)) { |
2451 | 5.16k | props.is_override = true; |
2452 | 5.16k | } |
2453 | | |
2454 | 1.24M | if (0 != strncmp(props.info.layerName, "VK_LAYER_", 9)) { |
2455 | 512k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, "Layer name %s does not conform to naming standard (Policy #LLP_LAYER_3)", |
2456 | 512k | props.info.layerName); |
2457 | 512k | } |
2458 | | |
2459 | | // Parse type |
2460 | 1.24M | char *type = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "type")); |
2461 | 1.24M | if (NULL == type) { |
2462 | 1.21M | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2463 | 1.21M | "Layer located at %s didn't find required layer value \"type\" in manifest JSON file, skipping this layer", |
2464 | 1.21M | filename); |
2465 | 1.21M | goto out; |
2466 | 1.21M | } |
2467 | | |
2468 | | // Add list entry |
2469 | 33.3k | if (!strcmp(type, "DEVICE")) { |
2470 | 206 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Device layers are deprecated. Skipping layer %s", |
2471 | 206 | props.info.layerName); |
2472 | 206 | result = VK_ERROR_INITIALIZATION_FAILED; |
2473 | 206 | goto out; |
2474 | 206 | } |
2475 | | |
2476 | | // Allow either GLOBAL or INSTANCE type interchangeably to handle layers that must work with older loaders |
2477 | 33.0k | if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) { |
2478 | 32.0k | props.type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER; |
2479 | 32.0k | if (!is_implicit) { |
2480 | 10 | props.type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER; |
2481 | 10 | } |
2482 | 32.0k | } else { |
2483 | 999 | result = VK_ERROR_INITIALIZATION_FAILED; |
2484 | 999 | goto out; |
2485 | 999 | } |
2486 | | |
2487 | | // Parse api_version |
2488 | 32.0k | char *api_version = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "api_version")); |
2489 | 32.0k | if (NULL == api_version) { |
2490 | 1.41k | loader_log( |
2491 | 1.41k | inst, VULKAN_LOADER_WARN_BIT, 0, |
2492 | 1.41k | "Layer located at %s didn't find required layer value \"api_version\" in manifest JSON file, skipping this layer", |
2493 | 1.41k | filename); |
2494 | 1.41k | goto out; |
2495 | 1.41k | } |
2496 | | |
2497 | 30.6k | props.info.specVersion = loader_parse_version_string(api_version); |
2498 | | |
2499 | | // Make sure the layer's manifest doesn't contain a non zero variant value |
2500 | 30.6k | if (VK_API_VERSION_VARIANT(props.info.specVersion) != 0) { |
2501 | 754 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
2502 | 754 | "Layer \"%s\" has an \'api_version\' field which contains a non-zero variant value of %d. " |
2503 | 754 | " Skipping Layer.", |
2504 | 754 | props.info.layerName, VK_API_VERSION_VARIANT(props.info.specVersion)); |
2505 | 754 | result = VK_ERROR_INITIALIZATION_FAILED; |
2506 | 754 | goto out; |
2507 | 754 | } |
2508 | | |
2509 | | // Parse implementation_version |
2510 | 29.9k | char *implementation_version = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "implementation_version")); |
2511 | 29.9k | if (NULL == implementation_version) { |
2512 | 2.19k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2513 | 2.19k | "Layer located at %s didn't find required layer value \"implementation_version\" in manifest JSON file, " |
2514 | 2.19k | "skipping this layer", |
2515 | 2.19k | filename); |
2516 | 2.19k | goto out; |
2517 | 2.19k | } |
2518 | 27.7k | props.info.implementationVersion = atoi(implementation_version); |
2519 | | |
2520 | | // Parse description |
2521 | | |
2522 | 27.7k | result = |
2523 | 27.7k | loader_parse_json_string_to_existing_str(layer_node, "description", VK_MAX_EXTENSION_NAME_SIZE, props.info.description); |
2524 | 27.7k | if (VK_ERROR_INITIALIZATION_FAILED == result) { |
2525 | 1.14k | loader_log( |
2526 | 1.14k | inst, VULKAN_LOADER_WARN_BIT, 0, |
2527 | 1.14k | "Layer located at %s didn't find required layer value \"description\" in manifest JSON file, skipping this layer", |
2528 | 1.14k | filename); |
2529 | 1.14k | goto out; |
2530 | 1.14k | } |
2531 | | |
2532 | | // Parse library_path |
2533 | | |
2534 | | // Library path no longer required unless component_layers is also not defined |
2535 | 26.5k | result = loader_parse_json_string(layer_node, "library_path", &library_path); |
2536 | 26.5k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) { |
2537 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2538 | 0 | "Skipping layer \"%s\" due to problem accessing the library_path value in the manifest JSON file", |
2539 | 0 | props.info.layerName); |
2540 | 0 | result = VK_ERROR_OUT_OF_HOST_MEMORY; |
2541 | 0 | goto out; |
2542 | 0 | } |
2543 | 26.5k | if (NULL != library_path) { |
2544 | 8.06k | if (NULL != loader_cJSON_GetObjectItem(layer_node, "component_layers")) { |
2545 | 293 | loader_log( |
2546 | 293 | inst, VULKAN_LOADER_WARN_BIT, 0, |
2547 | 293 | "Layer \"%s\" contains meta-layer-specific component_layers, but also defining layer library path. Both are not " |
2548 | 293 | "compatible, so skipping this layer", |
2549 | 293 | props.info.layerName); |
2550 | 293 | result = VK_ERROR_INITIALIZATION_FAILED; |
2551 | 293 | loader_instance_heap_free(inst, library_path); |
2552 | 293 | goto out; |
2553 | 293 | } |
2554 | | |
2555 | | // This function takes ownership of library_path_str - so we don't need to clean it up |
2556 | 7.77k | result = combine_manifest_directory_and_library_path(inst, library_path, filename, &props.lib_name); |
2557 | 7.77k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2558 | 7.77k | } |
2559 | | |
2560 | | // Parse component_layers |
2561 | | |
2562 | 26.2k | if (NULL == library_path) { |
2563 | 18.5k | if (!loader_check_version_meets_required(LOADER_VERSION_1_1_0, version)) { |
2564 | 11.9k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2565 | 11.9k | "Layer \"%s\" contains meta-layer-specific component_layers, but using older JSON file version.", |
2566 | 11.9k | props.info.layerName); |
2567 | 11.9k | } |
2568 | | |
2569 | 18.5k | result = loader_parse_json_array_of_strings(inst, layer_node, "component_layers", &(props.component_layer_names)); |
2570 | 18.5k | if (VK_ERROR_OUT_OF_HOST_MEMORY == result) { |
2571 | 0 | goto out; |
2572 | 0 | } |
2573 | 18.5k | if (VK_ERROR_INITIALIZATION_FAILED == result) { |
2574 | 1.49k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2575 | 1.49k | "Layer \"%s\" is missing both library_path and component_layers fields. One or the other MUST be defined. " |
2576 | 1.49k | "Skipping this layer", |
2577 | 1.49k | props.info.layerName); |
2578 | 1.49k | goto out; |
2579 | 1.49k | } |
2580 | | // This is now, officially, a meta-layer |
2581 | 17.0k | props.type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER; |
2582 | 17.0k | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Encountered meta-layer \"%s\"", |
2583 | 17.0k | props.info.layerName); |
2584 | 17.0k | } |
2585 | | |
2586 | | // Parse blacklisted_layers |
2587 | | |
2588 | 24.7k | if (props.is_override) { |
2589 | 1.60k | result = loader_parse_json_array_of_strings(inst, layer_node, "blacklisted_layers", &(props.blacklist_layer_names)); |
2590 | 1.60k | if (VK_ERROR_OUT_OF_HOST_MEMORY == result) { |
2591 | 0 | goto out; |
2592 | 0 | } |
2593 | 1.60k | } |
2594 | | |
2595 | | // Parse override_paths |
2596 | | |
2597 | 24.7k | result = loader_parse_json_array_of_strings(inst, layer_node, "override_paths", &(props.override_paths)); |
2598 | 24.7k | if (VK_ERROR_OUT_OF_HOST_MEMORY == result) { |
2599 | 0 | goto out; |
2600 | 0 | } |
2601 | 24.7k | if (NULL != props.override_paths.list && !loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) { |
2602 | 101 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2603 | 101 | "Layer \"%s\" contains meta-layer-specific override paths, but using older JSON file version.", |
2604 | 101 | props.info.layerName); |
2605 | 101 | } |
2606 | | |
2607 | | // Parse disable_environment |
2608 | | |
2609 | 24.7k | if (is_implicit) { |
2610 | 24.7k | cJSON *disable_environment = loader_cJSON_GetObjectItem(layer_node, "disable_environment"); |
2611 | 24.7k | if (disable_environment == NULL) { |
2612 | 2.92k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2613 | 2.92k | "Layer \"%s\" doesn't contain required layer object disable_environment in the manifest JSON file, skipping " |
2614 | 2.92k | "this layer", |
2615 | 2.92k | props.info.layerName); |
2616 | 2.92k | result = VK_ERROR_INITIALIZATION_FAILED; |
2617 | 2.92k | goto out; |
2618 | 2.92k | } |
2619 | | |
2620 | 21.8k | if (!disable_environment->child || disable_environment->child->type != cJSON_String || |
2621 | 21.8k | !disable_environment->child->string || !disable_environment->child->valuestring) { |
2622 | 1.16k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2623 | 1.16k | "Layer \"%s\" doesn't contain required child value in object disable_environment in the manifest JSON file, " |
2624 | 1.16k | "skipping this layer (Policy #LLP_LAYER_9)", |
2625 | 1.16k | props.info.layerName); |
2626 | 1.16k | result = VK_ERROR_INITIALIZATION_FAILED; |
2627 | 1.16k | goto out; |
2628 | 1.16k | } |
2629 | 20.7k | result = loader_copy_to_new_str(inst, disable_environment->child->string, &(props.disable_env_var.name)); |
2630 | 20.7k | if (VK_SUCCESS != result) goto out; |
2631 | 20.7k | result = loader_copy_to_new_str(inst, disable_environment->child->valuestring, &(props.disable_env_var.value)); |
2632 | 20.7k | if (VK_SUCCESS != result) goto out; |
2633 | 20.7k | } |
2634 | | |
2635 | | // Now get all optional items and objects and put in list: |
2636 | | // functions |
2637 | | // instance_extensions |
2638 | | // device_extensions |
2639 | | // enable_environment (implicit layers only) |
2640 | | // library_arch |
2641 | | |
2642 | | // Layer interface functions |
2643 | | // vkGetInstanceProcAddr |
2644 | | // vkGetDeviceProcAddr |
2645 | | // vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0) |
2646 | 20.7k | cJSON *functions = loader_cJSON_GetObjectItem(layer_node, "functions"); |
2647 | 20.7k | if (functions != NULL) { |
2648 | 1.37k | if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) { |
2649 | 672 | result = loader_parse_json_string(functions, "vkNegotiateLoaderLayerInterfaceVersion", |
2650 | 672 | &props.functions.str_negotiate_interface); |
2651 | 672 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2652 | 672 | } |
2653 | 1.37k | result = loader_parse_json_string(functions, "vkGetInstanceProcAddr", &props.functions.str_gipa); |
2654 | 1.37k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2655 | | |
2656 | 1.37k | if (NULL == props.functions.str_negotiate_interface && props.functions.str_gipa && |
2657 | 1.37k | loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) { |
2658 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
2659 | 0 | "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON " |
2660 | 0 | "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for " |
2661 | 0 | "compatibility reasons it may be desirable to continue using the deprecated tag.", |
2662 | 0 | props.info.layerName); |
2663 | 0 | } |
2664 | | |
2665 | 1.37k | result = loader_parse_json_string(functions, "vkGetDeviceProcAddr", &props.functions.str_gdpa); |
2666 | 1.37k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2667 | | |
2668 | 1.37k | if (NULL == props.functions.str_negotiate_interface && props.functions.str_gdpa && |
2669 | 1.37k | loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) { |
2670 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
2671 | 0 | "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON " |
2672 | 0 | "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for " |
2673 | 0 | "compatibility reasons it may be desirable to continue using the deprecated tag.", |
2674 | 0 | props.info.layerName); |
2675 | 0 | } |
2676 | 1.37k | } |
2677 | | |
2678 | | // instance_extensions |
2679 | | // array of { |
2680 | | // name |
2681 | | // spec_version |
2682 | | // } |
2683 | | |
2684 | 20.7k | cJSON *instance_extensions = loader_cJSON_GetObjectItem(layer_node, "instance_extensions"); |
2685 | 20.7k | if (instance_extensions != NULL && instance_extensions->type == cJSON_Array) { |
2686 | 1.97k | cJSON *ext_item = NULL; |
2687 | 60.9k | cJSON_ArrayForEach(ext_item, instance_extensions) { |
2688 | 60.9k | if (ext_item->type != cJSON_Object) { |
2689 | 854 | continue; |
2690 | 854 | } |
2691 | | |
2692 | 60.1k | VkExtensionProperties ext_prop = {0}; |
2693 | 60.1k | result = loader_parse_json_string_to_existing_str(ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE, ext_prop.extensionName); |
2694 | 60.1k | if (result == VK_ERROR_INITIALIZATION_FAILED) { |
2695 | 7.08k | continue; |
2696 | 7.08k | } |
2697 | 53.0k | char *spec_version = NULL; |
2698 | 53.0k | result = loader_parse_json_string(ext_item, "spec_version", &spec_version); |
2699 | 53.0k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2700 | 53.0k | if (NULL != spec_version) { |
2701 | 69 | ext_prop.specVersion = atoi(spec_version); |
2702 | 69 | } |
2703 | 53.0k | loader_instance_heap_free(inst, spec_version); |
2704 | 53.0k | bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop); |
2705 | 53.0k | if (!ext_unsupported) { |
2706 | 52.4k | loader_add_to_ext_list(inst, &props.instance_extension_list, 1, &ext_prop); |
2707 | 52.4k | } |
2708 | 53.0k | } |
2709 | 1.97k | } |
2710 | | |
2711 | | // device_extensions |
2712 | | // array of { |
2713 | | // name |
2714 | | // spec_version |
2715 | | // entrypoints |
2716 | | // } |
2717 | 20.7k | cJSON *device_extensions = loader_cJSON_GetObjectItem(layer_node, "device_extensions"); |
2718 | 20.7k | if (device_extensions != NULL && device_extensions->type == cJSON_Array) { |
2719 | 1.92k | cJSON *ext_item = NULL; |
2720 | 46.1k | cJSON_ArrayForEach(ext_item, device_extensions) { |
2721 | 46.1k | if (ext_item->type != cJSON_Object) { |
2722 | 233 | continue; |
2723 | 233 | } |
2724 | | |
2725 | 45.9k | VkExtensionProperties ext_prop = {0}; |
2726 | 45.9k | result = loader_parse_json_string_to_existing_str(ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE, ext_prop.extensionName); |
2727 | 45.9k | if (result == VK_ERROR_INITIALIZATION_FAILED) { |
2728 | 7.99k | continue; |
2729 | 7.99k | } |
2730 | | |
2731 | 37.9k | char *spec_version = NULL; |
2732 | 37.9k | result = loader_parse_json_string(ext_item, "spec_version", &spec_version); |
2733 | 37.9k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2734 | 37.9k | if (NULL != spec_version) { |
2735 | 118 | ext_prop.specVersion = atoi(spec_version); |
2736 | 118 | } |
2737 | 37.9k | loader_instance_heap_free(inst, spec_version); |
2738 | | |
2739 | 37.9k | cJSON *entrypoints = loader_cJSON_GetObjectItem(ext_item, "entrypoints"); |
2740 | 37.9k | if (entrypoints == NULL) { |
2741 | 37.9k | result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, NULL); |
2742 | 37.9k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2743 | 37.9k | continue; |
2744 | 37.9k | } |
2745 | | |
2746 | 54 | struct loader_string_list entrys = {0}; |
2747 | 54 | result = loader_parse_json_array_of_strings(inst, ext_item, "entrypoints", &entrys); |
2748 | 54 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2749 | 54 | result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, &entrys); |
2750 | 54 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2751 | 54 | } |
2752 | 1.92k | } |
2753 | 20.7k | if (is_implicit) { |
2754 | 20.7k | cJSON *enable_environment = loader_cJSON_GetObjectItem(layer_node, "enable_environment"); |
2755 | | |
2756 | | // enable_environment is optional |
2757 | 20.7k | if (enable_environment && enable_environment->child && enable_environment->child->type == cJSON_String && |
2758 | 20.7k | enable_environment->child->string && enable_environment->child->valuestring) { |
2759 | 2.11k | result = loader_copy_to_new_str(inst, enable_environment->child->string, &(props.enable_env_var.name)); |
2760 | 2.11k | if (VK_SUCCESS != result) goto out; |
2761 | 2.11k | result = loader_copy_to_new_str(inst, enable_environment->child->valuestring, &(props.enable_env_var.value)); |
2762 | 2.11k | if (VK_SUCCESS != result) goto out; |
2763 | 2.11k | } |
2764 | 20.7k | } |
2765 | | |
2766 | | // Read in the pre-instance stuff |
2767 | 20.7k | cJSON *pre_instance = loader_cJSON_GetObjectItem(layer_node, "pre_instance_functions"); |
2768 | 20.7k | if (NULL != pre_instance) { |
2769 | | // Supported versions started in 1.1.2, so anything newer |
2770 | 1.22k | if (!loader_check_version_meets_required(loader_combine_version(1, 1, 2), version)) { |
2771 | 632 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
2772 | 632 | "Found pre_instance_functions section in layer from \"%s\". This section is only valid in manifest version " |
2773 | 632 | "1.1.2 or later. The section will be ignored", |
2774 | 632 | filename); |
2775 | 632 | } else if (!is_implicit) { |
2776 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2777 | 0 | "Found pre_instance_functions section in explicit layer from \"%s\". This section is only valid in implicit " |
2778 | 0 | "layers. The section will be ignored", |
2779 | 0 | filename); |
2780 | 596 | } else { |
2781 | 596 | result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceExtensionProperties", |
2782 | 596 | &props.pre_instance_functions.enumerate_instance_extension_properties); |
2783 | 596 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2784 | | |
2785 | 596 | result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceLayerProperties", |
2786 | 596 | &props.pre_instance_functions.enumerate_instance_layer_properties); |
2787 | 596 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2788 | | |
2789 | 596 | result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceVersion", |
2790 | 596 | &props.pre_instance_functions.enumerate_instance_version); |
2791 | 596 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2792 | 596 | } |
2793 | 1.22k | } |
2794 | | |
2795 | 20.7k | if (loader_cJSON_GetObjectItem(layer_node, "app_keys")) { |
2796 | 5.39k | if (!props.is_override) { |
2797 | 4.28k | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
2798 | 4.28k | "Layer %s contains app_keys, but any app_keys can only be provided by the override meta layer. " |
2799 | 4.28k | "These will be ignored.", |
2800 | 4.28k | props.info.layerName); |
2801 | 4.28k | } |
2802 | | |
2803 | 5.39k | result = loader_parse_json_array_of_strings(inst, layer_node, "app_keys", &props.app_key_paths); |
2804 | 5.39k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2805 | 5.39k | } |
2806 | | |
2807 | 20.7k | char *library_arch = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "library_arch")); |
2808 | 20.7k | if (NULL != library_arch) { |
2809 | 444 | if ((strncmp(library_arch, "32", 2) == 0 && sizeof(void *) != 4) || |
2810 | 444 | (strncmp(library_arch, "64", 2) == 0 && sizeof(void *) != 8)) { |
2811 | 210 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
2812 | 210 | "The library architecture in layer %s doesn't match the current running architecture, skipping this layer", |
2813 | 210 | filename); |
2814 | 210 | result = VK_ERROR_INITIALIZATION_FAILED; |
2815 | 210 | goto out; |
2816 | 210 | } |
2817 | 444 | } |
2818 | | |
2819 | 20.5k | result = VK_SUCCESS; |
2820 | | |
2821 | 1.28M | out: |
2822 | | // Try to append the layer property |
2823 | 1.28M | if (VK_SUCCESS == result) { |
2824 | 1.23M | result = loader_append_layer_property(inst, layer_instance_list, &props); |
2825 | 1.23M | } |
2826 | | // If appending fails - free all the memory allocated in it |
2827 | 1.28M | if (VK_SUCCESS != result) { |
2828 | 53.8k | loader_free_layer_properties(inst, &props); |
2829 | 53.8k | } |
2830 | 1.28M | return result; |
2831 | 20.5k | } |
2832 | | |
2833 | 5.90k | bool is_valid_layer_json_version(const loader_api_version *layer_json) { |
2834 | | // Supported versions are: 1.0.0, 1.0.1, 1.1.0 - 1.1.2, and 1.2.0 - 1.2.1. |
2835 | 5.90k | if ((layer_json->major == 1 && layer_json->minor == 2 && layer_json->patch < 2) || |
2836 | 5.90k | (layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) || |
2837 | 5.90k | (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) { |
2838 | 511 | return true; |
2839 | 511 | } |
2840 | 5.39k | return false; |
2841 | 5.90k | } |
2842 | | |
// Given a cJSON struct (json) of the top level JSON object from a layer manifest
// file, add an entry (or entries) to layer_instance_list, filling out each
// loader_layer_properties from the corresponding "layer"/"layers" JSON object
// via loader_read_layer_json().
//
// \returns
// A VkResult: the result of the last loader_read_layer_json() call, or
// VK_ERROR_INITIALIZATION_FAILED when the manifest is not an object, lacks
// file_format_version, or contains no usable layer node.
// If the json input object does not have all the required fields no entry
// is added to the list.
VkResult loader_add_layer_properties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, cJSON *json,
                                     bool is_implicit, char *filename) {
    // The following Fields in layer manifest file that are required:
    //   - "file_format_version"
    //   - If more than one "layer" object are used, then the "layers" array is
    //     required
    VkResult result = VK_ERROR_INITIALIZATION_FAILED;
    // Make sure sure the top level json value is an object
    if (!json || json->type != cJSON_Object) {
        goto out;
    }
    char *file_vers = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(json, "file_format_version"));
    if (NULL == file_vers) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "loader_add_layer_properties: Manifest %s missing required field file_format_version", filename);
        goto out;
    }

    loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, "Found manifest file %s (file version %s)", filename, file_vers);
    // Get the major/minor/and patch as integers for easier comparison
    loader_api_version json_version = loader_make_full_version(loader_parse_version_string(file_vers));

    // Unknown versions are warned about but still parsed (best effort)
    if (!is_valid_layer_json_version(&json_version)) {
        loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "loader_add_layer_properties: %s has unknown layer manifest file version %d.%d.%d. May cause errors.", filename,
                   json_version.major, json_version.minor, json_version.patch);
    }

    // If "layers" is present, read in the array of layer objects
    cJSON *layers_node = loader_cJSON_GetObjectItem(json, "layers");
    if (layers_node != NULL) {
        // Supported versions started in 1.0.1, so anything newer
        if (!loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
            loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                       "loader_add_layer_properties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting "
                       "version %s",
                       filename, file_vers);
        }
        cJSON *layer_node = NULL;
        cJSON_ArrayForEach(layer_node, layers_node) {
            if (layer_node->type != cJSON_Object) {
                loader_log(
                    inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                    "loader_add_layer_properties: Array element in \"layers\" field in manifest JSON file %s is not an object. "
                    "Skipping this file",
                    filename);
                goto out;
            }
            result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
        }
    } else {
        // Otherwise, try to read in individual layers
        cJSON *layer_node = loader_cJSON_GetObjectItem(json, "layer");
        if (layer_node == NULL) {
            // Don't warn if this happens to be an ICD manifest
            if (loader_cJSON_GetObjectItem(json, "ICD") == NULL) {
                loader_log(
                    inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                    "loader_add_layer_properties: Can not find 'layer' object in manifest JSON file %s. Skipping this file.",
                    filename);
            }
            goto out;
        }
        // Loop through all "layer" objects in the file to get a count of them
        // first.
        // NOTE(review): this walks the cJSON sibling chain from the "layer" node,
        // which appears to count every subsequent member of the top-level object,
        // not only members named "layer" — presumably relying on duplicate keys
        // being chained as siblings by the parser; verify against the cJSON impl.
        uint16_t layer_count = 0;
        cJSON *tempNode = layer_node;
        do {
            tempNode = tempNode->next;
            layer_count++;
        } while (tempNode != NULL);

        // Throw a warning if we encounter multiple "layer" objects in file
        // versions newer than 1.0.0. Having multiple objects with the same
        // name at the same level is actually a JSON standard violation.
        if (layer_count > 1 && loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                       "loader_add_layer_properties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\". "
                       "Please use 'layers' : [] array instead in %s.",
                       filename);
        } else {
            // Read every sibling node starting at "layer" as a layer object
            do {
                result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
                layer_node = layer_node->next;
            } while (layer_node != NULL);
        }
    }

out:

    return result;
}
2944 | | |
2945 | 161k | size_t determine_data_file_path_size(const char *cur_path, size_t relative_path_size) { |
2946 | 161k | size_t path_size = 0; |
2947 | | |
2948 | 161k | if (NULL != cur_path) { |
2949 | | // For each folder in cur_path, (detected by finding additional |
2950 | | // path separators in the string) we need to add the relative path on |
2951 | | // the end. Plus, leave an additional two slots on the end to add an |
2952 | | // additional directory slash and path separator if needed |
2953 | 161k | path_size += strlen(cur_path) + relative_path_size + 2; |
2954 | 31.0M | for (const char *x = cur_path; *x; ++x) { |
2955 | 30.9M | if (*x == PATH_SEPARATOR) { |
2956 | 16.4k | path_size += relative_path_size + 2; |
2957 | 16.4k | } |
2958 | 30.9M | } |
2959 | 161k | } |
2960 | | |
2961 | 161k | return path_size; |
2962 | 161k | } |
2963 | | |
2964 | 161k | void copy_data_file_info(const char *cur_path, const char *relative_path, size_t relative_path_size, char **output_path) { |
2965 | 161k | if (NULL != cur_path) { |
2966 | 161k | uint32_t start = 0; |
2967 | 161k | uint32_t stop = 0; |
2968 | 161k | char *cur_write = *output_path; |
2969 | | |
2970 | 334k | while (cur_path[start] != '\0') { |
2971 | 189k | while (cur_path[start] == PATH_SEPARATOR) { |
2972 | 16.4k | start++; |
2973 | 16.4k | } |
2974 | 173k | stop = start; |
2975 | 31.0M | while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') { |
2976 | 30.8M | stop++; |
2977 | 30.8M | } |
2978 | 173k | const size_t s = stop - start; |
2979 | 173k | if (s) { |
2980 | 172k | memcpy(cur_write, &cur_path[start], s); |
2981 | 172k | cur_write += s; |
2982 | | |
2983 | | // If this is a specific JSON file, just add it and don't add any |
2984 | | // relative path or directory symbol to it. |
2985 | 172k | if (!is_json(cur_write - 5, s)) { |
2986 | | // Add the relative directory if present. |
2987 | 171k | if (relative_path_size > 0) { |
2988 | | // If last symbol written was not a directory symbol, add it. |
2989 | 101k | if (*(cur_write - 1) != DIRECTORY_SYMBOL) { |
2990 | 101k | *cur_write++ = DIRECTORY_SYMBOL; |
2991 | 101k | } |
2992 | 101k | memcpy(cur_write, relative_path, relative_path_size); |
2993 | 101k | cur_write += relative_path_size; |
2994 | 101k | } |
2995 | 171k | } |
2996 | | |
2997 | 172k | *cur_write++ = PATH_SEPARATOR; |
2998 | 172k | start = stop; |
2999 | 172k | } |
3000 | 173k | } |
3001 | 161k | *output_path = cur_write; |
3002 | 161k | } |
3003 | 161k | } |
3004 | | |
3005 | | // If the file found is a manifest file name, add it to the out_files manifest list. |
3006 | 26.5k | VkResult add_if_manifest_file(const struct loader_instance *inst, const char *file_name, struct loader_string_list *out_files) { |
3007 | 26.5k | VkResult vk_result = VK_SUCCESS; |
3008 | | |
3009 | 26.5k | assert(NULL != file_name && "add_if_manifest_file: Received NULL pointer for file_name"); |
3010 | 26.5k | assert(NULL != out_files && "add_if_manifest_file: Received NULL pointer for out_files"); |
3011 | | |
3012 | | // Look for files ending with ".json" suffix |
3013 | 26.5k | size_t name_len = strlen(file_name); |
3014 | 26.5k | const char *name_suffix = file_name + name_len - 5; |
3015 | 26.5k | if (!is_json(name_suffix, name_len)) { |
3016 | | // Use incomplete to indicate invalid name, but to keep going. |
3017 | 18.5k | vk_result = VK_INCOMPLETE; |
3018 | 18.5k | goto out; |
3019 | 18.5k | } |
3020 | | |
3021 | 8.00k | vk_result = copy_str_to_string_list(inst, out_files, file_name, name_len); |
3022 | | |
3023 | 26.5k | out: |
3024 | | |
3025 | 26.5k | return vk_result; |
3026 | 8.00k | } |
3027 | | |
3028 | | // Add any files found in the search_path. If any path in the search path points to a specific JSON, attempt to |
3029 | | // only open that one JSON. Otherwise, if the path is a folder, search the folder for JSON files. |
3030 | | VkResult add_data_files(const struct loader_instance *inst, char *search_path, struct loader_string_list *out_files, |
3031 | 14.5k | bool use_first_found_manifest) { |
3032 | 14.5k | VkResult vk_result = VK_SUCCESS; |
3033 | 14.5k | char full_path[2048]; |
3034 | 14.5k | #if !defined(_WIN32) |
3035 | 14.5k | char temp_path[2048]; |
3036 | 14.5k | #endif |
3037 | | |
3038 | | // Now, parse the paths |
3039 | 14.5k | char *next_file = search_path; |
3040 | 118k | while (NULL != next_file && *next_file != '\0') { |
3041 | 103k | char *name = NULL; |
3042 | 103k | char *cur_file = next_file; |
3043 | 103k | next_file = loader_get_next_path(cur_file); |
3044 | | |
3045 | | // Is this a JSON file, then try to open it. |
3046 | 103k | size_t len = strlen(cur_file); |
3047 | 103k | if (is_json(cur_file + len - 5, len)) { |
3048 | | #if defined(_WIN32) |
3049 | | name = cur_file; |
3050 | | #elif COMMON_UNIX_PLATFORMS |
3051 | | // Only Linux has relative paths, make a copy of location so it isn't modified |
3052 | 536 | size_t str_len; |
3053 | 536 | if (NULL != next_file) { |
3054 | 536 | str_len = next_file - cur_file + 1; |
3055 | 536 | } else { |
3056 | 0 | str_len = strlen(cur_file) + 1; |
3057 | 0 | } |
3058 | 536 | if (str_len > sizeof(temp_path)) { |
3059 | 99 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "add_data_files: Path to %s too long", cur_file); |
3060 | 99 | continue; |
3061 | 99 | } |
3062 | 437 | strncpy(temp_path, cur_file, str_len); |
3063 | 437 | name = temp_path; |
3064 | | #else |
3065 | | #warning add_data_files must define relative path copy for this platform |
3066 | | #endif |
3067 | 437 | loader_get_fullpath(cur_file, name, sizeof(full_path), full_path); |
3068 | 437 | name = full_path; |
3069 | | |
3070 | 437 | VkResult local_res; |
3071 | 437 | local_res = add_if_manifest_file(inst, name, out_files); |
3072 | | |
3073 | | // Incomplete means this was not a valid data file. |
3074 | 437 | if (local_res == VK_INCOMPLETE) { |
3075 | 0 | continue; |
3076 | 437 | } else if (local_res != VK_SUCCESS) { |
3077 | 0 | vk_result = local_res; |
3078 | 0 | break; |
3079 | 0 | } |
3080 | 103k | } else { // Otherwise, treat it as a directory |
3081 | 103k | DIR *dir_stream = loader_opendir(inst, cur_file); |
3082 | 103k | if (NULL == dir_stream) { |
3083 | 95.5k | continue; |
3084 | 95.5k | } |
3085 | 33.9k | while (1) { |
3086 | 33.9k | errno = 0; |
3087 | 33.9k | struct dirent *dir_entry = readdir(dir_stream); |
3088 | 33.9k | #if !defined(WIN32) // Windows doesn't use readdir, don't check errors on functions which aren't called |
3089 | 33.9k | if (errno != 0) { |
3090 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "readdir failed with %d: %s", errno, strerror(errno)); |
3091 | 0 | break; |
3092 | 0 | } |
3093 | 33.9k | #endif |
3094 | 33.9k | if (NULL == dir_entry) { |
3095 | 7.81k | break; |
3096 | 7.81k | } |
3097 | | |
3098 | 26.1k | name = &(dir_entry->d_name[0]); |
3099 | 26.1k | loader_get_fullpath(name, cur_file, sizeof(full_path), full_path); |
3100 | 26.1k | name = full_path; |
3101 | | |
3102 | 26.1k | VkResult local_res; |
3103 | 26.1k | local_res = add_if_manifest_file(inst, name, out_files); |
3104 | | |
3105 | | // Incomplete means this was not a valid data file. |
3106 | 26.1k | if (local_res == VK_INCOMPLETE) { |
3107 | 18.5k | continue; |
3108 | 18.5k | } else if (local_res != VK_SUCCESS) { |
3109 | 0 | vk_result = local_res; |
3110 | 0 | break; |
3111 | 0 | } |
3112 | 26.1k | } |
3113 | 7.81k | loader_closedir(inst, dir_stream); |
3114 | 7.81k | if (vk_result != VK_SUCCESS) { |
3115 | 0 | goto out; |
3116 | 0 | } |
3117 | 7.81k | } |
3118 | 8.25k | if (use_first_found_manifest && out_files->count > 0) { |
3119 | 0 | break; |
3120 | 0 | } |
3121 | 8.25k | } |
3122 | | |
3123 | 14.5k | out: |
3124 | | |
3125 | 14.5k | return vk_result; |
3126 | 14.5k | } |
3127 | | |
3128 | | // Look for data files in the provided paths, but first check the environment override to determine if we should use that |
3129 | | // instead. |
3130 | | VkResult read_data_files_in_search_paths(const struct loader_instance *inst, enum loader_data_files_type manifest_type, |
3131 | 14.5k | const char *path_override, bool *override_active, struct loader_string_list *out_files) { |
3132 | 14.5k | VkResult vk_result = VK_SUCCESS; |
3133 | 14.5k | char *override_env = NULL; |
3134 | 14.5k | const char *override_path = NULL; |
3135 | 14.5k | char *additional_env = NULL; |
3136 | 14.5k | size_t search_path_size = 0; |
3137 | 14.5k | char *search_path = NULL; |
3138 | 14.5k | char *cur_path_ptr = NULL; |
3139 | 14.5k | bool use_first_found_manifest = false; |
3140 | 14.5k | #if COMMON_UNIX_PLATFORMS |
3141 | 14.5k | const char *relative_location = NULL; // Only used on unix platforms |
3142 | 14.5k | size_t rel_size = 0; // unused in windows, dont declare so no compiler warnings are generated |
3143 | 14.5k | #endif |
3144 | | |
3145 | | #if defined(_WIN32) |
3146 | | char *package_path = NULL; |
3147 | | #elif COMMON_UNIX_PLATFORMS |
3148 | | // Determine how much space is needed to generate the full search path |
3149 | | // for the current manifest files. |
3150 | 14.5k | char *xdg_config_home = loader_secure_getenv("XDG_CONFIG_HOME", inst); |
3151 | 14.5k | char *xdg_config_dirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst); |
3152 | | |
3153 | 14.5k | #if !defined(__Fuchsia__) && !defined(__QNX__) |
3154 | 14.5k | if (NULL == xdg_config_dirs || '\0' == xdg_config_dirs[0]) { |
3155 | 14.5k | xdg_config_dirs = FALLBACK_CONFIG_DIRS; |
3156 | 14.5k | } |
3157 | 14.5k | #endif |
3158 | | |
3159 | 14.5k | char *xdg_data_home = loader_secure_getenv("XDG_DATA_HOME", inst); |
3160 | 14.5k | char *xdg_data_dirs = loader_secure_getenv("XDG_DATA_DIRS", inst); |
3161 | | |
3162 | 14.5k | #if !defined(__Fuchsia__) && !defined(__QNX__) |
3163 | 14.5k | if (NULL == xdg_data_dirs || '\0' == xdg_data_dirs[0]) { |
3164 | 14.5k | xdg_data_dirs = FALLBACK_DATA_DIRS; |
3165 | 14.5k | } |
3166 | 14.5k | #endif |
3167 | | |
3168 | 14.5k | char *home = NULL; |
3169 | 14.5k | char *default_data_home = NULL; |
3170 | 14.5k | char *default_config_home = NULL; |
3171 | 14.5k | char *home_data_dir = NULL; |
3172 | 14.5k | char *home_config_dir = NULL; |
3173 | | |
3174 | | // Only use HOME if XDG_DATA_HOME is not present on the system |
3175 | 14.5k | home = loader_secure_getenv("HOME", inst); |
3176 | 14.5k | if (home != NULL) { |
3177 | 14.5k | if (NULL == xdg_config_home || '\0' == xdg_config_home[0]) { |
3178 | 14.5k | const char config_suffix[] = "/.config"; |
3179 | 14.5k | size_t default_config_home_len = strlen(home) + sizeof(config_suffix) + 1; |
3180 | 14.5k | default_config_home = loader_instance_heap_calloc(inst, default_config_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3181 | 14.5k | if (default_config_home == NULL) { |
3182 | 0 | vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; |
3183 | 0 | goto out; |
3184 | 0 | } |
3185 | 14.5k | strncpy(default_config_home, home, default_config_home_len); |
3186 | 14.5k | strncat(default_config_home, config_suffix, default_config_home_len); |
3187 | 14.5k | } |
3188 | 14.5k | if (NULL == xdg_data_home || '\0' == xdg_data_home[0]) { |
3189 | 14.5k | const char data_suffix[] = "/.local/share"; |
3190 | 14.5k | size_t default_data_home_len = strlen(home) + sizeof(data_suffix) + 1; |
3191 | 14.5k | default_data_home = loader_instance_heap_calloc(inst, default_data_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3192 | 14.5k | if (default_data_home == NULL) { |
3193 | 0 | vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; |
3194 | 0 | goto out; |
3195 | 0 | } |
3196 | 14.5k | strncpy(default_data_home, home, default_data_home_len); |
3197 | 14.5k | strncat(default_data_home, data_suffix, default_data_home_len); |
3198 | 14.5k | } |
3199 | 14.5k | } |
3200 | | |
3201 | 14.5k | if (NULL != default_config_home) { |
3202 | 14.5k | home_config_dir = default_config_home; |
3203 | 14.5k | } else { |
3204 | 0 | home_config_dir = xdg_config_home; |
3205 | 0 | } |
3206 | 14.5k | if (NULL != default_data_home) { |
3207 | 14.5k | home_data_dir = default_data_home; |
3208 | 14.5k | } else { |
3209 | 0 | home_data_dir = xdg_data_home; |
3210 | 0 | } |
3211 | | #else |
3212 | | #warning read_data_files_in_search_paths unsupported platform |
3213 | | #endif |
3214 | | |
3215 | 14.5k | switch (manifest_type) { |
3216 | 980 | case LOADER_DATA_FILE_MANIFEST_DRIVER: |
3217 | 980 | override_env = loader_secure_getenv(VK_DRIVER_FILES_ENV_VAR, inst); |
3218 | 980 | if (NULL == override_env) { |
3219 | | // Not there, so fall back to the old name |
3220 | 980 | override_env = loader_secure_getenv(VK_ICD_FILENAMES_ENV_VAR, inst); |
3221 | 980 | } |
3222 | 980 | additional_env = loader_secure_getenv(VK_ADDITIONAL_DRIVER_FILES_ENV_VAR, inst); |
3223 | 980 | #if COMMON_UNIX_PLATFORMS |
3224 | 980 | relative_location = VK_DRIVERS_INFO_RELATIVE_DIR; |
3225 | 980 | #endif |
3226 | | #if defined(_WIN32) |
3227 | | package_path = windows_get_app_package_manifest_path(inst); |
3228 | | #endif |
3229 | 980 | break; |
3230 | 6.78k | case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER: |
3231 | 6.78k | override_env = loader_secure_getenv(VK_IMPLICIT_LAYER_PATH_ENV_VAR, inst); |
3232 | 6.78k | additional_env = loader_secure_getenv(VK_ADDITIONAL_IMPLICIT_LAYER_PATH_ENV_VAR, inst); |
3233 | 6.78k | #if COMMON_UNIX_PLATFORMS |
3234 | 6.78k | relative_location = VK_ILAYERS_INFO_RELATIVE_DIR; |
3235 | 6.78k | #endif |
3236 | | #if defined(_WIN32) |
3237 | | package_path = windows_get_app_package_manifest_path(inst); |
3238 | | #endif |
3239 | 6.78k | break; |
3240 | 6.78k | case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER: |
3241 | 6.78k | override_env = loader_secure_getenv(VK_EXPLICIT_LAYER_PATH_ENV_VAR, inst); |
3242 | 6.78k | additional_env = loader_secure_getenv(VK_ADDITIONAL_EXPLICIT_LAYER_PATH_ENV_VAR, inst); |
3243 | 6.78k | #if COMMON_UNIX_PLATFORMS |
3244 | 6.78k | relative_location = VK_ELAYERS_INFO_RELATIVE_DIR; |
3245 | 6.78k | #endif |
3246 | 6.78k | break; |
3247 | 0 | default: |
3248 | 0 | assert(false && "Shouldn't get here!"); |
3249 | 0 | break; |
3250 | 14.5k | } |
3251 | | |
3252 | | // Log a message when VK_LAYER_PATH is set but the override layer paths take priority |
3253 | 14.5k | if (manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER && NULL != override_env && NULL != path_override) { |
3254 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3255 | 0 | "Ignoring VK_LAYER_PATH. The Override layer is active and has override paths set, which takes priority. " |
3256 | 0 | "VK_LAYER_PATH is set to %s", |
3257 | 0 | override_env); |
3258 | 0 | } |
3259 | | |
3260 | 14.5k | if (path_override != NULL) { |
3261 | 64 | override_path = path_override; |
3262 | 14.4k | } else if (override_env != NULL) { |
3263 | 0 | override_path = override_env; |
3264 | 0 | } |
3265 | | |
3266 | | // Add two by default for NULL terminator and one path separator on end (just in case) |
3267 | 14.5k | search_path_size = 2; |
3268 | | |
3269 | | // If there's an override, use that (and the local folder if required) and nothing else |
3270 | 14.5k | if (NULL != override_path) { |
3271 | | // Local folder and null terminator |
3272 | 64 | search_path_size += strlen(override_path) + 2; |
3273 | 14.4k | } else { |
3274 | | // Add the size of any additional search paths defined in the additive environment variable |
3275 | 14.4k | if (NULL != additional_env) { |
3276 | 0 | search_path_size += determine_data_file_path_size(additional_env, 0) + 2; |
3277 | | #if defined(_WIN32) |
3278 | | } |
3279 | | if (NULL != package_path) { |
3280 | | search_path_size += determine_data_file_path_size(package_path, 0) + 2; |
3281 | | } |
3282 | | if (search_path_size == 2) { |
3283 | | goto out; |
3284 | | } |
3285 | | #elif COMMON_UNIX_PLATFORMS |
3286 | | } |
3287 | | |
3288 | | // Add the general search folders (with the appropriate relative folder added) |
3289 | 14.4k | rel_size = strlen(relative_location); |
3290 | 14.4k | if (rel_size > 0) { |
3291 | | #if defined(__APPLE__) |
3292 | | search_path_size += MAXPATHLEN; |
3293 | | #endif |
3294 | | // Only add the home folders if defined |
3295 | 14.4k | if (NULL != home_config_dir) { |
3296 | 14.4k | search_path_size += determine_data_file_path_size(home_config_dir, rel_size); |
3297 | 14.4k | } |
3298 | 14.4k | search_path_size += determine_data_file_path_size(xdg_config_dirs, rel_size); |
3299 | 14.4k | search_path_size += determine_data_file_path_size(SYSCONFDIR, rel_size); |
3300 | 14.4k | #if defined(EXTRASYSCONFDIR) |
3301 | 14.4k | search_path_size += determine_data_file_path_size(EXTRASYSCONFDIR, rel_size); |
3302 | 14.4k | #endif |
3303 | | // Only add the home folders if defined |
3304 | 14.4k | if (NULL != home_data_dir) { |
3305 | 14.4k | search_path_size += determine_data_file_path_size(home_data_dir, rel_size); |
3306 | 14.4k | } |
3307 | 14.4k | search_path_size += determine_data_file_path_size(xdg_data_dirs, rel_size); |
3308 | 14.4k | } |
3309 | | #else |
3310 | | #warning read_data_files_in_search_paths unsupported platform |
3311 | | #endif |
3312 | 14.4k | } |
3313 | | |
3314 | | // Allocate the required space |
3315 | 14.5k | search_path = loader_instance_heap_calloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3316 | 14.5k | if (NULL == search_path) { |
3317 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
3318 | 0 | "read_data_files_in_search_paths: Failed to allocate space for search path of length %d", |
3319 | 0 | (uint32_t)search_path_size); |
3320 | 0 | vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; |
3321 | 0 | goto out; |
3322 | 0 | } |
3323 | | |
3324 | 14.5k | cur_path_ptr = search_path; |
3325 | | |
3326 | | // Add the remaining paths to the list |
3327 | 14.5k | if (NULL != override_path) { |
3328 | 64 | size_t override_path_len = strlen(override_path); |
3329 | 64 | loader_strncpy(cur_path_ptr, search_path_size, override_path, override_path_len); |
3330 | 64 | cur_path_ptr += override_path_len; |
3331 | 14.4k | } else { |
3332 | | // Add any additional search paths defined in the additive environment variable |
3333 | 14.4k | if (NULL != additional_env) { |
3334 | 0 | copy_data_file_info(additional_env, NULL, 0, &cur_path_ptr); |
3335 | 0 | } |
3336 | | |
3337 | | #if defined(_WIN32) |
3338 | | if (NULL != package_path) { |
3339 | | copy_data_file_info(package_path, NULL, 0, &cur_path_ptr); |
3340 | | } |
3341 | | #elif COMMON_UNIX_PLATFORMS |
3342 | 14.4k | if (rel_size > 0) { |
3343 | | #if defined(__APPLE__) |
3344 | | // Add the bundle's Resources dir to the beginning of the search path. |
3345 | | // Looks for manifests in the bundle first, before any system directories. |
3346 | | // This also appears to work unmodified for iOS, it finds the app bundle on the devices |
3347 | | // file system. (RSW) |
3348 | | CFBundleRef main_bundle = CFBundleGetMainBundle(); |
3349 | | if (NULL != main_bundle) { |
3350 | | CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle); |
3351 | | if (NULL != ref) { |
3352 | | if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) { |
3353 | | cur_path_ptr += strlen(cur_path_ptr); |
3354 | | *cur_path_ptr++ = DIRECTORY_SYMBOL; |
3355 | | memcpy(cur_path_ptr, relative_location, rel_size); |
3356 | | cur_path_ptr += rel_size; |
3357 | | *cur_path_ptr++ = PATH_SEPARATOR; |
3358 | | if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) { |
3359 | | use_first_found_manifest = true; |
3360 | | } |
3361 | | } |
3362 | | CFRelease(ref); |
3363 | | } |
3364 | | } |
3365 | | #endif // __APPLE__ |
3366 | | |
3367 | | // Only add the home folders if not NULL |
3368 | 14.4k | if (NULL != home_config_dir) { |
3369 | 14.4k | copy_data_file_info(home_config_dir, relative_location, rel_size, &cur_path_ptr); |
3370 | 14.4k | } |
3371 | 14.4k | copy_data_file_info(xdg_config_dirs, relative_location, rel_size, &cur_path_ptr); |
3372 | 14.4k | copy_data_file_info(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr); |
3373 | 14.4k | #if defined(EXTRASYSCONFDIR) |
3374 | 14.4k | copy_data_file_info(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr); |
3375 | 14.4k | #endif |
3376 | | |
3377 | | // Only add the home folders if not NULL |
3378 | 14.4k | if (NULL != home_data_dir) { |
3379 | 14.4k | copy_data_file_info(home_data_dir, relative_location, rel_size, &cur_path_ptr); |
3380 | 14.4k | } |
3381 | 14.4k | copy_data_file_info(xdg_data_dirs, relative_location, rel_size, &cur_path_ptr); |
3382 | 14.4k | } |
3383 | | |
3384 | | // Remove the last path separator |
3385 | 14.4k | --cur_path_ptr; |
3386 | | |
3387 | 14.4k | assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size); |
3388 | 14.4k | *cur_path_ptr = '\0'; |
3389 | | #else |
3390 | | #warning read_data_files_in_search_paths unsupported platform |
3391 | | #endif |
3392 | 14.4k | } |
3393 | | |
3394 | | // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc. |
3395 | | // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths. |
3396 | 14.5k | char path_sep_str[2] = {PATH_SEPARATOR, '\0'}; |
3397 | 14.5k | size_t search_path_updated_size = strlen(search_path); |
3398 | 118k | for (size_t first = 0; first < search_path_updated_size;) { |
3399 | | // If this is an empty path, erase it |
3400 | 103k | if (search_path[first] == PATH_SEPARATOR) { |
3401 | 0 | memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1); |
3402 | 0 | search_path_updated_size -= 1; |
3403 | 0 | continue; |
3404 | 0 | } |
3405 | | |
3406 | 103k | size_t first_end = first + 1; |
3407 | 103k | first_end += strcspn(&search_path[first_end], path_sep_str); |
3408 | 1.79M | for (size_t second = first_end + 1; second < search_path_updated_size;) { |
3409 | 1.68M | size_t second_end = second + 1; |
3410 | 1.68M | second_end += strcspn(&search_path[second_end], path_sep_str); |
3411 | 1.68M | if (first_end - first == second_end - second && |
3412 | 1.68M | !strncmp(&search_path[first], &search_path[second], second_end - second)) { |
3413 | | // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path. |
3414 | 68.7k | if (search_path[second_end] == PATH_SEPARATOR) { |
3415 | 68.7k | second_end++; |
3416 | 68.7k | } |
3417 | 68.7k | memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1); |
3418 | 68.7k | search_path_updated_size -= second_end - second; |
3419 | 1.61M | } else { |
3420 | 1.61M | second = second_end + 1; |
3421 | 1.61M | } |
3422 | 1.68M | } |
3423 | 103k | first = first_end + 1; |
3424 | 103k | } |
3425 | 14.5k | search_path_size = search_path_updated_size; |
3426 | | |
3427 | | // Print out the paths being searched if debugging is enabled |
3428 | 14.5k | uint32_t log_flags = 0; |
3429 | 14.5k | if (search_path_size > 0) { |
3430 | 14.5k | char *tmp_search_path = loader_instance_heap_alloc(inst, search_path_size + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3431 | 14.5k | if (NULL != tmp_search_path) { |
3432 | 14.5k | loader_strncpy(tmp_search_path, search_path_size + 1, search_path, search_path_size); |
3433 | 14.5k | tmp_search_path[search_path_size] = '\0'; |
3434 | 14.5k | if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) { |
3435 | 980 | log_flags = VULKAN_LOADER_DRIVER_BIT; |
3436 | 980 | loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Searching for driver manifest files"); |
3437 | 13.5k | } else { |
3438 | 13.5k | log_flags = VULKAN_LOADER_LAYER_BIT; |
3439 | 13.5k | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "Searching for %s layer manifest files", |
3440 | 13.5k | manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER ? "explicit" : "implicit"); |
3441 | 13.5k | } |
3442 | 14.5k | loader_log(inst, log_flags, 0, " In following locations:"); |
3443 | 14.5k | char *cur_file; |
3444 | 14.5k | char *next_file = tmp_search_path; |
3445 | 118k | while (NULL != next_file && *next_file != '\0') { |
3446 | 103k | cur_file = next_file; |
3447 | 103k | next_file = loader_get_next_path(cur_file); |
3448 | 103k | loader_log(inst, log_flags, 0, " %s", cur_file); |
3449 | 103k | } |
3450 | 14.5k | loader_instance_heap_free(inst, tmp_search_path); |
3451 | 14.5k | } |
3452 | 14.5k | } |
3453 | | |
3454 | | // Now, parse the paths and add any manifest files found in them. |
3455 | 14.5k | vk_result = add_data_files(inst, search_path, out_files, use_first_found_manifest); |
3456 | | |
3457 | 14.5k | if (log_flags != 0 && out_files->count > 0) { |
3458 | 7.59k | loader_log(inst, log_flags, 0, " Found the following files:"); |
3459 | 15.5k | for (uint32_t cur_file = 0; cur_file < out_files->count; ++cur_file) { |
3460 | 8.00k | loader_log(inst, log_flags, 0, " %s", out_files->list[cur_file]); |
3461 | 8.00k | } |
3462 | 7.59k | } else { |
3463 | 6.95k | loader_log(inst, log_flags, 0, " Found no files"); |
3464 | 6.95k | } |
3465 | | |
3466 | 14.5k | if (NULL != override_path) { |
3467 | 64 | *override_active = true; |
3468 | 14.4k | } else { |
3469 | 14.4k | *override_active = false; |
3470 | 14.4k | } |
3471 | | |
3472 | 14.5k | out: |
3473 | | |
3474 | 14.5k | loader_free_getenv(additional_env, inst); |
3475 | 14.5k | loader_free_getenv(override_env, inst); |
3476 | | #if defined(_WIN32) |
3477 | | loader_instance_heap_free(inst, package_path); |
3478 | | #elif COMMON_UNIX_PLATFORMS |
3479 | | loader_free_getenv(xdg_config_home, inst); |
3480 | 14.5k | loader_free_getenv(xdg_config_dirs, inst); |
3481 | 14.5k | loader_free_getenv(xdg_data_home, inst); |
3482 | 14.5k | loader_free_getenv(xdg_data_dirs, inst); |
3483 | 14.5k | loader_free_getenv(xdg_data_home, inst); |
3484 | 14.5k | loader_free_getenv(home, inst); |
3485 | 14.5k | loader_instance_heap_free(inst, default_data_home); |
3486 | 14.5k | loader_instance_heap_free(inst, default_config_home); |
3487 | | #else |
3488 | | #warning read_data_files_in_search_paths unsupported platform |
3489 | | #endif |
3490 | | |
3491 | 14.5k | loader_instance_heap_free(inst, search_path); |
3492 | | |
3493 | 14.5k | return vk_result; |
3494 | 14.5k | } |
3495 | | |
3496 | | // Find the Vulkan library manifest files. |
3497 | | // |
3498 | | // This function scans the appropriate locations for a list of JSON manifest files based on the |
3499 | | // "manifest_type". The location is interpreted as Registry path on Windows and a directory path(s) |
3500 | | // on Linux. |
3501 | | // "home_location" is an additional directory in the users home directory to look at. It is |
3502 | | // expanded into the dir path $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location |
3503 | | // depending on environment variables. This "home_location" is only used on Linux. |
3504 | | // |
3505 | | // \returns |
// VkResult
3507 | | // A string list of manifest files to be opened in out_files param. |
3508 | | // List has a pointer to string for each manifest filename. |
3509 | | // When done using the list in out_files, pointers should be freed. |
3510 | | // Location or override string lists can be either files or directories as |
3511 | | // follows: |
3512 | | // | location | override |
3513 | | // -------------------------------- |
3514 | | // Win ICD | files | files |
3515 | | // Win Layer | files | dirs |
3516 | | // Linux ICD | dirs | files |
3517 | | // Linux Layer| dirs | dirs |
3518 | | |
3519 | | VkResult loader_get_data_files(const struct loader_instance *inst, enum loader_data_files_type manifest_type, |
3520 | 14.5k | const char *path_override, struct loader_string_list *out_files) { |
3521 | 14.5k | VkResult res = VK_SUCCESS; |
3522 | 14.5k | bool override_active = false; |
3523 | | |
3524 | | // Free and init the out_files information so there's no false data left from uninitialized variables. |
3525 | 14.5k | free_string_list(inst, out_files); |
3526 | | |
3527 | 14.5k | res = read_data_files_in_search_paths(inst, manifest_type, path_override, &override_active, out_files); |
3528 | 14.5k | if (VK_SUCCESS != res) { |
3529 | 0 | goto out; |
3530 | 0 | } |
3531 | | |
3532 | | #if defined(_WIN32) |
3533 | | // Read the registry if the override wasn't active. |
3534 | | if (!override_active) { |
3535 | | bool warn_if_not_present = false; |
3536 | | char *registry_location = NULL; |
3537 | | |
3538 | | switch (manifest_type) { |
3539 | | default: |
3540 | | goto out; |
3541 | | case LOADER_DATA_FILE_MANIFEST_DRIVER: |
3542 | | warn_if_not_present = true; |
3543 | | registry_location = VK_DRIVERS_INFO_REGISTRY_LOC; |
3544 | | break; |
3545 | | case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER: |
3546 | | registry_location = VK_ILAYERS_INFO_REGISTRY_LOC; |
3547 | | break; |
3548 | | case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER: |
3549 | | warn_if_not_present = true; |
3550 | | registry_location = VK_ELAYERS_INFO_REGISTRY_LOC; |
3551 | | break; |
3552 | | } |
3553 | | VkResult tmp_res = |
3554 | | windows_read_data_files_in_registry(inst, manifest_type, warn_if_not_present, registry_location, out_files); |
3555 | | // Only return an error if there was an error this time, and no manifest files from before. |
3556 | | if (VK_SUCCESS != tmp_res && out_files->count == 0) { |
3557 | | res = tmp_res; |
3558 | | goto out; |
3559 | | } |
3560 | | } |
3561 | | #endif |
3562 | | |
3563 | 14.5k | out: |
3564 | | |
3565 | 14.5k | if (VK_SUCCESS != res) { |
3566 | 0 | free_string_list(inst, out_files); |
3567 | 0 | } |
3568 | | |
3569 | 14.5k | return res; |
3570 | 14.5k | } |
3571 | | |
// Data of interest parsed out of a single driver (ICD) JSON manifest file by
// loader_parse_icd_manifest.
struct ICDManifestInfo {
    // Path to the driver's shared library, produced by combining the manifest
    // file's directory with the manifest's "library_path" field. Heap-allocated;
    // owned by this struct.
    char *full_library_path;
    // Packed Vulkan version parsed from the manifest's "api_version" string.
    uint32_t version;
};
3576 | | |
3577 | | // Takes a json file, opens, reads, and parses an ICD Manifest out of it. |
3578 | | // Should only return VK_SUCCESS, VK_ERROR_INCOMPATIBLE_DRIVER, or VK_ERROR_OUT_OF_HOST_MEMORY |
3579 | | VkResult loader_parse_icd_manifest(const struct loader_instance *inst, char *file_str, struct ICDManifestInfo *icd, |
3580 | 980 | bool *skipped_portability_drivers) { |
3581 | 980 | VkResult res = VK_SUCCESS; |
3582 | 980 | cJSON *icd_manifest_json = NULL; |
3583 | | |
3584 | 980 | if (file_str == NULL) { |
3585 | 0 | goto out; |
3586 | 0 | } |
3587 | | |
3588 | 980 | res = loader_get_json(inst, file_str, &icd_manifest_json); |
3589 | 980 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
3590 | 0 | goto out; |
3591 | 0 | } |
3592 | 980 | if (res != VK_SUCCESS || NULL == icd_manifest_json) { |
3593 | 8 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3594 | 8 | goto out; |
3595 | 8 | } |
3596 | | |
3597 | 972 | cJSON *file_format_version_json = loader_cJSON_GetObjectItem(icd_manifest_json, "file_format_version"); |
3598 | 972 | if (file_format_version_json == NULL) { |
3599 | 6 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3600 | 6 | "loader_parse_icd_manifest: ICD JSON %s does not have a \'file_format_version\' field. Skipping ICD JSON.", |
3601 | 6 | file_str); |
3602 | 6 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3603 | 6 | goto out; |
3604 | 6 | } |
3605 | | |
3606 | 966 | char *file_vers_str = loader_cJSON_GetStringValue(file_format_version_json); |
3607 | 966 | if (NULL == file_vers_str) { |
3608 | | // Only reason the print can fail is if there was an allocation issue |
3609 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3610 | 0 | "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'file_format_version\' field. Skipping ICD JSON", |
3611 | 0 | file_str); |
3612 | 0 | goto out; |
3613 | 0 | } |
3614 | 966 | loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers_str); |
3615 | | |
3616 | | // Get the version of the driver manifest |
3617 | 966 | loader_api_version json_file_version = loader_make_full_version(loader_parse_version_string(file_vers_str)); |
3618 | | |
3619 | | // Loader only knows versions 1.0.0 and 1.0.1, anything above it is unknown |
3620 | 966 | if (loader_check_version_meets_required(loader_combine_version(1, 0, 2), json_file_version)) { |
3621 | 116 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3622 | 116 | "loader_parse_icd_manifest: %s has unknown icd manifest file version %d.%d.%d. May cause errors.", file_str, |
3623 | 116 | json_file_version.major, json_file_version.minor, json_file_version.patch); |
3624 | 116 | } |
3625 | | |
3626 | 966 | cJSON *itemICD = loader_cJSON_GetObjectItem(icd_manifest_json, "ICD"); |
3627 | 966 | if (itemICD == NULL) { |
3628 | | // Don't warn if this happens to be a layer manifest file |
3629 | 104 | if (loader_cJSON_GetObjectItem(icd_manifest_json, "layer") == NULL && |
3630 | 104 | loader_cJSON_GetObjectItem(icd_manifest_json, "layers") == NULL) { |
3631 | 1 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3632 | 1 | "loader_parse_icd_manifest: Can not find \'ICD\' object in ICD JSON file %s. Skipping ICD JSON", file_str); |
3633 | 1 | } |
3634 | 104 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3635 | 104 | goto out; |
3636 | 104 | } |
3637 | | |
3638 | 862 | cJSON *library_path_json = loader_cJSON_GetObjectItem(itemICD, "library_path"); |
3639 | 862 | if (library_path_json == NULL) { |
3640 | 18 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3641 | 18 | "loader_parse_icd_manifest: Failed to find \'library_path\' object in ICD JSON file %s. Skipping ICD JSON.", |
3642 | 18 | file_str); |
3643 | 18 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3644 | 18 | goto out; |
3645 | 18 | } |
3646 | 844 | bool out_of_memory = false; |
3647 | 844 | char *library_path = loader_cJSON_Print(library_path_json, &out_of_memory); |
3648 | 844 | if (out_of_memory) { |
3649 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3650 | 0 | "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'library_path\' field. Skipping ICD JSON.", file_str); |
3651 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
3652 | 0 | goto out; |
3653 | 844 | } else if (!library_path || strlen(library_path) == 0) { |
3654 | 1 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3655 | 1 | "loader_parse_icd_manifest: ICD JSON %s \'library_path\' field is empty. Skipping ICD JSON.", file_str); |
3656 | 1 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3657 | 1 | loader_instance_heap_free(inst, library_path); |
3658 | 1 | goto out; |
3659 | 1 | } |
3660 | | |
3661 | | // Print out the paths being searched if debugging is enabled |
3662 | 843 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, "Searching for ICD drivers named %s", library_path); |
3663 | | // This function takes ownership of library_path - so we don't need to clean it up |
3664 | 843 | res = combine_manifest_directory_and_library_path(inst, library_path, file_str, &icd->full_library_path); |
3665 | 843 | if (VK_SUCCESS != res) { |
3666 | 0 | goto out; |
3667 | 0 | } |
3668 | | |
3669 | 843 | cJSON *api_version_json = loader_cJSON_GetObjectItem(itemICD, "api_version"); |
3670 | 843 | if (api_version_json == NULL) { |
3671 | 769 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3672 | 769 | "loader_parse_icd_manifest: ICD JSON %s does not have an \'api_version\' field. Skipping ICD JSON.", file_str); |
3673 | 769 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3674 | 769 | goto out; |
3675 | 769 | } |
3676 | 74 | char *version_str = loader_cJSON_GetStringValue(api_version_json); |
3677 | 74 | if (NULL == version_str) { |
3678 | | // Only reason the print can fail is if there was an allocation issue |
3679 | 12 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3680 | 12 | "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'api_version\' field. Skipping ICD JSON.", file_str); |
3681 | | |
3682 | 12 | goto out; |
3683 | 12 | } |
3684 | 62 | icd->version = loader_parse_version_string(version_str); |
3685 | | |
3686 | 62 | if (VK_API_VERSION_VARIANT(icd->version) != 0) { |
3687 | 22 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3688 | 22 | "loader_parse_icd_manifest: Driver's ICD JSON %s \'api_version\' field contains a non-zero variant value of %d. " |
3689 | 22 | " Skipping ICD JSON.", |
3690 | 22 | file_str, VK_API_VERSION_VARIANT(icd->version)); |
3691 | 22 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3692 | 22 | goto out; |
3693 | 22 | } |
3694 | | |
3695 | | // Skip over ICD's which contain a true "is_portability_driver" value whenever the application doesn't enable |
3696 | | // portability enumeration. |
3697 | 40 | cJSON *is_portability_driver_json = loader_cJSON_GetObjectItem(itemICD, "is_portability_driver"); |
3698 | 40 | if (loader_cJSON_IsTrue(is_portability_driver_json) && inst && !inst->portability_enumeration_enabled) { |
3699 | 0 | if (skipped_portability_drivers) { |
3700 | 0 | *skipped_portability_drivers = true; |
3701 | 0 | } |
3702 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3703 | 0 | goto out; |
3704 | 0 | } |
3705 | | |
3706 | 40 | char *library_arch_str = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(itemICD, "library_arch")); |
3707 | 40 | if (library_arch_str != NULL) { |
3708 | 11 | if ((strncmp(library_arch_str, "32", 2) == 0 && sizeof(void *) != 4) || |
3709 | 11 | (strncmp(library_arch_str, "64", 2) == 0 && sizeof(void *) != 8)) { |
3710 | 1 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
3711 | 1 | "loader_parse_icd_manifest: Driver library architecture doesn't match the current running " |
3712 | 1 | "architecture, skipping this driver"); |
3713 | 1 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3714 | 1 | goto out; |
3715 | 1 | } |
3716 | 11 | } |
3717 | 980 | out: |
3718 | 980 | loader_cJSON_Delete(icd_manifest_json); |
3719 | 980 | return res; |
3720 | 40 | } |
3721 | | |
3722 | | // Try to find the Vulkan ICD driver(s). |
3723 | | // |
3724 | | // This function scans the default system loader path(s) or path specified by either the |
3725 | | // VK_DRIVER_FILES or VK_ICD_FILENAMES environment variable in order to find loadable |
3726 | | // VK ICDs manifest files. |
3727 | | // From these manifest files it finds the ICD libraries. |
3728 | | // |
3729 | | // skipped_portability_drivers is used to report whether the loader found drivers which report |
3730 | | // portability but the application didn't enable the bit to enumerate them |
3731 | | // Can be NULL |
3732 | | // |
3733 | | // \returns |
3734 | | // Vulkan result |
3735 | | // (on result == VK_SUCCESS) a list of icds that were discovered |
VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
                         const VkInstanceCreateInfo *pCreateInfo, bool *skipped_portability_drivers) {
    VkResult res = VK_SUCCESS;
    struct loader_string_list manifest_files = {0};
    struct loader_envvar_filter select_filter = {0};
    struct loader_envvar_filter disable_filter = {0};
    struct ICDManifestInfo *icd_details = NULL;

    // Set up the ICD Trampoline list so elements can be written into it.
    res = loader_init_scanned_icd_list(inst, icd_tramp_list);
    if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
        // Nothing allocated yet, so a direct return (not goto out) is safe here.
        return res;
    }

    // Direct-driver-loading (VK_LUNARG_direct_driver_loading) is handled first; it may
    // be configured as the exclusive driver source.
    bool direct_driver_loading_exclusive_mode = false;
    res = loader_scan_for_direct_drivers(inst, pCreateInfo, icd_tramp_list, &direct_driver_loading_exclusive_mode);
    if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
        goto out;
    }
    if (direct_driver_loading_exclusive_mode) {
        // Make sure to jump over the system & env-var driver discovery mechanisms if exclusive mode is set, even if no drivers
        // were successfully found through the direct driver loading mechanism
        goto out;
    }

    // Parse the filter environment variables to determine if we have any special behavior
    res = parse_generic_filter_environment_var(inst, VK_DRIVERS_SELECT_ENV_VAR, &select_filter);
    if (VK_SUCCESS != res) {
        goto out;
    }
    res = parse_generic_filter_environment_var(inst, VK_DRIVERS_DISABLE_ENV_VAR, &disable_filter);
    if (VK_SUCCESS != res) {
        goto out;
    }

    // Get a list of manifest files for ICDs
    res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_DRIVER, NULL, &manifest_files);
    if (VK_SUCCESS != res) {
        goto out;
    }

    // One ICDManifestInfo slot per manifest file. The array itself is stack allocated,
    // but each slot's full_library_path is heap allocated by loader_parse_icd_manifest
    // and must be freed in the out: block below.
    icd_details = loader_stack_alloc(sizeof(struct ICDManifestInfo) * manifest_files.count);
    if (NULL == icd_details) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    memset(icd_details, 0, sizeof(struct ICDManifestInfo) * manifest_files.count);

    for (uint32_t i = 0; i < manifest_files.count; i++) {
        VkResult icd_res = VK_SUCCESS;

        icd_res = loader_parse_icd_manifest(inst, manifest_files.list[i], &icd_details[i], skipped_portability_drivers);
        if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
            res = icd_res;
            goto out;
        } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
            // Manifest was unusable (bad JSON, wrong arch, portability skipped, etc.)
            // but that should not stop the scan of the remaining manifests.
            continue;
        }

        // Apply the VK_LOADER_DRIVERS_SELECT / VK_LOADER_DRIVERS_DISABLE filters
        // against just the manifest's filename (not its full path).
        if (select_filter.count > 0 || disable_filter.count > 0) {
            // Get only the filename for comparing to the filters
            char *just_filename_str = strrchr(manifest_files.list[i], DIRECTORY_SYMBOL);

            // No directory symbol, just the filename
            if (NULL == just_filename_str) {
                just_filename_str = manifest_files.list[i];
            } else {
                just_filename_str++;
            }

            bool name_matches_select =
                (select_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &select_filter));
            bool name_matches_disable =
                (disable_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &disable_filter));

            // A select match overrides a disable match for the same driver.
            if (name_matches_disable && !name_matches_select) {
                loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                           "Driver \"%s\" ignored because it was disabled by env var \'%s\'", just_filename_str,
                           VK_DRIVERS_DISABLE_ENV_VAR);
                continue;
            }
            if (select_filter.count != 0 && !name_matches_select) {
                loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                           "Driver \"%s\" ignored because not selected by env var \'%s\'", just_filename_str,
                           VK_DRIVERS_SELECT_ENV_VAR);
                continue;
            }
        }

        // NOTE(review): lib_status is assumed to be written by loader_scanned_icd_add on
        // the VK_ERROR_INCOMPATIBLE_DRIVER path before it is read below — confirm.
        enum loader_layer_library_status lib_status;
        icd_res =
            loader_scanned_icd_add(inst, icd_tramp_list, icd_details[i].full_library_path, icd_details[i].version, &lib_status);
        if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
            res = icd_res;
            goto out;
        } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
            // Failure to add one driver is logged but does not abort the scan.
            switch (lib_status) {
                case LOADER_LAYER_LIB_NOT_LOADED:
                case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
                    loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                               "loader_icd_scan: Failed loading library associated with ICD JSON %s. Ignoring this JSON",
                               icd_details[i].full_library_path);
                    break;
                case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
                    loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Requested ICD %s was wrong bit-type. Ignoring this JSON",
                               icd_details[i].full_library_path);
                    break;
                }
                case LOADER_LAYER_LIB_SUCCESS_LOADED:
                case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
                    // Shouldn't be able to reach this but if it is, best to report a debug
                    loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                               "Shouldn't reach this. A valid version of requested ICD %s was loaded but something bad "
                               "happened afterwards.",
                               icd_details[i].full_library_path);
                    break;
            }
        }
    }

out:
    if (NULL != icd_details) {
        // Successfully got the icd_details structure, which means we need to free the paths contained within
        for (uint32_t i = 0; i < manifest_files.count; i++) {
            loader_instance_heap_free(inst, icd_details[i].full_library_path);
        }
    }
    free_string_list(inst, &manifest_files);
    return res;
}
3866 | | |
3867 | | // Gets the layer data files corresponding to manifest_type & path_override, then parses the resulting json objects |
3868 | | // into instance_layers |
3869 | | // Manifest type must be either implicit or explicit |
3870 | | VkResult loader_parse_instance_layers(struct loader_instance *inst, enum loader_data_files_type manifest_type, |
3871 | 13.5k | const char *path_override, struct loader_layer_list *instance_layers) { |
3872 | 13.5k | assert(manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER || manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER); |
3873 | 13.5k | VkResult res = VK_SUCCESS; |
3874 | 13.5k | struct loader_string_list manifest_files = {0}; |
3875 | | |
3876 | 13.5k | res = loader_get_data_files(inst, manifest_type, path_override, &manifest_files); |
3877 | 13.5k | if (VK_SUCCESS != res) { |
3878 | 0 | goto out; |
3879 | 0 | } |
3880 | | |
3881 | 20.5k | for (uint32_t i = 0; i < manifest_files.count; i++) { |
3882 | 7.02k | char *file_str = manifest_files.list[i]; |
3883 | 7.02k | if (file_str == NULL) { |
3884 | 0 | continue; |
3885 | 0 | } |
3886 | | |
3887 | | // Parse file into JSON struct |
3888 | 7.02k | cJSON *json = NULL; |
3889 | 7.02k | VkResult local_res = loader_get_json(inst, file_str, &json); |
3890 | 7.02k | if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) { |
3891 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
3892 | 0 | goto out; |
3893 | 7.02k | } else if (VK_SUCCESS != local_res || NULL == json) { |
3894 | 976 | continue; |
3895 | 976 | } |
3896 | | |
3897 | 6.04k | local_res = loader_add_layer_properties(inst, instance_layers, json, |
3898 | 6.04k | manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, file_str); |
3899 | 6.04k | loader_cJSON_Delete(json); |
3900 | | |
3901 | | // If the error is anything other than out of memory we still want to try to load the other layers |
3902 | 6.04k | if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) { |
3903 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
3904 | 0 | goto out; |
3905 | 0 | } |
3906 | 6.04k | } |
3907 | 13.5k | out: |
3908 | 13.5k | free_string_list(inst, &manifest_files); |
3909 | | |
3910 | 13.5k | return res; |
3911 | 13.5k | } |
3912 | | |
3913 | | // Given a loader_layer_properties struct that is a valid override layer, concatenate the properties override paths and put them |
3914 | | // into the output parameter override_paths |
3915 | | VkResult get_override_layer_override_paths(struct loader_instance *inst, struct loader_layer_properties *prop, |
3916 | 64 | char **override_paths) { |
3917 | 64 | if (prop->override_paths.count > 0) { |
3918 | 64 | char *cur_write_ptr = NULL; |
3919 | 64 | size_t override_path_size = 0; |
3920 | 74.6k | for (uint32_t j = 0; j < prop->override_paths.count; j++) { |
3921 | 74.6k | override_path_size += determine_data_file_path_size(prop->override_paths.list[j], 0); |
3922 | 74.6k | } |
3923 | 64 | *override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3924 | 64 | if (*override_paths == NULL) { |
3925 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
3926 | 0 | } |
3927 | 64 | cur_write_ptr = &(*override_paths)[0]; |
3928 | 74.6k | for (uint32_t j = 0; j < prop->override_paths.count; j++) { |
3929 | 74.6k | copy_data_file_info(prop->override_paths.list[j], NULL, 0, &cur_write_ptr); |
3930 | 74.6k | } |
3931 | | // Remove the last path separator |
3932 | 64 | --cur_write_ptr; |
3933 | 64 | assert(cur_write_ptr - (*override_paths) < (ptrdiff_t)override_path_size); |
3934 | 64 | *cur_write_ptr = '\0'; |
3935 | 64 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Override layer has override paths set to %s", |
3936 | 64 | *override_paths); |
3937 | 64 | } |
3938 | 64 | return VK_SUCCESS; |
3939 | 64 | } |
3940 | | |
3941 | | VkResult loader_scan_for_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers, |
3942 | 7.47k | const struct loader_envvar_all_filters *filters) { |
3943 | 7.47k | VkResult res = VK_SUCCESS; |
3944 | 7.47k | struct loader_layer_list settings_layers = {0}; |
3945 | 7.47k | struct loader_layer_list regular_instance_layers = {0}; |
3946 | 7.47k | bool override_layer_valid = false; |
3947 | 7.47k | char *override_paths = NULL; |
3948 | | |
3949 | 7.47k | bool should_search_for_other_layers = true; |
3950 | 7.47k | res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers); |
3951 | 7.47k | if (VK_SUCCESS != res) { |
3952 | 0 | goto out; |
3953 | 0 | } |
3954 | | |
3955 | | // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the |
3956 | | // output |
3957 | 7.47k | if (!should_search_for_other_layers) { |
3958 | 691 | *instance_layers = settings_layers; |
3959 | 691 | memset(&settings_layers, 0, sizeof(struct loader_layer_list)); |
3960 | 691 | goto out; |
3961 | 691 | } |
3962 | | |
3963 | 6.78k | res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, ®ular_instance_layers); |
3964 | 6.78k | if (VK_SUCCESS != res) { |
3965 | 0 | goto out; |
3966 | 0 | } |
3967 | | |
3968 | | // Remove any extraneous override layers. |
3969 | 6.78k | remove_all_non_valid_override_layers(inst, ®ular_instance_layers); |
3970 | | |
3971 | | // Check to see if the override layer is present, and use it's override paths. |
3972 | 1.23M | for (uint32_t i = 0; i < regular_instance_layers.count; i++) { |
3973 | 1.23M | struct loader_layer_properties *prop = ®ular_instance_layers.list[i]; |
3974 | 1.23M | if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop) && prop->override_paths.count > 0) { |
3975 | 64 | res = get_override_layer_override_paths(inst, prop, &override_paths); |
3976 | 64 | if (VK_SUCCESS != res) { |
3977 | 0 | goto out; |
3978 | 0 | } |
3979 | 64 | break; |
3980 | 64 | } |
3981 | 1.23M | } |
3982 | | |
3983 | | // Get a list of manifest files for explicit layers |
3984 | 6.78k | res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, ®ular_instance_layers); |
3985 | 6.78k | if (VK_SUCCESS != res) { |
3986 | 0 | goto out; |
3987 | 0 | } |
3988 | | |
3989 | | // Verify any meta-layers in the list are valid and all the component layers are |
3990 | | // actually present in the available layer list |
3991 | 6.78k | res = verify_all_meta_layers(inst, filters, ®ular_instance_layers, &override_layer_valid); |
3992 | 6.78k | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
3993 | 0 | return res; |
3994 | 0 | } |
3995 | | |
3996 | 6.78k | if (override_layer_valid) { |
3997 | 218 | loader_remove_layers_in_blacklist(inst, ®ular_instance_layers); |
3998 | 218 | if (NULL != inst) { |
3999 | 218 | inst->override_layer_present = true; |
4000 | 218 | } |
4001 | 218 | } |
4002 | | |
4003 | | // Remove disabled layers |
4004 | 1.23M | for (uint32_t i = 0; i < regular_instance_layers.count; ++i) { |
4005 | 1.22M | if (!loader_layer_is_available(inst, filters, ®ular_instance_layers.list[i])) { |
4006 | 0 | loader_remove_layer_in_list(inst, ®ular_instance_layers, i); |
4007 | 0 | i--; |
4008 | 0 | } |
4009 | 1.22M | } |
4010 | | |
4011 | 6.78k | res = combine_settings_layers_with_regular_layers(inst, &settings_layers, ®ular_instance_layers, instance_layers); |
4012 | | |
4013 | 7.47k | out: |
4014 | 7.47k | loader_delete_layer_list_and_properties(inst, &settings_layers); |
4015 | 7.47k | loader_delete_layer_list_and_properties(inst, ®ular_instance_layers); |
4016 | | |
4017 | 7.47k | loader_instance_heap_free(inst, override_paths); |
4018 | 7.47k | return res; |
4019 | 6.78k | } |
4020 | | |
4021 | | VkResult loader_scan_for_implicit_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers, |
4022 | 0 | const struct loader_envvar_all_filters *layer_filters) { |
4023 | 0 | VkResult res = VK_SUCCESS; |
4024 | 0 | struct loader_layer_list settings_layers = {0}; |
4025 | 0 | struct loader_layer_list regular_instance_layers = {0}; |
4026 | 0 | bool override_layer_valid = false; |
4027 | 0 | char *override_paths = NULL; |
4028 | 0 | bool implicit_metalayer_present = false; |
4029 | |
|
4030 | 0 | bool should_search_for_other_layers = true; |
4031 | 0 | res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers); |
4032 | 0 | if (VK_SUCCESS != res) { |
4033 | 0 | goto out; |
4034 | 0 | } |
4035 | | |
4036 | | // Remove layers from settings file that are off, are explicit, or are implicit layers that aren't active |
4037 | 0 | for (uint32_t i = 0; i < settings_layers.count; ++i) { |
4038 | 0 | if (settings_layers.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_OFF || |
4039 | 0 | settings_layers.list[i].settings_control_value == LOADER_SETTINGS_LAYER_UNORDERED_LAYER_LOCATION || |
4040 | 0 | (settings_layers.list[i].type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER || |
4041 | 0 | !loader_implicit_layer_is_enabled(inst, layer_filters, &settings_layers.list[i])) { |
4042 | 0 | loader_remove_layer_in_list(inst, &settings_layers, i); |
4043 | 0 | i--; |
4044 | 0 | } |
4045 | 0 | } |
4046 | | |
4047 | | // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the |
4048 | | // output |
4049 | 0 | if (!should_search_for_other_layers) { |
4050 | 0 | *instance_layers = settings_layers; |
4051 | 0 | memset(&settings_layers, 0, sizeof(struct loader_layer_list)); |
4052 | 0 | goto out; |
4053 | 0 | } |
4054 | | |
4055 | 0 | res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, ®ular_instance_layers); |
4056 | 0 | if (VK_SUCCESS != res) { |
4057 | 0 | goto out; |
4058 | 0 | } |
4059 | | |
4060 | | // Remove any extraneous override layers. |
4061 | 0 | remove_all_non_valid_override_layers(inst, ®ular_instance_layers); |
4062 | | |
4063 | | // Check to see if either the override layer is present, or another implicit meta-layer. |
4064 | | // Each of these may require explicit layers to be enabled at this time. |
4065 | 0 | for (uint32_t i = 0; i < regular_instance_layers.count; i++) { |
4066 | 0 | struct loader_layer_properties *prop = ®ular_instance_layers.list[i]; |
4067 | 0 | if (prop->is_override && loader_implicit_layer_is_enabled(inst, layer_filters, prop)) { |
4068 | 0 | override_layer_valid = true; |
4069 | 0 | res = get_override_layer_override_paths(inst, prop, &override_paths); |
4070 | 0 | if (VK_SUCCESS != res) { |
4071 | 0 | goto out; |
4072 | 0 | } |
4073 | 0 | } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { |
4074 | 0 | implicit_metalayer_present = true; |
4075 | 0 | } |
4076 | 0 | } |
4077 | | |
4078 | | // If either the override layer or an implicit meta-layer are present, we need to add |
4079 | | // explicit layer info as well. Not to worry, though, all explicit layers not included |
4080 | | // in the override layer will be removed below in loader_remove_layers_in_blacklist(). |
4081 | 0 | if (override_layer_valid || implicit_metalayer_present) { |
4082 | 0 | res = |
4083 | 0 | loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, ®ular_instance_layers); |
4084 | 0 | if (VK_SUCCESS != res) { |
4085 | 0 | goto out; |
4086 | 0 | } |
4087 | 0 | } |
4088 | | |
4089 | | // Verify any meta-layers in the list are valid and all the component layers are |
4090 | | // actually present in the available layer list |
4091 | 0 | res = verify_all_meta_layers(inst, layer_filters, ®ular_instance_layers, &override_layer_valid); |
4092 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
4093 | 0 | return res; |
4094 | 0 | } |
4095 | | |
4096 | 0 | if (override_layer_valid || implicit_metalayer_present) { |
4097 | 0 | loader_remove_layers_not_in_implicit_meta_layers(inst, ®ular_instance_layers); |
4098 | 0 | if (override_layer_valid && inst != NULL) { |
4099 | 0 | inst->override_layer_present = true; |
4100 | 0 | } |
4101 | 0 | } |
4102 | | |
4103 | | // Remove disabled layers |
4104 | 0 | for (uint32_t i = 0; i < regular_instance_layers.count; ++i) { |
4105 | 0 | if (!loader_implicit_layer_is_enabled(inst, layer_filters, ®ular_instance_layers.list[i])) { |
4106 | 0 | loader_remove_layer_in_list(inst, ®ular_instance_layers, i); |
4107 | 0 | i--; |
4108 | 0 | } |
4109 | 0 | } |
4110 | |
|
4111 | 0 | res = combine_settings_layers_with_regular_layers(inst, &settings_layers, ®ular_instance_layers, instance_layers); |
4112 | |
|
4113 | 0 | out: |
4114 | 0 | loader_delete_layer_list_and_properties(inst, &settings_layers); |
4115 | 0 | loader_delete_layer_list_and_properties(inst, ®ular_instance_layers); |
4116 | |
|
4117 | 0 | loader_instance_heap_free(inst, override_paths); |
4118 | 0 | return res; |
4119 | 0 | } |
4120 | | |
4121 | 0 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) { |
4122 | | // inst is not wrapped |
4123 | 0 | if (inst == VK_NULL_HANDLE) { |
4124 | 0 | return NULL; |
4125 | 0 | } |
4126 | | |
4127 | 0 | VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst; |
4128 | |
|
4129 | 0 | if (disp_table == NULL) return NULL; |
4130 | | |
4131 | 0 | struct loader_instance *loader_inst = loader_get_instance(inst); |
4132 | |
|
4133 | 0 | if (loader_inst->instance_finished_creation) { |
4134 | 0 | disp_table = &loader_inst->terminator_dispatch; |
4135 | 0 | } |
4136 | |
|
4137 | 0 | bool found_name; |
4138 | 0 | void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name); |
4139 | 0 | if (found_name) { |
4140 | 0 | return addr; |
4141 | 0 | } |
4142 | | |
4143 | | // Check if any drivers support the function, and if so, add it to the unknown function list |
4144 | 0 | addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName); |
4145 | 0 | if (NULL != addr) return addr; |
4146 | | |
4147 | | // Don't call down the chain, this would be an infinite loop |
4148 | 0 | loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName); |
4149 | 0 | return NULL; |
4150 | 0 | } |
4151 | | |
// Terminator for vkGetInstanceProcAddr. The order of checks is significant:
// global/pre-instance entry points must resolve before the instance handle is
// validated, and several compatibility special cases follow.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_terminator(VkInstance inst, const char *pName) {
    // Global functions - Do not need a valid instance handle to query
    if (!strcmp(pName, "vkGetInstanceProcAddr")) {
        return (PFN_vkVoidFunction)loader_gpa_instance_terminator;
    }
    if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) {
        return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator;
    }
    if (!strcmp(pName, "vkCreateInstance")) {
        return (PFN_vkVoidFunction)terminator_CreateInstance;
    }
    // If a layer is querying pre-instance functions using vkGetInstanceProcAddr, we need to return function pointers that match the
    // Vulkan API
    if (!strcmp(pName, "vkEnumerateInstanceLayerProperties")) {
        return (PFN_vkVoidFunction)terminator_EnumerateInstanceLayerProperties;
    }
    if (!strcmp(pName, "vkEnumerateInstanceExtensionProperties")) {
        return (PFN_vkVoidFunction)terminator_EnumerateInstanceExtensionProperties;
    }
    if (!strcmp(pName, "vkEnumerateInstanceVersion")) {
        return (PFN_vkVoidFunction)terminator_EnumerateInstanceVersion;
    }

    // While the spec is very clear that querying vkCreateDevice requires a valid VkInstance, because the loader allowed querying
    // with a NULL VkInstance handle for a long enough time, it is impractical to fix this bug in the loader

    // As such, this is a bug to maintain compatibility for the RTSS layer (Riva Tuner Statistics Server) but may
    // be depended upon by other layers out in the wild.
    if (!strcmp(pName, "vkCreateDevice")) {
        return (PFN_vkVoidFunction)terminator_CreateDevice;
    }

    // inst is not wrapped
    if (inst == VK_NULL_HANDLE) {
        return NULL;
    }
    VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;

    if (disp_table == NULL) return NULL;

    // NOTE(review): loader_inst is dereferenced below without a NULL check; this
    // assumes a handle with a valid dispatch table is always a known instance — confirm.
    struct loader_instance *loader_inst = loader_get_instance(inst);

    // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from
    // vkGetInstanceProcAddr  This is because VK_EXT_debug_utils is an instance level extension with device level functions, and
    // is 'supported' by the loader.
    // These functions need a terminator to handle the case of a driver not supporting VK_EXT_debug_utils when there are layers
    // present which not check for NULL before calling the function.
    if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) {
        return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT : NULL;
    }
    if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) {
        return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT : NULL;
    }
    if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) {
        return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT : NULL;
    }
    if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) {
        return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT : NULL;
    }
    if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) {
        return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT
                                                               : NULL;
    }
    if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) {
        return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT : NULL;
    }
    if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) {
        return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT : NULL;
    }
    if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) {
        return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT : NULL;
    }

    // After instance creation completes, route lookups through the terminator dispatch table
    if (loader_inst->instance_finished_creation) {
        disp_table = &loader_inst->terminator_dispatch;
    }

    bool found_name;
    void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
    if (found_name) {
        return addr;
    }

    // Check if it is an unknown physical device function, to see if any drivers support it.
    addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
    if (addr) {
        return addr;
    }

    // Assume it is an unknown device function, check to see if any drivers support it.
    addr = loader_dev_ext_gpa_term(loader_get_instance(inst), pName);
    if (addr) {
        return addr;
    }

    // Don't call down the chain, this would be an infinite loop
    loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpa_instance_terminator() unrecognized name %s", pName);
    return NULL;
}
4251 | | |
4252 | 0 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_terminator(VkDevice device, const char *pName) { |
4253 | 0 | struct loader_device *dev; |
4254 | 0 | struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev); |
4255 | | |
4256 | | // Return this function if a layer above here is asking for the vkGetDeviceProcAddr. |
4257 | | // This is so we can properly intercept any device commands needing a terminator. |
4258 | 0 | if (!strcmp(pName, "vkGetDeviceProcAddr")) { |
4259 | 0 | return (PFN_vkVoidFunction)loader_gpa_device_terminator; |
4260 | 0 | } |
4261 | | |
4262 | | // NOTE: Device Funcs needing Trampoline/Terminator. |
4263 | | // Overrides for device functions needing a trampoline and |
4264 | | // a terminator because certain device entry-points still need to go |
4265 | | // through a terminator before hitting the ICD. This could be for |
4266 | | // several reasons, but the main one is currently unwrapping an |
4267 | | // object before passing the appropriate info along to the ICD. |
4268 | | // This is why we also have to override the direct ICD call to |
4269 | | // vkGetDeviceProcAddr to intercept those calls. |
4270 | | // If the pName is for a 'known' function but isn't available, due to |
4271 | | // the corresponding extension/feature not being enabled, we need to |
4272 | | // return NULL and not call down to the driver's GetDeviceProcAddr. |
4273 | 0 | if (NULL != dev) { |
4274 | 0 | bool found_name = false; |
4275 | 0 | PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName, &found_name); |
4276 | 0 | if (found_name) { |
4277 | 0 | return addr; |
4278 | 0 | } |
4279 | 0 | } |
4280 | | |
4281 | 0 | if (icd_term == NULL) { |
4282 | 0 | return NULL; |
4283 | 0 | } |
4284 | | |
4285 | 0 | return icd_term->dispatch.GetDeviceProcAddr(device, pName); |
4286 | 0 | } |
4287 | | |
4288 | 0 | struct loader_instance *loader_get_instance(const VkInstance instance) { |
4289 | | // look up the loader_instance in our list by comparing dispatch tables, as |
4290 | | // there is no guarantee the instance is still a loader_instance* after any |
4291 | | // layers which wrap the instance object. |
4292 | 0 | const VkLayerInstanceDispatchTable *disp; |
4293 | 0 | struct loader_instance *ptr_instance = (struct loader_instance *)instance; |
4294 | 0 | if (VK_NULL_HANDLE == instance || LOADER_MAGIC_NUMBER != ptr_instance->magic) { |
4295 | 0 | return NULL; |
4296 | 0 | } else { |
4297 | 0 | disp = loader_get_instance_layer_dispatch(instance); |
4298 | 0 | loader_platform_thread_lock_mutex(&loader_global_instance_list_lock); |
4299 | 0 | for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { |
4300 | 0 | if (&inst->disp->layer_inst_disp == disp) { |
4301 | 0 | ptr_instance = inst; |
4302 | 0 | break; |
4303 | 0 | } |
4304 | 0 | } |
4305 | 0 | loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock); |
4306 | 0 | } |
4307 | 0 | return ptr_instance; |
4308 | 0 | } |
4309 | | |
4310 | 0 | loader_platform_dl_handle loader_open_layer_file(const struct loader_instance *inst, struct loader_layer_properties *prop) { |
4311 | 0 | if ((prop->lib_handle = loader_platform_open_library(prop->lib_name)) == NULL) { |
4312 | 0 | loader_handle_load_library_error(inst, prop->lib_name, &prop->lib_status); |
4313 | 0 | } else { |
4314 | 0 | prop->lib_status = LOADER_LAYER_LIB_SUCCESS_LOADED; |
4315 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Loading layer library %s", prop->lib_name); |
4316 | 0 | } |
4317 | |
|
4318 | 0 | return prop->lib_handle; |
4319 | 0 | } |
4320 | | |
// Go through the source_list and find any layers which match type. If a layer
// with a matching type is found, add it to target_list (and expanded_target_list).
4323 | | VkResult loader_add_implicit_layers(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
4324 | | struct loader_pointer_layer_list *target_list, |
4325 | | struct loader_pointer_layer_list *expanded_target_list, |
4326 | 0 | const struct loader_layer_list *source_list) { |
4327 | 0 | for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) { |
4328 | 0 | struct loader_layer_properties *prop = &source_list->list[src_layer]; |
4329 | 0 | if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { |
4330 | 0 | VkResult result = loader_add_implicit_layer(inst, prop, filters, target_list, expanded_target_list, source_list); |
4331 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
4332 | 0 | } |
4333 | 0 | } |
4334 | 0 | return VK_SUCCESS; |
4335 | 0 | } |
4336 | | |
4337 | 0 | void warn_if_layers_are_older_than_application(struct loader_instance *inst) { |
4338 | 0 | for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) { |
4339 | | // Verify that the layer api version is at least that of the application's request, if not, throw a warning since |
4340 | | // undefined behavior could occur. |
4341 | 0 | struct loader_layer_properties *prop = inst->expanded_activated_layer_list.list[i]; |
4342 | 0 | loader_api_version prop_spec_version = loader_make_version(prop->info.specVersion); |
4343 | 0 | if (!loader_check_version_meets_required(inst->app_api_version, prop_spec_version)) { |
4344 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
4345 | 0 | "Layer %s uses API version %u.%u which is older than the application specified " |
4346 | 0 | "API version of %u.%u. May cause issues.", |
4347 | 0 | prop->info.layerName, prop_spec_version.major, prop_spec_version.minor, inst->app_api_version.major, |
4348 | 0 | inst->app_api_version.minor); |
4349 | 0 | } |
4350 | 0 | } |
4351 | 0 | } |
4352 | | |
4353 | | VkResult loader_enable_instance_layers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo, |
4354 | | const struct loader_layer_list *instance_layers, |
4355 | 0 | const struct loader_envvar_all_filters *layer_filters) { |
4356 | 0 | VkResult res = VK_SUCCESS; |
4357 | |
|
4358 | 0 | assert(inst && "Cannot have null instance"); |
4359 | |
|
4360 | 0 | if (!loader_init_pointer_layer_list(inst, &inst->app_activated_layer_list)) { |
4361 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
4362 | 0 | "loader_enable_instance_layers: Failed to initialize application version of the layer list"); |
4363 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
4364 | 0 | goto out; |
4365 | 0 | } |
4366 | | |
4367 | 0 | if (!loader_init_pointer_layer_list(inst, &inst->expanded_activated_layer_list)) { |
4368 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
4369 | 0 | "loader_enable_instance_layers: Failed to initialize expanded version of the layer list"); |
4370 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
4371 | 0 | goto out; |
4372 | 0 | } |
4373 | | |
4374 | 0 | if (inst->settings.settings_active) { |
4375 | 0 | res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount, |
4376 | 0 | pCreateInfo->ppEnabledLayerNames, &inst->instance_layer_list, |
4377 | 0 | &inst->app_activated_layer_list, &inst->expanded_activated_layer_list); |
4378 | 0 | warn_if_layers_are_older_than_application(inst); |
4379 | |
|
4380 | 0 | goto out; |
4381 | 0 | } |
4382 | | |
4383 | | // Add any implicit layers first |
4384 | 0 | res = loader_add_implicit_layers(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list, |
4385 | 0 | instance_layers); |
4386 | 0 | if (res != VK_SUCCESS) { |
4387 | 0 | goto out; |
4388 | 0 | } |
4389 | | |
4390 | | // Add any layers specified via environment variable next |
4391 | 0 | res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &inst->app_activated_layer_list, |
4392 | 0 | &inst->expanded_activated_layer_list, instance_layers); |
4393 | 0 | if (res != VK_SUCCESS) { |
4394 | 0 | goto out; |
4395 | 0 | } |
4396 | | |
4397 | | // Add layers specified by the application |
4398 | 0 | res = loader_add_layer_names_to_list(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list, |
4399 | 0 | pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers); |
4400 | |
|
4401 | 0 | warn_if_layers_are_older_than_application(inst); |
4402 | 0 | out: |
4403 | 0 | return res; |
4404 | 0 | } |
4405 | | |
4406 | | // Determine the layer interface version to use. |
4407 | | bool loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version, |
4408 | 0 | VkNegotiateLayerInterface *interface_struct) { |
4409 | 0 | memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface)); |
4410 | 0 | interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT; |
4411 | 0 | interface_struct->loaderLayerInterfaceVersion = 1; |
4412 | 0 | interface_struct->pNext = NULL; |
4413 | |
|
4414 | 0 | if (fp_negotiate_layer_version != NULL) { |
4415 | | // Layer supports the negotiation API, so call it with the loader's |
4416 | | // latest version supported |
4417 | 0 | interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION; |
4418 | 0 | VkResult result = fp_negotiate_layer_version(interface_struct); |
4419 | |
|
4420 | 0 | if (result != VK_SUCCESS) { |
4421 | | // Layer no longer supports the loader's latest interface version so |
4422 | | // fail loading the Layer |
4423 | 0 | return false; |
4424 | 0 | } |
4425 | 0 | } |
4426 | | |
4427 | 0 | if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) { |
4428 | | // Loader no longer supports the layer's latest interface version so |
4429 | | // fail loading the layer |
4430 | 0 | return false; |
4431 | 0 | } |
4432 | | |
4433 | 0 | return true; |
4434 | 0 | } |
4435 | | |
4436 | | // Every extension that has a loader-defined trampoline needs to be marked as enabled or disabled so that we know whether or |
4437 | | // not to return that trampoline when vkGetDeviceProcAddr is called |
4438 | | void setup_logical_device_enabled_layer_extensions(const struct loader_instance *inst, struct loader_device *dev, |
4439 | | const struct loader_extension_list *icd_exts, |
4440 | 0 | const VkDeviceCreateInfo *pCreateInfo) { |
4441 | | // no enabled extensions, early exit |
4442 | 0 | if (pCreateInfo->ppEnabledExtensionNames == NULL) { |
4443 | 0 | return; |
4444 | 0 | } |
4445 | | // Can only setup debug marker as debug utils is an instance extensions. |
4446 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i) { |
4447 | 0 | if (pCreateInfo->ppEnabledExtensionNames[i] && |
4448 | 0 | !strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { |
4449 | | // Check if its supported by the driver |
4450 | 0 | for (uint32_t j = 0; j < icd_exts->count; ++j) { |
4451 | 0 | if (!strcmp(icd_exts->list[j].extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { |
4452 | 0 | dev->layer_extensions.ext_debug_marker_enabled = true; |
4453 | 0 | } |
4454 | 0 | } |
4455 | | // also check if any layers support it. |
4456 | 0 | for (uint32_t j = 0; j < inst->app_activated_layer_list.count; j++) { |
4457 | 0 | struct loader_layer_properties *layer = inst->app_activated_layer_list.list[j]; |
4458 | 0 | for (uint32_t k = 0; k < layer->device_extension_list.count; k++) { |
4459 | 0 | if (!strcmp(layer->device_extension_list.list[k].props.extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { |
4460 | 0 | dev->layer_extensions.ext_debug_marker_enabled = true; |
4461 | 0 | } |
4462 | 0 | } |
4463 | 0 | } |
4464 | 0 | } |
4465 | 0 | } |
4466 | 0 | } |
4467 | | |
// Loader callback handed to layers via VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK: lets a
// layer create a VkDevice while the loader still constructs the full device call chain.
// `instance` may be VK_NULL_HANDLE, in which case `physicalDevice` is a loader trampoline
// object that must be unwrapped. Returns VK_SUCCESS or an error; on failure any partially
// created loader_device is unlinked and destroyed.
VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice,
                                                          const VkDeviceCreateInfo *pCreateInfo,
                                                          const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
                                                          PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) {
    VkResult res;
    VkPhysicalDevice internal_device = VK_NULL_HANDLE;
    struct loader_device *dev = NULL;
    struct loader_instance *inst = NULL;

    // With a real VkInstance the physical device is already the internal handle;
    // otherwise unwrap the loader trampoline to recover both handle and instance.
    if (instance != VK_NULL_HANDLE) {
        inst = loader_get_instance(instance);
        internal_device = physicalDevice;
    } else {
        struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice;
        internal_device = phys_dev->phys_dev;
        inst = (struct loader_instance *)phys_dev->this_instance;
    }

    // Get the physical device (ICD) extensions
    struct loader_extension_list icd_exts = {0};
    icd_exts.list = NULL;
    res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
    if (VK_SUCCESS != res) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create ICD extension list");
        goto out;
    }

    // Enumerate device extensions through the calling layer's GIPA when one was
    // supplied, otherwise straight through the instance dispatch table.
    PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL;
    if (layerGIPA != NULL) {
        enumDeviceExtensionProperties =
            (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties");
    } else {
        enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties;
    }
    res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to add extensions to list");
        goto out;
    }

    // Make sure requested extensions to be enabled are supported
    res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to validate extensions in list");
        goto out;
    }

    dev = loader_create_logical_device(inst, pAllocator);
    if (dev == NULL) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }

    setup_logical_device_enabled_layer_extensions(inst, dev, &icd_exts, pCreateInfo);

    res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create device chain.");
        goto out;
    }

    *pDevice = dev->chain_device;

    // Initialize any device extension dispatch entry's from the instance list
    loader_init_dispatch_dev_ext(inst, dev);

    // Initialize WSI device extensions as part of core dispatch since loader
    // has dedicated trampoline code for these
    loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr,
                                                dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice);

out:

    // Failure cleanup
    if (VK_SUCCESS != res) {
        if (NULL != dev) {
            // Find the icd_term this device belongs to then remove it from that icd_term.
            // Need to iterate the linked lists and remove the device from it. Don't delete
            // the device here since it may not have been added to the icd_term and there
            // are other allocations attached to it.
            struct loader_icd_term *icd_term = inst->icd_terms;
            bool found = false;
            while (!found && NULL != icd_term) {
                struct loader_device *cur_dev = icd_term->logical_device_list;
                struct loader_device *prev_dev = NULL;
                while (NULL != cur_dev) {
                    if (cur_dev == dev) {
                        // Unlink: either the list head or a mid-list node.
                        if (cur_dev == icd_term->logical_device_list) {
                            icd_term->logical_device_list = cur_dev->next;
                        } else if (prev_dev) {
                            prev_dev->next = cur_dev->next;
                        }

                        found = true;
                        break;
                    }
                    prev_dev = cur_dev;
                    cur_dev = cur_dev->next;
                }
                icd_term = icd_term->next;
            }
            // Now destroy the device and the allocations associated with it.
            loader_destroy_logical_device(dev, pAllocator);
        }
    }

    if (NULL != icd_exts.list) {
        loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
    }
    return res;
}
4579 | | |
4580 | | VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator, |
4581 | 0 | PFN_vkDestroyDevice destroyFunction) { |
4582 | 0 | struct loader_device *dev; |
4583 | |
|
4584 | 0 | if (device == VK_NULL_HANDLE) { |
4585 | 0 | return; |
4586 | 0 | } |
4587 | | |
4588 | 0 | struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev); |
4589 | |
|
4590 | 0 | destroyFunction(device, pAllocator); |
4591 | 0 | if (NULL != dev) { |
4592 | 0 | dev->chain_device = NULL; |
4593 | 0 | dev->icd_device = NULL; |
4594 | 0 | loader_remove_logical_device(icd_term, dev, pAllocator); |
4595 | 0 | } |
4596 | 0 | } |
4597 | | |
// Given the list of layers to activate in the loader_instance
// structure. This function will add a VkLayerInstanceCreateInfo
// structure to the VkInstanceCreateInfo.pNext pointer.
// Each activated layer will have its own VkLayerInstanceLink
// structure that tells the layer what Get*ProcAddr to call to
// get function pointers to the next layer down.
// Once the chain info has been created this function will
// execute the CreateInstance call chain. Each layer will
// then have an opportunity in its CreateInstance function
// to set up its dispatch table when the lower layer returns
// successfully.
// Each layer can wrap or not-wrap the returned VkInstance object
// as it sees fit.
// The instance chain is terminated by a loader function
// that will call CreateInstance on all available ICDs and
// cache those VkInstance objects for future use.
VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                      struct loader_instance *inst, VkInstance *created_instance) {
    uint32_t num_activated_layers = 0;
    struct activated_layer_info *activated_layers = NULL;
    VkLayerInstanceCreateInfo chain_info;
    VkLayerInstanceLink *layer_instance_link_info = NULL;
    VkInstanceCreateInfo loader_create_info;
    VkResult res;

    // All Get*ProcAddr pointers start at the loader's terminators; every layer
    // that activates successfully is chained in ahead of the current "next".
    PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_terminator;
    PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_terminator;
    PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_terminator;
    PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_terminator;
    PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_terminator;

    // Work on a shallow copy so the application's create info is never modified.
    memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));

    if (inst->expanded_activated_layer_list.count > 0) {
        chain_info.u.pLayerInfo = NULL;
        chain_info.pNext = pCreateInfo->pNext;
        chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        chain_info.function = VK_LAYER_LINK_INFO;
        loader_create_info.pNext = &chain_info;

        layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count);
        if (!layer_instance_link_info) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_create_instance_chain: Failed to alloc Instance objects for layer");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
        if (!activated_layers) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_create_instance_chain: Failed to alloc activated layer storage array");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        // Create instance chain of enabled layers.
        // Iterate in reverse so the last layer in the list ends up closest to the driver.
        for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
            struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
            loader_platform_dl_handle lib_handle;

            // Skip it if a Layer with the same name has been already successfully activated
            if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
                continue;
            }

            lib_handle = loader_open_layer_file(inst, layer_prop);
            if (layer_prop->lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
                return VK_ERROR_OUT_OF_HOST_MEMORY;
            }
            // Layers that failed to load (for non-OOM reasons) are skipped, not fatal.
            if (!lib_handle) {
                continue;
            }

            if (NULL == layer_prop->functions.negotiate_layer_interface) {
                PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL;
                bool functions_in_interface = false;
                // Use the manifest-provided symbol name when present, else the standard one.
                if (!layer_prop->functions.str_negotiate_interface || strlen(layer_prop->functions.str_negotiate_interface) == 0) {
                    negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
                        lib_handle, "vkNegotiateLoaderLayerInterfaceVersion");
                } else {
                    negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
                        lib_handle, layer_prop->functions.str_negotiate_interface);
                }

                // If we can negotiate an interface version, then we can also
                // get everything we need from the one function call, so try
                // that first, and see if we can get all the function pointers
                // necessary from that one call.
                if (NULL != negotiate_interface) {
                    layer_prop->functions.negotiate_layer_interface = negotiate_interface;

                    VkNegotiateLayerInterface interface_struct;

                    if (loader_get_layer_interface_version(negotiate_interface, &interface_struct)) {
                        // Go ahead and set the properties version to the
                        // correct value.
                        layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion;

                        // If the interface is 2 or newer, we have access to the
                        // new GetPhysicalDeviceProcAddr function, so grab it,
                        // and the other necessary functions, from the
                        // structure.
                        if (interface_struct.loaderLayerInterfaceVersion > 1) {
                            cur_gipa = interface_struct.pfnGetInstanceProcAddr;
                            cur_gdpa = interface_struct.pfnGetDeviceProcAddr;
                            cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr;
                            if (cur_gipa != NULL) {
                                // We've set the functions, so make sure we
                                // don't do the unnecessary calls later.
                                functions_in_interface = true;
                            }
                        }
                    }
                }

                // Fallback: resolve vkGetInstanceProcAddr (or the manifest-named
                // equivalent) directly from the library.
                if (!functions_in_interface) {
                    if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) {
                        if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
                            cur_gipa =
                                (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
                            layer_prop->functions.get_instance_proc_addr = cur_gipa;

                            if (NULL == cur_gipa) {
                                loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                                           "loader_create_instance_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\"",
                                           layer_prop->lib_name);
                                continue;
                            }
                        } else {
                            cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle,
                                                                                                   layer_prop->functions.str_gipa);

                            if (NULL == cur_gipa) {
                                loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                                           "loader_create_instance_chain: Failed to find \'%s\' in layer \"%s\"",
                                           layer_prop->functions.str_gipa, layer_prop->lib_name);
                                continue;
                            }
                        }
                    }
                }
            }

            // Link this layer in front of everything activated so far: its "next"
            // pointers are the previous head of the chain.
            layer_instance_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
            layer_instance_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = next_gipa;
            layer_instance_link_info[num_activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa;
            next_gipa = cur_gipa;
            if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) {
                layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa;
                next_gpdpa = cur_gpdpa;
            }
            if (layer_prop->interface_version > 1 && cur_gipa != NULL) {
                layer_prop->functions.get_instance_proc_addr = cur_gipa;
            }
            if (layer_prop->interface_version > 1 && cur_gdpa != NULL) {
                layer_prop->functions.get_device_proc_addr = cur_gdpa;
            }

            chain_info.u.pLayerInfo = &layer_instance_link_info[num_activated_layers];

            // Record bookkeeping info for logging and duplicate detection.
            activated_layers[num_activated_layers].name = layer_prop->info.layerName;
            activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
            activated_layers[num_activated_layers].library = layer_prop->lib_name;
            activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
            activated_layers[num_activated_layers].enabled_by_what = layer_prop->enabled_by_what;
            if (activated_layers[num_activated_layers].is_implicit) {
                activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
                activated_layers[num_activated_layers].enable_name_env = layer_prop->enable_env_var.name;
                activated_layers[num_activated_layers].enable_value_env = layer_prop->enable_env_var.value;
            }

            loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Insert instance layer \"%s\" (%s)",
                       layer_prop->info.layerName, layer_prop->lib_name);

            num_activated_layers++;
        }
    }

    // Make sure each layer requested by the application was actually loaded
    for (uint32_t exp = 0; exp < inst->expanded_activated_layer_list.count; ++exp) {
        struct loader_layer_properties *exp_layer_prop = inst->expanded_activated_layer_list.list[exp];
        bool found = false;
        for (uint32_t act = 0; act < num_activated_layers; ++act) {
            if (!strcmp(activated_layers[act].name, exp_layer_prop->info.layerName)) {
                found = true;
                break;
            }
        }
        // If it wasn't found, we want to at least log an error. However, if it was enabled by the application directly,
        // we want to return a bad layer error.
        if (!found) {
            bool app_requested = false;
            for (uint32_t act = 0; act < pCreateInfo->enabledLayerCount; ++act) {
                if (!strcmp(pCreateInfo->ppEnabledLayerNames[act], exp_layer_prop->info.layerName)) {
                    app_requested = true;
                    break;
                }
            }
            VkFlags log_flag = VULKAN_LOADER_LAYER_BIT;
            char ending = '.';
            if (app_requested) {
                log_flag |= VULKAN_LOADER_ERROR_BIT;
                ending = '!';
            } else {
                log_flag |= VULKAN_LOADER_INFO_BIT;
            }
            switch (exp_layer_prop->lib_status) {
                case LOADER_LAYER_LIB_NOT_LOADED:
                    loader_log(inst, log_flag, 0, "Requested layer \"%s\" was not loaded%c", exp_layer_prop->info.layerName,
                               ending);
                    break;
                case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
                    loader_log(inst, log_flag, 0, "Requested layer \"%s\" was wrong bit-type%c", exp_layer_prop->info.layerName,
                               ending);
                    break;
                }
                case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
                    loader_log(inst, log_flag, 0, "Requested layer \"%s\" failed to load%c", exp_layer_prop->info.layerName,
                               ending);
                    break;
                case LOADER_LAYER_LIB_SUCCESS_LOADED:
                case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
                    // Shouldn't be able to reach this but if it is, best to report a debug
                    loader_log(inst, log_flag, 0,
                               "Shouldn't reach this. A valid version of requested layer %s was loaded but was not found in the "
                               "list of activated layers%c",
                               exp_layer_prop->info.layerName, ending);
                    break;
            }
            if (app_requested) {
                return VK_ERROR_LAYER_NOT_PRESENT;
            }
        }
    }

    VkLoaderFeatureFlags feature_flags = 0;
#if defined(_WIN32)
    feature_flags = windows_initialize_dxgi();
#endif

    // The following line of code is actually invalid at least according to the Vulkan spec with header update 1.2.193 and onwards.
    // The update required calls to vkGetInstanceProcAddr querying "global" functions (which includes vkCreateInstance) to pass NULL
    // for the instance parameter. Because it wasn't required to be NULL before, there may be layers which expect the loader's
    // behavior of passing a non-NULL value into vkGetInstanceProcAddr.
    // In an abundance of caution, the incorrect code remains as is, with a big comment to indicate that it's wrong.
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance");
    if (fpCreateInstance) {
        // Prepend the loader's three informational pNext structures (loader-data
        // callback, layer-device callbacks, feature flags) before calling down.
        VkLayerInstanceCreateInfo instance_dispatch;
        instance_dispatch.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        instance_dispatch.pNext = loader_create_info.pNext;
        instance_dispatch.function = VK_LOADER_DATA_CALLBACK;
        instance_dispatch.u.pfnSetInstanceLoaderData = vkSetInstanceDispatch;

        VkLayerInstanceCreateInfo device_callback;
        device_callback.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        device_callback.pNext = &instance_dispatch;
        device_callback.function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK;
        device_callback.u.layerDevice.pfnLayerCreateDevice = loader_layer_create_device;
        device_callback.u.layerDevice.pfnLayerDestroyDevice = loader_layer_destroy_device;

        VkLayerInstanceCreateInfo loader_features;
        loader_features.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        loader_features.pNext = &device_callback;
        loader_features.function = VK_LOADER_FEATURES;
        loader_features.u.loaderFeatures = feature_flags;

        loader_create_info.pNext = &loader_features;

        // If layer debugging is enabled, let's print out the full callstack with layers in their
        // defined order.
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "vkCreateInstance layer callstack setup to:");
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Application>");
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Loader>");
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
        for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
            uint32_t index = num_activated_layers - cur_layer - 1;
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " %s", activated_layers[index].name);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Type: %s",
                       activated_layers[index].is_implicit ? "Implicit" : "Explicit");
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Enabled By: %s",
                       get_enabled_by_what_str(activated_layers[index].enabled_by_what));
            if (activated_layers[index].is_implicit) {
                loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Disable Env Var: %s",
                           activated_layers[index].disable_env);
                if (activated_layers[index].enable_name_env) {
                    loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0,
                               " This layer was enabled because Env Var %s was set to Value %s",
                               activated_layers[index].enable_name_env, activated_layers[index].enable_value_env);
                }
            }
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Manifest: %s", activated_layers[index].manifest);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Library: %s", activated_layers[index].library);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
        }
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Drivers>");

        res = fpCreateInstance(&loader_create_info, pAllocator, created_instance);
    } else {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_instance_chain: Failed to find \'vkCreateInstance\'");
        // Couldn't find CreateInstance function!
        res = VK_ERROR_INITIALIZATION_FAILED;
    }

    if (res == VK_SUCCESS) {
        // Copy the current disp table into the terminator_dispatch table so we can use it in loader_gpa_instance_terminator()
        memcpy(&inst->terminator_dispatch, &inst->disp->layer_inst_disp, sizeof(VkLayerInstanceDispatchTable));

        loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance);
        inst->instance = *created_instance;

        // Remember the layer names the application enabled; needed later (e.g. for
        // device creation validation).
        if (pCreateInfo->enabledLayerCount > 0 && pCreateInfo->ppEnabledLayerNames != NULL) {
            res = create_string_list(inst, pCreateInfo->enabledLayerCount, &inst->enabled_layer_names);
            if (res != VK_SUCCESS) {
                return res;
            }

            for (uint32_t i = 0; i < pCreateInfo->enabledLayerCount; ++i) {
                res = copy_str_to_string_list(inst, &inst->enabled_layer_names, pCreateInfo->ppEnabledLayerNames[i],
                                              strlen(pCreateInfo->ppEnabledLayerNames[i]));
                if (res != VK_SUCCESS) return res;
            }
        }
    }

    return res;
}
4924 | | |
4925 | 0 | void loader_activate_instance_layer_extensions(struct loader_instance *inst, VkInstance created_inst) { |
4926 | 0 | loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, inst->disp->layer_inst_disp.GetInstanceProcAddr, |
4927 | 0 | created_inst); |
4928 | 0 | } |
4929 | | |
// Build and invoke the vkCreateDevice call chain for the given physical device.
// The chain is assembled bottom-up from inst->expanded_activated_layer_list,
// terminating in loader_gpa_device_terminator / loader_gpa_instance_terminator.
// On success, fills in dev->chain_device and the device dispatch tables.
//
// pd            - physical device handle for the next level down (not the app's
//                 trampoline handle)
// pCreateInfo   - application create info; copied locally so its pNext chain
//                 can be modified before calling down
// inst          - owning loader instance
// dev           - loader device wrapper receiving the created device
// callingLayer  - GIPA of a layer that is itself calling vkCreateDevice; chain
//                 construction stops when this layer is reached
// layerNextGDPA - out param: the GetDeviceProcAddr of the chain below
//                 callingLayer (only written when callingLayer is matched)
//
// NOTE(review): on Apple the function is compiled with
// __attribute__((optnone)) - presumably a workaround for an optimizer issue on
// that platform; confirm before removing.
#if defined(__APPLE__)
VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
                                    const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
                                    struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
                                    PFN_vkGetDeviceProcAddr *layerNextGDPA) __attribute__((optnone)) {
#else
VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
                                    const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
                                    struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
                                    PFN_vkGetDeviceProcAddr *layerNextGDPA) {
#endif
    uint32_t num_activated_layers = 0;
    struct activated_layer_info *activated_layers = NULL;
    VkLayerDeviceLink *layer_device_link_info;
    VkLayerDeviceCreateInfo chain_info;
    VkDeviceCreateInfo loader_create_info;
    VkDeviceGroupDeviceCreateInfo *original_device_group_create_info_struct = NULL;
    VkResult res;

    // Start both chains at the loader terminators; each activated layer is
    // pushed in front of these as the loop below walks the layer list.
    PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_terminator;
    PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_terminator;

    // Shallow copy - the local copy's pNext chain is spliced below without
    // touching the application's struct.
    memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));

    // Device layers are deprecated: warn if the app passed a device layer list
    // that differs from the layers enabled at instance creation time.
    if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames != NULL) {
        bool invalid_device_layer_usage = false;

        if (loader_create_info.enabledLayerCount != inst->enabled_layer_names.count && loader_create_info.enabledLayerCount > 0) {
            invalid_device_layer_usage = true;
        } else if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames == NULL) {
            invalid_device_layer_usage = true;
        } else if (loader_create_info.enabledLayerCount == 0 && loader_create_info.ppEnabledLayerNames != NULL) {
            invalid_device_layer_usage = true;
        } else if (inst->enabled_layer_names.list != NULL) {
            // Counts match - compare each name positionally against the
            // instance's enabled layer list.
            for (uint32_t i = 0; i < loader_create_info.enabledLayerCount; i++) {
                const char *device_layer_names = loader_create_info.ppEnabledLayerNames[i];

                if (strcmp(device_layer_names, inst->enabled_layer_names.list[i]) != 0) {
                    invalid_device_layer_usage = true;
                    break;
                }
            }
        }

        if (invalid_device_layer_usage) {
            loader_log(
                inst, VULKAN_LOADER_WARN_BIT, 0,
                "loader_create_device_chain: Using deprecated and ignored 'ppEnabledLayerNames' member of 'VkDeviceCreateInfo' "
                "when creating a Vulkan device.");
        }
    }

    // Before we continue, we need to find out if the KHR_device_group extension is in the enabled list. If it is, we then
    // need to look for the corresponding VkDeviceGroupDeviceCreateInfo struct in the device list. This is because we
    // need to replace all the incoming physical device values (which are really loader trampoline physical device values)
    // with the layer/ICD version.
    {
        VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
        VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
        while (NULL != pNext) {
            if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
                VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
                if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
                    // Stack-allocate a shallow copy of the struct plus a
                    // replacement physical-device array (freed automatically
                    // when this function returns).
                    VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
                    VkPhysicalDevice *phys_dev_array = NULL;
                    if (NULL == temp_struct) {
                        return VK_ERROR_OUT_OF_HOST_MEMORY;
                    }
                    memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
                    phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
                    if (NULL == phys_dev_array) {
                        return VK_ERROR_OUT_OF_HOST_MEMORY;
                    }

                    // Before calling down, replace the incoming physical device values (which are really loader trampoline
                    // physical devices) with the next layer (or possibly even the terminator) physical device values.
                    struct loader_physical_device_tramp *cur_tramp;
                    for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
                        cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev];
                        phys_dev_array[phys_dev] = cur_tramp->phys_dev;
                    }
                    temp_struct->pPhysicalDevices = phys_dev_array;

                    // Remember the app's original struct so the chain can be
                    // restored after vkCreateDevice returns.
                    original_device_group_create_info_struct = (VkDeviceGroupDeviceCreateInfo *)pPrev->pNext;

                    // Replace the old struct in the pNext chain with this one.
                    pPrev->pNext = (VkBaseOutStructure *)temp_struct;
                }
                break;
            }

            pPrev = pNext;
            pNext = pNext->pNext;
        }
    }
    if (inst->expanded_activated_layer_list.count > 0) {
        layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * inst->expanded_activated_layer_list.count);
        if (!layer_device_link_info) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_create_device_chain: Failed to alloc Device objects for layer. Skipping Layer.");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
        if (!activated_layers) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_create_device_chain: Failed to alloc activated layer storage array");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        // Splice the VK_LAYER_LINK_INFO struct into the front of the pNext
        // chain; layers locate it to find the next link in the chain.
        chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
        chain_info.function = VK_LAYER_LINK_INFO;
        chain_info.u.pLayerInfo = NULL;
        chain_info.pNext = loader_create_info.pNext;
        loader_create_info.pNext = &chain_info;

        // Create instance chain of enabled layers
        // Iterated in reverse so the first layer in the list ends up closest
        // to the application.
        for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
            struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
            loader_platform_dl_handle lib_handle = layer_prop->lib_handle;

            // Skip it if a Layer with the same name has been already successfully activated
            if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
                continue;
            }

            // Skip the layer if the handle is NULL - this is likely because the library failed to load but wasn't removed from
            // the list.
            if (!lib_handle) {
                continue;
            }

            // The Get*ProcAddr pointers will already be filled in if they were received from either the json file or the
            // version negotiation
            if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
                if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
                    fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
                    layer_prop->functions.get_instance_proc_addr = fpGIPA;
                } else
                    fpGIPA =
                        (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
                if (!fpGIPA) {
                    loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                               "loader_create_device_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\". "
                               "Skipping layer.",
                               layer_prop->lib_name);
                    continue;
                }
            }

            if (fpGIPA == callingLayer) {
                if (layerNextGDPA != NULL) {
                    *layerNextGDPA = nextGDPA;
                }
                // Break here because if fpGIPA is the same as callingLayer, that means a layer is trying to create a device,
                // and once we don't want to continue any further as the next layer will be the calling layer
                break;
            }

            if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) {
                if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) {
                    fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr");
                    layer_prop->functions.get_device_proc_addr = fpGDPA;
                } else
                    fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa);
                if (!fpGDPA) {
                    loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                               "Failed to find vkGetDeviceProcAddr in layer \"%s\"", layer_prop->lib_name);
                    continue;
                }
            }

            // Push this layer onto the front of the chain: it will call the
            // previous nextGIPA/nextGDPA when it forwards calls down.
            layer_device_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
            layer_device_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = nextGIPA;
            layer_device_link_info[num_activated_layers].pfnNextGetDeviceProcAddr = nextGDPA;
            chain_info.u.pLayerInfo = &layer_device_link_info[num_activated_layers];
            nextGIPA = fpGIPA;
            nextGDPA = fpGDPA;

            // Record bookkeeping info used for duplicate detection above and
            // for the callstack log below.
            activated_layers[num_activated_layers].name = layer_prop->info.layerName;
            activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
            activated_layers[num_activated_layers].library = layer_prop->lib_name;
            activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
            activated_layers[num_activated_layers].enabled_by_what = layer_prop->enabled_by_what;
            if (activated_layers[num_activated_layers].is_implicit) {
                activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
            }

            loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Inserted device layer \"%s\" (%s)",
                       layer_prop->info.layerName, layer_prop->lib_name);

            num_activated_layers++;
        }
    }

    VkDevice created_device = (VkDevice)dev;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice");
    if (fpCreateDevice) {
        VkLayerDeviceCreateInfo create_info_disp;

        create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
        create_info_disp.function = VK_LOADER_DATA_CALLBACK;

        create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch;

        // If layer debugging is enabled, let's print out the full callstack with layers in their
        // defined order.
        uint32_t layer_driver_bits = VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT;
        loader_log(inst, layer_driver_bits, 0, "vkCreateDevice layer callstack setup to:");
        loader_log(inst, layer_driver_bits, 0, " <Application>");
        loader_log(inst, layer_driver_bits, 0, " ||");
        loader_log(inst, layer_driver_bits, 0, " <Loader>");
        loader_log(inst, layer_driver_bits, 0, " ||");
        // Walk the activated layers in reverse so the log reads top-down from
        // the application towards the device.
        for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
            uint32_t index = num_activated_layers - cur_layer - 1;
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " %s", activated_layers[index].name);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Type: %s",
                       activated_layers[index].is_implicit ? "Implicit" : "Explicit");
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Enabled By: %s",
                       get_enabled_by_what_str(activated_layers[index].enabled_by_what));
            if (activated_layers[index].is_implicit) {
                loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Disable Env Var: %s",
                           activated_layers[index].disable_env);
            }
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Manifest: %s", activated_layers[index].manifest);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Library: %s", activated_layers[index].library);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
        }
        loader_log(inst, layer_driver_bits, 0, " <Device>");
        create_info_disp.pNext = loader_create_info.pNext;
        loader_create_info.pNext = &create_info_disp;
        res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device);
        if (res != VK_SUCCESS) {
            return res;
        }
        dev->chain_device = created_device;

        // Because we changed the pNext chain to use our own VkDeviceGroupDeviceCreateInfo, we need to fixup the chain to
        // point back at the original VkDeviceGroupDeviceCreateInfo.
        VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
        VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
        while (NULL != pNext) {
            if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
                VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
                if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
                    pPrev->pNext = (VkBaseOutStructure *)original_device_group_create_info_struct;
                }
                break;
            }

            pPrev = pNext;
            pNext = pNext->pNext;
        }

    } else {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_create_device_chain: Failed to find \'vkCreateDevice\' in layers or ICD");
        // Couldn't find CreateDevice function!
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Initialize device dispatch table
    loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device);
    // Initialize the dispatch table to functions which need terminators
    // These functions point directly to the driver, not the terminator functions
    init_extension_device_proc_terminator_dispatch(dev);

    return res;
}
5199 | | |
5200 | | VkResult loader_validate_layers(const struct loader_instance *inst, const uint32_t layer_count, |
5201 | 7.47k | const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) { |
5202 | 7.47k | struct loader_layer_properties *prop; |
5203 | | |
5204 | 7.47k | if (layer_count > 0 && ppEnabledLayerNames == NULL) { |
5205 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5206 | 0 | "loader_validate_layers: ppEnabledLayerNames is NULL but enabledLayerCount is greater than zero"); |
5207 | 0 | return VK_ERROR_LAYER_NOT_PRESENT; |
5208 | 0 | } |
5209 | | |
5210 | 8.45k | for (uint32_t i = 0; i < layer_count; i++) { |
5211 | 7.47k | VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]); |
5212 | 7.47k | if (result != VK_STRING_ERROR_NONE) { |
5213 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5214 | 0 | "loader_validate_layers: ppEnabledLayerNames contains string that is too long or is badly formed"); |
5215 | 0 | return VK_ERROR_LAYER_NOT_PRESENT; |
5216 | 0 | } |
5217 | | |
5218 | 7.47k | prop = loader_find_layer_property(ppEnabledLayerNames[i], list); |
5219 | 7.47k | if (NULL == prop) { |
5220 | 6.49k | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5221 | 6.49k | "loader_validate_layers: Layer %d does not exist in the list of available layers", i); |
5222 | 6.49k | return VK_ERROR_LAYER_NOT_PRESENT; |
5223 | 6.49k | } |
5224 | 981 | if (inst->settings.settings_active && prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_ON && |
5225 | 981 | prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) { |
5226 | 1 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5227 | 1 | "loader_validate_layers: Layer %d was explicitly prevented from being enabled by the loader settings file", |
5228 | 1 | i); |
5229 | 1 | return VK_ERROR_LAYER_NOT_PRESENT; |
5230 | 1 | } |
5231 | 981 | } |
5232 | 980 | return VK_SUCCESS; |
5233 | 7.47k | } |
5234 | | |
5235 | | VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts, |
5236 | | const struct loader_layer_list *instance_layers, |
5237 | | const struct loader_envvar_all_filters *layer_filters, |
5238 | 0 | const VkInstanceCreateInfo *pCreateInfo) { |
5239 | 0 | VkExtensionProperties *extension_prop; |
5240 | 0 | char *env_value; |
5241 | 0 | bool check_if_known = true; |
5242 | 0 | VkResult res = VK_SUCCESS; |
5243 | |
|
5244 | 0 | struct loader_pointer_layer_list active_layers = {0}; |
5245 | 0 | struct loader_pointer_layer_list expanded_layers = {0}; |
5246 | |
|
5247 | 0 | if (pCreateInfo->enabledExtensionCount > 0 && pCreateInfo->ppEnabledExtensionNames == NULL) { |
5248 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5249 | 0 | "loader_validate_instance_extensions: Instance ppEnabledExtensionNames is NULL but enabledExtensionCount is " |
5250 | 0 | "greater than zero"); |
5251 | 0 | return VK_ERROR_EXTENSION_NOT_PRESENT; |
5252 | 0 | } |
5253 | 0 | if (!loader_init_pointer_layer_list(inst, &active_layers)) { |
5254 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5255 | 0 | goto out; |
5256 | 0 | } |
5257 | 0 | if (!loader_init_pointer_layer_list(inst, &expanded_layers)) { |
5258 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5259 | 0 | goto out; |
5260 | 0 | } |
5261 | | |
5262 | 0 | if (inst->settings.settings_active) { |
5263 | 0 | res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount, |
5264 | 0 | pCreateInfo->ppEnabledLayerNames, instance_layers, &active_layers, |
5265 | 0 | &expanded_layers); |
5266 | 0 | if (res != VK_SUCCESS) { |
5267 | 0 | goto out; |
5268 | 0 | } |
5269 | 0 | } else { |
5270 | | // Build the lists of active layers (including meta layers) and expanded layers (with meta layers resolved to their |
5271 | | // components) |
5272 | 0 | res = loader_add_implicit_layers(inst, layer_filters, &active_layers, &expanded_layers, instance_layers); |
5273 | 0 | if (res != VK_SUCCESS) { |
5274 | 0 | goto out; |
5275 | 0 | } |
5276 | 0 | res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &active_layers, |
5277 | 0 | &expanded_layers, instance_layers); |
5278 | 0 | if (res != VK_SUCCESS) { |
5279 | 0 | goto out; |
5280 | 0 | } |
5281 | 0 | res = loader_add_layer_names_to_list(inst, layer_filters, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount, |
5282 | 0 | pCreateInfo->ppEnabledLayerNames, instance_layers); |
5283 | 0 | if (VK_SUCCESS != res) { |
5284 | 0 | goto out; |
5285 | 0 | } |
5286 | 0 | } |
5287 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
5288 | 0 | VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]); |
5289 | 0 | if (result != VK_STRING_ERROR_NONE) { |
5290 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5291 | 0 | "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains " |
5292 | 0 | "string that is too long or is badly formed"); |
5293 | 0 | res = VK_ERROR_EXTENSION_NOT_PRESENT; |
5294 | 0 | goto out; |
5295 | 0 | } |
5296 | | |
5297 | | // Check if a user wants to disable the instance extension filtering behavior |
5298 | 0 | env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst); |
5299 | 0 | if (NULL != env_value && atoi(env_value) != 0) { |
5300 | 0 | check_if_known = false; |
5301 | 0 | } |
5302 | 0 | loader_free_getenv(env_value, inst); |
5303 | |
|
5304 | 0 | if (check_if_known) { |
5305 | | // See if the extension is in the list of supported extensions |
5306 | 0 | bool found = false; |
5307 | 0 | for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) { |
5308 | 0 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) { |
5309 | 0 | found = true; |
5310 | 0 | break; |
5311 | 0 | } |
5312 | 0 | } |
5313 | | |
5314 | | // If it isn't in the list, return an error |
5315 | 0 | if (!found) { |
5316 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5317 | 0 | "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.", |
5318 | 0 | pCreateInfo->ppEnabledExtensionNames[i]); |
5319 | 0 | res = VK_ERROR_EXTENSION_NOT_PRESENT; |
5320 | 0 | goto out; |
5321 | 0 | } |
5322 | 0 | } |
5323 | | |
5324 | 0 | extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts); |
5325 | |
|
5326 | 0 | if (extension_prop) { |
5327 | 0 | continue; |
5328 | 0 | } |
5329 | | |
5330 | 0 | extension_prop = NULL; |
5331 | | |
5332 | | // Not in global list, search layer extension lists |
5333 | 0 | for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) { |
5334 | 0 | extension_prop = |
5335 | 0 | get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j]->instance_extension_list); |
5336 | 0 | if (extension_prop) { |
5337 | | // Found the extension in one of the layers enabled by the app. |
5338 | 0 | break; |
5339 | 0 | } |
5340 | | |
5341 | 0 | struct loader_layer_properties *layer_prop = |
5342 | 0 | loader_find_layer_property(expanded_layers.list[j]->info.layerName, instance_layers); |
5343 | 0 | if (NULL == layer_prop) { |
5344 | | // Should NOT get here, loader_validate_layers should have already filtered this case out. |
5345 | 0 | continue; |
5346 | 0 | } |
5347 | 0 | } |
5348 | |
|
5349 | 0 | if (!extension_prop) { |
5350 | | // Didn't find extension name in any of the global layers, error out |
5351 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5352 | 0 | "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled " |
5353 | 0 | "layers.", |
5354 | 0 | pCreateInfo->ppEnabledExtensionNames[i]); |
5355 | 0 | res = VK_ERROR_EXTENSION_NOT_PRESENT; |
5356 | 0 | goto out; |
5357 | 0 | } |
5358 | 0 | } |
5359 | | |
5360 | 0 | out: |
5361 | 0 | loader_destroy_pointer_layer_list(inst, &active_layers); |
5362 | 0 | loader_destroy_pointer_layer_list(inst, &expanded_layers); |
5363 | 0 | return res; |
5364 | 0 | } |
5365 | | |
5366 | | VkResult loader_validate_device_extensions(struct loader_instance *this_instance, |
5367 | | const struct loader_pointer_layer_list *activated_device_layers, |
5368 | 0 | const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) { |
5369 | | // Early out to prevent nullptr dereference |
5370 | 0 | if (pCreateInfo->enabledExtensionCount == 0 || pCreateInfo->ppEnabledExtensionNames == NULL) { |
5371 | 0 | return VK_SUCCESS; |
5372 | 0 | } |
5373 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
5374 | 0 | if (pCreateInfo->ppEnabledExtensionNames[i] == NULL) { |
5375 | 0 | continue; |
5376 | 0 | } |
5377 | 0 | VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]); |
5378 | 0 | if (result != VK_STRING_ERROR_NONE) { |
5379 | 0 | loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5380 | 0 | "loader_validate_device_extensions: Device ppEnabledExtensionNames contains " |
5381 | 0 | "string that is too long or is badly formed"); |
5382 | 0 | return VK_ERROR_EXTENSION_NOT_PRESENT; |
5383 | 0 | } |
5384 | | |
5385 | 0 | const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; |
5386 | 0 | VkExtensionProperties *extension_prop = get_extension_property(extension_name, icd_exts); |
5387 | |
|
5388 | 0 | if (extension_prop) { |
5389 | 0 | continue; |
5390 | 0 | } |
5391 | | |
5392 | | // Not in global list, search activated layer extension lists |
5393 | 0 | for (uint32_t j = 0; j < activated_device_layers->count; j++) { |
5394 | 0 | struct loader_layer_properties *layer_prop = activated_device_layers->list[j]; |
5395 | |
|
5396 | 0 | extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list); |
5397 | 0 | if (extension_prop) { |
5398 | | // Found the extension in one of the layers enabled by the app. |
5399 | 0 | break; |
5400 | 0 | } |
5401 | 0 | } |
5402 | |
|
5403 | 0 | if (!extension_prop) { |
5404 | | // Didn't find extension name in any of the device layers, error out |
5405 | 0 | loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5406 | 0 | "loader_validate_device_extensions: Device extension %s not supported by selected physical device " |
5407 | 0 | "or enabled layers.", |
5408 | 0 | pCreateInfo->ppEnabledExtensionNames[i]); |
5409 | 0 | return VK_ERROR_EXTENSION_NOT_PRESENT; |
5410 | 0 | } |
5411 | 0 | } |
5412 | 0 | return VK_SUCCESS; |
5413 | 0 | } |
5414 | | |
5415 | | // Terminator functions for the Instance chain |
5416 | | // All named terminator_<Vulkan API name> |
5417 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, |
5418 | 0 | const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) { |
5419 | 0 | struct loader_icd_term *icd_term; |
5420 | 0 | VkExtensionProperties *prop; |
5421 | 0 | char **filtered_extension_names = NULL; |
5422 | 0 | VkInstanceCreateInfo icd_create_info; |
5423 | 0 | VkResult res = VK_SUCCESS; |
5424 | 0 | bool one_icd_successful = false; |
5425 | |
|
5426 | 0 | struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance; |
5427 | 0 | if (NULL == ptr_instance) { |
5428 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
5429 | 0 | "terminator_CreateInstance: Loader instance pointer null encountered. Possibly set by active layer. (Policy " |
5430 | 0 | "#LLP_LAYER_21)"); |
5431 | 0 | } else if (LOADER_MAGIC_NUMBER != ptr_instance->magic) { |
5432 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
5433 | 0 | "terminator_CreateInstance: Instance pointer (%p) has invalid MAGIC value 0x%08" PRIx64 |
5434 | 0 | ". Instance value possibly " |
5435 | 0 | "corrupted by active layer (Policy #LLP_LAYER_21). ", |
5436 | 0 | ptr_instance, ptr_instance->magic); |
5437 | 0 | } |
5438 | | |
5439 | | // Save the application version if it has been modified - layers sometimes needs features in newer API versions than |
5440 | | // what the application requested, and thus will increase the instance version to a level that suites their needs. |
5441 | 0 | if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion) { |
5442 | 0 | loader_api_version altered_version = loader_make_version(pCreateInfo->pApplicationInfo->apiVersion); |
5443 | 0 | if (altered_version.major != ptr_instance->app_api_version.major || |
5444 | 0 | altered_version.minor != ptr_instance->app_api_version.minor) { |
5445 | 0 | ptr_instance->app_api_version = altered_version; |
5446 | 0 | } |
5447 | 0 | } |
5448 | |
|
5449 | 0 | memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info)); |
5450 | |
|
5451 | 0 | icd_create_info.enabledLayerCount = 0; |
5452 | 0 | icd_create_info.ppEnabledLayerNames = NULL; |
5453 | | |
5454 | | // NOTE: Need to filter the extensions to only those supported by the ICD. |
5455 | | // No ICD will advertise support for layers. An ICD library could |
5456 | | // support a layer, but it would be independent of the actual ICD, |
5457 | | // just in the same library. |
5458 | 0 | uint32_t extension_count = pCreateInfo->enabledExtensionCount; |
5459 | 0 | #if defined(LOADER_ENABLE_LINUX_SORT) |
5460 | 0 | extension_count += 1; |
5461 | 0 | #endif // LOADER_ENABLE_LINUX_SORT |
5462 | 0 | filtered_extension_names = loader_stack_alloc(extension_count * sizeof(char *)); |
5463 | 0 | if (!filtered_extension_names) { |
5464 | 0 | loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5465 | 0 | "terminator_CreateInstance: Failed create extension name array for %d extensions", extension_count); |
5466 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5467 | 0 | goto out; |
5468 | 0 | } |
5469 | 0 | icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names; |
5470 | | |
5471 | | // Determine if Get Physical Device Properties 2 is available to this Instance |
5472 | 0 | if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion >= VK_API_VERSION_1_1) { |
5473 | 0 | ptr_instance->supports_get_dev_prop_2 = true; |
5474 | 0 | } else { |
5475 | 0 | for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) { |
5476 | 0 | if (!strcmp(pCreateInfo->ppEnabledExtensionNames[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { |
5477 | 0 | ptr_instance->supports_get_dev_prop_2 = true; |
5478 | 0 | break; |
5479 | 0 | } |
5480 | 0 | } |
5481 | 0 | } |
5482 | |
|
5483 | 0 | for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) { |
5484 | 0 | icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]); |
5485 | 0 | if (NULL == icd_term) { |
5486 | 0 | loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5487 | 0 | "terminator_CreateInstance: Failed to add ICD %d to ICD trampoline list.", i); |
5488 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5489 | 0 | goto out; |
5490 | 0 | } |
5491 | | |
5492 | | // If any error happens after here, we need to remove the ICD from the list, |
5493 | | // because we've already added it, but haven't validated it |
5494 | | |
5495 | | // Make sure that we reset the pApplicationInfo so we don't get an old pointer |
5496 | 0 | icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo; |
5497 | 0 | icd_create_info.enabledExtensionCount = 0; |
5498 | 0 | struct loader_extension_list icd_exts = {0}; |
5499 | | |
5500 | | // traverse scanned icd list adding non-duplicate extensions to the list |
5501 | 0 | res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); |
5502 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
5503 | | // If out of memory, bail immediately. |
5504 | 0 | goto out; |
5505 | 0 | } else if (VK_SUCCESS != res) { |
5506 | | // Something bad happened with this ICD, so free it and try the |
5507 | | // next. |
5508 | 0 | ptr_instance->icd_terms = icd_term->next; |
5509 | 0 | icd_term->next = NULL; |
5510 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
5511 | 0 | continue; |
5512 | 0 | } |
5513 | | |
5514 | 0 | res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties, |
5515 | 0 | icd_term->scanned_icd->lib_name, &icd_exts); |
5516 | 0 | if (VK_SUCCESS != res) { |
5517 | 0 | loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts); |
5518 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
5519 | | // If out of memory, bail immediately. |
5520 | 0 | goto out; |
5521 | 0 | } else { |
5522 | | // Something bad happened with this ICD, so free it and try the next. |
5523 | 0 | ptr_instance->icd_terms = icd_term->next; |
5524 | 0 | icd_term->next = NULL; |
5525 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
5526 | 0 | continue; |
5527 | 0 | } |
5528 | 0 | } |
5529 | | |
5530 | 0 | for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) { |
5531 | 0 | prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts); |
5532 | 0 | if (prop) { |
5533 | 0 | filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j]; |
5534 | 0 | icd_create_info.enabledExtensionCount++; |
5535 | 0 | } |
5536 | 0 | } |
5537 | 0 | #if defined(LOADER_ENABLE_LINUX_SORT) |
5538 | | // Force on "VK_KHR_get_physical_device_properties2" for Linux as we use it for GPU sorting. This |
5539 | | // should be done if the API version of either the application or the driver does not natively support |
5540 | | // the core version of vkGetPhysicalDeviceProperties2 entrypoint. |
5541 | 0 | if ((ptr_instance->app_api_version.major == 1 && ptr_instance->app_api_version.minor == 0) || |
5542 | 0 | (VK_API_VERSION_MAJOR(icd_term->scanned_icd->api_version) == 1 && |
5543 | 0 | VK_API_VERSION_MINOR(icd_term->scanned_icd->api_version) == 0)) { |
5544 | 0 | prop = get_extension_property(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, &icd_exts); |
5545 | 0 | if (prop) { |
5546 | 0 | filtered_extension_names[icd_create_info.enabledExtensionCount] = |
5547 | 0 | (char *)VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; |
5548 | 0 | icd_create_info.enabledExtensionCount++; |
5549 | | |
5550 | | // At least one ICD supports this, so the instance should be able to support it |
5551 | 0 | ptr_instance->supports_get_dev_prop_2 = true; |
5552 | 0 | } |
5553 | 0 | } |
5554 | 0 | #endif // LOADER_ENABLE_LINUX_SORT |
5555 | | |
5556 | | // Determine if vkGetPhysicalDeviceProperties2 is available to this Instance |
5557 | | // Also determine if VK_EXT_surface_maintenance1 is available on the ICD |
5558 | 0 | if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) { |
5559 | 0 | icd_term->enabled_instance_extensions.khr_get_physical_device_properties2 = true; |
5560 | 0 | } |
5561 | 0 | fill_out_enabled_instance_extensions(icd_create_info.enabledExtensionCount, (const char *const *)filtered_extension_names, |
5562 | 0 | &icd_term->enabled_instance_extensions); |
5563 | |
|
5564 | 0 | loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts); |
5565 | | |
5566 | | // Get the driver version from vkEnumerateInstanceVersion |
5567 | 0 | uint32_t icd_version = VK_API_VERSION_1_0; |
5568 | 0 | VkResult icd_result = VK_SUCCESS; |
5569 | 0 | if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) { |
5570 | 0 | PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version = |
5571 | 0 | (PFN_vkEnumerateInstanceVersion)icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion"); |
5572 | 0 | if (icd_enumerate_instance_version != NULL) { |
5573 | 0 | icd_result = icd_enumerate_instance_version(&icd_version); |
5574 | 0 | if (icd_result != VK_SUCCESS) { |
5575 | 0 | icd_version = VK_API_VERSION_1_0; |
5576 | 0 | loader_log(ptr_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5577 | 0 | "terminator_CreateInstance: ICD \"%s\" vkEnumerateInstanceVersion returned error. The ICD will be " |
5578 | 0 | "treated as a 1.0 ICD", |
5579 | 0 | icd_term->scanned_icd->lib_name); |
5580 | 0 | } else if (VK_API_VERSION_MINOR(icd_version) == 0) { |
5581 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5582 | 0 | "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but " |
5583 | 0 | "vkEnumerateInstanceVersion returned 1.0, treating as a 1.0 ICD", |
5584 | 0 | icd_term->scanned_icd->lib_name); |
5585 | 0 | } |
5586 | 0 | } else { |
5587 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5588 | 0 | "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but does " |
5589 | 0 | "not support vkEnumerateInstanceVersion, treating as a 1.0 ICD", |
5590 | 0 | icd_term->scanned_icd->lib_name); |
5591 | 0 | } |
5592 | 0 | } |
5593 | | |
5594 | | // Remove the portability enumeration flag bit if the ICD doesn't support the extension |
5595 | 0 | if ((pCreateInfo->flags & VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR) == 1) { |
5596 | 0 | bool supports_portability_enumeration = false; |
5597 | 0 | for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) { |
5598 | 0 | if (strcmp(filtered_extension_names[j], VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME) == 0) { |
5599 | 0 | supports_portability_enumeration = true; |
5600 | 0 | break; |
5601 | 0 | } |
5602 | 0 | } |
5603 | | // If the icd supports the extension, use the flags as given, otherwise remove the portability bit |
5604 | 0 | icd_create_info.flags = supports_portability_enumeration |
5605 | 0 | ? pCreateInfo->flags |
5606 | 0 | : pCreateInfo->flags & (~VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR); |
5607 | 0 | } |
5608 | | |
5609 | | // Create an instance, substituting the version to 1.0 if necessary |
5610 | 0 | VkApplicationInfo icd_app_info; |
5611 | 0 | const uint32_t api_variant = 0; |
5612 | 0 | const uint32_t api_version_1_0 = VK_API_VERSION_1_0; |
5613 | 0 | uint32_t icd_version_nopatch = |
5614 | 0 | VK_MAKE_API_VERSION(api_variant, VK_API_VERSION_MAJOR(icd_version), VK_API_VERSION_MINOR(icd_version), 0); |
5615 | 0 | uint32_t requested_version = (pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL) |
5616 | 0 | ? api_version_1_0 |
5617 | 0 | : pCreateInfo->pApplicationInfo->apiVersion; |
5618 | 0 | if ((requested_version != 0) && (icd_version_nopatch == api_version_1_0)) { |
5619 | 0 | if (icd_create_info.pApplicationInfo == NULL) { |
5620 | 0 | memset(&icd_app_info, 0, sizeof(icd_app_info)); |
5621 | 0 | } else { |
5622 | 0 | memmove(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info)); |
5623 | 0 | } |
5624 | 0 | icd_app_info.apiVersion = icd_version; |
5625 | 0 | icd_create_info.pApplicationInfo = &icd_app_info; |
5626 | 0 | } |
5627 | 0 | icd_result = |
5628 | 0 | ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance)); |
5629 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) { |
5630 | | // If out of memory, bail immediately. |
5631 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5632 | 0 | goto out; |
5633 | 0 | } else if (VK_SUCCESS != icd_result) { |
5634 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
5635 | 0 | "terminator_CreateInstance: Received return code %i from call to vkCreateInstance in ICD %s. Skipping " |
5636 | 0 | "this driver.", |
5637 | 0 | icd_result, icd_term->scanned_icd->lib_name); |
5638 | 0 | ptr_instance->icd_terms = icd_term->next; |
5639 | 0 | icd_term->next = NULL; |
5640 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
5641 | 0 | continue; |
5642 | 0 | } |
5643 | | |
5644 | 0 | if (!loader_icd_init_entries(ptr_instance, icd_term)) { |
5645 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
5646 | 0 | "terminator_CreateInstance: Failed to find required entrypoints in ICD %s. Skipping this driver.", |
5647 | 0 | icd_term->scanned_icd->lib_name); |
5648 | 0 | ptr_instance->icd_terms = icd_term->next; |
5649 | 0 | icd_term->next = NULL; |
5650 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
5651 | 0 | continue; |
5652 | 0 | } |
5653 | | |
5654 | 0 | if (ptr_instance->icd_tramp_list.scanned_list[i].interface_version < 3 && |
5655 | 0 | ( |
5656 | 0 | #if defined(VK_USE_PLATFORM_XLIB_KHR) |
5657 | 0 | NULL != icd_term->dispatch.CreateXlibSurfaceKHR || |
5658 | 0 | #endif // VK_USE_PLATFORM_XLIB_KHR |
5659 | 0 | #if defined(VK_USE_PLATFORM_XCB_KHR) |
5660 | 0 | NULL != icd_term->dispatch.CreateXcbSurfaceKHR || |
5661 | 0 | #endif // VK_USE_PLATFORM_XCB_KHR |
5662 | 0 | #if defined(VK_USE_PLATFORM_WAYLAND_KHR) |
5663 | 0 | NULL != icd_term->dispatch.CreateWaylandSurfaceKHR || |
5664 | 0 | #endif // VK_USE_PLATFORM_WAYLAND_KHR |
5665 | | #if defined(VK_USE_PLATFORM_ANDROID_KHR) |
5666 | | NULL != icd_term->dispatch.CreateAndroidSurfaceKHR || |
5667 | | #endif // VK_USE_PLATFORM_ANDROID_KHR |
5668 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
5669 | | NULL != icd_term->dispatch.CreateWin32SurfaceKHR || |
5670 | | #endif // VK_USE_PLATFORM_WIN32_KHR |
5671 | 0 | NULL != icd_term->dispatch.DestroySurfaceKHR)) { |
5672 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
5673 | 0 | "terminator_CreateInstance: Driver %s supports interface version %u but still exposes VkSurfaceKHR" |
5674 | 0 | " create/destroy entrypoints (Policy #LDP_DRIVER_8)", |
5675 | 0 | ptr_instance->icd_tramp_list.scanned_list[i].lib_name, |
5676 | 0 | ptr_instance->icd_tramp_list.scanned_list[i].interface_version); |
5677 | 0 | } |
5678 | | |
5679 | | // If we made it this far, at least one ICD was successful |
5680 | 0 | one_icd_successful = true; |
5681 | 0 | } |
5682 | | |
5683 | | // For vkGetPhysicalDeviceProperties2, at least one ICD needs to support the extension for the |
5684 | | // instance to have it |
5685 | 0 | if (ptr_instance->enabled_extensions.khr_get_physical_device_properties2) { |
5686 | 0 | bool at_least_one_supports = false; |
5687 | 0 | icd_term = ptr_instance->icd_terms; |
5688 | 0 | while (icd_term != NULL) { |
5689 | 0 | if (icd_term->enabled_instance_extensions.khr_get_physical_device_properties2) { |
5690 | 0 | at_least_one_supports = true; |
5691 | 0 | break; |
5692 | 0 | } |
5693 | 0 | icd_term = icd_term->next; |
5694 | 0 | } |
5695 | 0 | if (!at_least_one_supports) { |
5696 | 0 | ptr_instance->enabled_extensions.khr_get_physical_device_properties2 = false; |
5697 | 0 | } |
5698 | 0 | } |
5699 | | |
5700 | | // If no ICDs were added to instance list and res is unchanged from it's initial value, the loader was unable to |
5701 | | // find a suitable ICD. |
5702 | 0 | if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) { |
5703 | 0 | loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5704 | 0 | "terminator_CreateInstance: Found no drivers!"); |
5705 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
5706 | 0 | } |
5707 | |
|
5708 | 0 | out: |
5709 | |
|
5710 | 0 | ptr_instance->create_terminator_invalid_extension = false; |
5711 | |
|
5712 | 0 | if (VK_SUCCESS != res) { |
5713 | 0 | if (VK_ERROR_EXTENSION_NOT_PRESENT == res) { |
5714 | 0 | ptr_instance->create_terminator_invalid_extension = true; |
5715 | 0 | } |
5716 | |
|
5717 | 0 | while (NULL != ptr_instance->icd_terms) { |
5718 | 0 | icd_term = ptr_instance->icd_terms; |
5719 | 0 | ptr_instance->icd_terms = icd_term->next; |
5720 | 0 | if (NULL != icd_term->instance) { |
5721 | 0 | loader_icd_close_objects(ptr_instance, icd_term); |
5722 | 0 | icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator); |
5723 | 0 | } |
5724 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
5725 | 0 | } |
5726 | 0 | } else { |
5727 | | // Check for enabled extensions here to setup the loader structures so the loader knows what extensions |
5728 | | // it needs to worry about. |
5729 | | // We do it here and again above the layers in the trampoline function since the trampoline function |
5730 | | // may think different extensions are enabled than what's down here. |
5731 | | // This is why we don't clear inside of these function calls. |
5732 | | // The clearing should actually be handled by the overall memset of the pInstance structure in the |
5733 | | // trampoline. |
5734 | 0 | fill_out_enabled_instance_extensions(pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames, |
5735 | 0 | &ptr_instance->enabled_extensions); |
5736 | 0 | } |
5737 | |
|
5738 | 0 | return res; |
5739 | 0 | } |
5740 | | |
5741 | 0 | VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { |
5742 | 0 | struct loader_instance *ptr_instance = loader_get_instance(instance); |
5743 | 0 | if (NULL == ptr_instance) { |
5744 | 0 | return; |
5745 | 0 | } |
5746 | | |
5747 | | // Remove this instance from the list of instances: |
5748 | 0 | struct loader_instance *prev = NULL; |
5749 | 0 | loader_platform_thread_lock_mutex(&loader_global_instance_list_lock); |
5750 | 0 | struct loader_instance *next = loader.instances; |
5751 | 0 | while (next != NULL) { |
5752 | 0 | if (next == ptr_instance) { |
5753 | | // Remove this instance from the list: |
5754 | 0 | if (prev) |
5755 | 0 | prev->next = next->next; |
5756 | 0 | else |
5757 | 0 | loader.instances = next->next; |
5758 | 0 | break; |
5759 | 0 | } |
5760 | 0 | prev = next; |
5761 | 0 | next = next->next; |
5762 | 0 | } |
5763 | 0 | loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock); |
5764 | |
|
5765 | 0 | struct loader_icd_term *icd_terms = ptr_instance->icd_terms; |
5766 | 0 | while (NULL != icd_terms) { |
5767 | 0 | if (icd_terms->instance) { |
5768 | 0 | loader_icd_close_objects(ptr_instance, icd_terms); |
5769 | 0 | icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator); |
5770 | 0 | } |
5771 | 0 | struct loader_icd_term *next_icd_term = icd_terms->next; |
5772 | 0 | icd_terms->instance = VK_NULL_HANDLE; |
5773 | 0 | loader_icd_destroy(ptr_instance, icd_terms, pAllocator); |
5774 | |
|
5775 | 0 | icd_terms = next_icd_term; |
5776 | 0 | } |
5777 | |
|
5778 | 0 | loader_clear_scanned_icd_list(ptr_instance, &ptr_instance->icd_tramp_list); |
5779 | 0 | loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list); |
5780 | 0 | if (NULL != ptr_instance->phys_devs_term) { |
5781 | 0 | for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) { |
5782 | 0 | for (uint32_t j = i + 1; j < ptr_instance->phys_dev_count_term; j++) { |
5783 | 0 | if (ptr_instance->phys_devs_term[i] == ptr_instance->phys_devs_term[j]) { |
5784 | 0 | ptr_instance->phys_devs_term[j] = NULL; |
5785 | 0 | } |
5786 | 0 | } |
5787 | 0 | } |
5788 | 0 | for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) { |
5789 | 0 | loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]); |
5790 | 0 | } |
5791 | 0 | loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term); |
5792 | 0 | } |
5793 | 0 | if (NULL != ptr_instance->phys_dev_groups_term) { |
5794 | 0 | for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) { |
5795 | 0 | loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]); |
5796 | 0 | } |
5797 | 0 | loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term); |
5798 | 0 | } |
5799 | 0 | loader_free_dev_ext_table(ptr_instance); |
5800 | 0 | loader_free_phys_dev_ext_table(ptr_instance); |
5801 | |
|
5802 | 0 | free_string_list(ptr_instance, &ptr_instance->enabled_layer_names); |
5803 | 0 | } |
5804 | | |
5805 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, |
5806 | 0 | const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { |
5807 | 0 | VkResult res = VK_SUCCESS; |
5808 | 0 | struct loader_physical_device_term *phys_dev_term; |
5809 | 0 | phys_dev_term = (struct loader_physical_device_term *)physicalDevice; |
5810 | 0 | struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; |
5811 | |
|
5812 | 0 | struct loader_device *dev = (struct loader_device *)*pDevice; |
5813 | 0 | PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice; |
5814 | 0 | struct loader_extension_list icd_exts; |
5815 | |
|
5816 | 0 | VkBaseOutStructure *caller_dgci_container = NULL; |
5817 | 0 | VkDeviceGroupDeviceCreateInfo *caller_dgci = NULL; |
5818 | |
|
5819 | 0 | if (NULL == dev) { |
5820 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0, |
5821 | 0 | "terminator_CreateDevice: Loader device pointer null encountered. Possibly set by active layer. (Policy " |
5822 | 0 | "#LLP_LAYER_22)"); |
5823 | 0 | } else if (DEVICE_DISP_TABLE_MAGIC_NUMBER != dev->loader_dispatch.core_dispatch.magic) { |
5824 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0, |
5825 | 0 | "terminator_CreateDevice: Device pointer (%p) has invalid MAGIC value 0x%08" PRIx64 |
5826 | 0 | ". The expected value is " |
5827 | 0 | "0x10ADED040410ADED. Device value possibly " |
5828 | 0 | "corrupted by active layer (Policy #LLP_LAYER_22). ", |
5829 | 0 | dev, dev->loader_dispatch.core_dispatch.magic); |
5830 | 0 | } |
5831 | |
|
5832 | 0 | dev->phys_dev_term = phys_dev_term; |
5833 | |
|
5834 | 0 | icd_exts.list = NULL; |
5835 | |
|
5836 | 0 | if (fpCreateDevice == NULL) { |
5837 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5838 | 0 | "terminator_CreateDevice: No vkCreateDevice command exposed by ICD %s", icd_term->scanned_icd->lib_name); |
5839 | 0 | res = VK_ERROR_INITIALIZATION_FAILED; |
5840 | 0 | goto out; |
5841 | 0 | } |
5842 | | |
5843 | 0 | VkDeviceCreateInfo localCreateInfo; |
5844 | 0 | memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo)); |
5845 | | |
5846 | | // NOTE: Need to filter the extensions to only those supported by the ICD. |
5847 | | // No ICD will advertise support for layers. An ICD library could support a layer, |
5848 | | // but it would be independent of the actual ICD, just in the same library. |
5849 | 0 | char **filtered_extension_names = NULL; |
5850 | 0 | if (0 < pCreateInfo->enabledExtensionCount) { |
5851 | 0 | filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *)); |
5852 | 0 | if (NULL == filtered_extension_names) { |
5853 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5854 | 0 | "terminator_CreateDevice: Failed to create extension name storage for %d extensions", |
5855 | 0 | pCreateInfo->enabledExtensionCount); |
5856 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
5857 | 0 | } |
5858 | 0 | } |
5859 | | |
5860 | 0 | localCreateInfo.enabledLayerCount = 0; |
5861 | 0 | localCreateInfo.ppEnabledLayerNames = NULL; |
5862 | |
|
5863 | 0 | localCreateInfo.enabledExtensionCount = 0; |
5864 | 0 | localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names; |
5865 | | |
5866 | | // Get the physical device (ICD) extensions |
5867 | 0 | res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); |
5868 | 0 | if (VK_SUCCESS != res) { |
5869 | 0 | goto out; |
5870 | 0 | } |
5871 | | |
5872 | 0 | res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties, |
5873 | 0 | phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts); |
5874 | 0 | if (res != VK_SUCCESS) { |
5875 | 0 | goto out; |
5876 | 0 | } |
5877 | | |
5878 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
5879 | 0 | if (pCreateInfo->ppEnabledExtensionNames == NULL) { |
5880 | 0 | continue; |
5881 | 0 | } |
5882 | 0 | const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; |
5883 | 0 | if (extension_name == NULL) { |
5884 | 0 | continue; |
5885 | 0 | } |
5886 | 0 | VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts); |
5887 | 0 | if (prop) { |
5888 | 0 | filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name; |
5889 | 0 | localCreateInfo.enabledExtensionCount++; |
5890 | 0 | } else { |
5891 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5892 | 0 | "vkCreateDevice extension %s not available for devices associated with ICD %s", extension_name, |
5893 | 0 | icd_term->scanned_icd->lib_name); |
5894 | 0 | } |
5895 | 0 | } |
5896 | | |
5897 | | // Before we continue, If KHX_device_group is the list of enabled and viable extensions, then we then need to look for the |
5898 | | // corresponding VkDeviceGroupDeviceCreateInfo struct in the device list and replace all the physical device values (which |
5899 | | // are really loader physical device terminator values) with the ICD versions. |
5900 | | // if (icd_term->this_instance->enabled_extensions.khr_device_group_creation == 1) { |
5901 | 0 | { |
5902 | 0 | VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext; |
5903 | 0 | VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo; |
5904 | 0 | while (NULL != pNext) { |
5905 | 0 | if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) { |
5906 | 0 | VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext; |
5907 | 0 | if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) { |
5908 | 0 | VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo)); |
5909 | 0 | VkPhysicalDevice *phys_dev_array = NULL; |
5910 | 0 | if (NULL == temp_struct) { |
5911 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
5912 | 0 | } |
5913 | 0 | memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo)); |
5914 | 0 | phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount); |
5915 | 0 | if (NULL == phys_dev_array) { |
5916 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
5917 | 0 | } |
5918 | | |
5919 | | // Before calling down, replace the incoming physical device values (which are really loader terminator |
5920 | | // physical devices) with the ICDs physical device values. |
5921 | 0 | struct loader_physical_device_term *cur_term; |
5922 | 0 | for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) { |
5923 | 0 | cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev]; |
5924 | 0 | phys_dev_array[phys_dev] = cur_term->phys_dev; |
5925 | 0 | } |
5926 | 0 | temp_struct->pPhysicalDevices = phys_dev_array; |
5927 | | |
5928 | | // Keep track of pointers to restore pNext chain before returning |
5929 | 0 | caller_dgci_container = pPrev; |
5930 | 0 | caller_dgci = cur_struct; |
5931 | | |
5932 | | // Replace the old struct in the pNext chain with this one. |
5933 | 0 | pPrev->pNext = (VkBaseOutStructure *)temp_struct; |
5934 | 0 | } |
5935 | 0 | break; |
5936 | 0 | } |
5937 | | |
5938 | 0 | pPrev = pNext; |
5939 | 0 | pNext = pNext->pNext; |
5940 | 0 | } |
5941 | 0 | } |
5942 | | |
5943 | | // Handle loader emulation for structs that are not supported by the ICD: |
5944 | | // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which |
5945 | | // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current |
5946 | | // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize |
5947 | | // the any of the struct types, as the loader would not know the size to allocate and copy. |
5948 | | // if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) { |
5949 | 0 | { |
5950 | 0 | const void *pNext = localCreateInfo.pNext; |
5951 | 0 | while (pNext != NULL) { |
5952 | 0 | VkBaseInStructure pNext_in_structure = {0}; |
5953 | 0 | memcpy(&pNext_in_structure, pNext, sizeof(VkBaseInStructure)); |
5954 | 0 | switch (pNext_in_structure.sType) { |
5955 | 0 | case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: { |
5956 | 0 | const VkPhysicalDeviceFeatures2KHR *features = pNext; |
5957 | |
|
5958 | 0 | if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && |
5959 | 0 | icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) { |
5960 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0, |
5961 | 0 | "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"", |
5962 | 0 | icd_term->scanned_icd->lib_name); |
5963 | | |
5964 | | // Verify that VK_KHR_get_physical_device_properties2 is enabled |
5965 | 0 | if (icd_term->this_instance->enabled_extensions.khr_get_physical_device_properties2) { |
5966 | 0 | localCreateInfo.pEnabledFeatures = &features->features; |
5967 | 0 | } |
5968 | 0 | } |
5969 | | |
5970 | | // Leave this item in the pNext chain for now |
5971 | |
|
5972 | 0 | pNext = features->pNext; |
5973 | 0 | break; |
5974 | 0 | } |
5975 | | |
5976 | 0 | case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: { |
5977 | 0 | const VkDeviceGroupDeviceCreateInfo *group_info = pNext; |
5978 | |
|
5979 | 0 | if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL && |
5980 | 0 | icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) { |
5981 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0, |
5982 | 0 | "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for " |
5983 | 0 | "ICD \"%s\"", |
5984 | 0 | icd_term->scanned_icd->lib_name); |
5985 | | |
5986 | | // The group must contain only this one device, since physical device groups aren't actually supported |
5987 | 0 | if (group_info->physicalDeviceCount != 1) { |
5988 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5989 | 0 | "vkCreateDevice: Emulation failed to create device from device group info"); |
5990 | 0 | res = VK_ERROR_INITIALIZATION_FAILED; |
5991 | 0 | goto out; |
5992 | 0 | } |
5993 | 0 | } |
5994 | | |
5995 | | // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec |
5996 | | // states that the physicalDevice argument must be included in the device group, and we've already checked |
5997 | | // that it is |
5998 | | |
5999 | 0 | pNext = group_info->pNext; |
6000 | 0 | break; |
6001 | 0 | } |
6002 | | |
6003 | | // Multiview properties are also allowed, but since VK_KHX_multiview is a device extension, we'll just let the |
6004 | | // ICD handle that error when the user enables the extension here |
6005 | 0 | default: { |
6006 | 0 | pNext = pNext_in_structure.pNext; |
6007 | 0 | break; |
6008 | 0 | } |
6009 | 0 | } |
6010 | 0 | } |
6011 | 0 | } |
6012 | | |
6013 | 0 | VkBool32 maintenance5_feature_enabled = false; |
6014 | | // Look for the VkPhysicalDeviceMaintenance5FeaturesKHR struct to see if the feature was enabled |
6015 | 0 | { |
6016 | 0 | const void *pNext = localCreateInfo.pNext; |
6017 | 0 | while (pNext != NULL) { |
6018 | 0 | VkBaseInStructure pNext_in_structure = {0}; |
6019 | 0 | memcpy(&pNext_in_structure, pNext, sizeof(VkBaseInStructure)); |
6020 | 0 | switch (pNext_in_structure.sType) { |
6021 | 0 | case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR: { |
6022 | 0 | const VkPhysicalDeviceMaintenance5FeaturesKHR *maintenance_features = pNext; |
6023 | 0 | if (maintenance_features->maintenance5 == VK_TRUE) { |
6024 | 0 | maintenance5_feature_enabled = true; |
6025 | 0 | } |
6026 | 0 | pNext = maintenance_features->pNext; |
6027 | 0 | break; |
6028 | 0 | } |
6029 | | |
6030 | 0 | default: { |
6031 | 0 | pNext = pNext_in_structure.pNext; |
6032 | 0 | break; |
6033 | 0 | } |
6034 | 0 | } |
6035 | 0 | } |
6036 | 0 | } |
6037 | | |
6038 | | // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or |
6039 | | // not to return that terminator when vkGetDeviceProcAddr is called |
6040 | 0 | for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) { |
6041 | 0 | if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { |
6042 | 0 | dev->driver_extensions.khr_swapchain_enabled = true; |
6043 | 0 | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) { |
6044 | 0 | dev->driver_extensions.khr_display_swapchain_enabled = true; |
6045 | 0 | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) { |
6046 | 0 | dev->driver_extensions.khr_device_group_enabled = true; |
6047 | 0 | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { |
6048 | 0 | dev->driver_extensions.ext_debug_marker_enabled = true; |
6049 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
6050 | | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME)) { |
6051 | | dev->driver_extensions.ext_full_screen_exclusive_enabled = true; |
6052 | | #endif |
6053 | 0 | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_MAINTENANCE_5_EXTENSION_NAME) && |
6054 | 0 | maintenance5_feature_enabled) { |
6055 | 0 | dev->should_ignore_device_commands_from_newer_version = true; |
6056 | 0 | } |
6057 | 0 | } |
6058 | 0 | dev->layer_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_extensions.ext_debug_utils; |
6059 | 0 | dev->driver_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_extensions.ext_debug_utils; |
6060 | |
|
6061 | 0 | VkPhysicalDeviceProperties properties; |
6062 | 0 | icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties); |
6063 | 0 | if (properties.apiVersion >= VK_API_VERSION_1_1) { |
6064 | 0 | dev->driver_extensions.version_1_1_enabled = true; |
6065 | 0 | } |
6066 | 0 | if (properties.apiVersion >= VK_API_VERSION_1_2) { |
6067 | 0 | dev->driver_extensions.version_1_2_enabled = true; |
6068 | 0 | } |
6069 | 0 | if (properties.apiVersion >= VK_API_VERSION_1_3) { |
6070 | 0 | dev->driver_extensions.version_1_3_enabled = true; |
6071 | 0 | } |
6072 | |
|
6073 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6074 | 0 | " Using \"%s\" with driver: \"%s\"", properties.deviceName, icd_term->scanned_icd->lib_name); |
6075 | |
|
6076 | 0 | res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device); |
6077 | 0 | if (res != VK_SUCCESS) { |
6078 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6079 | 0 | "terminator_CreateDevice: Failed in ICD %s vkCreateDevice call", icd_term->scanned_icd->lib_name); |
6080 | 0 | goto out; |
6081 | 0 | } |
6082 | | |
6083 | 0 | *pDevice = dev->icd_device; |
6084 | 0 | loader_add_logical_device(icd_term, dev); |
6085 | | |
6086 | | // Init dispatch pointer in new device object |
6087 | 0 | loader_init_dispatch(*pDevice, &dev->loader_dispatch); |
6088 | |
|
6089 | 0 | out: |
6090 | 0 | if (NULL != icd_exts.list) { |
6091 | 0 | loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts); |
6092 | 0 | } |
6093 | | |
6094 | | // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfo |
6095 | | // in the chain to maintain consistency for the caller. |
6096 | 0 | if (caller_dgci_container != NULL) { |
6097 | 0 | caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci; |
6098 | 0 | } |
6099 | |
|
6100 | 0 | return res; |
6101 | 0 | } |
6102 | | |
6103 | | // Update the trampoline physical devices with the wrapped version. |
6104 | | // We always want to re-use previous physical device pointers since they may be used by an application |
6105 | | // after returning previously. |
VkResult setup_loader_tramp_phys_devs(struct loader_instance *inst, uint32_t phys_dev_count, VkPhysicalDevice *phys_devs) {
    VkResult res = VK_SUCCESS;
    uint32_t found_count = 0;
    // old_count: trampoline wrappers created by a previous enumeration.
    // new_count: the larger of the loader's known GPU total and the incoming list size.
    uint32_t old_count = inst->phys_dev_count_tramp;
    uint32_t new_count = inst->total_gpu_count;
    struct loader_physical_device_tramp **new_phys_devs = NULL;

    // Nothing to wrap.
    if (0 == phys_dev_count) {
        return VK_SUCCESS;
    }
    // The caller may report more devices than total_gpu_count currently records.
    if (phys_dev_count > new_count) {
        new_count = phys_dev_count;
    }

    // We want an old to new index array and a new to old index array
    int32_t *old_to_new_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * old_count);
    int32_t *new_to_old_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * new_count);
    if (NULL == old_to_new_index || NULL == new_to_old_index) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    // Initialize both; -1 marks "no corresponding entry found"
    for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
        old_to_new_index[cur_idx] = -1;
    }
    for (uint32_t cur_idx = 0; cur_idx < new_count; ++cur_idx) {
        new_to_old_index[cur_idx] = -1;
    }

    // Figure out the old->new and new->old indices by matching the underlying
    // driver VkPhysicalDevice handles.
    for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
        for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
            if (inst->phys_devs_tramp[cur_idx]->phys_dev == phys_devs[new_idx]) {
                old_to_new_index[cur_idx] = (int32_t)new_idx;
                new_to_old_index[new_idx] = (int32_t)cur_idx;
                found_count++;
                break;
            }
        }
    }

    // If we found exactly the number of items we were looking for as we had before. Then everything
    // we already have is good enough and we just need to update the array that was passed in with
    // the loader values.
    if (found_count == phys_dev_count && 0 != old_count && old_count == new_count) {
        for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
            for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
                if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
                    // Hand the caller the existing trampoline wrapper instead of the raw handle.
                    phys_devs[new_idx] = (VkPhysicalDevice)inst->phys_devs_tramp[cur_idx];
                    break;
                }
            }
        }
        // Nothing else to do for this path
        res = VK_SUCCESS;
    } else {
        // Something is different, so do the full path of checking every device and creating a new array to use.
        // This can happen if a device was added, or removed, or we hadn't previously queried all the data and we
        // have more to store.
        new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count,
                                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_phys_devs) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "setup_loader_tramp_phys_devs: Failed to allocate new physical device array of size %d", new_count);
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }

        // found_count becomes the number of entries we will fill from the caller's list.
        if (new_count > phys_dev_count) {
            found_count = phys_dev_count;
        } else {
            found_count = new_count;
        }

        // First try to see if an old item exists that matches the new item. If so, just copy it over.
        for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
            bool old_item_found = false;
            for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
                if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
                    // Copy over old item to correct spot in the new array
                    new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
                    old_item_found = true;
                    break;
                }
            }
            // Something wasn't found, so it's new so add it to the new list
            if (!old_item_found) {
                new_phys_devs[new_idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_tramp),
                                                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
                if (NULL == new_phys_devs[new_idx]) {
                    loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                               "setup_loader_tramp_phys_devs: Failed to allocate new trampoline physical device");
                    res = VK_ERROR_OUT_OF_HOST_MEMORY;
                    goto out;
                }

                // Initialize the new physicalDevice object
                loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp);
                new_phys_devs[new_idx]->this_instance = inst;
                new_phys_devs[new_idx]->phys_dev = phys_devs[new_idx];
                new_phys_devs[new_idx]->magic = PHYS_TRAMP_MAGIC_NUMBER;
            }

            // Return the wrapped handle to the caller.
            phys_devs[new_idx] = (VkPhysicalDevice)new_phys_devs[new_idx];
        }

        // We usually get here if the user array is smaller than the total number of devices, so copy the
        // remaining devices we have over to the new array.
        uint32_t start = found_count;
        for (uint32_t new_idx = start; new_idx < new_count; ++new_idx) {
            for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
                if (old_to_new_index[cur_idx] == -1) {
                    new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
                    old_to_new_index[cur_idx] = new_idx;
                    found_count++;
                    break;
                }
            }
        }
    }

out:

    if (NULL != new_phys_devs) {
        if (VK_SUCCESS != res) {
            for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
                // If an OOM occurred inside the copying of the new physical devices into the existing array
                // will leave some of the old physical devices in the array which may have been copied into
                // the new array, leading to them being freed twice. To avoid this we just make sure to not
                // delete physical devices which were copied.
                bool found = false;
                for (uint32_t cur_idx = 0; cur_idx < inst->phys_dev_count_tramp; cur_idx++) {
                    if (new_phys_devs[new_idx] == inst->phys_devs_tramp[cur_idx]) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    loader_instance_heap_free(inst, new_phys_devs[new_idx]);
                }
            }
            loader_instance_heap_free(inst, new_phys_devs);
        } else {
            if (new_count > inst->total_gpu_count) {
                inst->total_gpu_count = new_count;
            }
            // Free everything in the old array that was not copied into the new array
            // here. We can't attempt to do that before here since the previous loop
            // looking before the "out:" label may hit an out of memory condition resulting
            // in memory leaking.
            if (NULL != inst->phys_devs_tramp) {
                for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) {
                    bool found = false;
                    for (uint32_t j = 0; j < inst->total_gpu_count; j++) {
                        if (inst->phys_devs_tramp[i] == new_phys_devs[j]) {
                            found = true;
                            break;
                        }
                    }
                    if (!found) {
                        loader_instance_heap_free(inst, inst->phys_devs_tramp[i]);
                    }
                }
                loader_instance_heap_free(inst, inst->phys_devs_tramp);
            }
            // Publish the new list.
            inst->phys_devs_tramp = new_phys_devs;
            inst->phys_dev_count_tramp = found_count;
        }
    }
    if (VK_SUCCESS != res) {
        inst->total_gpu_count = 0;
    }

    return res;
}
6281 | | |
6282 | | #if defined(LOADER_ENABLE_LINUX_SORT) |
6283 | 0 | bool is_linux_sort_enabled(struct loader_instance *inst) { |
6284 | 0 | bool sort_items = inst->supports_get_dev_prop_2; |
6285 | 0 | char *env_value = loader_getenv("VK_LOADER_DISABLE_SELECT", inst); |
6286 | 0 | if (NULL != env_value) { |
6287 | 0 | int32_t int_env_val = atoi(env_value); |
6288 | 0 | loader_free_getenv(env_value, inst); |
6289 | 0 | if (int_env_val != 0) { |
6290 | 0 | sort_items = false; |
6291 | 0 | } |
6292 | 0 | } |
6293 | 0 | return sort_items; |
6294 | 0 | } |
6295 | | #endif // LOADER_ENABLE_LINUX_SORT |
6296 | | |
6297 | | // Look for physical_device in the provided phys_devs list, return true if found and put the index into out_idx, otherwise |
6298 | | // return false |
6299 | | bool find_phys_dev(VkPhysicalDevice physical_device, uint32_t phys_devs_count, struct loader_physical_device_term **phys_devs, |
6300 | 0 | uint32_t *out_idx) { |
6301 | 0 | if (NULL == phys_devs) return false; |
6302 | 0 | for (uint32_t idx = 0; idx < phys_devs_count; idx++) { |
6303 | 0 | if (NULL != phys_devs[idx] && physical_device == phys_devs[idx]->phys_dev) { |
6304 | 0 | *out_idx = idx; |
6305 | 0 | return true; |
6306 | 0 | } |
6307 | 0 | } |
6308 | 0 | return false; |
6309 | 0 | } |
6310 | | |
6311 | | // Add physical_device to new_phys_devs |
6312 | | VkResult check_and_add_to_new_phys_devs(struct loader_instance *inst, VkPhysicalDevice physical_device, |
6313 | | struct loader_icd_physical_devices *dev_array, uint32_t *cur_new_phys_dev_count, |
6314 | 0 | struct loader_physical_device_term **new_phys_devs) { |
6315 | 0 | uint32_t out_idx = 0; |
6316 | 0 | uint32_t idx = *cur_new_phys_dev_count; |
6317 | | // Check if the physical_device already exists in the new_phys_devs buffer, that means it was found from both |
6318 | | // EnumerateAdapterPhysicalDevices and EnumeratePhysicalDevices and we need to skip it. |
6319 | 0 | if (find_phys_dev(physical_device, idx, new_phys_devs, &out_idx)) { |
6320 | 0 | return VK_SUCCESS; |
6321 | 0 | } |
6322 | | // Check if it was found in a previous call to vkEnumeratePhysicalDevices, we can just copy over the old data. |
6323 | 0 | if (find_phys_dev(physical_device, inst->phys_dev_count_term, inst->phys_devs_term, &out_idx)) { |
6324 | 0 | new_phys_devs[idx] = inst->phys_devs_term[out_idx]; |
6325 | 0 | (*cur_new_phys_dev_count)++; |
6326 | 0 | return VK_SUCCESS; |
6327 | 0 | } |
6328 | | |
6329 | | // Exit in case something is already present - this shouldn't happen but better to be safe than overwrite existing data |
6330 | | // since this code has been refactored a half dozen times. |
6331 | 0 | if (NULL != new_phys_devs[idx]) { |
6332 | 0 | return VK_SUCCESS; |
6333 | 0 | } |
6334 | | // If this physical device is new, we need to allocate space for it. |
6335 | 0 | new_phys_devs[idx] = |
6336 | 0 | loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
6337 | 0 | if (NULL == new_phys_devs[idx]) { |
6338 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6339 | 0 | "check_and_add_to_new_phys_devs: Failed to allocate physical device terminator object %d", idx); |
6340 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
6341 | 0 | } |
6342 | | |
6343 | 0 | loader_set_dispatch((void *)new_phys_devs[idx], inst->disp); |
6344 | 0 | new_phys_devs[idx]->this_icd_term = dev_array->icd_term; |
6345 | 0 | new_phys_devs[idx]->phys_dev = physical_device; |
6346 | | |
6347 | | // Increment the count of new physical devices |
6348 | 0 | (*cur_new_phys_dev_count)++; |
6349 | 0 | return VK_SUCCESS; |
6350 | 0 | } |
6351 | | |
6352 | | /* Enumerate all physical devices from ICDs and add them to inst->phys_devs_term |
6353 | | * |
6354 | | * There are two methods to find VkPhysicalDevices - vkEnumeratePhysicalDevices and vkEnumerateAdapterPhysicalDevices |
6355 | | * The latter is supported on windows only and on devices supporting ICD Interface Version 6 and greater. |
6356 | | * |
6357 | | * Once all physical devices are acquired, they need to be pulled into a single list of `loader_physical_device_term`'s. |
6358 | | * They also need to be setup - the icd_term, icd_index, phys_dev, and disp (dispatch table) all need the correct data. |
6359 | | * Additionally, we need to keep using already setup physical devices as they may be in use, thus anything enumerated |
6360 | | * that is already in inst->phys_devs_term will be carried over. |
6361 | | */ |
6362 | | |
VkResult setup_loader_term_phys_devs(struct loader_instance *inst) {
    VkResult res = VK_SUCCESS;
    struct loader_icd_term *icd_term;
    // Devices discovered via the Windows adapter-sorting path (ICD interface v6+).
    uint32_t windows_sorted_devices_count = 0;
    struct loader_icd_physical_devices *windows_sorted_devices_array = NULL;
    uint32_t icd_count = 0;
    // Per-ICD scratch results from plain vkEnumeratePhysicalDevices.
    struct loader_icd_physical_devices *icd_phys_dev_array = NULL;
    uint32_t new_phys_devs_capacity = 0;
    uint32_t new_phys_devs_count = 0;
    struct loader_physical_device_term **new_phys_devs = NULL;

#if defined(_WIN32)
    // Get the physical devices supported by platform sorting mechanism into a separate list
    res = windows_read_sorted_physical_devices(inst, &windows_sorted_devices_count, &windows_sorted_devices_array);
    if (VK_SUCCESS != res) {
        goto out;
    }
#endif

    icd_count = inst->icd_terms_count;

    // Allocate something to store the physical device characteristics that we read from each ICD.
    icd_phys_dev_array =
        (struct loader_icd_physical_devices *)loader_stack_alloc(sizeof(struct loader_icd_physical_devices) * icd_count);
    if (NULL == icd_phys_dev_array) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device info array of size %d",
                   icd_count);
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    memset(icd_phys_dev_array, 0, sizeof(struct loader_icd_physical_devices) * icd_count);

    // For each ICD, query the number of physical devices, and then get an
    // internal value for those physical devices.
    icd_term = inst->icd_terms;
    uint32_t icd_idx = 0;
    while (NULL != icd_term) {
        // First call: count only.
        res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].device_count, NULL);
        if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code "
                       "VK_ERROR_OUT_OF_HOST_MEMORY",
                       icd_term->scanned_icd->lib_name);
            goto out;
        } else if (VK_SUCCESS == res) {
            icd_phys_dev_array[icd_idx].physical_devices =
                (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].device_count * sizeof(VkPhysicalDevice));
            if (NULL == icd_phys_dev_array[icd_idx].physical_devices) {
                loader_log(
                    inst, VULKAN_LOADER_ERROR_BIT, 0,
                    "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device array for ICD %s of size %d",
                    icd_term->scanned_icd->lib_name, icd_phys_dev_array[icd_idx].device_count);
                res = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto out;
            }

            // Second call: fetch the actual handles.
            res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].device_count),
                                                              icd_phys_dev_array[icd_idx].physical_devices);
            if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
                loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                           "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code "
                           "VK_ERROR_OUT_OF_HOST_MEMORY",
                           icd_term->scanned_icd->lib_name);
                goto out;
            }
            if (VK_SUCCESS != res) {
                // Non-fatal driver failure: drop this ICD's devices and keep going.
                loader_log(
                    inst, VULKAN_LOADER_ERROR_BIT, 0,
                    "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code %d",
                    icd_term->scanned_icd->lib_name, res);
                icd_phys_dev_array[icd_idx].device_count = 0;
                icd_phys_dev_array[icd_idx].physical_devices = 0;
            }
        } else {
            // Non-fatal driver failure on the count query: drop this ICD's devices and keep going.
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code %d",
                       icd_term->scanned_icd->lib_name, res);
            icd_phys_dev_array[icd_idx].device_count = 0;
            icd_phys_dev_array[icd_idx].physical_devices = 0;
        }
        icd_phys_dev_array[icd_idx].icd_term = icd_term;
        icd_term->physical_device_count = icd_phys_dev_array[icd_idx].device_count;
        icd_term = icd_term->next;
        ++icd_idx;
    }

    // Add up both the windows sorted and non windows found physical device counts
    for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
        new_phys_devs_capacity += windows_sorted_devices_array[i].device_count;
    }
    for (uint32_t i = 0; i < icd_count; ++i) {
        new_phys_devs_capacity += icd_phys_dev_array[i].device_count;
    }

    // Bail out if there are no physical devices reported
    if (0 == new_phys_devs_capacity) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "setup_loader_term_phys_devs: Failed to detect any valid GPUs in the current config");
        res = VK_ERROR_INITIALIZATION_FAILED;
        goto out;
    }

    // Create an allocation large enough to hold both the windows sorting enumeration and non-windows physical device
    // enumeration
    new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_capacity,
                                                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == new_phys_devs) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "setup_loader_term_phys_devs: Failed to allocate new physical device array of size %d", new_phys_devs_capacity);
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }

    // Copy over everything found through sorted enumeration
    for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
        for (uint32_t j = 0; j < windows_sorted_devices_array[i].device_count; ++j) {
            res = check_and_add_to_new_phys_devs(inst, windows_sorted_devices_array[i].physical_devices[j],
                                                 &windows_sorted_devices_array[i], &new_phys_devs_count, new_phys_devs);
            if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
                goto out;
            }
        }
    }

    // Now go through the rest of the physical devices and add them to new_phys_devs
#if defined(LOADER_ENABLE_LINUX_SORT)

    if (is_linux_sort_enabled(inst)) {
        // Pre-allocate terminator objects for every remaining slot so the sort
        // routine can fill them in place.
        for (uint32_t dev = new_phys_devs_count; dev < new_phys_devs_capacity; ++dev) {
            new_phys_devs[dev] =
                loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            if (NULL == new_phys_devs[dev]) {
                loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                           "setup_loader_term_phys_devs: Failed to allocate physical device terminator object %d", dev);
                res = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto out;
            }
        }

        // Get the physical devices supported by platform sorting mechanism into a separate list
        // Pass in a sublist to the function so it only operates on the correct elements. This means passing in a pointer to the
        // current next element in new_phys_devs and passing in a `count` of currently unwritten elements
        res = linux_read_sorted_physical_devices(inst, icd_count, icd_phys_dev_array, new_phys_devs_capacity - new_phys_devs_count,
                                                 &new_phys_devs[new_phys_devs_count]);
        if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
            goto out;
        }
        // Keep previously allocated physical device info since apps may already be using that!
        for (uint32_t new_idx = new_phys_devs_count; new_idx < new_phys_devs_capacity; new_idx++) {
            for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
                if (new_phys_devs[new_idx]->phys_dev == inst->phys_devs_term[old_idx]->phys_dev) {
                    loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                               "Copying old device %u into new device %u", old_idx, new_idx);
                    // Free the old new_phys_devs info since we're not using it before we assign the new info
                    loader_instance_heap_free(inst, new_phys_devs[new_idx]);
                    new_phys_devs[new_idx] = inst->phys_devs_term[old_idx];
                    break;
                }
            }
        }
        // now set the count to the capacity, as now the list is filled in
        new_phys_devs_count = new_phys_devs_capacity;
        // We want the following code to run if either linux sorting is disabled at compile time or runtime
    } else {
#endif  // LOADER_ENABLE_LINUX_SORT

    // Copy over everything found through the non-sorted means.
    for (uint32_t i = 0; i < icd_count; ++i) {
        for (uint32_t j = 0; j < icd_phys_dev_array[i].device_count; ++j) {
            res = check_and_add_to_new_phys_devs(inst, icd_phys_dev_array[i].physical_devices[j], &icd_phys_dev_array[i],
                                                 &new_phys_devs_count, new_phys_devs);
            if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
                goto out;
            }
        }
    }
#if defined(LOADER_ENABLE_LINUX_SORT)
    }
#endif  // LOADER_ENABLE_LINUX_SORT
out:

    if (VK_SUCCESS != res) {
        if (NULL != new_phys_devs) {
            // We've encountered an error, so we should free the new buffers.
            for (uint32_t i = 0; i < new_phys_devs_capacity; i++) {
                // May not have allocated this far, skip it if we hadn't.
                if (new_phys_devs[i] == NULL) continue;

                // If an OOM occurred inside the copying of the new physical devices into the existing array
                // will leave some of the old physical devices in the array which may have been copied into
                // the new array, leading to them being freed twice. To avoid this we just make sure to not
                // delete physical devices which were copied.
                bool found = false;
                if (NULL != inst->phys_devs_term) {
                    for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
                        if (new_phys_devs[i] == inst->phys_devs_term[old_idx]) {
                            found = true;
                            break;
                        }
                    }
                }
                if (!found) {
                    loader_instance_heap_free(inst, new_phys_devs[i]);
                }
            }
            loader_instance_heap_free(inst, new_phys_devs);
        }
        inst->total_gpu_count = 0;
    } else {
        if (NULL != inst->phys_devs_term) {
            // Free everything in the old array that was not copied into the new array
            // here. We can't attempt to do that before here since the previous loop
            // looking before the "out:" label may hit an out of memory condition resulting
            // in memory leaking.
            for (uint32_t i = 0; i < inst->phys_dev_count_term; i++) {
                bool found = false;
                for (uint32_t j = 0; j < new_phys_devs_count; j++) {
                    if (new_phys_devs != NULL && inst->phys_devs_term[i] == new_phys_devs[j]) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    loader_instance_heap_free(inst, inst->phys_devs_term[i]);
                }
            }
            loader_instance_heap_free(inst, inst->phys_devs_term);
        }

        // Swap out old and new devices list
        inst->phys_dev_count_term = new_phys_devs_count;
        inst->phys_devs_term = new_phys_devs;
        inst->total_gpu_count = new_phys_devs_count;
    }

    // The windows sorted array is always scratch data at this point; free it on every path.
    if (windows_sorted_devices_array != NULL) {
        for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
            if (windows_sorted_devices_array[i].device_count > 0 && windows_sorted_devices_array[i].physical_devices != NULL) {
                loader_instance_heap_free(inst, windows_sorted_devices_array[i].physical_devices);
            }
        }
        loader_instance_heap_free(inst, windows_sorted_devices_array);
    }

    return res;
}
6610 | | /** |
6611 | | * Iterates through all drivers and unloads any which do not contain physical devices. |
6612 | | * This saves address space, which for 32 bit applications is scarce. |
6613 | | * This must only be called after a call to vkEnumeratePhysicalDevices that isn't just querying the count |
6614 | | */ |
6615 | 0 | void unload_drivers_without_physical_devices(struct loader_instance *inst) { |
6616 | 0 | struct loader_icd_term *cur_icd_term = inst->icd_terms; |
6617 | 0 | struct loader_icd_term *prev_icd_term = NULL; |
6618 | |
|
6619 | 0 | while (NULL != cur_icd_term) { |
6620 | 0 | struct loader_icd_term *next_icd_term = cur_icd_term->next; |
6621 | 0 | if (cur_icd_term->physical_device_count == 0) { |
6622 | 0 | uint32_t cur_scanned_icd_index = UINT32_MAX; |
6623 | 0 | if (inst->icd_tramp_list.scanned_list) { |
6624 | 0 | for (uint32_t i = 0; i < inst->icd_tramp_list.count; i++) { |
6625 | 0 | if (&(inst->icd_tramp_list.scanned_list[i]) == cur_icd_term->scanned_icd) { |
6626 | 0 | cur_scanned_icd_index = i; |
6627 | 0 | break; |
6628 | 0 | } |
6629 | 0 | } |
6630 | 0 | } |
6631 | 0 | if (cur_scanned_icd_index != UINT32_MAX) { |
6632 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6633 | 0 | "Removing driver %s due to not having any physical devices", cur_icd_term->scanned_icd->lib_name); |
6634 | |
|
6635 | 0 | const VkAllocationCallbacks *allocation_callbacks = ignore_null_callback(&(inst->alloc_callbacks)); |
6636 | 0 | if (cur_icd_term->instance) { |
6637 | 0 | loader_icd_close_objects(inst, cur_icd_term); |
6638 | 0 | cur_icd_term->dispatch.DestroyInstance(cur_icd_term->instance, allocation_callbacks); |
6639 | 0 | } |
6640 | 0 | cur_icd_term->instance = VK_NULL_HANDLE; |
6641 | 0 | loader_icd_destroy(inst, cur_icd_term, allocation_callbacks); |
6642 | 0 | cur_icd_term = NULL; |
6643 | 0 | struct loader_scanned_icd *scanned_icd_to_remove = &inst->icd_tramp_list.scanned_list[cur_scanned_icd_index]; |
6644 | | // Iterate through preloaded ICDs and remove the corresponding driver from that list |
6645 | 0 | loader_platform_thread_lock_mutex(&loader_preload_icd_lock); |
6646 | 0 | if (NULL != preloaded_icds.scanned_list) { |
6647 | 0 | for (uint32_t i = 0; i < preloaded_icds.count; i++) { |
6648 | 0 | if (NULL != preloaded_icds.scanned_list[i].lib_name && NULL != scanned_icd_to_remove->lib_name && |
6649 | 0 | strcmp(preloaded_icds.scanned_list[i].lib_name, scanned_icd_to_remove->lib_name) == 0) { |
6650 | 0 | loader_unload_scanned_icd(NULL, &preloaded_icds.scanned_list[i]); |
6651 | | // condense the list so that it doesn't contain empty elements. |
6652 | 0 | if (i < preloaded_icds.count - 1) { |
6653 | 0 | memcpy((void *)&preloaded_icds.scanned_list[i], |
6654 | 0 | (void *)&preloaded_icds.scanned_list[preloaded_icds.count - 1], |
6655 | 0 | sizeof(struct loader_scanned_icd)); |
6656 | 0 | memset((void *)&preloaded_icds.scanned_list[preloaded_icds.count - 1], 0, |
6657 | 0 | sizeof(struct loader_scanned_icd)); |
6658 | 0 | } |
6659 | 0 | if (i > 0) { |
6660 | 0 | preloaded_icds.count--; |
6661 | 0 | } |
6662 | |
|
6663 | 0 | break; |
6664 | 0 | } |
6665 | 0 | } |
6666 | 0 | } |
6667 | 0 | loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); |
6668 | |
|
6669 | 0 | loader_unload_scanned_icd(inst, scanned_icd_to_remove); |
6670 | 0 | } |
6671 | |
|
6672 | 0 | if (NULL == prev_icd_term) { |
6673 | 0 | inst->icd_terms = next_icd_term; |
6674 | 0 | } else { |
6675 | 0 | prev_icd_term->next = next_icd_term; |
6676 | 0 | } |
6677 | 0 | } else { |
6678 | 0 | prev_icd_term = cur_icd_term; |
6679 | 0 | } |
6680 | 0 | cur_icd_term = next_icd_term; |
6681 | 0 | } |
6682 | 0 | } |
6683 | | |
6684 | | VkResult setup_loader_tramp_phys_dev_groups(struct loader_instance *inst, uint32_t group_count, |
6685 | 0 | VkPhysicalDeviceGroupProperties *groups) { |
6686 | 0 | VkResult res = VK_SUCCESS; |
6687 | 0 | uint32_t cur_idx; |
6688 | 0 | uint32_t dev_idx; |
6689 | |
|
6690 | 0 | if (0 == group_count) { |
6691 | 0 | return VK_SUCCESS; |
6692 | 0 | } |
6693 | | |
6694 | | // Generate a list of all the devices and convert them to the loader ID |
6695 | 0 | uint32_t phys_dev_count = 0; |
6696 | 0 | for (cur_idx = 0; cur_idx < group_count; ++cur_idx) { |
6697 | 0 | phys_dev_count += groups[cur_idx].physicalDeviceCount; |
6698 | 0 | } |
6699 | 0 | VkPhysicalDevice *devices = (VkPhysicalDevice *)loader_stack_alloc(sizeof(VkPhysicalDevice) * phys_dev_count); |
6700 | 0 | if (NULL == devices) { |
6701 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
6702 | 0 | } |
6703 | | |
6704 | 0 | uint32_t cur_device = 0; |
6705 | 0 | for (cur_idx = 0; cur_idx < group_count; ++cur_idx) { |
6706 | 0 | for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) { |
6707 | 0 | devices[cur_device++] = groups[cur_idx].physicalDevices[dev_idx]; |
6708 | 0 | } |
6709 | 0 | } |
6710 | | |
6711 | | // Update the devices based on the loader physical device values. |
6712 | 0 | res = setup_loader_tramp_phys_devs(inst, phys_dev_count, devices); |
6713 | 0 | if (VK_SUCCESS != res) { |
6714 | 0 | return res; |
6715 | 0 | } |
6716 | | |
6717 | | // Update the devices in the group structures now |
6718 | 0 | cur_device = 0; |
6719 | 0 | for (cur_idx = 0; cur_idx < group_count; ++cur_idx) { |
6720 | 0 | for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) { |
6721 | 0 | groups[cur_idx].physicalDevices[dev_idx] = devices[cur_device++]; |
6722 | 0 | } |
6723 | 0 | } |
6724 | |
|
6725 | 0 | return res; |
6726 | 0 | } |
6727 | | |
6728 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, |
6729 | 0 | VkPhysicalDevice *pPhysicalDevices) { |
6730 | 0 | struct loader_instance *inst = (struct loader_instance *)instance; |
6731 | 0 | VkResult res = VK_SUCCESS; |
6732 | | |
6733 | | // Always call the setup loader terminator physical devices because they may |
6734 | | // have changed at any point. |
6735 | 0 | res = setup_loader_term_phys_devs(inst); |
6736 | 0 | if (VK_SUCCESS != res) { |
6737 | 0 | goto out; |
6738 | 0 | } |
6739 | | |
6740 | 0 | uint32_t copy_count = inst->phys_dev_count_term; |
6741 | 0 | if (NULL != pPhysicalDevices) { |
6742 | 0 | if (copy_count > *pPhysicalDeviceCount) { |
6743 | 0 | copy_count = *pPhysicalDeviceCount; |
6744 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
6745 | 0 | "terminator_EnumeratePhysicalDevices : Trimming device count from %d to %d.", inst->phys_dev_count_term, |
6746 | 0 | copy_count); |
6747 | 0 | res = VK_INCOMPLETE; |
6748 | 0 | } |
6749 | |
|
6750 | 0 | for (uint32_t i = 0; i < copy_count; i++) { |
6751 | 0 | pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i]; |
6752 | 0 | } |
6753 | 0 | } |
6754 | |
|
6755 | 0 | *pPhysicalDeviceCount = copy_count; |
6756 | |
|
6757 | 0 | out: |
6758 | |
|
6759 | 0 | return res; |
6760 | 0 | } |
6761 | | |
// Terminator for vkEnumerateDeviceExtensionProperties. Handles three distinct cases:
//   1. pLayerName is non-empty: report the named layer's device extensions straight from
//      the loader's parsed layer (JSON) data; the driver is never called.
//   2. pLayerName empty/NULL and pProperties != NULL: fill the caller's array with the
//      driver's extensions, then append implicit-layer device extensions (de-duplicated).
//   3. pLayerName empty/NULL and pProperties == NULL: count-only query; the de-duplicated
//      total of driver + implicit-layer extensions is written to *pPropertyCount.
// Returns VK_INCOMPLETE when the caller's array is too small, and
// VK_ERROR_EXTENSION_NOT_PRESENT for a malformed pLayerName.
VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                             const char *pLayerName, uint32_t *pPropertyCount,
                                                                             VkExtensionProperties *pProperties) {
    // NOTE(review): VK_INCOMPLETE for a NULL pPropertyCount is unusual (the spec requires a
    // valid pointer here); presumably this is a defensive guard — confirm intent.
    if (NULL == pPropertyCount) {
        return VK_INCOMPLETE;
    }

    struct loader_physical_device_term *phys_dev_term;

    // By the time the terminator runs, all layer/trampoline wrapping has been unwrapped,
    // so the handle can be cast directly to the loader's terminator physical device struct.
    phys_dev_term = (struct loader_physical_device_term *)physicalDevice;

    // Case 1: a non-empty pLayerName means the caller wants the extensions a specific
    // layer provides, which the loader answers from its parsed layer manifest (JSON) data.
    if (pLayerName != NULL && strlen(pLayerName) > 0) {
        uint32_t count;
        uint32_t copy_size;
        const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance;
        struct loader_device_extension_list *dev_ext_list = NULL;
        struct loader_device_extension_list local_ext_list;
        memset(&local_ext_list, 0, sizeof(local_ext_list));
        if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) {
            // Find the layer by name; the last match wins if duplicates exist.
            for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) {
                struct loader_layer_properties *props = &inst->instance_layer_list.list[i];
                if (strcmp(props->info.layerName, pLayerName) == 0) {
                    dev_ext_list = &props->device_extension_list;
                }
            }

            // An unknown layer name yields a count of 0 rather than an error.
            count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
            if (pProperties == NULL) {
                *pPropertyCount = count;
                loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
                return VK_SUCCESS;
            }

            // Copy as many properties as fit in the caller's array.
            copy_size = *pPropertyCount < count ? *pPropertyCount : count;
            for (uint32_t i = 0; i < copy_size; i++) {
                memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties));
            }
            *pPropertyCount = copy_size;

            loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
            if (copy_size < count) {
                return VK_INCOMPLETE;
            }
        } else {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "vkEnumerateDeviceExtensionProperties: pLayerName is too long or is badly formed");
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }

        return VK_SUCCESS;
    }

    // Case 2: user is querying driver extensions and has supplied their own storage - just fill it out
    else if (pProperties) {
        struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
        uint32_t written_count = *pPropertyCount;
        VkResult res =
            icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &written_count, pProperties);
        if (res != VK_SUCCESS) {
            return res;
        }

        // Iterate over active layers; if they are an implicit layer, add their device extensions.
        // After calling into the driver, written_count contains the number of device extensions
        // written, so layer extensions can be appended starting at that index in pProperties.
        for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
            struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
            if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
                struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
                for (uint32_t j = 0; j < layer_ext_list->count; j++) {
                    struct loader_dev_ext_props *cur_ext_props = &layer_ext_list->list[j];
                    // Skip extensions the driver already reported (duplicates).
                    if (has_vk_extension_property_array(&cur_ext_props->props, written_count, pProperties)) {
                        continue;
                    }

                    // No room left in the caller's array for this layer extension.
                    if (*pPropertyCount <= written_count) {
                        return VK_INCOMPLETE;
                    }

                    memcpy(&pProperties[written_count], &cur_ext_props->props, sizeof(VkExtensionProperties));
                    written_count++;
                }
            }
        }
        // Make sure we update the pPropertyCount with how many were actually written.
        *pPropertyCount = written_count;
        return res;
    }
    // The rest of this function (case 3) uses `goto out;` for cleanup.

    // Case 3: pLayerName == NULL and pProperties == NULL — count-only query during the call
    // down the instance chain.
    struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
    struct loader_extension_list all_exts = {0};
    VkResult res;

    // We need the count without duplicates, which requires querying the driver for the
    // actual extension names, not just its count.
    res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, NULL);
    if (res != VK_SUCCESS) {
        goto out;
    }
    // Then allocate memory to store the physical device extension list plus the extensions
    // layers provide. all_exts.count currently is the number of driver extensions; the +20
    // is slack for layer-provided extensions (loader_add_to_ext_list grows the list if needed).
    all_exts.capacity = sizeof(VkExtensionProperties) * (all_exts.count + 20);
    all_exts.list = loader_instance_heap_alloc(icd_term->this_instance, all_exts.capacity, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
    if (NULL == all_exts.list) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }

    // Get the available device extensions and put them in all_exts.list
    res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, all_exts.list);
    if (res != VK_SUCCESS) {
        goto out;
    }

    // Iterate over active layers; if they are an implicit layer, merge their device
    // extensions into all_exts.list (loader_add_to_ext_list de-duplicates).
    for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
        struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
        if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
            struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
            for (uint32_t j = 0; j < layer_ext_list->count; j++) {
                res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1, &layer_ext_list->list[j].props);
                if (res != VK_SUCCESS) {
                    goto out;
                }
            }
        }
    }

    // Write out the final de-duplicated count to pPropertyCount
    *pPropertyCount = all_exts.count;
    res = VK_SUCCESS;

out:

    loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts);
    return res;
}
6905 | | |
// Validate that a NUL-terminated string is no longer than max_length and is well-formed
// UTF-8. Returns VK_STRING_ERROR_NONE on success, otherwise a bitmask that may include
// VK_STRING_ERROR_LENGTH (no terminator within max_length bytes) and/or
// VK_STRING_ERROR_BAD_DATA (invalid lead byte or bad continuation byte), or
// VK_STRING_ERROR_NULL_PTR for a NULL input.
VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
    VkStringErrorFlags result = VK_STRING_ERROR_NONE;
    // Number of continuation bytes expected after the current lead byte.
    int num_char_bytes = 0;
    int i, j;

    if (utf8 == NULL) {
        return VK_STRING_ERROR_NULL_PTR;
    }

    // Walk the string one byte at a time; the inner loop below also advances i past
    // multi-byte sequences, so i moves in steps of 1..4 per iteration.
    for (i = 0; i <= max_length; i++) {
        if (utf8[i] == 0) {
            // Terminator found within bounds — done.
            break;
        } else if (i == max_length) {
            // Reached the limit without finding a terminator.
            result |= VK_STRING_ERROR_LENGTH;
            break;
        } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
            // Printable ASCII: a complete single-byte character.
            // NOTE(review): control bytes (< 0x20) and 0x7f fall through all branches and
            // silently leave num_char_bytes at its previous value — presumably intentional
            // leniency, but worth confirming.
            num_char_bytes = 0;
        } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
            // Lead byte of a sequence with 1 continuation byte.
            num_char_bytes = 1;
        } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
            // Lead byte of a sequence with 2 continuation bytes.
            num_char_bytes = 2;
        } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
            // Lead byte of a sequence with 3 continuation bytes.
            num_char_bytes = 3;
        } else {
            result = VK_STRING_ERROR_BAD_DATA;
        }

        // Validate the following num_char_bytes of data; each must be a UTF-8
        // continuation byte. This advances the outer index i.
        for (j = 0; (j < num_char_bytes) && (i < max_length); j++) {
            if (++i == max_length) {
                result |= VK_STRING_ERROR_LENGTH;
                break;
            }
            if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) {
                result |= VK_STRING_ERROR_BAD_DATA;
            }
        }
    }
    return result;
}
6946 | | |
6947 | 0 | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceVersion(uint32_t *pApiVersion) { |
6948 | | // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead |
6949 | | // prefers us crashing. |
6950 | 0 | *pApiVersion = VK_HEADER_VERSION_COMPLETE; |
6951 | 0 | return VK_SUCCESS; |
6952 | 0 | } |
6953 | | |
6954 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain, |
6955 | 0 | uint32_t *pApiVersion) { |
6956 | 0 | (void)chain; |
6957 | 0 | return terminator_EnumerateInstanceVersion(pApiVersion); |
6958 | 0 | } |
6959 | | |
6960 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pPropertyCount, |
6961 | 0 | VkExtensionProperties *pProperties) { |
6962 | 0 | struct loader_extension_list *global_ext_list = NULL; |
6963 | 0 | struct loader_layer_list instance_layers; |
6964 | 0 | struct loader_extension_list local_ext_list; |
6965 | 0 | struct loader_icd_tramp_list icd_tramp_list; |
6966 | 0 | uint32_t copy_size; |
6967 | 0 | VkResult res = VK_SUCCESS; |
6968 | 0 | struct loader_envvar_all_filters layer_filters = {0}; |
6969 | |
|
6970 | 0 | memset(&local_ext_list, 0, sizeof(local_ext_list)); |
6971 | 0 | memset(&instance_layers, 0, sizeof(instance_layers)); |
6972 | 0 | memset(&icd_tramp_list, 0, sizeof(icd_tramp_list)); |
6973 | |
|
6974 | 0 | res = parse_layer_environment_var_filters(NULL, &layer_filters); |
6975 | 0 | if (VK_SUCCESS != res) { |
6976 | 0 | goto out; |
6977 | 0 | } |
6978 | | |
6979 | | // Get layer libraries if needed |
6980 | 0 | if (pLayerName && strlen(pLayerName) != 0) { |
6981 | 0 | if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) { |
6982 | 0 | assert(VK_FALSE && "vkEnumerateInstanceExtensionProperties: pLayerName is too long or is badly formed"); |
6983 | 0 | res = VK_ERROR_EXTENSION_NOT_PRESENT; |
6984 | 0 | goto out; |
6985 | 0 | } |
6986 | | |
6987 | 0 | res = loader_scan_for_layers(NULL, &instance_layers, &layer_filters); |
6988 | 0 | if (VK_SUCCESS != res) { |
6989 | 0 | goto out; |
6990 | 0 | } |
6991 | 0 | for (uint32_t i = 0; i < instance_layers.count; i++) { |
6992 | 0 | struct loader_layer_properties *props = &instance_layers.list[i]; |
6993 | 0 | if (strcmp(props->info.layerName, pLayerName) == 0) { |
6994 | 0 | global_ext_list = &props->instance_extension_list; |
6995 | 0 | break; |
6996 | 0 | } |
6997 | 0 | } |
6998 | 0 | } else { |
6999 | | // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them |
7000 | 0 | loader_preload_icds(); |
7001 | | |
7002 | | // Scan/discover all ICD libraries |
7003 | 0 | res = loader_icd_scan(NULL, &icd_tramp_list, NULL, NULL); |
7004 | | // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT |
7005 | 0 | if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
7006 | 0 | goto out; |
7007 | 0 | } |
7008 | | // Get extensions from all ICD's, merge so no duplicates |
7009 | 0 | res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list); |
7010 | 0 | if (VK_SUCCESS != res) { |
7011 | 0 | goto out; |
7012 | 0 | } |
7013 | 0 | loader_clear_scanned_icd_list(NULL, &icd_tramp_list); |
7014 | | |
7015 | | // Append enabled implicit layers. |
7016 | 0 | res = loader_scan_for_implicit_layers(NULL, &instance_layers, &layer_filters); |
7017 | 0 | if (VK_SUCCESS != res) { |
7018 | 0 | goto out; |
7019 | 0 | } |
7020 | 0 | for (uint32_t i = 0; i < instance_layers.count; i++) { |
7021 | 0 | struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list; |
7022 | 0 | loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list); |
7023 | 0 | } |
7024 | |
|
7025 | 0 | global_ext_list = &local_ext_list; |
7026 | 0 | } |
7027 | | |
7028 | 0 | if (global_ext_list == NULL) { |
7029 | 0 | res = VK_ERROR_LAYER_NOT_PRESENT; |
7030 | 0 | goto out; |
7031 | 0 | } |
7032 | | |
7033 | 0 | if (pProperties == NULL) { |
7034 | 0 | *pPropertyCount = global_ext_list->count; |
7035 | 0 | goto out; |
7036 | 0 | } |
7037 | | |
7038 | 0 | copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count; |
7039 | 0 | for (uint32_t i = 0; i < copy_size; i++) { |
7040 | 0 | memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties)); |
7041 | 0 | } |
7042 | 0 | *pPropertyCount = copy_size; |
7043 | |
|
7044 | 0 | if (copy_size < global_ext_list->count) { |
7045 | 0 | res = VK_INCOMPLETE; |
7046 | 0 | goto out; |
7047 | 0 | } |
7048 | | |
7049 | 0 | out: |
7050 | 0 | loader_destroy_generic_list(NULL, (struct loader_generic_list *)&icd_tramp_list); |
7051 | 0 | loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list); |
7052 | 0 | loader_delete_layer_list_and_properties(NULL, &instance_layers); |
7053 | 0 | return res; |
7054 | 0 | } |
7055 | | |
7056 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceExtensionProperties( |
7057 | | const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName, uint32_t *pPropertyCount, |
7058 | 0 | VkExtensionProperties *pProperties) { |
7059 | 0 | (void)chain; |
7060 | 0 | return terminator_EnumerateInstanceExtensionProperties(pLayerName, pPropertyCount, pProperties); |
7061 | 0 | } |
7062 | | |
7063 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount, |
7064 | 0 | VkLayerProperties *pProperties) { |
7065 | 0 | VkResult result = VK_SUCCESS; |
7066 | 0 | struct loader_layer_list instance_layer_list; |
7067 | 0 | struct loader_envvar_all_filters layer_filters = {0}; |
7068 | |
|
7069 | 0 | LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize); |
7070 | |
|
7071 | 0 | result = parse_layer_environment_var_filters(NULL, &layer_filters); |
7072 | 0 | if (VK_SUCCESS != result) { |
7073 | 0 | goto out; |
7074 | 0 | } |
7075 | | |
7076 | | // Get layer libraries |
7077 | 0 | memset(&instance_layer_list, 0, sizeof(instance_layer_list)); |
7078 | 0 | result = loader_scan_for_layers(NULL, &instance_layer_list, &layer_filters); |
7079 | 0 | if (VK_SUCCESS != result) { |
7080 | 0 | goto out; |
7081 | 0 | } |
7082 | | |
7083 | 0 | uint32_t layers_to_write_out = 0; |
7084 | 0 | for (uint32_t i = 0; i < instance_layer_list.count; i++) { |
7085 | 0 | if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON || |
7086 | 0 | instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) { |
7087 | 0 | layers_to_write_out++; |
7088 | 0 | } |
7089 | 0 | } |
7090 | |
|
7091 | 0 | if (pProperties == NULL) { |
7092 | 0 | *pPropertyCount = layers_to_write_out; |
7093 | 0 | goto out; |
7094 | 0 | } |
7095 | | |
7096 | 0 | uint32_t output_properties_index = 0; |
7097 | 0 | for (uint32_t i = 0; i < instance_layer_list.count; i++) { |
7098 | 0 | if (output_properties_index < *pPropertyCount && |
7099 | 0 | (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON || |
7100 | 0 | instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT)) { |
7101 | 0 | memcpy(&pProperties[output_properties_index], &instance_layer_list.list[i].info, sizeof(VkLayerProperties)); |
7102 | 0 | output_properties_index++; |
7103 | 0 | } |
7104 | 0 | } |
7105 | 0 | if (output_properties_index < layers_to_write_out) { |
7106 | | // Indicates that we had more elements to write but ran out of room |
7107 | 0 | result = VK_INCOMPLETE; |
7108 | 0 | } |
7109 | |
|
7110 | 0 | *pPropertyCount = output_properties_index; |
7111 | |
|
7112 | 0 | out: |
7113 | |
|
7114 | 0 | loader_delete_layer_list_and_properties(NULL, &instance_layer_list); |
7115 | 0 | return result; |
7116 | 0 | } |
7117 | | |
7118 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceLayerProperties( |
7119 | 0 | const VkEnumerateInstanceLayerPropertiesChain *chain, uint32_t *pPropertyCount, VkLayerProperties *pProperties) { |
7120 | 0 | (void)chain; |
7121 | 0 | return terminator_EnumerateInstanceLayerProperties(pPropertyCount, pProperties); |
7122 | 0 | } |
7123 | | |
7124 | | // ---- Vulkan Core 1.1 terminators |
7125 | | |
7126 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups( |
7127 | 0 | VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) { |
7128 | 0 | struct loader_instance *inst = (struct loader_instance *)instance; |
7129 | |
|
7130 | 0 | VkResult res = VK_SUCCESS; |
7131 | 0 | struct loader_icd_term *icd_term; |
7132 | 0 | uint32_t total_count = 0; |
7133 | 0 | uint32_t cur_icd_group_count = 0; |
7134 | 0 | VkPhysicalDeviceGroupProperties **new_phys_dev_groups = NULL; |
7135 | 0 | struct loader_physical_device_group_term *local_phys_dev_groups = NULL; |
7136 | 0 | PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL; |
7137 | 0 | struct loader_icd_physical_devices *sorted_phys_dev_array = NULL; |
7138 | 0 | uint32_t sorted_count = 0; |
7139 | | |
7140 | | // For each ICD, query the number of physical device groups, and then get an |
7141 | | // internal value for those physical devices. |
7142 | 0 | icd_term = inst->icd_terms; |
7143 | 0 | while (NULL != icd_term) { |
7144 | 0 | cur_icd_group_count = 0; |
7145 | | |
7146 | | // Get the function pointer to use to call into the ICD. This could be the core or KHR version |
7147 | 0 | if (inst->enabled_extensions.khr_device_group_creation) { |
7148 | 0 | fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR; |
7149 | 0 | } else { |
7150 | 0 | fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups; |
7151 | 0 | } |
7152 | |
|
7153 | 0 | if (NULL == fpEnumeratePhysicalDeviceGroups) { |
7154 | | // Treat each ICD's GPU as it's own group if the extension isn't supported |
7155 | 0 | res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL); |
7156 | 0 | if (res != VK_SUCCESS) { |
7157 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7158 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of \'EnumeratePhysicalDevices\' " |
7159 | 0 | "to ICD %s to get plain phys dev count.", |
7160 | 0 | icd_term->scanned_icd->lib_name); |
7161 | 0 | continue; |
7162 | 0 | } |
7163 | 0 | } else { |
7164 | | // Query the actual group info |
7165 | 0 | res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL); |
7166 | 0 | if (res != VK_SUCCESS) { |
7167 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7168 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7169 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get count.", |
7170 | 0 | icd_term->scanned_icd->lib_name); |
7171 | 0 | continue; |
7172 | 0 | } |
7173 | 0 | } |
7174 | 0 | total_count += cur_icd_group_count; |
7175 | 0 | icd_term = icd_term->next; |
7176 | 0 | } |
7177 | | |
7178 | | // If GPUs not sorted yet, look through them and generate list of all available GPUs |
7179 | 0 | if (0 == total_count || 0 == inst->total_gpu_count) { |
7180 | 0 | res = setup_loader_term_phys_devs(inst); |
7181 | 0 | if (VK_SUCCESS != res) { |
7182 | 0 | goto out; |
7183 | 0 | } |
7184 | 0 | } |
7185 | | |
7186 | 0 | if (NULL != pPhysicalDeviceGroupProperties) { |
7187 | | // Create an array for the new physical device groups, which will be stored |
7188 | | // in the instance for the Terminator code. |
7189 | 0 | new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_calloc( |
7190 | 0 | inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
7191 | 0 | if (NULL == new_phys_dev_groups) { |
7192 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7193 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate new physical device group array of size %d", |
7194 | 0 | total_count); |
7195 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
7196 | 0 | goto out; |
7197 | 0 | } |
7198 | | |
7199 | | // Create a temporary array (on the stack) to keep track of the |
7200 | | // returned VkPhysicalDevice values. |
7201 | 0 | local_phys_dev_groups = loader_stack_alloc(sizeof(struct loader_physical_device_group_term) * total_count); |
7202 | | // Initialize the memory to something valid |
7203 | 0 | memset(local_phys_dev_groups, 0, sizeof(struct loader_physical_device_group_term) * total_count); |
7204 | |
|
7205 | | #if defined(_WIN32) |
7206 | | // Get the physical devices supported by platform sorting mechanism into a separate list |
7207 | | res = windows_read_sorted_physical_devices(inst, &sorted_count, &sorted_phys_dev_array); |
7208 | | if (VK_SUCCESS != res) { |
7209 | | goto out; |
7210 | | } |
7211 | | #endif |
7212 | |
|
7213 | 0 | cur_icd_group_count = 0; |
7214 | 0 | icd_term = inst->icd_terms; |
7215 | 0 | while (NULL != icd_term) { |
7216 | 0 | uint32_t count_this_time = total_count - cur_icd_group_count; |
7217 | | |
7218 | | // Get the function pointer to use to call into the ICD. This could be the core or KHR version |
7219 | 0 | if (inst->enabled_extensions.khr_device_group_creation) { |
7220 | 0 | fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR; |
7221 | 0 | } else { |
7222 | 0 | fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups; |
7223 | 0 | } |
7224 | |
|
7225 | 0 | if (NULL == fpEnumeratePhysicalDeviceGroups) { |
7226 | 0 | icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, NULL); |
7227 | |
|
7228 | 0 | VkPhysicalDevice *phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time); |
7229 | 0 | if (NULL == phys_dev_array) { |
7230 | 0 | loader_log( |
7231 | 0 | inst, VULKAN_LOADER_ERROR_BIT, 0, |
7232 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate local physical device array of size %d", |
7233 | 0 | count_this_time); |
7234 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
7235 | 0 | goto out; |
7236 | 0 | } |
7237 | | |
7238 | 0 | res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array); |
7239 | 0 | if (res != VK_SUCCESS) { |
7240 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7241 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7242 | 0 | "\'EnumeratePhysicalDevices\' to ICD %s to get plain phys dev count.", |
7243 | 0 | icd_term->scanned_icd->lib_name); |
7244 | 0 | goto out; |
7245 | 0 | } |
7246 | | |
7247 | | // Add each GPU as it's own group |
7248 | 0 | for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) { |
7249 | 0 | uint32_t cur_index = indiv_gpu + cur_icd_group_count; |
7250 | 0 | local_phys_dev_groups[cur_index].this_icd_term = icd_term; |
7251 | 0 | local_phys_dev_groups[cur_index].group_props.physicalDeviceCount = 1; |
7252 | 0 | local_phys_dev_groups[cur_index].group_props.physicalDevices[0] = phys_dev_array[indiv_gpu]; |
7253 | 0 | } |
7254 | |
|
7255 | 0 | } else { |
7256 | 0 | res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, NULL); |
7257 | 0 | if (res != VK_SUCCESS) { |
7258 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7259 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7260 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get group count.", |
7261 | 0 | icd_term->scanned_icd->lib_name); |
7262 | 0 | goto out; |
7263 | 0 | } |
7264 | 0 | if (cur_icd_group_count + count_this_time < *pPhysicalDeviceGroupCount) { |
7265 | | // The total amount is still less than the amount of physical device group data passed in |
7266 | | // by the callee. Therefore, we don't have to allocate any temporary structures and we |
7267 | | // can just use the data that was passed in. |
7268 | 0 | res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, |
7269 | 0 | &pPhysicalDeviceGroupProperties[cur_icd_group_count]); |
7270 | 0 | if (res != VK_SUCCESS) { |
7271 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7272 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7273 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get group information.", |
7274 | 0 | icd_term->scanned_icd->lib_name); |
7275 | 0 | goto out; |
7276 | 0 | } |
7277 | 0 | for (uint32_t group = 0; group < count_this_time; ++group) { |
7278 | 0 | uint32_t cur_index = group + cur_icd_group_count; |
7279 | 0 | local_phys_dev_groups[cur_index].group_props = pPhysicalDeviceGroupProperties[cur_index]; |
7280 | 0 | local_phys_dev_groups[cur_index].this_icd_term = icd_term; |
7281 | 0 | } |
7282 | 0 | } else { |
7283 | | // There's not enough space in the callee's allocated pPhysicalDeviceGroupProperties structs, |
7284 | | // so we have to allocate temporary versions to collect all the data. However, we need to make |
7285 | | // sure that at least the ones we do query utilize any pNext data in the callee's version. |
7286 | 0 | VkPhysicalDeviceGroupProperties *tmp_group_props = |
7287 | 0 | loader_stack_alloc(count_this_time * sizeof(VkPhysicalDeviceGroupProperties)); |
7288 | 0 | for (uint32_t group = 0; group < count_this_time; group++) { |
7289 | 0 | tmp_group_props[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES; |
7290 | 0 | uint32_t cur_index = group + cur_icd_group_count; |
7291 | 0 | if (*pPhysicalDeviceGroupCount > cur_index) { |
7292 | 0 | tmp_group_props[group].pNext = pPhysicalDeviceGroupProperties[cur_index].pNext; |
7293 | 0 | } else { |
7294 | 0 | tmp_group_props[group].pNext = NULL; |
7295 | 0 | } |
7296 | 0 | tmp_group_props[group].subsetAllocation = false; |
7297 | 0 | } |
7298 | |
|
7299 | 0 | res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, tmp_group_props); |
7300 | 0 | if (res != VK_SUCCESS) { |
7301 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7302 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7303 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get group information for temp data.", |
7304 | 0 | icd_term->scanned_icd->lib_name); |
7305 | 0 | goto out; |
7306 | 0 | } |
7307 | 0 | for (uint32_t group = 0; group < count_this_time; ++group) { |
7308 | 0 | uint32_t cur_index = group + cur_icd_group_count; |
7309 | 0 | local_phys_dev_groups[cur_index].group_props = tmp_group_props[group]; |
7310 | 0 | local_phys_dev_groups[cur_index].this_icd_term = icd_term; |
7311 | 0 | } |
7312 | 0 | } |
7313 | 0 | if (VK_SUCCESS != res) { |
7314 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7315 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7316 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get content.", |
7317 | 0 | icd_term->scanned_icd->lib_name); |
7318 | 0 | goto out; |
7319 | 0 | } |
7320 | 0 | } |
7321 | | |
7322 | 0 | cur_icd_group_count += count_this_time; |
7323 | 0 | icd_term = icd_term->next; |
7324 | 0 | } |
7325 | | |
7326 | 0 | #if defined(LOADER_ENABLE_LINUX_SORT) |
7327 | 0 | if (is_linux_sort_enabled(inst)) { |
7328 | | // Get the physical devices supported by platform sorting mechanism into a separate list |
7329 | 0 | res = linux_sort_physical_device_groups(inst, total_count, local_phys_dev_groups); |
7330 | 0 | } |
7331 | | #elif defined(_WIN32) |
7332 | | // The Windows sorting information is only on physical devices. We need to take that and convert it to the group |
7333 | | // information if it's present. |
7334 | | if (sorted_count > 0) { |
7335 | | res = |
7336 | | windows_sort_physical_device_groups(inst, total_count, local_phys_dev_groups, sorted_count, sorted_phys_dev_array); |
7337 | | } |
7338 | | #endif // LOADER_ENABLE_LINUX_SORT |
7339 | | |
7340 | | // Just to be safe, make sure we successfully completed setup_loader_term_phys_devs above |
7341 | | // before attempting to do the following. By verifying that setup_loader_term_phys_devs ran |
7342 | | // first, it guarantees that each physical device will have a loader-specific handle. |
7343 | 0 | if (NULL != inst->phys_devs_term) { |
7344 | 0 | for (uint32_t group = 0; group < total_count; group++) { |
7345 | 0 | for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].group_props.physicalDeviceCount; |
7346 | 0 | group_gpu++) { |
7347 | 0 | bool found = false; |
7348 | 0 | for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) { |
7349 | 0 | if (local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] == |
7350 | 0 | inst->phys_devs_term[term_gpu]->phys_dev) { |
7351 | 0 | local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] = |
7352 | 0 | (VkPhysicalDevice)inst->phys_devs_term[term_gpu]; |
7353 | 0 | found = true; |
7354 | 0 | break; |
7355 | 0 | } |
7356 | 0 | } |
7357 | 0 | if (!found) { |
7358 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7359 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed to find GPU %d in group %d returned by " |
7360 | 0 | "\'EnumeratePhysicalDeviceGroups\' in list returned by \'EnumeratePhysicalDevices\'", |
7361 | 0 | group_gpu, group); |
7362 | 0 | res = VK_ERROR_INITIALIZATION_FAILED; |
7363 | 0 | goto out; |
7364 | 0 | } |
7365 | 0 | } |
7366 | 0 | } |
7367 | 0 | } |
7368 | | |
7369 | 0 | uint32_t idx = 0; |
7370 | | |
7371 | | // Copy or create everything to fill the new array of physical device groups |
7372 | 0 | for (uint32_t group = 0; group < total_count; group++) { |
7373 | | // Skip groups which have been included through sorting |
7374 | 0 | if (local_phys_dev_groups[group].group_props.physicalDeviceCount == 0) { |
7375 | 0 | continue; |
7376 | 0 | } |
7377 | | |
7378 | | // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups |
7379 | 0 | VkPhysicalDeviceGroupProperties *group_properties = &local_phys_dev_groups[group].group_props; |
7380 | | |
7381 | | // Check if this physical device group with the same contents is already in the old buffer |
7382 | 0 | for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) { |
7383 | 0 | if (NULL != group_properties && NULL != inst->phys_dev_groups_term[old_idx] && |
7384 | 0 | group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) { |
7385 | 0 | bool found_all_gpus = true; |
7386 | 0 | for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) { |
7387 | 0 | bool found_gpu = false; |
7388 | 0 | for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) { |
7389 | 0 | if (group_properties->physicalDevices[new_gpu] == |
7390 | 0 | inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) { |
7391 | 0 | found_gpu = true; |
7392 | 0 | break; |
7393 | 0 | } |
7394 | 0 | } |
7395 | |
|
7396 | 0 | if (!found_gpu) { |
7397 | 0 | found_all_gpus = false; |
7398 | 0 | break; |
7399 | 0 | } |
7400 | 0 | } |
7401 | 0 | if (!found_all_gpus) { |
7402 | 0 | continue; |
7403 | 0 | } else { |
7404 | 0 | new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx]; |
7405 | 0 | break; |
7406 | 0 | } |
7407 | 0 | } |
7408 | 0 | } |
7409 | | // If this physical device group isn't in the old buffer, create it |
7410 | 0 | if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) { |
7411 | 0 | new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupProperties *)loader_instance_heap_alloc( |
7412 | 0 | inst, sizeof(VkPhysicalDeviceGroupProperties), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
7413 | 0 | if (NULL == new_phys_dev_groups[idx]) { |
7414 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7415 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate physical device group Terminator " |
7416 | 0 | "object %d", |
7417 | 0 | idx); |
7418 | 0 | total_count = idx; |
7419 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
7420 | 0 | goto out; |
7421 | 0 | } |
7422 | 0 | memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupProperties)); |
7423 | 0 | } |
7424 | | |
7425 | 0 | ++idx; |
7426 | 0 | } |
7427 | 0 | } |
7428 | | |
7429 | 0 | out: |
7430 | |
|
7431 | 0 | if (NULL != pPhysicalDeviceGroupProperties) { |
7432 | 0 | if (VK_SUCCESS != res) { |
7433 | 0 | if (NULL != new_phys_dev_groups) { |
7434 | | // We've encountered an error, so we should free the new buffers. |
7435 | 0 | for (uint32_t i = 0; i < total_count; i++) { |
7436 | | // If an OOM occurred inside the copying of the new physical device groups into the existing array will |
7437 | | // leave some of the old physical device groups in the array which may have been copied into the new array, |
7438 | | // leading to them being freed twice. To avoid this we just make sure to not delete physical device groups |
7439 | | // which were copied. |
7440 | 0 | bool found = false; |
7441 | 0 | if (NULL != inst->phys_devs_term) { |
7442 | 0 | for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) { |
7443 | 0 | if (new_phys_dev_groups[i] == inst->phys_dev_groups_term[old_idx]) { |
7444 | 0 | found = true; |
7445 | 0 | break; |
7446 | 0 | } |
7447 | 0 | } |
7448 | 0 | } |
7449 | 0 | if (!found) { |
7450 | 0 | loader_instance_heap_free(inst, new_phys_dev_groups[i]); |
7451 | 0 | } |
7452 | 0 | } |
7453 | 0 | loader_instance_heap_free(inst, new_phys_dev_groups); |
7454 | 0 | } |
7455 | 0 | } else { |
7456 | 0 | if (NULL != inst->phys_dev_groups_term) { |
7457 | | // Free everything in the old array that was not copied into the new array |
7458 | | // here. We can't attempt to do that before here since the previous loop |
7459 | | // looking before the "out:" label may hit an out of memory condition resulting |
7460 | | // in memory leaking. |
7461 | 0 | for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) { |
7462 | 0 | bool found = false; |
7463 | 0 | for (uint32_t j = 0; j < total_count; j++) { |
7464 | 0 | if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) { |
7465 | 0 | found = true; |
7466 | 0 | break; |
7467 | 0 | } |
7468 | 0 | } |
7469 | 0 | if (!found) { |
7470 | 0 | loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]); |
7471 | 0 | } |
7472 | 0 | } |
7473 | 0 | loader_instance_heap_free(inst, inst->phys_dev_groups_term); |
7474 | 0 | } |
7475 | | |
7476 | | // Swap in the new physical device group list |
7477 | 0 | inst->phys_dev_group_count_term = total_count; |
7478 | 0 | inst->phys_dev_groups_term = new_phys_dev_groups; |
7479 | 0 | } |
7480 | |
|
7481 | 0 | if (sorted_phys_dev_array != NULL) { |
7482 | 0 | for (uint32_t i = 0; i < sorted_count; ++i) { |
7483 | 0 | if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) { |
7484 | 0 | loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices); |
7485 | 0 | } |
7486 | 0 | } |
7487 | 0 | loader_instance_heap_free(inst, sorted_phys_dev_array); |
7488 | 0 | } |
7489 | |
|
7490 | 0 | uint32_t copy_count = inst->phys_dev_group_count_term; |
7491 | 0 | if (NULL != pPhysicalDeviceGroupProperties) { |
7492 | 0 | if (copy_count > *pPhysicalDeviceGroupCount) { |
7493 | 0 | copy_count = *pPhysicalDeviceGroupCount; |
7494 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
7495 | 0 | "terminator_EnumeratePhysicalDeviceGroups : Trimming device count from %d to %d.", |
7496 | 0 | inst->phys_dev_group_count_term, copy_count); |
7497 | 0 | res = VK_INCOMPLETE; |
7498 | 0 | } |
7499 | |
|
7500 | 0 | for (uint32_t i = 0; i < copy_count; i++) { |
7501 | 0 | memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i], sizeof(VkPhysicalDeviceGroupProperties)); |
7502 | 0 | } |
7503 | 0 | } |
7504 | |
|
7505 | 0 | *pPhysicalDeviceGroupCount = copy_count; |
7506 | |
|
7507 | 0 | } else { |
7508 | 0 | *pPhysicalDeviceGroupCount = total_count; |
7509 | 0 | } |
7510 | 0 | return res; |
7511 | 0 | } |