/src/vulkan-loader/loader/loader.c
Line | Count | Source |
1 | | /* |
2 | | * |
3 | | * Copyright (c) 2014-2023 The Khronos Group Inc. |
4 | | * Copyright (c) 2014-2023 Valve Corporation |
5 | | * Copyright (c) 2014-2023 LunarG, Inc. |
6 | | * Copyright (C) 2015 Google Inc. |
7 | | * Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. |
8 | | * Copyright (c) 2023-2023 RasterGrid Kft. |
9 | | * |
10 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
11 | | * you may not use this file except in compliance with the License. |
12 | | * You may obtain a copy of the License at |
13 | | * |
14 | | * http://www.apache.org/licenses/LICENSE-2.0 |
15 | | * |
16 | | * Unless required by applicable law or agreed to in writing, software |
17 | | * distributed under the License is distributed on an "AS IS" BASIS, |
18 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
19 | | * See the License for the specific language governing permissions and |
20 | | * limitations under the License. |
21 | | |
22 | | * |
23 | | * Author: Jon Ashburn <jon@lunarg.com> |
24 | | * Author: Courtney Goeltzenleuchter <courtney@LunarG.com> |
25 | | * Author: Mark Young <marky@lunarg.com> |
26 | | * Author: Lenny Komow <lenny@lunarg.com> |
27 | | * Author: Charles Giessen <charles@lunarg.com> |
28 | | * |
29 | | */ |
30 | | |
31 | | #include "loader.h" |
32 | | |
33 | | #include <errno.h> |
34 | | #include <inttypes.h> |
35 | | #include <limits.h> |
36 | | #include <stdio.h> |
37 | | #include <stdlib.h> |
38 | | #include <stdarg.h> |
39 | | #include <stdbool.h> |
40 | | #include <string.h> |
41 | | #include <stddef.h> |
42 | | |
43 | | #if defined(__APPLE__) |
44 | | #include <CoreFoundation/CoreFoundation.h> |
45 | | #include <sys/param.h> |
46 | | #endif |
47 | | |
48 | | #include <sys/types.h> |
49 | | #if defined(_WIN32) |
50 | | #include "dirent_on_windows.h" |
51 | | #elif COMMON_UNIX_PLATFORMS |
52 | | #include <dirent.h> |
53 | | #else |
54 | | #warning dirent.h not available on this platform |
55 | | #endif // _WIN32 |
56 | | |
57 | | #include "allocation.h" |
58 | | #include "stack_allocation.h" |
59 | | #include "cJSON.h" |
60 | | #include "debug_utils.h" |
61 | | #include "loader_environment.h" |
62 | | #include "loader_json.h" |
63 | | #include "log.h" |
64 | | #include "unknown_function_handling.h" |
65 | | #include "vk_loader_platform.h" |
66 | | #include "wsi.h" |
67 | | |
68 | | #if defined(WIN32) |
69 | | #include "loader_windows.h" |
70 | | #endif |
71 | | #if defined(LOADER_ENABLE_LINUX_SORT) |
72 | | // This header is currently only used when sorting Linux devices, so don't include it otherwise. |
73 | | #include "loader_linux.h" |
74 | | #endif // LOADER_ENABLE_LINUX_SORT |
75 | | |
76 | | // Generated file containing all the extension data |
77 | | #include "vk_loader_extensions.c" |
78 | | |
79 | | struct loader_struct loader = {0}; |
80 | | |
81 | | struct activated_layer_info { |
82 | | char *name; |
83 | | char *manifest; |
84 | | char *library; |
85 | | bool is_implicit; |
86 | | enum loader_layer_enabled_by_what enabled_by_what; |
87 | | char *disable_env; |
88 | | char *enable_name_env; |
89 | | char *enable_value_env; |
90 | | }; |
91 | | |
92 | | // thread safety lock for accessing global data structures such as "loader" |
93 | | // all entrypoints on the instance chain need to be locked except GPA |
94 | | // additionally CreateDevice and DestroyDevice needs to be locked |
95 | | loader_platform_thread_mutex loader_lock; |
96 | | loader_platform_thread_mutex loader_preload_icd_lock; |
97 | | loader_platform_thread_mutex loader_global_instance_list_lock; |
98 | | |
99 | | // A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything |
100 | | // other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change |
101 | | // functionality, but the fact that the libraries already been loaded causes any call that needs to load ICD libraries to speed up |
102 | | // significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and |
103 | | // vkCreateInstance. |
104 | | struct loader_icd_tramp_list preloaded_icds; |
105 | | |
106 | | // controls whether loader_platform_close_library() closes the libraries or not - controlled by an environment |
107 | | // variables - this is just the definition of the variable, usage is in vk_loader_platform.h |
108 | | bool loader_disable_dynamic_library_unloading; |
109 | | |
110 | | LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init); |
111 | | |
112 | | // Creates loader_api_version struct that contains the major and minor fields, setting patch to 0 |
113 | 35.3k | loader_api_version loader_make_version(uint32_t version) { |
114 | 35.3k | loader_api_version out_version; |
115 | 35.3k | out_version.major = VK_API_VERSION_MAJOR(version); |
116 | 35.3k | out_version.minor = VK_API_VERSION_MINOR(version); |
117 | 35.3k | out_version.patch = 0; |
118 | 35.3k | return out_version; |
119 | 35.3k | } |
120 | | |
121 | | // Creates loader_api_version struct containing the major, minor, and patch fields |
122 | 8.80k | loader_api_version loader_make_full_version(uint32_t version) { |
123 | 8.80k | loader_api_version out_version; |
124 | 8.80k | out_version.major = VK_API_VERSION_MAJOR(version); |
125 | 8.80k | out_version.minor = VK_API_VERSION_MINOR(version); |
126 | 8.80k | out_version.patch = VK_API_VERSION_PATCH(version); |
127 | 8.80k | return out_version; |
128 | 8.80k | } |
129 | | |
130 | 26.0k | loader_api_version loader_combine_version(uint32_t major, uint32_t minor, uint32_t patch) { |
131 | 26.0k | loader_api_version out_version; |
132 | 26.0k | out_version.major = (uint16_t)major; |
133 | 26.0k | out_version.minor = (uint16_t)minor; |
134 | 26.0k | out_version.patch = (uint16_t)patch; |
135 | 26.0k | return out_version; |
136 | 26.0k | } |
137 | | |
138 | | // Helper macros for determining if a version is valid or not |
139 | 36.9k | bool loader_check_version_meets_required(loader_api_version required, loader_api_version version) { |
140 | | // major version is satisfied |
141 | 36.9k | return (version.major > required.major) || |
142 | | // major version is equal, minor version is patch version is greater to minimum minor |
143 | 29.1k | (version.major == required.major && version.minor > required.minor) || |
144 | | // major and minor version are equal, patch version is greater or equal to minimum patch |
145 | 25.4k | (version.major == required.major && version.minor == required.minor && version.patch >= required.patch); |
146 | 36.9k | } |
147 | | |
148 | 0 | const char *get_enabled_by_what_str(enum loader_layer_enabled_by_what enabled_by_what) { |
149 | 0 | switch (enabled_by_what) { |
150 | 0 | default: |
151 | 0 | assert(true && "Shouldn't reach this"); |
152 | 0 | return "Unknown"; |
153 | 0 | case (ENABLED_BY_WHAT_UNSET): |
154 | 0 | assert(true && "Shouldn't reach this"); |
155 | 0 | return "Unknown"; |
156 | 0 | case (ENABLED_BY_WHAT_LOADER_SETTINGS_FILE): |
157 | 0 | return "Loader Settings File (Vulkan Configurator)"; |
158 | 0 | case (ENABLED_BY_WHAT_IMPLICIT_LAYER): |
159 | 0 | return "Implicit Layer"; |
160 | 0 | case (ENABLED_BY_WHAT_VK_INSTANCE_LAYERS): |
161 | 0 | return "Environment Variable VK_INSTANCE_LAYERS"; |
162 | 0 | case (ENABLED_BY_WHAT_VK_LOADER_LAYERS_ENABLE): |
163 | 0 | return "Environment Variable VK_LOADER_LAYERS_ENABLE"; |
164 | 0 | case (ENABLED_BY_WHAT_IN_APPLICATION_API): |
165 | 0 | return "By the Application"; |
166 | 0 | case (ENABLED_BY_WHAT_META_LAYER): |
167 | 0 | return "Meta Layer (Vulkan Configurator)"; |
168 | 0 | } |
169 | 0 | } |
170 | | |
171 | | // Wrapper around opendir so that the dirent_on_windows gets the instance it needs |
172 | | // while linux opendir & readdir does not |
173 | 110k | DIR *loader_opendir(const struct loader_instance *instance, const char *name) { |
174 | | #if defined(_WIN32) |
175 | | return opendir(instance ? &instance->alloc_callbacks : NULL, name); |
176 | | #elif COMMON_UNIX_PLATFORMS |
177 | | (void)instance; |
178 | 110k | return opendir(name); |
179 | | #else |
180 | | #warning dirent.h - opendir not available on this platform |
181 | | #endif // _WIN32 |
182 | 110k | } |
183 | 8.49k | int loader_closedir(const struct loader_instance *instance, DIR *dir) { |
184 | | #if defined(_WIN32) |
185 | | return closedir(instance ? &instance->alloc_callbacks : NULL, dir); |
186 | | #elif COMMON_UNIX_PLATFORMS |
187 | | (void)instance; |
188 | 8.49k | return closedir(dir); |
189 | | #else |
190 | | #warning dirent.h - closedir not available on this platform |
191 | | #endif // _WIN32 |
192 | 8.49k | } |
193 | | |
194 | 303k | bool is_json(const char *path, size_t len) { |
195 | 303k | if (len < 5) { |
196 | 55.1k | return false; |
197 | 55.1k | } |
198 | 248k | return !strncmp(path, ".json", 5); |
199 | 303k | } |
200 | | |
201 | | // Handle error from to library loading |
202 | | void loader_handle_load_library_error(const struct loader_instance *inst, const char *filename, |
203 | 71 | enum loader_layer_library_status *lib_status) { |
204 | 71 | const char *error_message = loader_platform_open_library_error(filename); |
205 | | // If the error is due to incompatible architecture (eg 32 bit vs 64 bit), report it with INFO level |
206 | | // Discussed in Github issue 262 & 644 |
207 | | // "wrong ELF class" is a linux error, " with error 193" is a windows error |
208 | 71 | VkFlags err_flag = VULKAN_LOADER_ERROR_BIT; |
209 | 71 | if (strstr(error_message, "wrong ELF class:") != NULL || strstr(error_message, " with error 193") != NULL) { |
210 | 2 | err_flag = VULKAN_LOADER_INFO_BIT; |
211 | 2 | if (NULL != lib_status) { |
212 | 2 | *lib_status = LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE; |
213 | 2 | } |
214 | 2 | } |
215 | | // Check if the error is due to lack of memory |
216 | | // "with error 8" is the windows error code for OOM cases, aka ERROR_NOT_ENOUGH_MEMORY |
217 | | // Linux doesn't have such a nice error message - only if there are reported issues should this be called |
218 | 69 | else if (strstr(error_message, " with error 8") != NULL) { |
219 | 1 | if (NULL != lib_status) { |
220 | 1 | *lib_status = LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY; |
221 | 1 | } |
222 | 68 | } else if (NULL != lib_status) { |
223 | 68 | *lib_status = LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD; |
224 | 68 | } |
225 | 71 | loader_log(inst, err_flag, 0, "%s", error_message); |
226 | 71 | } |
227 | | |
228 | 0 | VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) { |
229 | 0 | struct loader_instance *inst = loader_get_instance(instance); |
230 | 0 | if (!inst) { |
231 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkSetInstanceDispatch: Can not retrieve Instance dispatch table."); |
232 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
233 | 0 | } |
234 | 0 | loader_set_dispatch(object, inst->disp); |
235 | 0 | return VK_SUCCESS; |
236 | 0 | } |
237 | | |
238 | 0 | VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) { |
239 | 0 | struct loader_device *dev; |
240 | 0 | struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev); |
241 | |
|
242 | 0 | if (NULL == icd_term || NULL == dev) { |
243 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
244 | 0 | } |
245 | 0 | loader_set_dispatch(object, &dev->loader_dispatch); |
246 | 0 | return VK_SUCCESS; |
247 | 0 | } |
248 | | |
249 | 1.14M | void loader_free_layer_properties(const struct loader_instance *inst, struct loader_layer_properties *layer_properties) { |
250 | 1.14M | loader_instance_heap_free(inst, layer_properties->manifest_file_name); |
251 | 1.14M | loader_instance_heap_free(inst, layer_properties->lib_name); |
252 | 1.14M | loader_instance_heap_free(inst, layer_properties->functions.str_gipa); |
253 | 1.14M | loader_instance_heap_free(inst, layer_properties->functions.str_gdpa); |
254 | 1.14M | loader_instance_heap_free(inst, layer_properties->functions.str_negotiate_interface); |
255 | 1.14M | loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->instance_extension_list); |
256 | 1.14M | if (layer_properties->device_extension_list.capacity > 0 && NULL != layer_properties->device_extension_list.list) { |
257 | 35.1k | for (uint32_t i = 0; i < layer_properties->device_extension_list.count; i++) { |
258 | 32.9k | free_string_list(inst, &layer_properties->device_extension_list.list[i].entrypoints); |
259 | 32.9k | } |
260 | 2.17k | } |
261 | 1.14M | loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->device_extension_list); |
262 | 1.14M | loader_instance_heap_free(inst, layer_properties->disable_env_var.name); |
263 | 1.14M | loader_instance_heap_free(inst, layer_properties->disable_env_var.value); |
264 | 1.14M | loader_instance_heap_free(inst, layer_properties->enable_env_var.name); |
265 | 1.14M | loader_instance_heap_free(inst, layer_properties->enable_env_var.value); |
266 | 1.14M | free_string_list(inst, &layer_properties->component_layer_names); |
267 | 1.14M | loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_extension_properties); |
268 | 1.14M | loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_layer_properties); |
269 | 1.14M | loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_version); |
270 | 1.14M | free_string_list(inst, &layer_properties->override_paths); |
271 | 1.14M | free_string_list(inst, &layer_properties->blacklist_layer_names); |
272 | 1.14M | free_string_list(inst, &layer_properties->app_key_paths); |
273 | | |
274 | | // Make sure to clear out the removed layer, in case new layers are added in the previous location |
275 | 1.14M | memset(layer_properties, 0, sizeof(struct loader_layer_properties)); |
276 | 1.14M | } |
277 | | |
278 | 0 | VkResult loader_init_library_list(struct loader_layer_list *instance_layers, loader_platform_dl_handle **libs) { |
279 | 0 | if (instance_layers->count > 0) { |
280 | 0 | *libs = loader_calloc(NULL, sizeof(loader_platform_dl_handle) * instance_layers->count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
281 | 0 | if (*libs == NULL) { |
282 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
283 | 0 | } |
284 | 0 | } |
285 | 0 | return VK_SUCCESS; |
286 | 0 | } |
287 | | |
288 | 1.04M | VkResult loader_copy_to_new_str(const struct loader_instance *inst, const char *source_str, char **dest_str) { |
289 | 1.04M | assert(source_str && dest_str); |
290 | 1.04M | size_t str_len = strlen(source_str) + 1; |
291 | 1.04M | *dest_str = loader_instance_heap_calloc(inst, str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
292 | 1.04M | if (NULL == *dest_str) return VK_ERROR_OUT_OF_HOST_MEMORY; |
293 | 1.04M | loader_strncpy(*dest_str, str_len, source_str, str_len); |
294 | 1.04M | (*dest_str)[str_len - 1] = 0; |
295 | 1.04M | return VK_SUCCESS; |
296 | 1.04M | } |
297 | | |
298 | 11.6k | VkResult create_string_list(const struct loader_instance *inst, uint32_t allocated_count, struct loader_string_list *string_list) { |
299 | 11.6k | assert(string_list); |
300 | 11.6k | string_list->list = loader_instance_heap_calloc(inst, sizeof(char *) * allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
301 | 11.6k | if (NULL == string_list->list) { |
302 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
303 | 0 | } |
304 | 11.6k | string_list->allocated_count = allocated_count; |
305 | 11.6k | string_list->count = 0; |
306 | 11.6k | return VK_SUCCESS; |
307 | 11.6k | } |
308 | | |
309 | 432k | VkResult increase_str_capacity_by_at_least_one(const struct loader_instance *inst, struct loader_string_list *string_list) { |
310 | 432k | assert(string_list); |
311 | 432k | if (string_list->allocated_count == 0) { |
312 | 8.30k | string_list->allocated_count = 32; |
313 | 8.30k | string_list->list = |
314 | 8.30k | loader_instance_heap_calloc(inst, sizeof(char *) * string_list->allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
315 | 8.30k | if (NULL == string_list->list) { |
316 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
317 | 0 | } |
318 | 424k | } else if (string_list->count + 1 > string_list->allocated_count) { |
319 | 30 | uint32_t new_allocated_count = string_list->allocated_count * 2; |
320 | 30 | string_list->list = loader_instance_heap_realloc(inst, string_list->list, sizeof(char *) * string_list->allocated_count, |
321 | 30 | sizeof(char *) * new_allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
322 | 30 | if (NULL == string_list->list) { |
323 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
324 | 0 | } |
325 | 30 | string_list->allocated_count *= 2; |
326 | 30 | } |
327 | 432k | return VK_SUCCESS; |
328 | 432k | } |
329 | | |
330 | 432k | VkResult append_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, char *str) { |
331 | 432k | assert(string_list && str); |
332 | 432k | VkResult res = increase_str_capacity_by_at_least_one(inst, string_list); |
333 | 432k | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
334 | 0 | loader_instance_heap_free(inst, str); // Must clean up in case of failure |
335 | 0 | return res; |
336 | 0 | } |
337 | 432k | string_list->list[string_list->count++] = str; |
338 | 432k | return VK_SUCCESS; |
339 | 432k | } |
340 | | |
341 | 0 | VkResult prepend_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, char *str) { |
342 | 0 | assert(string_list && str); |
343 | 0 | VkResult res = increase_str_capacity_by_at_least_one(inst, string_list); |
344 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
345 | 0 | loader_instance_heap_free(inst, str); // Must clean up in case of failure |
346 | 0 | return res; |
347 | 0 | } |
348 | | // Shift everything down one |
349 | 0 | void *ptr_to_list = memmove(string_list->list + 1, string_list->list, sizeof(char *) * string_list->count); |
350 | 0 | if (ptr_to_list) string_list->list[0] = str; // Write new string to start of list |
351 | 0 | string_list->count++; |
352 | 0 | return VK_SUCCESS; |
353 | 0 | } |
354 | | |
355 | | VkResult copy_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, const char *str, |
356 | 10.1k | size_t str_len) { |
357 | 10.1k | assert(string_list && str); |
358 | 10.1k | char *new_str = loader_instance_heap_calloc(inst, sizeof(char *) * str_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
359 | 10.1k | if (NULL == new_str) { |
360 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
361 | 0 | } |
362 | 10.1k | loader_strncpy(new_str, sizeof(char *) * str_len + 1, str, str_len); |
363 | 10.1k | new_str[str_len] = '\0'; |
364 | 10.1k | return append_str_to_string_list(inst, string_list, new_str); |
365 | 10.1k | } |
366 | | |
367 | | VkResult copy_str_to_start_of_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, |
368 | 0 | const char *str, size_t str_len) { |
369 | 0 | assert(string_list && str); |
370 | 0 | char *new_str = loader_instance_heap_calloc(inst, sizeof(char *) * str_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
371 | 0 | if (NULL == new_str) { |
372 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
373 | 0 | } |
374 | 0 | loader_strncpy(new_str, sizeof(char *) * str_len + 1, str, str_len); |
375 | 0 | new_str[str_len] = '\0'; |
376 | 0 | return prepend_str_to_string_list(inst, string_list, new_str); |
377 | 0 | } |
378 | | |
379 | 4.66M | void free_string_list(const struct loader_instance *inst, struct loader_string_list *string_list) { |
380 | 4.66M | assert(string_list); |
381 | 4.66M | if (string_list->list) { |
382 | 452k | for (uint32_t i = 0; i < string_list->count; i++) { |
383 | 432k | loader_instance_heap_free(inst, string_list->list[i]); |
384 | 432k | string_list->list[i] = NULL; |
385 | 432k | } |
386 | 19.9k | loader_instance_heap_free(inst, string_list->list); |
387 | 19.9k | } |
388 | 4.66M | memset(string_list, 0, sizeof(struct loader_string_list)); |
389 | 4.66M | } |
390 | | |
391 | | // In place modify the passed in path to do the following: |
392 | | // If HAVE_REALPATH is defined, then this simply calls realpath() so its behavior is defined by realpath() |
393 | | // Else: |
394 | | // * Windows-only: Replace forward slashes with backwards slashes (platform correct directory separator) |
395 | | // * Replace contiguous directory separators with a single directory separator |
396 | | // * Replace "/./" separator with "/" (where / is the platform correct directory separator) |
397 | | // * Replace "/<directory_name>/../" with just "/" (where / is the platform correct directory separator) |
398 | 0 | VkResult normalize_path(const struct loader_instance *inst, char **passed_in_path) { |
399 | | // passed_in_path doesn't point to anything, can't modify inplace so just return |
400 | 0 | if (passed_in_path == NULL) { |
401 | 0 | return VK_SUCCESS; |
402 | 0 | } |
403 | | |
404 | | // POSIX systems has the realpath() function to do this for us, fallback to basic normalization on other platforms |
405 | 0 | #if defined(HAVE_REALPATH) |
406 | 0 | char *path = loader_instance_heap_calloc(inst, PATH_MAX, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
407 | 0 | if (NULL == path) { |
408 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
409 | 0 | } |
410 | 0 | char *ret = realpath(*passed_in_path, path); |
411 | 0 | if (NULL == ret) { |
412 | | // error path |
413 | 0 | int error_code = errno; |
414 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
415 | 0 | "normalize_path: Call to realpath() failed with error code %d when given the path %s", error_code, |
416 | 0 | *passed_in_path); |
417 | 0 | loader_instance_heap_free(inst, path); |
418 | 0 | } else { |
419 | | // Replace string pointed to by passed_in_path with the one given to us by realpath() |
420 | 0 | loader_instance_heap_free(inst, *passed_in_path); |
421 | 0 | *passed_in_path = path; |
422 | 0 | } |
423 | 0 | return VK_SUCCESS; |
424 | | |
425 | | // Windows has GetFullPathName which does essentially the same thing. Note that we call GetFullPathNameA because the path has |
426 | | // already been converted from the wide char format when it was initially gotten |
427 | | #elif defined(WIN32) |
428 | | VkResult res = VK_SUCCESS; |
429 | | DWORD path_len = (DWORD)strlen(*passed_in_path) + 1; |
430 | | char *path = loader_instance_heap_calloc(inst, (size_t)path_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
431 | | if (NULL == path) { |
432 | | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
433 | | goto out; |
434 | | } |
435 | | DWORD actual_len = GetFullPathNameA(*passed_in_path, path_len, path, NULL); |
436 | | if (actual_len == 0) { |
437 | | size_t last_error = (size_t)GetLastError(); |
438 | | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
439 | | "normalize_path: Call to GetFullPathNameA() failed with error code %zu when given the path %s", last_error, |
440 | | *passed_in_path); |
441 | | res = VK_ERROR_INITIALIZATION_FAILED; |
442 | | goto out; |
443 | | } |
444 | | |
445 | | // If path_len wasn't big enough, need to realloc and call again |
446 | | // actual_len doesn't include null terminator |
447 | | if (actual_len + 1 > path_len) { |
448 | | path = loader_instance_heap_realloc(inst, path, path_len, actual_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
449 | | if (NULL == path) { |
450 | | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
451 | | goto out; |
452 | | } |
453 | | // store the updated allocation size (sans null terminator) |
454 | | path_len = actual_len + 1; |
455 | | |
456 | | actual_len = GetFullPathNameA(*passed_in_path, path_len, path, NULL); |
457 | | if (actual_len == 0) { |
458 | | size_t last_error = (size_t)GetLastError(); |
459 | | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
460 | | "normalize_path: Call to GetFullPathNameA() failed with error code %zu when given the path %s", last_error, |
461 | | *passed_in_path); |
462 | | res = VK_ERROR_INITIALIZATION_FAILED; |
463 | | goto out; |
464 | | // actual_len doesn't include null terminator |
465 | | } else if (actual_len + 1 != path_len) { |
466 | | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
467 | | "normalize_path: Call to GetFullPathNameA() with too small of a buffer when given the path %s after the " |
468 | | "initial call to GetFullPathNameA() failed for the same reason. Buffer size is %zu, actual size is %zu", |
469 | | *passed_in_path, (size_t)path_len, (size_t)actual_len); |
470 | | res = VK_ERROR_INITIALIZATION_FAILED; |
471 | | goto out; |
472 | | } |
473 | | } |
474 | | // Replace string pointed to by passed_in_path with the one given to us by realpath() |
475 | | loader_instance_heap_free(inst, *passed_in_path); |
476 | | *passed_in_path = path; |
477 | | out: |
478 | | if (VK_SUCCESS != res) { |
479 | | if (NULL != path) { |
480 | | loader_instance_heap_free(inst, path); |
481 | | } |
482 | | } |
483 | | return res; |
484 | | |
485 | | #else |
486 | | (void)inst; |
487 | | char *path = *passed_in_path; |
488 | | size_t path_len = strlen(path) + 1; |
489 | | |
490 | | size_t output_index = 0; |
491 | | // Iterate through the string up to the last character, excluding the null terminator |
492 | | for (size_t i = 0; i < path_len - 1; i++) { |
493 | | if (i + 1 < path_len && path[i] == DIRECTORY_SYMBOL && path[i + 1] == DIRECTORY_SYMBOL) { |
494 | | continue; |
495 | | } else if (i + 2 < path_len && path[i] == DIRECTORY_SYMBOL && path[i + 1] == '.' && path[i + 2] == DIRECTORY_SYMBOL) { |
496 | | i += 1; |
497 | | } else { |
498 | | path[output_index++] = path[i]; |
499 | | } |
500 | | } |
501 | | // Add null terminator and set the new length |
502 | | path[output_index++] = '\0'; |
503 | | path_len = output_index; |
504 | | |
505 | | // Loop while there are still ..'s in the path. Easiest implementation resolves them one by one, which requires quadratic |
506 | | // iteration through the string |
507 | | char *directory_stack = loader_stack_alloc(path_len); |
508 | | if (directory_stack == NULL) { |
509 | | return VK_ERROR_OUT_OF_HOST_MEMORY; |
510 | | } |
511 | | |
512 | | size_t top_of_stack = 0; |
513 | | |
514 | | // Iterate through the path, push characters as we see them, if we find a "..", pop off the top of the directory stack until the |
515 | | // current directory is gone. |
516 | | for (size_t i = 0; i < path_len - 1; i++) { |
517 | | // if the next part of path is "/../" we need to pop from the directory stack until we hit the previous directory symbol. |
518 | | if (i + 3 < path_len && path[i] == DIRECTORY_SYMBOL && path[i + 1] == '.' && path[i + 2] == '.' && path_len && |
519 | | path[i + 3] == DIRECTORY_SYMBOL) { |
520 | | // Pop until we hit the next directory symbol in the stack |
521 | | while (top_of_stack > 0 && directory_stack[top_of_stack - 1] != DIRECTORY_SYMBOL) { |
522 | | top_of_stack--; |
523 | | directory_stack[top_of_stack] = '\0'; |
524 | | } |
525 | | // Amend the directory stack so that the top isn't a directory separator |
526 | | if (top_of_stack > 0 && directory_stack[top_of_stack - 1] == DIRECTORY_SYMBOL) { |
527 | | top_of_stack--; |
528 | | directory_stack[top_of_stack] = '\0'; |
529 | | } |
530 | | i += 2; // need to skip the second dot & directory separator |
531 | | } else { |
532 | | // push characters as we come across them |
533 | | directory_stack[top_of_stack++] = path[i]; |
534 | | } |
535 | | } |
536 | | |
537 | | // Can't forget the null terminator |
538 | | directory_stack[top_of_stack] = '\0'; |
539 | | |
540 | | // We now have the path without any ..'s, so just copy it out |
541 | | loader_strncpy(path, path_len, directory_stack, path_len); |
542 | | path[top_of_stack] = '\0'; |
543 | | path_len = top_of_stack + 1; |
544 | | |
545 | | return VK_SUCCESS; |
546 | | #endif |
547 | 0 | } |
548 | | |
549 | | // Queries the path to the library that lib_handle & gipa are associated with, allocating a string to hold it and returning it in |
550 | | // out_path |
551 | | VkResult get_library_path_of_dl_handle(const struct loader_instance *inst, loader_platform_dl_handle lib_handle, |
552 | 0 | PFN_vkGetInstanceProcAddr gipa, char **out_path) { |
553 | 0 | #if COMMON_UNIX_PLATFORMS |
554 | 0 | (void)lib_handle; |
555 | 0 | Dl_info dl_info = {0}; |
556 | 0 | if (dladdr(gipa, &dl_info) != 0 && NULL != dl_info.dli_fname) { |
557 | 0 | return loader_copy_to_new_str(inst, dl_info.dli_fname, out_path); |
558 | 0 | } |
559 | 0 | return VK_SUCCESS; |
560 | |
|
561 | | #elif defined(WIN32) |
562 | | (void)gipa; |
563 | | size_t module_file_name_len = MAX_PATH; // start with reasonably large buffer |
564 | | wchar_t *buffer_utf16 = (wchar_t *)loader_stack_alloc(module_file_name_len * sizeof(wchar_t)); |
565 | | DWORD ret = GetModuleFileNameW(lib_handle, buffer_utf16, (DWORD)module_file_name_len); |
566 | | if (ret == 0) { |
567 | | return VK_SUCCESS; |
568 | | } |
569 | | while (GetLastError() == ERROR_INSUFFICIENT_BUFFER) { |
570 | | module_file_name_len *= 2; |
571 | | buffer_utf16 = (wchar_t *)loader_stack_alloc(module_file_name_len * sizeof(wchar_t)); |
572 | | ret = GetModuleFileNameW(lib_handle, buffer_utf16, (DWORD)module_file_name_len); |
573 | | if (ret == 0) { |
574 | | return VK_SUCCESS; |
575 | | } |
576 | | } |
577 | | |
578 | | // Need to convert from utf16 to utf8 |
579 | | int buffer_utf8_size = WideCharToMultiByte(CP_UTF8, 0, buffer_utf16, -1, NULL, 0, NULL, NULL); |
580 | | if (buffer_utf8_size <= 0) { |
581 | | return VK_SUCCESS; |
582 | | } |
583 | | |
584 | | char *buffer_utf8 = loader_instance_heap_calloc(inst, buffer_utf8_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
585 | | if (NULL == buffer_utf8) { |
586 | | return VK_ERROR_OUT_OF_HOST_MEMORY; |
587 | | } |
588 | | if (WideCharToMultiByte(CP_UTF8, 0, buffer_utf16, -1, buffer_utf8, buffer_utf8_size, NULL, NULL) != buffer_utf8_size) { |
589 | | return VK_SUCCESS; |
590 | | } |
591 | | |
592 | | // Successfully got the 'real' path to the library. |
593 | | *out_path = buffer_utf8; |
594 | | return VK_SUCCESS; |
595 | | |
596 | | #else |
597 | | // Do nothing, platform doesn't handle getting the path to a library |
598 | | #endif |
599 | 0 | } |
600 | | |
601 | | // Find and replace the path that was loaded using the lib_name path with the real path of the library. This is done to provide |
602 | | // accurate logging info for users. |
603 | | // This function prints a warning if there is a mismatch between the lib_name path and the real path. |
604 | | VkResult fixup_library_binary_path(const struct loader_instance *inst, char **lib_name, loader_platform_dl_handle lib_handle, |
605 | 0 | PFN_vkGetInstanceProcAddr gipa) { |
606 | 0 | if (lib_name == NULL) { |
607 | | // do nothing as we got an invalid lib_path pointer |
608 | 0 | return VK_SUCCESS; |
609 | 0 | } |
610 | | |
611 | 0 | bool system_path = true; |
612 | 0 | size_t lib_name_len = strlen(*lib_name) + 1; |
613 | 0 | for (size_t i = 0; i < lib_name_len; i++) { |
614 | 0 | if ((*lib_name)[i] == DIRECTORY_SYMBOL) { |
615 | 0 | system_path = false; |
616 | 0 | break; |
617 | 0 | } |
618 | 0 | } |
619 | |
|
620 | 0 | if (!system_path) { |
621 | | // The OS path we get for a binary is normalized, so we need to normalize the path passed into LoadLibrary/dlopen so that |
622 | | // mismatches are minimized. EG, do not warn when we give dlopen/LoadLibrary "/foo/./bar" but get "/foo/bar" as the loaded |
623 | | // binary path from the OS. |
624 | 0 | VkResult res = normalize_path(inst, lib_name); |
625 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
626 | 0 | return res; |
627 | 0 | } |
628 | 0 | } |
629 | 0 | char *os_determined_lib_name = NULL; |
630 | 0 | VkResult res = get_library_path_of_dl_handle(inst, lib_handle, gipa, &os_determined_lib_name); |
631 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
632 | 0 | return res; |
633 | 0 | } |
634 | | |
635 | 0 | if (NULL != os_determined_lib_name) { |
636 | 0 | if (0 != strcmp(os_determined_lib_name, *lib_name)) { |
637 | | // Paths do not match, so we need to replace lib_name with the real path |
638 | 0 | if (!system_path) { |
639 | | // Only warn when the library_path is relative or absolute, not system. EG lib_name had no directory separators |
640 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
641 | 0 | "Path to given binary %s was found to differ from OS loaded path %s", *lib_name, os_determined_lib_name); |
642 | 0 | } |
643 | 0 | loader_instance_heap_free(inst, *lib_name); |
644 | 0 | *lib_name = os_determined_lib_name; |
645 | 0 | } else { |
646 | | // Paths match, so just need to free temporary allocation |
647 | 0 | loader_instance_heap_free(inst, os_determined_lib_name); |
648 | 0 | } |
649 | 0 | } |
650 | |
|
651 | 0 | return res; |
652 | 0 | } |
653 | | |
654 | | // Given string of three part form "maj.min.pat" convert to a vulkan version number. |
655 | | // Also can understand four part form "variant.major.minor.patch" if provided. |
656 | 37.6k | uint32_t loader_parse_version_string(char *vers_str) { |
657 | 37.6k | uint32_t variant = 0, major = 0, minor = 0, patch = 0; |
658 | 37.6k | char *vers_tok; |
659 | 37.6k | char *context = NULL; |
660 | 37.6k | if (!vers_str) { |
661 | 0 | return 0; |
662 | 0 | } |
663 | | |
664 | 37.6k | vers_tok = thread_safe_strtok(vers_str, ".\"\n\r", &context); |
665 | 37.6k | if (NULL != vers_tok) { |
666 | 33.9k | major = (uint16_t)atoi(vers_tok); |
667 | 33.9k | vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context); |
668 | 33.9k | if (NULL != vers_tok) { |
669 | 21.2k | minor = (uint16_t)atoi(vers_tok); |
670 | 21.2k | vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context); |
671 | 21.2k | if (NULL != vers_tok) { |
672 | 11.0k | patch = (uint16_t)atoi(vers_tok); |
673 | 11.0k | vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context); |
674 | | // check that we are using a 4 part version string |
675 | 11.0k | if (NULL != vers_tok) { |
676 | | // if we are, move the values over into the correct place |
677 | 247 | variant = major; |
678 | 247 | major = minor; |
679 | 247 | minor = patch; |
680 | 247 | patch = (uint16_t)atoi(vers_tok); |
681 | 247 | } |
682 | 11.0k | } |
683 | 21.2k | } |
684 | 33.9k | } |
685 | | |
686 | 37.6k | return VK_MAKE_API_VERSION(variant, major, minor, patch); |
687 | 37.6k | } |
688 | | |
689 | 3.78M | bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) { |
690 | 3.78M | return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false; |
691 | 3.78M | } |
692 | | |
693 | | // Search the given ext_array for an extension matching the given vk_ext_prop |
694 | | bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count, |
695 | 0 | const VkExtensionProperties *ext_array) { |
696 | 0 | for (uint32_t i = 0; i < count; i++) { |
697 | 0 | if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true; |
698 | 0 | } |
699 | 0 | return false; |
700 | 0 | } |
701 | | |
702 | | // Search the given ext_list for an extension matching the given vk_ext_prop |
703 | 56.3k | bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) { |
704 | 2.36M | for (uint32_t i = 0; i < ext_list->count; i++) { |
705 | 2.32M | if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true; |
706 | 2.32M | } |
707 | 38.9k | return false; |
708 | 56.3k | } |
709 | | |
710 | | // Search the given ext_list for a device extension matching the given ext_prop |
711 | 65.8k | bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) { |
712 | 1.51M | for (uint32_t i = 0; i < ext_list->count; i++) { |
713 | 1.46M | if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true; |
714 | 1.46M | } |
715 | 52.4k | return false; |
716 | 65.8k | } |
717 | | |
718 | | VkResult loader_append_layer_property(const struct loader_instance *inst, struct loader_layer_list *layer_list, |
719 | 1.07M | struct loader_layer_properties *layer_property) { |
720 | 1.07M | VkResult res = VK_SUCCESS; |
721 | 1.07M | if (layer_list->capacity == 0) { |
722 | 5.70k | res = loader_init_generic_list(inst, (struct loader_generic_list *)layer_list, sizeof(struct loader_layer_properties)); |
723 | 5.70k | if (VK_SUCCESS != res) { |
724 | 0 | goto out; |
725 | 0 | } |
726 | 5.70k | } |
727 | | |
728 | | // Ensure enough room to add an entry |
729 | 1.07M | if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) { |
730 | 1.36k | void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2, |
731 | 1.36k | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
732 | 1.36k | if (NULL == new_ptr) { |
733 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_append_layer_property: realloc failed for layer list"); |
734 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
735 | 0 | goto out; |
736 | 0 | } |
737 | 1.36k | layer_list->list = new_ptr; |
738 | 1.36k | layer_list->capacity *= 2; |
739 | 1.36k | } |
740 | 1.07M | memcpy(&layer_list->list[layer_list->count], layer_property, sizeof(struct loader_layer_properties)); |
741 | 1.07M | layer_list->count++; |
742 | 1.07M | memset(layer_property, 0, sizeof(struct loader_layer_properties)); |
743 | 1.07M | out: |
744 | 1.07M | if (res != VK_SUCCESS) { |
745 | 0 | loader_free_layer_properties(inst, layer_property); |
746 | 0 | } |
747 | 1.07M | return res; |
748 | 1.07M | } |
749 | | |
750 | | // Search the given layer list for a layer property matching the given layer name |
751 | 25.2k | struct loader_layer_properties *loader_find_layer_property(const char *name, const struct loader_layer_list *layer_list) { |
752 | 29.7M | for (uint32_t i = 0; i < layer_list->count; i++) { |
753 | 29.7M | const VkLayerProperties *item = &layer_list->list[i].info; |
754 | 29.7M | if (strcmp(name, item->layerName) == 0) return &layer_list->list[i]; |
755 | 29.7M | } |
756 | 8.76k | return NULL; |
757 | 25.2k | } |
758 | | |
759 | | struct loader_layer_properties *loader_find_pointer_layer_property(const char *name, |
760 | 0 | const struct loader_pointer_layer_list *layer_list) { |
761 | 0 | for (uint32_t i = 0; i < layer_list->count; i++) { |
762 | 0 | const VkLayerProperties *item = &layer_list->list[i]->info; |
763 | 0 | if (strcmp(name, item->layerName) == 0) return layer_list->list[i]; |
764 | 0 | } |
765 | 0 | return NULL; |
766 | 0 | } |
767 | | |
768 | | // Search the given layer list for a layer matching the given layer name |
769 | 0 | bool loader_find_layer_name_in_list(const char *name, const struct loader_pointer_layer_list *layer_list) { |
770 | 0 | if (NULL == layer_list) { |
771 | 0 | return false; |
772 | 0 | } |
773 | 0 | if (NULL != loader_find_pointer_layer_property(name, layer_list)) { |
774 | 0 | return true; |
775 | 0 | } |
776 | 0 | return false; |
777 | 0 | } |
778 | | |
779 | | // Search the given meta-layer's component list for a layer matching the given layer name |
780 | | bool loader_find_layer_name_in_meta_layer(const struct loader_instance *inst, const char *layer_name, |
781 | 0 | struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) { |
782 | 0 | for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->component_layer_names.count; comp_layer++) { |
783 | 0 | if (!strcmp(meta_layer_props->component_layer_names.list[comp_layer], layer_name)) { |
784 | 0 | return true; |
785 | 0 | } |
786 | 0 | struct loader_layer_properties *comp_layer_props = |
787 | 0 | loader_find_layer_property(meta_layer_props->component_layer_names.list[comp_layer], layer_list); |
788 | 0 | if (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { |
789 | 0 | return loader_find_layer_name_in_meta_layer(inst, layer_name, layer_list, comp_layer_props); |
790 | 0 | } |
791 | 0 | } |
792 | 0 | return false; |
793 | 0 | } |
794 | | |
795 | | // Search the override layer's blacklist for a layer matching the given layer name |
796 | 98.6k | bool loader_find_layer_name_in_blacklist(const char *layer_name, struct loader_layer_properties *meta_layer_props) { |
797 | 98.6k | for (uint32_t black_layer = 0; black_layer < meta_layer_props->blacklist_layer_names.count; ++black_layer) { |
798 | 0 | if (!strcmp(meta_layer_props->blacklist_layer_names.list[black_layer], layer_name)) { |
799 | 0 | return true; |
800 | 0 | } |
801 | 0 | } |
802 | 98.6k | return false; |
803 | 98.6k | } |
804 | | |
805 | | // Remove all layer properties entries from the list |
806 | | TEST_FUNCTION_EXPORT void loader_delete_layer_list_and_properties(const struct loader_instance *inst, |
807 | 22.9k | struct loader_layer_list *layer_list) { |
808 | 22.9k | uint32_t i; |
809 | 22.9k | if (!layer_list) return; |
810 | | |
811 | 1.09M | for (i = 0; i < layer_list->count; i++) { |
812 | 1.07M | if (layer_list->list[i].lib_handle) { |
813 | 0 | loader_platform_close_library(layer_list->list[i].lib_handle); |
814 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Unloading layer library %s", |
815 | 0 | layer_list->list[i].lib_name); |
816 | 0 | layer_list->list[i].lib_handle = NULL; |
817 | 0 | } |
818 | 1.07M | loader_free_layer_properties(inst, &(layer_list->list[i])); |
819 | 1.07M | } |
820 | 22.9k | layer_list->count = 0; |
821 | | |
822 | 22.9k | if (layer_list->capacity > 0) { |
823 | 6.52k | layer_list->capacity = 0; |
824 | 6.52k | loader_instance_heap_free(inst, layer_list->list); |
825 | 6.52k | } |
826 | 22.9k | memset(layer_list, 0, sizeof(struct loader_layer_list)); |
827 | 22.9k | } |
828 | | |
829 | | void loader_remove_layer_in_list(const struct loader_instance *inst, struct loader_layer_list *layer_list, |
830 | 6.88k | uint32_t layer_to_remove) { |
831 | 6.88k | if (layer_list == NULL || layer_to_remove >= layer_list->count) { |
832 | 23 | return; |
833 | 23 | } |
834 | 6.86k | loader_free_layer_properties(inst, &(layer_list->list[layer_to_remove])); |
835 | | |
836 | | // Remove the current invalid meta-layer from the layer list. Use memmove since we are |
837 | | // overlapping the source and destination addresses. |
838 | 6.86k | if (layer_to_remove + 1 <= layer_list->count) { |
839 | 6.86k | memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1], |
840 | 6.86k | sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove)); |
841 | 6.86k | } |
842 | | // Decrement the count (because we now have one less) and decrement the loop index since we need to |
843 | | // re-check this index. |
844 | 6.86k | layer_list->count--; |
845 | 6.86k | } |
846 | | |
847 | | // Remove all layers in the layer list that are blacklisted by the override layer. |
848 | | // NOTE: This should only be called if an override layer is found and not expired. |
849 | 264 | void loader_remove_layers_in_blacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) { |
850 | 264 | struct loader_layer_properties *override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list); |
851 | 264 | if (NULL == override_prop) { |
852 | 0 | return; |
853 | 0 | } |
854 | | |
855 | 99.1k | for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) { |
856 | 98.8k | struct loader_layer_properties cur_layer_prop = layer_list->list[j]; |
857 | 98.8k | const char *cur_layer_name = &cur_layer_prop.info.layerName[0]; |
858 | | |
859 | | // Skip the override layer itself. |
860 | 98.8k | if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) { |
861 | 264 | continue; |
862 | 264 | } |
863 | | |
864 | | // If found in the override layer's blacklist, remove it |
865 | 98.6k | if (loader_find_layer_name_in_blacklist(cur_layer_name, override_prop)) { |
866 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
867 | 0 | "loader_remove_layers_in_blacklist: Override layer is active and layer %s is in the blacklist inside of it. " |
868 | 0 | "Removing that layer from current layer list.", |
869 | 0 | cur_layer_name); |
870 | 0 | loader_remove_layer_in_list(inst, layer_list, j); |
871 | 0 | j--; |
872 | | |
873 | | // Re-do the query for the override layer |
874 | 0 | override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list); |
875 | 0 | } |
876 | 98.6k | } |
877 | 264 | } |
878 | | |
879 | | // Remove all layers in the layer list that are not found inside any implicit meta-layers. |
880 | 0 | void loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance *inst, struct loader_layer_list *layer_list) { |
881 | 0 | int32_t i; |
882 | 0 | int32_t j; |
883 | 0 | int32_t layer_count = (int32_t)(layer_list->count); |
884 | |
|
885 | 0 | for (i = 0; i < layer_count; i++) { |
886 | 0 | layer_list->list[i].keep = false; |
887 | 0 | } |
888 | |
|
889 | 0 | for (i = 0; i < layer_count; i++) { |
890 | 0 | struct loader_layer_properties *cur_layer_prop = &layer_list->list[i]; |
891 | |
|
892 | 0 | if (0 == (cur_layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { |
893 | 0 | cur_layer_prop->keep = true; |
894 | 0 | continue; |
895 | 0 | } |
896 | 0 | for (j = 0; j < layer_count; j++) { |
897 | 0 | struct loader_layer_properties *layer_to_check = &layer_list->list[j]; |
898 | |
|
899 | 0 | if (i == j) { |
900 | 0 | continue; |
901 | 0 | } |
902 | | |
903 | 0 | if (layer_to_check->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { |
904 | | // For all layers found in this meta layer, we want to keep them as well. |
905 | 0 | if (loader_find_layer_name_in_meta_layer(inst, cur_layer_prop->info.layerName, layer_list, layer_to_check)) { |
906 | 0 | cur_layer_prop->keep = true; |
907 | 0 | } |
908 | 0 | } |
909 | 0 | } |
910 | 0 | } |
911 | | |
912 | | // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be |
913 | | // dynamically updated if we delete a layer property in the list). |
914 | 0 | for (i = 0; i < (int32_t)(layer_list->count); i++) { |
915 | 0 | struct loader_layer_properties *cur_layer_prop = &layer_list->list[i]; |
916 | 0 | if (!cur_layer_prop->keep) { |
917 | 0 | loader_log( |
918 | 0 | inst, VULKAN_LOADER_DEBUG_BIT, 0, |
919 | 0 | "loader_remove_layers_not_in_implicit_meta_layers : Implicit meta-layers are active, and layer %s is not list " |
920 | 0 | "inside of any. So removing layer from current layer list.", |
921 | 0 | cur_layer_prop->info.layerName); |
922 | 0 | loader_remove_layer_in_list(inst, layer_list, i); |
923 | 0 | i--; |
924 | 0 | } |
925 | 0 | } |
926 | 0 | } |
927 | | |
928 | | VkResult loader_add_instance_extensions(const struct loader_instance *inst, |
929 | | const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name, |
930 | 0 | struct loader_extension_list *ext_list) { |
931 | 0 | uint32_t i, count = 0; |
932 | 0 | VkExtensionProperties *ext_props; |
933 | 0 | VkResult res = VK_SUCCESS; |
934 | |
|
935 | 0 | if (!fp_get_props) { |
936 | | // No EnumerateInstanceExtensionProperties defined |
937 | 0 | goto out; |
938 | 0 | } |
939 | | |
940 | | // Make sure we never call ourself by accident, this should never happen outside of error paths |
941 | 0 | if (fp_get_props == vkEnumerateInstanceExtensionProperties) { |
942 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
943 | 0 | "loader_add_instance_extensions: %s's vkEnumerateInstanceExtensionProperties points to the loader, this would " |
944 | 0 | "lead to infinite recursion.", |
945 | 0 | lib_name); |
946 | 0 | goto out; |
947 | 0 | } |
948 | | |
949 | 0 | res = fp_get_props(NULL, &count, NULL); |
950 | 0 | if (res != VK_SUCCESS) { |
951 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
952 | 0 | "loader_add_instance_extensions: Error getting Instance extension count from %s", lib_name); |
953 | 0 | goto out; |
954 | 0 | } |
955 | | |
956 | 0 | if (count == 0) { |
957 | | // No ExtensionProperties to report |
958 | 0 | goto out; |
959 | 0 | } |
960 | | |
961 | 0 | ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); |
962 | 0 | if (NULL == ext_props) { |
963 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
964 | 0 | goto out; |
965 | 0 | } |
966 | | |
967 | 0 | res = fp_get_props(NULL, &count, ext_props); |
968 | 0 | if (res != VK_SUCCESS) { |
969 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_instance_extensions: Error getting Instance extensions from %s", |
970 | 0 | lib_name); |
971 | 0 | goto out; |
972 | 0 | } |
973 | | |
974 | 0 | for (i = 0; i < count; i++) { |
975 | 0 | bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]); |
976 | 0 | if (!ext_unsupported) { |
977 | 0 | res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); |
978 | 0 | if (res != VK_SUCCESS) { |
979 | 0 | goto out; |
980 | 0 | } |
981 | 0 | } |
982 | 0 | } |
983 | | |
984 | 0 | out: |
985 | 0 | return res; |
986 | 0 | } |
987 | | |
988 | | VkResult loader_add_device_extensions(const struct loader_instance *inst, |
989 | | PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties, |
990 | | VkPhysicalDevice physical_device, const char *lib_name, |
991 | 0 | struct loader_extension_list *ext_list) { |
992 | 0 | uint32_t i = 0, count = 0; |
993 | 0 | VkResult res = VK_SUCCESS; |
994 | 0 | VkExtensionProperties *ext_props = NULL; |
995 | |
|
996 | 0 | res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL); |
997 | 0 | if (res != VK_SUCCESS) { |
998 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
999 | 0 | "loader_add_device_extensions: Error getting physical device extension info count from library %s", lib_name); |
1000 | 0 | return res; |
1001 | 0 | } |
1002 | 0 | if (count > 0) { |
1003 | 0 | ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); |
1004 | 0 | if (!ext_props) { |
1005 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1006 | 0 | "loader_add_device_extensions: Failed to allocate space for device extension properties from library %s.", |
1007 | 0 | lib_name); |
1008 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
1009 | 0 | } |
1010 | 0 | res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props); |
1011 | 0 | if (res != VK_SUCCESS) { |
1012 | 0 | return res; |
1013 | 0 | } |
1014 | 0 | for (i = 0; i < count; i++) { |
1015 | 0 | res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); |
1016 | 0 | if (res != VK_SUCCESS) { |
1017 | 0 | return res; |
1018 | 0 | } |
1019 | 0 | } |
1020 | 0 | } |
1021 | | |
1022 | 0 | return VK_SUCCESS; |
1023 | 0 | } |
1024 | | |
1025 | 10.3k | VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) { |
1026 | 10.3k | size_t capacity = 32 * element_size; |
1027 | 10.3k | list_info->count = 0; |
1028 | 10.3k | list_info->capacity = 0; |
1029 | 10.3k | list_info->list = loader_instance_heap_calloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1030 | 10.3k | if (list_info->list == NULL) { |
1031 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list"); |
1032 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
1033 | 0 | } |
1034 | 10.3k | list_info->capacity = capacity; |
1035 | 10.3k | return VK_SUCCESS; |
1036 | 10.3k | } |
1037 | | |
1038 | 0 | VkResult loader_resize_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info) { |
1039 | 0 | list_info->list = loader_instance_heap_realloc(inst, list_info->list, list_info->capacity, list_info->capacity * 2, |
1040 | 0 | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1041 | 0 | if (list_info->list == NULL) { |
1042 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_resize_generic_list: Failed to allocate space for generic list"); |
1043 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
1044 | 0 | } |
1045 | 0 | list_info->capacity = list_info->capacity * 2; |
1046 | 0 | return VK_SUCCESS; |
1047 | 0 | } |
1048 | | |
1049 | 2.32M | void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) { |
1050 | 2.32M | loader_instance_heap_free(inst, list->list); |
1051 | 2.32M | memset(list, 0, sizeof(struct loader_generic_list)); |
1052 | 2.32M | } |
1053 | | |
1054 | | VkResult loader_get_next_available_entry(const struct loader_instance *inst, struct loader_used_object_list *list_info, |
1055 | 0 | uint32_t *free_index, const VkAllocationCallbacks *pAllocator) { |
1056 | 0 | if (NULL == list_info->list) { |
1057 | 0 | VkResult res = |
1058 | 0 | loader_init_generic_list(inst, (struct loader_generic_list *)list_info, sizeof(struct loader_used_object_status)); |
1059 | 0 | if (VK_SUCCESS != res) { |
1060 | 0 | return res; |
1061 | 0 | } |
1062 | 0 | } |
1063 | 0 | for (uint32_t i = 0; i < list_info->capacity / sizeof(struct loader_used_object_status); i++) { |
1064 | 0 | if (list_info->list[i].status == VK_FALSE) { |
1065 | 0 | list_info->list[i].status = VK_TRUE; |
1066 | 0 | if (pAllocator) { |
1067 | 0 | list_info->list[i].allocation_callbacks = *pAllocator; |
1068 | 0 | } else { |
1069 | 0 | memset(&list_info->list[i].allocation_callbacks, 0, sizeof(VkAllocationCallbacks)); |
1070 | 0 | } |
1071 | 0 | *free_index = i; |
1072 | 0 | return VK_SUCCESS; |
1073 | 0 | } |
1074 | 0 | } |
1075 | | // No free space, must resize |
1076 | | |
1077 | 0 | size_t old_capacity = list_info->capacity; |
1078 | 0 | VkResult res = loader_resize_generic_list(inst, (struct loader_generic_list *)list_info); |
1079 | 0 | if (VK_SUCCESS != res) { |
1080 | 0 | return res; |
1081 | 0 | } |
1082 | 0 | uint32_t new_index = (uint32_t)(old_capacity / sizeof(struct loader_used_object_status)); |
1083 | | // Zero out the newly allocated back half of list. |
1084 | 0 | memset(&list_info->list[new_index], 0, old_capacity); |
1085 | 0 | list_info->list[new_index].status = VK_TRUE; |
1086 | 0 | if (pAllocator) { |
1087 | 0 | list_info->list[new_index].allocation_callbacks = *pAllocator; |
1088 | 0 | } else { |
1089 | 0 | memset(&list_info->list[new_index].allocation_callbacks, 0, sizeof(VkAllocationCallbacks)); |
1090 | 0 | } |
1091 | 0 | *free_index = new_index; |
1092 | 0 | return VK_SUCCESS; |
1093 | 0 | } |
1094 | | |
1095 | 0 | void loader_release_object_from_list(struct loader_used_object_list *list_info, uint32_t index_to_free) { |
1096 | 0 | if (list_info->list && list_info->capacity > index_to_free * sizeof(struct loader_used_object_status)) { |
1097 | 0 | list_info->list[index_to_free].status = VK_FALSE; |
1098 | 0 | memset(&list_info->list[index_to_free].allocation_callbacks, 0, sizeof(VkAllocationCallbacks)); |
1099 | 0 | } |
1100 | 0 | } |
1101 | | |
1102 | | // Append non-duplicate extension properties defined in props to the given ext_list. |
1103 | | // Return - Vk_SUCCESS on success |
1104 | | VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list, |
1105 | 43.9k | uint32_t prop_list_count, const VkExtensionProperties *props) { |
1106 | 43.9k | if (ext_list->list == NULL || ext_list->capacity == 0) { |
1107 | 1.67k | VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties)); |
1108 | 1.67k | if (VK_SUCCESS != res) { |
1109 | 0 | return res; |
1110 | 0 | } |
1111 | 1.67k | } |
1112 | | |
1113 | 87.9k | for (uint32_t i = 0; i < prop_list_count; i++) { |
1114 | 43.9k | const VkExtensionProperties *cur_ext = &props[i]; |
1115 | | |
1116 | | // look for duplicates |
1117 | 43.9k | if (has_vk_extension_property(cur_ext, ext_list)) { |
1118 | 16.8k | continue; |
1119 | 16.8k | } |
1120 | | |
1121 | | // add to list at end |
1122 | | // check for enough capacity |
1123 | 27.1k | if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) { |
1124 | 596 | void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2, |
1125 | 596 | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1126 | 596 | if (new_ptr == NULL) { |
1127 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1128 | 0 | "loader_add_to_ext_list: Failed to reallocate space for extension list"); |
1129 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
1130 | 0 | } |
1131 | 596 | ext_list->list = new_ptr; |
1132 | | |
1133 | | // double capacity |
1134 | 596 | ext_list->capacity *= 2; |
1135 | 596 | } |
1136 | | |
1137 | 27.1k | memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties)); |
1138 | 27.1k | ext_list->count++; |
1139 | 27.1k | } |
1140 | 43.9k | return VK_SUCCESS; |
1141 | 43.9k | } |
1142 | | |
1143 | | // Append one extension property defined in props with entrypoints defined in entries to the given |
1144 | | // ext_list. Do not append if a duplicate. |
1145 | | // If this is a duplicate, this function free's the passed in entries - as in it takes ownership over that list (if it is not |
1146 | | // NULL) Return - Vk_SUCCESS on success |
1147 | | VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list, |
1148 | 45.8k | const VkExtensionProperties *props, struct loader_string_list *entrys) { |
1149 | 45.8k | VkResult res = VK_SUCCESS; |
1150 | 45.8k | bool should_free_entrys = true; |
1151 | 45.8k | if (ext_list->list == NULL || ext_list->capacity == 0) { |
1152 | 2.17k | res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props)); |
1153 | 2.17k | if (VK_SUCCESS != res) { |
1154 | 0 | goto out; |
1155 | 0 | } |
1156 | 2.17k | } |
1157 | | |
1158 | | // look for duplicates |
1159 | 45.8k | if (has_vk_dev_ext_property(props, ext_list)) { |
1160 | 12.8k | goto out; |
1161 | 12.8k | } |
1162 | | |
1163 | 32.9k | uint32_t idx = ext_list->count; |
1164 | | // add to list at end |
1165 | | // check for enough capacity |
1166 | 32.9k | if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) { |
1167 | 821 | void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2, |
1168 | 821 | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1169 | | |
1170 | 821 | if (NULL == new_ptr) { |
1171 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1172 | 0 | "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list"); |
1173 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
1174 | 0 | goto out; |
1175 | 0 | } |
1176 | 821 | ext_list->list = new_ptr; |
1177 | | |
1178 | | // double capacity |
1179 | 821 | ext_list->capacity *= 2; |
1180 | 821 | } |
1181 | | |
1182 | 32.9k | memcpy(&ext_list->list[idx].props, props, sizeof(*props)); |
1183 | 32.9k | if (entrys) { |
1184 | 23 | ext_list->list[idx].entrypoints = *entrys; |
1185 | 23 | should_free_entrys = false; |
1186 | 23 | } |
1187 | 32.9k | ext_list->count++; |
1188 | 45.8k | out: |
1189 | 45.8k | if (NULL != entrys && should_free_entrys) { |
1190 | 66 | free_string_list(inst, entrys); |
1191 | 66 | } |
1192 | 45.8k | return res; |
1193 | 32.9k | } |
1194 | | |
1195 | | // Create storage for pointers to loader_layer_properties |
1196 | 0 | bool loader_init_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list) { |
1197 | 0 | list->capacity = 32 * sizeof(void *); |
1198 | 0 | list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1199 | 0 | if (list->list == NULL) { |
1200 | 0 | return false; |
1201 | 0 | } |
1202 | 0 | list->count = 0; |
1203 | 0 | return true; |
1204 | 0 | } |
1205 | | |
1206 | | // Search the given array of layer names for an entry matching the given VkLayerProperties |
1207 | | bool loader_names_array_has_layer_property(const VkLayerProperties *vk_layer_prop, uint32_t layer_info_count, |
1208 | 0 | struct activated_layer_info *layer_info) { |
1209 | 0 | for (uint32_t i = 0; i < layer_info_count; i++) { |
1210 | 0 | if (strcmp(vk_layer_prop->layerName, layer_info[i].name) == 0) { |
1211 | 0 | return true; |
1212 | 0 | } |
1213 | 0 | } |
1214 | 0 | return false; |
1215 | 0 | } |
1216 | | |
1217 | 15.3k | void loader_destroy_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *layer_list) { |
1218 | 15.3k | loader_instance_heap_free(inst, layer_list->list); |
1219 | 15.3k | memset(layer_list, 0, sizeof(struct loader_pointer_layer_list)); |
1220 | 15.3k | } |
1221 | | |
1222 | | // Append layer properties defined in prop_list to the given layer_info list |
1223 | | VkResult loader_add_layer_properties_to_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list, |
1224 | 0 | struct loader_layer_properties *props) { |
1225 | 0 | if (list->list == NULL || list->capacity == 0) { |
1226 | 0 | if (!loader_init_pointer_layer_list(inst, list)) { |
1227 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
1228 | 0 | } |
1229 | 0 | } |
1230 | | |
1231 | | // Check for enough capacity |
1232 | 0 | if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) { |
1233 | 0 | size_t new_capacity = list->capacity * 2; |
1234 | 0 | void *new_ptr = |
1235 | 0 | loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1236 | 0 | if (NULL == new_ptr) { |
1237 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1238 | 0 | "loader_add_layer_properties_to_list: Realloc failed for when attempting to add new layer"); |
1239 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
1240 | 0 | } |
1241 | 0 | list->list = new_ptr; |
1242 | 0 | list->capacity = new_capacity; |
1243 | 0 | } |
1244 | 0 | list->list[list->count++] = props; |
1245 | |
|
1246 | 0 | return VK_SUCCESS; |
1247 | 0 | } |
1248 | | |
1249 | | // Determine if the provided explicit layer should be available by querying the appropriate environmental variables. |
1250 | | bool loader_layer_is_available(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
1251 | 916k | const struct loader_layer_properties *prop) { |
1252 | 916k | bool available = true; |
1253 | 916k | bool is_implicit = (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)); |
1254 | 916k | bool disabled_by_type = |
1255 | 916k | (is_implicit) ? (filters->disable_filter.disable_all_implicit) : (filters->disable_filter.disable_all_explicit); |
1256 | 916k | if ((filters->disable_filter.disable_all || disabled_by_type || |
1257 | 916k | check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) && |
1258 | 0 | !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) { |
1259 | 0 | available = false; |
1260 | 0 | } |
1261 | 916k | if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) { |
1262 | 199 | available = true; |
1263 | 916k | } else if (!available) { |
1264 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1265 | 0 | "Layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName, |
1266 | 0 | VK_LAYERS_DISABLE_ENV_VAR); |
1267 | 0 | } |
1268 | | |
1269 | 916k | return available; |
1270 | 916k | } |
1271 | | |
1272 | | // Search the given search_list for any layers in the props list. Add these to the |
1273 | | // output layer_list. |
1274 | | VkResult loader_add_layer_names_to_list(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
1275 | | struct loader_pointer_layer_list *output_list, |
1276 | | struct loader_pointer_layer_list *expanded_output_list, uint32_t name_count, |
1277 | 0 | const char *const *names, const struct loader_layer_list *source_list) { |
1278 | 0 | VkResult err = VK_SUCCESS; |
1279 | |
|
1280 | 0 | for (uint32_t i = 0; i < name_count; i++) { |
1281 | 0 | const char *source_name = names[i]; |
1282 | |
|
1283 | 0 | struct loader_layer_properties *layer_prop = loader_find_layer_property(source_name, source_list); |
1284 | 0 | if (NULL == layer_prop) { |
1285 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1286 | 0 | "loader_add_layer_names_to_list: Unable to find layer \"%s\"", source_name); |
1287 | 0 | err = VK_ERROR_LAYER_NOT_PRESENT; |
1288 | 0 | continue; |
1289 | 0 | } |
1290 | | |
1291 | | // Make sure the layer isn't already in the output_list, skip adding it if it is. |
1292 | 0 | if (loader_find_layer_name_in_list(source_name, output_list)) { |
1293 | 0 | continue; |
1294 | 0 | } |
1295 | | |
1296 | 0 | if (!loader_layer_is_available(inst, filters, layer_prop)) { |
1297 | 0 | continue; |
1298 | 0 | } |
1299 | | |
1300 | | // If not a meta-layer, simply add it. |
1301 | 0 | if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { |
1302 | 0 | layer_prop->enabled_by_what = ENABLED_BY_WHAT_IN_APPLICATION_API; |
1303 | 0 | err = loader_add_layer_properties_to_list(inst, output_list, layer_prop); |
1304 | 0 | if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err; |
1305 | 0 | err = loader_add_layer_properties_to_list(inst, expanded_output_list, layer_prop); |
1306 | 0 | if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err; |
1307 | 0 | } else { |
1308 | 0 | err = loader_add_meta_layer(inst, filters, layer_prop, output_list, expanded_output_list, source_list, NULL); |
1309 | 0 | if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err; |
1310 | 0 | } |
1311 | 0 | } |
1312 | | |
1313 | 0 | return err; |
1314 | 0 | } |
1315 | | |
1316 | | // Determine if the provided implicit layer should be enabled by querying the appropriate environmental variables. |
1317 | | // For an implicit layer, at least a disable environment variable is required. |
1318 | | bool loader_implicit_layer_is_enabled(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
1319 | 755 | const struct loader_layer_properties *prop) { |
1320 | 755 | bool enable = false; |
1321 | 755 | bool forced_disabled = false; |
1322 | 755 | bool forced_enabled = false; |
1323 | | |
1324 | 755 | if ((filters->disable_filter.disable_all || filters->disable_filter.disable_all_implicit || |
1325 | 755 | check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) && |
1326 | 0 | !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) { |
1327 | 0 | forced_disabled = true; |
1328 | 0 | } |
1329 | 755 | if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) { |
1330 | 0 | forced_enabled = true; |
1331 | 0 | } |
1332 | | |
1333 | | // If no enable_environment variable is specified, this implicit layer is always be enabled by default. |
1334 | 755 | if (NULL == prop->enable_env_var.name) { |
1335 | 632 | enable = true; |
1336 | 632 | } else { |
1337 | 123 | char *env_value = loader_getenv(prop->enable_env_var.name, inst); |
1338 | 123 | if (env_value && !strcmp(prop->enable_env_var.value, env_value)) { |
1339 | 0 | enable = true; |
1340 | 0 | } |
1341 | | |
1342 | | // Otherwise, only enable this layer if the enable environment variable is defined |
1343 | 123 | loader_free_getenv(env_value, inst); |
1344 | 123 | } |
1345 | | |
1346 | 755 | if (forced_enabled) { |
1347 | | // Only report a message that we've forced on a layer if it wouldn't have been enabled |
1348 | | // normally. |
1349 | 0 | if (!enable) { |
1350 | 0 | enable = true; |
1351 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1352 | 0 | "Implicit layer \"%s\" forced enabled due to env var \'%s\'.", prop->info.layerName, |
1353 | 0 | VK_LAYERS_ENABLE_ENV_VAR); |
1354 | 0 | } |
1355 | 755 | } else if (enable && forced_disabled) { |
1356 | 0 | enable = false; |
1357 | | // Report a message that we've forced off a layer if it would have been enabled normally. |
1358 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1359 | 0 | "Implicit layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName, |
1360 | 0 | VK_LAYERS_DISABLE_ENV_VAR); |
1361 | 0 | return enable; |
1362 | 0 | } |
1363 | | |
1364 | | // The disable_environment has priority over everything else. If it is defined, the layer is always |
1365 | | // disabled. |
1366 | 755 | if (NULL != prop->disable_env_var.name) { |
1367 | 661 | char *env_value = loader_getenv(prop->disable_env_var.name, inst); |
1368 | 661 | if (NULL != env_value) { |
1369 | 3 | enable = false; |
1370 | 3 | } |
1371 | 661 | loader_free_getenv(env_value, inst); |
1372 | 661 | } else if ((prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == 0) { |
1373 | 94 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1374 | 94 | "Implicit layer \"%s\" missing disabled environment variable!", prop->info.layerName); |
1375 | 94 | } |
1376 | | |
1377 | | // Enable this layer if it is included in the override layer |
1378 | 755 | if (inst != NULL && inst->override_layer_present) { |
1379 | 0 | struct loader_layer_properties *override = NULL; |
1380 | 0 | for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) { |
1381 | 0 | if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) { |
1382 | 0 | override = &inst->instance_layer_list.list[i]; |
1383 | 0 | break; |
1384 | 0 | } |
1385 | 0 | } |
1386 | 0 | if (override != NULL) { |
1387 | 0 | for (uint32_t i = 0; i < override->component_layer_names.count; ++i) { |
1388 | 0 | if (strcmp(override->component_layer_names.list[i], prop->info.layerName) == 0) { |
1389 | 0 | enable = true; |
1390 | 0 | break; |
1391 | 0 | } |
1392 | 0 | } |
1393 | 0 | } |
1394 | 0 | } |
1395 | | |
1396 | 755 | return enable; |
1397 | 755 | } |
1398 | | |
1399 | | // Check the individual implicit layer for the enable/disable environment variable settings. Only add it after |
1400 | | // every check has passed indicating it should be used, including making sure a layer of the same name hasn't already been |
1401 | | // added. |
1402 | | VkResult loader_add_implicit_layer(const struct loader_instance *inst, struct loader_layer_properties *prop, |
1403 | | const struct loader_envvar_all_filters *filters, struct loader_pointer_layer_list *target_list, |
1404 | | struct loader_pointer_layer_list *expanded_target_list, |
1405 | 0 | const struct loader_layer_list *source_list) { |
1406 | 0 | VkResult result = VK_SUCCESS; |
1407 | 0 | if (loader_implicit_layer_is_enabled(inst, filters, prop)) { |
1408 | 0 | if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { |
1409 | | // Make sure the layer isn't already in the output_list, skip adding it if it is. |
1410 | 0 | if (loader_find_layer_name_in_list(&prop->info.layerName[0], target_list)) { |
1411 | 0 | return result; |
1412 | 0 | } |
1413 | 0 | prop->enabled_by_what = ENABLED_BY_WHAT_IMPLICIT_LAYER; |
1414 | 0 | result = loader_add_layer_properties_to_list(inst, target_list, prop); |
1415 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1416 | 0 | if (NULL != expanded_target_list) { |
1417 | 0 | result = loader_add_layer_properties_to_list(inst, expanded_target_list, prop); |
1418 | 0 | } |
1419 | 0 | } else { |
1420 | 0 | result = loader_add_meta_layer(inst, filters, prop, target_list, expanded_target_list, source_list, NULL); |
1421 | 0 | } |
1422 | 0 | } |
1423 | 0 | return result; |
1424 | 0 | } |
1425 | | |
1426 | | // Add the component layers of a meta-layer to the active list of layers |
1427 | | VkResult loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
1428 | | struct loader_layer_properties *prop, struct loader_pointer_layer_list *target_list, |
1429 | | struct loader_pointer_layer_list *expanded_target_list, const struct loader_layer_list *source_list, |
1430 | 0 | bool *out_found_all_component_layers) { |
1431 | 0 | VkResult result = VK_SUCCESS; |
1432 | 0 | bool found_all_component_layers = true; |
1433 | | |
1434 | | // We need to add all the individual component layers |
1435 | 0 | loader_api_version meta_layer_api_version = loader_make_version(prop->info.specVersion); |
1436 | 0 | for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) { |
1437 | 0 | struct loader_layer_properties *search_prop = |
1438 | 0 | loader_find_layer_property(prop->component_layer_names.list[comp_layer], source_list); |
1439 | 0 | if (search_prop != NULL) { |
1440 | 0 | loader_api_version search_prop_version = loader_make_version(prop->info.specVersion); |
1441 | 0 | if (!loader_check_version_meets_required(meta_layer_api_version, search_prop_version)) { |
1442 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1443 | 0 | "Meta-layer \"%s\" API version %u.%u, component layer \"%s\" version %u.%u, may have " |
1444 | 0 | "incompatibilities (Policy #LLP_LAYER_8)!", |
1445 | 0 | prop->info.layerName, meta_layer_api_version.major, meta_layer_api_version.minor, |
1446 | 0 | search_prop->info.layerName, search_prop_version.major, search_prop_version.minor); |
1447 | 0 | } |
1448 | |
|
1449 | 0 | if (!loader_layer_is_available(inst, filters, search_prop)) { |
1450 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1451 | 0 | "Meta Layer \"%s\" component layer \"%s\" disabled.", prop->info.layerName, search_prop->info.layerName); |
1452 | 0 | continue; |
1453 | 0 | } |
1454 | | |
1455 | | // If the component layer is itself an implicit layer, we need to do the implicit layer enable |
1456 | | // checks |
1457 | 0 | if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { |
1458 | 0 | search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER; |
1459 | 0 | result = loader_add_implicit_layer(inst, search_prop, filters, target_list, expanded_target_list, source_list); |
1460 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1461 | 0 | } else { |
1462 | 0 | if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { |
1463 | 0 | bool found_layers_in_component_meta_layer = true; |
1464 | 0 | search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER; |
1465 | 0 | result = loader_add_meta_layer(inst, filters, search_prop, target_list, expanded_target_list, source_list, |
1466 | 0 | &found_layers_in_component_meta_layer); |
1467 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1468 | 0 | if (!found_layers_in_component_meta_layer) found_all_component_layers = false; |
1469 | 0 | } else if (!loader_find_layer_name_in_list(&search_prop->info.layerName[0], target_list)) { |
1470 | | // Make sure the layer isn't already in the output_list, skip adding it if it is. |
1471 | 0 | search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER; |
1472 | 0 | result = loader_add_layer_properties_to_list(inst, target_list, search_prop); |
1473 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1474 | 0 | if (NULL != expanded_target_list) { |
1475 | 0 | result = loader_add_layer_properties_to_list(inst, expanded_target_list, search_prop); |
1476 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1477 | 0 | } |
1478 | 0 | } |
1479 | 0 | } |
1480 | 0 | } else { |
1481 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
1482 | 0 | "Failed to find layer name \"%s\" component layer \"%s\" to activate (Policy #LLP_LAYER_7)", |
1483 | 0 | prop->component_layer_names.list[comp_layer], prop->component_layer_names.list[comp_layer]); |
1484 | 0 | found_all_component_layers = false; |
1485 | 0 | } |
1486 | 0 | } |
1487 | | |
1488 | | // Add this layer to the overall target list (not the expanded one) |
1489 | 0 | if (found_all_component_layers) { |
1490 | 0 | prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER; |
1491 | 0 | result = loader_add_layer_properties_to_list(inst, target_list, prop); |
1492 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
1493 | | // Write the result to out_found_all_component_layers in case this function is being recursed |
1494 | 0 | if (out_found_all_component_layers) *out_found_all_component_layers = found_all_component_layers; |
1495 | 0 | } |
1496 | | |
1497 | 0 | return result; |
1498 | 0 | } |
1499 | | |
1500 | 0 | VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) { |
1501 | 0 | for (uint32_t i = 0; i < list->count; i++) { |
1502 | 0 | if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i]; |
1503 | 0 | } |
1504 | 0 | return NULL; |
1505 | 0 | } |
1506 | | |
1507 | 0 | VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) { |
1508 | 0 | for (uint32_t i = 0; i < list->count; i++) { |
1509 | 0 | if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props; |
1510 | 0 | } |
1511 | 0 | return NULL; |
1512 | 0 | } |
1513 | | |
1514 | | // For Instance extensions implemented within the loader (i.e. DEBUG_REPORT |
1515 | | // the extension must provide two entry points for the loader to use: |
1516 | | // - "trampoline" entry point - this is the address returned by GetProcAddr |
1517 | | // and will always do what's necessary to support a |
1518 | | // global call. |
1519 | | // - "terminator" function - this function will be put at the end of the |
1520 | | // instance chain and will contain the necessary logic |
1521 | | // to call / process the extension for the appropriate |
1522 | | // ICDs that are available. |
1523 | | // There is no generic mechanism for including these functions, the references |
1524 | | // must be placed into the appropriate loader entry points. |
1525 | | // GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr |
1526 | | // requests |
1527 | | // loader_coalesce_extensions(void) - add extension records to the list of global |
1528 | | // extension available to the app. |
1529 | | // instance_disp - add function pointer for terminator function |
1530 | | // to this array. |
1531 | | // The extension itself should be in a separate file that will be linked directly |
1532 | | // with the loader. |
1533 | | VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list, |
1534 | 0 | struct loader_extension_list *inst_exts) { |
1535 | 0 | struct loader_extension_list icd_exts; |
1536 | 0 | VkResult res = VK_SUCCESS; |
1537 | 0 | char *env_value; |
1538 | 0 | bool filter_extensions = true; |
1539 | | |
1540 | | // Check if a user wants to disable the instance extension filtering behavior |
1541 | 0 | env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst); |
1542 | 0 | if (NULL != env_value && atoi(env_value) != 0) { |
1543 | 0 | filter_extensions = false; |
1544 | 0 | } |
1545 | 0 | loader_free_getenv(env_value, inst); |
1546 | | |
1547 | | // traverse scanned icd list adding non-duplicate extensions to the list |
1548 | 0 | for (uint32_t i = 0; i < icd_tramp_list->count; i++) { |
1549 | 0 | res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); |
1550 | 0 | if (VK_SUCCESS != res) { |
1551 | 0 | goto out; |
1552 | 0 | } |
1553 | 0 | res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties, |
1554 | 0 | icd_tramp_list->scanned_list[i].lib_name, &icd_exts); |
1555 | 0 | if (VK_SUCCESS == res) { |
1556 | 0 | if (filter_extensions) { |
1557 | | // Remove any extensions not recognized by the loader |
1558 | 0 | for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) { |
1559 | | // See if the extension is in the list of supported extensions |
1560 | 0 | bool found = false; |
1561 | 0 | for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) { |
1562 | 0 | if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) { |
1563 | 0 | found = true; |
1564 | 0 | break; |
1565 | 0 | } |
1566 | 0 | } |
1567 | | |
1568 | | // If it isn't in the list, remove it |
1569 | 0 | if (!found) { |
1570 | 0 | for (uint32_t k = j + 1; k < icd_exts.count; k++) { |
1571 | 0 | icd_exts.list[k - 1] = icd_exts.list[k]; |
1572 | 0 | } |
1573 | 0 | --icd_exts.count; |
1574 | 0 | --j; |
1575 | 0 | } |
1576 | 0 | } |
1577 | 0 | } |
1578 | |
|
1579 | 0 | res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list); |
1580 | 0 | } |
1581 | 0 | loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts); |
1582 | 0 | if (VK_SUCCESS != res) { |
1583 | 0 | goto out; |
1584 | 0 | } |
1585 | 0 | }; |
1586 | | |
1587 | | // Traverse loader's extensions, adding non-duplicate extensions to the list |
1588 | 0 | res = add_debug_extensions_to_ext_list(inst, inst_exts); |
1589 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
1590 | 0 | goto out; |
1591 | 0 | } |
1592 | 0 | const VkExtensionProperties portability_enumeration_extension_info[] = { |
1593 | 0 | {VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION}}; |
1594 | | |
1595 | | // Add VK_KHR_portability_subset |
1596 | 0 | res = loader_add_to_ext_list(inst, inst_exts, sizeof(portability_enumeration_extension_info) / sizeof(VkExtensionProperties), |
1597 | 0 | portability_enumeration_extension_info); |
1598 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
1599 | 0 | goto out; |
1600 | 0 | } |
1601 | | |
1602 | 0 | const VkExtensionProperties direct_driver_loading_extension_info[] = { |
1603 | 0 | {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME, VK_LUNARG_DIRECT_DRIVER_LOADING_SPEC_VERSION}}; |
1604 | | |
1605 | | // Add VK_LUNARG_direct_driver_loading |
1606 | 0 | res = loader_add_to_ext_list(inst, inst_exts, sizeof(direct_driver_loading_extension_info) / sizeof(VkExtensionProperties), |
1607 | 0 | direct_driver_loading_extension_info); |
1608 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
1609 | 0 | goto out; |
1610 | 0 | } |
1611 | | |
1612 | 0 | out: |
1613 | 0 | return res; |
1614 | 0 | } |
1615 | | |
1616 | 0 | struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev) { |
1617 | 0 | VkLayerDispatchTable *dispatch_table_device = loader_get_dispatch(device); |
1618 | 0 | if (NULL == dispatch_table_device) { |
1619 | 0 | *found_dev = NULL; |
1620 | 0 | return NULL; |
1621 | 0 | } |
1622 | 0 | loader_platform_thread_lock_mutex(&loader_global_instance_list_lock); |
1623 | 0 | *found_dev = NULL; |
1624 | |
|
1625 | 0 | for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { |
1626 | 0 | for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { |
1627 | 0 | for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next) { |
1628 | | // Value comparison of device prevents object wrapping by layers |
1629 | 0 | if (loader_get_dispatch(dev->icd_device) == dispatch_table_device || |
1630 | 0 | (dev->chain_device != VK_NULL_HANDLE && loader_get_dispatch(dev->chain_device) == dispatch_table_device)) { |
1631 | 0 | *found_dev = dev; |
1632 | 0 | loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock); |
1633 | 0 | return icd_term; |
1634 | 0 | } |
1635 | 0 | } |
1636 | 0 | } |
1637 | 0 | } |
1638 | 0 | loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock); |
1639 | 0 | return NULL; |
1640 | 0 | } |
1641 | | |
1642 | 0 | void loader_destroy_logical_device(struct loader_device *dev, const VkAllocationCallbacks *pAllocator) { |
1643 | 0 | if (pAllocator) { |
1644 | 0 | dev->alloc_callbacks = *pAllocator; |
1645 | 0 | } |
1646 | 0 | loader_device_heap_free(dev, dev); |
1647 | 0 | } |
1648 | | |
1649 | 0 | struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator) { |
1650 | 0 | struct loader_device *new_dev; |
1651 | 0 | new_dev = loader_calloc(pAllocator, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); |
1652 | |
|
1653 | 0 | if (!new_dev) { |
1654 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_logical_device: Failed to alloc struct loader_device"); |
1655 | 0 | return NULL; |
1656 | 0 | } |
1657 | | |
1658 | 0 | new_dev->loader_dispatch.core_dispatch.magic = DEVICE_DISP_TABLE_MAGIC_NUMBER; |
1659 | |
|
1660 | 0 | if (pAllocator) { |
1661 | 0 | new_dev->alloc_callbacks = *pAllocator; |
1662 | 0 | } |
1663 | |
|
1664 | 0 | return new_dev; |
1665 | 0 | } |
1666 | | |
1667 | 0 | void loader_add_logical_device(struct loader_icd_term *icd_term, struct loader_device *dev) { |
1668 | 0 | dev->next = icd_term->logical_device_list; |
1669 | 0 | icd_term->logical_device_list = dev; |
1670 | 0 | } |
1671 | | |
1672 | | void loader_remove_logical_device(struct loader_icd_term *icd_term, struct loader_device *found_dev, |
1673 | 0 | const VkAllocationCallbacks *pAllocator) { |
1674 | 0 | struct loader_device *dev, *prev_dev; |
1675 | |
|
1676 | 0 | if (!icd_term || !found_dev) return; |
1677 | | |
1678 | 0 | prev_dev = NULL; |
1679 | 0 | dev = icd_term->logical_device_list; |
1680 | 0 | while (dev && dev != found_dev) { |
1681 | 0 | prev_dev = dev; |
1682 | 0 | dev = dev->next; |
1683 | 0 | } |
1684 | |
|
1685 | 0 | if (prev_dev) |
1686 | 0 | prev_dev->next = found_dev->next; |
1687 | 0 | else |
1688 | 0 | icd_term->logical_device_list = found_dev->next; |
1689 | 0 | loader_destroy_logical_device(found_dev, pAllocator); |
1690 | 0 | } |
1691 | | |
1692 | 0 | const VkAllocationCallbacks *ignore_null_callback(const VkAllocationCallbacks *callbacks) { |
1693 | 0 | return NULL != callbacks->pfnAllocation && NULL != callbacks->pfnFree && NULL != callbacks->pfnReallocation && |
1694 | 0 | NULL != callbacks->pfnInternalAllocation && NULL != callbacks->pfnInternalFree |
1695 | 0 | ? callbacks |
1696 | 0 | : NULL; |
1697 | 0 | } |
1698 | | |
1699 | | // Try to close any open objects on the loader_icd_term - this must be done before destroying the instance |
1700 | 0 | void loader_icd_close_objects(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term) { |
1701 | 0 | for (uint32_t i = 0; i < icd_term->surface_list.capacity / sizeof(VkSurfaceKHR); i++) { |
1702 | 0 | if (ptr_inst->surfaces_list.capacity > i * sizeof(struct loader_used_object_status) && |
1703 | 0 | ptr_inst->surfaces_list.list[i].status == VK_TRUE && NULL != icd_term->surface_list.list && |
1704 | 0 | icd_term->surface_list.list[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { |
1705 | 0 | icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, icd_term->surface_list.list[i], |
1706 | 0 | ignore_null_callback(&(ptr_inst->surfaces_list.list[i].allocation_callbacks))); |
1707 | 0 | icd_term->surface_list.list[i] = (VkSurfaceKHR)(uintptr_t)NULL; |
1708 | 0 | } |
1709 | 0 | } |
1710 | 0 | for (uint32_t i = 0; i < icd_term->debug_utils_messenger_list.capacity / sizeof(VkDebugUtilsMessengerEXT); i++) { |
1711 | 0 | if (ptr_inst->debug_utils_messengers_list.capacity > i * sizeof(struct loader_used_object_status) && |
1712 | 0 | ptr_inst->debug_utils_messengers_list.list[i].status == VK_TRUE && NULL != icd_term->debug_utils_messenger_list.list && |
1713 | 0 | icd_term->debug_utils_messenger_list.list[i] && NULL != icd_term->dispatch.DestroyDebugUtilsMessengerEXT) { |
1714 | 0 | icd_term->dispatch.DestroyDebugUtilsMessengerEXT( |
1715 | 0 | icd_term->instance, icd_term->debug_utils_messenger_list.list[i], |
1716 | 0 | ignore_null_callback(&(ptr_inst->debug_utils_messengers_list.list[i].allocation_callbacks))); |
1717 | 0 | icd_term->debug_utils_messenger_list.list[i] = (VkDebugUtilsMessengerEXT)(uintptr_t)NULL; |
1718 | 0 | } |
1719 | 0 | } |
1720 | 0 | for (uint32_t i = 0; i < icd_term->debug_report_callback_list.capacity / sizeof(VkDebugReportCallbackEXT); i++) { |
1721 | 0 | if (ptr_inst->debug_report_callbacks_list.capacity > i * sizeof(struct loader_used_object_status) && |
1722 | 0 | ptr_inst->debug_report_callbacks_list.list[i].status == VK_TRUE && NULL != icd_term->debug_report_callback_list.list && |
1723 | 0 | icd_term->debug_report_callback_list.list[i] && NULL != icd_term->dispatch.DestroyDebugReportCallbackEXT) { |
1724 | 0 | icd_term->dispatch.DestroyDebugReportCallbackEXT( |
1725 | 0 | icd_term->instance, icd_term->debug_report_callback_list.list[i], |
1726 | 0 | ignore_null_callback(&(ptr_inst->debug_report_callbacks_list.list[i].allocation_callbacks))); |
1727 | 0 | icd_term->debug_report_callback_list.list[i] = (VkDebugReportCallbackEXT)(uintptr_t)NULL; |
1728 | 0 | } |
1729 | 0 | } |
1730 | 0 | } |
1731 | | // Free resources allocated inside the loader_icd_term |
1732 | | void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term, |
1733 | 0 | const VkAllocationCallbacks *pAllocator) { |
1734 | 0 | ptr_inst->icd_terms_count--; |
1735 | 0 | for (struct loader_device *dev = icd_term->logical_device_list; dev;) { |
1736 | 0 | struct loader_device *next_dev = dev->next; |
1737 | 0 | loader_destroy_logical_device(dev, pAllocator); |
1738 | 0 | dev = next_dev; |
1739 | 0 | } |
1740 | |
|
1741 | 0 | loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->surface_list); |
1742 | 0 | loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->debug_utils_messenger_list); |
1743 | 0 | loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->debug_report_callback_list); |
1744 | |
|
1745 | 0 | loader_instance_heap_free(ptr_inst, icd_term); |
1746 | 0 | } |
1747 | | |
1748 | 0 | struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) { |
1749 | 0 | struct loader_icd_term *icd_term; |
1750 | |
|
1751 | 0 | icd_term = loader_instance_heap_calloc(ptr_inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1752 | 0 | if (!icd_term) { |
1753 | 0 | return NULL; |
1754 | 0 | } |
1755 | | |
1756 | 0 | icd_term->scanned_icd = scanned_icd; |
1757 | 0 | icd_term->this_instance = ptr_inst; |
1758 | | |
1759 | | // Prepend to the list |
1760 | 0 | icd_term->next = ptr_inst->icd_terms; |
1761 | 0 | ptr_inst->icd_terms = icd_term; |
1762 | 0 | ptr_inst->icd_terms_count++; |
1763 | |
|
1764 | 0 | return icd_term; |
1765 | 0 | } |
1766 | | // Closes the library handle in the scanned ICD, free the lib_name string, and zeros out all data |
1767 | 0 | void loader_unload_scanned_icd(struct loader_instance *inst, struct loader_scanned_icd *scanned_icd) { |
1768 | 0 | if (NULL == scanned_icd) { |
1769 | 0 | return; |
1770 | 0 | } |
1771 | 0 | if (scanned_icd->handle) { |
1772 | 0 | loader_platform_close_library(scanned_icd->handle); |
1773 | 0 | scanned_icd->handle = NULL; |
1774 | 0 | } |
1775 | 0 | loader_instance_heap_free(inst, scanned_icd->lib_name); |
1776 | 0 | memset(scanned_icd, 0, sizeof(struct loader_scanned_icd)); |
1777 | 0 | } |
1778 | | |
1779 | | // Determine the ICD interface version to use. |
1780 | | // @param icd |
1781 | | // @param pVersion Output parameter indicating which version to use or 0 if |
1782 | | // the negotiation API is not supported by the ICD |
1783 | | // @return bool indicating true if the selected interface version is supported |
1784 | | // by the loader, false indicates the version is not supported |
1785 | 0 | bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) { |
1786 | 0 | if (fp_negotiate_icd_version == NULL) { |
1787 | | // ICD does not support the negotiation API, it supports version 0 or 1 |
1788 | | // calling code must determine if it is version 0 or 1 |
1789 | 0 | *pVersion = 0; |
1790 | 0 | } else { |
1791 | | // ICD supports the negotiation API, so call it with the loader's |
1792 | | // latest version supported |
1793 | 0 | *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION; |
1794 | 0 | VkResult result = fp_negotiate_icd_version(pVersion); |
1795 | |
|
1796 | 0 | if (result == VK_ERROR_INCOMPATIBLE_DRIVER) { |
1797 | | // ICD no longer supports the loader's latest interface version so |
1798 | | // fail loading the ICD |
1799 | 0 | return false; |
1800 | 0 | } |
1801 | 0 | } |
1802 | | |
1803 | | #if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0 |
1804 | | if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) { |
1805 | | // Loader no longer supports the ICD's latest interface version so fail |
1806 | | // loading the ICD |
1807 | | return false; |
1808 | | } |
1809 | | #endif |
1810 | 0 | return true; |
1811 | 0 | } |
1812 | | |
1813 | 8.78k | void loader_clear_scanned_icd_list(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) { |
1814 | 8.78k | if (0 != icd_tramp_list->capacity && icd_tramp_list->scanned_list) { |
1815 | 1.13k | for (uint32_t i = 0; i < icd_tramp_list->count; i++) { |
1816 | 0 | if (icd_tramp_list->scanned_list[i].handle) { |
1817 | 0 | loader_platform_close_library(icd_tramp_list->scanned_list[i].handle); |
1818 | 0 | icd_tramp_list->scanned_list[i].handle = NULL; |
1819 | 0 | } |
1820 | 0 | loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name); |
1821 | 0 | } |
1822 | 1.13k | loader_instance_heap_free(inst, icd_tramp_list->scanned_list); |
1823 | 1.13k | } |
1824 | 8.78k | memset(icd_tramp_list, 0, sizeof(struct loader_icd_tramp_list)); |
1825 | 8.78k | } |
1826 | | |
1827 | 1.13k | VkResult loader_init_scanned_icd_list(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) { |
1828 | 1.13k | VkResult res = VK_SUCCESS; |
1829 | 1.13k | loader_clear_scanned_icd_list(inst, icd_tramp_list); |
1830 | 1.13k | icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd); |
1831 | 1.13k | icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1832 | 1.13k | if (NULL == icd_tramp_list->scanned_list) { |
1833 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1834 | 0 | "loader_init_scanned_icd_list: Realloc failed for layer list when attempting to add new layer"); |
1835 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
1836 | 0 | } |
1837 | 1.13k | return res; |
1838 | 1.13k | } |
1839 | | |
1840 | | VkResult loader_add_direct_driver(const struct loader_instance *inst, uint32_t index, |
1841 | 0 | const VkDirectDriverLoadingInfoLUNARG *pDriver, struct loader_icd_tramp_list *icd_tramp_list) { |
1842 | | // Assume pDriver is valid, since there is no real way to check it. Calling code should make sure the pointer to the array |
1843 | | // of VkDirectDriverLoadingInfoLUNARG structures is non-null. |
1844 | 0 | if (NULL == pDriver->pfnGetInstanceProcAddr) { |
1845 | 0 | loader_log( |
1846 | 0 | inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
1847 | 0 | "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d contains a NULL pointer for the " |
1848 | 0 | "pfnGetInstanceProcAddr member, skipping.", |
1849 | 0 | index); |
1850 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
1851 | 0 | } |
1852 | | |
1853 | 0 | PFN_vkGetInstanceProcAddr fp_get_proc_addr = pDriver->pfnGetInstanceProcAddr; |
1854 | 0 | PFN_vkCreateInstance fp_create_inst = NULL; |
1855 | 0 | PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL; |
1856 | 0 | PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL; |
1857 | 0 | PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL; |
1858 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
1859 | | PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL; |
1860 | | #endif |
1861 | 0 | struct loader_scanned_icd *new_scanned_icd; |
1862 | 0 | uint32_t interface_version = 0; |
1863 | | |
1864 | | // Try to get the negotiate ICD interface version function |
1865 | 0 | fp_negotiate_icd_version = (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)pDriver->pfnGetInstanceProcAddr( |
1866 | 0 | NULL, "vk_icdNegotiateLoaderICDInterfaceVersion"); |
1867 | |
|
1868 | 0 | if (NULL == fp_negotiate_icd_version) { |
1869 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
1870 | 0 | "loader_add_direct_driver: Could not get 'vk_icdNegotiateLoaderICDInterfaceVersion' from " |
1871 | 0 | "VkDirectDriverLoadingInfoLUNARG structure at " |
1872 | 0 | "index %d, skipping.", |
1873 | 0 | index); |
1874 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
1875 | 0 | } |
1876 | | |
1877 | 0 | if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_version)) { |
1878 | 0 | loader_log( |
1879 | 0 | inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
1880 | 0 | "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, " |
1881 | 0 | "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading " |
1882 | 0 | "extension, skipping.", |
1883 | 0 | index, interface_version); |
1884 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
1885 | 0 | } |
1886 | | |
1887 | 0 | if (interface_version < 7) { |
1888 | 0 | loader_log( |
1889 | 0 | inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
1890 | 0 | "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, " |
1891 | 0 | "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading " |
1892 | 0 | "extension, skipping.", |
1893 | 0 | index, interface_version); |
1894 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
1895 | 0 | } |
1896 | | |
1897 | 0 | fp_create_inst = (PFN_vkCreateInstance)pDriver->pfnGetInstanceProcAddr(NULL, "vkCreateInstance"); |
1898 | 0 | if (NULL == fp_create_inst) { |
1899 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
1900 | 0 | "loader_add_direct_driver: Could not get 'vkCreateInstance' from VkDirectDriverLoadingInfoLUNARG structure at " |
1901 | 0 | "index %d, skipping.", |
1902 | 0 | index); |
1903 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
1904 | 0 | } |
1905 | 0 | fp_get_inst_ext_props = |
1906 | 0 | (PFN_vkEnumerateInstanceExtensionProperties)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties"); |
1907 | 0 | if (NULL == fp_get_inst_ext_props) { |
1908 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
1909 | 0 | "loader_add_direct_driver: Could not get 'vkEnumerateInstanceExtensionProperties' from " |
1910 | 0 | "VkDirectDriverLoadingInfoLUNARG structure at index %d, skipping.", |
1911 | 0 | index); |
1912 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
1913 | 0 | } |
1914 | | |
1915 | 0 | fp_get_phys_dev_proc_addr = |
1916 | 0 | (PFN_vk_icdGetPhysicalDeviceProcAddr)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdGetPhysicalDeviceProcAddr"); |
1917 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
1918 | | // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version |
1919 | | // 7 or greater, otherwise fallback to loading it from the platform dynamic linker |
1920 | | fp_enum_dxgi_adapter_phys_devs = |
1921 | | (PFN_vk_icdEnumerateAdapterPhysicalDevices)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdEnumerateAdapterPhysicalDevices"); |
1922 | | #endif |
1923 | | |
1924 | | // check for enough capacity |
1925 | 0 | if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) { |
1926 | 0 | void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity, |
1927 | 0 | icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
1928 | 0 | if (NULL == new_ptr) { |
1929 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
1930 | 0 | "loader_add_direct_driver: Realloc failed on icd library list for ICD index %u", index); |
1931 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
1932 | 0 | } |
1933 | 0 | icd_tramp_list->scanned_list = new_ptr; |
1934 | | |
1935 | | // double capacity |
1936 | 0 | icd_tramp_list->capacity *= 2; |
1937 | 0 | } |
1938 | | |
1939 | | // Driver must be 1.1 to support version 7 |
1940 | 0 | uint32_t api_version = VK_API_VERSION_1_1; |
1941 | 0 | PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version = |
1942 | 0 | (PFN_vkEnumerateInstanceVersion)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion"); |
1943 | |
|
1944 | 0 | if (icd_enumerate_instance_version) { |
1945 | 0 | VkResult res = icd_enumerate_instance_version(&api_version); |
1946 | 0 | if (res != VK_SUCCESS) { |
1947 | 0 | return res; |
1948 | 0 | } |
1949 | 0 | } |
1950 | | |
1951 | 0 | new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]); |
1952 | 0 | new_scanned_icd->handle = NULL; |
1953 | 0 | new_scanned_icd->api_version = api_version; |
1954 | 0 | new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr; |
1955 | 0 | new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr; |
1956 | 0 | new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props; |
1957 | 0 | new_scanned_icd->CreateInstance = fp_create_inst; |
1958 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
1959 | | new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs; |
1960 | | #endif |
1961 | 0 | new_scanned_icd->interface_version = interface_version; |
1962 | |
|
1963 | 0 | new_scanned_icd->lib_name = NULL; |
1964 | 0 | icd_tramp_list->count++; |
1965 | |
|
1966 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
1967 | 0 | "loader_add_direct_driver: Adding driver found in index %d of " |
1968 | 0 | "VkDirectDriverLoadingListLUNARG::pDrivers structure. pfnGetInstanceProcAddr was set to %p", |
1969 | 0 | index, pDriver->pfnGetInstanceProcAddr); |
1970 | |
|
1971 | 0 | return VK_SUCCESS; |
1972 | 0 | } |
1973 | | |
1974 | | // Search through VkInstanceCreateInfo's pNext chain for any drivers from the direct driver loading extension and load them. |
1975 | | VkResult loader_scan_for_direct_drivers(const struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo, |
1976 | 1.13k | struct loader_icd_tramp_list *icd_tramp_list, bool *direct_driver_loading_exclusive_mode) { |
1977 | 1.13k | if (NULL == pCreateInfo) { |
1978 | | // Don't do this logic unless we are being called from vkCreateInstance, when pCreateInfo will be non-null |
1979 | 0 | return VK_SUCCESS; |
1980 | 0 | } |
1981 | 1.13k | bool direct_driver_loading_enabled = false; |
1982 | | // Try to if VK_LUNARG_direct_driver_loading is enabled and if we are using it exclusively |
1983 | | // Skip this step if inst is NULL, aka when this function is being called before instance creation |
1984 | 1.13k | if (inst != NULL && pCreateInfo->ppEnabledExtensionNames && pCreateInfo->enabledExtensionCount > 0) { |
1985 | | // Look through the enabled extension list, make sure VK_LUNARG_direct_driver_loading is present |
1986 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
1987 | 0 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME) == 0) { |
1988 | 0 | direct_driver_loading_enabled = true; |
1989 | 0 | break; |
1990 | 0 | } |
1991 | 0 | } |
1992 | 0 | } |
1993 | 1.13k | const VkDirectDriverLoadingListLUNARG *ddl_list = NULL; |
1994 | | // Find the VkDirectDriverLoadingListLUNARG struct in the pNext chain of vkInstanceCreateInfo |
1995 | 1.13k | const void *pNext = pCreateInfo->pNext; |
1996 | 1.13k | while (pNext) { |
1997 | 0 | VkBaseInStructure out_structure = {0}; |
1998 | 0 | memcpy(&out_structure, pNext, sizeof(VkBaseInStructure)); |
1999 | 0 | if (out_structure.sType == VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG) { |
2000 | 0 | ddl_list = (VkDirectDriverLoadingListLUNARG *)pNext; |
2001 | 0 | break; |
2002 | 0 | } |
2003 | 0 | pNext = out_structure.pNext; |
2004 | 0 | } |
2005 | 1.13k | if (NULL == ddl_list) { |
2006 | 1.13k | if (direct_driver_loading_enabled) { |
2007 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
2008 | 0 | "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension was enabled but the " |
2009 | 0 | "pNext chain of " |
2010 | 0 | "VkInstanceCreateInfo did not contain the " |
2011 | 0 | "VkDirectDriverLoadingListLUNARG structure."); |
2012 | 0 | } |
2013 | | // Always want to exit early if there was no VkDirectDriverLoadingListLUNARG in the pNext chain |
2014 | 1.13k | return VK_SUCCESS; |
2015 | 1.13k | } |
2016 | | |
2017 | 0 | if (!direct_driver_loading_enabled) { |
2018 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
2019 | 0 | "loader_scan_for_direct_drivers: The pNext chain of VkInstanceCreateInfo contained the " |
2020 | 0 | "VkDirectDriverLoadingListLUNARG structure, but the VK_LUNARG_direct_driver_loading extension was " |
2021 | 0 | "not enabled."); |
2022 | 0 | return VK_SUCCESS; |
2023 | 0 | } |
2024 | | // If we are using exclusive mode, skip looking for any more drivers from system or environment variables |
2025 | 0 | if (ddl_list->mode == VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG) { |
2026 | 0 | *direct_driver_loading_exclusive_mode = true; |
2027 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
2028 | 0 | "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension is active and specified " |
2029 | 0 | "VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG, skipping system and environment " |
2030 | 0 | "variable driver search mechanisms."); |
2031 | 0 | } |
2032 | 0 | if (NULL == ddl_list->pDrivers) { |
2033 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
2034 | 0 | "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of " |
2035 | 0 | "VkInstanceCreateInfo has a NULL pDrivers member."); |
2036 | 0 | return VK_SUCCESS; |
2037 | 0 | } |
2038 | 0 | if (ddl_list->driverCount == 0) { |
2039 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
2040 | 0 | "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of " |
2041 | 0 | "VkInstanceCreateInfo has a non-null pDrivers member but a driverCount member with a value " |
2042 | 0 | "of zero."); |
2043 | 0 | return VK_SUCCESS; |
2044 | 0 | } |
2045 | | // Go through all VkDirectDriverLoadingInfoLUNARG entries and add each driver |
2046 | | // Because icd_tramp's are prepended, this will result in the drivers appearing at the end |
2047 | 0 | for (uint32_t i = 0; i < ddl_list->driverCount; i++) { |
2048 | 0 | VkResult res = loader_add_direct_driver(inst, i, &ddl_list->pDrivers[i], icd_tramp_list); |
2049 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
2050 | 0 | return res; |
2051 | 0 | } |
2052 | 0 | } |
2053 | | |
2054 | 0 | return VK_SUCCESS; |
2055 | 0 | } |
2056 | | |
2057 | | VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list, |
2058 | 71 | const char *filename, uint32_t api_version, enum loader_layer_library_status *lib_status) { |
2059 | 71 | loader_platform_dl_handle handle = NULL; |
2060 | 71 | PFN_vkCreateInstance fp_create_inst = NULL; |
2061 | 71 | PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL; |
2062 | 71 | PFN_vkGetInstanceProcAddr fp_get_proc_addr = NULL; |
2063 | 71 | PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL; |
2064 | 71 | PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL; |
2065 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
2066 | | PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL; |
2067 | | #endif |
2068 | 71 | struct loader_scanned_icd *new_scanned_icd = NULL; |
2069 | 71 | uint32_t interface_vers; |
2070 | 71 | VkResult res = VK_SUCCESS; |
2071 | | |
2072 | | // This shouldn't happen, but the check is necessary because dlopen returns a handle to the main program when |
2073 | | // filename is NULL |
2074 | 71 | if (filename == NULL) { |
2075 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: A NULL filename was used, skipping this ICD"); |
2076 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2077 | 0 | goto out; |
2078 | 0 | } |
2079 | | |
2080 | | // TODO implement smarter opening/closing of libraries. For now this |
2081 | | // function leaves libraries open and the scanned_icd_clear closes them |
2082 | | #if defined(__Fuchsia__) |
2083 | | handle = loader_platform_open_driver(filename); |
2084 | | #else |
2085 | 71 | handle = loader_platform_open_library(filename); |
2086 | 71 | #endif |
2087 | 71 | if (NULL == handle) { |
2088 | 71 | loader_handle_load_library_error(inst, filename, lib_status); |
2089 | 71 | if (lib_status && *lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) { |
2090 | 1 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
2091 | 70 | } else { |
2092 | 70 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2093 | 70 | } |
2094 | 71 | goto out; |
2095 | 71 | } |
2096 | | |
2097 | | // Try to load the driver's exported vk_icdNegotiateLoaderICDInterfaceVersion |
2098 | 0 | fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion"); |
2099 | | |
2100 | | // If it isn't exported, we are dealing with either a v0, v1, or a v7 and up driver |
2101 | 0 | if (NULL == fp_negotiate_icd_version) { |
2102 | | // Try to load the driver's exported vk_icdGetInstanceProcAddr - if this is a v7 or up driver, we can use it to get |
2103 | | // the driver's vk_icdNegotiateLoaderICDInterfaceVersion function |
2104 | 0 | fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr"); |
2105 | | |
2106 | | // If we successfully loaded vk_icdGetInstanceProcAddr, try to get vk_icdNegotiateLoaderICDInterfaceVersion |
2107 | 0 | if (fp_get_proc_addr) { |
2108 | 0 | fp_negotiate_icd_version = |
2109 | 0 | (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)fp_get_proc_addr(NULL, "vk_icdNegotiateLoaderICDInterfaceVersion"); |
2110 | 0 | } |
2111 | 0 | } |
2112 | | |
2113 | | // Try to negotiate the Loader and Driver Interface Versions |
2114 | | // loader_get_icd_interface_version will check if fp_negotiate_icd_version is NULL, so we don't have to. |
2115 | | // If it *is* NULL, that means this driver uses interface version 0 or 1 |
2116 | 0 | if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) { |
2117 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
2118 | 0 | "loader_scanned_icd_add: ICD %s doesn't support interface version compatible with loader, skip this ICD.", |
2119 | 0 | filename); |
2120 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2121 | 0 | goto out; |
2122 | 0 | } |
2123 | | |
2124 | | // If we didn't already query vk_icdGetInstanceProcAddr, try now |
2125 | 0 | if (NULL == fp_get_proc_addr) { |
2126 | 0 | fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr"); |
2127 | 0 | } |
2128 | | |
2129 | | // If vk_icdGetInstanceProcAddr is NULL, this ICD is using version 0 and so we should respond accordingly. |
2130 | 0 | if (NULL == fp_get_proc_addr) { |
2131 | | // Exporting vk_icdNegotiateLoaderICDInterfaceVersion but not vk_icdGetInstanceProcAddr violates Version 2's |
2132 | | // requirements, as for Version 2 to be supported Version 1 must also be supported |
2133 | 0 | if (interface_vers != 0) { |
2134 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
2135 | 0 | "loader_scanned_icd_add: ICD %s reports an interface version of %d but doesn't export " |
2136 | 0 | "vk_icdGetInstanceProcAddr, skip this ICD.", |
2137 | 0 | filename, interface_vers); |
2138 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2139 | 0 | goto out; |
2140 | 0 | } |
2141 | | // Use deprecated interface from version 0 |
2142 | 0 | fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr"); |
2143 | 0 | if (NULL == fp_get_proc_addr) { |
2144 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
2145 | 0 | "loader_scanned_icd_add: Attempt to retrieve either \'vkGetInstanceProcAddr\' or " |
2146 | 0 | "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.", |
2147 | 0 | filename); |
2148 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2149 | 0 | goto out; |
2150 | 0 | } else { |
2151 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2152 | 0 | "loader_scanned_icd_add: Using deprecated ICD interface of \'vkGetInstanceProcAddr\' instead of " |
2153 | 0 | "\'vk_icdGetInstanceProcAddr\' for ICD %s", |
2154 | 0 | filename); |
2155 | 0 | } |
2156 | 0 | fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance"); |
2157 | 0 | if (NULL == fp_create_inst) { |
2158 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
2159 | 0 | "loader_scanned_icd_add: Failed querying \'vkCreateInstance\' via dlsym/LoadLibrary for ICD %s", filename); |
2160 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2161 | 0 | goto out; |
2162 | 0 | } |
2163 | 0 | fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties"); |
2164 | 0 | if (NULL == fp_get_inst_ext_props) { |
2165 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
2166 | 0 | "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via dlsym/LoadLibrary " |
2167 | 0 | "for ICD %s", |
2168 | 0 | filename); |
2169 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2170 | 0 | goto out; |
2171 | 0 | } |
2172 | 0 | } else { |
2173 | | // vk_icdGetInstanceProcAddr was successfully found, we can assume the version is at least one |
2174 | | // If vk_icdNegotiateLoaderICDInterfaceVersion was also found, interface_vers must be 2 or greater, so this check is |
2175 | | // fine |
2176 | 0 | if (interface_vers == 0) { |
2177 | 0 | interface_vers = 1; |
2178 | 0 | } |
2179 | |
|
2180 | 0 | fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance"); |
2181 | 0 | if (NULL == fp_create_inst) { |
2182 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
2183 | 0 | "loader_scanned_icd_add: Could not get \'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\' for ICD %s", |
2184 | 0 | filename); |
2185 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2186 | 0 | goto out; |
2187 | 0 | } |
2188 | 0 | fp_get_inst_ext_props = |
2189 | 0 | (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties"); |
2190 | 0 | if (NULL == fp_get_inst_ext_props) { |
2191 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
2192 | 0 | "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via " |
2193 | 0 | "\'vk_icdGetInstanceProcAddr\' for ICD %s", |
2194 | 0 | filename); |
2195 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
2196 | 0 | goto out; |
2197 | 0 | } |
2198 | | // Query "vk_icdGetPhysicalDeviceProcAddr" with vk_icdGetInstanceProcAddr if the library reports interface version 7 or |
2199 | | // greater, otherwise fallback to loading it from the platform dynamic linker |
2200 | 0 | if (interface_vers >= 7) { |
2201 | 0 | fp_get_phys_dev_proc_addr = |
2202 | 0 | (PFN_vk_icdGetPhysicalDeviceProcAddr)fp_get_proc_addr(NULL, "vk_icdGetPhysicalDeviceProcAddr"); |
2203 | 0 | } |
2204 | 0 | if (NULL == fp_get_phys_dev_proc_addr && interface_vers >= 3) { |
2205 | 0 | fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr"); |
2206 | 0 | } |
2207 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
2208 | | // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version |
2209 | | // 7 or greater, otherwise fallback to loading it from the platform dynamic linker |
2210 | | if (interface_vers >= 7) { |
2211 | | fp_enum_dxgi_adapter_phys_devs = |
2212 | | (PFN_vk_icdEnumerateAdapterPhysicalDevices)fp_get_proc_addr(NULL, "vk_icdEnumerateAdapterPhysicalDevices"); |
2213 | | } |
2214 | | if (NULL == fp_enum_dxgi_adapter_phys_devs && interface_vers >= 6) { |
2215 | | fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices"); |
2216 | | } |
2217 | | #endif |
2218 | 0 | } |
2219 | | |
2220 | | // check for enough capacity |
2221 | 0 | if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) { |
2222 | 0 | void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity, |
2223 | 0 | icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
2224 | 0 | if (NULL == new_ptr) { |
2225 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
2226 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s", |
2227 | 0 | filename); |
2228 | 0 | goto out; |
2229 | 0 | } |
2230 | 0 | icd_tramp_list->scanned_list = new_ptr; |
2231 | | |
2232 | | // double capacity |
2233 | 0 | icd_tramp_list->capacity *= 2; |
2234 | 0 | } |
2235 | | |
2236 | 0 | loader_api_version api_version_struct = loader_make_version(api_version); |
2237 | 0 | if (interface_vers <= 4 && loader_check_version_meets_required(LOADER_VERSION_1_1_0, api_version_struct)) { |
2238 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2239 | 0 | "loader_scanned_icd_add: Driver %s supports Vulkan %u.%u, but only supports loader interface version %u." |
2240 | 0 | " Interface version 5 or newer required to support this version of Vulkan (Policy #LDP_DRIVER_7)", |
2241 | 0 | filename, api_version_struct.major, api_version_struct.minor, interface_vers); |
2242 | 0 | } |
2243 | |
|
2244 | 0 | new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]); |
2245 | 0 | new_scanned_icd->handle = handle; |
2246 | 0 | new_scanned_icd->api_version = api_version; |
2247 | 0 | new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr; |
2248 | 0 | new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr; |
2249 | 0 | new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props; |
2250 | 0 | new_scanned_icd->CreateInstance = fp_create_inst; |
2251 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
2252 | | new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs; |
2253 | | #endif |
2254 | 0 | new_scanned_icd->interface_version = interface_vers; |
2255 | |
|
2256 | 0 | res = loader_copy_to_new_str(inst, filename, &new_scanned_icd->lib_name); |
2257 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
2258 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename); |
2259 | 0 | goto out; |
2260 | 0 | } |
2261 | 0 | icd_tramp_list->count++; |
2262 | | |
2263 | | // Uses OS calls to find the 'true' path to the binary, for more accurate logging later on. |
2264 | 0 | res = fixup_library_binary_path(inst, &(new_scanned_icd->lib_name), new_scanned_icd->handle, fp_get_proc_addr); |
2265 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
2266 | 0 | goto out; |
2267 | 0 | } |
2268 | 71 | out: |
2269 | 71 | if (res != VK_SUCCESS) { |
2270 | 71 | if (NULL != handle) { |
2271 | 0 | loader_platform_close_library(handle); |
2272 | 0 | } |
2273 | 71 | } |
2274 | | |
2275 | 71 | return res; |
2276 | 0 | } |
2277 | | |
2278 | | #if defined(_WIN32) |
2279 | | BOOL __stdcall loader_initialize(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *Context) { |
2280 | | (void)InitOnce; |
2281 | | (void)Parameter; |
2282 | | (void)Context; |
2283 | | #else |
2284 | 2 | void loader_initialize(void) { |
2285 | 2 | loader_platform_thread_create_mutex(&loader_lock); |
2286 | 2 | loader_platform_thread_create_mutex(&loader_preload_icd_lock); |
2287 | 2 | loader_platform_thread_create_mutex(&loader_global_instance_list_lock); |
2288 | 2 | init_global_loader_settings(); |
2289 | 2 | #endif |
2290 | | |
2291 | | // initialize logging |
2292 | 2 | loader_init_global_debug_level(); |
2293 | | #if defined(_WIN32) |
2294 | | windows_initialization(); |
2295 | | #endif |
2296 | | |
2297 | 2 | loader_api_version version = loader_make_full_version(VK_HEADER_VERSION_COMPLETE); |
2298 | 2 | loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "Vulkan Loader Version %d.%d.%d", version.major, version.minor, version.patch); |
2299 | | |
2300 | | #if defined(GIT_BRANCH_NAME) && defined(GIT_TAG_INFO) |
2301 | | loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "[Vulkan Loader Git - Tag: " GIT_BRANCH_NAME ", Branch/Commit: " GIT_TAG_INFO "]"); |
2302 | | #endif |
2303 | | |
2304 | 2 | char *loader_disable_dynamic_library_unloading_env_var = loader_getenv("VK_LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING", NULL); |
2305 | 2 | if (loader_disable_dynamic_library_unloading_env_var && |
2306 | 0 | 0 == strncmp(loader_disable_dynamic_library_unloading_env_var, "1", 2)) { |
2307 | 0 | loader_disable_dynamic_library_unloading = true; |
2308 | 0 | loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: library unloading is disabled"); |
2309 | 2 | } else { |
2310 | 2 | loader_disable_dynamic_library_unloading = false; |
2311 | 2 | } |
2312 | 2 | loader_free_getenv(loader_disable_dynamic_library_unloading_env_var, NULL); |
2313 | | #if defined(LOADER_USE_UNSAFE_FILE_SEARCH) |
2314 | | loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: unsafe searching is enabled"); |
2315 | | #endif |
2316 | | #if defined(_WIN32) |
2317 | | return TRUE; |
2318 | | #endif |
2319 | 2 | } |
2320 | | |
2321 | 0 | void loader_release(void) { |
2322 | | // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance. |
2323 | 0 | loader_unload_preloaded_icds(); |
2324 | | |
2325 | | // release mutexes |
2326 | 0 | teardown_global_loader_settings(); |
2327 | 0 | loader_platform_thread_delete_mutex(&loader_lock); |
2328 | 0 | loader_platform_thread_delete_mutex(&loader_preload_icd_lock); |
2329 | 0 | loader_platform_thread_delete_mutex(&loader_global_instance_list_lock); |
2330 | 0 | } |
2331 | | |
2332 | | // Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later |
2333 | 0 | void loader_preload_icds(void) { |
2334 | 0 | loader_platform_thread_lock_mutex(&loader_preload_icd_lock); |
2335 | | |
2336 | | // Already preloaded, skip loading again. |
2337 | 0 | if (preloaded_icds.scanned_list != NULL) { |
2338 | 0 | loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); |
2339 | 0 | return; |
2340 | 0 | } |
2341 | | |
2342 | 0 | VkResult result = loader_icd_scan(NULL, &preloaded_icds, NULL, NULL); |
2343 | 0 | if (result != VK_SUCCESS) { |
2344 | 0 | loader_clear_scanned_icd_list(NULL, &preloaded_icds); |
2345 | 0 | } |
2346 | 0 | loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); |
2347 | 0 | } |
2348 | | |
2349 | | // Release the ICD libraries that were preloaded |
2350 | 0 | void loader_unload_preloaded_icds(void) { |
2351 | 0 | loader_platform_thread_lock_mutex(&loader_preload_icd_lock); |
2352 | 0 | loader_clear_scanned_icd_list(NULL, &preloaded_icds); |
2353 | 0 | loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); |
2354 | 0 | } |
2355 | | |
2356 | | #if !defined(_WIN32) |
2357 | 2 | __attribute__((constructor)) void loader_init_library(void) { loader_initialize(); } |
2358 | | |
2359 | 0 | __attribute__((destructor)) void loader_free_library(void) { loader_release(); } |
2360 | | #endif |
2361 | | |
2362 | | // Get next file or dirname given a string list or registry key path |
2363 | | // |
2364 | | // \returns |
2365 | | // A pointer to first char in the next path. |
2366 | | // The next path (or NULL) in the list is returned in next_path. |
2367 | | // Note: input string is modified in some cases. PASS IN A COPY! |
2368 | 259k | char *loader_get_next_path(char *path) { |
2369 | 259k | uint32_t len; |
2370 | 259k | char *next; |
2371 | | |
2372 | 259k | if (path == NULL) return NULL; |
2373 | 259k | next = strchr(path, PATH_SEPARATOR); |
2374 | 259k | if (next == NULL) { |
2375 | 68.1k | len = (uint32_t)strlen(path); |
2376 | 68.1k | next = path + len; |
2377 | 191k | } else { |
2378 | 191k | *next = '\0'; |
2379 | 191k | next++; |
2380 | 191k | } |
2381 | | |
2382 | 259k | return next; |
2383 | 259k | } |
2384 | | |
2385 | | /* Processes a json manifest's library_path and the location of the json manifest to create the path of the library |
2386 | | * The output is stored in out_fullpath by allocating a string - so its the caller's responsibility to free it |
2387 | | * The output is the combination of the base path of manifest_file_path concatenated with library path |
2388 | | * If library_path is an absolute path, we do not prepend the base path of manifest_file_path |
2389 | | * |
2390 | | * This function takes ownership of library_path - caller does not need to worry about freeing it. |
2391 | | */ |
2392 | | VkResult combine_manifest_directory_and_library_path(const struct loader_instance *inst, char *library_path, |
2393 | 10.8k | const char *manifest_file_path, char **out_fullpath) { |
2394 | 10.8k | assert(library_path && manifest_file_path && out_fullpath); |
2395 | 10.8k | if (loader_platform_is_path_absolute(library_path)) { |
2396 | 294 | *out_fullpath = library_path; |
2397 | 294 | return VK_SUCCESS; |
2398 | 294 | } |
2399 | 10.5k | VkResult res = VK_SUCCESS; |
2400 | | |
2401 | 10.5k | size_t library_path_len = strlen(library_path); |
2402 | 10.5k | size_t manifest_file_path_str_len = strlen(manifest_file_path); |
2403 | 10.5k | bool library_path_contains_directory_symbol = false; |
2404 | 429M | for (size_t i = 0; i < library_path_len; i++) { |
2405 | 429M | if (library_path[i] == DIRECTORY_SYMBOL) { |
2406 | 4.10k | library_path_contains_directory_symbol = true; |
2407 | 4.10k | break; |
2408 | 4.10k | } |
2409 | 429M | } |
2410 | | // Means that the library_path is neither absolute nor relative - thus we should not modify it at all |
2411 | 10.5k | if (!library_path_contains_directory_symbol) { |
2412 | 6.46k | *out_fullpath = library_path; |
2413 | 6.46k | return VK_SUCCESS; |
2414 | 6.46k | } |
2415 | | // must include both a directory symbol and the null terminator |
2416 | 4.10k | size_t new_str_len = library_path_len + manifest_file_path_str_len + 1 + 1; |
2417 | | |
2418 | 4.10k | *out_fullpath = loader_instance_heap_calloc(inst, new_str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
2419 | 4.10k | if (NULL == *out_fullpath) { |
2420 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
2421 | 0 | goto out; |
2422 | 0 | } |
2423 | 4.10k | size_t cur_loc_in_out_fullpath = 0; |
2424 | | // look for the last occurrence of DIRECTORY_SYMBOL in manifest_file_path |
2425 | 4.10k | size_t last_directory_symbol = 0; |
2426 | 4.10k | bool found_directory_symbol = false; |
2427 | 139k | for (size_t i = 0; i < manifest_file_path_str_len; i++) { |
2428 | 135k | if (manifest_file_path[i] == DIRECTORY_SYMBOL) { |
2429 | 11.3k | last_directory_symbol = i + 1; // we want to include the symbol |
2430 | 11.3k | found_directory_symbol = true; |
2431 | | // dont break because we want to find the last occurrence |
2432 | 11.3k | } |
2433 | 135k | } |
2434 | | // Add manifest_file_path up to the last directory symbol |
2435 | 4.10k | if (found_directory_symbol) { |
2436 | 3.27k | loader_strncpy(*out_fullpath, new_str_len, manifest_file_path, last_directory_symbol); |
2437 | 3.27k | cur_loc_in_out_fullpath += last_directory_symbol; |
2438 | 3.27k | } |
2439 | 4.10k | loader_strncpy(&(*out_fullpath)[cur_loc_in_out_fullpath], new_str_len - cur_loc_in_out_fullpath, library_path, |
2440 | 4.10k | library_path_len); |
2441 | 4.10k | cur_loc_in_out_fullpath += library_path_len + 1; |
2442 | 4.10k | (*out_fullpath)[cur_loc_in_out_fullpath] = '\0'; |
2443 | | |
2444 | 4.10k | out: |
2445 | 4.10k | loader_instance_heap_free(inst, library_path); |
2446 | | |
2447 | 4.10k | return res; |
2448 | 4.10k | } |
2449 | | |
2450 | | // Given a filename (file) and a list of paths (in_dirs), try to find an existing |
2451 | | // file in the paths. If filename already is a path then no searching in the given paths. |
2452 | | // |
2453 | | // @return - A string in out_fullpath of either the full path or file. |
2454 | 36.4k | void loader_get_fullpath(const char *file, const char *in_dirs, size_t out_size, char *out_fullpath) { |
2455 | 36.4k | if (!loader_platform_is_path(file) && *in_dirs) { |
2456 | 36.4k | size_t dirs_copy_len = strlen(in_dirs) + 1; |
2457 | 36.4k | char *dirs_copy = loader_stack_alloc(dirs_copy_len); |
2458 | 36.4k | loader_strncpy(dirs_copy, dirs_copy_len, in_dirs, dirs_copy_len); |
2459 | | |
2460 | | // find if file exists after prepending paths in given list |
2461 | | // for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) { |
2462 | 36.4k | char *dir = dirs_copy; |
2463 | 36.4k | char *next_dir = loader_get_next_path(dir); |
2464 | 36.7k | while (*dir && next_dir) { |
2465 | 36.4k | int path_concat_ret = snprintf(out_fullpath, out_size, "%s%c%s", dir, DIRECTORY_SYMBOL, file); |
2466 | 36.4k | if (path_concat_ret < 0) { |
2467 | 0 | continue; |
2468 | 0 | } |
2469 | 36.4k | if (loader_platform_file_exists(out_fullpath)) { |
2470 | 36.0k | return; |
2471 | 36.0k | } |
2472 | 328 | dir = next_dir; |
2473 | 328 | next_dir = loader_get_next_path(dir); |
2474 | 328 | } |
2475 | 36.4k | } |
2476 | | |
2477 | 386 | (void)snprintf(out_fullpath, out_size, "%s", file); |
2478 | 386 | } |
2479 | | |
2480 | | // Verify that all component layers in a meta-layer are valid. |
2481 | | // This function is potentially recursive so we pass in an array of "already checked" (length of the instance_layers->count) meta |
2482 | | // layers, preventing a stack overflow verifying meta layers that are each other's component layers |
2483 | | bool verify_meta_layer_component_layers(const struct loader_instance *inst, size_t prop_index, |
2484 | 16.7k | struct loader_layer_list *instance_layers, bool *already_checked_meta_layers) { |
2485 | 16.7k | struct loader_layer_properties *prop = &instance_layers->list[prop_index]; |
2486 | 16.7k | loader_api_version meta_layer_version = loader_make_version(prop->info.specVersion); |
2487 | | |
2488 | 16.7k | if (NULL == already_checked_meta_layers) { |
2489 | 12.4k | already_checked_meta_layers = loader_stack_alloc(sizeof(bool) * instance_layers->count); |
2490 | 12.4k | if (already_checked_meta_layers == NULL) { |
2491 | 0 | return false; |
2492 | 0 | } |
2493 | 12.4k | memset(already_checked_meta_layers, 0, sizeof(bool) * instance_layers->count); |
2494 | 12.4k | } |
2495 | | |
2496 | | // Mark this meta layer as 'already checked', indicating which layers have already been recursed. |
2497 | 16.7k | already_checked_meta_layers[prop_index] = true; |
2498 | | |
2499 | 23.8k | for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) { |
2500 | 13.1k | struct loader_layer_properties *comp_prop = |
2501 | 13.1k | loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers); |
2502 | 13.1k | if (comp_prop == NULL) { |
2503 | 2.24k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2504 | 2.24k | "verify_meta_layer_component_layers: Meta-layer %s can't find component layer %s at index %d." |
2505 | 2.24k | " Skipping this layer.", |
2506 | 2.24k | prop->info.layerName, prop->component_layer_names.list[comp_layer], comp_layer); |
2507 | | |
2508 | 2.24k | return false; |
2509 | 2.24k | } |
2510 | | |
2511 | | // Check the version of each layer, they need to be at least MAJOR and MINOR |
2512 | 10.9k | loader_api_version comp_prop_version = loader_make_version(comp_prop->info.specVersion); |
2513 | 10.9k | if (!loader_check_version_meets_required(meta_layer_version, comp_prop_version)) { |
2514 | 1.12k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2515 | 1.12k | "verify_meta_layer_component_layers: Meta-layer uses API version %d.%d, but component " |
2516 | 1.12k | "layer %d has API version %d.%d that is lower. Skipping this layer.", |
2517 | 1.12k | meta_layer_version.major, meta_layer_version.minor, comp_layer, comp_prop_version.major, |
2518 | 1.12k | comp_prop_version.minor); |
2519 | | |
2520 | 1.12k | return false; |
2521 | 1.12k | } |
2522 | | |
2523 | | // Make sure the layer isn't using it's own name |
2524 | 9.79k | if (!strcmp(prop->info.layerName, prop->component_layer_names.list[comp_layer])) { |
2525 | 1.24k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2526 | 1.24k | "verify_meta_layer_component_layers: Meta-layer %s lists itself in its component layer " |
2527 | 1.24k | "list at index %d. Skipping this layer.", |
2528 | 1.24k | prop->info.layerName, comp_layer); |
2529 | | |
2530 | 1.24k | return false; |
2531 | 1.24k | } |
2532 | 8.55k | if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { |
2533 | 4.57k | size_t comp_prop_index = INT32_MAX; |
2534 | | // Make sure we haven't verified this meta layer before |
2535 | 5.95M | for (uint32_t i = 0; i < instance_layers->count; i++) { |
2536 | 5.95M | if (strcmp(comp_prop->info.layerName, instance_layers->list[i].info.layerName) == 0) { |
2537 | 48.3k | comp_prop_index = i; |
2538 | 48.3k | } |
2539 | 5.95M | } |
2540 | 4.57k | if (comp_prop_index != INT32_MAX && already_checked_meta_layers[comp_prop_index]) { |
2541 | 279 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2542 | 279 | "verify_meta_layer_component_layers: Recursive dependency between Meta-layer %s and Meta-layer %s. " |
2543 | 279 | "Skipping this layer.", |
2544 | 279 | instance_layers->list[prop_index].info.layerName, comp_prop->info.layerName); |
2545 | 279 | return false; |
2546 | 279 | } |
2547 | | |
2548 | 4.29k | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
2549 | 4.29k | "verify_meta_layer_component_layers: Adding meta-layer %s which also contains meta-layer %s", |
2550 | 4.29k | prop->info.layerName, comp_prop->info.layerName); |
2551 | | |
2552 | | // Make sure if the layer is using a meta-layer in its component list that we also verify that. |
2553 | 4.29k | if (!verify_meta_layer_component_layers(inst, comp_prop_index, instance_layers, already_checked_meta_layers)) { |
2554 | 1.21k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2555 | 1.21k | "Meta-layer %s component layer %s can not find all component layers." |
2556 | 1.21k | " Skipping this layer.", |
2557 | 1.21k | prop->info.layerName, prop->component_layer_names.list[comp_layer]); |
2558 | 1.21k | return false; |
2559 | 1.21k | } |
2560 | 4.29k | } |
2561 | 8.55k | } |
2562 | | // Didn't exit early so that means it passed all checks |
2563 | 10.6k | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
2564 | 10.6k | "Meta-layer \"%s\" all %d component layers appear to be valid.", prop->info.layerName, |
2565 | 10.6k | prop->component_layer_names.count); |
2566 | | |
2567 | | // If layer logging is on, list the internals included in the meta-layer |
2568 | 17.4k | for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) { |
2569 | 6.80k | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " [%d] %s", comp_layer, prop->component_layer_names.list[comp_layer]); |
2570 | 6.80k | } |
2571 | 10.6k | return true; |
2572 | 16.7k | } |
2573 | | |
2574 | | // Add any instance and device extensions from component layers to this layer |
2575 | | // list, so that anyone querying extensions will only need to look at the meta-layer |
2576 | | bool update_meta_layer_extensions_from_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop, |
2577 | 7.59k | struct loader_layer_list *instance_layers) { |
2578 | 7.59k | VkResult res = VK_SUCCESS; |
2579 | 11.8k | for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) { |
2580 | 4.21k | struct loader_layer_properties *comp_prop = |
2581 | 4.21k | loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers); |
2582 | | |
2583 | 4.21k | if (NULL != comp_prop->instance_extension_list.list) { |
2584 | 12.8k | for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) { |
2585 | 12.3k | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding instance extension %s", |
2586 | 12.3k | prop->info.layerName, prop->component_layer_names.list[comp_layer], |
2587 | 12.3k | comp_prop->instance_extension_list.list[ext].extensionName); |
2588 | | |
2589 | 12.3k | if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) { |
2590 | 11.7k | res = loader_add_to_ext_list(inst, &prop->instance_extension_list, 1, |
2591 | 11.7k | &comp_prop->instance_extension_list.list[ext]); |
2592 | 11.7k | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
2593 | 0 | return res; |
2594 | 0 | } |
2595 | 11.7k | } |
2596 | 12.3k | } |
2597 | 579 | } |
2598 | 4.21k | if (NULL != comp_prop->device_extension_list.list) { |
2599 | 21.0k | for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) { |
2600 | 19.9k | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding device extension %s", |
2601 | 19.9k | prop->info.layerName, prop->component_layer_names.list[comp_layer], |
2602 | 19.9k | comp_prop->device_extension_list.list[ext].props.extensionName); |
2603 | | |
2604 | 19.9k | if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) { |
2605 | 19.5k | loader_add_to_dev_ext_list(inst, &prop->device_extension_list, |
2606 | 19.5k | &comp_prop->device_extension_list.list[ext].props, NULL); |
2607 | 19.5k | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
2608 | 0 | return res; |
2609 | 0 | } |
2610 | 19.5k | } |
2611 | 19.9k | } |
2612 | 1.01k | } |
2613 | 4.21k | } |
2614 | 7.59k | return res; |
2615 | 7.59k | } |
2616 | | |
2617 | | // Verify that all meta-layers in a layer list are valid. |
2618 | | VkResult verify_all_meta_layers(struct loader_instance *inst, const struct loader_envvar_all_filters *filters, |
2619 | 7.30k | struct loader_layer_list *instance_layers, bool *override_layer_present) { |
2620 | 7.30k | VkResult res = VK_SUCCESS; |
2621 | 7.30k | *override_layer_present = false; |
2622 | 928k | for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) { |
2623 | 921k | struct loader_layer_properties *prop = &instance_layers->list[i]; |
2624 | | |
2625 | | // If this is a meta-layer, make sure it is valid |
2626 | 921k | if (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { |
2627 | 12.4k | if (verify_meta_layer_component_layers(inst, i, instance_layers, NULL)) { |
2628 | | // If any meta layer is valid, update its extension list to include the extensions from its component layers. |
2629 | 7.59k | res = update_meta_layer_extensions_from_component_layers(inst, prop, instance_layers); |
2630 | 7.59k | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
2631 | 0 | return res; |
2632 | 0 | } |
2633 | 7.59k | if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop)) { |
2634 | 264 | *override_layer_present = true; |
2635 | 264 | } |
2636 | 7.59k | } else { |
2637 | 4.88k | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
2638 | 4.88k | "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName); |
2639 | | |
2640 | 4.88k | loader_remove_layer_in_list(inst, instance_layers, i); |
2641 | 4.88k | i--; |
2642 | 4.88k | } |
2643 | 12.4k | } |
2644 | 921k | } |
2645 | 7.30k | return res; |
2646 | 7.30k | } |
2647 | | |
2648 | | // If the current working directory matches any app_key_path of the layers, remove all other override layers. |
2649 | | // Otherwise if no matching app_key was found, remove all but the global override layer, which has no app_key_path. |
2650 | 7.30k | void remove_all_non_valid_override_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) { |
2651 | 7.30k | if (instance_layers == NULL) { |
2652 | 0 | return; |
2653 | 0 | } |
2654 | | |
2655 | 7.30k | char cur_path[1024]; |
2656 | 7.30k | char *ret = loader_platform_executable_path(cur_path, 1024); |
2657 | 7.30k | if (NULL == ret) { |
2658 | 0 | return; |
2659 | 0 | } |
2660 | | // Find out if there is an override layer with same the app_key_path as the path to the current executable. |
2661 | | // If more than one is found, remove it and use the first layer |
2662 | | // Remove any layers which aren't global and do not have the same app_key_path as the path to the current executable. |
2663 | 7.30k | bool found_active_override_layer = false; |
2664 | 7.30k | int global_layer_index = -1; |
2665 | 923k | for (uint32_t i = 0; i < instance_layers->count; i++) { |
2666 | 915k | struct loader_layer_properties *props = &instance_layers->list[i]; |
2667 | 915k | if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) { |
2668 | 2.34k | if (props->app_key_paths.count > 0) { // not the global layer |
2669 | 41.9k | for (uint32_t j = 0; j < props->app_key_paths.count; j++) { |
2670 | 41.3k | if (strcmp(props->app_key_paths.list[j], cur_path) == 0) { |
2671 | 0 | if (!found_active_override_layer) { |
2672 | 0 | found_active_override_layer = true; |
2673 | 0 | } else { |
2674 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
2675 | 0 | "remove_all_non_valid_override_layers: Multiple override layers where the same path in " |
2676 | 0 | "app_keys " |
2677 | 0 | "was found. Using the first layer found"); |
2678 | | |
2679 | | // Remove duplicate active override layers that have the same app_key_path |
2680 | 0 | loader_remove_layer_in_list(inst, instance_layers, i); |
2681 | 0 | i--; |
2682 | 0 | } |
2683 | 0 | } |
2684 | 41.3k | } |
2685 | 651 | if (!found_active_override_layer) { |
2686 | 651 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
2687 | 651 | "--Override layer found but not used because app \'%s\' is not in \'app_keys\' list!", cur_path); |
2688 | | |
2689 | | // Remove non-global override layers that don't have an app_key that matches cur_path |
2690 | 651 | loader_remove_layer_in_list(inst, instance_layers, i); |
2691 | 651 | i--; |
2692 | 651 | } |
2693 | 1.69k | } else { |
2694 | 1.69k | if (global_layer_index == -1) { |
2695 | 487 | global_layer_index = i; |
2696 | 1.20k | } else { |
2697 | 1.20k | loader_log( |
2698 | 1.20k | inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
2699 | 1.20k | "remove_all_non_valid_override_layers: Multiple global override layers found. Using the first global " |
2700 | 1.20k | "layer found"); |
2701 | 1.20k | loader_remove_layer_in_list(inst, instance_layers, i); |
2702 | 1.20k | i--; |
2703 | 1.20k | } |
2704 | 1.69k | } |
2705 | 2.34k | } |
2706 | 915k | } |
2707 | | // Remove global layer if layer with same the app_key_path as the path to the current executable is found |
2708 | 7.30k | if (found_active_override_layer && global_layer_index >= 0) { |
2709 | 0 | loader_remove_layer_in_list(inst, instance_layers, global_layer_index); |
2710 | 0 | } |
2711 | | // Should be at most 1 override layer in the list now. |
2712 | 7.30k | if (found_active_override_layer) { |
2713 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the override layer for app key %s", cur_path); |
2714 | 7.30k | } else if (global_layer_index >= 0) { |
2715 | 487 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the global override layer"); |
2716 | 487 | } |
2717 | 7.30k | } |
2718 | | |
2719 | | /* The following are required in the "layer" object: |
2720 | | * "name" |
2721 | | * "type" |
2722 | | * (for non-meta layers) "library_path" |
2723 | | * (for meta layers) "component_layers" |
2724 | | * "api_version" |
2725 | | * "implementation_version" |
2726 | | * "description" |
2727 | | * (for implicit layers) "disable_environment" |
2728 | | */ |
2729 | | |
2730 | | VkResult loader_read_layer_json(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, |
2731 | 997k | cJSON *layer_node, loader_api_version version, bool is_implicit, char *filename) { |
2732 | 997k | assert(layer_instance_list); |
2733 | 997k | char *library_path = NULL; |
2734 | 997k | VkResult result = VK_SUCCESS; |
2735 | 997k | struct loader_layer_properties props = {0}; |
2736 | | |
2737 | 997k | result = loader_copy_to_new_str(inst, filename, &props.manifest_file_name); |
2738 | 997k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) { |
2739 | 0 | goto out; |
2740 | 0 | } |
2741 | | |
2742 | | // Parse name |
2743 | | |
2744 | 997k | result = loader_parse_json_string_to_existing_str(layer_node, "name", VK_MAX_EXTENSION_NAME_SIZE, props.info.layerName); |
2745 | 997k | if (VK_ERROR_INITIALIZATION_FAILED == result) { |
2746 | 60.5k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2747 | 60.5k | "Layer located at %s didn't find required layer value \"name\" in manifest JSON file, skipping this layer", |
2748 | 60.5k | filename); |
2749 | 60.5k | goto out; |
2750 | 60.5k | } |
2751 | | |
2752 | | // Check if this layer's name matches the override layer name, set is_override to true if so. |
2753 | 936k | if (!strcmp(props.info.layerName, VK_OVERRIDE_LAYER_NAME)) { |
2754 | 5.47k | props.is_override = true; |
2755 | 5.47k | } |
2756 | | |
2757 | 936k | if (0 != strncmp(props.info.layerName, "VK_LAYER_", 9)) { |
2758 | 633k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, "Layer name %s does not conform to naming standard (Policy #LLP_LAYER_3)", |
2759 | 633k | props.info.layerName); |
2760 | 633k | } |
2761 | | |
2762 | | // Parse type |
2763 | 936k | char *type = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "type")); |
2764 | 936k | if (NULL == type) { |
2765 | 905k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2766 | 905k | "Layer located at %s didn't find required layer value \"type\" in manifest JSON file, skipping this layer", |
2767 | 905k | filename); |
2768 | 905k | goto out; |
2769 | 905k | } |
2770 | | |
2771 | | // Add list entry |
2772 | 31.2k | if (!strcmp(type, "DEVICE")) { |
2773 | 209 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Device layers are deprecated. Skipping layer %s", |
2774 | 209 | props.info.layerName); |
2775 | 209 | result = VK_ERROR_INITIALIZATION_FAILED; |
2776 | 209 | goto out; |
2777 | 209 | } |
2778 | | |
2779 | | // Allow either GLOBAL or INSTANCE type interchangeably to handle layers that must work with older loaders |
2780 | 31.0k | if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) { |
2781 | 30.0k | props.type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER; |
2782 | 30.0k | if (!is_implicit) { |
2783 | 5.91k | props.type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER; |
2784 | 5.91k | } |
2785 | 30.0k | } else { |
2786 | 950 | result = VK_ERROR_INITIALIZATION_FAILED; |
2787 | 950 | goto out; |
2788 | 950 | } |
2789 | | |
2790 | | // Parse api_version |
2791 | 30.0k | char *api_version = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "api_version")); |
2792 | 30.0k | if (NULL == api_version) { |
2793 | 1.28k | loader_log( |
2794 | 1.28k | inst, VULKAN_LOADER_WARN_BIT, 0, |
2795 | 1.28k | "Layer located at %s didn't find required layer value \"api_version\" in manifest JSON file, skipping this layer", |
2796 | 1.28k | filename); |
2797 | 1.28k | goto out; |
2798 | 1.28k | } |
2799 | | |
2800 | 28.7k | props.info.specVersion = loader_parse_version_string(api_version); |
2801 | | |
2802 | | // Make sure the layer's manifest doesn't contain a non zero variant value |
2803 | 28.7k | if (VK_API_VERSION_VARIANT(props.info.specVersion) != 0) { |
2804 | 714 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
2805 | 714 | "Layer \"%s\" has an \'api_version\' field which contains a non-zero variant value of %d. " |
2806 | 714 | " Skipping Layer.", |
2807 | 714 | props.info.layerName, VK_API_VERSION_VARIANT(props.info.specVersion)); |
2808 | 714 | result = VK_ERROR_INITIALIZATION_FAILED; |
2809 | 714 | goto out; |
2810 | 714 | } |
2811 | | |
2812 | | // Parse implementation_version |
2813 | 28.0k | char *implementation_version = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "implementation_version")); |
2814 | 28.0k | if (NULL == implementation_version) { |
2815 | 1.92k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2816 | 1.92k | "Layer located at %s didn't find required layer value \"implementation_version\" in manifest JSON file, " |
2817 | 1.92k | "skipping this layer", |
2818 | 1.92k | filename); |
2819 | 1.92k | goto out; |
2820 | 1.92k | } |
2821 | 26.1k | props.info.implementationVersion = atoi(implementation_version); |
2822 | | |
2823 | | // Parse description |
2824 | | |
2825 | 26.1k | result = |
2826 | 26.1k | loader_parse_json_string_to_existing_str(layer_node, "description", VK_MAX_EXTENSION_NAME_SIZE, props.info.description); |
2827 | 26.1k | if (VK_ERROR_INITIALIZATION_FAILED == result) { |
2828 | 629 | loader_log( |
2829 | 629 | inst, VULKAN_LOADER_WARN_BIT, 0, |
2830 | 629 | "Layer located at %s didn't find required layer value \"description\" in manifest JSON file, skipping this layer", |
2831 | 629 | filename); |
2832 | 629 | goto out; |
2833 | 629 | } |
2834 | | |
2835 | | // Parse library_path |
2836 | | |
2837 | | // Library path no longer required unless component_layers is also not defined |
2838 | 25.5k | result = loader_parse_json_string(layer_node, "library_path", &library_path); |
2839 | 25.5k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) { |
2840 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2841 | 0 | "Skipping layer \"%s\" due to problem accessing the library_path value in the manifest JSON file", |
2842 | 0 | props.info.layerName); |
2843 | 0 | result = VK_ERROR_OUT_OF_HOST_MEMORY; |
2844 | 0 | goto out; |
2845 | 0 | } |
2846 | 25.5k | if (NULL != library_path) { |
2847 | 10.0k | if (NULL != loader_cJSON_GetObjectItem(layer_node, "component_layers")) { |
2848 | 207 | loader_log( |
2849 | 207 | inst, VULKAN_LOADER_WARN_BIT, 0, |
2850 | 207 | "Layer \"%s\" contains meta-layer-specific component_layers, but also defining layer library path. Both are not " |
2851 | 207 | "compatible, so skipping this layer", |
2852 | 207 | props.info.layerName); |
2853 | 207 | result = VK_ERROR_INITIALIZATION_FAILED; |
2854 | 207 | loader_instance_heap_free(inst, library_path); |
2855 | 207 | goto out; |
2856 | 207 | } |
2857 | | |
2858 | | // This function takes ownership of library_path_str - so we don't need to clean it up |
2859 | 9.87k | result = combine_manifest_directory_and_library_path(inst, library_path, filename, &props.lib_name); |
2860 | 9.87k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2861 | 9.87k | } |
2862 | | |
2863 | | // Parse component_layers |
2864 | | |
2865 | 25.3k | if (NULL == library_path) { |
2866 | 15.4k | if (!loader_check_version_meets_required(LOADER_VERSION_1_1_0, version)) { |
2867 | 9.58k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2868 | 9.58k | "Layer \"%s\" contains meta-layer-specific component_layers, but using older JSON file version.", |
2869 | 9.58k | props.info.layerName); |
2870 | 9.58k | } |
2871 | | |
2872 | 15.4k | result = loader_parse_json_array_of_strings(inst, layer_node, "component_layers", &(props.component_layer_names)); |
2873 | 15.4k | if (VK_ERROR_OUT_OF_HOST_MEMORY == result) { |
2874 | 0 | goto out; |
2875 | 0 | } |
2876 | 15.4k | if (VK_ERROR_INITIALIZATION_FAILED == result) { |
2877 | 1.38k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2878 | 1.38k | "Layer \"%s\" is missing both library_path and component_layers fields. One or the other MUST be defined. " |
2879 | 1.38k | "Skipping this layer", |
2880 | 1.38k | props.info.layerName); |
2881 | 1.38k | goto out; |
2882 | 1.38k | } |
2883 | | // This is now, officially, a meta-layer |
2884 | 14.0k | props.type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER; |
2885 | 14.0k | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Encountered meta-layer \"%s\"", |
2886 | 14.0k | props.info.layerName); |
2887 | 14.0k | } |
2888 | | |
2889 | | // Parse blacklisted_layers |
2890 | | |
2891 | 23.9k | if (props.is_override) { |
2892 | 1.26k | result = loader_parse_json_array_of_strings(inst, layer_node, "blacklisted_layers", &(props.blacklist_layer_names)); |
2893 | 1.26k | if (VK_ERROR_OUT_OF_HOST_MEMORY == result) { |
2894 | 0 | goto out; |
2895 | 0 | } |
2896 | 1.26k | } |
2897 | | |
2898 | | // Parse override_paths |
2899 | | |
2900 | 23.9k | result = loader_parse_json_array_of_strings(inst, layer_node, "override_paths", &(props.override_paths)); |
2901 | 23.9k | if (VK_ERROR_OUT_OF_HOST_MEMORY == result) { |
2902 | 0 | goto out; |
2903 | 0 | } |
2904 | 23.9k | if (NULL != props.override_paths.list && !loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) { |
2905 | 100 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2906 | 100 | "Layer \"%s\" contains meta-layer-specific override paths, but using older JSON file version.", |
2907 | 100 | props.info.layerName); |
2908 | 100 | } |
2909 | | |
2910 | | // Parse disable_environment |
2911 | | |
2912 | 23.9k | if (is_implicit) { |
2913 | 18.8k | cJSON *disable_environment = loader_cJSON_GetObjectItem(layer_node, "disable_environment"); |
2914 | 18.8k | if (disable_environment == NULL) { |
2915 | 2.06k | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2916 | 2.06k | "Layer \"%s\" doesn't contain required layer object disable_environment in the manifest JSON file, skipping " |
2917 | 2.06k | "this layer", |
2918 | 2.06k | props.info.layerName); |
2919 | 2.06k | result = VK_ERROR_INITIALIZATION_FAILED; |
2920 | 2.06k | goto out; |
2921 | 2.06k | } |
2922 | | |
2923 | 16.7k | if (!disable_environment->child || disable_environment->child->type != cJSON_String || |
2924 | 16.2k | !disable_environment->child->string || !disable_environment->child->valuestring) { |
2925 | 758 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
2926 | 758 | "Layer \"%s\" doesn't contain required child value in object disable_environment in the manifest JSON file, " |
2927 | 758 | "skipping this layer (Policy #LLP_LAYER_9)", |
2928 | 758 | props.info.layerName); |
2929 | 758 | result = VK_ERROR_INITIALIZATION_FAILED; |
2930 | 758 | goto out; |
2931 | 758 | } |
2932 | 15.9k | result = loader_copy_to_new_str(inst, disable_environment->child->string, &(props.disable_env_var.name)); |
2933 | 15.9k | if (VK_SUCCESS != result) goto out; |
2934 | 15.9k | result = loader_copy_to_new_str(inst, disable_environment->child->valuestring, &(props.disable_env_var.value)); |
2935 | 15.9k | if (VK_SUCCESS != result) goto out; |
2936 | 15.9k | } |
2937 | | |
2938 | | // Now get all optional items and objects and put in list: |
2939 | | // functions |
2940 | | // instance_extensions |
2941 | | // device_extensions |
2942 | | // enable_environment (implicit layers only) |
2943 | | // library_arch |
2944 | | |
2945 | | // Layer interface functions |
2946 | | // vkGetInstanceProcAddr |
2947 | | // vkGetDeviceProcAddr |
2948 | | // vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0) |
2949 | 21.1k | cJSON *functions = loader_cJSON_GetObjectItem(layer_node, "functions"); |
2950 | 21.1k | if (functions != NULL) { |
2951 | 1.28k | if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) { |
2952 | 682 | result = loader_parse_json_string(functions, "vkNegotiateLoaderLayerInterfaceVersion", |
2953 | 682 | &props.functions.str_negotiate_interface); |
2954 | 682 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2955 | 682 | } |
2956 | 1.28k | result = loader_parse_json_string(functions, "vkGetInstanceProcAddr", &props.functions.str_gipa); |
2957 | 1.28k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2958 | | |
2959 | 1.28k | if (NULL == props.functions.str_negotiate_interface && props.functions.str_gipa && |
2960 | 0 | loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) { |
2961 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
2962 | 0 | "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON " |
2963 | 0 | "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for " |
2964 | 0 | "compatibility reasons it may be desirable to continue using the deprecated tag.", |
2965 | 0 | props.info.layerName); |
2966 | 0 | } |
2967 | | |
2968 | 1.28k | result = loader_parse_json_string(functions, "vkGetDeviceProcAddr", &props.functions.str_gdpa); |
2969 | 1.28k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
2970 | | |
2971 | 1.28k | if (NULL == props.functions.str_negotiate_interface && props.functions.str_gdpa && |
2972 | 0 | loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) { |
2973 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
2974 | 0 | "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON " |
2975 | 0 | "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for " |
2976 | 0 | "compatibility reasons it may be desirable to continue using the deprecated tag.", |
2977 | 0 | props.info.layerName); |
2978 | 0 | } |
2979 | 1.28k | } |
2980 | | |
2981 | | // instance_extensions |
2982 | | // array of { |
2983 | | // name |
2984 | | // spec_version |
2985 | | // } |
2986 | | |
2987 | 21.1k | cJSON *instance_extensions = loader_cJSON_GetObjectItem(layer_node, "instance_extensions"); |
2988 | 21.1k | if (instance_extensions != NULL && instance_extensions->type == cJSON_Array) { |
2989 | 1.40k | cJSON *ext_item = NULL; |
2990 | 37.3k | cJSON_ArrayForEach(ext_item, instance_extensions) { |
2991 | 37.3k | if (ext_item->type != cJSON_Object) { |
2992 | 1.01k | continue; |
2993 | 1.01k | } |
2994 | | |
2995 | 36.2k | VkExtensionProperties ext_prop = {0}; |
2996 | 36.2k | result = loader_parse_json_string_to_existing_str(ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE, ext_prop.extensionName); |
2997 | 36.2k | if (result == VK_ERROR_INITIALIZATION_FAILED) { |
2998 | 3.66k | continue; |
2999 | 3.66k | } |
3000 | 32.6k | char *spec_version = NULL; |
3001 | 32.6k | result = loader_parse_json_string(ext_item, "spec_version", &spec_version); |
3002 | 32.6k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3003 | 32.6k | if (NULL != spec_version) { |
3004 | 1 | ext_prop.specVersion = atoi(spec_version); |
3005 | 1 | } |
3006 | 32.6k | loader_instance_heap_free(inst, spec_version); |
3007 | 32.6k | bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop); |
3008 | 32.6k | if (!ext_unsupported) { |
3009 | 32.2k | loader_add_to_ext_list(inst, &props.instance_extension_list, 1, &ext_prop); |
3010 | 32.2k | } |
3011 | 32.6k | } |
3012 | 1.40k | } |
3013 | | |
3014 | | // device_extensions |
3015 | | // array of { |
3016 | | // name |
3017 | | // spec_version |
3018 | | // entrypoints |
3019 | | // } |
3020 | 21.1k | cJSON *device_extensions = loader_cJSON_GetObjectItem(layer_node, "device_extensions"); |
3021 | 21.1k | if (device_extensions != NULL && device_extensions->type == cJSON_Array) { |
3022 | 1.46k | cJSON *ext_item = NULL; |
3023 | 31.0k | cJSON_ArrayForEach(ext_item, device_extensions) { |
3024 | 31.0k | if (ext_item->type != cJSON_Object) { |
3025 | 410 | continue; |
3026 | 410 | } |
3027 | | |
3028 | 30.6k | VkExtensionProperties ext_prop = {0}; |
3029 | 30.6k | result = loader_parse_json_string_to_existing_str(ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE, ext_prop.extensionName); |
3030 | 30.6k | if (result == VK_ERROR_INITIALIZATION_FAILED) { |
3031 | 4.34k | continue; |
3032 | 4.34k | } |
3033 | | |
3034 | 26.3k | char *spec_version = NULL; |
3035 | 26.3k | result = loader_parse_json_string(ext_item, "spec_version", &spec_version); |
3036 | 26.3k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3037 | 26.3k | if (NULL != spec_version) { |
3038 | 64 | ext_prop.specVersion = atoi(spec_version); |
3039 | 64 | } |
3040 | 26.3k | loader_instance_heap_free(inst, spec_version); |
3041 | | |
3042 | 26.3k | cJSON *entrypoints = loader_cJSON_GetObjectItem(ext_item, "entrypoints"); |
3043 | 26.3k | if (entrypoints == NULL) { |
3044 | 26.2k | result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, NULL); |
3045 | 26.2k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3046 | 26.2k | continue; |
3047 | 26.2k | } |
3048 | | |
3049 | 89 | struct loader_string_list entrys = {0}; |
3050 | 89 | result = loader_parse_json_array_of_strings(inst, ext_item, "entrypoints", &entrys); |
3051 | 89 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3052 | 89 | result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, &entrys); |
3053 | 89 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3054 | 89 | } |
3055 | 1.46k | } |
3056 | 21.1k | if (is_implicit) { |
3057 | 15.9k | cJSON *enable_environment = loader_cJSON_GetObjectItem(layer_node, "enable_environment"); |
3058 | | |
3059 | | // enable_environment is optional |
3060 | 15.9k | if (enable_environment && enable_environment->child && enable_environment->child->type == cJSON_String && |
3061 | 1.00k | enable_environment->child->string && enable_environment->child->valuestring) { |
3062 | 807 | result = loader_copy_to_new_str(inst, enable_environment->child->string, &(props.enable_env_var.name)); |
3063 | 807 | if (VK_SUCCESS != result) goto out; |
3064 | 807 | result = loader_copy_to_new_str(inst, enable_environment->child->valuestring, &(props.enable_env_var.value)); |
3065 | 807 | if (VK_SUCCESS != result) goto out; |
3066 | 807 | } |
3067 | 15.9k | } |
3068 | | |
3069 | | // Read in the pre-instance stuff |
3070 | 21.1k | cJSON *pre_instance = loader_cJSON_GetObjectItem(layer_node, "pre_instance_functions"); |
3071 | 21.1k | if (NULL != pre_instance) { |
3072 | | // Supported versions started in 1.1.2, so anything newer |
3073 | 1.42k | if (!loader_check_version_meets_required(loader_combine_version(1, 1, 2), version)) { |
3074 | 718 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
3075 | 718 | "Found pre_instance_functions section in layer from \"%s\". This section is only valid in manifest version " |
3076 | 718 | "1.1.2 or later. The section will be ignored", |
3077 | 718 | filename); |
3078 | 718 | } else if (!is_implicit) { |
3079 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
3080 | 0 | "Found pre_instance_functions section in explicit layer from \"%s\". This section is only valid in implicit " |
3081 | 0 | "layers. The section will be ignored", |
3082 | 0 | filename); |
3083 | 708 | } else { |
3084 | 708 | result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceExtensionProperties", |
3085 | 708 | &props.pre_instance_functions.enumerate_instance_extension_properties); |
3086 | 708 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3087 | | |
3088 | 708 | result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceLayerProperties", |
3089 | 708 | &props.pre_instance_functions.enumerate_instance_layer_properties); |
3090 | 708 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3091 | | |
3092 | 708 | result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceVersion", |
3093 | 708 | &props.pre_instance_functions.enumerate_instance_version); |
3094 | 708 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3095 | 708 | } |
3096 | 1.42k | } |
3097 | | |
3098 | 21.1k | if (loader_cJSON_GetObjectItem(layer_node, "app_keys")) { |
3099 | 2.11k | if (!props.is_override) { |
3100 | 1.46k | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3101 | 1.46k | "Layer %s contains app_keys, but any app_keys can only be provided by the override meta layer. " |
3102 | 1.46k | "These will be ignored.", |
3103 | 1.46k | props.info.layerName); |
3104 | 1.46k | } |
3105 | | |
3106 | 2.11k | result = loader_parse_json_array_of_strings(inst, layer_node, "app_keys", &props.app_key_paths); |
3107 | 2.11k | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out; |
3108 | 2.11k | } |
3109 | | |
3110 | 21.1k | char *library_arch = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "library_arch")); |
3111 | 21.1k | if (NULL != library_arch) { |
3112 | 612 | if ((strncmp(library_arch, "32", 2) == 0 && sizeof(void *) != 4) || |
3113 | 342 | (strncmp(library_arch, "64", 2) == 0 && sizeof(void *) != 8)) { |
3114 | 342 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
3115 | 342 | "The library architecture in layer %s doesn't match the current running architecture, skipping this layer", |
3116 | 342 | filename); |
3117 | 342 | result = VK_ERROR_INITIALIZATION_FAILED; |
3118 | 342 | goto out; |
3119 | 342 | } |
3120 | 612 | } |
3121 | | |
3122 | 20.7k | result = VK_SUCCESS; |
3123 | | |
3124 | 997k | out: |
3125 | | // Try to append the layer property |
3126 | 997k | if (VK_SUCCESS == result) { |
3127 | 929k | result = loader_append_layer_property(inst, layer_instance_list, &props); |
3128 | 929k | } |
3129 | | // If appending fails - free all the memory allocated in it |
3130 | 997k | if (VK_SUCCESS != result) { |
3131 | 67.7k | loader_free_layer_properties(inst, &props); |
3132 | 67.7k | } |
3133 | 997k | return result; |
3134 | 20.7k | } |
3135 | | |
3136 | 7.67k | bool is_valid_layer_json_version(const loader_api_version *layer_json) { |
3137 | | // Supported versions are: 1.0.0, 1.0.1, 1.1.0 - 1.1.2, and 1.2.0 - 1.2.1. |
3138 | 7.67k | if ((layer_json->major == 1 && layer_json->minor == 2 && layer_json->patch < 2) || |
3139 | 7.63k | (layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) || |
3140 | 6.75k | (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) { |
3141 | 1.49k | return true; |
3142 | 1.49k | } |
3143 | 6.18k | return false; |
3144 | 7.67k | } |
3145 | | |
3146 | | // Given a cJSON struct (json) of the top level JSON object from layer manifest |
3147 | | // file, add entry to the layer_list. Fill out the layer_properties in this list |
3148 | | // entry from the input cJSON object. |
3149 | | // |
3150 | | // \returns |
3151 | | // void |
3152 | | // layer_list has a new entry and initialized accordingly. |
3153 | | // If the json input object does not have all the required fields no entry |
3154 | | // is added to the list. |
3155 | | VkResult loader_add_layer_properties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, cJSON *json, |
3156 | 7.95k | bool is_implicit, char *filename) { |
3157 | | // The following Fields in layer manifest file that are required: |
3158 | | // - "file_format_version" |
3159 | | // - If more than one "layer" object are used, then the "layers" array is |
3160 | | // required |
3161 | 7.95k | VkResult result = VK_ERROR_INITIALIZATION_FAILED; |
3162 | | // Make sure sure the top level json value is an object |
3163 | 7.95k | if (!json || json->type != cJSON_Object) { |
3164 | 259 | goto out; |
3165 | 259 | } |
3166 | 7.69k | char *file_vers = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(json, "file_format_version")); |
3167 | 7.69k | if (NULL == file_vers) { |
3168 | 15 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3169 | 15 | "loader_add_layer_properties: Manifest %s missing required field file_format_version", filename); |
3170 | 15 | goto out; |
3171 | 15 | } |
3172 | | |
3173 | 7.67k | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, "Found manifest file %s (file version %s)", filename, file_vers); |
3174 | | // Get the major/minor/and patch as integers for easier comparison |
3175 | 7.67k | loader_api_version json_version = loader_make_full_version(loader_parse_version_string(file_vers)); |
3176 | | |
3177 | 7.67k | if (!is_valid_layer_json_version(&json_version)) { |
3178 | 6.18k | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3179 | 6.18k | "loader_add_layer_properties: %s has unknown layer manifest file version %d.%d.%d. May cause errors.", filename, |
3180 | 6.18k | json_version.major, json_version.minor, json_version.patch); |
3181 | 6.18k | } |
3182 | | |
3183 | | // If "layers" is present, read in the array of layer objects |
3184 | 7.67k | cJSON *layers_node = loader_cJSON_GetObjectItem(json, "layers"); |
3185 | 7.67k | if (layers_node != NULL) { |
3186 | | // Supported versions started in 1.0.1, so anything newer |
3187 | 6.60k | if (!loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) { |
3188 | 4.37k | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3189 | 4.37k | "loader_add_layer_properties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting " |
3190 | 4.37k | "version %s", |
3191 | 4.37k | filename, file_vers); |
3192 | 4.37k | } |
3193 | 6.60k | cJSON *layer_node = NULL; |
3194 | 996k | cJSON_ArrayForEach(layer_node, layers_node) { |
3195 | 996k | if (layer_node->type != cJSON_Object) { |
3196 | 289 | loader_log( |
3197 | 289 | inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3198 | 289 | "loader_add_layer_properties: Array element in \"layers\" field in manifest JSON file %s is not an object. " |
3199 | 289 | "Skipping this file", |
3200 | 289 | filename); |
3201 | 289 | goto out; |
3202 | 289 | } |
3203 | 995k | result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename); |
3204 | 995k | } |
3205 | 6.60k | } else { |
3206 | | // Otherwise, try to read in individual layers |
3207 | 1.06k | cJSON *layer_node = loader_cJSON_GetObjectItem(json, "layer"); |
3208 | 1.06k | if (layer_node == NULL) { |
3209 | | // Don't warn if this happens to be an ICD manifest |
3210 | 1.00k | if (loader_cJSON_GetObjectItem(json, "ICD") == NULL) { |
3211 | 1.00k | loader_log( |
3212 | 1.00k | inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3213 | 1.00k | "loader_add_layer_properties: Can not find 'layer' object in manifest JSON file %s. Skipping this file.", |
3214 | 1.00k | filename); |
3215 | 1.00k | } |
3216 | 1.00k | goto out; |
3217 | 1.00k | } |
3218 | | // Loop through all "layer" objects in the file to get a count of them |
3219 | | // first. |
3220 | 66 | uint16_t layer_count = 0; |
3221 | 66 | cJSON *tempNode = layer_node; |
3222 | 1.36k | do { |
3223 | 1.36k | tempNode = tempNode->next; |
3224 | 1.36k | layer_count++; |
3225 | 1.36k | } while (tempNode != NULL); |
3226 | | |
3227 | | // Throw a warning if we encounter multiple "layer" objects in file |
3228 | | // versions newer than 1.0.0. Having multiple objects with the same |
3229 | | // name at the same level is actually a JSON standard violation. |
3230 | 66 | if (layer_count > 1 && loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) { |
3231 | 14 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3232 | 14 | "loader_add_layer_properties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\". " |
3233 | 14 | "Please use 'layers' : [] array instead in %s.", |
3234 | 14 | filename); |
3235 | 52 | } else { |
3236 | 1.33k | do { |
3237 | 1.33k | result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename); |
3238 | 1.33k | layer_node = layer_node->next; |
3239 | 1.33k | } while (layer_node != NULL); |
3240 | 52 | } |
3241 | 66 | } |
3242 | | |
3243 | 7.95k | out: |
3244 | | |
3245 | 7.95k | return result; |
3246 | 7.67k | } |
3247 | | |
3248 | 145k | size_t determine_data_file_path_size(const char *cur_path, size_t relative_path_size) { |
3249 | 145k | size_t path_size = 0; |
3250 | | |
3251 | 145k | if (NULL != cur_path) { |
3252 | | // For each folder in cur_path, (detected by finding additional |
3253 | | // path separators in the string) we need to add the relative path on |
3254 | | // the end. Plus, leave an additional two slots on the end to add an |
3255 | | // additional directory slash and path separator if needed |
3256 | 145k | path_size += strlen(cur_path) + relative_path_size + 2; |
3257 | 54.7M | for (const char *x = cur_path; *x; ++x) { |
3258 | 54.6M | if (*x == PATH_SEPARATOR) { |
3259 | 16.4k | path_size += relative_path_size + 2; |
3260 | 16.4k | } |
3261 | 54.6M | } |
3262 | 145k | } |
3263 | | |
3264 | 145k | return path_size; |
3265 | 145k | } |
3266 | | |
3267 | 145k | void copy_data_file_info(const char *cur_path, const char *relative_path, size_t relative_path_size, char **output_path) { |
3268 | 145k | if (NULL != cur_path) { |
3269 | 145k | uint32_t start = 0; |
3270 | 145k | uint32_t stop = 0; |
3271 | 145k | char *cur_write = *output_path; |
3272 | | |
3273 | 300k | while (cur_path[start] != '\0') { |
3274 | 171k | while (cur_path[start] == PATH_SEPARATOR && cur_path[start] != '\0') { |
3275 | 16.4k | start++; |
3276 | 16.4k | } |
3277 | 155k | stop = start; |
3278 | 54.7M | while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') { |
3279 | 54.6M | stop++; |
3280 | 54.6M | } |
3281 | 155k | const size_t s = stop - start; |
3282 | 155k | if (s) { |
3283 | 155k | memcpy(cur_write, &cur_path[start], s); |
3284 | 155k | cur_write += s; |
3285 | | |
3286 | | // If this is a specific JSON file, just add it and don't add any |
3287 | | // relative path or directory symbol to it. |
3288 | 155k | if (!is_json(cur_write - 5, s)) { |
3289 | | // Add the relative directory if present. |
3290 | 154k | if (relative_path_size > 0) { |
3291 | | // If last symbol written was not a directory symbol, add it. |
3292 | 109k | if (*(cur_write - 1) != DIRECTORY_SYMBOL) { |
3293 | 109k | *cur_write++ = DIRECTORY_SYMBOL; |
3294 | 109k | } |
3295 | 109k | memcpy(cur_write, relative_path, relative_path_size); |
3296 | 109k | cur_write += relative_path_size; |
3297 | 109k | } |
3298 | 154k | } |
3299 | | |
3300 | 155k | *cur_write++ = PATH_SEPARATOR; |
3301 | 155k | start = stop; |
3302 | 155k | } |
3303 | 155k | } |
3304 | 145k | *output_path = cur_write; |
3305 | 145k | } |
3306 | 145k | } |
3307 | | |
3308 | | // If the file found is a manifest file name, add it to the end of out_files manifest list. |
3309 | 36.4k | VkResult add_if_manifest_file(const struct loader_instance *inst, const char *file_name, struct loader_string_list *out_files) { |
3310 | 36.4k | assert(NULL != file_name && "add_if_manifest_file: Received NULL pointer for file_name"); |
3311 | 36.4k | assert(NULL != out_files && "add_if_manifest_file: Received NULL pointer for out_files"); |
3312 | | |
3313 | | // Look for files ending with ".json" suffix |
3314 | 36.4k | size_t name_len = strlen(file_name); |
3315 | 36.4k | const char *name_suffix = file_name + name_len - 5; |
3316 | 36.4k | if (!is_json(name_suffix, name_len)) { |
3317 | | // Use incomplete to indicate invalid name, but to keep going. |
3318 | 26.3k | return VK_INCOMPLETE; |
3319 | 26.3k | } |
3320 | | |
3321 | 10.1k | return copy_str_to_string_list(inst, out_files, file_name, name_len); |
3322 | 36.4k | } |
3323 | | |
3324 | | // If the file found is a manifest file name, add it to the start of the out_files manifest list. |
3325 | 0 | VkResult prepend_if_manifest_file(const struct loader_instance *inst, const char *file_name, struct loader_string_list *out_files) { |
3326 | 0 | assert(NULL != file_name && "prepend_if_manifest_file: Received NULL pointer for file_name"); |
3327 | 0 | assert(NULL != out_files && "prepend_if_manifest_file: Received NULL pointer for out_files"); |
3328 | | |
3329 | | // Look for files ending with ".json" suffix |
3330 | 0 | size_t name_len = strlen(file_name); |
3331 | 0 | const char *name_suffix = file_name + name_len - 5; |
3332 | 0 | if (!is_json(name_suffix, name_len)) { |
3333 | | // Use incomplete to indicate invalid name, but to keep going. |
3334 | 0 | return VK_INCOMPLETE; |
3335 | 0 | } |
3336 | | |
3337 | 0 | return copy_str_to_start_of_string_list(inst, out_files, file_name, name_len); |
3338 | 0 | } |
3339 | | |
3340 | | // Add any files found in the search_path. If any path in the search path points to a specific JSON, attempt to |
3341 | | // only open that one JSON. Otherwise, if the path is a folder, search the folder for JSON files. |
3342 | | VkResult add_data_files(const struct loader_instance *inst, char *search_path, struct loader_string_list *out_files, |
3343 | 15.7k | bool use_first_found_manifest) { |
3344 | 15.7k | VkResult vk_result = VK_SUCCESS; |
3345 | 15.7k | char full_path[2048]; |
3346 | 15.7k | #if !defined(_WIN32) |
3347 | 15.7k | char temp_path[2048]; |
3348 | 15.7k | #endif |
3349 | | |
3350 | | // Now, parse the paths |
3351 | 15.7k | char *next_file = search_path; |
3352 | 127k | while (NULL != next_file && *next_file != '\0') { |
3353 | 111k | char *name = NULL; |
3354 | 111k | char *cur_file = next_file; |
3355 | 111k | next_file = loader_get_next_path(cur_file); |
3356 | | |
3357 | | // Is this a JSON file, then try to open it. |
3358 | 111k | size_t len = strlen(cur_file); |
3359 | 111k | if (is_json(cur_file + len - 5, len)) { |
3360 | | #if defined(_WIN32) |
3361 | | name = cur_file; |
3362 | | #elif COMMON_UNIX_PLATFORMS |
3363 | | // Only Linux has relative paths, make a copy of location so it isn't modified |
3364 | 435 | size_t str_len; |
3365 | 435 | if (NULL != next_file) { |
3366 | 435 | str_len = next_file - cur_file + 1; |
3367 | 435 | } else { |
3368 | 0 | str_len = strlen(cur_file) + 1; |
3369 | 0 | } |
3370 | 435 | if (str_len > sizeof(temp_path)) { |
3371 | 66 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "add_data_files: Path to %s too long", cur_file); |
3372 | 66 | continue; |
3373 | 66 | } |
3374 | 369 | strncpy(temp_path, cur_file, str_len); |
3375 | 369 | name = temp_path; |
3376 | | #else |
3377 | | #warning add_data_files must define relative path copy for this platform |
3378 | | #endif |
3379 | 369 | loader_get_fullpath(cur_file, name, sizeof(full_path), full_path); |
3380 | 369 | name = full_path; |
3381 | | |
3382 | 369 | VkResult local_res; |
3383 | 369 | local_res = add_if_manifest_file(inst, name, out_files); |
3384 | | |
3385 | | // Incomplete means this was not a valid data file. |
3386 | 369 | if (local_res == VK_INCOMPLETE) { |
3387 | 0 | continue; |
3388 | 369 | } else if (local_res != VK_SUCCESS) { |
3389 | 0 | vk_result = local_res; |
3390 | 0 | break; |
3391 | 0 | } |
3392 | 110k | } else { // Otherwise, treat it as a directory |
3393 | 110k | DIR *dir_stream = loader_opendir(inst, cur_file); |
3394 | 110k | if (NULL == dir_stream) { |
3395 | 102k | continue; |
3396 | 102k | } |
3397 | 44.5k | while (1) { |
3398 | 44.5k | errno = 0; |
3399 | 44.5k | struct dirent *dir_entry = readdir(dir_stream); |
3400 | 44.5k | #if !defined(WIN32) // Windows doesn't use readdir, don't check errors on functions which aren't called |
3401 | 44.5k | if (errno != 0) { |
3402 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "readdir failed with %d: %s", errno, strerror(errno)); |
3403 | 0 | break; |
3404 | 0 | } |
3405 | 44.5k | #endif |
3406 | 44.5k | if (NULL == dir_entry) { |
3407 | 8.49k | break; |
3408 | 8.49k | } |
3409 | | |
3410 | 36.0k | name = &(dir_entry->d_name[0]); |
3411 | 36.0k | loader_get_fullpath(name, cur_file, sizeof(full_path), full_path); |
3412 | 36.0k | name = full_path; |
3413 | | |
3414 | 36.0k | VkResult local_res; |
3415 | 36.0k | local_res = add_if_manifest_file(inst, name, out_files); |
3416 | | |
3417 | | // Incomplete means this was not a valid data file. |
3418 | 36.0k | if (local_res == VK_INCOMPLETE) { |
3419 | 26.3k | continue; |
3420 | 26.3k | } else if (local_res != VK_SUCCESS) { |
3421 | 0 | vk_result = local_res; |
3422 | 0 | break; |
3423 | 0 | } |
3424 | 36.0k | } |
3425 | 8.49k | loader_closedir(inst, dir_stream); |
3426 | 8.49k | if (vk_result != VK_SUCCESS) { |
3427 | 0 | goto out; |
3428 | 0 | } |
3429 | 8.49k | } |
3430 | 8.86k | if (use_first_found_manifest && out_files->count > 0) { |
3431 | 0 | break; |
3432 | 0 | } |
3433 | 8.86k | } |
3434 | | |
3435 | 15.7k | out: |
3436 | | |
3437 | 15.7k | return vk_result; |
3438 | 15.7k | } |
3439 | | |
3440 | | // Look for data files in the provided paths, but first check the environment override to determine if we should use that |
3441 | | // instead. |
3442 | | VkResult read_data_files_in_search_paths(const struct loader_instance *inst, enum loader_data_files_type manifest_type, |
3443 | 15.7k | const char *path_override, bool *override_active, struct loader_string_list *out_files) { |
3444 | 15.7k | VkResult vk_result = VK_SUCCESS; |
3445 | 15.7k | char *override_env = NULL; |
3446 | 15.7k | const char *override_path = NULL; |
3447 | 15.7k | char *additional_env = NULL; |
3448 | 15.7k | size_t search_path_size = 0; |
3449 | 15.7k | char *search_path = NULL; |
3450 | 15.7k | char *cur_path_ptr = NULL; |
3451 | 15.7k | bool use_first_found_manifest = false; |
3452 | 15.7k | #if COMMON_UNIX_PLATFORMS |
3453 | 15.7k | const char *relative_location = NULL; // Only used on unix platforms |
3454 | 15.7k | size_t rel_size = 0; // unused in windows, dont declare so no compiler warnings are generated |
3455 | 15.7k | #endif |
3456 | | |
3457 | | #if defined(_WIN32) |
3458 | | char *package_path = NULL; |
3459 | | #elif COMMON_UNIX_PLATFORMS |
3460 | | // Determine how much space is needed to generate the full search path |
3461 | | // for the current manifest files. |
3462 | 15.7k | char *xdg_config_home = loader_secure_getenv("XDG_CONFIG_HOME", inst); |
3463 | 15.7k | char *xdg_config_dirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst); |
3464 | | |
3465 | 15.7k | #if !defined(__Fuchsia__) && !defined(__QNX__) |
3466 | 15.7k | if (NULL == xdg_config_dirs || '\0' == xdg_config_dirs[0]) { |
3467 | 15.7k | xdg_config_dirs = FALLBACK_CONFIG_DIRS; |
3468 | 15.7k | } |
3469 | 15.7k | #endif |
3470 | | |
3471 | 15.7k | char *xdg_data_home = loader_secure_getenv("XDG_DATA_HOME", inst); |
3472 | 15.7k | char *xdg_data_dirs = loader_secure_getenv("XDG_DATA_DIRS", inst); |
3473 | | |
3474 | 15.7k | #if !defined(__Fuchsia__) && !defined(__QNX__) |
3475 | 15.7k | if (NULL == xdg_data_dirs || '\0' == xdg_data_dirs[0]) { |
3476 | 15.7k | xdg_data_dirs = FALLBACK_DATA_DIRS; |
3477 | 15.7k | } |
3478 | 15.7k | #endif |
3479 | | |
3480 | 15.7k | char *home = NULL; |
3481 | 15.7k | char *default_data_home = NULL; |
3482 | 15.7k | char *default_config_home = NULL; |
3483 | 15.7k | char *home_data_dir = NULL; |
3484 | 15.7k | char *home_config_dir = NULL; |
3485 | | |
3486 | | // Only use HOME if XDG_DATA_HOME is not present on the system |
3487 | 15.7k | home = loader_secure_getenv("HOME", inst); |
3488 | 15.7k | if (home != NULL) { |
3489 | 15.7k | if (NULL == xdg_config_home || '\0' == xdg_config_home[0]) { |
3490 | 15.7k | const char config_suffix[] = "/.config"; |
3491 | 15.7k | size_t default_config_home_len = strlen(home) + sizeof(config_suffix) + 1; |
3492 | 15.7k | default_config_home = loader_instance_heap_calloc(inst, default_config_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3493 | 15.7k | if (default_config_home == NULL) { |
3494 | 0 | vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; |
3495 | 0 | goto out; |
3496 | 0 | } |
3497 | 15.7k | strncpy(default_config_home, home, default_config_home_len); |
3498 | 15.7k | strncat(default_config_home, config_suffix, default_config_home_len); |
3499 | 15.7k | } |
3500 | 15.7k | if (NULL == xdg_data_home || '\0' == xdg_data_home[0]) { |
3501 | 15.7k | const char data_suffix[] = "/.local/share"; |
3502 | 15.7k | size_t default_data_home_len = strlen(home) + sizeof(data_suffix) + 1; |
3503 | 15.7k | default_data_home = loader_instance_heap_calloc(inst, default_data_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3504 | 15.7k | if (default_data_home == NULL) { |
3505 | 0 | vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; |
3506 | 0 | goto out; |
3507 | 0 | } |
3508 | 15.7k | strncpy(default_data_home, home, default_data_home_len); |
3509 | 15.7k | strncat(default_data_home, data_suffix, default_data_home_len); |
3510 | 15.7k | } |
3511 | 15.7k | } |
3512 | | |
3513 | 15.7k | if (NULL != default_config_home) { |
3514 | 15.7k | home_config_dir = default_config_home; |
3515 | 15.7k | } else { |
3516 | 0 | home_config_dir = xdg_config_home; |
3517 | 0 | } |
3518 | 15.7k | if (NULL != default_data_home) { |
3519 | 15.7k | home_data_dir = default_data_home; |
3520 | 15.7k | } else { |
3521 | 0 | home_data_dir = xdg_data_home; |
3522 | 0 | } |
3523 | | #else |
3524 | | #warning read_data_files_in_search_paths unsupported platform |
3525 | | #endif |
3526 | | |
3527 | 15.7k | switch (manifest_type) { |
3528 | 1.13k | case LOADER_DATA_FILE_MANIFEST_DRIVER: |
3529 | 1.13k | if (loader_settings_should_use_driver_environment_variables(inst)) { |
3530 | 1.13k | override_env = loader_secure_getenv(VK_DRIVER_FILES_ENV_VAR, inst); |
3531 | 1.13k | if (NULL == override_env) { |
3532 | | // Not there, so fall back to the old name |
3533 | 1.13k | override_env = loader_secure_getenv(VK_ICD_FILENAMES_ENV_VAR, inst); |
3534 | 1.13k | } |
3535 | 1.13k | additional_env = loader_secure_getenv(VK_ADDITIONAL_DRIVER_FILES_ENV_VAR, inst); |
3536 | 1.13k | } |
3537 | 1.13k | #if COMMON_UNIX_PLATFORMS |
3538 | 1.13k | relative_location = VK_DRIVERS_INFO_RELATIVE_DIR; |
3539 | 1.13k | #endif |
3540 | | #if defined(_WIN32) |
3541 | | package_path = windows_get_app_package_manifest_path(inst); |
3542 | | #endif |
3543 | 1.13k | break; |
3544 | 7.30k | case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER: |
3545 | 7.30k | override_env = loader_secure_getenv(VK_IMPLICIT_LAYER_PATH_ENV_VAR, inst); |
3546 | 7.30k | additional_env = loader_secure_getenv(VK_ADDITIONAL_IMPLICIT_LAYER_PATH_ENV_VAR, inst); |
3547 | 7.30k | #if COMMON_UNIX_PLATFORMS |
3548 | 7.30k | relative_location = VK_ILAYERS_INFO_RELATIVE_DIR; |
3549 | 7.30k | #endif |
3550 | | #if defined(_WIN32) |
3551 | | package_path = windows_get_app_package_manifest_path(inst); |
3552 | | #endif |
3553 | 7.30k | break; |
3554 | 7.30k | case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER: |
3555 | 7.30k | override_env = loader_secure_getenv(VK_EXPLICIT_LAYER_PATH_ENV_VAR, inst); |
3556 | 7.30k | additional_env = loader_secure_getenv(VK_ADDITIONAL_EXPLICIT_LAYER_PATH_ENV_VAR, inst); |
3557 | 7.30k | #if COMMON_UNIX_PLATFORMS |
3558 | 7.30k | relative_location = VK_ELAYERS_INFO_RELATIVE_DIR; |
3559 | 7.30k | #endif |
3560 | 7.30k | break; |
3561 | 0 | default: |
3562 | 0 | assert(false && "Shouldn't get here!"); |
3563 | 0 | break; |
3564 | 15.7k | } |
3565 | | |
3566 | | // Log a message when VK_LAYER_PATH is set but the override layer paths take priority |
3567 | 15.7k | if (manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER && NULL != override_env && NULL != path_override) { |
3568 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
3569 | 0 | "Ignoring VK_LAYER_PATH. The Override layer is active and has override paths set, which takes priority. " |
3570 | 0 | "VK_LAYER_PATH is set to %s", |
3571 | 0 | override_env); |
3572 | 0 | } |
3573 | | |
3574 | 15.7k | if (path_override != NULL) { |
3575 | 75 | override_path = path_override; |
3576 | 15.6k | } else if (override_env != NULL) { |
3577 | 0 | override_path = override_env; |
3578 | 0 | } |
3579 | | |
3580 | | // Add two by default for NULL terminator and one path separator on end (just in case) |
3581 | 15.7k | search_path_size = 2; |
3582 | | |
3583 | | // If there's an override, use that (and the local folder if required) and nothing else |
3584 | 15.7k | if (NULL != override_path) { |
3585 | | // Local folder and null terminator |
3586 | 75 | search_path_size += strlen(override_path) + 2; |
3587 | 15.6k | } else { |
3588 | | // Add the size of any additional search paths defined in the additive environment variable |
3589 | 15.6k | if (NULL != additional_env) { |
3590 | 0 | search_path_size += determine_data_file_path_size(additional_env, 0) + 2; |
3591 | | #if defined(_WIN32) |
3592 | | } |
3593 | | if (NULL != package_path) { |
3594 | | search_path_size += determine_data_file_path_size(package_path, 0) + 2; |
3595 | | } |
3596 | | if (search_path_size == 2) { |
3597 | | goto out; |
3598 | | } |
3599 | | #elif COMMON_UNIX_PLATFORMS |
3600 | | } |
3601 | | |
3602 | | // Add the general search folders (with the appropriate relative folder added) |
3603 | 15.6k | rel_size = strlen(relative_location); |
3604 | 15.6k | if (rel_size > 0) { |
3605 | | #if defined(__APPLE__) |
3606 | | search_path_size += MAXPATHLEN; |
3607 | | #endif |
3608 | | // Only add the home folders if defined |
3609 | 15.6k | if (NULL != home_config_dir) { |
3610 | 15.6k | search_path_size += determine_data_file_path_size(home_config_dir, rel_size); |
3611 | 15.6k | } |
3612 | 15.6k | search_path_size += determine_data_file_path_size(xdg_config_dirs, rel_size); |
3613 | 15.6k | search_path_size += determine_data_file_path_size(SYSCONFDIR, rel_size); |
3614 | 15.6k | #if defined(EXTRASYSCONFDIR) |
3615 | 15.6k | search_path_size += determine_data_file_path_size(EXTRASYSCONFDIR, rel_size); |
3616 | 15.6k | #endif |
3617 | | // Only add the home folders if defined |
3618 | 15.6k | if (NULL != home_data_dir) { |
3619 | 15.6k | search_path_size += determine_data_file_path_size(home_data_dir, rel_size); |
3620 | 15.6k | } |
3621 | 15.6k | search_path_size += determine_data_file_path_size(xdg_data_dirs, rel_size); |
3622 | 15.6k | } |
3623 | | #else |
3624 | | #warning read_data_files_in_search_paths unsupported platform |
3625 | | #endif |
3626 | 15.6k | } |
3627 | | |
3628 | | // Allocate the required space |
3629 | 15.7k | search_path = loader_instance_heap_calloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3630 | 15.7k | if (NULL == search_path) { |
3631 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
3632 | 0 | "read_data_files_in_search_paths: Failed to allocate space for search path of length %d", |
3633 | 0 | (uint32_t)search_path_size); |
3634 | 0 | vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; |
3635 | 0 | goto out; |
3636 | 0 | } |
3637 | | |
3638 | 15.7k | cur_path_ptr = search_path; |
3639 | | |
3640 | | // Add the remaining paths to the list |
3641 | 15.7k | if (NULL != override_path) { |
3642 | 75 | size_t override_path_len = strlen(override_path); |
3643 | 75 | loader_strncpy(cur_path_ptr, search_path_size, override_path, override_path_len); |
3644 | 75 | cur_path_ptr += override_path_len; |
3645 | 15.6k | } else { |
3646 | | // Add any additional search paths defined in the additive environment variable |
3647 | 15.6k | if (NULL != additional_env) { |
3648 | 0 | copy_data_file_info(additional_env, NULL, 0, &cur_path_ptr); |
3649 | 0 | } |
3650 | | |
3651 | | #if defined(_WIN32) |
3652 | | if (NULL != package_path) { |
3653 | | copy_data_file_info(package_path, NULL, 0, &cur_path_ptr); |
3654 | | } |
3655 | | #elif COMMON_UNIX_PLATFORMS |
3656 | 15.6k | if (rel_size > 0) { |
3657 | | #if defined(__APPLE__) |
3658 | | // Add the bundle's Resources dir to the beginning of the search path. |
3659 | | // Looks for manifests in the bundle first, before any system directories. |
3660 | | // This also appears to work unmodified for iOS, it finds the app bundle on the devices |
3661 | | // file system. (RSW) |
3662 | | CFBundleRef main_bundle = CFBundleGetMainBundle(); |
3663 | | if (NULL != main_bundle) { |
3664 | | CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle); |
3665 | | if (NULL != ref) { |
3666 | | if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) { |
3667 | | cur_path_ptr += strlen(cur_path_ptr); |
3668 | | *cur_path_ptr++ = DIRECTORY_SYMBOL; |
3669 | | memcpy(cur_path_ptr, relative_location, rel_size); |
3670 | | cur_path_ptr += rel_size; |
3671 | | *cur_path_ptr++ = PATH_SEPARATOR; |
3672 | | if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) { |
3673 | | use_first_found_manifest = true; |
3674 | | } |
3675 | | } |
3676 | | CFRelease(ref); |
3677 | | } |
3678 | | } |
3679 | | #endif // __APPLE__ |
3680 | | |
3681 | | // Only add the home folders if not NULL |
3682 | 15.6k | if (NULL != home_config_dir) { |
3683 | 15.6k | copy_data_file_info(home_config_dir, relative_location, rel_size, &cur_path_ptr); |
3684 | 15.6k | } |
3685 | 15.6k | copy_data_file_info(xdg_config_dirs, relative_location, rel_size, &cur_path_ptr); |
3686 | 15.6k | copy_data_file_info(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr); |
3687 | 15.6k | #if defined(EXTRASYSCONFDIR) |
3688 | 15.6k | copy_data_file_info(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr); |
3689 | 15.6k | #endif |
3690 | | |
3691 | | // Only add the home folders if not NULL |
3692 | 15.6k | if (NULL != home_data_dir) { |
3693 | 15.6k | copy_data_file_info(home_data_dir, relative_location, rel_size, &cur_path_ptr); |
3694 | 15.6k | } |
3695 | 15.6k | copy_data_file_info(xdg_data_dirs, relative_location, rel_size, &cur_path_ptr); |
3696 | 15.6k | } |
3697 | | |
3698 | | // Remove the last path separator |
3699 | 15.6k | --cur_path_ptr; |
3700 | | |
3701 | 15.6k | assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size); |
3702 | 15.6k | *cur_path_ptr = '\0'; |
3703 | | #else |
3704 | | #warning read_data_files_in_search_paths unsupported platform |
3705 | | #endif |
3706 | 15.6k | } |
3707 | | |
3708 | | // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc. |
3709 | | // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths. |
3710 | 15.7k | char path_sep_str[2] = {PATH_SEPARATOR, '\0'}; |
3711 | 15.7k | size_t search_path_updated_size = strlen(search_path); |
3712 | 127k | for (size_t first = 0; first < search_path_updated_size;) { |
3713 | | // If this is an empty path, erase it |
3714 | 111k | if (search_path[first] == PATH_SEPARATOR) { |
3715 | 0 | memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1); |
3716 | 0 | search_path_updated_size -= 1; |
3717 | 0 | continue; |
3718 | 0 | } |
3719 | | |
3720 | 111k | size_t first_end = first + 1; |
3721 | 111k | first_end += strcspn(&search_path[first_end], path_sep_str); |
3722 | 1.24M | for (size_t second = first_end + 1; second < search_path_updated_size;) { |
3723 | 1.13M | size_t second_end = second + 1; |
3724 | 1.13M | second_end += strcspn(&search_path[second_end], path_sep_str); |
3725 | 1.13M | if (first_end - first == second_end - second && |
3726 | 74.8k | !strncmp(&search_path[first], &search_path[second], second_end - second)) { |
3727 | | // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path. |
3728 | 43.9k | if (search_path[second_end] == PATH_SEPARATOR) { |
3729 | 43.9k | second_end++; |
3730 | 43.9k | } |
3731 | 43.9k | memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1); |
3732 | 43.9k | search_path_updated_size -= second_end - second; |
3733 | 1.08M | } else { |
3734 | 1.08M | second = second_end + 1; |
3735 | 1.08M | } |
3736 | 1.13M | } |
3737 | 111k | first = first_end + 1; |
3738 | 111k | } |
3739 | 15.7k | search_path_size = search_path_updated_size; |
3740 | | |
3741 | | // Print out the paths being searched if debugging is enabled |
3742 | 15.7k | uint32_t log_flags = 0; |
3743 | 15.7k | if (search_path_size > 0) { |
3744 | 15.7k | char *tmp_search_path = loader_instance_heap_alloc(inst, search_path_size + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
3745 | 15.7k | if (NULL != tmp_search_path) { |
3746 | 15.7k | loader_strncpy(tmp_search_path, search_path_size + 1, search_path, search_path_size); |
3747 | 15.7k | tmp_search_path[search_path_size] = '\0'; |
3748 | 15.7k | if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) { |
3749 | 1.13k | log_flags = VULKAN_LOADER_DRIVER_BIT; |
3750 | 1.13k | loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Searching for driver manifest files"); |
3751 | 14.6k | } else { |
3752 | 14.6k | log_flags = VULKAN_LOADER_LAYER_BIT; |
3753 | 14.6k | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "Searching for %s layer manifest files", |
3754 | 14.6k | manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER ? "explicit" : "implicit"); |
3755 | 14.6k | } |
3756 | 15.7k | loader_log(inst, log_flags, 0, " In following locations:"); |
3757 | 15.7k | char *cur_file; |
3758 | 15.7k | char *next_file = tmp_search_path; |
3759 | 127k | while (NULL != next_file && *next_file != '\0') { |
3760 | 111k | cur_file = next_file; |
3761 | 111k | next_file = loader_get_next_path(cur_file); |
3762 | 111k | loader_log(inst, log_flags, 0, " %s", cur_file); |
3763 | 111k | } |
3764 | 15.7k | loader_instance_heap_free(inst, tmp_search_path); |
3765 | 15.7k | } |
3766 | 15.7k | } |
3767 | | |
3768 | | // Now, parse the paths and add any manifest files found in them. |
3769 | 15.7k | vk_result = add_data_files(inst, search_path, out_files, use_first_found_manifest); |
3770 | | |
3771 | 15.7k | if (log_flags != 0 && out_files->count > 0) { |
3772 | 8.30k | loader_log(inst, log_flags, 0, " Found the following files:"); |
3773 | 18.4k | for (uint32_t cur_file = 0; cur_file < out_files->count; ++cur_file) { |
3774 | 10.1k | loader_log(inst, log_flags, 0, " %s", out_files->list[cur_file]); |
3775 | 10.1k | } |
3776 | 8.30k | } else { |
3777 | 7.44k | loader_log(inst, log_flags, 0, " Found no files"); |
3778 | 7.44k | } |
3779 | | |
3780 | 15.7k | if (NULL != override_path) { |
3781 | 75 | *override_active = true; |
3782 | 15.6k | } else { |
3783 | 15.6k | *override_active = false; |
3784 | 15.6k | } |
3785 | | |
3786 | 15.7k | out: |
3787 | | |
3788 | 15.7k | loader_free_getenv(additional_env, inst); |
3789 | 15.7k | loader_free_getenv(override_env, inst); |
3790 | | #if defined(_WIN32) |
3791 | | loader_instance_heap_free(inst, package_path); |
3792 | | #elif COMMON_UNIX_PLATFORMS |
3793 | | loader_free_getenv(xdg_config_home, inst); |
3794 | 15.7k | loader_free_getenv(xdg_config_dirs, inst); |
3795 | 15.7k | loader_free_getenv(xdg_data_home, inst); |
3796 | 15.7k | loader_free_getenv(xdg_data_dirs, inst); |
3797 | 15.7k | loader_free_getenv(xdg_data_home, inst); |
3798 | 15.7k | loader_free_getenv(home, inst); |
3799 | 15.7k | loader_instance_heap_free(inst, default_data_home); |
3800 | 15.7k | loader_instance_heap_free(inst, default_config_home); |
3801 | | #else |
3802 | | #warning read_data_files_in_search_paths unsupported platform |
3803 | | #endif |
3804 | | |
3805 | 15.7k | loader_instance_heap_free(inst, search_path); |
3806 | | |
3807 | 15.7k | return vk_result; |
3808 | 15.7k | } |
3809 | | |
3810 | | // Find the Vulkan library manifest files. |
3811 | | // |
3812 | | // This function scans the appropriate locations for a list of JSON manifest files based on the |
3813 | | // "manifest_type". The location is interpreted as Registry path on Windows and a directory path(s) |
3814 | | // on Linux. |
3815 | | // "home_location" is an additional directory in the users home directory to look at. It is |
3816 | | // expanded into the dir path $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location |
3817 | | // depending on environment variables. This "home_location" is only used on Linux. |
3818 | | // |
3819 | | // \returns |
3820 | | // VKResult |
3821 | | // A string list of manifest files to be opened in out_files param. |
3822 | | // List has a pointer to string for each manifest filename. |
3823 | | // When done using the list in out_files, pointers should be freed. |
3824 | | // Location or override string lists can be either files or directories as |
3825 | | // follows: |
3826 | | // | location | override |
3827 | | // -------------------------------- |
3828 | | // Win ICD | files | files |
3829 | | // Win Layer | files | dirs |
3830 | | // Linux ICD | dirs | files |
3831 | | // Linux Layer| dirs | dirs |
3832 | | |
3833 | | VkResult loader_get_data_files(const struct loader_instance *inst, enum loader_data_files_type manifest_type, |
3834 | 15.7k | const char *path_override, struct loader_string_list *out_files) { |
3835 | 15.7k | VkResult res = VK_SUCCESS; |
3836 | 15.7k | bool override_active = false; |
3837 | | |
3838 | | // Free and init the out_files information so there's no false data left from uninitialized variables. |
3839 | 15.7k | free_string_list(inst, out_files); |
3840 | | |
3841 | 15.7k | res = read_data_files_in_search_paths(inst, manifest_type, path_override, &override_active, out_files); |
3842 | 15.7k | if (VK_SUCCESS != res) { |
3843 | 0 | goto out; |
3844 | 0 | } |
3845 | | |
3846 | | #if defined(_WIN32) |
3847 | | // Read the registry if the override wasn't active. |
3848 | | if (!override_active) { |
3849 | | bool warn_if_not_present = false; |
3850 | | char *registry_location = NULL; |
3851 | | |
3852 | | switch (manifest_type) { |
3853 | | default: |
3854 | | goto out; |
3855 | | case LOADER_DATA_FILE_MANIFEST_DRIVER: |
3856 | | warn_if_not_present = true; |
3857 | | registry_location = VK_DRIVERS_INFO_REGISTRY_LOC; |
3858 | | break; |
3859 | | case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER: |
3860 | | registry_location = VK_ILAYERS_INFO_REGISTRY_LOC; |
3861 | | break; |
3862 | | case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER: |
3863 | | warn_if_not_present = true; |
3864 | | registry_location = VK_ELAYERS_INFO_REGISTRY_LOC; |
3865 | | break; |
3866 | | } |
3867 | | VkResult tmp_res = |
3868 | | windows_read_data_files_in_registry(inst, manifest_type, warn_if_not_present, registry_location, out_files); |
3869 | | // Only return an error if there was an error this time, and no manifest files from before. |
3870 | | if (VK_SUCCESS != tmp_res && out_files->count == 0) { |
3871 | | res = tmp_res; |
3872 | | goto out; |
3873 | | } |
3874 | | } |
3875 | | #endif |
3876 | | |
3877 | 15.7k | out: |
3878 | | |
3879 | 15.7k | if (VK_SUCCESS != res) { |
3880 | 0 | free_string_list(inst, out_files); |
3881 | 0 | } |
3882 | | |
3883 | 15.7k | return res; |
3884 | 15.7k | } |
3885 | | |
3886 | | struct ICDManifestInfo { |
3887 | | char *full_library_path; |
3888 | | uint32_t version; |
3889 | | }; |
3890 | | |
3891 | | // Takes a json file, opens, reads, and parses an ICD Manifest out of it. |
3892 | | // Should only return VK_SUCCESS, VK_ERROR_INCOMPATIBLE_DRIVER, or VK_ERROR_OUT_OF_HOST_MEMORY |
3893 | | VkResult loader_parse_icd_manifest(const struct loader_instance *inst, char *file_str, struct ICDManifestInfo *icd, |
3894 | 1.13k | bool *skipped_portability_drivers) { |
3895 | 1.13k | VkResult res = VK_SUCCESS; |
3896 | 1.13k | cJSON *icd_manifest_json = NULL; |
3897 | | |
3898 | 1.13k | if (file_str == NULL) { |
3899 | 0 | goto out; |
3900 | 0 | } |
3901 | | |
3902 | 1.13k | res = loader_get_json(inst, file_str, &icd_manifest_json); |
3903 | 1.13k | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
3904 | 0 | goto out; |
3905 | 0 | } |
3906 | 1.13k | if (res != VK_SUCCESS || NULL == icd_manifest_json) { |
3907 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3908 | 0 | goto out; |
3909 | 0 | } |
3910 | | |
3911 | 1.13k | cJSON *file_format_version_json = loader_cJSON_GetObjectItem(icd_manifest_json, "file_format_version"); |
3912 | 1.13k | if (file_format_version_json == NULL) { |
3913 | 1 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3914 | 1 | "loader_parse_icd_manifest: ICD JSON %s does not have a \'file_format_version\' field. Skipping ICD JSON.", |
3915 | 1 | file_str); |
3916 | 1 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3917 | 1 | goto out; |
3918 | 1 | } |
3919 | | |
3920 | 1.12k | char *file_vers_str = loader_cJSON_GetStringValue(file_format_version_json); |
3921 | 1.12k | if (NULL == file_vers_str) { |
3922 | | // Only reason the print can fail is if there was an allocation issue |
3923 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3924 | 0 | "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'file_format_version\' field. Skipping ICD JSON", |
3925 | 0 | file_str); |
3926 | 0 | goto out; |
3927 | 0 | } |
3928 | 1.12k | loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers_str); |
3929 | | |
3930 | | // Get the version of the driver manifest |
3931 | 1.12k | loader_api_version json_file_version = loader_make_full_version(loader_parse_version_string(file_vers_str)); |
3932 | | |
3933 | | // Loader only knows versions 1.0.0 and 1.0.1, anything above it is unknown |
3934 | 1.12k | if (loader_check_version_meets_required(loader_combine_version(1, 0, 2), json_file_version)) { |
3935 | 120 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3936 | 120 | "loader_parse_icd_manifest: %s has unknown icd manifest file version %d.%d.%d. May cause errors.", file_str, |
3937 | 120 | json_file_version.major, json_file_version.minor, json_file_version.patch); |
3938 | 120 | } |
3939 | | |
3940 | 1.12k | cJSON *itemICD = loader_cJSON_GetObjectItem(icd_manifest_json, "ICD"); |
3941 | 1.12k | if (itemICD == NULL) { |
3942 | | // Don't warn if this happens to be a layer manifest file |
3943 | 117 | if (loader_cJSON_GetObjectItem(icd_manifest_json, "layer") == NULL && |
3944 | 116 | loader_cJSON_GetObjectItem(icd_manifest_json, "layers") == NULL) { |
3945 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3946 | 0 | "loader_parse_icd_manifest: Can not find \'ICD\' object in ICD JSON file %s. Skipping ICD JSON", file_str); |
3947 | 0 | } |
3948 | 117 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3949 | 117 | goto out; |
3950 | 117 | } |
3951 | | |
3952 | 1.01k | cJSON *library_path_json = loader_cJSON_GetObjectItem(itemICD, "library_path"); |
3953 | 1.01k | if (library_path_json == NULL) { |
3954 | 18 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3955 | 18 | "loader_parse_icd_manifest: Failed to find \'library_path\' object in ICD JSON file %s. Skipping ICD JSON.", |
3956 | 18 | file_str); |
3957 | 18 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3958 | 18 | goto out; |
3959 | 18 | } |
3960 | 1.01k | bool out_of_memory = false; |
3961 | 994 | char *library_path = loader_cJSON_Print(library_path_json, &out_of_memory); |
3962 | 994 | if (out_of_memory) { |
3963 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3964 | 0 | "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'library_path\' field. Skipping ICD JSON.", file_str); |
3965 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
3966 | 0 | goto out; |
3967 | 994 | } else if (!library_path || strlen(library_path) == 0) { |
3968 | 1 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3969 | 1 | "loader_parse_icd_manifest: ICD JSON %s \'library_path\' field is empty. Skipping ICD JSON.", file_str); |
3970 | 1 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3971 | 1 | loader_instance_heap_free(inst, library_path); |
3972 | 1 | goto out; |
3973 | 1 | } |
3974 | | |
3975 | | // Print out the paths being searched if debugging is enabled |
3976 | 993 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, "Searching for ICD drivers named %s", library_path); |
3977 | | // This function takes ownership of library_path - so we don't need to clean it up |
3978 | 993 | res = combine_manifest_directory_and_library_path(inst, library_path, file_str, &icd->full_library_path); |
3979 | 993 | if (VK_SUCCESS != res) { |
3980 | 0 | goto out; |
3981 | 0 | } |
3982 | | |
3983 | 993 | cJSON *api_version_json = loader_cJSON_GetObjectItem(itemICD, "api_version"); |
3984 | 993 | if (api_version_json == NULL) { |
3985 | 914 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3986 | 914 | "loader_parse_icd_manifest: ICD JSON %s does not have an \'api_version\' field. Skipping ICD JSON.", file_str); |
3987 | 914 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
3988 | 914 | goto out; |
3989 | 914 | } |
3990 | 79 | char *version_str = loader_cJSON_GetStringValue(api_version_json); |
3991 | 79 | if (NULL == version_str) { |
3992 | | // Only reason the print can fail is if there was an allocation issue |
3993 | 12 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
3994 | 12 | "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'api_version\' field. Skipping ICD JSON.", file_str); |
3995 | | |
3996 | 12 | goto out; |
3997 | 12 | } |
3998 | 67 | icd->version = loader_parse_version_string(version_str); |
3999 | | |
4000 | 67 | if (VK_API_VERSION_VARIANT(icd->version) != 0) { |
4001 | 7 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
4002 | 7 | "loader_parse_icd_manifest: Driver's ICD JSON %s \'api_version\' field contains a non-zero variant value of %d. " |
4003 | 7 | " Skipping ICD JSON.", |
4004 | 7 | file_str, VK_API_VERSION_VARIANT(icd->version)); |
4005 | 7 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
4006 | 7 | goto out; |
4007 | 7 | } |
4008 | | |
4009 | | // Skip over ICD's which contain a true "is_portability_driver" value whenever the application doesn't enable |
4010 | | // portability enumeration. |
4011 | 60 | cJSON *is_portability_driver_json = loader_cJSON_GetObjectItem(itemICD, "is_portability_driver"); |
4012 | 60 | if (loader_cJSON_IsTrue(is_portability_driver_json) && inst && !inst->portability_enumeration_enabled) { |
4013 | 0 | if (skipped_portability_drivers) { |
4014 | 0 | *skipped_portability_drivers = true; |
4015 | 0 | } |
4016 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
4017 | 0 | goto out; |
4018 | 0 | } |
4019 | | |
4020 | 60 | char *library_arch_str = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(itemICD, "library_arch")); |
4021 | 60 | if (library_arch_str != NULL) { |
4022 | 17 | if ((strncmp(library_arch_str, "32", 2) == 0 && sizeof(void *) != 4) || |
4023 | 16 | (strncmp(library_arch_str, "64", 2) == 0 && sizeof(void *) != 8)) { |
4024 | 1 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
4025 | 1 | "loader_parse_icd_manifest: Driver library architecture doesn't match the current running " |
4026 | 1 | "architecture, skipping this driver"); |
4027 | 1 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
4028 | 1 | goto out; |
4029 | 1 | } |
4030 | 17 | } |
4031 | 1.13k | out: |
4032 | 1.13k | loader_cJSON_Delete(icd_manifest_json); |
4033 | 1.13k | return res; |
4034 | 60 | } |
4035 | | |
4036 | | // Try to find the Vulkan ICD driver(s). |
4037 | | // |
4038 | | // This function scans the default system loader path(s) or path specified by either the |
4039 | | // VK_DRIVER_FILES or VK_ICD_FILENAMES environment variable in order to find loadable |
4040 | | // VK ICDs manifest files. |
4041 | | // From these manifest files it finds the ICD libraries. |
4042 | | // |
4043 | | // skipped_portability_drivers is used to report whether the loader found drivers which report |
4044 | | // portability but the application didn't enable the bit to enumerate them |
4045 | | // Can be NULL |
4046 | | // |
4047 | | // \returns |
4048 | | // Vulkan result |
4049 | | // (on result == VK_SUCCESS) a list of icds that were discovered |
4050 | | VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list, |
4051 | 1.13k | const VkInstanceCreateInfo *pCreateInfo, bool *skipped_portability_drivers) { |
4052 | 1.13k | VkResult res = VK_SUCCESS; |
4053 | 1.13k | struct loader_string_list manifest_files = {0}; |
4054 | 1.13k | struct loader_envvar_filter select_filter = {0}; |
4055 | 1.13k | struct loader_envvar_filter disable_filter = {0}; |
4056 | 1.13k | struct ICDManifestInfo *icd_details = NULL; |
4057 | | |
4058 | | // Set up the ICD Trampoline list so elements can be written into it. |
4059 | 1.13k | res = loader_init_scanned_icd_list(inst, icd_tramp_list); |
4060 | 1.13k | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
4061 | 0 | return res; |
4062 | 0 | } |
4063 | | |
4064 | 1.13k | bool direct_driver_loading_exclusive_mode = false; |
4065 | 1.13k | res = loader_scan_for_direct_drivers(inst, pCreateInfo, icd_tramp_list, &direct_driver_loading_exclusive_mode); |
4066 | 1.13k | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
4067 | 0 | goto out; |
4068 | 0 | } |
4069 | 1.13k | if (direct_driver_loading_exclusive_mode) { |
4070 | | // Make sure to jump over the system & env-var driver discovery mechanisms if exclusive mode is set, even if no drivers |
4071 | | // were successfully found through the direct driver loading mechanism |
4072 | 0 | goto out; |
4073 | 0 | } |
4074 | | |
4075 | 1.13k | if (loader_settings_should_use_driver_environment_variables(inst)) { |
4076 | | // Parse the filter environment variables to determine if we have any special behavior |
4077 | 1.13k | res = parse_generic_filter_environment_var(inst, VK_DRIVERS_SELECT_ENV_VAR, &select_filter); |
4078 | 1.13k | if (VK_SUCCESS != res) { |
4079 | 0 | goto out; |
4080 | 0 | } |
4081 | 1.13k | res = parse_generic_filter_environment_var(inst, VK_DRIVERS_DISABLE_ENV_VAR, &disable_filter); |
4082 | 1.13k | if (VK_SUCCESS != res) { |
4083 | 0 | goto out; |
4084 | 0 | } |
4085 | 1.13k | } |
4086 | | |
4087 | | // Get a list of manifest files for ICDs |
4088 | 1.13k | res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_DRIVER, NULL, &manifest_files); |
4089 | 1.13k | if (VK_SUCCESS != res) { |
4090 | 0 | goto out; |
4091 | 0 | } |
4092 | | |
4093 | | // Add any drivers provided by the loader settings file |
4094 | 1.13k | res = loader_settings_get_additional_driver_files(inst, &manifest_files); |
4095 | 1.13k | if (VK_SUCCESS != res) { |
4096 | 0 | goto out; |
4097 | 0 | } |
4098 | | |
4099 | 1.13k | icd_details = loader_stack_alloc(sizeof(struct ICDManifestInfo) * manifest_files.count); |
4100 | 1.13k | if (NULL == icd_details) { |
4101 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
4102 | 0 | goto out; |
4103 | 0 | } |
4104 | 1.13k | memset(icd_details, 0, sizeof(struct ICDManifestInfo) * manifest_files.count); |
4105 | | |
4106 | 2.26k | for (uint32_t i = 0; i < manifest_files.count; i++) { |
4107 | 1.13k | VkResult icd_res = VK_SUCCESS; |
4108 | | |
4109 | 1.13k | icd_res = loader_parse_icd_manifest(inst, manifest_files.list[i], &icd_details[i], skipped_portability_drivers); |
4110 | 1.13k | if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) { |
4111 | 0 | res = icd_res; |
4112 | 0 | goto out; |
4113 | 1.13k | } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) { |
4114 | 1.05k | continue; |
4115 | 1.05k | } |
4116 | | |
4117 | 71 | if (select_filter.count > 0 || disable_filter.count > 0) { |
4118 | | // Get only the filename for comparing to the filters |
4119 | 0 | char *just_filename_str = strrchr(manifest_files.list[i], DIRECTORY_SYMBOL); |
4120 | | |
4121 | | // No directory symbol, just the filename |
4122 | 0 | if (NULL == just_filename_str) { |
4123 | 0 | just_filename_str = manifest_files.list[i]; |
4124 | 0 | } else { |
4125 | 0 | just_filename_str++; |
4126 | 0 | } |
4127 | |
|
4128 | 0 | bool name_matches_select = |
4129 | 0 | (select_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &select_filter)); |
4130 | 0 | bool name_matches_disable = |
4131 | 0 | (disable_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &disable_filter)); |
4132 | |
|
4133 | 0 | if (name_matches_disable && !name_matches_select) { |
4134 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
4135 | 0 | "Driver \"%s\" ignored because it was disabled by env var \'%s\'", just_filename_str, |
4136 | 0 | VK_DRIVERS_DISABLE_ENV_VAR); |
4137 | 0 | continue; |
4138 | 0 | } |
4139 | 0 | if (select_filter.count != 0 && !name_matches_select) { |
4140 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
4141 | 0 | "Driver \"%s\" ignored because not selected by env var \'%s\'", just_filename_str, |
4142 | 0 | VK_DRIVERS_SELECT_ENV_VAR); |
4143 | 0 | continue; |
4144 | 0 | } |
4145 | 0 | } |
4146 | | |
4147 | 71 | enum loader_layer_library_status lib_status; |
4148 | 71 | icd_res = |
4149 | 71 | loader_scanned_icd_add(inst, icd_tramp_list, icd_details[i].full_library_path, icd_details[i].version, &lib_status); |
4150 | 71 | if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) { |
4151 | 1 | res = icd_res; |
4152 | 1 | goto out; |
4153 | 70 | } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) { |
4154 | 70 | switch (lib_status) { |
4155 | 0 | case LOADER_LAYER_LIB_NOT_LOADED: |
4156 | 68 | case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD: |
4157 | 68 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
4158 | 68 | "loader_icd_scan: Failed loading library associated with ICD JSON %s. Ignoring this JSON", |
4159 | 68 | icd_details[i].full_library_path); |
4160 | 68 | break; |
4161 | 2 | case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: { |
4162 | 2 | loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Requested ICD %s was wrong bit-type. Ignoring this JSON", |
4163 | 2 | icd_details[i].full_library_path); |
4164 | 2 | break; |
4165 | 0 | } |
4166 | 0 | case LOADER_LAYER_LIB_SUCCESS_LOADED: |
4167 | 0 | case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY: |
4168 | | // Shouldn't be able to reach this but if it is, best to report a debug |
4169 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
4170 | 0 | "Shouldn't reach this. A valid version of requested ICD %s was loaded but something bad " |
4171 | 0 | "happened afterwards.", |
4172 | 0 | icd_details[i].full_library_path); |
4173 | 0 | break; |
4174 | 70 | } |
4175 | 70 | } |
4176 | 71 | } |
4177 | | |
4178 | 1.13k | out: |
4179 | 1.13k | if (NULL != icd_details) { |
4180 | | // Successfully got the icd_details structure, which means we need to free the paths contained within |
4181 | 2.26k | for (uint32_t i = 0; i < manifest_files.count; i++) { |
4182 | 1.13k | loader_instance_heap_free(inst, icd_details[i].full_library_path); |
4183 | 1.13k | } |
4184 | 1.13k | } |
4185 | 1.13k | free_string_list(inst, &manifest_files); |
4186 | 1.13k | return res; |
4187 | 1.13k | } |
4188 | | |
4189 | | // Gets the layer data files corresponding to manifest_type & path_override, then parses the resulting json objects |
4190 | | // into instance_layers |
4191 | | // Manifest type must be either implicit or explicit |
4192 | | VkResult loader_parse_instance_layers(struct loader_instance *inst, enum loader_data_files_type manifest_type, |
4193 | 14.6k | const char *path_override, struct loader_layer_list *instance_layers) { |
4194 | 14.6k | assert(manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER || manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER); |
4195 | 14.6k | VkResult res = VK_SUCCESS; |
4196 | 14.6k | struct loader_string_list manifest_files = {0}; |
4197 | | |
4198 | 14.6k | res = loader_get_data_files(inst, manifest_type, path_override, &manifest_files); |
4199 | 14.6k | if (VK_SUCCESS != res) { |
4200 | 0 | goto out; |
4201 | 0 | } |
4202 | | |
4203 | 23.6k | for (uint32_t i = 0; i < manifest_files.count; i++) { |
4204 | 9.02k | char *file_str = manifest_files.list[i]; |
4205 | 9.02k | if (file_str == NULL) { |
4206 | 0 | continue; |
4207 | 0 | } |
4208 | | |
4209 | | // Parse file into JSON struct |
4210 | 9.02k | cJSON *json = NULL; |
4211 | 9.02k | VkResult local_res = loader_get_json(inst, file_str, &json); |
4212 | 9.02k | if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) { |
4213 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
4214 | 0 | goto out; |
4215 | 9.02k | } else if (VK_SUCCESS != local_res || NULL == json) { |
4216 | 1.32k | continue; |
4217 | 1.32k | } |
4218 | | |
4219 | 7.70k | local_res = loader_add_layer_properties(inst, instance_layers, json, |
4220 | 7.70k | manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, file_str); |
4221 | 7.70k | loader_cJSON_Delete(json); |
4222 | | |
4223 | | // If the error is anything other than out of memory we still want to try to load the other layers |
4224 | 7.70k | if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) { |
4225 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
4226 | 0 | goto out; |
4227 | 0 | } |
4228 | 7.70k | } |
4229 | 14.6k | out: |
4230 | 14.6k | free_string_list(inst, &manifest_files); |
4231 | | |
4232 | 14.6k | return res; |
4233 | 14.6k | } |
4234 | | |
4235 | | // Given a loader_layer_properties struct that is a valid override layer, concatenate the properties override paths and put them |
4236 | | // into the output parameter override_paths |
4237 | | VkResult get_override_layer_override_paths(struct loader_instance *inst, struct loader_layer_properties *prop, |
4238 | 75 | char **override_paths) { |
4239 | 75 | if (prop->override_paths.count > 0) { |
4240 | 75 | char *cur_write_ptr = NULL; |
4241 | 75 | size_t override_path_size = 0; |
4242 | 51.4k | for (uint32_t j = 0; j < prop->override_paths.count; j++) { |
4243 | 51.3k | override_path_size += determine_data_file_path_size(prop->override_paths.list[j], 0); |
4244 | 51.3k | } |
4245 | 75 | *override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
4246 | 75 | if (*override_paths == NULL) { |
4247 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
4248 | 0 | } |
4249 | 75 | cur_write_ptr = &(*override_paths)[0]; |
4250 | 51.4k | for (uint32_t j = 0; j < prop->override_paths.count; j++) { |
4251 | 51.3k | copy_data_file_info(prop->override_paths.list[j], NULL, 0, &cur_write_ptr); |
4252 | 51.3k | } |
4253 | | |
4254 | | // Subtract one from cur_write_ptr only if something was written so we can set the null terminator |
4255 | 75 | if (*override_paths < cur_write_ptr) { |
4256 | 75 | --cur_write_ptr; |
4257 | 75 | assert(cur_write_ptr - (*override_paths) < (ptrdiff_t)override_path_size); |
4258 | 75 | } |
4259 | | // Remove the last path separator |
4260 | 75 | *cur_write_ptr = '\0'; |
4261 | 75 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Override layer has override paths set to %s", |
4262 | 75 | *override_paths); |
4263 | 75 | } |
4264 | 75 | return VK_SUCCESS; |
4265 | 75 | } |
4266 | | |
4267 | | VkResult loader_scan_for_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers, |
4268 | 7.65k | const struct loader_envvar_all_filters *filters) { |
4269 | 7.65k | VkResult res = VK_SUCCESS; |
4270 | 7.65k | struct loader_layer_list settings_layers = {0}; |
4271 | 7.65k | struct loader_layer_list regular_instance_layers = {0}; |
4272 | 7.65k | bool override_layer_valid = false; |
4273 | 7.65k | char *override_paths = NULL; |
4274 | | |
4275 | 7.65k | bool should_search_for_other_layers = true; |
4276 | 7.65k | res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers); |
4277 | 7.65k | if (VK_SUCCESS != res) { |
4278 | 0 | goto out; |
4279 | 0 | } |
4280 | | |
4281 | | // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the |
4282 | | // output |
4283 | 7.65k | if (!should_search_for_other_layers) { |
4284 | 346 | *instance_layers = settings_layers; |
4285 | 346 | memset(&settings_layers, 0, sizeof(struct loader_layer_list)); |
4286 | 346 | goto out; |
4287 | 346 | } |
4288 | | |
4289 | 7.30k | res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, ®ular_instance_layers); |
4290 | 7.30k | if (VK_SUCCESS != res) { |
4291 | 0 | goto out; |
4292 | 0 | } |
4293 | | |
4294 | | // Remove any extraneous override layers. |
4295 | 7.30k | remove_all_non_valid_override_layers(inst, ®ular_instance_layers); |
4296 | | |
4297 | | // Check to see if the override layer is present, and use it's override paths. |
4298 | 921k | for (uint32_t i = 0; i < regular_instance_layers.count; i++) { |
4299 | 913k | struct loader_layer_properties *prop = ®ular_instance_layers.list[i]; |
4300 | 913k | if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop) && prop->override_paths.count > 0) { |
4301 | 75 | res = get_override_layer_override_paths(inst, prop, &override_paths); |
4302 | 75 | if (VK_SUCCESS != res) { |
4303 | 0 | goto out; |
4304 | 0 | } |
4305 | 75 | break; |
4306 | 75 | } |
4307 | 913k | } |
4308 | | |
4309 | | // Get a list of manifest files for explicit layers |
4310 | 7.30k | res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, ®ular_instance_layers); |
4311 | 7.30k | if (VK_SUCCESS != res) { |
4312 | 0 | goto out; |
4313 | 0 | } |
4314 | | |
4315 | | // Verify any meta-layers in the list are valid and all the component layers are |
4316 | | // actually present in the available layer list |
4317 | 7.30k | res = verify_all_meta_layers(inst, filters, ®ular_instance_layers, &override_layer_valid); |
4318 | 7.30k | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
4319 | 0 | return res; |
4320 | 0 | } |
4321 | | |
4322 | 7.30k | if (override_layer_valid) { |
4323 | 264 | loader_remove_layers_in_blacklist(inst, ®ular_instance_layers); |
4324 | 264 | if (NULL != inst) { |
4325 | 264 | inst->override_layer_present = true; |
4326 | 264 | } |
4327 | 264 | } |
4328 | | |
4329 | | // Remove disabled layers |
4330 | 923k | for (uint32_t i = 0; i < regular_instance_layers.count; ++i) { |
4331 | 916k | if (!loader_layer_is_available(inst, filters, ®ular_instance_layers.list[i])) { |
4332 | 0 | loader_remove_layer_in_list(inst, ®ular_instance_layers, i); |
4333 | 0 | i--; |
4334 | 0 | } |
4335 | 916k | } |
4336 | | |
4337 | 7.30k | res = combine_settings_layers_with_regular_layers(inst, &settings_layers, ®ular_instance_layers, instance_layers); |
4338 | | |
4339 | 7.65k | out: |
4340 | 7.65k | loader_delete_layer_list_and_properties(inst, &settings_layers); |
4341 | 7.65k | loader_delete_layer_list_and_properties(inst, ®ular_instance_layers); |
4342 | | |
4343 | 7.65k | loader_instance_heap_free(inst, override_paths); |
4344 | 7.65k | return res; |
4345 | 7.30k | } |
4346 | | |
4347 | | VkResult loader_scan_for_implicit_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers, |
4348 | 0 | const struct loader_envvar_all_filters *layer_filters) { |
4349 | 0 | VkResult res = VK_SUCCESS; |
4350 | 0 | struct loader_layer_list settings_layers = {0}; |
4351 | 0 | struct loader_layer_list regular_instance_layers = {0}; |
4352 | 0 | bool override_layer_valid = false; |
4353 | 0 | char *override_paths = NULL; |
4354 | 0 | bool implicit_metalayer_present = false; |
4355 | |
|
4356 | 0 | bool should_search_for_other_layers = true; |
4357 | 0 | res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers); |
4358 | 0 | if (VK_SUCCESS != res) { |
4359 | 0 | goto out; |
4360 | 0 | } |
4361 | | |
4362 | | // Remove layers from settings file that are off, are explicit, or are implicit layers that aren't active |
4363 | 0 | for (uint32_t i = 0; i < settings_layers.count; ++i) { |
4364 | 0 | if (settings_layers.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_OFF || |
4365 | 0 | settings_layers.list[i].settings_control_value == LOADER_SETTINGS_LAYER_UNORDERED_LAYER_LOCATION || |
4366 | 0 | (settings_layers.list[i].type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER || |
4367 | 0 | !loader_implicit_layer_is_enabled(inst, layer_filters, &settings_layers.list[i])) { |
4368 | 0 | loader_remove_layer_in_list(inst, &settings_layers, i); |
4369 | 0 | i--; |
4370 | 0 | } |
4371 | 0 | } |
4372 | | |
4373 | | // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the |
4374 | | // output |
4375 | 0 | if (!should_search_for_other_layers) { |
4376 | 0 | *instance_layers = settings_layers; |
4377 | 0 | memset(&settings_layers, 0, sizeof(struct loader_layer_list)); |
4378 | 0 | goto out; |
4379 | 0 | } |
4380 | | |
4381 | 0 | res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, ®ular_instance_layers); |
4382 | 0 | if (VK_SUCCESS != res) { |
4383 | 0 | goto out; |
4384 | 0 | } |
4385 | | |
4386 | | // Remove any extraneous override layers. |
4387 | 0 | remove_all_non_valid_override_layers(inst, ®ular_instance_layers); |
4388 | | |
4389 | | // Check to see if either the override layer is present, or another implicit meta-layer. |
4390 | | // Each of these may require explicit layers to be enabled at this time. |
4391 | 0 | for (uint32_t i = 0; i < regular_instance_layers.count; i++) { |
4392 | 0 | struct loader_layer_properties *prop = ®ular_instance_layers.list[i]; |
4393 | 0 | if (prop->is_override && loader_implicit_layer_is_enabled(inst, layer_filters, prop)) { |
4394 | 0 | override_layer_valid = true; |
4395 | 0 | res = get_override_layer_override_paths(inst, prop, &override_paths); |
4396 | 0 | if (VK_SUCCESS != res) { |
4397 | 0 | goto out; |
4398 | 0 | } |
4399 | 0 | } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { |
4400 | 0 | implicit_metalayer_present = true; |
4401 | 0 | } |
4402 | 0 | } |
4403 | | |
4404 | | // If either the override layer or an implicit meta-layer are present, we need to add |
4405 | | // explicit layer info as well. Not to worry, though, all explicit layers not included |
4406 | | // in the override layer will be removed below in loader_remove_layers_in_blacklist(). |
4407 | 0 | if (override_layer_valid || implicit_metalayer_present) { |
4408 | 0 | res = |
4409 | 0 | loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, ®ular_instance_layers); |
4410 | 0 | if (VK_SUCCESS != res) { |
4411 | 0 | goto out; |
4412 | 0 | } |
4413 | 0 | } |
4414 | | |
4415 | | // Verify any meta-layers in the list are valid and all the component layers are |
4416 | | // actually present in the available layer list |
4417 | 0 | res = verify_all_meta_layers(inst, layer_filters, ®ular_instance_layers, &override_layer_valid); |
4418 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
4419 | 0 | return res; |
4420 | 0 | } |
4421 | | |
4422 | 0 | if (override_layer_valid || implicit_metalayer_present) { |
4423 | 0 | loader_remove_layers_not_in_implicit_meta_layers(inst, ®ular_instance_layers); |
4424 | 0 | if (override_layer_valid && inst != NULL) { |
4425 | 0 | inst->override_layer_present = true; |
4426 | 0 | } |
4427 | 0 | } |
4428 | | |
4429 | | // Remove disabled layers |
4430 | 0 | for (uint32_t i = 0; i < regular_instance_layers.count; ++i) { |
4431 | 0 | if (!loader_implicit_layer_is_enabled(inst, layer_filters, ®ular_instance_layers.list[i])) { |
4432 | 0 | loader_remove_layer_in_list(inst, ®ular_instance_layers, i); |
4433 | 0 | i--; |
4434 | 0 | } |
4435 | 0 | } |
4436 | |
|
4437 | 0 | res = combine_settings_layers_with_regular_layers(inst, &settings_layers, ®ular_instance_layers, instance_layers); |
4438 | |
|
4439 | 0 | out: |
4440 | 0 | loader_delete_layer_list_and_properties(inst, &settings_layers); |
4441 | 0 | loader_delete_layer_list_and_properties(inst, ®ular_instance_layers); |
4442 | |
|
4443 | 0 | loader_instance_heap_free(inst, override_paths); |
4444 | 0 | return res; |
4445 | 0 | } |
4446 | | |
4447 | 0 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) { |
4448 | | // inst is not wrapped |
4449 | 0 | if (inst == VK_NULL_HANDLE) { |
4450 | 0 | return NULL; |
4451 | 0 | } |
4452 | | |
4453 | 0 | VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst; |
4454 | |
|
4455 | 0 | if (disp_table == NULL) return NULL; |
4456 | | |
4457 | 0 | struct loader_instance *loader_inst = loader_get_instance(inst); |
4458 | |
|
4459 | 0 | if (loader_inst->instance_finished_creation) { |
4460 | 0 | disp_table = &loader_inst->terminator_dispatch; |
4461 | 0 | } |
4462 | |
|
4463 | 0 | bool found_name; |
4464 | 0 | void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name); |
4465 | 0 | if (found_name) { |
4466 | 0 | return addr; |
4467 | 0 | } |
4468 | | |
4469 | | // Check if any drivers support the function, and if so, add it to the unknown function list |
4470 | 0 | addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName); |
4471 | 0 | if (NULL != addr) return addr; |
4472 | | |
4473 | | // Don't call down the chain, this would be an infinite loop |
4474 | 0 | loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName); |
4475 | 0 | return NULL; |
4476 | 0 | } |
4477 | | |
4478 | 0 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_terminator(VkInstance inst, const char *pName) { |
4479 | | // Global functions - Do not need a valid instance handle to query |
4480 | 0 | if (!strcmp(pName, "vkGetInstanceProcAddr")) { |
4481 | 0 | return (PFN_vkVoidFunction)loader_gpa_instance_terminator; |
4482 | 0 | } |
4483 | 0 | if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) { |
4484 | 0 | return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator; |
4485 | 0 | } |
4486 | 0 | if (!strcmp(pName, "vkCreateInstance")) { |
4487 | 0 | return (PFN_vkVoidFunction)terminator_CreateInstance; |
4488 | 0 | } |
4489 | | // If a layer is querying pre-instance functions using vkGetInstanceProcAddr, we need to return function pointers that match the |
4490 | | // Vulkan API |
4491 | 0 | if (!strcmp(pName, "vkEnumerateInstanceLayerProperties")) { |
4492 | 0 | return (PFN_vkVoidFunction)terminator_EnumerateInstanceLayerProperties; |
4493 | 0 | } |
4494 | 0 | if (!strcmp(pName, "vkEnumerateInstanceExtensionProperties")) { |
4495 | 0 | return (PFN_vkVoidFunction)terminator_EnumerateInstanceExtensionProperties; |
4496 | 0 | } |
4497 | 0 | if (!strcmp(pName, "vkEnumerateInstanceVersion")) { |
4498 | 0 | return (PFN_vkVoidFunction)terminator_EnumerateInstanceVersion; |
4499 | 0 | } |
4500 | | |
4501 | | // While the spec is very clear that querying vkCreateDevice requires a valid VkInstance, because the loader allowed querying |
4502 | | // with a NULL VkInstance handle for a long enough time, it is impractical to fix this bug in the loader |
4503 | | |
4504 | | // As such, this is a bug to maintain compatibility for the RTSS layer (Riva Tuner Statistics Server) but may |
4505 | | // be depended upon by other layers out in the wild. |
4506 | 0 | if (!strcmp(pName, "vkCreateDevice")) { |
4507 | 0 | return (PFN_vkVoidFunction)terminator_CreateDevice; |
4508 | 0 | } |
4509 | | |
4510 | | // inst is not wrapped |
4511 | 0 | if (inst == VK_NULL_HANDLE) { |
4512 | 0 | return NULL; |
4513 | 0 | } |
4514 | 0 | VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst; |
4515 | |
|
4516 | 0 | if (disp_table == NULL) return NULL; |
4517 | | |
4518 | 0 | struct loader_instance *loader_inst = loader_get_instance(inst); |
4519 | | |
4520 | | // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from |
4521 | | // vkGetInstanceProcAddr This is because VK_EXT_debug_utils is an instance level extension with device level functions, and |
4522 | | // is 'supported' by the loader. |
4523 | | // These functions need a terminator to handle the case of a driver not supporting VK_EXT_debug_utils when there are layers |
4524 | | // present which not check for NULL before calling the function. |
4525 | 0 | if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) { |
4526 | 0 | return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT : NULL; |
4527 | 0 | } |
4528 | 0 | if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) { |
4529 | 0 | return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT : NULL; |
4530 | 0 | } |
4531 | 0 | if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) { |
4532 | 0 | return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT : NULL; |
4533 | 0 | } |
4534 | 0 | if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) { |
4535 | 0 | return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT : NULL; |
4536 | 0 | } |
4537 | 0 | if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) { |
4538 | 0 | return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT |
4539 | 0 | : NULL; |
4540 | 0 | } |
4541 | 0 | if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) { |
4542 | 0 | return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT : NULL; |
4543 | 0 | } |
4544 | 0 | if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) { |
4545 | 0 | return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT : NULL; |
4546 | 0 | } |
4547 | 0 | if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) { |
4548 | 0 | return loader_inst->enabled_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT : NULL; |
4549 | 0 | } |
4550 | | |
4551 | 0 | if (loader_inst->instance_finished_creation) { |
4552 | 0 | disp_table = &loader_inst->terminator_dispatch; |
4553 | 0 | } |
4554 | |
|
4555 | 0 | bool found_name; |
4556 | 0 | void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name); |
4557 | 0 | if (found_name) { |
4558 | 0 | return addr; |
4559 | 0 | } |
4560 | | |
4561 | | // Check if it is an unknown physical device function, to see if any drivers support it. |
4562 | 0 | addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName); |
4563 | 0 | if (addr) { |
4564 | 0 | return addr; |
4565 | 0 | } |
4566 | | |
4567 | | // Assume it is an unknown device function, check to see if any drivers support it. |
4568 | 0 | addr = loader_dev_ext_gpa_term(loader_get_instance(inst), pName); |
4569 | 0 | if (addr) { |
4570 | 0 | return addr; |
4571 | 0 | } |
4572 | | |
4573 | | // Don't call down the chain, this would be an infinite loop |
4574 | 0 | loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpa_instance_terminator() unrecognized name %s", pName); |
4575 | 0 | return NULL; |
4576 | 0 | } |
4577 | | |
4578 | 0 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_terminator(VkDevice device, const char *pName) { |
4579 | 0 | struct loader_device *dev; |
4580 | 0 | struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev); |
4581 | | |
4582 | | // Return this function if a layer above here is asking for the vkGetDeviceProcAddr. |
4583 | | // This is so we can properly intercept any device commands needing a terminator. |
4584 | 0 | if (!strcmp(pName, "vkGetDeviceProcAddr")) { |
4585 | 0 | return (PFN_vkVoidFunction)loader_gpa_device_terminator; |
4586 | 0 | } |
4587 | | |
4588 | | // NOTE: Device Funcs needing Trampoline/Terminator. |
4589 | | // Overrides for device functions needing a trampoline and |
4590 | | // a terminator because certain device entry-points still need to go |
4591 | | // through a terminator before hitting the ICD. This could be for |
4592 | | // several reasons, but the main one is currently unwrapping an |
4593 | | // object before passing the appropriate info along to the ICD. |
4594 | | // This is why we also have to override the direct ICD call to |
4595 | | // vkGetDeviceProcAddr to intercept those calls. |
4596 | | // If the pName is for a 'known' function but isn't available, due to |
4597 | | // the corresponding extension/feature not being enabled, we need to |
4598 | | // return NULL and not call down to the driver's GetDeviceProcAddr. |
4599 | 0 | if (NULL != dev) { |
4600 | 0 | bool found_name = false; |
4601 | 0 | PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName, &found_name); |
4602 | 0 | if (found_name) { |
4603 | 0 | return addr; |
4604 | 0 | } |
4605 | 0 | } |
4606 | | |
4607 | 0 | if (icd_term == NULL) { |
4608 | 0 | return NULL; |
4609 | 0 | } |
4610 | | |
4611 | 0 | return icd_term->dispatch.GetDeviceProcAddr(device, pName); |
4612 | 0 | } |
4613 | | |
4614 | 0 | struct loader_instance *loader_get_instance(const VkInstance instance) { |
4615 | | // look up the loader_instance in our list by comparing dispatch tables, as |
4616 | | // there is no guarantee the instance is still a loader_instance* after any |
4617 | | // layers which wrap the instance object. |
4618 | 0 | const VkLayerInstanceDispatchTable *disp; |
4619 | 0 | struct loader_instance *ptr_instance = (struct loader_instance *)instance; |
4620 | 0 | if (VK_NULL_HANDLE == instance || LOADER_MAGIC_NUMBER != ptr_instance->magic) { |
4621 | 0 | return NULL; |
4622 | 0 | } else { |
4623 | 0 | disp = loader_get_instance_layer_dispatch(instance); |
4624 | 0 | loader_platform_thread_lock_mutex(&loader_global_instance_list_lock); |
4625 | 0 | for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { |
4626 | 0 | if (&inst->disp->layer_inst_disp == disp) { |
4627 | 0 | ptr_instance = inst; |
4628 | 0 | break; |
4629 | 0 | } |
4630 | 0 | } |
4631 | 0 | loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock); |
4632 | 0 | } |
4633 | 0 | return ptr_instance; |
4634 | 0 | } |
4635 | | |
4636 | 0 | loader_platform_dl_handle loader_open_layer_file(const struct loader_instance *inst, struct loader_layer_properties *prop) { |
4637 | 0 | if ((prop->lib_handle = loader_platform_open_library(prop->lib_name)) == NULL) { |
4638 | 0 | loader_handle_load_library_error(inst, prop->lib_name, &prop->lib_status); |
4639 | 0 | } else { |
4640 | 0 | prop->lib_status = LOADER_LAYER_LIB_SUCCESS_LOADED; |
4641 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Loading layer library %s", prop->lib_name); |
4642 | 0 | } |
4643 | |
|
4644 | 0 | return prop->lib_handle; |
4645 | 0 | } |
4646 | | |
4647 | | // Go through the search_list and find any layers which match type. If layer |
4648 | | // type match is found in then add it to ext_list. |
4649 | | // If the layer name is in enabled_layers_env, do not add it to the list, that way it can be ordered alongside the other env-var |
4650 | | // enabled layers |
4651 | | VkResult loader_add_implicit_layers(const struct loader_instance *inst, const char *enabled_layers_env, |
4652 | | const struct loader_envvar_all_filters *filters, struct loader_pointer_layer_list *target_list, |
4653 | | struct loader_pointer_layer_list *expanded_target_list, |
4654 | 0 | const struct loader_layer_list *source_list) { |
4655 | 0 | for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) { |
4656 | 0 | struct loader_layer_properties *prop = &source_list->list[src_layer]; |
4657 | 0 | if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { |
4658 | | // If this layer appears in the enabled_layers_env, don't add it. We will let loader_add_environment_layers handle it |
4659 | 0 | if (NULL == enabled_layers_env || NULL == strstr(enabled_layers_env, prop->info.layerName)) { |
4660 | 0 | VkResult result = loader_add_implicit_layer(inst, prop, filters, target_list, expanded_target_list, source_list); |
4661 | 0 | if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result; |
4662 | 0 | } |
4663 | 0 | } |
4664 | 0 | } |
4665 | 0 | return VK_SUCCESS; |
4666 | 0 | } |
4667 | | |
4668 | 0 | void warn_if_layers_are_older_than_application(struct loader_instance *inst) { |
4669 | 0 | for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) { |
4670 | | // Verify that the layer api version is at least that of the application's request, if not, throw a warning since |
4671 | | // undefined behavior could occur. |
4672 | 0 | struct loader_layer_properties *prop = inst->expanded_activated_layer_list.list[i]; |
4673 | 0 | loader_api_version prop_spec_version = loader_make_version(prop->info.specVersion); |
4674 | 0 | if (!loader_check_version_meets_required(inst->app_api_version, prop_spec_version)) { |
4675 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
4676 | 0 | "Layer %s uses API version %u.%u which is older than the application specified " |
4677 | 0 | "API version of %u.%u. May cause issues.", |
4678 | 0 | prop->info.layerName, prop_spec_version.major, prop_spec_version.minor, inst->app_api_version.major, |
4679 | 0 | inst->app_api_version.minor); |
4680 | 0 | } |
4681 | 0 | } |
4682 | 0 | } |
4683 | | |
4684 | | VkResult loader_enable_instance_layers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo, |
4685 | | const struct loader_layer_list *instance_layers, |
4686 | 0 | const struct loader_envvar_all_filters *layer_filters) { |
4687 | 0 | VkResult res = VK_SUCCESS; |
4688 | 0 | char *enabled_layers_env = NULL; |
4689 | |
|
4690 | 0 | assert(inst && "Cannot have null instance"); |
4691 | |
|
4692 | 0 | if (!loader_init_pointer_layer_list(inst, &inst->app_activated_layer_list)) { |
4693 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
4694 | 0 | "loader_enable_instance_layers: Failed to initialize application version of the layer list"); |
4695 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
4696 | 0 | goto out; |
4697 | 0 | } |
4698 | | |
4699 | 0 | if (!loader_init_pointer_layer_list(inst, &inst->expanded_activated_layer_list)) { |
4700 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
4701 | 0 | "loader_enable_instance_layers: Failed to initialize expanded version of the layer list"); |
4702 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
4703 | 0 | goto out; |
4704 | 0 | } |
4705 | | |
4706 | 0 | if (inst->settings.settings_active) { |
4707 | 0 | res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount, |
4708 | 0 | pCreateInfo->ppEnabledLayerNames, &inst->instance_layer_list, |
4709 | 0 | &inst->app_activated_layer_list, &inst->expanded_activated_layer_list); |
4710 | 0 | warn_if_layers_are_older_than_application(inst); |
4711 | |
|
4712 | 0 | goto out; |
4713 | 0 | } |
4714 | | |
4715 | 0 | enabled_layers_env = loader_getenv(ENABLED_LAYERS_ENV, inst); |
4716 | | |
4717 | | // Add any implicit layers first |
4718 | 0 | res = loader_add_implicit_layers(inst, enabled_layers_env, layer_filters, &inst->app_activated_layer_list, |
4719 | 0 | &inst->expanded_activated_layer_list, instance_layers); |
4720 | 0 | if (res != VK_SUCCESS) { |
4721 | 0 | goto out; |
4722 | 0 | } |
4723 | | |
4724 | | // Add any layers specified via environment variable next |
4725 | 0 | res = loader_add_environment_layers(inst, enabled_layers_env, layer_filters, &inst->app_activated_layer_list, |
4726 | 0 | &inst->expanded_activated_layer_list, instance_layers); |
4727 | 0 | if (res != VK_SUCCESS) { |
4728 | 0 | goto out; |
4729 | 0 | } |
4730 | | |
4731 | | // Add layers specified by the application |
4732 | 0 | res = loader_add_layer_names_to_list(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list, |
4733 | 0 | pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers); |
4734 | |
|
4735 | 0 | warn_if_layers_are_older_than_application(inst); |
4736 | 0 | out: |
4737 | 0 | if (enabled_layers_env != NULL) { |
4738 | 0 | loader_free_getenv(enabled_layers_env, inst); |
4739 | 0 | } |
4740 | |
|
4741 | 0 | return res; |
4742 | 0 | } |
4743 | | |
4744 | | // Determine the layer interface version to use. |
4745 | | bool loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version, |
4746 | 0 | VkNegotiateLayerInterface *interface_struct) { |
4747 | 0 | memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface)); |
4748 | 0 | interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT; |
4749 | 0 | interface_struct->loaderLayerInterfaceVersion = 1; |
4750 | 0 | interface_struct->pNext = NULL; |
4751 | |
|
4752 | 0 | if (fp_negotiate_layer_version != NULL) { |
4753 | | // Layer supports the negotiation API, so call it with the loader's |
4754 | | // latest version supported |
4755 | 0 | interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION; |
4756 | 0 | VkResult result = fp_negotiate_layer_version(interface_struct); |
4757 | |
|
4758 | 0 | if (result != VK_SUCCESS) { |
4759 | | // Layer no longer supports the loader's latest interface version so |
4760 | | // fail loading the Layer |
4761 | 0 | return false; |
4762 | 0 | } |
4763 | 0 | } |
4764 | | |
4765 | 0 | if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) { |
4766 | | // Loader no longer supports the layer's latest interface version so |
4767 | | // fail loading the layer |
4768 | 0 | return false; |
4769 | 0 | } |
4770 | | |
4771 | 0 | return true; |
4772 | 0 | } |
4773 | | |
4774 | | // Every extension that has a loader-defined trampoline needs to be marked as enabled or disabled so that we know whether or |
4775 | | // not to return that trampoline when vkGetDeviceProcAddr is called |
4776 | | void setup_logical_device_enabled_layer_extensions(const struct loader_instance *inst, struct loader_device *dev, |
4777 | | const struct loader_extension_list *icd_exts, |
4778 | 0 | const VkDeviceCreateInfo *pCreateInfo) { |
4779 | | // no enabled extensions, early exit |
4780 | 0 | if (pCreateInfo->ppEnabledExtensionNames == NULL) { |
4781 | 0 | return; |
4782 | 0 | } |
4783 | | // Can only setup debug marker as debug utils is an instance extensions. |
4784 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i) { |
4785 | 0 | if (pCreateInfo->ppEnabledExtensionNames[i] && |
4786 | 0 | !strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { |
4787 | | // Check if its supported by the driver |
4788 | 0 | for (uint32_t j = 0; j < icd_exts->count; ++j) { |
4789 | 0 | if (!strcmp(icd_exts->list[j].extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { |
4790 | 0 | dev->layer_extensions.ext_debug_marker_enabled = true; |
4791 | 0 | } |
4792 | 0 | } |
4793 | | // also check if any layers support it. |
4794 | 0 | for (uint32_t j = 0; j < inst->app_activated_layer_list.count; j++) { |
4795 | 0 | struct loader_layer_properties *layer = inst->app_activated_layer_list.list[j]; |
4796 | 0 | for (uint32_t k = 0; k < layer->device_extension_list.count; k++) { |
4797 | 0 | if (!strcmp(layer->device_extension_list.list[k].props.extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { |
4798 | 0 | dev->layer_extensions.ext_debug_marker_enabled = true; |
4799 | 0 | } |
4800 | 0 | } |
4801 | 0 | } |
4802 | 0 | } |
4803 | 0 | } |
4804 | 0 | } |
4805 | | |
4806 | | VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice, |
4807 | | const VkDeviceCreateInfo *pCreateInfo, |
4808 | | const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, |
4809 | 0 | PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) { |
4810 | 0 | VkResult res; |
4811 | 0 | VkPhysicalDevice internal_device = VK_NULL_HANDLE; |
4812 | 0 | struct loader_device *dev = NULL; |
4813 | 0 | struct loader_instance *inst = NULL; |
4814 | |
|
4815 | 0 | if (instance != VK_NULL_HANDLE) { |
4816 | 0 | inst = loader_get_instance(instance); |
4817 | 0 | internal_device = physicalDevice; |
4818 | 0 | } else { |
4819 | 0 | struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice; |
4820 | 0 | internal_device = phys_dev->phys_dev; |
4821 | 0 | inst = (struct loader_instance *)phys_dev->this_instance; |
4822 | 0 | } |
4823 | | |
4824 | | // Get the physical device (ICD) extensions |
4825 | 0 | struct loader_extension_list icd_exts = {0}; |
4826 | 0 | icd_exts.list = NULL; |
4827 | 0 | res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); |
4828 | 0 | if (VK_SUCCESS != res) { |
4829 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create ICD extension list"); |
4830 | 0 | goto out; |
4831 | 0 | } |
4832 | | |
4833 | 0 | PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL; |
4834 | 0 | if (layerGIPA != NULL) { |
4835 | 0 | enumDeviceExtensionProperties = |
4836 | 0 | (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties"); |
4837 | 0 | } else { |
4838 | 0 | enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties; |
4839 | 0 | } |
4840 | 0 | res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts); |
4841 | 0 | if (res != VK_SUCCESS) { |
4842 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to add extensions to list"); |
4843 | 0 | goto out; |
4844 | 0 | } |
4845 | | |
4846 | | // Make sure requested extensions to be enabled are supported |
4847 | 0 | res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo); |
4848 | 0 | if (res != VK_SUCCESS) { |
4849 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to validate extensions in list"); |
4850 | 0 | goto out; |
4851 | 0 | } |
4852 | | |
4853 | 0 | dev = loader_create_logical_device(inst, pAllocator); |
4854 | 0 | if (dev == NULL) { |
4855 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
4856 | 0 | goto out; |
4857 | 0 | } |
4858 | | |
4859 | 0 | setup_logical_device_enabled_layer_extensions(inst, dev, &icd_exts, pCreateInfo); |
4860 | |
|
4861 | 0 | res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA); |
4862 | 0 | if (res != VK_SUCCESS) { |
4863 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create device chain."); |
4864 | 0 | goto out; |
4865 | 0 | } |
4866 | | |
4867 | 0 | *pDevice = dev->chain_device; |
4868 | | |
4869 | | // Initialize any device extension dispatch entry's from the instance list |
4870 | 0 | loader_init_dispatch_dev_ext(inst, dev); |
4871 | | |
4872 | | // Initialize WSI device extensions as part of core dispatch since loader |
4873 | | // has dedicated trampoline code for these |
4874 | 0 | loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr, |
4875 | 0 | dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice); |
4876 | |
|
4877 | 0 | out: |
4878 | | |
4879 | | // Failure cleanup |
4880 | 0 | if (VK_SUCCESS != res) { |
4881 | 0 | if (NULL != dev) { |
4882 | | // Find the icd_term this device belongs to then remove it from that icd_term. |
4883 | | // Need to iterate the linked lists and remove the device from it. Don't delete |
4884 | | // the device here since it may not have been added to the icd_term and there |
4885 | | // are other allocations attached to it. |
4886 | 0 | struct loader_icd_term *icd_term = inst->icd_terms; |
4887 | 0 | bool found = false; |
4888 | 0 | while (!found && NULL != icd_term) { |
4889 | 0 | struct loader_device *cur_dev = icd_term->logical_device_list; |
4890 | 0 | struct loader_device *prev_dev = NULL; |
4891 | 0 | while (NULL != cur_dev) { |
4892 | 0 | if (cur_dev == dev) { |
4893 | 0 | if (cur_dev == icd_term->logical_device_list) { |
4894 | 0 | icd_term->logical_device_list = cur_dev->next; |
4895 | 0 | } else if (prev_dev) { |
4896 | 0 | prev_dev->next = cur_dev->next; |
4897 | 0 | } |
4898 | |
|
4899 | 0 | found = true; |
4900 | 0 | break; |
4901 | 0 | } |
4902 | 0 | prev_dev = cur_dev; |
4903 | 0 | cur_dev = cur_dev->next; |
4904 | 0 | } |
4905 | 0 | icd_term = icd_term->next; |
4906 | 0 | } |
4907 | | // Now destroy the device and the allocations associated with it. |
4908 | 0 | loader_destroy_logical_device(dev, pAllocator); |
4909 | 0 | } |
4910 | 0 | } |
4911 | |
|
4912 | 0 | if (NULL != icd_exts.list) { |
4913 | 0 | loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts); |
4914 | 0 | } |
4915 | 0 | return res; |
4916 | 0 | } |
4917 | | |
4918 | | VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator, |
4919 | 0 | PFN_vkDestroyDevice destroyFunction) { |
4920 | 0 | struct loader_device *dev; |
4921 | |
|
4922 | 0 | if (device == VK_NULL_HANDLE) { |
4923 | 0 | return; |
4924 | 0 | } |
4925 | | |
4926 | 0 | struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev); |
4927 | |
|
4928 | 0 | destroyFunction(device, pAllocator); |
4929 | 0 | if (NULL != dev) { |
4930 | 0 | dev->chain_device = NULL; |
4931 | 0 | dev->icd_device = NULL; |
4932 | 0 | loader_remove_logical_device(icd_term, dev, pAllocator); |
4933 | 0 | } |
4934 | 0 | } |
4935 | | |
4936 | | // Given the list of layers to activate in the loader_instance |
4937 | | // structure. This function will add a VkLayerInstanceCreateInfo |
4938 | | // structure to the VkInstanceCreateInfo.pNext pointer. |
4939 | | // Each activated layer will have it's own VkLayerInstanceLink |
4940 | | // structure that tells the layer what Get*ProcAddr to call to |
4941 | | // get function pointers to the next layer down. |
4942 | | // Once the chain info has been created this function will |
4943 | | // execute the CreateInstance call chain. Each layer will |
4944 | | // then have an opportunity in it's CreateInstance function |
4945 | | // to setup it's dispatch table when the lower layer returns |
4946 | | // successfully. |
4947 | | // Each layer can wrap or not-wrap the returned VkInstance object |
4948 | | // as it sees fit. |
4949 | | // The instance chain is terminated by a loader function |
4950 | | // that will call CreateInstance on all available ICD's and |
4951 | | // cache those VkInstance objects for future use. |
4952 | | VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, |
4953 | 0 | struct loader_instance *inst, VkInstance *created_instance) { |
4954 | 0 | uint32_t num_activated_layers = 0; |
4955 | 0 | struct activated_layer_info *activated_layers = NULL; |
4956 | 0 | VkLayerInstanceCreateInfo chain_info; |
4957 | 0 | VkLayerInstanceLink *layer_instance_link_info = NULL; |
4958 | 0 | VkInstanceCreateInfo loader_create_info; |
4959 | 0 | VkResult res; |
4960 | |
|
4961 | 0 | PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_terminator; |
4962 | 0 | PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_terminator; |
4963 | 0 | PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_terminator; |
4964 | 0 | PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_terminator; |
4965 | 0 | PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_terminator; |
4966 | |
|
4967 | 0 | memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo)); |
4968 | |
|
4969 | 0 | if (inst->expanded_activated_layer_list.count > 0) { |
4970 | 0 | chain_info.u.pLayerInfo = NULL; |
4971 | 0 | chain_info.pNext = pCreateInfo->pNext; |
4972 | 0 | chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO; |
4973 | 0 | chain_info.function = VK_LAYER_LINK_INFO; |
4974 | 0 | loader_create_info.pNext = &chain_info; |
4975 | |
|
4976 | 0 | layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count); |
4977 | 0 | if (!layer_instance_link_info) { |
4978 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
4979 | 0 | "loader_create_instance_chain: Failed to alloc Instance objects for layer"); |
4980 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
4981 | 0 | } |
4982 | | |
4983 | 0 | activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count); |
4984 | 0 | if (!activated_layers) { |
4985 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
4986 | 0 | "loader_create_instance_chain: Failed to alloc activated layer storage array"); |
4987 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
4988 | 0 | } |
4989 | | |
4990 | | // Create instance chain of enabled layers |
4991 | 0 | for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) { |
4992 | 0 | struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i]; |
4993 | 0 | loader_platform_dl_handle lib_handle; |
4994 | | |
4995 | | // Skip it if a Layer with the same name has been already successfully activated |
4996 | 0 | if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) { |
4997 | 0 | continue; |
4998 | 0 | } |
4999 | | |
5000 | 0 | lib_handle = loader_open_layer_file(inst, layer_prop); |
5001 | 0 | if (layer_prop->lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) { |
5002 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
5003 | 0 | } |
5004 | 0 | if (!lib_handle) { |
5005 | 0 | continue; |
5006 | 0 | } |
5007 | | |
5008 | 0 | if (NULL == layer_prop->functions.negotiate_layer_interface) { |
5009 | 0 | PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL; |
5010 | 0 | bool functions_in_interface = false; |
5011 | 0 | if (!layer_prop->functions.str_negotiate_interface || strlen(layer_prop->functions.str_negotiate_interface) == 0) { |
5012 | 0 | negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address( |
5013 | 0 | lib_handle, "vkNegotiateLoaderLayerInterfaceVersion"); |
5014 | 0 | } else { |
5015 | 0 | negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address( |
5016 | 0 | lib_handle, layer_prop->functions.str_negotiate_interface); |
5017 | 0 | } |
5018 | | |
5019 | | // If we can negotiate an interface version, then we can also |
5020 | | // get everything we need from the one function call, so try |
5021 | | // that first, and see if we can get all the function pointers |
5022 | | // necessary from that one call. |
5023 | 0 | if (NULL != negotiate_interface) { |
5024 | 0 | layer_prop->functions.negotiate_layer_interface = negotiate_interface; |
5025 | |
|
5026 | 0 | VkNegotiateLayerInterface interface_struct; |
5027 | |
|
5028 | 0 | if (loader_get_layer_interface_version(negotiate_interface, &interface_struct)) { |
5029 | | // Go ahead and set the properties version to the |
5030 | | // correct value. |
5031 | 0 | layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion; |
5032 | | |
5033 | | // If the interface is 2 or newer, we have access to the |
5034 | | // new GetPhysicalDeviceProcAddr function, so grab it, |
5035 | | // and the other necessary functions, from the |
5036 | | // structure. |
5037 | 0 | if (interface_struct.loaderLayerInterfaceVersion > 1) { |
5038 | 0 | cur_gipa = interface_struct.pfnGetInstanceProcAddr; |
5039 | 0 | cur_gdpa = interface_struct.pfnGetDeviceProcAddr; |
5040 | 0 | cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr; |
5041 | 0 | if (cur_gipa != NULL) { |
5042 | | // We've set the functions, so make sure we |
5043 | | // don't do the unnecessary calls later. |
5044 | 0 | functions_in_interface = true; |
5045 | 0 | } |
5046 | 0 | } |
5047 | 0 | } |
5048 | 0 | } |
5049 | |
|
5050 | 0 | if (!functions_in_interface) { |
5051 | 0 | if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) { |
5052 | 0 | if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) { |
5053 | 0 | cur_gipa = |
5054 | 0 | (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr"); |
5055 | 0 | layer_prop->functions.get_instance_proc_addr = cur_gipa; |
5056 | |
|
5057 | 0 | if (NULL == cur_gipa) { |
5058 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
5059 | 0 | "loader_create_instance_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\"", |
5060 | 0 | layer_prop->lib_name); |
5061 | 0 | continue; |
5062 | 0 | } |
5063 | 0 | } else { |
5064 | 0 | cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, |
5065 | 0 | layer_prop->functions.str_gipa); |
5066 | |
|
5067 | 0 | if (NULL == cur_gipa) { |
5068 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
5069 | 0 | "loader_create_instance_chain: Failed to find \'%s\' in layer \"%s\"", |
5070 | 0 | layer_prop->functions.str_gipa, layer_prop->lib_name); |
5071 | 0 | continue; |
5072 | 0 | } |
5073 | 0 | } |
5074 | 0 | } |
5075 | 0 | } |
5076 | 0 | } |
5077 | | |
5078 | 0 | layer_instance_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo; |
5079 | 0 | layer_instance_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = next_gipa; |
5080 | 0 | layer_instance_link_info[num_activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa; |
5081 | 0 | next_gipa = cur_gipa; |
5082 | 0 | if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) { |
5083 | 0 | layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa; |
5084 | 0 | next_gpdpa = cur_gpdpa; |
5085 | 0 | } |
5086 | 0 | if (layer_prop->interface_version > 1 && cur_gipa != NULL) { |
5087 | 0 | layer_prop->functions.get_instance_proc_addr = cur_gipa; |
5088 | 0 | } |
5089 | 0 | if (layer_prop->interface_version > 1 && cur_gdpa != NULL) { |
5090 | 0 | layer_prop->functions.get_device_proc_addr = cur_gdpa; |
5091 | 0 | } |
5092 | |
|
5093 | 0 | chain_info.u.pLayerInfo = &layer_instance_link_info[num_activated_layers]; |
5094 | |
|
5095 | 0 | res = fixup_library_binary_path(inst, &(layer_prop->lib_name), layer_prop->lib_handle, cur_gipa); |
5096 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
5097 | 0 | return res; |
5098 | 0 | } |
5099 | | |
5100 | 0 | activated_layers[num_activated_layers].name = layer_prop->info.layerName; |
5101 | 0 | activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name; |
5102 | 0 | activated_layers[num_activated_layers].library = layer_prop->lib_name; |
5103 | 0 | activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER); |
5104 | 0 | activated_layers[num_activated_layers].enabled_by_what = layer_prop->enabled_by_what; |
5105 | 0 | if (activated_layers[num_activated_layers].is_implicit) { |
5106 | 0 | activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name; |
5107 | 0 | activated_layers[num_activated_layers].enable_name_env = layer_prop->enable_env_var.name; |
5108 | 0 | activated_layers[num_activated_layers].enable_value_env = layer_prop->enable_env_var.value; |
5109 | 0 | } |
5110 | |
|
5111 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Insert instance layer \"%s\" (%s)", |
5112 | 0 | layer_prop->info.layerName, layer_prop->lib_name); |
5113 | |
|
5114 | 0 | num_activated_layers++; |
5115 | 0 | } |
5116 | 0 | } |
5117 | | |
5118 | | // Make sure each layer requested by the application was actually loaded |
5119 | 0 | for (uint32_t exp = 0; exp < inst->expanded_activated_layer_list.count; ++exp) { |
5120 | 0 | struct loader_layer_properties *exp_layer_prop = inst->expanded_activated_layer_list.list[exp]; |
5121 | 0 | bool found = false; |
5122 | 0 | for (uint32_t act = 0; act < num_activated_layers; ++act) { |
5123 | 0 | if (!strcmp(activated_layers[act].name, exp_layer_prop->info.layerName)) { |
5124 | 0 | found = true; |
5125 | 0 | break; |
5126 | 0 | } |
5127 | 0 | } |
5128 | | // If it wasn't found, we want to at least log an error. However, if it was enabled by the application directly, |
5129 | | // we want to return a bad layer error. |
5130 | 0 | if (!found) { |
5131 | 0 | bool app_requested = false; |
5132 | 0 | for (uint32_t act = 0; act < pCreateInfo->enabledLayerCount; ++act) { |
5133 | 0 | if (!strcmp(pCreateInfo->ppEnabledLayerNames[act], exp_layer_prop->info.layerName)) { |
5134 | 0 | app_requested = true; |
5135 | 0 | break; |
5136 | 0 | } |
5137 | 0 | } |
5138 | 0 | VkFlags log_flag = VULKAN_LOADER_LAYER_BIT; |
5139 | 0 | char ending = '.'; |
5140 | 0 | if (app_requested) { |
5141 | 0 | log_flag |= VULKAN_LOADER_ERROR_BIT; |
5142 | 0 | ending = '!'; |
5143 | 0 | } else { |
5144 | 0 | log_flag |= VULKAN_LOADER_INFO_BIT; |
5145 | 0 | } |
5146 | 0 | switch (exp_layer_prop->lib_status) { |
5147 | 0 | case LOADER_LAYER_LIB_NOT_LOADED: |
5148 | 0 | loader_log(inst, log_flag, 0, "Requested layer \"%s\" was not loaded%c", exp_layer_prop->info.layerName, |
5149 | 0 | ending); |
5150 | 0 | break; |
5151 | 0 | case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: { |
5152 | 0 | loader_log(inst, log_flag, 0, "Requested layer \"%s\" was wrong bit-type%c", exp_layer_prop->info.layerName, |
5153 | 0 | ending); |
5154 | 0 | break; |
5155 | 0 | } |
5156 | 0 | case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD: |
5157 | 0 | loader_log(inst, log_flag, 0, "Requested layer \"%s\" failed to load%c", exp_layer_prop->info.layerName, |
5158 | 0 | ending); |
5159 | 0 | break; |
5160 | 0 | case LOADER_LAYER_LIB_SUCCESS_LOADED: |
5161 | 0 | case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY: |
5162 | | // Shouldn't be able to reach this but if it is, best to report a debug |
5163 | 0 | loader_log(inst, log_flag, 0, |
5164 | 0 | "Shouldn't reach this. A valid version of requested layer %s was loaded but was not found in the " |
5165 | 0 | "list of activated layers%c", |
5166 | 0 | exp_layer_prop->info.layerName, ending); |
5167 | 0 | break; |
5168 | 0 | } |
5169 | 0 | if (app_requested) { |
5170 | 0 | return VK_ERROR_LAYER_NOT_PRESENT; |
5171 | 0 | } |
5172 | 0 | } |
5173 | 0 | } |
5174 | | |
5175 | 0 | VkLoaderFeatureFlags feature_flags = 0; |
5176 | | #if defined(_WIN32) |
5177 | | feature_flags = windows_initialize_dxgi(); |
5178 | | #endif |
5179 | | |
5180 | | // The following line of code is actually invalid at least according to the Vulkan spec with header update 1.2.193 and onwards. |
5181 | | // The update required calls to vkGetInstanceProcAddr querying "global" functions (which includes vkCreateInstance) to pass NULL |
5182 | | // for the instance parameter. Because it wasn't required to be NULL before, there may be layers which expect the loader's |
5183 | | // behavior of passing a non-NULL value into vkGetInstanceProcAddr. |
5184 | | // In an abundance of caution, the incorrect code remains as is, with a big comment to indicate that its wrong |
5185 | 0 | PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance"); |
5186 | 0 | if (fpCreateInstance) { |
5187 | 0 | VkLayerInstanceCreateInfo instance_dispatch; |
5188 | 0 | instance_dispatch.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO; |
5189 | 0 | instance_dispatch.pNext = loader_create_info.pNext; |
5190 | 0 | instance_dispatch.function = VK_LOADER_DATA_CALLBACK; |
5191 | 0 | instance_dispatch.u.pfnSetInstanceLoaderData = vkSetInstanceDispatch; |
5192 | |
|
5193 | 0 | VkLayerInstanceCreateInfo device_callback; |
5194 | 0 | device_callback.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO; |
5195 | 0 | device_callback.pNext = &instance_dispatch; |
5196 | 0 | device_callback.function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK; |
5197 | 0 | device_callback.u.layerDevice.pfnLayerCreateDevice = loader_layer_create_device; |
5198 | 0 | device_callback.u.layerDevice.pfnLayerDestroyDevice = loader_layer_destroy_device; |
5199 | |
|
5200 | 0 | VkLayerInstanceCreateInfo loader_features; |
5201 | 0 | loader_features.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO; |
5202 | 0 | loader_features.pNext = &device_callback; |
5203 | 0 | loader_features.function = VK_LOADER_FEATURES; |
5204 | 0 | loader_features.u.loaderFeatures = feature_flags; |
5205 | |
|
5206 | 0 | loader_create_info.pNext = &loader_features; |
5207 | | |
5208 | | // If layer debugging is enabled, let's print out the full callstack with layers in their |
5209 | | // defined order. |
5210 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "vkCreateInstance layer callstack setup to:"); |
5211 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Application>"); |
5212 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||"); |
5213 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Loader>"); |
5214 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||"); |
5215 | 0 | for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) { |
5216 | 0 | uint32_t index = num_activated_layers - cur_layer - 1; |
5217 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " %s", activated_layers[index].name); |
5218 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Type: %s", |
5219 | 0 | activated_layers[index].is_implicit ? "Implicit" : "Explicit"); |
5220 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Enabled By: %s", |
5221 | 0 | get_enabled_by_what_str(activated_layers[index].enabled_by_what)); |
5222 | 0 | if (activated_layers[index].is_implicit) { |
5223 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Disable Env Var: %s", |
5224 | 0 | activated_layers[index].disable_env); |
5225 | 0 | if (activated_layers[index].enable_name_env) { |
5226 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, |
5227 | 0 | " This layer was enabled because Env Var %s was set to Value %s", |
5228 | 0 | activated_layers[index].enable_name_env, activated_layers[index].enable_value_env); |
5229 | 0 | } |
5230 | 0 | } |
5231 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Manifest: %s", activated_layers[index].manifest); |
5232 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Library: %s", activated_layers[index].library); |
5233 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||"); |
5234 | 0 | } |
5235 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Drivers>"); |
5236 | |
|
5237 | 0 | res = fpCreateInstance(&loader_create_info, pAllocator, created_instance); |
5238 | 0 | } else { |
5239 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_instance_chain: Failed to find \'vkCreateInstance\'"); |
5240 | | // Couldn't find CreateInstance function! |
5241 | 0 | res = VK_ERROR_INITIALIZATION_FAILED; |
5242 | 0 | } |
5243 | |
|
5244 | 0 | if (res == VK_SUCCESS) { |
5245 | | // Copy the current disp table into the terminator_dispatch table so we can use it in loader_gpa_instance_terminator() |
5246 | 0 | memcpy(&inst->terminator_dispatch, &inst->disp->layer_inst_disp, sizeof(VkLayerInstanceDispatchTable)); |
5247 | |
|
5248 | 0 | loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance); |
5249 | 0 | inst->instance = *created_instance; |
5250 | |
|
5251 | 0 | if (pCreateInfo->enabledLayerCount > 0 && pCreateInfo->ppEnabledLayerNames != NULL) { |
5252 | 0 | res = create_string_list(inst, pCreateInfo->enabledLayerCount, &inst->enabled_layer_names); |
5253 | 0 | if (res != VK_SUCCESS) { |
5254 | 0 | return res; |
5255 | 0 | } |
5256 | | |
5257 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledLayerCount; ++i) { |
5258 | 0 | res = copy_str_to_string_list(inst, &inst->enabled_layer_names, pCreateInfo->ppEnabledLayerNames[i], |
5259 | 0 | strlen(pCreateInfo->ppEnabledLayerNames[i])); |
5260 | 0 | if (res != VK_SUCCESS) return res; |
5261 | 0 | } |
5262 | 0 | } |
5263 | 0 | } |
5264 | | |
5265 | 0 | return res; |
5266 | 0 | } |
5267 | | |
5268 | 0 | void loader_activate_instance_layer_extensions(struct loader_instance *inst, VkInstance created_inst) { |
5269 | 0 | loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, inst->disp->layer_inst_disp.GetInstanceProcAddr, |
5270 | 0 | created_inst); |
5271 | 0 | } |
5272 | | |
5273 | | #if defined(__APPLE__) |
5274 | | VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo, |
5275 | | const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst, |
5276 | | struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer, |
5277 | | PFN_vkGetDeviceProcAddr *layerNextGDPA) __attribute__((optnone)) { |
5278 | | #else |
5279 | | VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo, |
5280 | | const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst, |
5281 | | struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer, |
5282 | 0 | PFN_vkGetDeviceProcAddr *layerNextGDPA) { |
5283 | 0 | #endif |
5284 | 0 | uint32_t num_activated_layers = 0; |
5285 | 0 | struct activated_layer_info *activated_layers = NULL; |
5286 | 0 | VkLayerDeviceLink *layer_device_link_info; |
5287 | 0 | VkLayerDeviceCreateInfo chain_info; |
5288 | 0 | VkDeviceCreateInfo loader_create_info; |
5289 | 0 | VkDeviceGroupDeviceCreateInfo *original_device_group_create_info_struct = NULL; |
5290 | 0 | VkResult res; |
5291 | |
|
5292 | 0 | PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_terminator; |
5293 | 0 | PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_terminator; |
5294 | |
|
5295 | 0 | memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo)); |
5296 | |
|
5297 | 0 | if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames != NULL) { |
5298 | 0 | bool invalid_device_layer_usage = false; |
5299 | |
|
5300 | 0 | if (loader_create_info.enabledLayerCount != inst->enabled_layer_names.count && loader_create_info.enabledLayerCount > 0) { |
5301 | 0 | invalid_device_layer_usage = true; |
5302 | 0 | } else if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames == NULL) { |
5303 | 0 | invalid_device_layer_usage = true; |
5304 | 0 | } else if (loader_create_info.enabledLayerCount == 0 && loader_create_info.ppEnabledLayerNames != NULL) { |
5305 | 0 | invalid_device_layer_usage = true; |
5306 | 0 | } else if (inst->enabled_layer_names.list != NULL) { |
5307 | 0 | for (uint32_t i = 0; i < loader_create_info.enabledLayerCount; i++) { |
5308 | 0 | const char *device_layer_names = loader_create_info.ppEnabledLayerNames[i]; |
5309 | |
|
5310 | 0 | if (strcmp(device_layer_names, inst->enabled_layer_names.list[i]) != 0) { |
5311 | 0 | invalid_device_layer_usage = true; |
5312 | 0 | break; |
5313 | 0 | } |
5314 | 0 | } |
5315 | 0 | } |
5316 | |
|
5317 | 0 | if (invalid_device_layer_usage) { |
5318 | 0 | loader_log( |
5319 | 0 | inst, VULKAN_LOADER_WARN_BIT, 0, |
5320 | 0 | "loader_create_device_chain: Using deprecated and ignored 'ppEnabledLayerNames' member of 'VkDeviceCreateInfo' " |
5321 | 0 | "when creating a Vulkan device."); |
5322 | 0 | } |
5323 | 0 | } |
5324 | | |
5325 | | // Before we continue, we need to find out if the KHR_device_group extension is in the enabled list. If it is, we then |
5326 | | // need to look for the corresponding VkDeviceGroupDeviceCreateInfo struct in the device list. This is because we |
5327 | | // need to replace all the incoming physical device values (which are really loader trampoline physical device values) |
5328 | | // with the layer/ICD version. |
5329 | 0 | { |
5330 | 0 | VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext; |
5331 | 0 | VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info; |
5332 | 0 | while (NULL != pNext) { |
5333 | 0 | if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) { |
5334 | 0 | VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext; |
5335 | 0 | if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) { |
5336 | 0 | VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo)); |
5337 | 0 | VkPhysicalDevice *phys_dev_array = NULL; |
5338 | 0 | if (NULL == temp_struct) { |
5339 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
5340 | 0 | } |
5341 | 0 | memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo)); |
5342 | 0 | phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount); |
5343 | 0 | if (NULL == phys_dev_array) { |
5344 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
5345 | 0 | } |
5346 | | |
5347 | | // Before calling down, replace the incoming physical device values (which are really loader trampoline |
5348 | | // physical devices) with the next layer (or possibly even the terminator) physical device values. |
5349 | 0 | struct loader_physical_device_tramp *cur_tramp; |
5350 | 0 | for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) { |
5351 | 0 | cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev]; |
5352 | 0 | phys_dev_array[phys_dev] = cur_tramp->phys_dev; |
5353 | 0 | } |
5354 | 0 | temp_struct->pPhysicalDevices = phys_dev_array; |
5355 | |
|
5356 | 0 | original_device_group_create_info_struct = (VkDeviceGroupDeviceCreateInfo *)pPrev->pNext; |
5357 | | |
5358 | | // Replace the old struct in the pNext chain with this one. |
5359 | 0 | pPrev->pNext = (VkBaseOutStructure *)temp_struct; |
5360 | 0 | } |
5361 | 0 | break; |
5362 | 0 | } |
5363 | | |
5364 | 0 | pPrev = pNext; |
5365 | 0 | pNext = pNext->pNext; |
5366 | 0 | } |
5367 | 0 | } |
5368 | 0 | if (inst->expanded_activated_layer_list.count > 0) { |
5369 | 0 | layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * inst->expanded_activated_layer_list.count); |
5370 | 0 | if (!layer_device_link_info) { |
5371 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5372 | 0 | "loader_create_device_chain: Failed to alloc Device objects for layer. Skipping Layer."); |
5373 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
5374 | 0 | } |
5375 | | |
5376 | 0 | activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count); |
5377 | 0 | if (!activated_layers) { |
5378 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5379 | 0 | "loader_create_device_chain: Failed to alloc activated layer storage array"); |
5380 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
5381 | 0 | } |
5382 | | |
5383 | 0 | chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO; |
5384 | 0 | chain_info.function = VK_LAYER_LINK_INFO; |
5385 | 0 | chain_info.u.pLayerInfo = NULL; |
5386 | 0 | chain_info.pNext = loader_create_info.pNext; |
5387 | 0 | loader_create_info.pNext = &chain_info; |
5388 | | |
5389 | | // Create instance chain of enabled layers |
5390 | 0 | for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) { |
5391 | 0 | struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i]; |
5392 | 0 | loader_platform_dl_handle lib_handle = layer_prop->lib_handle; |
5393 | | |
5394 | | // Skip it if a Layer with the same name has been already successfully activated |
5395 | 0 | if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) { |
5396 | 0 | continue; |
5397 | 0 | } |
5398 | | |
5399 | | // Skip the layer if the handle is NULL - this is likely because the library failed to load but wasn't removed from |
5400 | | // the list. |
5401 | 0 | if (!lib_handle) { |
5402 | 0 | continue; |
5403 | 0 | } |
5404 | | |
5405 | | // The Get*ProcAddr pointers will already be filled in if they were received from either the json file or the |
5406 | | // version negotiation |
5407 | 0 | if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) { |
5408 | 0 | if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) { |
5409 | 0 | fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr"); |
5410 | 0 | layer_prop->functions.get_instance_proc_addr = fpGIPA; |
5411 | 0 | } else |
5412 | 0 | fpGIPA = |
5413 | 0 | (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa); |
5414 | 0 | if (!fpGIPA) { |
5415 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
5416 | 0 | "loader_create_device_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\". " |
5417 | 0 | "Skipping layer.", |
5418 | 0 | layer_prop->lib_name); |
5419 | 0 | continue; |
5420 | 0 | } |
5421 | 0 | } |
5422 | | |
5423 | 0 | if (fpGIPA == callingLayer) { |
5424 | 0 | if (layerNextGDPA != NULL) { |
5425 | 0 | *layerNextGDPA = nextGDPA; |
5426 | 0 | } |
5427 | | // Break here because if fpGIPA is the same as callingLayer, that means a layer is trying to create a device, |
5428 | | // and once we don't want to continue any further as the next layer will be the calling layer |
5429 | 0 | break; |
5430 | 0 | } |
5431 | | |
5432 | 0 | if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) { |
5433 | 0 | if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) { |
5434 | 0 | fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr"); |
5435 | 0 | layer_prop->functions.get_device_proc_addr = fpGDPA; |
5436 | 0 | } else |
5437 | 0 | fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa); |
5438 | 0 | if (!fpGDPA) { |
5439 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, |
5440 | 0 | "Failed to find vkGetDeviceProcAddr in layer \"%s\"", layer_prop->lib_name); |
5441 | 0 | continue; |
5442 | 0 | } |
5443 | 0 | } |
5444 | | |
5445 | 0 | layer_device_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo; |
5446 | 0 | layer_device_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = nextGIPA; |
5447 | 0 | layer_device_link_info[num_activated_layers].pfnNextGetDeviceProcAddr = nextGDPA; |
5448 | 0 | chain_info.u.pLayerInfo = &layer_device_link_info[num_activated_layers]; |
5449 | 0 | nextGIPA = fpGIPA; |
5450 | 0 | nextGDPA = fpGDPA; |
5451 | |
|
5452 | 0 | activated_layers[num_activated_layers].name = layer_prop->info.layerName; |
5453 | 0 | activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name; |
5454 | 0 | activated_layers[num_activated_layers].library = layer_prop->lib_name; |
5455 | 0 | activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER); |
5456 | 0 | activated_layers[num_activated_layers].enabled_by_what = layer_prop->enabled_by_what; |
5457 | 0 | if (activated_layers[num_activated_layers].is_implicit) { |
5458 | 0 | activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name; |
5459 | 0 | } |
5460 | |
|
5461 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Inserted device layer \"%s\" (%s)", |
5462 | 0 | layer_prop->info.layerName, layer_prop->lib_name); |
5463 | |
|
5464 | 0 | num_activated_layers++; |
5465 | 0 | } |
5466 | 0 | } |
5467 | | |
5468 | 0 | VkDevice created_device = (VkDevice)dev; |
5469 | 0 | PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice"); |
5470 | 0 | if (fpCreateDevice) { |
5471 | 0 | VkLayerDeviceCreateInfo create_info_disp; |
5472 | |
|
5473 | 0 | create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO; |
5474 | 0 | create_info_disp.function = VK_LOADER_DATA_CALLBACK; |
5475 | |
|
5476 | 0 | create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch; |
5477 | | |
5478 | | // If layer debugging is enabled, let's print out the full callstack with layers in their |
5479 | | // defined order. |
5480 | 0 | uint32_t layer_driver_bits = VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT; |
5481 | 0 | loader_log(inst, layer_driver_bits, 0, "vkCreateDevice layer callstack setup to:"); |
5482 | 0 | loader_log(inst, layer_driver_bits, 0, " <Application>"); |
5483 | 0 | loader_log(inst, layer_driver_bits, 0, " ||"); |
5484 | 0 | loader_log(inst, layer_driver_bits, 0, " <Loader>"); |
5485 | 0 | loader_log(inst, layer_driver_bits, 0, " ||"); |
5486 | 0 | for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) { |
5487 | 0 | uint32_t index = num_activated_layers - cur_layer - 1; |
5488 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " %s", activated_layers[index].name); |
5489 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Type: %s", |
5490 | 0 | activated_layers[index].is_implicit ? "Implicit" : "Explicit"); |
5491 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Enabled By: %s", |
5492 | 0 | get_enabled_by_what_str(activated_layers[index].enabled_by_what)); |
5493 | 0 | if (activated_layers[index].is_implicit) { |
5494 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Disable Env Var: %s", |
5495 | 0 | activated_layers[index].disable_env); |
5496 | 0 | } |
5497 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Manifest: %s", activated_layers[index].manifest); |
5498 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Library: %s", activated_layers[index].library); |
5499 | 0 | loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||"); |
5500 | 0 | } |
5501 | 0 | loader_log(inst, layer_driver_bits, 0, " <Device>"); |
5502 | 0 | create_info_disp.pNext = loader_create_info.pNext; |
5503 | 0 | loader_create_info.pNext = &create_info_disp; |
5504 | 0 | res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device); |
5505 | 0 | if (res != VK_SUCCESS) { |
5506 | 0 | return res; |
5507 | 0 | } |
5508 | 0 | dev->chain_device = created_device; |
5509 | | |
5510 | | // Because we changed the pNext chain to use our own VkDeviceGroupDeviceCreateInfo, we need to fixup the chain to |
5511 | | // point back at the original VkDeviceGroupDeviceCreateInfo. |
5512 | 0 | VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext; |
5513 | 0 | VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info; |
5514 | 0 | while (NULL != pNext) { |
5515 | 0 | if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) { |
5516 | 0 | VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext; |
5517 | 0 | if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) { |
5518 | 0 | pPrev->pNext = (VkBaseOutStructure *)original_device_group_create_info_struct; |
5519 | 0 | } |
5520 | 0 | break; |
5521 | 0 | } |
5522 | | |
5523 | 0 | pPrev = pNext; |
5524 | 0 | pNext = pNext->pNext; |
5525 | 0 | } |
5526 | |
|
5527 | 0 | } else { |
5528 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5529 | 0 | "loader_create_device_chain: Failed to find \'vkCreateDevice\' in layers or ICD"); |
5530 | | // Couldn't find CreateDevice function! |
5531 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
5532 | 0 | } |
5533 | | |
5534 | | // Initialize device dispatch table |
5535 | 0 | loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device); |
5536 | | // Initialize the dispatch table to functions which need terminators |
5537 | | // These functions point directly to the driver, not the terminator functions |
5538 | 0 | init_extension_device_proc_terminator_dispatch(dev); |
5539 | |
|
5540 | 0 | return res; |
5541 | 0 | } |
5542 | | |
5543 | | VkResult loader_validate_layers(const struct loader_instance *inst, const uint32_t layer_count, |
5544 | 7.65k | const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) { |
5545 | 7.65k | struct loader_layer_properties *prop; |
5546 | | |
5547 | 7.65k | if (layer_count > 0 && ppEnabledLayerNames == NULL) { |
5548 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5549 | 0 | "loader_validate_layers: ppEnabledLayerNames is NULL but enabledLayerCount is greater than zero"); |
5550 | 0 | return VK_ERROR_LAYER_NOT_PRESENT; |
5551 | 0 | } |
5552 | | |
5553 | 8.78k | for (uint32_t i = 0; i < layer_count; i++) { |
5554 | 7.65k | VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]); |
5555 | 7.65k | if (result != VK_STRING_ERROR_NONE) { |
5556 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5557 | 0 | "loader_validate_layers: ppEnabledLayerNames contains string that is too long or is badly formed"); |
5558 | 0 | return VK_ERROR_LAYER_NOT_PRESENT; |
5559 | 0 | } |
5560 | | |
5561 | 7.65k | prop = loader_find_layer_property(ppEnabledLayerNames[i], list); |
5562 | 7.65k | if (NULL == prop) { |
5563 | 6.52k | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5564 | 6.52k | "loader_validate_layers: Layer %d does not exist in the list of available layers", i); |
5565 | 6.52k | return VK_ERROR_LAYER_NOT_PRESENT; |
5566 | 6.52k | } |
5567 | 1.13k | if (inst->settings.settings_active && prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_ON && |
5568 | 17 | prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) { |
5569 | 1 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5570 | 1 | "loader_validate_layers: Layer %d was explicitly prevented from being enabled by the loader settings file", |
5571 | 1 | i); |
5572 | 1 | return VK_ERROR_LAYER_NOT_PRESENT; |
5573 | 1 | } |
5574 | 1.13k | } |
5575 | 1.13k | return VK_SUCCESS; |
5576 | 7.65k | } |
5577 | | |
5578 | | VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts, |
5579 | | const struct loader_layer_list *instance_layers, |
5580 | | const struct loader_envvar_all_filters *layer_filters, |
5581 | 0 | const VkInstanceCreateInfo *pCreateInfo) { |
5582 | 0 | VkExtensionProperties *extension_prop; |
5583 | 0 | char *env_value; |
5584 | 0 | char *enabled_layers_env = NULL; |
5585 | 0 | bool check_if_known = true; |
5586 | 0 | VkResult res = VK_SUCCESS; |
5587 | |
|
5588 | 0 | struct loader_pointer_layer_list active_layers = {0}; |
5589 | 0 | struct loader_pointer_layer_list expanded_layers = {0}; |
5590 | |
|
5591 | 0 | if (pCreateInfo->enabledExtensionCount > 0 && pCreateInfo->ppEnabledExtensionNames == NULL) { |
5592 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5593 | 0 | "loader_validate_instance_extensions: Instance ppEnabledExtensionNames is NULL but enabledExtensionCount is " |
5594 | 0 | "greater than zero"); |
5595 | 0 | return VK_ERROR_EXTENSION_NOT_PRESENT; |
5596 | 0 | } |
5597 | 0 | if (!loader_init_pointer_layer_list(inst, &active_layers)) { |
5598 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5599 | 0 | goto out; |
5600 | 0 | } |
5601 | 0 | if (!loader_init_pointer_layer_list(inst, &expanded_layers)) { |
5602 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5603 | 0 | goto out; |
5604 | 0 | } |
5605 | | |
5606 | 0 | if (inst->settings.settings_active) { |
5607 | 0 | res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount, |
5608 | 0 | pCreateInfo->ppEnabledLayerNames, instance_layers, &active_layers, |
5609 | 0 | &expanded_layers); |
5610 | 0 | if (res != VK_SUCCESS) { |
5611 | 0 | goto out; |
5612 | 0 | } |
5613 | 0 | } else { |
5614 | 0 | enabled_layers_env = loader_getenv(ENABLED_LAYERS_ENV, inst); |
5615 | | |
5616 | | // Build the lists of active layers (including meta layers) and expanded layers (with meta layers resolved to their |
5617 | | // components) |
5618 | 0 | res = |
5619 | 0 | loader_add_implicit_layers(inst, enabled_layers_env, layer_filters, &active_layers, &expanded_layers, instance_layers); |
5620 | 0 | if (res != VK_SUCCESS) { |
5621 | 0 | goto out; |
5622 | 0 | } |
5623 | 0 | res = loader_add_environment_layers(inst, enabled_layers_env, layer_filters, &active_layers, &expanded_layers, |
5624 | 0 | instance_layers); |
5625 | 0 | if (res != VK_SUCCESS) { |
5626 | 0 | goto out; |
5627 | 0 | } |
5628 | 0 | res = loader_add_layer_names_to_list(inst, layer_filters, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount, |
5629 | 0 | pCreateInfo->ppEnabledLayerNames, instance_layers); |
5630 | 0 | if (VK_SUCCESS != res) { |
5631 | 0 | goto out; |
5632 | 0 | } |
5633 | 0 | } |
5634 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
5635 | 0 | VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]); |
5636 | 0 | if (result != VK_STRING_ERROR_NONE) { |
5637 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5638 | 0 | "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains " |
5639 | 0 | "string that is too long or is badly formed"); |
5640 | 0 | res = VK_ERROR_EXTENSION_NOT_PRESENT; |
5641 | 0 | goto out; |
5642 | 0 | } |
5643 | | |
5644 | | // Check if a user wants to disable the instance extension filtering behavior |
5645 | 0 | env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst); |
5646 | 0 | if (NULL != env_value && atoi(env_value) != 0) { |
5647 | 0 | check_if_known = false; |
5648 | 0 | } |
5649 | 0 | loader_free_getenv(env_value, inst); |
5650 | |
|
5651 | 0 | if (check_if_known) { |
5652 | | // See if the extension is in the list of supported extensions |
5653 | 0 | bool found = false; |
5654 | 0 | for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) { |
5655 | 0 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) { |
5656 | 0 | found = true; |
5657 | 0 | break; |
5658 | 0 | } |
5659 | 0 | } |
5660 | | |
5661 | | // If it isn't in the list, return an error |
5662 | 0 | if (!found) { |
5663 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5664 | 0 | "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.", |
5665 | 0 | pCreateInfo->ppEnabledExtensionNames[i]); |
5666 | 0 | res = VK_ERROR_EXTENSION_NOT_PRESENT; |
5667 | 0 | goto out; |
5668 | 0 | } |
5669 | 0 | } |
5670 | | |
5671 | 0 | extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts); |
5672 | |
|
5673 | 0 | if (extension_prop) { |
5674 | 0 | continue; |
5675 | 0 | } |
5676 | | |
5677 | 0 | extension_prop = NULL; |
5678 | | |
5679 | | // Not in global list, search layer extension lists |
5680 | 0 | for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) { |
5681 | 0 | extension_prop = |
5682 | 0 | get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j]->instance_extension_list); |
5683 | 0 | if (extension_prop) { |
5684 | | // Found the extension in one of the layers enabled by the app. |
5685 | 0 | break; |
5686 | 0 | } |
5687 | | |
5688 | 0 | struct loader_layer_properties *layer_prop = |
5689 | 0 | loader_find_layer_property(expanded_layers.list[j]->info.layerName, instance_layers); |
5690 | 0 | if (NULL == layer_prop) { |
5691 | | // Should NOT get here, loader_validate_layers should have already filtered this case out. |
5692 | 0 | continue; |
5693 | 0 | } |
5694 | 0 | } |
5695 | |
|
5696 | 0 | if (!extension_prop) { |
5697 | | // Didn't find extension name in any of the global layers, error out |
5698 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
5699 | 0 | "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled " |
5700 | 0 | "layers.", |
5701 | 0 | pCreateInfo->ppEnabledExtensionNames[i]); |
5702 | 0 | res = VK_ERROR_EXTENSION_NOT_PRESENT; |
5703 | 0 | goto out; |
5704 | 0 | } |
5705 | 0 | } |
5706 | | |
5707 | 0 | out: |
5708 | 0 | loader_destroy_pointer_layer_list(inst, &active_layers); |
5709 | 0 | loader_destroy_pointer_layer_list(inst, &expanded_layers); |
5710 | 0 | if (enabled_layers_env != NULL) { |
5711 | 0 | loader_free_getenv(enabled_layers_env, inst); |
5712 | 0 | } |
5713 | |
|
5714 | 0 | return res; |
5715 | 0 | } |
5716 | | |
5717 | | VkResult loader_validate_device_extensions(struct loader_instance *this_instance, |
5718 | | const struct loader_pointer_layer_list *activated_device_layers, |
5719 | 0 | const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) { |
5720 | | // Early out to prevent nullptr dereference |
5721 | 0 | if (pCreateInfo->enabledExtensionCount == 0 || pCreateInfo->ppEnabledExtensionNames == NULL) { |
5722 | 0 | return VK_SUCCESS; |
5723 | 0 | } |
5724 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
5725 | 0 | if (pCreateInfo->ppEnabledExtensionNames[i] == NULL) { |
5726 | 0 | continue; |
5727 | 0 | } |
5728 | 0 | VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]); |
5729 | 0 | if (result != VK_STRING_ERROR_NONE) { |
5730 | 0 | loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5731 | 0 | "loader_validate_device_extensions: Device ppEnabledExtensionNames contains " |
5732 | 0 | "string that is too long or is badly formed"); |
5733 | 0 | return VK_ERROR_EXTENSION_NOT_PRESENT; |
5734 | 0 | } |
5735 | | |
5736 | 0 | const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; |
5737 | 0 | VkExtensionProperties *extension_prop = get_extension_property(extension_name, icd_exts); |
5738 | |
|
5739 | 0 | if (extension_prop) { |
5740 | 0 | continue; |
5741 | 0 | } |
5742 | | |
5743 | | // Not in global list, search activated layer extension lists |
5744 | 0 | for (uint32_t j = 0; j < activated_device_layers->count; j++) { |
5745 | 0 | struct loader_layer_properties *layer_prop = activated_device_layers->list[j]; |
5746 | |
|
5747 | 0 | extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list); |
5748 | 0 | if (extension_prop) { |
5749 | | // Found the extension in one of the layers enabled by the app. |
5750 | 0 | break; |
5751 | 0 | } |
5752 | 0 | } |
5753 | |
|
5754 | 0 | if (!extension_prop) { |
5755 | | // Didn't find extension name in any of the device layers, error out |
5756 | 0 | loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5757 | 0 | "loader_validate_device_extensions: Device extension %s not supported by selected physical device " |
5758 | 0 | "or enabled layers.", |
5759 | 0 | pCreateInfo->ppEnabledExtensionNames[i]); |
5760 | 0 | return VK_ERROR_EXTENSION_NOT_PRESENT; |
5761 | 0 | } |
5762 | 0 | } |
5763 | 0 | return VK_SUCCESS; |
5764 | 0 | } |
5765 | | |
5766 | | // Terminator functions for the Instance chain |
5767 | | // All named terminator_<Vulkan API name> |
5768 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, |
5769 | 0 | const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) { |
5770 | 0 | struct loader_icd_term *icd_term = NULL; |
5771 | 0 | VkExtensionProperties *prop = NULL; |
5772 | 0 | char **filtered_extension_names = NULL; |
5773 | 0 | VkInstanceCreateInfo icd_create_info = {0}; |
5774 | 0 | VkResult res = VK_SUCCESS; |
5775 | 0 | bool one_icd_successful = false; |
5776 | |
|
5777 | 0 | struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance; |
5778 | 0 | if (NULL == ptr_instance) { |
5779 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
5780 | 0 | "terminator_CreateInstance: Loader instance pointer null encountered. Possibly set by active layer. (Policy " |
5781 | 0 | "#LLP_LAYER_21)"); |
5782 | 0 | } else if (LOADER_MAGIC_NUMBER != ptr_instance->magic) { |
5783 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
5784 | 0 | "terminator_CreateInstance: Instance pointer (%p) has invalid MAGIC value 0x%08" PRIx64 |
5785 | 0 | ". Instance value possibly " |
5786 | 0 | "corrupted by active layer (Policy #LLP_LAYER_21). ", |
5787 | 0 | ptr_instance, ptr_instance->magic); |
5788 | 0 | } |
5789 | | |
5790 | | // Save the application version if it has been modified - layers sometimes needs features in newer API versions than |
5791 | | // what the application requested, and thus will increase the instance version to a level that suites their needs. |
5792 | 0 | if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion) { |
5793 | 0 | loader_api_version altered_version = loader_make_version(pCreateInfo->pApplicationInfo->apiVersion); |
5794 | 0 | if (altered_version.major != ptr_instance->app_api_version.major || |
5795 | 0 | altered_version.minor != ptr_instance->app_api_version.minor) { |
5796 | 0 | ptr_instance->app_api_version = altered_version; |
5797 | 0 | } |
5798 | 0 | } |
5799 | |
|
5800 | 0 | memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info)); |
5801 | |
|
5802 | 0 | icd_create_info.enabledLayerCount = 0; |
5803 | 0 | icd_create_info.ppEnabledLayerNames = NULL; |
5804 | | |
5805 | | // NOTE: Need to filter the extensions to only those supported by the ICD. |
5806 | | // No ICD will advertise support for layers. An ICD library could |
5807 | | // support a layer, but it would be independent of the actual ICD, |
5808 | | // just in the same library. |
5809 | 0 | uint32_t extension_count = pCreateInfo->enabledExtensionCount; |
5810 | 0 | #if defined(LOADER_ENABLE_LINUX_SORT) |
5811 | 0 | extension_count += 1; |
5812 | 0 | #endif // LOADER_ENABLE_LINUX_SORT |
5813 | 0 | filtered_extension_names = loader_stack_alloc(extension_count * sizeof(char *)); |
5814 | 0 | if (!filtered_extension_names) { |
5815 | 0 | loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5816 | 0 | "terminator_CreateInstance: Failed create extension name array for %d extensions", extension_count); |
5817 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5818 | 0 | goto out; |
5819 | 0 | } |
5820 | 0 | icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names; |
5821 | | |
5822 | | // Determine if Get Physical Device Properties 2 is available to this Instance |
5823 | 0 | if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion >= VK_API_VERSION_1_1) { |
5824 | 0 | ptr_instance->supports_get_dev_prop_2 = true; |
5825 | 0 | } else { |
5826 | 0 | for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) { |
5827 | 0 | if (!strcmp(pCreateInfo->ppEnabledExtensionNames[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { |
5828 | 0 | ptr_instance->supports_get_dev_prop_2 = true; |
5829 | 0 | break; |
5830 | 0 | } |
5831 | 0 | } |
5832 | 0 | } |
5833 | |
|
5834 | 0 | for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) { |
5835 | 0 | icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]); |
5836 | 0 | if (NULL == icd_term) { |
5837 | 0 | loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0, |
5838 | 0 | "terminator_CreateInstance: Failed to add ICD %d to ICD trampoline list.", i); |
5839 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
5840 | 0 | goto out; |
5841 | 0 | } |
5842 | | |
5843 | | // If any error happens after here, we need to remove the ICD from the list, |
5844 | | // because we've already added it, but haven't validated it |
5845 | | |
5846 | | // Make sure that we reset the pApplicationInfo so we don't get an old pointer |
5847 | 0 | icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo; |
5848 | 0 | icd_create_info.enabledExtensionCount = 0; |
5849 | 0 | struct loader_extension_list icd_exts = {0}; |
5850 | | |
5851 | | // traverse scanned icd list adding non-duplicate extensions to the list |
5852 | 0 | res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); |
5853 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
5854 | | // If out of memory, bail immediately. |
5855 | 0 | goto out; |
5856 | 0 | } else if (VK_SUCCESS != res) { |
5857 | | // Something bad happened with this ICD, so free it and try the |
5858 | | // next. |
5859 | 0 | ptr_instance->icd_terms = icd_term->next; |
5860 | 0 | icd_term->next = NULL; |
5861 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
5862 | 0 | continue; |
5863 | 0 | } |
5864 | | |
5865 | 0 | res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties, |
5866 | 0 | icd_term->scanned_icd->lib_name, &icd_exts); |
5867 | 0 | if (VK_SUCCESS != res) { |
5868 | 0 | loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts); |
5869 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
5870 | | // If out of memory, bail immediately. |
5871 | 0 | goto out; |
5872 | 0 | } else { |
5873 | | // Something bad happened with this ICD, so free it and try the next. |
5874 | 0 | ptr_instance->icd_terms = icd_term->next; |
5875 | 0 | icd_term->next = NULL; |
5876 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
5877 | 0 | continue; |
5878 | 0 | } |
5879 | 0 | } |
5880 | | |
5881 | 0 | for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) { |
5882 | 0 | prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts); |
5883 | 0 | if (prop) { |
5884 | 0 | filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j]; |
5885 | 0 | icd_create_info.enabledExtensionCount++; |
5886 | 0 | } |
5887 | 0 | } |
5888 | 0 | #if defined(LOADER_ENABLE_LINUX_SORT) |
5889 | | // Force on "VK_KHR_get_physical_device_properties2" for Linux as we use it for GPU sorting. This |
5890 | | // should be done if the API version of either the application or the driver does not natively support |
5891 | | // the core version of vkGetPhysicalDeviceProperties2 entrypoint. |
5892 | 0 | if ((ptr_instance->app_api_version.major == 1 && ptr_instance->app_api_version.minor == 0) || |
5893 | 0 | (VK_API_VERSION_MAJOR(icd_term->scanned_icd->api_version) == 1 && |
5894 | 0 | VK_API_VERSION_MINOR(icd_term->scanned_icd->api_version) == 0)) { |
5895 | 0 | prop = get_extension_property(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, &icd_exts); |
5896 | 0 | if (prop) { |
5897 | 0 | filtered_extension_names[icd_create_info.enabledExtensionCount] = |
5898 | 0 | (char *)VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; |
5899 | 0 | icd_create_info.enabledExtensionCount++; |
5900 | | |
5901 | | // At least one ICD supports this, so the instance should be able to support it |
5902 | 0 | ptr_instance->supports_get_dev_prop_2 = true; |
5903 | 0 | } |
5904 | 0 | } |
5905 | 0 | #endif // LOADER_ENABLE_LINUX_SORT |
5906 | | |
5907 | | // Determine if vkGetPhysicalDeviceProperties2 is available to this Instance |
5908 | | // Also determine if VK_EXT_surface_maintenance1 is available on the ICD |
5909 | 0 | if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) { |
5910 | 0 | icd_term->enabled_instance_extensions.khr_get_physical_device_properties2 = true; |
5911 | 0 | } |
5912 | 0 | fill_out_enabled_instance_extensions(icd_create_info.enabledExtensionCount, (const char *const *)filtered_extension_names, |
5913 | 0 | &icd_term->enabled_instance_extensions); |
5914 | |
|
5915 | 0 | loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts); |
5916 | | |
5917 | | // Get the driver version from vkEnumerateInstanceVersion |
5918 | 0 | uint32_t icd_version = VK_API_VERSION_1_0; |
5919 | 0 | VkResult icd_result = VK_SUCCESS; |
5920 | 0 | if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) { |
5921 | 0 | PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version = |
5922 | 0 | (PFN_vkEnumerateInstanceVersion)icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion"); |
5923 | 0 | if (icd_enumerate_instance_version != NULL) { |
5924 | 0 | icd_result = icd_enumerate_instance_version(&icd_version); |
5925 | 0 | if (icd_result != VK_SUCCESS) { |
5926 | 0 | icd_version = VK_API_VERSION_1_0; |
5927 | 0 | loader_log(ptr_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5928 | 0 | "terminator_CreateInstance: ICD \"%s\" vkEnumerateInstanceVersion returned error. The ICD will be " |
5929 | 0 | "treated as a 1.0 ICD", |
5930 | 0 | icd_term->scanned_icd->lib_name); |
5931 | 0 | } else if (VK_API_VERSION_MINOR(icd_version) == 0) { |
5932 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5933 | 0 | "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but " |
5934 | 0 | "vkEnumerateInstanceVersion returned 1.0, treating as a 1.0 ICD", |
5935 | 0 | icd_term->scanned_icd->lib_name); |
5936 | 0 | } |
5937 | 0 | } else { |
5938 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
5939 | 0 | "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but does " |
5940 | 0 | "not support vkEnumerateInstanceVersion, treating as a 1.0 ICD", |
5941 | 0 | icd_term->scanned_icd->lib_name); |
5942 | 0 | } |
5943 | 0 | } |
5944 | | |
5945 | | // Remove the portability enumeration flag bit if the ICD doesn't support the extension |
5946 | 0 | if ((pCreateInfo->flags & VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR) == 1) { |
5947 | 0 | bool supports_portability_enumeration = false; |
5948 | 0 | for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) { |
5949 | 0 | if (strcmp(filtered_extension_names[j], VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME) == 0) { |
5950 | 0 | supports_portability_enumeration = true; |
5951 | 0 | break; |
5952 | 0 | } |
5953 | 0 | } |
5954 | | // If the icd supports the extension, use the flags as given, otherwise remove the portability bit |
5955 | 0 | icd_create_info.flags = supports_portability_enumeration |
5956 | 0 | ? pCreateInfo->flags |
5957 | 0 | : pCreateInfo->flags & (~VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR); |
5958 | 0 | } |
5959 | | |
5960 | | // Create an instance, substituting the version to 1.0 if necessary |
5961 | 0 | VkApplicationInfo icd_app_info = {0}; |
5962 | 0 | const uint32_t api_variant = 0; |
5963 | 0 | const uint32_t api_version_1_0 = VK_API_VERSION_1_0; |
5964 | 0 | uint32_t icd_version_nopatch = |
5965 | 0 | VK_MAKE_API_VERSION(api_variant, VK_API_VERSION_MAJOR(icd_version), VK_API_VERSION_MINOR(icd_version), 0); |
5966 | 0 | uint32_t requested_version = (pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL) |
5967 | 0 | ? api_version_1_0 |
5968 | 0 | : pCreateInfo->pApplicationInfo->apiVersion; |
5969 | 0 | if ((requested_version != 0) && (icd_version_nopatch == api_version_1_0)) { |
5970 | 0 | if (icd_create_info.pApplicationInfo == NULL) { |
5971 | 0 | memset(&icd_app_info, 0, sizeof(icd_app_info)); |
5972 | 0 | } else { |
5973 | 0 | memmove(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info)); |
5974 | 0 | } |
5975 | 0 | icd_app_info.apiVersion = icd_version; |
5976 | 0 | icd_create_info.pApplicationInfo = &icd_app_info; |
5977 | 0 | } |
5978 | | |
5979 | | // If the settings file has device_configurations, we need to raise the ApiVersion drivers use to 1.1 if the driver |
5980 | | // supports 1.1 or higher. This allows 1.0 apps to use the device_configurations without the app having to set its own |
5981 | | // ApiVersion to 1.1 on its own. |
5982 | 0 | if (ptr_instance->settings.settings_active && ptr_instance->settings.device_configuration_count > 0 && |
5983 | 0 | icd_version >= VK_API_VERSION_1_1 && requested_version < VK_API_VERSION_1_1) { |
5984 | 0 | if (NULL != pCreateInfo->pApplicationInfo) { |
5985 | 0 | memcpy(&icd_app_info, pCreateInfo->pApplicationInfo, sizeof(VkApplicationInfo)); |
5986 | 0 | } |
5987 | 0 | icd_app_info.apiVersion = VK_API_VERSION_1_1; |
5988 | 0 | icd_create_info.pApplicationInfo = &icd_app_info; |
5989 | |
|
5990 | 0 | loader_log( |
5991 | 0 | ptr_instance, VULKAN_LOADER_INFO_BIT, 0, |
5992 | 0 | "terminator_CreateInstance: Raising the VkApplicationInfo::apiVersion from 1.0 to 1.1 on driver \"%s\" so that " |
5993 | 0 | "the loader settings file is able to use this driver in the device_configuration selection logic.", |
5994 | 0 | icd_term->scanned_icd->lib_name); |
5995 | 0 | } |
5996 | |
|
5997 | 0 | icd_result = |
5998 | 0 | ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance)); |
5999 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) { |
6000 | | // If out of memory, bail immediately. |
6001 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
6002 | 0 | goto out; |
6003 | 0 | } else if (VK_SUCCESS != icd_result) { |
6004 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
6005 | 0 | "terminator_CreateInstance: Received return code %i from call to vkCreateInstance in ICD %s. Skipping " |
6006 | 0 | "this driver.", |
6007 | 0 | icd_result, icd_term->scanned_icd->lib_name); |
6008 | 0 | ptr_instance->icd_terms = icd_term->next; |
6009 | 0 | icd_term->next = NULL; |
6010 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
6011 | 0 | continue; |
6012 | 0 | } |
6013 | | |
6014 | 0 | if (!loader_icd_init_entries(ptr_instance, icd_term)) { |
6015 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
6016 | 0 | "terminator_CreateInstance: Failed to find required entrypoints in ICD %s. Skipping this driver.", |
6017 | 0 | icd_term->scanned_icd->lib_name); |
6018 | 0 | ptr_instance->icd_terms = icd_term->next; |
6019 | 0 | icd_term->next = NULL; |
6020 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
6021 | 0 | continue; |
6022 | 0 | } |
6023 | | |
6024 | 0 | if (ptr_instance->icd_tramp_list.scanned_list[i].interface_version < 3 && |
6025 | 0 | ( |
6026 | 0 | #if defined(VK_USE_PLATFORM_XLIB_KHR) |
6027 | 0 | NULL != icd_term->dispatch.CreateXlibSurfaceKHR || |
6028 | 0 | #endif // VK_USE_PLATFORM_XLIB_KHR |
6029 | 0 | #if defined(VK_USE_PLATFORM_XCB_KHR) |
6030 | 0 | NULL != icd_term->dispatch.CreateXcbSurfaceKHR || |
6031 | 0 | #endif // VK_USE_PLATFORM_XCB_KHR |
6032 | 0 | #if defined(VK_USE_PLATFORM_WAYLAND_KHR) |
6033 | 0 | NULL != icd_term->dispatch.CreateWaylandSurfaceKHR || |
6034 | 0 | #endif // VK_USE_PLATFORM_WAYLAND_KHR |
6035 | | #if defined(VK_USE_PLATFORM_ANDROID_KHR) |
6036 | | NULL != icd_term->dispatch.CreateAndroidSurfaceKHR || |
6037 | | #endif // VK_USE_PLATFORM_ANDROID_KHR |
6038 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
6039 | | NULL != icd_term->dispatch.CreateWin32SurfaceKHR || |
6040 | | #endif // VK_USE_PLATFORM_WIN32_KHR |
6041 | 0 | NULL != icd_term->dispatch.DestroySurfaceKHR)) { |
6042 | 0 | loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0, |
6043 | 0 | "terminator_CreateInstance: Driver %s supports interface version %u but still exposes VkSurfaceKHR" |
6044 | 0 | " create/destroy entrypoints (Policy #LDP_DRIVER_8)", |
6045 | 0 | ptr_instance->icd_tramp_list.scanned_list[i].lib_name, |
6046 | 0 | ptr_instance->icd_tramp_list.scanned_list[i].interface_version); |
6047 | 0 | } |
6048 | | |
6049 | | // If we made it this far, at least one ICD was successful |
6050 | 0 | one_icd_successful = true; |
6051 | 0 | } |
6052 | | |
6053 | | // For vkGetPhysicalDeviceProperties2, at least one ICD needs to support the extension for the |
6054 | | // instance to have it |
6055 | 0 | if (ptr_instance->enabled_extensions.khr_get_physical_device_properties2) { |
6056 | 0 | bool at_least_one_supports = false; |
6057 | 0 | icd_term = ptr_instance->icd_terms; |
6058 | 0 | while (icd_term != NULL) { |
6059 | 0 | if (icd_term->enabled_instance_extensions.khr_get_physical_device_properties2) { |
6060 | 0 | at_least_one_supports = true; |
6061 | 0 | break; |
6062 | 0 | } |
6063 | 0 | icd_term = icd_term->next; |
6064 | 0 | } |
6065 | 0 | if (!at_least_one_supports) { |
6066 | 0 | ptr_instance->enabled_extensions.khr_get_physical_device_properties2 = false; |
6067 | 0 | } |
6068 | 0 | } |
6069 | | |
6070 | | // If no ICDs were added to instance list and res is unchanged from it's initial value, the loader was unable to |
6071 | | // find a suitable ICD. |
6072 | 0 | if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) { |
6073 | 0 | loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6074 | 0 | "terminator_CreateInstance: Found no drivers!"); |
6075 | 0 | res = VK_ERROR_INCOMPATIBLE_DRIVER; |
6076 | 0 | } |
6077 | |
|
6078 | 0 | out: |
6079 | |
|
6080 | 0 | ptr_instance->create_terminator_invalid_extension = false; |
6081 | |
|
6082 | 0 | if (VK_SUCCESS != res) { |
6083 | 0 | if (VK_ERROR_EXTENSION_NOT_PRESENT == res) { |
6084 | 0 | ptr_instance->create_terminator_invalid_extension = true; |
6085 | 0 | } |
6086 | |
|
6087 | 0 | while (NULL != ptr_instance->icd_terms) { |
6088 | 0 | icd_term = ptr_instance->icd_terms; |
6089 | 0 | ptr_instance->icd_terms = icd_term->next; |
6090 | 0 | if (NULL != icd_term->instance) { |
6091 | 0 | loader_icd_close_objects(ptr_instance, icd_term); |
6092 | 0 | icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator); |
6093 | 0 | } |
6094 | 0 | loader_icd_destroy(ptr_instance, icd_term, pAllocator); |
6095 | 0 | } |
6096 | 0 | } else { |
6097 | | // Check for enabled extensions here to setup the loader structures so the loader knows what extensions |
6098 | | // it needs to worry about. |
6099 | | // We do it here and again above the layers in the trampoline function since the trampoline function |
6100 | | // may think different extensions are enabled than what's down here. |
6101 | | // This is why we don't clear inside of these function calls. |
6102 | | // The clearing should actually be handled by the overall memset of the pInstance structure in the |
6103 | | // trampoline. |
6104 | 0 | fill_out_enabled_instance_extensions(pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames, |
6105 | 0 | &ptr_instance->enabled_extensions); |
6106 | 0 | } |
6107 | |
|
6108 | 0 | return res; |
6109 | 0 | } |
6110 | | |
6111 | 0 | VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { |
6112 | 0 | struct loader_instance *ptr_instance = loader_get_instance(instance); |
6113 | 0 | if (NULL == ptr_instance) { |
6114 | 0 | return; |
6115 | 0 | } |
6116 | | |
6117 | | // Remove this instance from the list of instances: |
6118 | 0 | struct loader_instance *prev = NULL; |
6119 | 0 | loader_platform_thread_lock_mutex(&loader_global_instance_list_lock); |
6120 | 0 | struct loader_instance *next = loader.instances; |
6121 | 0 | while (next != NULL) { |
6122 | 0 | if (next == ptr_instance) { |
6123 | | // Remove this instance from the list: |
6124 | 0 | if (prev) |
6125 | 0 | prev->next = next->next; |
6126 | 0 | else |
6127 | 0 | loader.instances = next->next; |
6128 | 0 | break; |
6129 | 0 | } |
6130 | 0 | prev = next; |
6131 | 0 | next = next->next; |
6132 | 0 | } |
6133 | 0 | loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock); |
6134 | |
|
6135 | 0 | struct loader_icd_term *icd_terms = ptr_instance->icd_terms; |
6136 | 0 | while (NULL != icd_terms) { |
6137 | 0 | if (icd_terms->instance) { |
6138 | 0 | loader_icd_close_objects(ptr_instance, icd_terms); |
6139 | 0 | icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator); |
6140 | 0 | } |
6141 | 0 | struct loader_icd_term *next_icd_term = icd_terms->next; |
6142 | 0 | icd_terms->instance = VK_NULL_HANDLE; |
6143 | 0 | loader_icd_destroy(ptr_instance, icd_terms, pAllocator); |
6144 | |
|
6145 | 0 | icd_terms = next_icd_term; |
6146 | 0 | } |
6147 | |
|
6148 | 0 | loader_clear_scanned_icd_list(ptr_instance, &ptr_instance->icd_tramp_list); |
6149 | 0 | loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list); |
6150 | 0 | if (NULL != ptr_instance->phys_devs_term) { |
6151 | 0 | for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) { |
6152 | 0 | for (uint32_t j = i + 1; j < ptr_instance->phys_dev_count_term; j++) { |
6153 | 0 | if (ptr_instance->phys_devs_term[i] == ptr_instance->phys_devs_term[j]) { |
6154 | 0 | ptr_instance->phys_devs_term[j] = NULL; |
6155 | 0 | } |
6156 | 0 | } |
6157 | 0 | } |
6158 | 0 | for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) { |
6159 | 0 | loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]); |
6160 | 0 | } |
6161 | 0 | loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term); |
6162 | 0 | } |
6163 | 0 | if (NULL != ptr_instance->phys_dev_groups_term) { |
6164 | 0 | for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) { |
6165 | 0 | loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]); |
6166 | 0 | } |
6167 | 0 | loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term); |
6168 | 0 | } |
6169 | 0 | loader_free_dev_ext_table(ptr_instance); |
6170 | 0 | loader_free_phys_dev_ext_table(ptr_instance); |
6171 | |
|
6172 | 0 | free_string_list(ptr_instance, &ptr_instance->enabled_layer_names); |
6173 | 0 | } |
6174 | | |
6175 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, |
6176 | 0 | const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { |
6177 | 0 | VkResult res = VK_SUCCESS; |
6178 | 0 | struct loader_physical_device_term *phys_dev_term; |
6179 | 0 | phys_dev_term = (struct loader_physical_device_term *)physicalDevice; |
6180 | 0 | struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; |
6181 | |
|
6182 | 0 | struct loader_device *dev = (struct loader_device *)*pDevice; |
6183 | 0 | PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice; |
6184 | 0 | struct loader_extension_list icd_exts; |
6185 | |
|
6186 | 0 | VkBaseOutStructure *caller_dgci_container = NULL; |
6187 | 0 | VkDeviceGroupDeviceCreateInfo *caller_dgci = NULL; |
6188 | |
|
6189 | 0 | if (NULL == dev) { |
6190 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0, |
6191 | 0 | "terminator_CreateDevice: Loader device pointer null encountered. Possibly set by active layer. (Policy " |
6192 | 0 | "#LLP_LAYER_22)"); |
6193 | 0 | } else if (DEVICE_DISP_TABLE_MAGIC_NUMBER != dev->loader_dispatch.core_dispatch.magic) { |
6194 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0, |
6195 | 0 | "terminator_CreateDevice: Device pointer (%p) has invalid MAGIC value 0x%08" PRIx64 |
6196 | 0 | ". The expected value is " |
6197 | 0 | "0x10ADED040410ADED. Device value possibly " |
6198 | 0 | "corrupted by active layer (Policy #LLP_LAYER_22). ", |
6199 | 0 | dev, dev->loader_dispatch.core_dispatch.magic); |
6200 | 0 | } |
6201 | |
|
6202 | 0 | dev->phys_dev_term = phys_dev_term; |
6203 | |
|
6204 | 0 | icd_exts.list = NULL; |
6205 | |
|
6206 | 0 | if (fpCreateDevice == NULL) { |
6207 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6208 | 0 | "terminator_CreateDevice: No vkCreateDevice command exposed by ICD %s", icd_term->scanned_icd->lib_name); |
6209 | 0 | res = VK_ERROR_INITIALIZATION_FAILED; |
6210 | 0 | goto out; |
6211 | 0 | } |
6212 | | |
6213 | 0 | VkDeviceCreateInfo localCreateInfo; |
6214 | 0 | memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo)); |
6215 | | |
6216 | | // NOTE: Need to filter the extensions to only those supported by the ICD. |
6217 | | // No ICD will advertise support for layers. An ICD library could support a layer, |
6218 | | // but it would be independent of the actual ICD, just in the same library. |
6219 | 0 | char **filtered_extension_names = NULL; |
6220 | 0 | if (0 < pCreateInfo->enabledExtensionCount) { |
6221 | 0 | filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *)); |
6222 | 0 | if (NULL == filtered_extension_names) { |
6223 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0, |
6224 | 0 | "terminator_CreateDevice: Failed to create extension name storage for %d extensions", |
6225 | 0 | pCreateInfo->enabledExtensionCount); |
6226 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
6227 | 0 | } |
6228 | 0 | } |
6229 | | |
6230 | 0 | localCreateInfo.enabledLayerCount = 0; |
6231 | 0 | localCreateInfo.ppEnabledLayerNames = NULL; |
6232 | |
|
6233 | 0 | localCreateInfo.enabledExtensionCount = 0; |
6234 | 0 | localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names; |
6235 | | |
6236 | | // Get the physical device (ICD) extensions |
6237 | 0 | res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); |
6238 | 0 | if (VK_SUCCESS != res) { |
6239 | 0 | goto out; |
6240 | 0 | } |
6241 | | |
6242 | 0 | res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties, |
6243 | 0 | phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts); |
6244 | 0 | if (res != VK_SUCCESS) { |
6245 | 0 | goto out; |
6246 | 0 | } |
6247 | | |
6248 | 0 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
6249 | 0 | if (pCreateInfo->ppEnabledExtensionNames == NULL) { |
6250 | 0 | continue; |
6251 | 0 | } |
6252 | 0 | const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; |
6253 | 0 | if (extension_name == NULL) { |
6254 | 0 | continue; |
6255 | 0 | } |
6256 | 0 | VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts); |
6257 | 0 | if (prop) { |
6258 | 0 | filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name; |
6259 | 0 | localCreateInfo.enabledExtensionCount++; |
6260 | 0 | } else { |
6261 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6262 | 0 | "vkCreateDevice extension %s not available for devices associated with ICD %s", extension_name, |
6263 | 0 | icd_term->scanned_icd->lib_name); |
6264 | 0 | } |
6265 | 0 | } |
6266 | | |
6267 | | // Before we continue, If KHX_device_group is the list of enabled and viable extensions, then we then need to look for the |
6268 | | // corresponding VkDeviceGroupDeviceCreateInfo struct in the device list and replace all the physical device values (which |
6269 | | // are really loader physical device terminator values) with the ICD versions. |
6270 | | // if (icd_term->this_instance->enabled_extensions.khr_device_group_creation == 1) { |
6271 | 0 | { |
6272 | 0 | VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext; |
6273 | 0 | VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo; |
6274 | 0 | while (NULL != pNext) { |
6275 | 0 | if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) { |
6276 | 0 | VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext; |
6277 | 0 | if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) { |
6278 | 0 | VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo)); |
6279 | 0 | VkPhysicalDevice *phys_dev_array = NULL; |
6280 | 0 | if (NULL == temp_struct) { |
6281 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
6282 | 0 | } |
6283 | 0 | memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo)); |
6284 | 0 | phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount); |
6285 | 0 | if (NULL == phys_dev_array) { |
6286 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
6287 | 0 | } |
6288 | | |
6289 | | // Before calling down, replace the incoming physical device values (which are really loader terminator |
6290 | | // physical devices) with the ICDs physical device values. |
6291 | 0 | struct loader_physical_device_term *cur_term; |
6292 | 0 | for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) { |
6293 | 0 | cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev]; |
6294 | 0 | phys_dev_array[phys_dev] = cur_term->phys_dev; |
6295 | 0 | } |
6296 | 0 | temp_struct->pPhysicalDevices = phys_dev_array; |
6297 | | |
6298 | | // Keep track of pointers to restore pNext chain before returning |
6299 | 0 | caller_dgci_container = pPrev; |
6300 | 0 | caller_dgci = cur_struct; |
6301 | | |
6302 | | // Replace the old struct in the pNext chain with this one. |
6303 | 0 | pPrev->pNext = (VkBaseOutStructure *)temp_struct; |
6304 | 0 | } |
6305 | 0 | break; |
6306 | 0 | } |
6307 | | |
6308 | 0 | pPrev = pNext; |
6309 | 0 | pNext = pNext->pNext; |
6310 | 0 | } |
6311 | 0 | } |
6312 | | |
6313 | | // Handle loader emulation for structs that are not supported by the ICD: |
6314 | | // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which |
6315 | | // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current |
6316 | | // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize |
6317 | | // the any of the struct types, as the loader would not know the size to allocate and copy. |
6318 | | // if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) { |
6319 | 0 | { |
6320 | 0 | const void *pNext = localCreateInfo.pNext; |
6321 | 0 | while (pNext != NULL) { |
6322 | 0 | VkBaseInStructure pNext_in_structure = {0}; |
6323 | 0 | memcpy(&pNext_in_structure, pNext, sizeof(VkBaseInStructure)); |
6324 | 0 | switch (pNext_in_structure.sType) { |
6325 | 0 | case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: { |
6326 | 0 | const VkPhysicalDeviceFeatures2KHR *features = pNext; |
6327 | |
|
6328 | 0 | if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && |
6329 | 0 | icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) { |
6330 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0, |
6331 | 0 | "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"", |
6332 | 0 | icd_term->scanned_icd->lib_name); |
6333 | | |
6334 | | // Verify that VK_KHR_get_physical_device_properties2 is enabled |
6335 | 0 | if (icd_term->this_instance->enabled_extensions.khr_get_physical_device_properties2) { |
6336 | 0 | localCreateInfo.pEnabledFeatures = &features->features; |
6337 | 0 | } |
6338 | 0 | } |
6339 | | |
6340 | | // Leave this item in the pNext chain for now |
6341 | |
|
6342 | 0 | pNext = features->pNext; |
6343 | 0 | break; |
6344 | 0 | } |
6345 | | |
6346 | 0 | case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: { |
6347 | 0 | const VkDeviceGroupDeviceCreateInfo *group_info = pNext; |
6348 | |
|
6349 | 0 | if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL && |
6350 | 0 | icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) { |
6351 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0, |
6352 | 0 | "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for " |
6353 | 0 | "ICD \"%s\"", |
6354 | 0 | icd_term->scanned_icd->lib_name); |
6355 | | |
6356 | | // The group must contain only this one device, since physical device groups aren't actually supported |
6357 | 0 | if (group_info->physicalDeviceCount != 1) { |
6358 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0, |
6359 | 0 | "vkCreateDevice: Emulation failed to create device from device group info"); |
6360 | 0 | res = VK_ERROR_INITIALIZATION_FAILED; |
6361 | 0 | goto out; |
6362 | 0 | } |
6363 | 0 | } |
6364 | | |
6365 | | // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec |
6366 | | // states that the physicalDevice argument must be included in the device group, and we've already checked |
6367 | | // that it is |
6368 | | |
6369 | 0 | pNext = group_info->pNext; |
6370 | 0 | break; |
6371 | 0 | } |
6372 | | |
6373 | | // Multiview properties are also allowed, but since VK_KHX_multiview is a device extension, we'll just let the |
6374 | | // ICD handle that error when the user enables the extension here |
6375 | 0 | default: { |
6376 | 0 | pNext = pNext_in_structure.pNext; |
6377 | 0 | break; |
6378 | 0 | } |
6379 | 0 | } |
6380 | 0 | } |
6381 | 0 | } |
6382 | | |
6383 | 0 | VkBool32 maintenance5_feature_enabled = false; |
6384 | | // Look for the VkPhysicalDeviceMaintenance5FeaturesKHR struct to see if the feature was enabled |
6385 | 0 | { |
6386 | 0 | const void *pNext = localCreateInfo.pNext; |
6387 | 0 | while (pNext != NULL) { |
6388 | 0 | VkBaseInStructure pNext_in_structure = {0}; |
6389 | 0 | memcpy(&pNext_in_structure, pNext, sizeof(VkBaseInStructure)); |
6390 | 0 | switch (pNext_in_structure.sType) { |
6391 | 0 | case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR: { |
6392 | 0 | const VkPhysicalDeviceMaintenance5FeaturesKHR *maintenance_features = pNext; |
6393 | 0 | if (maintenance_features->maintenance5 == VK_TRUE) { |
6394 | 0 | maintenance5_feature_enabled = true; |
6395 | 0 | } |
6396 | 0 | pNext = maintenance_features->pNext; |
6397 | 0 | break; |
6398 | 0 | } |
6399 | | |
6400 | 0 | default: { |
6401 | 0 | pNext = pNext_in_structure.pNext; |
6402 | 0 | break; |
6403 | 0 | } |
6404 | 0 | } |
6405 | 0 | } |
6406 | 0 | } |
6407 | | |
6408 | | // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or |
6409 | | // not to return that terminator when vkGetDeviceProcAddr is called |
6410 | 0 | for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) { |
6411 | 0 | if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { |
6412 | 0 | dev->driver_extensions.khr_swapchain_enabled = true; |
6413 | 0 | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) { |
6414 | 0 | dev->driver_extensions.khr_display_swapchain_enabled = true; |
6415 | 0 | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) { |
6416 | 0 | dev->driver_extensions.khr_device_group_enabled = true; |
6417 | 0 | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { |
6418 | 0 | dev->driver_extensions.ext_debug_marker_enabled = true; |
6419 | | #if defined(VK_USE_PLATFORM_WIN32_KHR) |
6420 | | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME)) { |
6421 | | dev->driver_extensions.ext_full_screen_exclusive_enabled = true; |
6422 | | #endif |
6423 | 0 | } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_MAINTENANCE_5_EXTENSION_NAME) && |
6424 | 0 | maintenance5_feature_enabled) { |
6425 | 0 | dev->should_ignore_device_commands_from_newer_version = true; |
6426 | 0 | } |
6427 | 0 | } |
6428 | 0 | dev->layer_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_extensions.ext_debug_utils; |
6429 | 0 | dev->driver_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_extensions.ext_debug_utils; |
6430 | |
|
6431 | 0 | VkPhysicalDeviceProperties properties; |
6432 | 0 | icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties); |
6433 | 0 | if (properties.apiVersion >= VK_API_VERSION_1_1) { |
6434 | 0 | dev->driver_extensions.version_1_1_enabled = true; |
6435 | 0 | } |
6436 | 0 | if (properties.apiVersion >= VK_API_VERSION_1_2) { |
6437 | 0 | dev->driver_extensions.version_1_2_enabled = true; |
6438 | 0 | } |
6439 | 0 | if (properties.apiVersion >= VK_API_VERSION_1_3) { |
6440 | 0 | dev->driver_extensions.version_1_3_enabled = true; |
6441 | 0 | } |
6442 | |
|
6443 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6444 | 0 | " Using \"%s\" with driver: \"%s\"", properties.deviceName, icd_term->scanned_icd->lib_name); |
6445 | |
|
6446 | 0 | res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device); |
6447 | 0 | if (res != VK_SUCCESS) { |
6448 | 0 | loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6449 | 0 | "terminator_CreateDevice: Failed in ICD %s vkCreateDevice call", icd_term->scanned_icd->lib_name); |
6450 | 0 | goto out; |
6451 | 0 | } |
6452 | | |
6453 | 0 | *pDevice = dev->icd_device; |
6454 | 0 | loader_add_logical_device(icd_term, dev); |
6455 | | |
6456 | | // Init dispatch pointer in new device object |
6457 | 0 | loader_init_dispatch(*pDevice, &dev->loader_dispatch); |
6458 | |
|
6459 | 0 | out: |
6460 | 0 | if (NULL != icd_exts.list) { |
6461 | 0 | loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts); |
6462 | 0 | } |
6463 | | |
6464 | | // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfo |
6465 | | // in the chain to maintain consistency for the caller. |
6466 | 0 | if (caller_dgci_container != NULL) { |
6467 | 0 | caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci; |
6468 | 0 | } |
6469 | |
|
6470 | 0 | return res; |
6471 | 0 | } |
6472 | | |
6473 | | // Update the trampoline physical devices with the wrapped version. |
6474 | | // We always want to re-use previous physical device pointers since they may be used by an application |
6475 | | // after returning previously. |
6476 | 0 | VkResult setup_loader_tramp_phys_devs(struct loader_instance *inst, uint32_t phys_dev_count, VkPhysicalDevice *phys_devs) { |
6477 | 0 | VkResult res = VK_SUCCESS; |
6478 | 0 | uint32_t found_count = 0; |
6479 | 0 | uint32_t old_count = inst->phys_dev_count_tramp; |
6480 | 0 | uint32_t new_count = inst->total_gpu_count; |
6481 | 0 | struct loader_physical_device_tramp **new_phys_devs = NULL; |
6482 | |
|
6483 | 0 | if (0 == phys_dev_count) { |
6484 | 0 | return VK_SUCCESS; |
6485 | 0 | } |
6486 | 0 | if (phys_dev_count > new_count) { |
6487 | 0 | new_count = phys_dev_count; |
6488 | 0 | } |
6489 | | |
6490 | | // We want an old to new index array and a new to old index array |
6491 | 0 | int32_t *old_to_new_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * old_count); |
6492 | 0 | int32_t *new_to_old_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * new_count); |
6493 | 0 | if (NULL == old_to_new_index || NULL == new_to_old_index) { |
6494 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
6495 | 0 | } |
6496 | | |
6497 | | // Initialize both |
6498 | 0 | for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) { |
6499 | 0 | old_to_new_index[cur_idx] = -1; |
6500 | 0 | } |
6501 | 0 | for (uint32_t cur_idx = 0; cur_idx < new_count; ++cur_idx) { |
6502 | 0 | new_to_old_index[cur_idx] = -1; |
6503 | 0 | } |
6504 | | |
6505 | | // Figure out the old->new and new->old indices |
6506 | 0 | for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) { |
6507 | 0 | for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) { |
6508 | 0 | if (inst->phys_devs_tramp[cur_idx]->phys_dev == phys_devs[new_idx]) { |
6509 | 0 | old_to_new_index[cur_idx] = (int32_t)new_idx; |
6510 | 0 | new_to_old_index[new_idx] = (int32_t)cur_idx; |
6511 | 0 | found_count++; |
6512 | 0 | break; |
6513 | 0 | } |
6514 | 0 | } |
6515 | 0 | } |
6516 | | |
6517 | | // If we found exactly the number of items we were looking for as we had before. Then everything |
6518 | | // we already have is good enough and we just need to update the array that was passed in with |
6519 | | // the loader values. |
6520 | 0 | if (found_count == phys_dev_count && 0 != old_count && old_count == new_count) { |
6521 | 0 | for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) { |
6522 | 0 | for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) { |
6523 | 0 | if (old_to_new_index[cur_idx] == (int32_t)new_idx) { |
6524 | 0 | phys_devs[new_idx] = (VkPhysicalDevice)inst->phys_devs_tramp[cur_idx]; |
6525 | 0 | break; |
6526 | 0 | } |
6527 | 0 | } |
6528 | 0 | } |
6529 | | // Nothing else to do for this path |
6530 | 0 | res = VK_SUCCESS; |
6531 | 0 | } else { |
6532 | | // Something is different, so do the full path of checking every device and creating a new array to use. |
6533 | | // This can happen if a device was added, or removed, or we hadn't previously queried all the data and we |
6534 | | // have more to store. |
6535 | 0 | new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count, |
6536 | 0 | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
6537 | 0 | if (NULL == new_phys_devs) { |
6538 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6539 | 0 | "setup_loader_tramp_phys_devs: Failed to allocate new physical device array of size %d", new_count); |
6540 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
6541 | 0 | goto out; |
6542 | 0 | } |
6543 | | |
6544 | 0 | if (new_count > phys_dev_count) { |
6545 | 0 | found_count = phys_dev_count; |
6546 | 0 | } else { |
6547 | 0 | found_count = new_count; |
6548 | 0 | } |
6549 | | |
6550 | | // First try to see if an old item exists that matches the new item. If so, just copy it over. |
6551 | 0 | for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) { |
6552 | 0 | bool old_item_found = false; |
6553 | 0 | for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) { |
6554 | 0 | if (old_to_new_index[cur_idx] == (int32_t)new_idx) { |
6555 | | // Copy over old item to correct spot in the new array |
6556 | 0 | new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx]; |
6557 | 0 | old_item_found = true; |
6558 | 0 | break; |
6559 | 0 | } |
6560 | 0 | } |
6561 | | // Something wasn't found, so it's new so add it to the new list |
6562 | 0 | if (!old_item_found) { |
6563 | 0 | new_phys_devs[new_idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_tramp), |
6564 | 0 | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
6565 | 0 | if (NULL == new_phys_devs[new_idx]) { |
6566 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6567 | 0 | "setup_loader_tramp_phys_devs: Failed to allocate new trampoline physical device"); |
6568 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
6569 | 0 | goto out; |
6570 | 0 | } |
6571 | | |
6572 | | // Initialize the new physicalDevice object |
6573 | 0 | loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp); |
6574 | 0 | new_phys_devs[new_idx]->this_instance = inst; |
6575 | 0 | new_phys_devs[new_idx]->phys_dev = phys_devs[new_idx]; |
6576 | 0 | new_phys_devs[new_idx]->magic = PHYS_TRAMP_MAGIC_NUMBER; |
6577 | 0 | } |
6578 | | |
6579 | 0 | phys_devs[new_idx] = (VkPhysicalDevice)new_phys_devs[new_idx]; |
6580 | 0 | } |
6581 | | |
6582 | | // We usually get here if the user array is smaller than the total number of devices, so copy the |
6583 | | // remaining devices we have over to the new array. |
6584 | 0 | uint32_t start = found_count; |
6585 | 0 | for (uint32_t new_idx = start; new_idx < new_count; ++new_idx) { |
6586 | 0 | for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) { |
6587 | 0 | if (old_to_new_index[cur_idx] == -1) { |
6588 | 0 | new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx]; |
6589 | 0 | old_to_new_index[cur_idx] = new_idx; |
6590 | 0 | found_count++; |
6591 | 0 | break; |
6592 | 0 | } |
6593 | 0 | } |
6594 | 0 | } |
6595 | 0 | } |
6596 | | |
6597 | 0 | out: |
6598 | |
|
6599 | 0 | if (NULL != new_phys_devs) { |
6600 | 0 | if (VK_SUCCESS != res) { |
6601 | 0 | for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) { |
6602 | | // If an OOM occurred inside the copying of the new physical devices into the existing array |
6603 | | // will leave some of the old physical devices in the array which may have been copied into |
6604 | | // the new array, leading to them being freed twice. To avoid this we just make sure to not |
6605 | | // delete physical devices which were copied. |
6606 | 0 | bool found = false; |
6607 | 0 | for (uint32_t cur_idx = 0; cur_idx < inst->phys_dev_count_tramp; cur_idx++) { |
6608 | 0 | if (new_phys_devs[new_idx] == inst->phys_devs_tramp[cur_idx]) { |
6609 | 0 | found = true; |
6610 | 0 | break; |
6611 | 0 | } |
6612 | 0 | } |
6613 | 0 | if (!found) { |
6614 | 0 | loader_instance_heap_free(inst, new_phys_devs[new_idx]); |
6615 | 0 | } |
6616 | 0 | } |
6617 | 0 | loader_instance_heap_free(inst, new_phys_devs); |
6618 | 0 | } else { |
6619 | 0 | if (new_count > inst->total_gpu_count) { |
6620 | 0 | inst->total_gpu_count = new_count; |
6621 | 0 | } |
6622 | | // Free everything in the old array that was not copied into the new array |
6623 | | // here. We can't attempt to do that before here since the previous loop |
6624 | | // looking before the "out:" label may hit an out of memory condition resulting |
6625 | | // in memory leaking. |
6626 | 0 | if (NULL != inst->phys_devs_tramp) { |
6627 | 0 | for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) { |
6628 | 0 | bool found = false; |
6629 | 0 | for (uint32_t j = 0; j < inst->total_gpu_count; j++) { |
6630 | 0 | if (inst->phys_devs_tramp[i] == new_phys_devs[j]) { |
6631 | 0 | found = true; |
6632 | 0 | break; |
6633 | 0 | } |
6634 | 0 | } |
6635 | 0 | if (!found) { |
6636 | 0 | loader_instance_heap_free(inst, inst->phys_devs_tramp[i]); |
6637 | 0 | } |
6638 | 0 | } |
6639 | 0 | loader_instance_heap_free(inst, inst->phys_devs_tramp); |
6640 | 0 | } |
6641 | 0 | inst->phys_devs_tramp = new_phys_devs; |
6642 | 0 | inst->phys_dev_count_tramp = found_count; |
6643 | 0 | } |
6644 | 0 | } |
6645 | 0 | if (VK_SUCCESS != res) { |
6646 | 0 | inst->total_gpu_count = 0; |
6647 | 0 | } |
6648 | |
|
6649 | 0 | return res; |
6650 | 0 | } |
6651 | | |
6652 | | #if defined(LOADER_ENABLE_LINUX_SORT) |
6653 | 0 | bool is_linux_sort_enabled(struct loader_instance *inst) { |
6654 | 0 | bool sort_items = inst->supports_get_dev_prop_2; |
6655 | 0 | char *env_value = loader_getenv("VK_LOADER_DISABLE_SELECT", inst); |
6656 | 0 | if (NULL != env_value) { |
6657 | 0 | int32_t int_env_val = atoi(env_value); |
6658 | 0 | loader_free_getenv(env_value, inst); |
6659 | 0 | if (int_env_val != 0) { |
6660 | 0 | sort_items = false; |
6661 | 0 | } |
6662 | 0 | } |
6663 | 0 | return sort_items; |
6664 | 0 | } |
6665 | | #endif // LOADER_ENABLE_LINUX_SORT |
6666 | | |
6667 | | // Look for physical_device in the provided phys_devs list, return true if found and put the index into out_idx, otherwise |
6668 | | // return false |
6669 | | bool find_phys_dev(VkPhysicalDevice physical_device, uint32_t phys_devs_count, struct loader_physical_device_term **phys_devs, |
6670 | 0 | uint32_t *out_idx) { |
6671 | 0 | if (NULL == phys_devs) return false; |
6672 | 0 | for (uint32_t idx = 0; idx < phys_devs_count; idx++) { |
6673 | 0 | if (NULL != phys_devs[idx] && physical_device == phys_devs[idx]->phys_dev) { |
6674 | 0 | *out_idx = idx; |
6675 | 0 | return true; |
6676 | 0 | } |
6677 | 0 | } |
6678 | 0 | return false; |
6679 | 0 | } |
6680 | | |
6681 | | // Add physical_device to new_phys_devs |
6682 | | VkResult check_and_add_to_new_phys_devs(struct loader_instance *inst, VkPhysicalDevice physical_device, |
6683 | | struct loader_icd_physical_devices *dev_array, uint32_t *cur_new_phys_dev_count, |
6684 | 0 | struct loader_physical_device_term **new_phys_devs) { |
6685 | 0 | uint32_t out_idx = 0; |
6686 | 0 | uint32_t idx = *cur_new_phys_dev_count; |
6687 | | // Check if the physical_device already exists in the new_phys_devs buffer, that means it was found from both |
6688 | | // EnumerateAdapterPhysicalDevices and EnumeratePhysicalDevices and we need to skip it. |
6689 | 0 | if (find_phys_dev(physical_device, idx, new_phys_devs, &out_idx)) { |
6690 | 0 | return VK_SUCCESS; |
6691 | 0 | } |
6692 | | // Check if it was found in a previous call to vkEnumeratePhysicalDevices, we can just copy over the old data. |
6693 | 0 | if (find_phys_dev(physical_device, inst->phys_dev_count_term, inst->phys_devs_term, &out_idx)) { |
6694 | 0 | new_phys_devs[idx] = inst->phys_devs_term[out_idx]; |
6695 | 0 | (*cur_new_phys_dev_count)++; |
6696 | 0 | return VK_SUCCESS; |
6697 | 0 | } |
6698 | | |
6699 | | // Exit in case something is already present - this shouldn't happen but better to be safe than overwrite existing data |
6700 | | // since this code has been refactored a half dozen times. |
6701 | 0 | if (NULL != new_phys_devs[idx]) { |
6702 | 0 | return VK_SUCCESS; |
6703 | 0 | } |
6704 | | // If this physical device is new, we need to allocate space for it. |
6705 | 0 | new_phys_devs[idx] = |
6706 | 0 | loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
6707 | 0 | if (NULL == new_phys_devs[idx]) { |
6708 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6709 | 0 | "check_and_add_to_new_phys_devs: Failed to allocate physical device terminator object %d", idx); |
6710 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
6711 | 0 | } |
6712 | | |
6713 | 0 | loader_set_dispatch((void *)new_phys_devs[idx], inst->disp); |
6714 | 0 | new_phys_devs[idx]->this_icd_term = dev_array->icd_term; |
6715 | 0 | new_phys_devs[idx]->phys_dev = physical_device; |
6716 | | |
6717 | | // Increment the count of new physical devices |
6718 | 0 | (*cur_new_phys_dev_count)++; |
6719 | 0 | return VK_SUCCESS; |
6720 | 0 | } |
6721 | | |
6722 | | /* Enumerate all physical devices from ICDs and add them to inst->phys_devs_term |
6723 | | * |
6724 | | * There are two methods to find VkPhysicalDevices - vkEnumeratePhysicalDevices and vkEnumerateAdapterPhysicalDevices |
6725 | | * The latter is supported on windows only and on devices supporting ICD Interface Version 6 and greater. |
6726 | | * |
6727 | | * Once all physical devices are acquired, they need to be pulled into a single list of `loader_physical_device_term`'s. |
6728 | | * They also need to be setup - the icd_term, icd_index, phys_dev, and disp (dispatch table) all need the correct data. |
6729 | | * Additionally, we need to keep using already setup physical devices as they may be in use, thus anything enumerated |
6730 | | * that is already in inst->phys_devs_term will be carried over. |
6731 | | */ |
6732 | | |
6733 | 0 | VkResult setup_loader_term_phys_devs(struct loader_instance *inst) { |
6734 | 0 | VkResult res = VK_SUCCESS; |
6735 | 0 | struct loader_icd_term *icd_term; |
6736 | 0 | uint32_t windows_sorted_devices_count = 0; |
6737 | 0 | struct loader_icd_physical_devices *windows_sorted_devices_array = NULL; |
6738 | 0 | uint32_t icd_count = 0; |
6739 | 0 | struct loader_icd_physical_devices *icd_phys_dev_array = NULL; |
6740 | 0 | uint32_t new_phys_devs_capacity = 0; |
6741 | 0 | uint32_t new_phys_devs_count = 0; |
6742 | 0 | struct loader_physical_device_term **new_phys_devs = NULL; |
6743 | |
|
6744 | | #if defined(_WIN32) |
6745 | | // Get the physical devices supported by platform sorting mechanism into a separate list |
6746 | | res = windows_read_sorted_physical_devices(inst, &windows_sorted_devices_count, &windows_sorted_devices_array); |
6747 | | if (VK_SUCCESS != res) { |
6748 | | goto out; |
6749 | | } |
6750 | | #endif |
6751 | |
|
6752 | 0 | icd_count = inst->icd_terms_count; |
6753 | | |
6754 | | // Allocate something to store the physical device characteristics that we read from each ICD. |
6755 | 0 | icd_phys_dev_array = |
6756 | 0 | (struct loader_icd_physical_devices *)loader_stack_alloc(sizeof(struct loader_icd_physical_devices) * icd_count); |
6757 | 0 | if (NULL == icd_phys_dev_array) { |
6758 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6759 | 0 | "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device info array of size %d", |
6760 | 0 | icd_count); |
6761 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
6762 | 0 | goto out; |
6763 | 0 | } |
6764 | 0 | memset(icd_phys_dev_array, 0, sizeof(struct loader_icd_physical_devices) * icd_count); |
6765 | | |
6766 | | // For each ICD, query the number of physical devices, and then get an |
6767 | | // internal value for those physical devices. |
6768 | 0 | icd_term = inst->icd_terms; |
6769 | 0 | uint32_t icd_idx = 0; |
6770 | 0 | while (NULL != icd_term) { |
6771 | 0 | res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].device_count, NULL); |
6772 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
6773 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6774 | 0 | "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code " |
6775 | 0 | "VK_ERROR_OUT_OF_HOST_MEMORY", |
6776 | 0 | icd_term->scanned_icd->lib_name); |
6777 | 0 | goto out; |
6778 | 0 | } else if (VK_SUCCESS == res) { |
6779 | 0 | icd_phys_dev_array[icd_idx].physical_devices = |
6780 | 0 | (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].device_count * sizeof(VkPhysicalDevice)); |
6781 | 0 | if (NULL == icd_phys_dev_array[icd_idx].physical_devices) { |
6782 | 0 | loader_log( |
6783 | 0 | inst, VULKAN_LOADER_ERROR_BIT, 0, |
6784 | 0 | "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device array for ICD %s of size %d", |
6785 | 0 | icd_term->scanned_icd->lib_name, icd_phys_dev_array[icd_idx].device_count); |
6786 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
6787 | 0 | goto out; |
6788 | 0 | } |
6789 | | |
6790 | 0 | res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].device_count), |
6791 | 0 | icd_phys_dev_array[icd_idx].physical_devices); |
6792 | 0 | if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { |
6793 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6794 | 0 | "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code " |
6795 | 0 | "VK_ERROR_OUT_OF_HOST_MEMORY", |
6796 | 0 | icd_term->scanned_icd->lib_name); |
6797 | 0 | goto out; |
6798 | 0 | } |
6799 | 0 | if (VK_SUCCESS != res) { |
6800 | 0 | loader_log( |
6801 | 0 | inst, VULKAN_LOADER_ERROR_BIT, 0, |
6802 | 0 | "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code %d", |
6803 | 0 | icd_term->scanned_icd->lib_name, res); |
6804 | 0 | icd_phys_dev_array[icd_idx].device_count = 0; |
6805 | 0 | icd_phys_dev_array[icd_idx].physical_devices = 0; |
6806 | 0 | } |
6807 | 0 | } else { |
6808 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6809 | 0 | "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code %d", |
6810 | 0 | icd_term->scanned_icd->lib_name, res); |
6811 | 0 | icd_phys_dev_array[icd_idx].device_count = 0; |
6812 | 0 | icd_phys_dev_array[icd_idx].physical_devices = 0; |
6813 | 0 | } |
6814 | 0 | icd_phys_dev_array[icd_idx].icd_term = icd_term; |
6815 | 0 | icd_term->physical_device_count = icd_phys_dev_array[icd_idx].device_count; |
6816 | 0 | icd_term = icd_term->next; |
6817 | 0 | ++icd_idx; |
6818 | 0 | } |
6819 | | |
6820 | | // Add up both the windows sorted and non windows found physical device counts |
6821 | 0 | for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) { |
6822 | 0 | new_phys_devs_capacity += windows_sorted_devices_array[i].device_count; |
6823 | 0 | } |
6824 | 0 | for (uint32_t i = 0; i < icd_count; ++i) { |
6825 | 0 | new_phys_devs_capacity += icd_phys_dev_array[i].device_count; |
6826 | 0 | } |
6827 | | |
6828 | | // Bail out if there are no physical devices reported |
6829 | 0 | if (0 == new_phys_devs_capacity) { |
6830 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6831 | 0 | "setup_loader_term_phys_devs: Failed to detect any valid GPUs in the current config"); |
6832 | 0 | res = VK_ERROR_INITIALIZATION_FAILED; |
6833 | 0 | goto out; |
6834 | 0 | } |
6835 | | |
6836 | | // Create an allocation large enough to hold both the windows sorting enumeration and non-windows physical device |
6837 | | // enumeration |
6838 | 0 | new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_capacity, |
6839 | 0 | VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
6840 | 0 | if (NULL == new_phys_devs) { |
6841 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6842 | 0 | "setup_loader_term_phys_devs: Failed to allocate new physical device array of size %d", new_phys_devs_capacity); |
6843 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
6844 | 0 | goto out; |
6845 | 0 | } |
6846 | | |
6847 | | // Copy over everything found through sorted enumeration |
6848 | 0 | for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) { |
6849 | 0 | for (uint32_t j = 0; j < windows_sorted_devices_array[i].device_count; ++j) { |
6850 | 0 | res = check_and_add_to_new_phys_devs(inst, windows_sorted_devices_array[i].physical_devices[j], |
6851 | 0 | &windows_sorted_devices_array[i], &new_phys_devs_count, new_phys_devs); |
6852 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
6853 | 0 | goto out; |
6854 | 0 | } |
6855 | 0 | } |
6856 | 0 | } |
6857 | | |
6858 | | // Now go through the rest of the physical devices and add them to new_phys_devs |
6859 | 0 | #if defined(LOADER_ENABLE_LINUX_SORT) |
6860 | | |
6861 | 0 | if (is_linux_sort_enabled(inst)) { |
6862 | 0 | for (uint32_t dev = new_phys_devs_count; dev < new_phys_devs_capacity; ++dev) { |
6863 | 0 | new_phys_devs[dev] = |
6864 | 0 | loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
6865 | 0 | if (NULL == new_phys_devs[dev]) { |
6866 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
6867 | 0 | "setup_loader_term_phys_devs: Failed to allocate physical device terminator object %d", dev); |
6868 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
6869 | 0 | goto out; |
6870 | 0 | } |
6871 | 0 | } |
6872 | | |
6873 | | // Get the physical devices supported by platform sorting mechanism into a separate list |
6874 | | // Pass in a sublist to the function so it only operates on the correct elements. This means passing in a pointer to the |
6875 | | // current next element in new_phys_devs and passing in a `count` of currently unwritten elements |
6876 | 0 | res = linux_read_sorted_physical_devices(inst, icd_count, icd_phys_dev_array, new_phys_devs_capacity - new_phys_devs_count, |
6877 | 0 | &new_phys_devs[new_phys_devs_count]); |
6878 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
6879 | 0 | goto out; |
6880 | 0 | } |
6881 | | // Keep previously allocated physical device info since apps may already be using that! |
6882 | 0 | for (uint32_t new_idx = new_phys_devs_count; new_idx < new_phys_devs_capacity; new_idx++) { |
6883 | 0 | for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) { |
6884 | 0 | if (new_phys_devs[new_idx]->phys_dev == inst->phys_devs_term[old_idx]->phys_dev) { |
6885 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
6886 | 0 | "Copying old device %u into new device %u", old_idx, new_idx); |
6887 | | // Free the old new_phys_devs info since we're not using it before we assign the new info |
6888 | 0 | loader_instance_heap_free(inst, new_phys_devs[new_idx]); |
6889 | 0 | new_phys_devs[new_idx] = inst->phys_devs_term[old_idx]; |
6890 | 0 | break; |
6891 | 0 | } |
6892 | 0 | } |
6893 | 0 | } |
6894 | | // now set the count to the capacity, as now the list is filled in |
6895 | 0 | new_phys_devs_count = new_phys_devs_capacity; |
6896 | | // We want the following code to run if either linux sorting is disabled at compile time or runtime |
6897 | 0 | } else { |
6898 | 0 | #endif // LOADER_ENABLE_LINUX_SORT |
6899 | | |
6900 | | // Copy over everything found through the non-sorted means. |
6901 | 0 | for (uint32_t i = 0; i < icd_count; ++i) { |
6902 | 0 | for (uint32_t j = 0; j < icd_phys_dev_array[i].device_count; ++j) { |
6903 | 0 | res = check_and_add_to_new_phys_devs(inst, icd_phys_dev_array[i].physical_devices[j], &icd_phys_dev_array[i], |
6904 | 0 | &new_phys_devs_count, new_phys_devs); |
6905 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
6906 | 0 | goto out; |
6907 | 0 | } |
6908 | 0 | } |
6909 | 0 | } |
6910 | 0 | #if defined(LOADER_ENABLE_LINUX_SORT) |
6911 | 0 | } |
6912 | 0 | #endif // LOADER_ENABLE_LINUX_SORT |
6913 | 0 | out: |
6914 | |
|
6915 | 0 | if (VK_SUCCESS != res) { |
6916 | 0 | if (NULL != new_phys_devs) { |
6917 | | // We've encountered an error, so we should free the new buffers. |
6918 | 0 | for (uint32_t i = 0; i < new_phys_devs_capacity; i++) { |
6919 | | // May not have allocated this far, skip it if we hadn't. |
6920 | 0 | if (new_phys_devs[i] == NULL) continue; |
6921 | | |
6922 | | // If an OOM occurred inside the copying of the new physical devices into the existing array |
6923 | | // will leave some of the old physical devices in the array which may have been copied into |
6924 | | // the new array, leading to them being freed twice. To avoid this we just make sure to not |
6925 | | // delete physical devices which were copied. |
6926 | 0 | bool found = false; |
6927 | 0 | if (NULL != inst->phys_devs_term) { |
6928 | 0 | for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) { |
6929 | 0 | if (new_phys_devs[i] == inst->phys_devs_term[old_idx]) { |
6930 | 0 | found = true; |
6931 | 0 | break; |
6932 | 0 | } |
6933 | 0 | } |
6934 | 0 | } |
6935 | 0 | if (!found) { |
6936 | 0 | loader_instance_heap_free(inst, new_phys_devs[i]); |
6937 | 0 | } |
6938 | 0 | } |
6939 | 0 | loader_instance_heap_free(inst, new_phys_devs); |
6940 | 0 | } |
6941 | 0 | inst->total_gpu_count = 0; |
6942 | 0 | } else { |
6943 | 0 | if (NULL != inst->phys_devs_term) { |
6944 | | // Free everything in the old array that was not copied into the new array |
6945 | | // here. We can't attempt to do that before here since the previous loop |
6946 | | // looking before the "out:" label may hit an out of memory condition resulting |
6947 | | // in memory leaking. |
6948 | 0 | for (uint32_t i = 0; i < inst->phys_dev_count_term; i++) { |
6949 | 0 | bool found = false; |
6950 | 0 | for (uint32_t j = 0; j < new_phys_devs_count; j++) { |
6951 | 0 | if (new_phys_devs != NULL && inst->phys_devs_term[i] == new_phys_devs[j]) { |
6952 | 0 | found = true; |
6953 | 0 | break; |
6954 | 0 | } |
6955 | 0 | } |
6956 | 0 | if (!found) { |
6957 | 0 | loader_instance_heap_free(inst, inst->phys_devs_term[i]); |
6958 | 0 | } |
6959 | 0 | } |
6960 | 0 | loader_instance_heap_free(inst, inst->phys_devs_term); |
6961 | 0 | } |
6962 | | |
6963 | | // Swap out old and new devices list |
6964 | 0 | inst->phys_dev_count_term = new_phys_devs_count; |
6965 | 0 | inst->phys_devs_term = new_phys_devs; |
6966 | 0 | inst->total_gpu_count = new_phys_devs_count; |
6967 | 0 | } |
6968 | |
|
6969 | 0 | if (windows_sorted_devices_array != NULL) { |
6970 | 0 | for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) { |
6971 | 0 | if (windows_sorted_devices_array[i].device_count > 0 && windows_sorted_devices_array[i].physical_devices != NULL) { |
6972 | 0 | loader_instance_heap_free(inst, windows_sorted_devices_array[i].physical_devices); |
6973 | 0 | } |
6974 | 0 | } |
6975 | 0 | loader_instance_heap_free(inst, windows_sorted_devices_array); |
6976 | 0 | } |
6977 | |
|
6978 | 0 | return res; |
6979 | 0 | } |
6980 | | /** |
6981 | | * Iterates through all drivers and unloads any which do not contain physical devices. |
6982 | | * This saves address space, which for 32 bit applications is scarce. |
6983 | | * This must only be called after a call to vkEnumeratePhysicalDevices that isn't just querying the count |
6984 | | */ |
6985 | 0 | void unload_drivers_without_physical_devices(struct loader_instance *inst) { |
6986 | 0 | struct loader_icd_term *cur_icd_term = inst->icd_terms; |
6987 | 0 | struct loader_icd_term *prev_icd_term = NULL; |
6988 | |
|
6989 | 0 | while (NULL != cur_icd_term) { |
6990 | 0 | struct loader_icd_term *next_icd_term = cur_icd_term->next; |
6991 | 0 | if (cur_icd_term->physical_device_count == 0) { |
6992 | 0 | uint32_t cur_scanned_icd_index = UINT32_MAX; |
6993 | 0 | if (inst->icd_tramp_list.scanned_list) { |
6994 | 0 | for (uint32_t i = 0; i < inst->icd_tramp_list.count; i++) { |
6995 | 0 | if (&(inst->icd_tramp_list.scanned_list[i]) == cur_icd_term->scanned_icd) { |
6996 | 0 | cur_scanned_icd_index = i; |
6997 | 0 | break; |
6998 | 0 | } |
6999 | 0 | } |
7000 | 0 | } |
7001 | 0 | if (cur_scanned_icd_index != UINT32_MAX) { |
7002 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, |
7003 | 0 | "Removing driver %s due to not having any physical devices", cur_icd_term->scanned_icd->lib_name); |
7004 | |
|
7005 | 0 | const VkAllocationCallbacks *allocation_callbacks = ignore_null_callback(&(inst->alloc_callbacks)); |
7006 | 0 | if (cur_icd_term->instance) { |
7007 | 0 | loader_icd_close_objects(inst, cur_icd_term); |
7008 | 0 | cur_icd_term->dispatch.DestroyInstance(cur_icd_term->instance, allocation_callbacks); |
7009 | 0 | } |
7010 | 0 | cur_icd_term->instance = VK_NULL_HANDLE; |
7011 | 0 | loader_icd_destroy(inst, cur_icd_term, allocation_callbacks); |
7012 | 0 | cur_icd_term = NULL; |
7013 | 0 | struct loader_scanned_icd *scanned_icd_to_remove = &inst->icd_tramp_list.scanned_list[cur_scanned_icd_index]; |
7014 | | // Iterate through preloaded ICDs and remove the corresponding driver from that list |
7015 | 0 | loader_platform_thread_lock_mutex(&loader_preload_icd_lock); |
7016 | 0 | if (NULL != preloaded_icds.scanned_list) { |
7017 | 0 | for (uint32_t i = 0; i < preloaded_icds.count; i++) { |
7018 | 0 | if (NULL != preloaded_icds.scanned_list[i].lib_name && NULL != scanned_icd_to_remove->lib_name && |
7019 | 0 | strcmp(preloaded_icds.scanned_list[i].lib_name, scanned_icd_to_remove->lib_name) == 0) { |
7020 | 0 | loader_unload_scanned_icd(NULL, &preloaded_icds.scanned_list[i]); |
7021 | | // condense the list so that it doesn't contain empty elements. |
7022 | 0 | if (i < preloaded_icds.count - 1) { |
7023 | 0 | memcpy((void *)&preloaded_icds.scanned_list[i], |
7024 | 0 | (void *)&preloaded_icds.scanned_list[preloaded_icds.count - 1], |
7025 | 0 | sizeof(struct loader_scanned_icd)); |
7026 | 0 | memset((void *)&preloaded_icds.scanned_list[preloaded_icds.count - 1], 0, |
7027 | 0 | sizeof(struct loader_scanned_icd)); |
7028 | 0 | } |
7029 | 0 | if (i > 0) { |
7030 | 0 | preloaded_icds.count--; |
7031 | 0 | } |
7032 | |
|
7033 | 0 | break; |
7034 | 0 | } |
7035 | 0 | } |
7036 | 0 | } |
7037 | 0 | loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); |
7038 | |
|
7039 | 0 | loader_unload_scanned_icd(inst, scanned_icd_to_remove); |
7040 | 0 | } |
7041 | |
|
7042 | 0 | if (NULL == prev_icd_term) { |
7043 | 0 | inst->icd_terms = next_icd_term; |
7044 | 0 | } else { |
7045 | 0 | prev_icd_term->next = next_icd_term; |
7046 | 0 | } |
7047 | 0 | } else { |
7048 | 0 | prev_icd_term = cur_icd_term; |
7049 | 0 | } |
7050 | 0 | cur_icd_term = next_icd_term; |
7051 | 0 | } |
7052 | 0 | } |
7053 | | |
7054 | | VkResult setup_loader_tramp_phys_dev_groups(struct loader_instance *inst, uint32_t group_count, |
7055 | 0 | VkPhysicalDeviceGroupProperties *groups) { |
7056 | 0 | VkResult res = VK_SUCCESS; |
7057 | 0 | uint32_t cur_idx; |
7058 | 0 | uint32_t dev_idx; |
7059 | |
|
7060 | 0 | if (0 == group_count) { |
7061 | 0 | return VK_SUCCESS; |
7062 | 0 | } |
7063 | | |
7064 | | // Generate a list of all the devices and convert them to the loader ID |
7065 | 0 | uint32_t phys_dev_count = 0; |
7066 | 0 | for (cur_idx = 0; cur_idx < group_count; ++cur_idx) { |
7067 | 0 | phys_dev_count += groups[cur_idx].physicalDeviceCount; |
7068 | 0 | } |
7069 | 0 | VkPhysicalDevice *devices = (VkPhysicalDevice *)loader_stack_alloc(sizeof(VkPhysicalDevice) * phys_dev_count); |
7070 | 0 | if (NULL == devices) { |
7071 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
7072 | 0 | } |
7073 | | |
7074 | 0 | uint32_t cur_device = 0; |
7075 | 0 | for (cur_idx = 0; cur_idx < group_count; ++cur_idx) { |
7076 | 0 | for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) { |
7077 | 0 | devices[cur_device++] = groups[cur_idx].physicalDevices[dev_idx]; |
7078 | 0 | } |
7079 | 0 | } |
7080 | | |
7081 | | // Update the devices based on the loader physical device values. |
7082 | 0 | res = setup_loader_tramp_phys_devs(inst, phys_dev_count, devices); |
7083 | 0 | if (VK_SUCCESS != res) { |
7084 | 0 | return res; |
7085 | 0 | } |
7086 | | |
7087 | | // Update the devices in the group structures now |
7088 | 0 | cur_device = 0; |
7089 | 0 | for (cur_idx = 0; cur_idx < group_count; ++cur_idx) { |
7090 | 0 | for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) { |
7091 | 0 | groups[cur_idx].physicalDevices[dev_idx] = devices[cur_device++]; |
7092 | 0 | } |
7093 | 0 | } |
7094 | |
|
7095 | 0 | return res; |
7096 | 0 | } |
7097 | | |
7098 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, |
7099 | 0 | VkPhysicalDevice *pPhysicalDevices) { |
7100 | 0 | struct loader_instance *inst = (struct loader_instance *)instance; |
7101 | 0 | VkResult res = VK_SUCCESS; |
7102 | | |
7103 | | // Always call the setup loader terminator physical devices because they may |
7104 | | // have changed at any point. |
7105 | 0 | res = setup_loader_term_phys_devs(inst); |
7106 | 0 | if (VK_SUCCESS != res) { |
7107 | 0 | goto out; |
7108 | 0 | } |
7109 | | |
7110 | 0 | if (inst->settings.settings_active && inst->settings.device_configuration_count > 0) { |
7111 | | // Use settings file device_configurations if present |
7112 | 0 | if (NULL == pPhysicalDevices) { |
7113 | | // take the minimum of the settings configurations count and number of terminators |
7114 | 0 | *pPhysicalDeviceCount = (inst->settings.device_configuration_count < inst->phys_dev_count_term) |
7115 | 0 | ? inst->settings.device_configuration_count |
7116 | 0 | : inst->phys_dev_count_term; |
7117 | 0 | } else { |
7118 | 0 | res = loader_apply_settings_device_configurations(inst, pPhysicalDeviceCount, pPhysicalDevices); |
7119 | 0 | } |
7120 | 0 | } else { |
7121 | | // Otherwise just copy the physical devices up normally and pass it up the chain |
7122 | 0 | uint32_t copy_count = inst->phys_dev_count_term; |
7123 | 0 | if (NULL != pPhysicalDevices) { |
7124 | 0 | if (copy_count > *pPhysicalDeviceCount) { |
7125 | 0 | copy_count = *pPhysicalDeviceCount; |
7126 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
7127 | 0 | "terminator_EnumeratePhysicalDevices : Trimming device count from %d to %d.", inst->phys_dev_count_term, |
7128 | 0 | copy_count); |
7129 | 0 | res = VK_INCOMPLETE; |
7130 | 0 | } |
7131 | |
|
7132 | 0 | for (uint32_t i = 0; i < copy_count; i++) { |
7133 | 0 | pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i]; |
7134 | 0 | } |
7135 | 0 | } |
7136 | |
|
7137 | 0 | *pPhysicalDeviceCount = copy_count; |
7138 | 0 | } |
7139 | |
|
7140 | 0 | out: |
7141 | |
|
7142 | 0 | return res; |
7143 | 0 | } |
7144 | | |
7145 | | VkResult check_physical_device_extensions_for_driver_properties_extension(struct loader_physical_device_term *phys_dev_term, |
7146 | 0 | bool *supports_driver_properties) { |
7147 | 0 | *supports_driver_properties = false; |
7148 | 0 | uint32_t extension_count = 0; |
7149 | 0 | VkResult res = phys_dev_term->this_icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, |
7150 | 0 | &extension_count, NULL); |
7151 | 0 | if (res != VK_SUCCESS || extension_count == 0) { |
7152 | 0 | return VK_SUCCESS; |
7153 | 0 | } |
7154 | 0 | VkExtensionProperties *extension_data = loader_stack_alloc(sizeof(VkExtensionProperties) * extension_count); |
7155 | 0 | if (NULL == extension_data) { |
7156 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
7157 | 0 | } |
7158 | | |
7159 | 0 | res = phys_dev_term->this_icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &extension_count, |
7160 | 0 | extension_data); |
7161 | 0 | if (res != VK_SUCCESS) { |
7162 | 0 | return VK_SUCCESS; |
7163 | 0 | } |
7164 | 0 | for (uint32_t j = 0; j < extension_count; j++) { |
7165 | 0 | if (!strcmp(extension_data[j].extensionName, VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME)) { |
7166 | 0 | *supports_driver_properties = true; |
7167 | 0 | return VK_SUCCESS; |
7168 | 0 | } |
7169 | 0 | } |
7170 | 0 | return VK_SUCCESS; |
7171 | 0 | } |
7172 | | |
7173 | | // Helper struct containing the relevant details of a VkPhysicalDevice necessary for applying the loader settings device |
7174 | | // configurations. |
7175 | | typedef struct physical_device_configuration_details { |
7176 | | bool pd_was_added; |
7177 | | bool pd_supports_vulkan_11; |
7178 | | bool pd_supports_driver_properties; |
7179 | | VkPhysicalDeviceProperties properties; |
7180 | | VkPhysicalDeviceIDProperties device_id_properties; |
7181 | | VkPhysicalDeviceDriverProperties driver_properties; |
7182 | | |
7183 | | } physical_device_configuration_details; |
7184 | | |
7185 | | // Apply the device_configurations in the settings file to the output VkPhysicalDeviceList. |
7186 | | // That means looking up each VkPhysicalDevice's deviceUUID, filtering using that, and putting them in the order of |
7187 | | // device_configurations in the settings file. |
7188 | | VkResult loader_apply_settings_device_configurations(struct loader_instance *inst, uint32_t *pPhysicalDeviceCount, |
7189 | 0 | VkPhysicalDevice *pPhysicalDevices) { |
7190 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
7191 | 0 | "Reordering the output of vkEnumeratePhysicalDevices to match the loader settings device configurations list"); |
7192 | |
|
7193 | 0 | physical_device_configuration_details *pd_details = |
7194 | 0 | loader_stack_alloc(inst->phys_dev_count_term * sizeof(physical_device_configuration_details)); |
7195 | 0 | if (NULL == pd_details) { |
7196 | 0 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
7197 | 0 | } |
7198 | 0 | memset(pd_details, 0, inst->phys_dev_count_term * sizeof(physical_device_configuration_details)); |
7199 | |
|
7200 | 0 | for (uint32_t i = 0; i < inst->phys_dev_count_term; i++) { |
7201 | 0 | struct loader_physical_device_term *phys_dev_term = inst->phys_devs_term[i]; |
7202 | |
|
7203 | 0 | phys_dev_term->this_icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &pd_details[i].properties); |
7204 | 0 | if (pd_details[i].properties.apiVersion < VK_API_VERSION_1_1) { |
7205 | | // Device isn't eligible for sorting |
7206 | 0 | continue; |
7207 | 0 | } |
7208 | 0 | pd_details[i].pd_supports_vulkan_11 = true; |
7209 | 0 | if (pd_details[i].properties.apiVersion >= VK_API_VERSION_1_2) { |
7210 | 0 | pd_details[i].pd_supports_driver_properties = true; |
7211 | 0 | } |
7212 | | |
7213 | | // If this physical device isn't 1.2, then we need to check if it supports VK_KHR_driver_properties |
7214 | 0 | if (!pd_details[i].pd_supports_driver_properties) { |
7215 | 0 | VkResult res = check_physical_device_extensions_for_driver_properties_extension( |
7216 | 0 | phys_dev_term, &pd_details[i].pd_supports_driver_properties); |
7217 | 0 | if (res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
7218 | 0 | return res; |
7219 | 0 | } |
7220 | 0 | } |
7221 | | |
7222 | 0 | pd_details[i].device_id_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES; |
7223 | 0 | pd_details[i].driver_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES; |
7224 | 0 | if (pd_details[i].pd_supports_driver_properties) { |
7225 | 0 | pd_details[i].device_id_properties.pNext = (void *)&pd_details[i].driver_properties; |
7226 | 0 | } |
7227 | |
|
7228 | 0 | VkPhysicalDeviceProperties2 props2 = {0}; |
7229 | 0 | props2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2; |
7230 | 0 | props2.pNext = (void *)&pd_details[i].device_id_properties; |
7231 | 0 | if (phys_dev_term->this_icd_term->dispatch.GetPhysicalDeviceProperties2) { |
7232 | 0 | phys_dev_term->this_icd_term->dispatch.GetPhysicalDeviceProperties2(phys_dev_term->phys_dev, &props2); |
7233 | 0 | } |
7234 | 0 | } |
7235 | | |
7236 | | // Loop over the setting's device configurations, find each VkPhysicalDevice which matches the deviceUUID given, add to the |
7237 | | // pPhysicalDevices output list. |
7238 | 0 | uint32_t written_output_index = 0; |
7239 | |
|
7240 | 0 | for (uint32_t i = 0; i < inst->settings.device_configuration_count; i++) { |
7241 | 0 | uint8_t *current_deviceUUID = inst->settings.device_configurations[i].deviceUUID; |
7242 | 0 | uint8_t *current_driverUUID = inst->settings.device_configurations[i].driverUUID; |
7243 | 0 | bool configuration_found = false; |
7244 | 0 | for (uint32_t j = 0; j < inst->phys_dev_count_term; j++) { |
7245 | | // Don't compare deviceUUID's if they have nothing, since we require deviceUUID's to effectively sort them. |
7246 | 0 | if (!pd_details[j].pd_supports_vulkan_11) { |
7247 | 0 | continue; |
7248 | 0 | } |
7249 | 0 | if (memcmp(current_deviceUUID, pd_details[j].device_id_properties.deviceUUID, sizeof(uint8_t) * VK_UUID_SIZE) == 0 && |
7250 | 0 | memcmp(current_driverUUID, pd_details[j].device_id_properties.driverUUID, sizeof(uint8_t) * VK_UUID_SIZE) == 0 && |
7251 | 0 | inst->settings.device_configurations[i].driverVersion == pd_details[j].properties.driverVersion) { |
7252 | 0 | configuration_found = true; |
7253 | | // Catch when there are more device_configurations than space available in the output |
7254 | 0 | if (written_output_index >= *pPhysicalDeviceCount) { |
7255 | 0 | *pPhysicalDeviceCount = written_output_index; // write out how many were written |
7256 | 0 | return VK_INCOMPLETE; |
7257 | 0 | } |
7258 | 0 | if (pd_details[j].pd_supports_driver_properties) { |
7259 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
7260 | 0 | "pPhysicalDevices array index %d is set to \"%s\" (%s, version %d) ", written_output_index, |
7261 | 0 | pd_details[j].properties.deviceName, pd_details[i].driver_properties.driverName, |
7262 | 0 | pd_details[i].properties.driverVersion); |
7263 | 0 | } else { |
7264 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
7265 | 0 | "pPhysicalDevices array index %d is set to \"%s\" (driver version %d) ", written_output_index, |
7266 | 0 | pd_details[j].properties.deviceName, pd_details[i].properties.driverVersion); |
7267 | 0 | } |
7268 | 0 | pPhysicalDevices[written_output_index++] = (VkPhysicalDevice)inst->phys_devs_term[j]; |
7269 | 0 | pd_details[j].pd_was_added = true; |
7270 | 0 | break; |
7271 | 0 | } |
7272 | 0 | } |
7273 | 0 | if (!configuration_found) { |
7274 | 0 | char device_uuid_str[UUID_STR_LEN] = {0}; |
7275 | 0 | loader_log_generate_uuid_string(current_deviceUUID, device_uuid_str); |
7276 | 0 | char driver_uuid_str[UUID_STR_LEN] = {0}; |
7277 | 0 | loader_log_generate_uuid_string(current_deviceUUID, driver_uuid_str); |
7278 | | |
7279 | | // Log that this configuration was missing. |
7280 | 0 | if (inst->settings.device_configurations[i].deviceName[0] != '\0' && |
7281 | 0 | inst->settings.device_configurations[i].driverName[0] != '\0') { |
7282 | 0 | loader_log( |
7283 | 0 | inst, VULKAN_LOADER_WARN_BIT, 0, |
7284 | 0 | "loader_apply_settings_device_configurations: settings file contained device_configuration which does not " |
7285 | 0 | "appear in the enumerated VkPhysicalDevices. Missing VkPhysicalDevice with deviceName: \"%s\", " |
7286 | 0 | "deviceUUID: %s, driverName: %s, driverUUID: %s, driverVersion: %d", |
7287 | 0 | inst->settings.device_configurations[i].deviceName, device_uuid_str, |
7288 | 0 | inst->settings.device_configurations[i].driverName, driver_uuid_str, |
7289 | 0 | inst->settings.device_configurations[i].driverVersion); |
7290 | 0 | } else if (inst->settings.device_configurations[i].deviceName[0] != '\0') { |
7291 | 0 | loader_log( |
7292 | 0 | inst, VULKAN_LOADER_WARN_BIT, 0, |
7293 | 0 | "loader_apply_settings_device_configurations: settings file contained device_configuration which does not " |
7294 | 0 | "appear in the enumerated VkPhysicalDevices. Missing VkPhysicalDevice with deviceName: \"%s\", " |
7295 | 0 | "deviceUUID: %s, driverUUID: %s, driverVersion: %d", |
7296 | 0 | inst->settings.device_configurations[i].deviceName, device_uuid_str, driver_uuid_str, |
7297 | 0 | inst->settings.device_configurations[i].driverVersion); |
7298 | 0 | } else { |
7299 | 0 | loader_log( |
7300 | 0 | inst, VULKAN_LOADER_WARN_BIT, 0, |
7301 | 0 | "loader_apply_settings_device_configurations: settings file contained device_configuration which does not " |
7302 | 0 | "appear in the enumerated VkPhysicalDevices. Missing VkPhysicalDevice with deviceUUID: " |
7303 | 0 | "%s, driverUUID: %s, driverVersion: %d", |
7304 | 0 | device_uuid_str, driver_uuid_str, inst->settings.device_configurations[i].driverVersion); |
7305 | 0 | } |
7306 | 0 | } |
7307 | 0 | } |
7308 | | |
7309 | 0 | for (uint32_t j = 0; j < inst->phys_dev_count_term; j++) { |
7310 | 0 | if (!pd_details[j].pd_was_added) { |
7311 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
7312 | 0 | "VkPhysicalDevice \"%s\" did not appear in the settings file device configurations list, so was not added " |
7313 | 0 | "to the pPhysicalDevices array", |
7314 | 0 | pd_details[j].properties.deviceName); |
7315 | 0 | } |
7316 | 0 | } |
7317 | |
|
7318 | 0 | if (written_output_index == 0) { |
7319 | 0 | loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, |
7320 | 0 | "loader_apply_settings_device_configurations: None of the settings file device configurations had " |
7321 | 0 | "deviceUUID's that corresponded to enumerated VkPhysicalDevices. Returning VK_ERROR_INITIALIZATION_FAILED"); |
7322 | 0 | return VK_ERROR_INITIALIZATION_FAILED; |
7323 | 0 | } |
7324 | | |
7325 | 0 | *pPhysicalDeviceCount = written_output_index; // update with how many were written |
7326 | 0 | return VK_SUCCESS; |
7327 | 0 | } |
7328 | | |
7329 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, |
7330 | | const char *pLayerName, uint32_t *pPropertyCount, |
7331 | 0 | VkExtensionProperties *pProperties) { |
7332 | 0 | if (NULL == pPropertyCount) { |
7333 | 0 | return VK_INCOMPLETE; |
7334 | 0 | } |
7335 | | |
7336 | 0 | struct loader_physical_device_term *phys_dev_term; |
7337 | | |
7338 | | // Any layer or trampoline wrapping should be removed at this point in time can just cast to the expected |
7339 | | // type for VkPhysicalDevice. |
7340 | 0 | phys_dev_term = (struct loader_physical_device_term *)physicalDevice; |
7341 | | |
7342 | | // if we got here with a non-empty pLayerName, look up the extensions |
7343 | | // from the json |
7344 | 0 | if (pLayerName != NULL && strlen(pLayerName) > 0) { |
7345 | 0 | uint32_t count; |
7346 | 0 | uint32_t copy_size; |
7347 | 0 | const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance; |
7348 | 0 | struct loader_device_extension_list *dev_ext_list = NULL; |
7349 | 0 | struct loader_device_extension_list local_ext_list; |
7350 | 0 | memset(&local_ext_list, 0, sizeof(local_ext_list)); |
7351 | 0 | if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) { |
7352 | 0 | for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) { |
7353 | 0 | struct loader_layer_properties *props = &inst->instance_layer_list.list[i]; |
7354 | 0 | if (strcmp(props->info.layerName, pLayerName) == 0) { |
7355 | 0 | dev_ext_list = &props->device_extension_list; |
7356 | 0 | } |
7357 | 0 | } |
7358 | |
|
7359 | 0 | count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count; |
7360 | 0 | if (pProperties == NULL) { |
7361 | 0 | *pPropertyCount = count; |
7362 | 0 | loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list); |
7363 | 0 | return VK_SUCCESS; |
7364 | 0 | } |
7365 | | |
7366 | 0 | copy_size = *pPropertyCount < count ? *pPropertyCount : count; |
7367 | 0 | for (uint32_t i = 0; i < copy_size; i++) { |
7368 | 0 | memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties)); |
7369 | 0 | } |
7370 | 0 | *pPropertyCount = copy_size; |
7371 | |
|
7372 | 0 | loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list); |
7373 | 0 | if (copy_size < count) { |
7374 | 0 | return VK_INCOMPLETE; |
7375 | 0 | } |
7376 | 0 | } else { |
7377 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7378 | 0 | "vkEnumerateDeviceExtensionProperties: pLayerName is too long or is badly formed"); |
7379 | 0 | return VK_ERROR_EXTENSION_NOT_PRESENT; |
7380 | 0 | } |
7381 | | |
7382 | 0 | return VK_SUCCESS; |
7383 | 0 | } |
7384 | | |
7385 | | // user is querying driver extensions and has supplied their own storage - just fill it out |
7386 | 0 | else if (pProperties) { |
7387 | 0 | struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; |
7388 | 0 | uint32_t written_count = *pPropertyCount; |
7389 | 0 | VkResult res = |
7390 | 0 | icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &written_count, pProperties); |
7391 | 0 | if (res != VK_SUCCESS) { |
7392 | 0 | return res; |
7393 | 0 | } |
7394 | | |
7395 | | // Iterate over active layers, if they are an implicit layer, add their device extensions |
7396 | | // After calling into the driver, written_count contains the amount of device extensions written. We can therefore write |
7397 | | // layer extensions starting at that point in pProperties |
7398 | 0 | for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) { |
7399 | 0 | struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i]; |
7400 | 0 | if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { |
7401 | 0 | struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list; |
7402 | 0 | for (uint32_t j = 0; j < layer_ext_list->count; j++) { |
7403 | 0 | struct loader_dev_ext_props *cur_ext_props = &layer_ext_list->list[j]; |
7404 | | // look for duplicates |
7405 | 0 | if (has_vk_extension_property_array(&cur_ext_props->props, written_count, pProperties)) { |
7406 | 0 | continue; |
7407 | 0 | } |
7408 | | |
7409 | 0 | if (*pPropertyCount <= written_count) { |
7410 | 0 | return VK_INCOMPLETE; |
7411 | 0 | } |
7412 | | |
7413 | 0 | memcpy(&pProperties[written_count], &cur_ext_props->props, sizeof(VkExtensionProperties)); |
7414 | 0 | written_count++; |
7415 | 0 | } |
7416 | 0 | } |
7417 | 0 | } |
7418 | | // Make sure we update the pPropertyCount with the how many were written |
7419 | 0 | *pPropertyCount = written_count; |
7420 | 0 | return res; |
7421 | 0 | } |
7422 | | // Use `goto out;` for rest of this function |
7423 | | |
7424 | | // This case is during the call down the instance chain with pLayerName == NULL and pProperties == NULL |
7425 | 0 | struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; |
7426 | 0 | struct loader_extension_list all_exts = {0}; |
7427 | 0 | VkResult res; |
7428 | | |
7429 | | // We need to find the count without duplicates. This requires querying the driver for the names of the extensions. |
7430 | 0 | res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, NULL); |
7431 | 0 | if (res != VK_SUCCESS) { |
7432 | 0 | goto out; |
7433 | 0 | } |
7434 | | // Then allocate memory to store the physical device extension list + the extensions layers provide |
7435 | | // all_exts.count currently is the number of driver extensions |
7436 | 0 | all_exts.capacity = sizeof(VkExtensionProperties) * (all_exts.count + 20); |
7437 | 0 | all_exts.list = loader_instance_heap_alloc(icd_term->this_instance, all_exts.capacity, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); |
7438 | 0 | if (NULL == all_exts.list) { |
7439 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
7440 | 0 | goto out; |
7441 | 0 | } |
7442 | | |
7443 | | // Get the available device extensions and put them in all_exts.list |
7444 | 0 | res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, all_exts.list); |
7445 | 0 | if (res != VK_SUCCESS) { |
7446 | 0 | goto out; |
7447 | 0 | } |
7448 | | |
7449 | | // Iterate over active layers, if they are an implicit layer, add their device extensions to all_exts.list |
7450 | 0 | for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) { |
7451 | 0 | struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i]; |
7452 | 0 | if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { |
7453 | 0 | struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list; |
7454 | 0 | for (uint32_t j = 0; j < layer_ext_list->count; j++) { |
7455 | 0 | res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1, &layer_ext_list->list[j].props); |
7456 | 0 | if (res != VK_SUCCESS) { |
7457 | 0 | goto out; |
7458 | 0 | } |
7459 | 0 | } |
7460 | 0 | } |
7461 | 0 | } |
7462 | | |
7463 | | // Write out the final de-duplicated count to pPropertyCount |
7464 | 0 | *pPropertyCount = all_exts.count; |
7465 | 0 | res = VK_SUCCESS; |
7466 | |
|
7467 | 0 | out: |
7468 | |
|
7469 | 0 | loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts); |
7470 | 0 | return res; |
7471 | 0 | } |
7472 | | |
7473 | 7.65k | VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) { |
7474 | 7.65k | VkStringErrorFlags result = VK_STRING_ERROR_NONE; |
7475 | 7.65k | int num_char_bytes = 0; |
7476 | 7.65k | int i, j; |
7477 | | |
7478 | 7.65k | if (utf8 == NULL) { |
7479 | 0 | return VK_STRING_ERROR_NULL_PTR; |
7480 | 0 | } |
7481 | | |
7482 | 214k | for (i = 0; i <= max_length; i++) { |
7483 | 214k | if (utf8[i] == 0) { |
7484 | 7.65k | break; |
7485 | 206k | } else if (i == max_length) { |
7486 | 0 | result |= VK_STRING_ERROR_LENGTH; |
7487 | 0 | break; |
7488 | 206k | } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) { |
7489 | 206k | num_char_bytes = 0; |
7490 | 206k | } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) { |
7491 | 0 | num_char_bytes = 1; |
7492 | 0 | } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) { |
7493 | 0 | num_char_bytes = 2; |
7494 | 0 | } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) { |
7495 | 0 | num_char_bytes = 3; |
7496 | 0 | } else { |
7497 | 0 | result = VK_STRING_ERROR_BAD_DATA; |
7498 | 0 | } |
7499 | | |
7500 | | // Validate the following num_char_bytes of data |
7501 | 206k | for (j = 0; (j < num_char_bytes) && (i < max_length); j++) { |
7502 | 0 | if (++i == max_length) { |
7503 | 0 | result |= VK_STRING_ERROR_LENGTH; |
7504 | 0 | break; |
7505 | 0 | } |
7506 | 0 | if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) { |
7507 | 0 | result |= VK_STRING_ERROR_BAD_DATA; |
7508 | 0 | } |
7509 | 0 | } |
7510 | 206k | } |
7511 | 7.65k | return result; |
7512 | 7.65k | } |
7513 | | |
7514 | 0 | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceVersion(uint32_t *pApiVersion) { |
7515 | | // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead |
7516 | | // prefers us crashing. |
7517 | 0 | *pApiVersion = VK_HEADER_VERSION_COMPLETE; |
7518 | 0 | return VK_SUCCESS; |
7519 | 0 | } |
7520 | | |
7521 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain, |
7522 | 0 | uint32_t *pApiVersion) { |
7523 | 0 | (void)chain; |
7524 | 0 | return terminator_EnumerateInstanceVersion(pApiVersion); |
7525 | 0 | } |
7526 | | |
7527 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pPropertyCount, |
7528 | 0 | VkExtensionProperties *pProperties) { |
7529 | 0 | struct loader_extension_list *global_ext_list = NULL; |
7530 | 0 | struct loader_layer_list instance_layers; |
7531 | 0 | struct loader_extension_list local_ext_list; |
7532 | 0 | struct loader_icd_tramp_list icd_tramp_list; |
7533 | 0 | uint32_t copy_size; |
7534 | 0 | VkResult res = VK_SUCCESS; |
7535 | 0 | struct loader_envvar_all_filters layer_filters = {0}; |
7536 | |
|
7537 | 0 | memset(&local_ext_list, 0, sizeof(local_ext_list)); |
7538 | 0 | memset(&instance_layers, 0, sizeof(instance_layers)); |
7539 | 0 | memset(&icd_tramp_list, 0, sizeof(icd_tramp_list)); |
7540 | |
|
7541 | 0 | res = parse_layer_environment_var_filters(NULL, &layer_filters); |
7542 | 0 | if (VK_SUCCESS != res) { |
7543 | 0 | goto out; |
7544 | 0 | } |
7545 | | |
7546 | | // Get layer libraries if needed |
7547 | 0 | if (pLayerName && strlen(pLayerName) != 0) { |
7548 | 0 | if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) { |
7549 | 0 | assert(VK_FALSE && "vkEnumerateInstanceExtensionProperties: pLayerName is too long or is badly formed"); |
7550 | 0 | res = VK_ERROR_EXTENSION_NOT_PRESENT; |
7551 | 0 | goto out; |
7552 | 0 | } |
7553 | | |
7554 | 0 | res = loader_scan_for_layers(NULL, &instance_layers, &layer_filters); |
7555 | 0 | if (VK_SUCCESS != res) { |
7556 | 0 | goto out; |
7557 | 0 | } |
7558 | 0 | for (uint32_t i = 0; i < instance_layers.count; i++) { |
7559 | 0 | struct loader_layer_properties *props = &instance_layers.list[i]; |
7560 | 0 | if (strcmp(props->info.layerName, pLayerName) == 0) { |
7561 | 0 | global_ext_list = &props->instance_extension_list; |
7562 | 0 | break; |
7563 | 0 | } |
7564 | 0 | } |
7565 | 0 | } else { |
7566 | | // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them |
7567 | 0 | loader_preload_icds(); |
7568 | | |
7569 | | // Scan/discover all ICD libraries |
7570 | 0 | res = loader_icd_scan(NULL, &icd_tramp_list, NULL, NULL); |
7571 | | // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT |
7572 | 0 | if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) { |
7573 | 0 | goto out; |
7574 | 0 | } |
7575 | | // Get extensions from all ICD's, merge so no duplicates |
7576 | 0 | res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list); |
7577 | 0 | if (VK_SUCCESS != res) { |
7578 | 0 | goto out; |
7579 | 0 | } |
7580 | 0 | loader_clear_scanned_icd_list(NULL, &icd_tramp_list); |
7581 | | |
7582 | | // Append enabled implicit layers. |
7583 | 0 | res = loader_scan_for_implicit_layers(NULL, &instance_layers, &layer_filters); |
7584 | 0 | if (VK_SUCCESS != res) { |
7585 | 0 | goto out; |
7586 | 0 | } |
7587 | 0 | for (uint32_t i = 0; i < instance_layers.count; i++) { |
7588 | 0 | struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list; |
7589 | 0 | loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list); |
7590 | 0 | } |
7591 | |
|
7592 | 0 | global_ext_list = &local_ext_list; |
7593 | 0 | } |
7594 | | |
7595 | 0 | if (global_ext_list == NULL) { |
7596 | 0 | res = VK_ERROR_LAYER_NOT_PRESENT; |
7597 | 0 | goto out; |
7598 | 0 | } |
7599 | | |
7600 | 0 | if (pProperties == NULL) { |
7601 | 0 | *pPropertyCount = global_ext_list->count; |
7602 | 0 | goto out; |
7603 | 0 | } |
7604 | | |
7605 | 0 | copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count; |
7606 | 0 | for (uint32_t i = 0; i < copy_size; i++) { |
7607 | 0 | memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties)); |
7608 | 0 | } |
7609 | 0 | *pPropertyCount = copy_size; |
7610 | |
|
7611 | 0 | if (copy_size < global_ext_list->count) { |
7612 | 0 | res = VK_INCOMPLETE; |
7613 | 0 | goto out; |
7614 | 0 | } |
7615 | | |
7616 | 0 | out: |
7617 | 0 | loader_destroy_generic_list(NULL, (struct loader_generic_list *)&icd_tramp_list); |
7618 | 0 | loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list); |
7619 | 0 | loader_delete_layer_list_and_properties(NULL, &instance_layers); |
7620 | 0 | return res; |
7621 | 0 | } |
7622 | | |
7623 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceExtensionProperties( |
7624 | | const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName, uint32_t *pPropertyCount, |
7625 | 0 | VkExtensionProperties *pProperties) { |
7626 | 0 | (void)chain; |
7627 | 0 | return terminator_EnumerateInstanceExtensionProperties(pLayerName, pPropertyCount, pProperties); |
7628 | 0 | } |
7629 | | |
7630 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount, |
7631 | 0 | VkLayerProperties *pProperties) { |
7632 | 0 | VkResult result = VK_SUCCESS; |
7633 | 0 | struct loader_layer_list instance_layer_list; |
7634 | 0 | struct loader_envvar_all_filters layer_filters = {0}; |
7635 | |
|
7636 | 0 | LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize); |
7637 | |
|
7638 | 0 | result = parse_layer_environment_var_filters(NULL, &layer_filters); |
7639 | 0 | if (VK_SUCCESS != result) { |
7640 | 0 | goto out; |
7641 | 0 | } |
7642 | | |
7643 | | // Get layer libraries |
7644 | 0 | memset(&instance_layer_list, 0, sizeof(instance_layer_list)); |
7645 | 0 | result = loader_scan_for_layers(NULL, &instance_layer_list, &layer_filters); |
7646 | 0 | if (VK_SUCCESS != result) { |
7647 | 0 | goto out; |
7648 | 0 | } |
7649 | | |
7650 | 0 | uint32_t layers_to_write_out = 0; |
7651 | 0 | for (uint32_t i = 0; i < instance_layer_list.count; i++) { |
7652 | 0 | if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON || |
7653 | 0 | instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) { |
7654 | 0 | layers_to_write_out++; |
7655 | 0 | } |
7656 | 0 | } |
7657 | |
|
7658 | 0 | if (pProperties == NULL) { |
7659 | 0 | *pPropertyCount = layers_to_write_out; |
7660 | 0 | goto out; |
7661 | 0 | } |
7662 | | |
7663 | 0 | uint32_t output_properties_index = 0; |
7664 | 0 | for (uint32_t i = 0; i < instance_layer_list.count; i++) { |
7665 | 0 | if (output_properties_index < *pPropertyCount && |
7666 | 0 | (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON || |
7667 | 0 | instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT)) { |
7668 | 0 | memcpy(&pProperties[output_properties_index], &instance_layer_list.list[i].info, sizeof(VkLayerProperties)); |
7669 | 0 | output_properties_index++; |
7670 | 0 | } |
7671 | 0 | } |
7672 | 0 | if (output_properties_index < layers_to_write_out) { |
7673 | | // Indicates that we had more elements to write but ran out of room |
7674 | 0 | result = VK_INCOMPLETE; |
7675 | 0 | } |
7676 | |
|
7677 | 0 | *pPropertyCount = output_properties_index; |
7678 | |
|
7679 | 0 | out: |
7680 | |
|
7681 | 0 | loader_delete_layer_list_and_properties(NULL, &instance_layer_list); |
7682 | 0 | return result; |
7683 | 0 | } |
7684 | | |
7685 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceLayerProperties( |
7686 | 0 | const VkEnumerateInstanceLayerPropertiesChain *chain, uint32_t *pPropertyCount, VkLayerProperties *pProperties) { |
7687 | 0 | (void)chain; |
7688 | 0 | return terminator_EnumerateInstanceLayerProperties(pPropertyCount, pProperties); |
7689 | 0 | } |
7690 | | |
7691 | | // ---- Vulkan Core 1.1 terminators |
7692 | | |
7693 | | VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups( |
7694 | 0 | VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) { |
7695 | 0 | struct loader_instance *inst = (struct loader_instance *)instance; |
7696 | |
|
7697 | 0 | VkResult res = VK_SUCCESS; |
7698 | 0 | struct loader_icd_term *icd_term; |
7699 | 0 | uint32_t total_count = 0; |
7700 | 0 | uint32_t cur_icd_group_count = 0; |
7701 | 0 | VkPhysicalDeviceGroupProperties **new_phys_dev_groups = NULL; |
7702 | 0 | struct loader_physical_device_group_term *local_phys_dev_groups = NULL; |
7703 | 0 | PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL; |
7704 | 0 | struct loader_icd_physical_devices *sorted_phys_dev_array = NULL; |
7705 | 0 | uint32_t sorted_count = 0; |
7706 | | |
7707 | | // For each ICD, query the number of physical device groups, and then get an |
7708 | | // internal value for those physical devices. |
7709 | 0 | icd_term = inst->icd_terms; |
7710 | 0 | while (NULL != icd_term) { |
7711 | 0 | cur_icd_group_count = 0; |
7712 | | |
7713 | | // Get the function pointer to use to call into the ICD. This could be the core or KHR version |
7714 | 0 | if (inst->enabled_extensions.khr_device_group_creation) { |
7715 | 0 | fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR; |
7716 | 0 | } else { |
7717 | 0 | fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups; |
7718 | 0 | } |
7719 | |
|
7720 | 0 | if (NULL == fpEnumeratePhysicalDeviceGroups) { |
7721 | | // Treat each ICD's GPU as it's own group if the extension isn't supported |
7722 | 0 | res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL); |
7723 | 0 | if (res != VK_SUCCESS) { |
7724 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7725 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of \'EnumeratePhysicalDevices\' " |
7726 | 0 | "to ICD %s to get plain phys dev count.", |
7727 | 0 | icd_term->scanned_icd->lib_name); |
7728 | 0 | continue; |
7729 | 0 | } |
7730 | 0 | } else { |
7731 | | // Query the actual group info |
7732 | 0 | res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL); |
7733 | 0 | if (res != VK_SUCCESS) { |
7734 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7735 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7736 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get count.", |
7737 | 0 | icd_term->scanned_icd->lib_name); |
7738 | 0 | continue; |
7739 | 0 | } |
7740 | 0 | } |
7741 | 0 | total_count += cur_icd_group_count; |
7742 | 0 | icd_term = icd_term->next; |
7743 | 0 | } |
7744 | | |
7745 | | // If GPUs not sorted yet, look through them and generate list of all available GPUs |
7746 | 0 | if (0 == total_count || 0 == inst->total_gpu_count) { |
7747 | 0 | res = setup_loader_term_phys_devs(inst); |
7748 | 0 | if (VK_SUCCESS != res) { |
7749 | 0 | goto out; |
7750 | 0 | } |
7751 | 0 | } |
7752 | | |
7753 | 0 | if (NULL != pPhysicalDeviceGroupProperties) { |
7754 | | // Create an array for the new physical device groups, which will be stored |
7755 | | // in the instance for the Terminator code. |
7756 | 0 | new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_calloc( |
7757 | 0 | inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
7758 | 0 | if (NULL == new_phys_dev_groups) { |
7759 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7760 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate new physical device group array of size %d", |
7761 | 0 | total_count); |
7762 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
7763 | 0 | goto out; |
7764 | 0 | } |
7765 | | |
7766 | | // Create a temporary array (on the stack) to keep track of the |
7767 | | // returned VkPhysicalDevice values. |
7768 | 0 | local_phys_dev_groups = loader_stack_alloc(sizeof(struct loader_physical_device_group_term) * total_count); |
7769 | | // Initialize the memory to something valid |
7770 | 0 | memset(local_phys_dev_groups, 0, sizeof(struct loader_physical_device_group_term) * total_count); |
7771 | |
|
7772 | | #if defined(_WIN32) |
7773 | | // Get the physical devices supported by platform sorting mechanism into a separate list |
7774 | | res = windows_read_sorted_physical_devices(inst, &sorted_count, &sorted_phys_dev_array); |
7775 | | if (VK_SUCCESS != res) { |
7776 | | goto out; |
7777 | | } |
7778 | | #endif |
7779 | |
|
7780 | 0 | cur_icd_group_count = 0; |
7781 | 0 | icd_term = inst->icd_terms; |
7782 | 0 | while (NULL != icd_term) { |
7783 | 0 | uint32_t count_this_time = total_count - cur_icd_group_count; |
7784 | | |
7785 | | // Get the function pointer to use to call into the ICD. This could be the core or KHR version |
7786 | 0 | if (inst->enabled_extensions.khr_device_group_creation) { |
7787 | 0 | fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR; |
7788 | 0 | } else { |
7789 | 0 | fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups; |
7790 | 0 | } |
7791 | |
|
7792 | 0 | if (NULL == fpEnumeratePhysicalDeviceGroups) { |
7793 | 0 | icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, NULL); |
7794 | |
|
7795 | 0 | VkPhysicalDevice *phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time); |
7796 | 0 | if (NULL == phys_dev_array) { |
7797 | 0 | loader_log( |
7798 | 0 | inst, VULKAN_LOADER_ERROR_BIT, 0, |
7799 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate local physical device array of size %d", |
7800 | 0 | count_this_time); |
7801 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
7802 | 0 | goto out; |
7803 | 0 | } |
7804 | | |
7805 | 0 | res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array); |
7806 | 0 | if (res != VK_SUCCESS) { |
7807 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7808 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7809 | 0 | "\'EnumeratePhysicalDevices\' to ICD %s to get plain phys dev count.", |
7810 | 0 | icd_term->scanned_icd->lib_name); |
7811 | 0 | goto out; |
7812 | 0 | } |
7813 | | |
7814 | | // Add each GPU as it's own group |
7815 | 0 | for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) { |
7816 | 0 | uint32_t cur_index = indiv_gpu + cur_icd_group_count; |
7817 | 0 | local_phys_dev_groups[cur_index].this_icd_term = icd_term; |
7818 | 0 | local_phys_dev_groups[cur_index].group_props.physicalDeviceCount = 1; |
7819 | 0 | local_phys_dev_groups[cur_index].group_props.physicalDevices[0] = phys_dev_array[indiv_gpu]; |
7820 | 0 | } |
7821 | |
|
7822 | 0 | } else { |
7823 | 0 | res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, NULL); |
7824 | 0 | if (res != VK_SUCCESS) { |
7825 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7826 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7827 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get group count.", |
7828 | 0 | icd_term->scanned_icd->lib_name); |
7829 | 0 | goto out; |
7830 | 0 | } |
7831 | 0 | if (cur_icd_group_count + count_this_time < *pPhysicalDeviceGroupCount) { |
7832 | | // The total amount is still less than the amount of physical device group data passed in |
7833 | | // by the callee. Therefore, we don't have to allocate any temporary structures and we |
7834 | | // can just use the data that was passed in. |
7835 | 0 | res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, |
7836 | 0 | &pPhysicalDeviceGroupProperties[cur_icd_group_count]); |
7837 | 0 | if (res != VK_SUCCESS) { |
7838 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7839 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7840 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get group information.", |
7841 | 0 | icd_term->scanned_icd->lib_name); |
7842 | 0 | goto out; |
7843 | 0 | } |
7844 | 0 | for (uint32_t group = 0; group < count_this_time; ++group) { |
7845 | 0 | uint32_t cur_index = group + cur_icd_group_count; |
7846 | 0 | local_phys_dev_groups[cur_index].group_props = pPhysicalDeviceGroupProperties[cur_index]; |
7847 | 0 | local_phys_dev_groups[cur_index].this_icd_term = icd_term; |
7848 | 0 | } |
7849 | 0 | } else { |
7850 | | // There's not enough space in the callee's allocated pPhysicalDeviceGroupProperties structs, |
7851 | | // so we have to allocate temporary versions to collect all the data. However, we need to make |
7852 | | // sure that at least the ones we do query utilize any pNext data in the callee's version. |
7853 | 0 | VkPhysicalDeviceGroupProperties *tmp_group_props = |
7854 | 0 | loader_stack_alloc(count_this_time * sizeof(VkPhysicalDeviceGroupProperties)); |
7855 | 0 | for (uint32_t group = 0; group < count_this_time; group++) { |
7856 | 0 | tmp_group_props[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES; |
7857 | 0 | uint32_t cur_index = group + cur_icd_group_count; |
7858 | 0 | if (*pPhysicalDeviceGroupCount > cur_index) { |
7859 | 0 | tmp_group_props[group].pNext = pPhysicalDeviceGroupProperties[cur_index].pNext; |
7860 | 0 | } else { |
7861 | 0 | tmp_group_props[group].pNext = NULL; |
7862 | 0 | } |
7863 | 0 | tmp_group_props[group].subsetAllocation = false; |
7864 | 0 | } |
7865 | |
|
7866 | 0 | res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, tmp_group_props); |
7867 | 0 | if (res != VK_SUCCESS) { |
7868 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7869 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7870 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get group information for temp data.", |
7871 | 0 | icd_term->scanned_icd->lib_name); |
7872 | 0 | goto out; |
7873 | 0 | } |
7874 | 0 | for (uint32_t group = 0; group < count_this_time; ++group) { |
7875 | 0 | uint32_t cur_index = group + cur_icd_group_count; |
7876 | 0 | local_phys_dev_groups[cur_index].group_props = tmp_group_props[group]; |
7877 | 0 | local_phys_dev_groups[cur_index].this_icd_term = icd_term; |
7878 | 0 | } |
7879 | 0 | } |
7880 | 0 | if (VK_SUCCESS != res) { |
7881 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7882 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of " |
7883 | 0 | "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get content.", |
7884 | 0 | icd_term->scanned_icd->lib_name); |
7885 | 0 | goto out; |
7886 | 0 | } |
7887 | 0 | } |
7888 | | |
7889 | 0 | cur_icd_group_count += count_this_time; |
7890 | 0 | icd_term = icd_term->next; |
7891 | 0 | } |
7892 | | |
7893 | 0 | #if defined(LOADER_ENABLE_LINUX_SORT) |
7894 | 0 | if (is_linux_sort_enabled(inst)) { |
7895 | | // Get the physical devices supported by platform sorting mechanism into a separate list |
7896 | 0 | res = linux_sort_physical_device_groups(inst, total_count, local_phys_dev_groups); |
7897 | 0 | } |
7898 | | #elif defined(_WIN32) |
7899 | | // The Windows sorting information is only on physical devices. We need to take that and convert it to the group |
7900 | | // information if it's present. |
7901 | | if (sorted_count > 0) { |
7902 | | res = |
7903 | | windows_sort_physical_device_groups(inst, total_count, local_phys_dev_groups, sorted_count, sorted_phys_dev_array); |
7904 | | } |
7905 | | #endif // LOADER_ENABLE_LINUX_SORT |
7906 | | |
7907 | | // Just to be safe, make sure we successfully completed setup_loader_term_phys_devs above |
7908 | | // before attempting to do the following. By verifying that setup_loader_term_phys_devs ran |
7909 | | // first, it guarantees that each physical device will have a loader-specific handle. |
7910 | 0 | if (NULL != inst->phys_devs_term) { |
7911 | 0 | for (uint32_t group = 0; group < total_count; group++) { |
7912 | 0 | for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].group_props.physicalDeviceCount; |
7913 | 0 | group_gpu++) { |
7914 | 0 | bool found = false; |
7915 | 0 | for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) { |
7916 | 0 | if (local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] == |
7917 | 0 | inst->phys_devs_term[term_gpu]->phys_dev) { |
7918 | 0 | local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] = |
7919 | 0 | (VkPhysicalDevice)inst->phys_devs_term[term_gpu]; |
7920 | 0 | found = true; |
7921 | 0 | break; |
7922 | 0 | } |
7923 | 0 | } |
7924 | 0 | if (!found) { |
7925 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7926 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed to find GPU %d in group %d returned by " |
7927 | 0 | "\'EnumeratePhysicalDeviceGroups\' in list returned by \'EnumeratePhysicalDevices\'", |
7928 | 0 | group_gpu, group); |
7929 | 0 | res = VK_ERROR_INITIALIZATION_FAILED; |
7930 | 0 | goto out; |
7931 | 0 | } |
7932 | 0 | } |
7933 | 0 | } |
7934 | 0 | } |
7935 | | |
7936 | 0 | uint32_t idx = 0; |
7937 | | |
7938 | | // Copy or create everything to fill the new array of physical device groups |
7939 | 0 | for (uint32_t group = 0; group < total_count; group++) { |
7940 | | // Skip groups which have been included through sorting |
7941 | 0 | if (local_phys_dev_groups[group].group_props.physicalDeviceCount == 0) { |
7942 | 0 | continue; |
7943 | 0 | } |
7944 | | |
7945 | | // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups |
7946 | 0 | VkPhysicalDeviceGroupProperties *group_properties = &local_phys_dev_groups[group].group_props; |
7947 | | |
7948 | | // Check if this physical device group with the same contents is already in the old buffer |
7949 | 0 | for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) { |
7950 | 0 | if (NULL != group_properties && NULL != inst->phys_dev_groups_term[old_idx] && |
7951 | 0 | group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) { |
7952 | 0 | bool found_all_gpus = true; |
7953 | 0 | for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) { |
7954 | 0 | bool found_gpu = false; |
7955 | 0 | for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) { |
7956 | 0 | if (group_properties->physicalDevices[new_gpu] == |
7957 | 0 | inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) { |
7958 | 0 | found_gpu = true; |
7959 | 0 | break; |
7960 | 0 | } |
7961 | 0 | } |
7962 | |
|
7963 | 0 | if (!found_gpu) { |
7964 | 0 | found_all_gpus = false; |
7965 | 0 | break; |
7966 | 0 | } |
7967 | 0 | } |
7968 | 0 | if (!found_all_gpus) { |
7969 | 0 | continue; |
7970 | 0 | } else { |
7971 | 0 | new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx]; |
7972 | 0 | break; |
7973 | 0 | } |
7974 | 0 | } |
7975 | 0 | } |
7976 | | // If this physical device group isn't in the old buffer, create it |
7977 | 0 | if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) { |
7978 | 0 | new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupProperties *)loader_instance_heap_alloc( |
7979 | 0 | inst, sizeof(VkPhysicalDeviceGroupProperties), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
7980 | 0 | if (NULL == new_phys_dev_groups[idx]) { |
7981 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
7982 | 0 | "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate physical device group Terminator " |
7983 | 0 | "object %d", |
7984 | 0 | idx); |
7985 | 0 | total_count = idx; |
7986 | 0 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
7987 | 0 | goto out; |
7988 | 0 | } |
7989 | 0 | memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupProperties)); |
7990 | 0 | } |
7991 | | |
7992 | 0 | ++idx; |
7993 | 0 | } |
7994 | 0 | } |
7995 | | |
7996 | 0 | out: |
7997 | |
|
7998 | 0 | if (NULL != pPhysicalDeviceGroupProperties) { |
7999 | 0 | if (VK_SUCCESS != res) { |
8000 | 0 | if (NULL != new_phys_dev_groups) { |
8001 | | // We've encountered an error, so we should free the new buffers. |
8002 | 0 | for (uint32_t i = 0; i < total_count; i++) { |
8003 | | // If an OOM occurred inside the copying of the new physical device groups into the existing array will |
8004 | | // leave some of the old physical device groups in the array which may have been copied into the new array, |
8005 | | // leading to them being freed twice. To avoid this we just make sure to not delete physical device groups |
8006 | | // which were copied. |
8007 | 0 | bool found = false; |
8008 | 0 | if (NULL != inst->phys_devs_term) { |
8009 | 0 | for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) { |
8010 | 0 | if (new_phys_dev_groups[i] == inst->phys_dev_groups_term[old_idx]) { |
8011 | 0 | found = true; |
8012 | 0 | break; |
8013 | 0 | } |
8014 | 0 | } |
8015 | 0 | } |
8016 | 0 | if (!found) { |
8017 | 0 | loader_instance_heap_free(inst, new_phys_dev_groups[i]); |
8018 | 0 | } |
8019 | 0 | } |
8020 | 0 | loader_instance_heap_free(inst, new_phys_dev_groups); |
8021 | 0 | } |
8022 | 0 | } else { |
8023 | 0 | if (NULL != inst->phys_dev_groups_term) { |
8024 | | // Free everything in the old array that was not copied into the new array |
8025 | | // here. We can't attempt to do that before here since the previous loop |
8026 | | // looking before the "out:" label may hit an out of memory condition resulting |
8027 | | // in memory leaking. |
8028 | 0 | for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) { |
8029 | 0 | bool found = false; |
8030 | 0 | for (uint32_t j = 0; j < total_count; j++) { |
8031 | 0 | if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) { |
8032 | 0 | found = true; |
8033 | 0 | break; |
8034 | 0 | } |
8035 | 0 | } |
8036 | 0 | if (!found) { |
8037 | 0 | loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]); |
8038 | 0 | } |
8039 | 0 | } |
8040 | 0 | loader_instance_heap_free(inst, inst->phys_dev_groups_term); |
8041 | 0 | } |
8042 | | |
8043 | | // Swap in the new physical device group list |
8044 | 0 | inst->phys_dev_group_count_term = total_count; |
8045 | 0 | inst->phys_dev_groups_term = new_phys_dev_groups; |
8046 | 0 | } |
8047 | |
|
8048 | 0 | if (sorted_phys_dev_array != NULL) { |
8049 | 0 | for (uint32_t i = 0; i < sorted_count; ++i) { |
8050 | 0 | if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) { |
8051 | 0 | loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices); |
8052 | 0 | } |
8053 | 0 | } |
8054 | 0 | loader_instance_heap_free(inst, sorted_phys_dev_array); |
8055 | 0 | } |
8056 | |
|
8057 | 0 | uint32_t copy_count = inst->phys_dev_group_count_term; |
8058 | 0 | if (NULL != pPhysicalDeviceGroupProperties) { |
8059 | 0 | if (copy_count > *pPhysicalDeviceGroupCount) { |
8060 | 0 | copy_count = *pPhysicalDeviceGroupCount; |
8061 | 0 | loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, |
8062 | 0 | "terminator_EnumeratePhysicalDeviceGroups : Trimming device count from %d to %d.", |
8063 | 0 | inst->phys_dev_group_count_term, copy_count); |
8064 | 0 | res = VK_INCOMPLETE; |
8065 | 0 | } |
8066 | |
|
8067 | 0 | for (uint32_t i = 0; i < copy_count; i++) { |
8068 | 0 | memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i], sizeof(VkPhysicalDeviceGroupProperties)); |
8069 | 0 | } |
8070 | 0 | } |
8071 | |
|
8072 | 0 | *pPhysicalDeviceGroupCount = copy_count; |
8073 | |
|
8074 | 0 | } else { |
8075 | 0 | *pPhysicalDeviceGroupCount = total_count; |
8076 | 0 | } |
8077 | 0 | return res; |
8078 | 0 | } |
8079 | | |
8080 | 0 | VkResult get_device_driver_id(VkPhysicalDevice physicalDevice, VkDriverId *driverId) { |
8081 | 0 | VkPhysicalDeviceDriverProperties physical_device_driver_props = {0}; |
8082 | 0 | physical_device_driver_props.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES; |
8083 | |
|
8084 | 0 | VkPhysicalDeviceProperties2 props2 = {0}; |
8085 | 0 | props2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2; |
8086 | 0 | props2.pNext = &physical_device_driver_props; |
8087 | |
|
8088 | 0 | struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; |
8089 | 0 | struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; |
8090 | 0 | const struct loader_instance *inst = icd_term->this_instance; |
8091 | |
|
8092 | 0 | assert(inst != NULL); |
8093 | | |
8094 | | // Get the function pointer to use to call into the ICD. This could be the core or KHR version |
8095 | 0 | PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2 = NULL; |
8096 | 0 | if (loader_check_version_meets_required(LOADER_VERSION_1_1_0, inst->app_api_version)) { |
8097 | 0 | fpGetPhysicalDeviceProperties2 = icd_term->dispatch.GetPhysicalDeviceProperties2; |
8098 | 0 | } |
8099 | 0 | if (fpGetPhysicalDeviceProperties2 == NULL && inst->enabled_extensions.khr_get_physical_device_properties2) { |
8100 | 0 | fpGetPhysicalDeviceProperties2 = icd_term->dispatch.GetPhysicalDeviceProperties2KHR; |
8101 | 0 | } |
8102 | |
|
8103 | 0 | if (fpGetPhysicalDeviceProperties2 == NULL) { |
8104 | 0 | *driverId = 0; |
8105 | 0 | return VK_ERROR_UNKNOWN; |
8106 | 0 | } |
8107 | | |
8108 | 0 | fpGetPhysicalDeviceProperties2(phys_dev_term->phys_dev, &props2); |
8109 | |
|
8110 | 0 | *driverId = physical_device_driver_props.driverID; |
8111 | 0 | return VK_SUCCESS; |
8112 | 0 | } |
8113 | | |
8114 | | VkResult loader_filter_enumerated_physical_device(const struct loader_instance *inst, |
8115 | | const struct loader_envvar_id_filter *device_id_filter, |
8116 | | const struct loader_envvar_id_filter *vendor_id_filter, |
8117 | | const struct loader_envvar_id_filter *driver_id_filter, |
8118 | | const uint32_t in_PhysicalDeviceCount, |
8119 | | const VkPhysicalDevice *in_pPhysicalDevices, uint32_t *out_pPhysicalDeviceCount, |
8120 | 0 | VkPhysicalDevice *out_pPhysicalDevices) { |
8121 | 0 | uint32_t filtered_physical_device_count = 0; |
8122 | 0 | for (uint32_t i = 0; i < in_PhysicalDeviceCount; i++) { |
8123 | 0 | VkPhysicalDeviceProperties dev_props = {0}; |
8124 | 0 | inst->disp->layer_inst_disp.GetPhysicalDeviceProperties(in_pPhysicalDevices[i], &dev_props); |
8125 | |
|
8126 | 0 | if ((0 != device_id_filter->count) && !check_id_matches_filter_environment_var(dev_props.deviceID, device_id_filter)) { |
8127 | 0 | continue; |
8128 | 0 | } |
8129 | | |
8130 | 0 | if ((0 != vendor_id_filter->count) && !check_id_matches_filter_environment_var(dev_props.vendorID, vendor_id_filter)) { |
8131 | 0 | continue; |
8132 | 0 | } |
8133 | | |
8134 | 0 | if (0 != driver_id_filter->count) { |
8135 | 0 | VkDriverId driver_id; |
8136 | 0 | VkResult res = get_device_driver_id(in_pPhysicalDevices[i], &driver_id); |
8137 | |
|
8138 | 0 | if ((res != VK_SUCCESS) || !check_id_matches_filter_environment_var(driver_id, driver_id_filter)) { |
8139 | 0 | continue; |
8140 | 0 | } |
8141 | 0 | } |
8142 | | |
8143 | 0 | if ((NULL != out_pPhysicalDevices) && (filtered_physical_device_count < *out_pPhysicalDeviceCount)) { |
8144 | 0 | out_pPhysicalDevices[filtered_physical_device_count] = in_pPhysicalDevices[i]; |
8145 | 0 | } |
8146 | 0 | filtered_physical_device_count++; |
8147 | 0 | } |
8148 | |
|
8149 | 0 | if ((NULL == out_pPhysicalDevices) || (filtered_physical_device_count < *out_pPhysicalDeviceCount)) { |
8150 | 0 | *out_pPhysicalDeviceCount = filtered_physical_device_count; |
8151 | 0 | } |
8152 | |
|
8153 | 0 | return (*out_pPhysicalDeviceCount < filtered_physical_device_count) ? VK_INCOMPLETE : VK_SUCCESS; |
8154 | 0 | } |
8155 | | |
8156 | | VkResult loader_filter_enumerated_physical_device_groups( |
8157 | | const struct loader_instance *inst, const struct loader_envvar_id_filter *device_id_filter, |
8158 | | const struct loader_envvar_id_filter *vendor_id_filter, const struct loader_envvar_id_filter *driver_id_filter, |
8159 | | const uint32_t in_PhysicalDeviceGroupCount, const VkPhysicalDeviceGroupProperties *in_pPhysicalDeviceGroupProperties, |
8160 | 0 | uint32_t *out_PhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *out_pPhysicalDeviceGroupProperties) { |
8161 | 0 | uint32_t filtered_physical_device_group_count = 0; |
8162 | 0 | for (uint32_t i = 0; i < in_PhysicalDeviceGroupCount; i++) { |
8163 | 0 | const VkPhysicalDeviceGroupProperties *device_group = &in_pPhysicalDeviceGroupProperties[i]; |
8164 | |
|
8165 | 0 | bool skip_group = false; |
8166 | 0 | for (uint32_t j = 0; j < device_group->physicalDeviceCount; j++) { |
8167 | 0 | VkPhysicalDeviceProperties dev_props = {0}; |
8168 | 0 | inst->disp->layer_inst_disp.GetPhysicalDeviceProperties(device_group->physicalDevices[j], &dev_props); |
8169 | |
|
8170 | 0 | if ((0 != device_id_filter->count) && !check_id_matches_filter_environment_var(dev_props.deviceID, device_id_filter)) { |
8171 | 0 | skip_group = true; |
8172 | 0 | break; |
8173 | 0 | } |
8174 | | |
8175 | 0 | if ((0 != vendor_id_filter->count) && !check_id_matches_filter_environment_var(dev_props.vendorID, vendor_id_filter)) { |
8176 | 0 | skip_group = true; |
8177 | 0 | break; |
8178 | 0 | } |
8179 | | |
8180 | 0 | if (0 != driver_id_filter->count) { |
8181 | 0 | VkDriverId driver_id; |
8182 | 0 | VkResult res = get_device_driver_id(device_group->physicalDevices[j], &driver_id); |
8183 | |
|
8184 | 0 | if ((res != VK_SUCCESS) || !check_id_matches_filter_environment_var(driver_id, driver_id_filter)) { |
8185 | 0 | skip_group = true; |
8186 | 0 | break; |
8187 | 0 | } |
8188 | 0 | } |
8189 | 0 | } |
8190 | |
|
8191 | 0 | if (skip_group) { |
8192 | 0 | continue; |
8193 | 0 | } |
8194 | | |
8195 | 0 | if ((NULL != out_pPhysicalDeviceGroupProperties) && |
8196 | 0 | (filtered_physical_device_group_count < *out_PhysicalDeviceGroupCount)) { |
8197 | 0 | out_pPhysicalDeviceGroupProperties[filtered_physical_device_group_count] = *device_group; |
8198 | 0 | } |
8199 | |
|
8200 | 0 | filtered_physical_device_group_count++; |
8201 | 0 | } |
8202 | |
|
8203 | 0 | if ((NULL == out_pPhysicalDeviceGroupProperties) || (filtered_physical_device_group_count < *out_PhysicalDeviceGroupCount)) { |
8204 | 0 | *out_PhysicalDeviceGroupCount = filtered_physical_device_group_count; |
8205 | 0 | } |
8206 | |
|
8207 | 0 | return (*out_PhysicalDeviceGroupCount < filtered_physical_device_group_count) ? VK_INCOMPLETE : VK_SUCCESS; |
8208 | 0 | } |