/src/vulkan-loader/loader/unknown_function_handling.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2022 The Khronos Group Inc. |
3 | | * Copyright (c) 2022 Valve Corporation |
4 | | * Copyright (c) 2022 LunarG, Inc. |
5 | | * |
6 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
7 | | * you may not use this file except in compliance with the License. |
8 | | * You may obtain a copy of the License at |
9 | | * |
10 | | * http://www.apache.org/licenses/LICENSE-2.0 |
11 | | * |
12 | | * Unless required by applicable law or agreed to in writing, software |
13 | | * distributed under the License is distributed on an "AS IS" BASIS, |
14 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
15 | | * See the License for the specific language governing permissions and |
16 | | * limitations under the License. |
17 | | * |
18 | | * Author: Jon Ashburn <jon@lunarg.com> |
19 | | * Author: Courtney Goeltzenleuchter <courtney@LunarG.com> |
20 | | * Author: Mark Young <marky@lunarg.com> |
21 | | * Author: Lenny Komow <lenny@lunarg.com> |
22 | | * Author: Charles Giessen <charles@lunarg.com> |
23 | | */ |
24 | | |
25 | | #include "unknown_function_handling.h" |
26 | | |
27 | | // If the assembly code necessary for unknown functions isn't supported, then replace all of the functions with stubs. |
28 | | // This way, if an application queries for an unknown function, it receives NULL and can act accordingly. |
29 | | // Previously, there was a fallback path written in C. However, it depended on the compiler optimizing the functions |
30 | | // in such a way as to not disturb the call stack. This reliance on implementation-defined behavior is unsustainable and was only |
31 | | // known to work with GCC. |
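As an aside, here is a minimal sketch of that application-side handling, assuming a made-up extension entry point name ("vkFooBarEXT") purely for illustration; whether the real assembly trampolines or the stubs below are in use, the contract the application sees is the same:

    #include <vulkan/vulkan.h>

    // An unresolved name yields NULL from vkGetDeviceProcAddr, so the application
    // must check before casting and calling. "vkFooBarEXT" is a hypothetical name.
    static int app_has_extension_function(VkDevice device) {
        PFN_vkVoidFunction fp = vkGetDeviceProcAddr(device, "vkFooBarEXT");
        if (fp == NULL) {
            return 0;  // unknown to the loader/driver; take a path that avoids the extension
        }
        // Otherwise, cast fp to the extension's PFN typedef before calling it.
        return 1;
    }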
32 | | #if !defined(UNKNOWN_FUNCTIONS_SUPPORTED) |
33 | | |
34 | | void loader_init_dispatch_dev_ext(struct loader_instance *inst, struct loader_device *dev) { |
35 | | (void)inst; |
36 | | (void)dev; |
37 | | } |
38 | | void *loader_dev_ext_gpa_tramp(struct loader_instance *inst, const char *funcName) { |
39 | | (void)inst; |
40 | | (void)funcName; |
41 | | return NULL; |
42 | | } |
43 | | void *loader_dev_ext_gpa_term(struct loader_instance *inst, const char *funcName) { |
44 | | (void)inst; |
45 | | (void)funcName; |
46 | | return NULL; |
47 | | } |
48 | | |
49 | | void *loader_phys_dev_ext_gpa_tramp(struct loader_instance *inst, const char *funcName) { |
50 | | (void)inst; |
51 | | (void)funcName; |
52 | | return NULL; |
53 | | } |
54 | | void *loader_phys_dev_ext_gpa_term(struct loader_instance *inst, const char *funcName) { |
55 | | (void)inst; |
56 | | (void)funcName; |
57 | | return NULL; |
58 | | } |
59 | | |
60 | | void loader_free_dev_ext_table(struct loader_instance *inst) { (void)inst; } |
61 | | void loader_free_phys_dev_ext_table(struct loader_instance *inst) { (void)inst; } |
62 | | |
63 | | #else |
64 | | |
65 | | #include "allocation.h" |
66 | | #include "log.h" |
67 | | |
68 | | // Forward declarations |
69 | | void *loader_get_dev_ext_trampoline(uint32_t index); |
70 | | void *loader_get_phys_dev_ext_tramp(uint32_t index); |
71 | | void *loader_get_phys_dev_ext_termin(uint32_t index); |
72 | | |
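For orientation, these three functions return the addresses of small per-index stubs generated in assembly, so that arbitrary argument lists pass through untouched. A rough C rendition of what a device-extension trampoline for one slot does is sketched below; the fixed (VkDevice, uint32_t) signature, the slot number, and the flattened table layout are assumptions made purely for illustration, not the loader's actual definitions (the real per-device table, loader_dev_dispatch_table, also carries the core dispatch entries ahead of ext_dispatch).

    #include <vulkan/vulkan.h>

    // Stand-in for the per-device extension dispatch table used in this file.
    typedef void (VKAPI_PTR *PFN_exampleDevExt)(VkDevice device, uint32_t value);
    struct example_dev_dispatch {
        PFN_exampleDevExt ext_dispatch[256];  // placeholder size standing in for MAX_NUM_UNKNOWN_EXTS
    };

    // Trampoline for slot 7: every dispatchable handle starts with a pointer to its
    // dispatch table, so recover it and forward the call with the arguments unchanged.
    static VKAPI_ATTR void VKAPI_CALL example_dev_ext_trampoline_7(VkDevice device, uint32_t value) {
        struct example_dev_dispatch *disp = *(struct example_dev_dispatch **)device;
        disp->ext_dispatch[7](device, value);
    }

loader_get_dev_ext_trampoline(i) simply hands back the address of the i-th such stub; the physical-device trampolines and terminators follow the same pattern at the instance and driver level.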
73 | | // Device function handling |
74 | | |
75 | | // Initialize device_ext dispatch table entry as follows: |
76 | | // If dev == NULL find all logical devices created within this instance and |
77 | | // init the entry (given by idx) in the ext dispatch table. |
78 | | // If dev != NULL only initialize the entry in the given dev's dispatch table. |
79 | | // The initialization value is obtained by calling down the device chain with |
80 | | // GetDeviceProcAddr (GDPA). |
81 | | // If GDPA returns NULL then don't initialize the dispatch table entry. |
82 | | void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst, struct loader_device *dev, uint32_t idx, const char *funcName) |
83 | | |
84 | 0 | { |
85 | 0 | void *gdpa_value; |
86 | 0 | if (dev != NULL) { |
87 | 0 | gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr(dev->chain_device, funcName); |
88 | 0 | if (gdpa_value != NULL) dev->loader_dispatch.ext_dispatch[idx] = (PFN_vkDevExt)gdpa_value; |
89 | 0 | } else { |
90 | 0 | for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next) { |
91 | 0 | struct loader_device *ldev = icd_term->logical_device_list; |
92 | 0 | while (ldev) { |
93 | 0 | gdpa_value = ldev->loader_dispatch.core_dispatch.GetDeviceProcAddr(ldev->chain_device, funcName); |
94 | 0 | if (gdpa_value != NULL) ldev->loader_dispatch.ext_dispatch[idx] = (PFN_vkDevExt)gdpa_value; |
95 | 0 | ldev = ldev->next; |
96 | 0 | } |
97 | 0 | } |
98 | 0 | } |
99 | 0 | } |
100 | | |
101 | | // Find all device extension entry points in the function name array and initialize |
102 | | // the corresponding dispatch table entries for dev. |
103 | 0 | void loader_init_dispatch_dev_ext(struct loader_instance *inst, struct loader_device *dev) { |
104 | 0 | for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) { |
105 | 0 | if (inst->dev_ext_disp_functions[i] != NULL) |
106 | 0 | loader_init_dispatch_dev_ext_entry(inst, dev, i, inst->dev_ext_disp_functions[i]); |
107 | 0 | } |
108 | 0 | } |
109 | | |
110 | 0 | bool loader_check_icds_for_dev_ext_address(struct loader_instance *inst, const char *funcName) { |
111 | 0 | struct loader_icd_term *icd_term; |
112 | 0 | icd_term = inst->icd_terms; |
113 | 0 | while (NULL != icd_term) { |
114 | 0 | if (icd_term->scanned_icd->GetInstanceProcAddr(icd_term->instance, funcName)) |
115 | | // this icd supports funcName |
116 | 0 | return true; |
117 | 0 | icd_term = icd_term->next; |
118 | 0 | } |
119 | | |
120 | 0 | return false; |
121 | 0 | } |
122 | | |
123 | | // Look in each layer's list of device extensions, which contains the names of entry points. If funcName is present, return true. |
124 | | // If not, call down the first layer's vkGetInstanceProcAddr to determine if any layer supports the function. |
125 | 0 | bool loader_check_layer_list_for_dev_ext_address(struct loader_instance *inst, const char *funcName) { |
126 | | // Iterate over the layers. |
127 | 0 | for (uint32_t layer = 0; layer < inst->expanded_activated_layer_list.count; ++layer) { |
128 | | // Iterate over the extensions. |
129 | 0 | const struct loader_device_extension_list *const extensions = |
130 | 0 | &(inst->expanded_activated_layer_list.list[layer]->device_extension_list); |
131 | 0 | for (uint32_t extension = 0; extension < extensions->count; ++extension) { |
132 | | // Iterate over the entry points. |
133 | 0 | const struct loader_dev_ext_props *const property = &(extensions->list[extension]); |
134 | 0 | for (uint32_t entry = 0; entry < property->entrypoints.count; ++entry) { |
135 | 0 | if (strcmp(property->entrypoints.list[entry], funcName) == 0) { |
136 | 0 | return true; |
137 | 0 | } |
138 | 0 | } |
139 | 0 | } |
140 | 0 | } |
141 | | // If the function name doesn't appear in any layer manifest's list of intercepted device functions, look down the |
142 | | // vkGetInstanceProcAddr chain |
143 | 0 | if (inst->expanded_activated_layer_list.count > 0) { |
144 | 0 | const struct loader_layer_functions *const functions = &(inst->expanded_activated_layer_list.list[0]->functions); |
145 | 0 | if (NULL != functions->get_instance_proc_addr) { |
146 | 0 | return NULL != functions->get_instance_proc_addr((VkInstance)inst->instance, funcName); |
147 | 0 | } |
148 | 0 | } |
149 | | |
150 | 0 | return false; |
151 | 0 | } |
152 | | |
153 | 0 | void loader_free_dev_ext_table(struct loader_instance *inst) { |
154 | 0 | for (uint32_t i = 0; i < inst->dev_ext_disp_function_count; i++) { |
155 | 0 | loader_instance_heap_free(inst, inst->dev_ext_disp_functions[i]); |
156 | 0 | } |
157 | 0 | memset(inst->dev_ext_disp_functions, 0, sizeof(inst->dev_ext_disp_functions)); |
158 | 0 | } |
159 | | |
160 | | /* |
161 | | * This function returns a generic trampoline code address for unknown entry points. |
162 | | * Presumably, these unknown entry points (as given by funcName) are device extension |
163 | | * entrypoints. |
164 | | * A function name array is used to keep a list of unknown entry points and their |
165 | | * mapping to the device extension dispatch table. |
166 | | * \returns |
167 | | * For a given entry point string (funcName), if an existing mapping is found, the |
168 | | * trampoline address for that mapping is returned. |
169 | | * Otherwise, this unknown entry point has not been seen yet. |
170 | | * Next, check if an ICD supports it, and if is_tramp is true, check if any layer |
171 | | * supports it by calling down the chain. |
172 | | * If so, a new entry is added to the function name array and the trampoline |
173 | | * address for the new entry is returned. |
174 | | * NULL is returned if the function name array is full or if no discovered layer or |
175 | | * ICD returns a non-NULL GetProcAddr for it. |
176 | | */ |
177 | 0 | void *loader_dev_ext_gpa_impl(struct loader_instance *inst, const char *funcName, bool is_tramp) { |
178 | | // Linearly look through already added functions to make sure we haven't seen it before |
179 | | // if we have, return the function at the index found |
180 | 0 | for (uint32_t i = 0; i < inst->dev_ext_disp_function_count; i++) { |
181 | 0 | if (inst->dev_ext_disp_functions[i] && !strcmp(inst->dev_ext_disp_functions[i], funcName)) |
182 | 0 | return loader_get_dev_ext_trampoline(i); |
183 | 0 | } |
184 | | |
185 | | // Check if funcName is supported by any ICD or layer library |
186 | 0 | if (!loader_check_icds_for_dev_ext_address(inst, funcName)) { |
187 | 0 | if (!is_tramp || !loader_check_layer_list_for_dev_ext_address(inst, funcName)) { |
188 | | // No ICD (nor, when called from the trampoline, any layer) supports it, so return NULL |
189 | 0 | return NULL; |
190 | 0 | } |
191 | 0 | } |
192 | 0 | if (inst->dev_ext_disp_function_count >= MAX_NUM_UNKNOWN_EXTS) { |
193 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_dev_ext_gpa: Exhausted the unknown device function array!"); |
194 | 0 | return NULL; |
195 | 0 | } |
196 | | |
197 | | // add found function to dev_ext_disp_functions; |
198 | 0 | size_t funcName_len = strlen(funcName) + 1; |
199 | 0 | inst->dev_ext_disp_functions[inst->dev_ext_disp_function_count] = |
200 | 0 | (char *)loader_instance_heap_alloc(inst, funcName_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
201 | 0 | if (NULL == inst->dev_ext_disp_functions[inst->dev_ext_disp_function_count]) { |
202 | | // failed to allocate memory, return NULL |
203 | 0 | return NULL; |
204 | 0 | } |
205 | 0 | loader_strncpy(inst->dev_ext_disp_functions[inst->dev_ext_disp_function_count], funcName_len, funcName, funcName_len); |
206 | | // init any dev dispatch table entries as needed |
207 | 0 | loader_init_dispatch_dev_ext_entry(inst, NULL, inst->dev_ext_disp_function_count, funcName); |
208 | 0 | void *out_function = loader_get_dev_ext_trampoline(inst->dev_ext_disp_function_count); |
209 | 0 | inst->dev_ext_disp_function_count++; |
210 | 0 | return out_function; |
211 | 0 | } |
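Both public entry points below funnel into loader_dev_ext_gpa_impl: the trampoline variant also consults the layer chain, since a layer may intercept a device function that no ICD exports, while the terminator variant only cares about the ICDs. A hedged sketch of how a GetProcAddr-style caller elsewhere in the loader might use the trampoline variant follows (the wrapper name is hypothetical and real call sites may differ in detail); the first query for a name claims a slot and returns its trampoline, and later queries for the same name return the same address.

    // Hypothetical wrapper: only names with no entry in the loader's statically known
    // dispatch tables should reach the unknown-function machinery.
    static PFN_vkVoidFunction example_resolve_unknown_device_proc(struct loader_instance *inst, const char *name) {
        void *tramp = loader_dev_ext_gpa_tramp(inst, name);
        // NULL means no ICD (and no layer) advertises the entry point, so the caller
        // reports the function as unsupported to the application.
        return (PFN_vkVoidFunction)tramp;
    }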
212 | | |
213 | 0 | void *loader_dev_ext_gpa_tramp(struct loader_instance *inst, const char *funcName) { |
214 | 0 | return loader_dev_ext_gpa_impl(inst, funcName, true); |
215 | 0 | } |
216 | | |
217 | 0 | void *loader_dev_ext_gpa_term(struct loader_instance *inst, const char *funcName) { |
218 | 0 | return loader_dev_ext_gpa_impl(inst, funcName, false); |
219 | 0 | } |
220 | | |
221 | | // Physical Device function handling |
222 | | |
223 | 0 | bool loader_check_icds_for_phys_dev_ext_address(struct loader_instance *inst, const char *funcName) { |
224 | 0 | struct loader_icd_term *icd_term; |
225 | 0 | icd_term = inst->icd_terms; |
226 | 0 | while (NULL != icd_term) { |
227 | 0 | if (icd_term->scanned_icd->interface_version >= MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION && |
228 | 0 | icd_term->scanned_icd->GetPhysicalDeviceProcAddr && |
229 | 0 | icd_term->scanned_icd->GetPhysicalDeviceProcAddr(icd_term->instance, funcName)) |
230 | | // this icd supports funcName |
231 | 0 | return true; |
232 | 0 | icd_term = icd_term->next; |
233 | 0 | } |
234 | | |
235 | 0 | return false; |
236 | 0 | } |
237 | | |
238 | 0 | bool loader_check_layer_list_for_phys_dev_ext_address(struct loader_instance *inst, const char *funcName) { |
239 | 0 | for (uint32_t layer = 0; layer < inst->expanded_activated_layer_list.count; layer++) { |
240 | 0 | struct loader_layer_properties *layer_prop_list = inst->expanded_activated_layer_list.list[layer]; |
241 | | // Find the first layer in the call chain which supports vk_layerGetPhysicalDeviceProcAddr |
242 | | // and call that, returning whether it found a valid pointer for this function name. |
243 | | // We return if the topmost layer supports GPDPA since the layer should call down the chain for us. |
244 | 0 | if (layer_prop_list->interface_version > 1) { |
245 | 0 | const struct loader_layer_functions *const functions = &(layer_prop_list->functions); |
246 | 0 | if (NULL != functions->get_physical_device_proc_addr) { |
247 | 0 | return NULL != functions->get_physical_device_proc_addr((VkInstance)inst->instance, funcName); |
248 | 0 | } |
249 | 0 | } |
250 | 0 | } |
251 | 0 | return false; |
252 | 0 | } |
253 | | |
254 | 0 | void loader_free_phys_dev_ext_table(struct loader_instance *inst) { |
255 | 0 | for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) { |
256 | 0 | loader_instance_heap_free(inst, inst->phys_dev_ext_disp_functions[i]); |
257 | 0 | } |
258 | 0 | memset(inst->phys_dev_ext_disp_functions, 0, sizeof(inst->phys_dev_ext_disp_functions)); |
259 | 0 | } |
260 | | |
261 | | // This function returns a generic trampoline or terminator function |
262 | | // address for any unknown physical device extension commands. An array |
263 | | // is used to keep a list of unknown entry points and their |
264 | | // mapping to the physical device extension dispatch table (struct |
265 | | // loader_phys_dev_ext_dispatch_table). |
266 | | // For a given entry point string (funcName), if an existing mapping is |
267 | | // found, then the address for that mapping is returned. The is_tramp |
268 | | // parameter is used to decide whether to return a trampoline or terminator. |
269 | | // If it has not been seen before, check if a layer or an ICD supports it. |
270 | | // If so, a new entry is added to the function name array. |
271 | | // NULL is returned if no discovered layer or ICD returns a non-NULL GetProcAddr for it, |
272 | | // or if the function name table is full. |
273 | 0 | void *loader_phys_dev_ext_gpa_impl(struct loader_instance *inst, const char *funcName, bool is_tramp) { |
274 | 0 | assert(NULL != inst); |
275 | | |
276 | | // We should always check to see if any ICD supports it. |
277 | 0 | if (!loader_check_icds_for_phys_dev_ext_address(inst, funcName)) { |
278 | | // If we're not checking layers, or we are and it's not in a layer, just |
279 | | // return |
280 | 0 | if (!is_tramp || !loader_check_layer_list_for_phys_dev_ext_address(inst, funcName)) { |
281 | 0 | return NULL; |
282 | 0 | } |
283 | 0 | } |
284 | | |
285 | 0 | bool has_found = false; |
286 | 0 | uint32_t new_function_index = 0; |
287 | | // Linearly look through already added functions to make sure we haven't seen it before |
288 | | // if we have, return the function at the index found |
289 | 0 | for (uint32_t i = 0; i < inst->phys_dev_ext_disp_function_count; i++) { |
290 | 0 | if (inst->phys_dev_ext_disp_functions[i] && !strcmp(inst->phys_dev_ext_disp_functions[i], funcName)) { |
291 | 0 | has_found = true; |
292 | 0 | new_function_index = i; |
293 | 0 | break; |
294 | 0 | } |
295 | 0 | } |
296 | | |
297 | | // A never-before-seen function name; store it in the array |
298 | 0 | if (!has_found) { |
299 | 0 | if (inst->phys_dev_ext_disp_function_count >= MAX_NUM_UNKNOWN_EXTS) { |
300 | 0 | loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, |
301 | 0 | "loader_dev_ext_gpa: Exhausted the unknown physical device function array!"); |
302 | 0 | return NULL; |
303 | 0 | } |
304 | | |
305 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, |
306 | 0 | "loader_phys_dev_ext_gpa: Adding unknown physical function %s to internal store at index %u", funcName, |
307 | 0 | inst->phys_dev_ext_disp_function_count); |
308 | | |
309 | | // add found function to phys_dev_ext_disp_functions; |
310 | 0 | size_t funcName_len = strlen(funcName) + 1; |
311 | 0 | inst->phys_dev_ext_disp_functions[inst->phys_dev_ext_disp_function_count] = |
312 | 0 | (char *)loader_instance_heap_alloc(inst, funcName_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); |
313 | 0 | if (NULL == inst->phys_dev_ext_disp_functions[inst->phys_dev_ext_disp_function_count]) { |
314 | | // failed to allocate memory, return NULL |
315 | 0 | return NULL; |
316 | 0 | } |
317 | 0 | loader_strncpy(inst->phys_dev_ext_disp_functions[inst->phys_dev_ext_disp_function_count], funcName_len, funcName, |
318 | 0 | funcName_len); |
319 | |
320 | 0 | new_function_index = inst->phys_dev_ext_disp_function_count; |
321 | | // increment the count so that the subsequent logic includes the newly added entry point when searching for functions |
322 | 0 | inst->phys_dev_ext_disp_function_count++; |
323 | 0 | } |
324 | | |
325 | | // Setup the ICD function pointers |
326 | 0 | struct loader_icd_term *icd_term = inst->icd_terms; |
327 | 0 | while (NULL != icd_term) { |
328 | 0 | if (MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION <= icd_term->scanned_icd->interface_version && |
329 | 0 | NULL != icd_term->scanned_icd->GetPhysicalDeviceProcAddr) { |
330 | 0 | icd_term->phys_dev_ext[new_function_index] = |
331 | 0 | (PFN_PhysDevExt)icd_term->scanned_icd->GetPhysicalDeviceProcAddr(icd_term->instance, funcName); |
332 | 0 | if (NULL != icd_term->phys_dev_ext[new_function_index]) { |
333 | | // Make sure we set the instance dispatch to point to the loader's terminator now since we can at least handle |
334 | | // it in one ICD. |
335 | 0 | inst->disp->phys_dev_ext[new_function_index] = loader_get_phys_dev_ext_termin(new_function_index); |
336 | |
337 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "loader_phys_dev_ext_gpa: Driver %s returned ptr %p for %s", |
338 | 0 | icd_term->scanned_icd->lib_name, inst->disp->phys_dev_ext[new_function_index], funcName); |
339 | 0 | } |
340 | 0 | } else { |
341 | 0 | icd_term->phys_dev_ext[new_function_index] = NULL; |
342 | 0 | } |
343 | |
344 | 0 | icd_term = icd_term->next; |
345 | 0 | } |
346 | | |
347 | | // Now if this is being run in the trampoline, search for the first layer attached and query using it to get the first entry |
348 | | // point. Only set the instance dispatch table to it if it isn't NULL. |
349 | 0 | if (is_tramp) { |
350 | 0 | for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) { |
351 | 0 | struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i]; |
352 | 0 | if (layer_prop->interface_version > 1 && NULL != layer_prop->functions.get_physical_device_proc_addr) { |
353 | 0 | void *layer_ret_function = |
354 | 0 | (PFN_PhysDevExt)layer_prop->functions.get_physical_device_proc_addr(inst->instance, funcName); |
355 | 0 | if (NULL != layer_ret_function) { |
356 | 0 | inst->disp->phys_dev_ext[new_function_index] = layer_ret_function; |
357 | 0 | loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "loader_phys_dev_ext_gpa: Layer %s returned ptr %p for %s", |
358 | 0 | layer_prop->info.layerName, inst->disp->phys_dev_ext[new_function_index], funcName); |
359 | 0 | break; |
360 | 0 | } |
361 | 0 | } |
362 | 0 | } |
363 | 0 | } |
364 | |
365 | 0 | if (is_tramp) { |
366 | 0 | return loader_get_phys_dev_ext_tramp(new_function_index); |
367 | 0 | } else { |
368 | 0 | return loader_get_phys_dev_ext_termin(new_function_index); |
369 | 0 | } |
370 | 0 | } |
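To tie the two tables together: the instance dispatch slot filled in above (inst->disp->phys_dev_ext[index]) is what the generated trampoline jumps through, while the per-driver slot (icd_term->phys_dev_ext[index]) is what the generated terminator jumps through. A rough C rendition of that pair for one slot follows; the fixed (VkPhysicalDevice, uint32_t) signature, the slot number, and the simplified wrapper structs are assumptions for illustration only, and the unwrap step in the terminator is inferred from how loader terminators generally treat wrapped physical devices rather than shown in this file.

    #include <vulkan/vulkan.h>

    enum { EXAMPLE_SLOT = 42 };  // placeholder index into the unknown-function arrays
    typedef void (VKAPI_PTR *PFN_examplePhysDevExt)(VkPhysicalDevice phys_dev, uint32_t value);

    // Simplified stand-ins for the loader's wrapped physical-device objects.
    struct example_inst_dispatch { PFN_examplePhysDevExt phys_dev_ext[256]; };
    struct example_phys_dev_tramp { struct example_inst_dispatch *disp; };
    struct example_icd { PFN_examplePhysDevExt phys_dev_ext[256]; };
    struct example_phys_dev_term { struct example_icd *icd; VkPhysicalDevice icd_phys_dev; };

    // Trampoline: dispatch through the instance table slot set above, leaving the
    // wrapped handle in place so a layer or the terminator sees it next.
    static VKAPI_ATTR void VKAPI_CALL example_phys_dev_tramp_fn(VkPhysicalDevice phys_dev, uint32_t value) {
        struct example_phys_dev_tramp *wrapped = (struct example_phys_dev_tramp *)phys_dev;
        wrapped->disp->phys_dev_ext[EXAMPLE_SLOT](phys_dev, value);
    }

    // Terminator: swap in the driver's own VkPhysicalDevice and call the pointer the
    // driver returned from its GetPhysicalDeviceProcAddr.
    static VKAPI_ATTR void VKAPI_CALL example_phys_dev_termin_fn(VkPhysicalDevice phys_dev, uint32_t value) {
        struct example_phys_dev_term *wrapped = (struct example_phys_dev_term *)phys_dev;
        wrapped->icd->phys_dev_ext[EXAMPLE_SLOT](wrapped->icd_phys_dev, value);
    }

Either way the caller receives a fixed per-slot stub address, so the pointer handed back for a given name stays stable for the life of the instance while drivers and layers fill in their slots.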
371 | | // Main interface functions; these make it clear whether a trampoline or a terminator is being requested |
372 | 0 | void *loader_phys_dev_ext_gpa_tramp(struct loader_instance *inst, const char *funcName) { |
373 | 0 | return loader_phys_dev_ext_gpa_impl(inst, funcName, true); |
374 | 0 | } |
375 | 0 | void *loader_phys_dev_ext_gpa_term(struct loader_instance *inst, const char *funcName) { |
376 | 0 | return loader_phys_dev_ext_gpa_impl(inst, funcName, false); |
377 | 0 | } |
378 | | |
379 | | #endif |