/* Wine Vulkan ICD implementation
 *
 * Copyright 2017 Roderick Colenbrander
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#if 0
#pragma makedep unix
#endif

#include "config.h"

#include <stdlib.h>

#include "vulkan_private.h"
#include "wine/vulkan_driver.h"
#include "ntgdi.h"
#include "ntuser.h"

WINE_DEFAULT_DEBUG_CHANNEL(vulkan);

static BOOL is_wow64(void)
{
    return sizeof(void *) == sizeof(UINT64) && NtCurrentTeb()->WowTebOffset;
}

static BOOL use_external_memory(void)
{
    return is_wow64();
}

static ULONG_PTR zero_bits = 0;

#define wine_vk_count_struct(s, t) wine_vk_count_struct_((void *)s, VK_STRUCTURE_TYPE_##t)
static uint32_t wine_vk_count_struct_(void *s, VkStructureType t)
{
    const VkBaseInStructure *header;
    uint32_t result = 0;

    for (header = s; header; header = header->pNext)
    {
        if (header->sType == t)
            result++;
    }

    return result;
}

static const struct vulkan_funcs *vk_funcs;

#define WINE_VK_ADD_DISPATCHABLE_MAPPING(instance, client_handle, host_handle, object) \
    wine_vk_add_handle_mapping((instance), (uintptr_t)(client_handle), (uintptr_t)(host_handle), &(object)->mapping)
#define WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(instance, client_handle, host_handle, object) \
    wine_vk_add_handle_mapping((instance), (uintptr_t)(client_handle), (host_handle), &(object)->mapping)
static void wine_vk_add_handle_mapping(struct wine_instance *instance, uint64_t wrapped_handle,
                                       uint64_t host_handle, struct wine_vk_mapping *mapping)
{
    if (instance->enable_wrapper_list)
    {
        mapping->host_handle = host_handle;
        mapping->wine_wrapped_handle = wrapped_handle;
        pthread_rwlock_wrlock(&instance->wrapper_lock);
        list_add_tail(&instance->wrappers, &mapping->link);
        pthread_rwlock_unlock(&instance->wrapper_lock);
    }
}

#define WINE_VK_REMOVE_HANDLE_MAPPING(instance, object) \
    wine_vk_remove_handle_mapping((instance), &(object)->mapping)
static void wine_vk_remove_handle_mapping(struct wine_instance *instance, struct wine_vk_mapping *mapping)
{
    if (instance->enable_wrapper_list)
    {
        pthread_rwlock_wrlock(&instance->wrapper_lock);
        list_remove(&mapping->link);
        pthread_rwlock_unlock(&instance->wrapper_lock);
    }
}

static uint64_t wine_vk_get_wrapper(struct wine_instance *instance, uint64_t host_handle)
{
    struct wine_vk_mapping *mapping;
    uint64_t result = 0;

    pthread_rwlock_rdlock(&instance->wrapper_lock);
    LIST_FOR_EACH_ENTRY(mapping, &instance->wrappers, struct wine_vk_mapping, link)
    {
        if (mapping->host_handle == host_handle)
        {
            result = mapping->wine_wrapped_handle;
            break;
        }
    }
    pthread_rwlock_unlock(&instance->wrapper_lock);
    return result;
}
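/* The wrapper list above maps host handles back to the client (wrapped)
 * handles the application knows about.  It exists because the debug callbacks
 * below receive *host* handles from the host driver, which would be
 * meaningless to the application.  To avoid the bookkeeping overhead it is
 * only maintained when enable_wrapper_list is set, i.e. when
 * VK_EXT_debug_utils or VK_EXT_debug_report was enabled at instance
 * creation. */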
static VkBool32 debug_utils_callback_conversion(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
        VkDebugUtilsMessageTypeFlagsEXT message_types,
        const VkDebugUtilsMessengerCallbackDataEXT *callback_data,
        void *user_data)
{
    struct wine_vk_debug_utils_params params;
    VkDebugUtilsObjectNameInfoEXT *object_name_infos;
    struct wine_debug_utils_messenger *object;
    void *ret_ptr;
    ULONG ret_len;
    unsigned int i;

    TRACE("%i, %u, %p, %p\n", severity, message_types, callback_data, user_data);

    object = user_data;

    if (!object->instance->host_instance)
    {
        /* instance wasn't yet created, this is a message from the host loader */
        return VK_FALSE;
    }

    /* FIXME: we should pack all referenced structs instead of passing pointers */
    params.user_callback = object->user_callback;
    params.user_data = object->user_data;
    params.severity = severity;
    params.message_types = message_types;
    params.data = *((VkDebugUtilsMessengerCallbackDataEXT *) callback_data);

    object_name_infos = calloc(params.data.objectCount, sizeof(*object_name_infos));

    for (i = 0; i < params.data.objectCount; i++)
    {
        object_name_infos[i].sType = callback_data->pObjects[i].sType;
        object_name_infos[i].pNext = callback_data->pObjects[i].pNext;
        object_name_infos[i].objectType = callback_data->pObjects[i].objectType;
        object_name_infos[i].pObjectName = callback_data->pObjects[i].pObjectName;

        if (wine_vk_is_type_wrapped(callback_data->pObjects[i].objectType))
        {
            object_name_infos[i].objectHandle = wine_vk_get_wrapper(object->instance,
                                                                    callback_data->pObjects[i].objectHandle);
            if (!object_name_infos[i].objectHandle)
            {
                WARN("handle conversion failed 0x%s\n",
                     wine_dbgstr_longlong(callback_data->pObjects[i].objectHandle));
                free(object_name_infos);
                return VK_FALSE;
            }
        }
        else
        {
            object_name_infos[i].objectHandle = callback_data->pObjects[i].objectHandle;
        }
    }

    params.data.pObjects = object_name_infos;

    /* applications should always return VK_FALSE */
    KeUserModeCallback( NtUserCallVulkanDebugUtilsCallback, &params, sizeof(params), &ret_ptr, &ret_len );

    free(object_name_infos);

    if (ret_len == sizeof(VkBool32)) return *(VkBool32 *)ret_ptr;
    return VK_FALSE;
}

static VkBool32 debug_report_callback_conversion(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT object_type,
        uint64_t object_handle, size_t location, int32_t code, const char *layer_prefix, const char *message,
        void *user_data)
{
    struct wine_vk_debug_report_params params;
    struct wine_debug_report_callback *object;
    void *ret_ptr;
    ULONG ret_len;

    TRACE("%#x, %#x, 0x%s, 0x%s, %d, %p, %p, %p\n", flags, object_type, wine_dbgstr_longlong(object_handle),
          wine_dbgstr_longlong(location), code, layer_prefix, message, user_data);

    object = user_data;

    if (!object->instance->host_instance)
    {
        /* instance wasn't yet created, this is a message from the host loader */
        return VK_FALSE;
    }

    /* FIXME: we should pack all referenced structs instead of passing pointers */
    params.user_callback = object->user_callback;
    params.user_data = object->user_data;
    params.flags = flags;
    params.object_type = object_type;
    params.location = location;
    params.code = code;
    params.layer_prefix = layer_prefix;
    params.message = message;

    params.object_handle = wine_vk_get_wrapper(object->instance, object_handle);
    if (!params.object_handle)
        params.object_type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT;

    KeUserModeCallback( NtUserCallVulkanDebugReportCallback, &params, sizeof(params), &ret_ptr, &ret_len );

    if (ret_len == sizeof(VkBool32)) return *(VkBool32 *)ret_ptr;
    return VK_FALSE;
}

static void wine_vk_physical_device_free(struct wine_phys_dev *phys_dev)
{
    if (!phys_dev)
        return;

    WINE_VK_REMOVE_HANDLE_MAPPING(phys_dev->instance, phys_dev);
    free(phys_dev->extensions);
    free(phys_dev);
}
static struct wine_phys_dev *wine_vk_physical_device_alloc(struct wine_instance *instance,
        VkPhysicalDevice phys_dev, VkPhysicalDevice handle)
{
    struct wine_phys_dev *object;
    uint32_t num_host_properties, num_properties = 0;
    VkExtensionProperties *host_properties = NULL;
    BOOL have_external_memory_host = FALSE;
    VkResult res;
    unsigned int i, j;

    if (!(object = calloc(1, sizeof(*object))))
        return NULL;

    object->instance = instance;
    object->handle = handle;
    object->host_physical_device = phys_dev;

    handle->base.unix_handle = (uintptr_t)object;
    WINE_VK_ADD_DISPATCHABLE_MAPPING(instance, handle, phys_dev, object);

    instance->funcs.p_vkGetPhysicalDeviceMemoryProperties(phys_dev, &object->memory_properties);

    res = instance->funcs.p_vkEnumerateDeviceExtensionProperties(phys_dev, NULL, &num_host_properties, NULL);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to enumerate device extensions, res=%d\n", res);
        goto err;
    }

    host_properties = calloc(num_host_properties, sizeof(*host_properties));
    if (!host_properties)
    {
        ERR("Failed to allocate memory for device properties!\n");
        goto err;
    }

    res = instance->funcs.p_vkEnumerateDeviceExtensionProperties(phys_dev, NULL, &num_host_properties, host_properties);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to enumerate device extensions, res=%d\n", res);
        goto err;
    }

    /* Count list of extensions for which we have an implementation.
     * TODO: perform translation for platform specific extensions.
     */
    for (i = 0; i < num_host_properties; i++)
    {
        if (wine_vk_device_extension_supported(host_properties[i].extensionName))
        {
            TRACE("Enabling extension '%s' for physical device %p\n", host_properties[i].extensionName, object);
            num_properties++;
        }
        else
        {
            TRACE("Skipping extension '%s', no implementation found in winevulkan.\n",
                  host_properties[i].extensionName);
        }
        if (!strcmp(host_properties[i].extensionName, "VK_EXT_external_memory_host"))
            have_external_memory_host = TRUE;
    }

    TRACE("Host supported extensions %u, Wine supported extensions %u\n", num_host_properties, num_properties);

    if (!(object->extensions = calloc(num_properties, sizeof(*object->extensions))))
    {
        ERR("Failed to allocate memory for device extensions!\n");
        goto err;
    }

    for (i = 0, j = 0; i < num_host_properties; i++)
    {
        if (wine_vk_device_extension_supported(host_properties[i].extensionName))
        {
            object->extensions[j] = host_properties[i];
            j++;
        }
    }
    object->extension_count = num_properties;

    if (use_external_memory() && have_external_memory_host)
    {
        VkPhysicalDeviceExternalMemoryHostPropertiesEXT host_mem_props =
        {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT,
        };
        VkPhysicalDeviceProperties2 props =
        {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &host_mem_props,
        };
        instance->funcs.p_vkGetPhysicalDeviceProperties2KHR(phys_dev, &props);
        object->external_memory_align = host_mem_props.minImportedHostPointerAlignment;
        if (object->external_memory_align)
            TRACE("Using VK_EXT_external_memory_host for memory mapping with alignment: %u\n",
                  object->external_memory_align);
    }

    free(host_properties);
    return object;

err:
    wine_vk_physical_device_free(object);
    free(host_properties);
    return NULL;
}

static void wine_vk_free_command_buffers(struct wine_device *device,
        struct wine_cmd_pool *pool, uint32_t count, const VkCommandBuffer *buffers)
{
    unsigned int i;

    for (i = 0; i < count; i++)
    {
        struct wine_cmd_buffer *buffer = wine_cmd_buffer_from_handle(buffers[i]);

        if (!buffer)
            continue;

        device->funcs.p_vkFreeCommandBuffers(device->host_device, pool->host_command_pool, 1,
                                             &buffer->host_command_buffer);
        WINE_VK_REMOVE_HANDLE_MAPPING(device->phys_dev->instance, buffer);
        buffer->handle->base.unix_handle = 0;
        free(buffer);
    }
}
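/* Queues are dispatchable objects, so they need client wrappers just like
 * devices.  All queues are fetched from the host and wrapped once at device
 * creation time; vkGetDeviceQueue{,2} then only has to look them up in this
 * cache (see wine_vk_device_find_queue() further down). */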
static void wine_vk_device_get_queues(struct wine_device *device, uint32_t family_index,
        uint32_t queue_count, VkDeviceQueueCreateFlags flags, struct wine_queue *queues,
        VkQueue *handles)
{
    VkDeviceQueueInfo2 queue_info;
    unsigned int i;

    for (i = 0; i < queue_count; i++)
    {
        struct wine_queue *queue = &queues[i];

        queue->device = device;
        queue->handle = (*handles)++;
        queue->family_index = family_index;
        queue->queue_index = i;
        queue->flags = flags;

        /* The Vulkan spec says:
         *
         * "vkGetDeviceQueue must only be used to get queues that were created
         * with the flags parameter of VkDeviceQueueCreateInfo set to zero."
         */
        if (flags && device->funcs.p_vkGetDeviceQueue2)
        {
            queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2;
            queue_info.pNext = NULL;
            queue_info.flags = flags;
            queue_info.queueFamilyIndex = family_index;
            queue_info.queueIndex = i;
            device->funcs.p_vkGetDeviceQueue2(device->host_device, &queue_info, &queue->host_queue);
        }
        else
        {
            device->funcs.p_vkGetDeviceQueue(device->host_device, family_index, i, &queue->host_queue);
        }

        queue->handle->base.unix_handle = (uintptr_t)queue;
        WINE_VK_ADD_DISPATCHABLE_MAPPING(device->phys_dev->instance, queue->handle, queue->host_queue, queue);

        TRACE("Got device %p queue %p, host_queue %p.\n", device, queue, queue->host_queue);
    }
}

static VkResult wine_vk_device_convert_create_info(struct wine_phys_dev *phys_dev,
        struct conversion_context *ctx, const VkDeviceCreateInfo *src, VkDeviceCreateInfo *dst)
{
    unsigned int i;

    *dst = *src;

    /* Should be filtered out by loader as ICDs don't support layers. */
    dst->enabledLayerCount = 0;
    dst->ppEnabledLayerNames = NULL;

    TRACE("Enabled %u extensions.\n", dst->enabledExtensionCount);
    for (i = 0; i < dst->enabledExtensionCount; i++)
    {
        const char *extension_name = dst->ppEnabledExtensionNames[i];
        TRACE("Extension %u: %s.\n", i, debugstr_a(extension_name));
        if (!wine_vk_device_extension_supported(extension_name))
        {
            WARN("Extension %s is not supported.\n", debugstr_a(extension_name));
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
    }

    if (phys_dev->external_memory_align)
    {
        const char **new_extensions;

        new_extensions = conversion_context_alloc(ctx, (dst->enabledExtensionCount + 2) *
                                                  sizeof(*dst->ppEnabledExtensionNames));
        memcpy(new_extensions, src->ppEnabledExtensionNames,
               dst->enabledExtensionCount * sizeof(*dst->ppEnabledExtensionNames));
        new_extensions[dst->enabledExtensionCount++] = "VK_KHR_external_memory";
        new_extensions[dst->enabledExtensionCount++] = "VK_EXT_external_memory_host";
        dst->ppEnabledExtensionNames = new_extensions;
    }

    return VK_SUCCESS;
}
/* Helper function used for freeing a device structure. This function supports full
 * and partial object cleanups and can thus be used for vkCreateDevice failures.
 */
static void wine_vk_device_free(struct wine_device *device)
{
    struct wine_queue *queue;

    if (!device)
        return;

    if (device->queues)
    {
        unsigned int i;

        for (i = 0; i < device->queue_count; i++)
        {
            queue = &device->queues[i];
            if (queue && queue->host_queue)
                WINE_VK_REMOVE_HANDLE_MAPPING(device->phys_dev->instance, queue);
        }
        free(device->queues);
        device->queues = NULL;
    }

    if (device->host_device && device->funcs.p_vkDestroyDevice)
    {
        WINE_VK_REMOVE_HANDLE_MAPPING(device->phys_dev->instance, device);
        device->funcs.p_vkDestroyDevice(device->host_device, NULL /* pAllocator */);
    }

    free(device);
}

NTSTATUS init_vulkan(void *args)
{
    vk_funcs = __wine_get_vulkan_driver(WINE_VULKAN_DRIVER_VERSION);
    if (!vk_funcs)
    {
        ERR("Failed to load Wine graphics driver supporting Vulkan.\n");
        return STATUS_UNSUCCESSFUL;
    }

    if (is_wow64())
    {
        SYSTEM_BASIC_INFORMATION info;

        NtQuerySystemInformation(SystemEmulationBasicInformation, &info, sizeof(info), NULL);
        zero_bits = (ULONG_PTR)info.HighestUserAddress | 0x7fffffff;
    }

    return STATUS_SUCCESS;
}
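/* NOTE: zero_bits is later passed to NtAllocateVirtualMemory() in
 * wine_vkAllocateMemory().  A value above 31 acts as an address mask rather
 * than a count of zero bits, so e.g. a HighestUserAddress of 0x7ffeffff
 * yields 0x7fffffff and keeps such allocations below the 2GB boundary, where
 * the resulting pointers stay representable in a 32-bit (wow64) process. */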
/* Helper function for converting between win32 and host compatible VkInstanceCreateInfo.
 * This function takes care of extensions handled at winevulkan layer, a Wine graphics
 * driver is responsible for handling e.g. surface extensions.
 */
static VkResult wine_vk_instance_convert_create_info(struct conversion_context *ctx,
        const VkInstanceCreateInfo *src, VkInstanceCreateInfo *dst, struct wine_instance *object)
{
    VkDebugUtilsMessengerCreateInfoEXT *debug_utils_messenger;
    VkDebugReportCallbackCreateInfoEXT *debug_report_callback;
    VkBaseInStructure *header;
    unsigned int i;

    *dst = *src;

    object->utils_messenger_count = wine_vk_count_struct(dst, DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
    object->utils_messengers = calloc(object->utils_messenger_count, sizeof(*object->utils_messengers));
    header = (VkBaseInStructure *) dst;
    for (i = 0; i < object->utils_messenger_count; i++)
    {
        header = find_next_struct(header->pNext, VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
        debug_utils_messenger = (VkDebugUtilsMessengerCreateInfoEXT *) header;

        object->utils_messengers[i].instance = object;
        object->utils_messengers[i].host_debug_messenger = VK_NULL_HANDLE;
        object->utils_messengers[i].user_callback = debug_utils_messenger->pfnUserCallback;
        object->utils_messengers[i].user_data = debug_utils_messenger->pUserData;

        /* convert_VkInstanceCreateInfo_* already copied the chain, so we can modify it in-place. */
        debug_utils_messenger->pfnUserCallback = (void *) &debug_utils_callback_conversion;
        debug_utils_messenger->pUserData = &object->utils_messengers[i];
    }

    if ((debug_report_callback = find_next_struct(dst->pNext, VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT)))
    {
        object->default_callback.instance = object;
        object->default_callback.host_debug_callback = VK_NULL_HANDLE;
        object->default_callback.user_callback = debug_report_callback->pfnCallback;
        object->default_callback.user_data = debug_report_callback->pUserData;

        debug_report_callback->pfnCallback = (void *) &debug_report_callback_conversion;
        debug_report_callback->pUserData = &object->default_callback;
    }

    /* ICDs don't support any layers, so nothing to copy. Modern versions of the loader
     * filter this data out as well.
     */
    if (dst->enabledLayerCount)
    {
        FIXME("Loading explicit layers is not supported by winevulkan!\n");
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    TRACE("Enabled %u instance extensions.\n", dst->enabledExtensionCount);
    for (i = 0; i < dst->enabledExtensionCount; i++)
    {
        const char *extension_name = dst->ppEnabledExtensionNames[i];
        TRACE("Extension %u: %s.\n", i, debugstr_a(extension_name));
        if (!wine_vk_instance_extension_supported(extension_name))
        {
            WARN("Extension %s is not supported.\n", debugstr_a(extension_name));
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }
        if (!strcmp(extension_name, "VK_EXT_debug_utils") || !strcmp(extension_name, "VK_EXT_debug_report"))
        {
            object->enable_wrapper_list = VK_TRUE;
        }
    }

    if (use_external_memory())
    {
        const char **new_extensions;

        new_extensions = conversion_context_alloc(ctx, (dst->enabledExtensionCount + 2) *
                                                  sizeof(*dst->ppEnabledExtensionNames));
        memcpy(new_extensions, src->ppEnabledExtensionNames,
               dst->enabledExtensionCount * sizeof(*dst->ppEnabledExtensionNames));
        new_extensions[dst->enabledExtensionCount++] = "VK_KHR_get_physical_device_properties2";
        new_extensions[dst->enabledExtensionCount++] = "VK_KHR_external_memory_capabilities";
        dst->ppEnabledExtensionNames = new_extensions;
    }

    return VK_SUCCESS;
}

/* Helper function which stores wrapped physical devices in the instance object. */
static VkResult wine_vk_instance_load_physical_devices(struct wine_instance *instance)
{
    VkPhysicalDevice *tmp_phys_devs;
    uint32_t phys_dev_count;
    unsigned int i;
    VkResult res;

    res = instance->funcs.p_vkEnumeratePhysicalDevices(instance->host_instance, &phys_dev_count, NULL);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to enumerate physical devices, res=%d\n", res);
        return res;
    }
    if (!phys_dev_count)
        return res;

    if (phys_dev_count > instance->handle->phys_dev_count)
    {
        instance->handle->phys_dev_count = phys_dev_count;
        return VK_ERROR_OUT_OF_POOL_MEMORY;
    }
    instance->handle->phys_dev_count = phys_dev_count;

    if (!(tmp_phys_devs = calloc(phys_dev_count, sizeof(*tmp_phys_devs))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = instance->funcs.p_vkEnumeratePhysicalDevices(instance->host_instance, &phys_dev_count, tmp_phys_devs);
    if (res != VK_SUCCESS)
    {
        free(tmp_phys_devs);
        return res;
    }

    instance->phys_devs = calloc(phys_dev_count, sizeof(*instance->phys_devs));
    if (!instance->phys_devs)
    {
        free(tmp_phys_devs);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    /* Wrap each host physical device handle into a dispatchable object for the ICD loader. */
    for (i = 0; i < phys_dev_count; i++)
    {
        struct wine_phys_dev *phys_dev = wine_vk_physical_device_alloc(instance, tmp_phys_devs[i],
                                                                       &instance->handle->phys_devs[i]);
        if (!phys_dev)
        {
            ERR("Unable to allocate memory for physical device!\n");
            free(tmp_phys_devs);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        instance->phys_devs[i] = phys_dev;
        instance->phys_dev_count = i + 1;
    }
    instance->phys_dev_count = phys_dev_count;

    free(tmp_phys_devs);
    return VK_SUCCESS;
}

static struct wine_phys_dev *wine_vk_instance_wrap_physical_device(struct wine_instance *instance,
        VkPhysicalDevice physical_device)
{
    unsigned int i;

    for (i = 0; i < instance->phys_dev_count; ++i)
    {
        struct wine_phys_dev *current = instance->phys_devs[i];
        if (current->host_physical_device == physical_device)
            return current;
    }

    ERR("Unrecognized physical device %p.\n", physical_device);
    return NULL;
}
/* Helper function used for freeing an instance structure. This function supports full
 * and partial object cleanups and can thus be used for vkCreateInstance failures.
 */
static void wine_vk_instance_free(struct wine_instance *instance)
{
    if (!instance)
        return;

    if (instance->phys_devs)
    {
        unsigned int i;

        for (i = 0; i < instance->phys_dev_count; i++)
        {
            wine_vk_physical_device_free(instance->phys_devs[i]);
        }
        free(instance->phys_devs);
    }

    if (instance->host_instance)
    {
        vk_funcs->p_vkDestroyInstance(instance->host_instance, NULL /* allocator */);
        WINE_VK_REMOVE_HANDLE_MAPPING(instance, instance);
    }

    pthread_rwlock_destroy(&instance->wrapper_lock);
    free(instance->utils_messengers);

    free(instance);
}

VkResult wine_vkAllocateCommandBuffers(VkDevice handle, const VkCommandBufferAllocateInfo *allocate_info,
                                       VkCommandBuffer *buffers)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_cmd_buffer *buffer;
    struct wine_cmd_pool *pool;
    VkResult res = VK_SUCCESS;
    unsigned int i;

    pool = wine_cmd_pool_from_handle(allocate_info->commandPool);

    for (i = 0; i < allocate_info->commandBufferCount; i++)
    {
        VkCommandBufferAllocateInfo allocate_info_host;

        /* TODO: future extensions (none yet) may require pNext conversion. */
        allocate_info_host.pNext = allocate_info->pNext;
        allocate_info_host.sType = allocate_info->sType;
        allocate_info_host.commandPool = pool->host_command_pool;
        allocate_info_host.level = allocate_info->level;
        allocate_info_host.commandBufferCount = 1;

        TRACE("Allocating command buffer %u from pool 0x%s.\n", i,
              wine_dbgstr_longlong(allocate_info_host.commandPool));

        if (!(buffer = calloc(1, sizeof(*buffer))))
        {
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            break;
        }

        buffer->handle = buffers[i];
        buffer->device = device;
        res = device->funcs.p_vkAllocateCommandBuffers(device->host_device, &allocate_info_host,
                                                       &buffer->host_command_buffer);
        buffer->handle->base.unix_handle = (uintptr_t)buffer;
        WINE_VK_ADD_DISPATCHABLE_MAPPING(device->phys_dev->instance, buffer->handle,
                                         buffer->host_command_buffer, buffer);
        if (res != VK_SUCCESS)
        {
            ERR("Failed to allocate command buffer, res=%d.\n", res);
            buffer->host_command_buffer = VK_NULL_HANDLE;
            break;
        }
    }

    if (res != VK_SUCCESS)
        wine_vk_free_command_buffers(device, pool, i + 1, buffers);

    return res;
}
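/* NOTE: for dispatchable objects such as VkDevice, client_ptr below is the
 * PE-side handle structure, presumably allocated by winevulkan's win32 layer
 * before crossing into this Unix library.  All this side does is fill in its
 * unix_handle (and quirks) fields and hand it back through *ret_device. */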
VkResult wine_vkCreateDevice(VkPhysicalDevice phys_dev_handle, const VkDeviceCreateInfo *create_info,
                             const VkAllocationCallbacks *allocator, VkDevice *ret_device,
                             void *client_ptr)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    struct wine_instance *instance = phys_dev->instance;
    VkDevice device_handle = client_ptr;
    VkDeviceCreateInfo create_info_host;
    struct VkQueue_T *queue_handles;
    struct wine_queue *next_queue;
    struct conversion_context ctx;
    struct wine_device *object;
    unsigned int i;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (TRACE_ON(vulkan))
    {
        VkPhysicalDeviceProperties properties;

        instance->funcs.p_vkGetPhysicalDeviceProperties(phys_dev->host_physical_device, &properties);

        TRACE("Device name: %s.\n", debugstr_a(properties.deviceName));
        TRACE("Vendor ID: %#x, Device ID: %#x.\n", properties.vendorID, properties.deviceID);
        TRACE("Driver version: %#x.\n", properties.driverVersion);
    }

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    object->phys_dev = phys_dev;

    init_conversion_context(&ctx);
    res = wine_vk_device_convert_create_info(phys_dev, &ctx, create_info, &create_info_host);
    if (res == VK_SUCCESS)
        res = instance->funcs.p_vkCreateDevice(phys_dev->host_physical_device, &create_info_host,
                                               NULL /* allocator */, &object->host_device);
    free_conversion_context(&ctx);
    WINE_VK_ADD_DISPATCHABLE_MAPPING(instance, device_handle, object->host_device, object);
    if (res != VK_SUCCESS)
    {
        WARN("Failed to create device, res=%d.\n", res);
        goto fail;
    }

    /* Just load all function pointers we are aware of. The loader takes care of filtering.
     * We use vkGetDeviceProcAddr as opposed to vkGetInstanceProcAddr for efficiency reasons
     * as functions pass through fewer dispatch tables within the loader.
     */
#define USE_VK_FUNC(name) \
    object->funcs.p_##name = (void *)vk_funcs->p_vkGetDeviceProcAddr(object->host_device, #name); \
    if (object->funcs.p_##name == NULL) TRACE("Not found '%s'.\n", #name);
    ALL_VK_DEVICE_FUNCS()
#undef USE_VK_FUNC

    /* We need to cache all queues within the device as each requires wrapping since queues are
     * dispatchable objects.
     */
    for (i = 0; i < create_info_host.queueCreateInfoCount; i++)
    {
        object->queue_count += create_info_host.pQueueCreateInfos[i].queueCount;
    }

    if (!(object->queues = calloc(object->queue_count, sizeof(*object->queues))))
    {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto fail;
    }

    next_queue = object->queues;
    queue_handles = device_handle->queues;
    for (i = 0; i < create_info_host.queueCreateInfoCount; i++)
    {
        uint32_t flags = create_info_host.pQueueCreateInfos[i].flags;
        uint32_t family_index = create_info_host.pQueueCreateInfos[i].queueFamilyIndex;
        uint32_t queue_count = create_info_host.pQueueCreateInfos[i].queueCount;

        TRACE("Queue family index %u, queue count %u.\n", family_index, queue_count);

        wine_vk_device_get_queues(object, family_index, queue_count, flags, next_queue, &queue_handles);
        next_queue += queue_count;
    }

    device_handle->quirks = instance->quirks;
    device_handle->base.unix_handle = (uintptr_t)object;
    *ret_device = device_handle;
    TRACE("Created device %p, host_device %p.\n", object, object->host_device);
    return VK_SUCCESS;

fail:
    wine_vk_device_free(object);
    return res;
}
VkResult wine_vkCreateInstance(const VkInstanceCreateInfo *create_info,
                               const VkAllocationCallbacks *allocator, VkInstance *instance,
                               void *client_ptr)
{
    VkInstance client_instance = client_ptr;
    VkInstanceCreateInfo create_info_host;
    const VkApplicationInfo *app_info;
    struct conversion_context ctx;
    struct wine_instance *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
    {
        ERR("Failed to allocate memory for instance\n");
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    list_init(&object->wrappers);
    pthread_rwlock_init(&object->wrapper_lock, NULL);

    init_conversion_context(&ctx);
    res = wine_vk_instance_convert_create_info(&ctx, create_info, &create_info_host, object);
    if (res == VK_SUCCESS)
        res = vk_funcs->p_vkCreateInstance(&create_info_host, NULL /* allocator */, &object->host_instance);
    free_conversion_context(&ctx);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to create instance, res=%d\n", res);
        wine_vk_instance_free(object);
        return res;
    }

    object->handle = client_instance;
    WINE_VK_ADD_DISPATCHABLE_MAPPING(object, object->handle, object->host_instance, object);

    /* Load all instance functions we are aware of. Note the loader takes care
     * of any filtering for extensions which were not requested, but which the
     * ICD may support.
     */
#define USE_VK_FUNC(name) \
    object->funcs.p_##name = (void *)vk_funcs->p_vkGetInstanceProcAddr(object->host_instance, #name);
    ALL_VK_INSTANCE_FUNCS()
#undef USE_VK_FUNC

    /* Cache physical devices for vkEnumeratePhysicalDevices within the instance as
     * each vkPhysicalDevice is a dispatchable object, which means we need to wrap
     * the host physical devices and present those to the application.
     * Cleanup happens as part of wine_vkDestroyInstance.
     */
    res = wine_vk_instance_load_physical_devices(object);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to load physical devices, res=%d\n", res);
        wine_vk_instance_free(object);
        return res;
    }

    if ((app_info = create_info->pApplicationInfo))
    {
        TRACE("Application name %s, application version %#x.\n",
              debugstr_a(app_info->pApplicationName), app_info->applicationVersion);
        TRACE("Engine name %s, engine version %#x.\n", debugstr_a(app_info->pEngineName),
              app_info->engineVersion);
        TRACE("API version %#x.\n", app_info->apiVersion);

        if (app_info->pEngineName && !strcmp(app_info->pEngineName, "idTech"))
            object->quirks |= WINEVULKAN_QUIRK_GET_DEVICE_PROC_ADDR;
    }

    object->quirks |= WINEVULKAN_QUIRK_ADJUST_MAX_IMAGE_COUNT;

    client_instance->base.unix_handle = (uintptr_t)object;
    *instance = client_instance;
    TRACE("Created instance %p, host_instance %p.\n", object, object->host_instance);
    return VK_SUCCESS;
}

void wine_vkDestroyDevice(VkDevice handle, const VkAllocationCallbacks *allocator)
{
    struct wine_device *device = wine_device_from_handle(handle);

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    wine_vk_device_free(device);
}

void wine_vkDestroyInstance(VkInstance handle, const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    wine_vk_instance_free(instance);
}

VkResult wine_vkEnumerateDeviceExtensionProperties(VkPhysicalDevice phys_dev_handle, const char *layer_name,
                                                   uint32_t *count, VkExtensionProperties *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);

    /* This shouldn't get called with layer_name set, the ICD loader prevents it. */
    if (layer_name)
    {
        ERR("Layer enumeration not supported from ICD.\n");
        return VK_ERROR_LAYER_NOT_PRESENT;
    }

    if (!properties)
    {
        *count = phys_dev->extension_count;
        return VK_SUCCESS;
    }

    *count = min(*count, phys_dev->extension_count);
    memcpy(properties, phys_dev->extensions, *count * sizeof(*properties));

    TRACE("Returning %u extensions.\n", *count);
    return *count < phys_dev->extension_count ? VK_INCOMPLETE : VK_SUCCESS;
}
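/* Vulkan enumeration entry points follow the two-call idiom: when the output
 * array is NULL only the count is written back, otherwise up to *count
 * elements are filled in and VK_INCOMPLETE is returned if the caller's
 * buffer was too small.  The thunks below apply the same pattern while
 * filtering the host list down to what winevulkan supports. */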
VkResult wine_vkEnumerateInstanceExtensionProperties(const char *name, uint32_t *count,
                                                     VkExtensionProperties *properties)
{
    uint32_t num_properties = 0, num_host_properties;
    VkExtensionProperties *host_properties;
    unsigned int i, j;
    VkResult res;

    res = vk_funcs->p_vkEnumerateInstanceExtensionProperties(NULL, &num_host_properties, NULL);
    if (res != VK_SUCCESS)
        return res;

    if (!(host_properties = calloc(num_host_properties, sizeof(*host_properties))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = vk_funcs->p_vkEnumerateInstanceExtensionProperties(NULL, &num_host_properties, host_properties);
    if (res != VK_SUCCESS)
    {
        ERR("Failed to retrieve host properties, res=%d.\n", res);
        free(host_properties);
        return res;
    }

    /* The Wine graphics driver provides us with all extensions supported by the host side
     * including extension fixup (e.g. VK_KHR_xlib_surface -> VK_KHR_win32_surface). It is
     * up to us here to filter the list down to extensions for which we have thunks.
     */
    for (i = 0; i < num_host_properties; i++)
    {
        if (wine_vk_instance_extension_supported(host_properties[i].extensionName))
            num_properties++;
        else
            TRACE("Instance extension '%s' is not supported.\n", host_properties[i].extensionName);
    }

    if (!properties)
    {
        TRACE("Returning %u extensions.\n", num_properties);
        *count = num_properties;
        free(host_properties);
        return VK_SUCCESS;
    }

    for (i = 0, j = 0; i < num_host_properties && j < *count; i++)
    {
        if (wine_vk_instance_extension_supported(host_properties[i].extensionName))
        {
            TRACE("Enabling extension '%s'.\n", host_properties[i].extensionName);
            properties[j++] = host_properties[i];
        }
    }
    *count = min(*count, num_properties);

    free(host_properties);
    return *count < num_properties ? VK_INCOMPLETE : VK_SUCCESS;
}

VkResult wine_vkEnumerateDeviceLayerProperties(VkPhysicalDevice phys_dev, uint32_t *count,
                                               VkLayerProperties *properties)
{
    *count = 0;
    return VK_SUCCESS;
}

VkResult wine_vkEnumerateInstanceVersion(uint32_t *version)
{
    VkResult res;

    static VkResult (*p_vkEnumerateInstanceVersion)(uint32_t *version);
    if (!p_vkEnumerateInstanceVersion)
        p_vkEnumerateInstanceVersion = vk_funcs->p_vkGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");

    if (p_vkEnumerateInstanceVersion)
    {
        res = p_vkEnumerateInstanceVersion(version);
    }
    else
    {
        *version = VK_API_VERSION_1_0;
        res = VK_SUCCESS;
    }

    TRACE("API version %u.%u.%u.\n",
          VK_VERSION_MAJOR(*version), VK_VERSION_MINOR(*version), VK_VERSION_PATCH(*version));
    *version = min(WINE_VK_VERSION, *version);
    return res;
}

VkResult wine_vkEnumeratePhysicalDevices(VkInstance handle, uint32_t *count, VkPhysicalDevice *devices)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    unsigned int i;

    if (!devices)
    {
        *count = instance->phys_dev_count;
        return VK_SUCCESS;
    }

    *count = min(*count, instance->phys_dev_count);
    for (i = 0; i < *count; i++)
    {
        devices[i] = instance->phys_devs[i]->handle;
    }

    TRACE("Returning %u devices.\n", *count);
    return *count < instance->phys_dev_count ? VK_INCOMPLETE : VK_SUCCESS;
}
void wine_vkFreeCommandBuffers(VkDevice handle, VkCommandPool command_pool, uint32_t count,
                               const VkCommandBuffer *buffers)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_cmd_pool *pool = wine_cmd_pool_from_handle(command_pool);

    wine_vk_free_command_buffers(device, pool, count, buffers);
}

static VkQueue wine_vk_device_find_queue(VkDevice handle, const VkDeviceQueueInfo2 *info)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_queue *queue;
    uint32_t i;

    for (i = 0; i < device->queue_count; i++)
    {
        queue = &device->queues[i];
        if (queue->family_index == info->queueFamilyIndex
                && queue->queue_index == info->queueIndex
                && queue->flags == info->flags)
        {
            return queue->handle;
        }
    }

    return VK_NULL_HANDLE;
}

void wine_vkGetDeviceQueue(VkDevice device, uint32_t family_index, uint32_t queue_index, VkQueue *queue)
{
    VkDeviceQueueInfo2 queue_info;

    queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2;
    queue_info.pNext = NULL;
    queue_info.flags = 0;
    queue_info.queueFamilyIndex = family_index;
    queue_info.queueIndex = queue_index;

    *queue = wine_vk_device_find_queue(device, &queue_info);
}

void wine_vkGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *info, VkQueue *queue)
{
    const VkBaseInStructure *chain;

    if ((chain = info->pNext))
        FIXME("Ignoring a linked structure of type %u.\n", chain->sType);

    *queue = wine_vk_device_find_queue(device, info);
}

VkResult wine_vkCreateCommandPool(VkDevice device_handle, const VkCommandPoolCreateInfo *info,
                                  const VkAllocationCallbacks *allocator, VkCommandPool *command_pool,
                                  void *client_ptr)
{
    struct wine_device *device = wine_device_from_handle(device_handle);
    struct vk_command_pool *handle = client_ptr;
    struct wine_cmd_pool *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = device->funcs.p_vkCreateCommandPool(device->host_device, info, NULL, &object->host_command_pool);

    if (res == VK_SUCCESS)
    {
        object->handle = (uintptr_t)handle;
        handle->unix_handle = (uintptr_t)object;
        WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(device->phys_dev->instance, object->handle,
                                             object->host_command_pool, object);
        *command_pool = object->handle;
    }
    else
    {
        free(object);
    }

    return res;
}

void wine_vkDestroyCommandPool(VkDevice device_handle, VkCommandPool handle,
                               const VkAllocationCallbacks *allocator)
{
    struct wine_device *device = wine_device_from_handle(device_handle);
    struct wine_cmd_pool *pool = wine_cmd_pool_from_handle(handle);

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    WINE_VK_REMOVE_HANDLE_MAPPING(device->phys_dev->instance, pool);

    device->funcs.p_vkDestroyCommandPool(device->host_device, pool->host_command_pool, NULL);
    free(pool);
}

static VkResult wine_vk_enumerate_physical_device_groups(struct wine_instance *instance,
        VkResult (*p_vkEnumeratePhysicalDeviceGroups)(VkInstance, uint32_t *, VkPhysicalDeviceGroupProperties *),
        uint32_t *count, VkPhysicalDeviceGroupProperties *properties)
{
    unsigned int i, j;
    VkResult res;

    res = p_vkEnumeratePhysicalDeviceGroups(instance->host_instance, count, properties);
    if (res < 0 || !properties)
        return res;

    for (i = 0; i < *count; ++i)
    {
        VkPhysicalDeviceGroupProperties *current = &properties[i];
        for (j = 0; j < current->physicalDeviceCount; ++j)
        {
            VkPhysicalDevice dev = current->physicalDevices[j];
            struct wine_phys_dev *phys_dev = wine_vk_instance_wrap_physical_device(instance, dev);
            if (!phys_dev)
                return VK_ERROR_INITIALIZATION_FAILED;
            current->physicalDevices[j] = phys_dev->handle;
        }
    }

    return res;
}
VkResult wine_vkEnumeratePhysicalDeviceGroups(VkInstance handle, uint32_t *count,
                                              VkPhysicalDeviceGroupProperties *properties)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);

    return wine_vk_enumerate_physical_device_groups(instance,
            instance->funcs.p_vkEnumeratePhysicalDeviceGroups, count, properties);
}

VkResult wine_vkEnumeratePhysicalDeviceGroupsKHR(VkInstance handle, uint32_t *count,
                                                 VkPhysicalDeviceGroupProperties *properties)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);

    return wine_vk_enumerate_physical_device_groups(instance,
            instance->funcs.p_vkEnumeratePhysicalDeviceGroupsKHR, count, properties);
}

void wine_vkGetPhysicalDeviceExternalFenceProperties(VkPhysicalDevice phys_dev,
        const VkPhysicalDeviceExternalFenceInfo *fence_info, VkExternalFenceProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalFenceFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalFencePropertiesKHR(VkPhysicalDevice phys_dev,
        const VkPhysicalDeviceExternalFenceInfo *fence_info, VkExternalFenceProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalFenceFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalBufferProperties(VkPhysicalDevice phys_dev,
        const VkPhysicalDeviceExternalBufferInfo *buffer_info, VkExternalBufferProperties *properties)
{
    memset(&properties->externalMemoryProperties, 0, sizeof(properties->externalMemoryProperties));
}

void wine_vkGetPhysicalDeviceExternalBufferPropertiesKHR(VkPhysicalDevice phys_dev,
        const VkPhysicalDeviceExternalBufferInfo *buffer_info, VkExternalBufferProperties *properties)
{
    memset(&properties->externalMemoryProperties, 0, sizeof(properties->externalMemoryProperties));
}

VkResult wine_vkGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice phys_dev_handle,
        const VkPhysicalDeviceImageFormatInfo2 *format_info, VkImageFormatProperties2 *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    VkExternalImageFormatProperties *external_image_properties;
    VkResult res;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceImageFormatProperties2(phys_dev->host_physical_device,
            format_info, properties);

    if ((external_image_properties = find_next_struct(properties,
                                                      VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES)))
    {
        VkExternalMemoryProperties *p = &external_image_properties->externalMemoryProperties;
        p->externalMemoryFeatures = 0;
        p->exportFromImportedHandleTypes = 0;
        p->compatibleHandleTypes = 0;
    }

    return res;
}

VkResult wine_vkGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice phys_dev_handle,
        const VkPhysicalDeviceImageFormatInfo2 *format_info, VkImageFormatProperties2 *properties)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(phys_dev_handle);
    VkExternalImageFormatProperties *external_image_properties;
    VkResult res;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceImageFormatProperties2KHR(phys_dev->host_physical_device,
            format_info, properties);

    if ((external_image_properties = find_next_struct(properties,
                                                      VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES)))
    {
        VkExternalMemoryProperties *p = &external_image_properties->externalMemoryProperties;
        p->externalMemoryFeatures = 0;
        p->exportFromImportedHandleTypes = 0;
        p->compatibleHandleTypes = 0;
    }

    return res;
}
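/* Calibrated timestamps: Windows applications ask for
 * VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT, which has no host-side
 * counterpart.  Wine implements QueryPerformanceCounter on top of the
 * monotonic clock at TICKSPERSEC (10,000,000) ticks per second, so a
 * monotonic timestamp in nanoseconds converts to QPC ticks by dividing by
 * NANOSECONDS_IN_A_SECOND / TICKSPERSEC = 100, i.e. one tick per 100ns. */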
/* From ntdll/unix/sync.c */
#define NANOSECONDS_IN_A_SECOND 1000000000
#define TICKSPERSEC             10000000

static inline VkTimeDomainEXT get_performance_counter_time_domain(void)
{
#if !defined(__APPLE__) && defined(HAVE_CLOCK_GETTIME)
# ifdef CLOCK_MONOTONIC_RAW
    return VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT;
# else
    return VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT;
# endif
#else
    FIXME("No mapping for VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT on this platform.\n");
    return VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT;
#endif
}

static VkTimeDomainEXT map_to_host_time_domain(VkTimeDomainEXT domain)
{
    /* Matches ntdll/unix/sync.c's performance counter implementation. */
    if (domain == VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT)
        return get_performance_counter_time_domain();

    return domain;
}

static inline uint64_t convert_monotonic_timestamp(uint64_t value)
{
    return value / (NANOSECONDS_IN_A_SECOND / TICKSPERSEC);
}

static inline uint64_t convert_timestamp(VkTimeDomainEXT host_domain, VkTimeDomainEXT target_domain, uint64_t value)
{
    if (host_domain == target_domain)
        return value;

    /* Convert between MONOTONIC time in ns -> QueryPerformanceCounter */
    if ((host_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT || host_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT)
            && target_domain == VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT)
        return convert_monotonic_timestamp(value);

    FIXME("Couldn't translate between host domain %d and target domain %d\n", host_domain, target_domain);
    return value;
}

VkResult wine_vkGetCalibratedTimestampsEXT(VkDevice handle, uint32_t timestamp_count,
                                           const VkCalibratedTimestampInfoEXT *timestamp_infos,
                                           uint64_t *timestamps, uint64_t *max_deviation)
{
    struct wine_device *device = wine_device_from_handle(handle);
    VkCalibratedTimestampInfoEXT *host_timestamp_infos;
    unsigned int i;
    VkResult res;

    TRACE("%p, %u, %p, %p, %p\n", device, timestamp_count, timestamp_infos, timestamps, max_deviation);

    if (!(host_timestamp_infos = malloc(sizeof(VkCalibratedTimestampInfoEXT) * timestamp_count)))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    for (i = 0; i < timestamp_count; i++)
    {
        host_timestamp_infos[i].sType = timestamp_infos[i].sType;
        host_timestamp_infos[i].pNext = timestamp_infos[i].pNext;
        host_timestamp_infos[i].timeDomain = map_to_host_time_domain(timestamp_infos[i].timeDomain);
    }

    res = device->funcs.p_vkGetCalibratedTimestampsEXT(device->host_device, timestamp_count,
                                                       host_timestamp_infos, timestamps, max_deviation);
    if (res != VK_SUCCESS)
    {
        /* don't leak the temporary array on the error path */
        free(host_timestamp_infos);
        return res;
    }

    for (i = 0; i < timestamp_count; i++)
        timestamps[i] = convert_timestamp(host_timestamp_infos[i].timeDomain,
                                          timestamp_infos[i].timeDomain, timestamps[i]);

    free(host_timestamp_infos);

    return res;
}
VkResult wine_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(VkPhysicalDevice handle,
                                                             uint32_t *time_domain_count,
                                                             VkTimeDomainEXT *time_domains)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(handle);
    BOOL supports_device = FALSE, supports_monotonic = FALSE, supports_monotonic_raw = FALSE;
    const VkTimeDomainEXT performance_counter_domain = get_performance_counter_time_domain();
    VkTimeDomainEXT *host_time_domains;
    uint32_t host_time_domain_count;
    VkTimeDomainEXT out_time_domains[2];
    uint32_t out_time_domain_count;
    unsigned int i;
    VkResult res;

    /* Find out the time domains supported on the host */
    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(phys_dev->host_physical_device,
            &host_time_domain_count, NULL);
    if (res != VK_SUCCESS)
        return res;

    if (!(host_time_domains = malloc(sizeof(VkTimeDomainEXT) * host_time_domain_count)))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(phys_dev->host_physical_device,
            &host_time_domain_count, host_time_domains);
    if (res != VK_SUCCESS)
    {
        free(host_time_domains);
        return res;
    }

    for (i = 0; i < host_time_domain_count; i++)
    {
        if (host_time_domains[i] == VK_TIME_DOMAIN_DEVICE_EXT)
            supports_device = TRUE;
        else if (host_time_domains[i] == VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT)
            supports_monotonic = TRUE;
        else if (host_time_domains[i] == VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT)
            supports_monotonic_raw = TRUE;
        else
            FIXME("Unknown time domain %d\n", host_time_domains[i]);
    }

    free(host_time_domains);

    out_time_domain_count = 0;

    /* Map our monotonic times -> QPC */
    if (supports_monotonic_raw && performance_counter_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT)
        out_time_domains[out_time_domain_count++] = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT;
    else if (supports_monotonic && performance_counter_domain == VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT)
        out_time_domains[out_time_domain_count++] = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT;
    else
        FIXME("VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT not supported on this platform.\n");

    /* Forward the device domain time */
    if (supports_device)
        out_time_domains[out_time_domain_count++] = VK_TIME_DOMAIN_DEVICE_EXT;

    /* Send the count/domains back to the app */
    if (!time_domains)
    {
        *time_domain_count = out_time_domain_count;
        return VK_SUCCESS;
    }

    for (i = 0; i < min(*time_domain_count, out_time_domain_count); i++)
        time_domains[i] = out_time_domains[i];

    res = *time_domain_count < out_time_domain_count ? VK_INCOMPLETE : VK_SUCCESS;
    *time_domain_count = out_time_domain_count;
    return res;
}

void wine_vkGetPhysicalDeviceExternalSemaphoreProperties(VkPhysicalDevice phys_dev,
        const VkPhysicalDeviceExternalSemaphoreInfo *info, VkExternalSemaphoreProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalSemaphoreFeatures = 0;
}

void wine_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(VkPhysicalDevice phys_dev,
        const VkPhysicalDeviceExternalSemaphoreInfo *info, VkExternalSemaphoreProperties *properties)
{
    properties->exportFromImportedHandleTypes = 0;
    properties->compatibleHandleTypes = 0;
    properties->externalSemaphoreFeatures = 0;
}

VkResult wine_vkCreateWin32SurfaceKHR(VkInstance handle, const VkWin32SurfaceCreateInfoKHR *createInfo,
                                      const VkAllocationCallbacks *allocator, VkSurfaceKHR *surface)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    struct wine_surface *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    object = calloc(1, sizeof(*object));

    if (!object)
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = instance->funcs.p_vkCreateWin32SurfaceKHR(instance->host_instance, createInfo, NULL,
                                                    &object->driver_surface);
    if (res != VK_SUCCESS)
    {
        free(object);
        return res;
    }

    object->host_surface = vk_funcs->p_wine_get_host_surface(object->driver_surface);

    WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(instance, object, object->host_surface, object);

    *surface = wine_surface_to_handle(object);

    return VK_SUCCESS;
}

void wine_vkDestroySurfaceKHR(VkInstance handle, VkSurfaceKHR surface, const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    struct wine_surface *object = wine_surface_from_handle(surface);

    if (!object)
        return;

    instance->funcs.p_vkDestroySurfaceKHR(instance->host_instance, object->driver_surface, NULL);
    WINE_VK_REMOVE_HANDLE_MAPPING(instance, object);
    free(object);
}
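/* On wow64, memory mapped by the 64-bit host driver can land anywhere in the
 * 64-bit address space, out of reach of 32-bit pointers.  The path below
 * therefore pre-allocates a 32-bit-addressable block (using the zero_bits
 * mask set up in init_vulkan()) and imports it with
 * VK_EXT_external_memory_host, rounding allocationSize up to the driver's
 * minImportedHostPointerAlignment: with align = alignment - 1 (e.g. 4095 for
 * a 4096-byte alignment), (size + align) & ~align rounds up to the next
 * multiple. */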
VkResult wine_vkAllocateMemory(VkDevice handle, const VkMemoryAllocateInfo *alloc_info,
                               const VkAllocationCallbacks *allocator, VkDeviceMemory *ret)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_device_memory *memory;
    VkMemoryAllocateInfo info = *alloc_info;
    VkImportMemoryHostPointerInfoEXT host_pointer_info;
    uint32_t mem_flags;
    void *mapping = NULL;
    VkResult result;

    /* For host visible memory, we try to use VK_EXT_external_memory_host on wow64
     * to ensure that mapped pointer is 32-bit. */
    mem_flags = device->phys_dev->memory_properties.memoryTypes[alloc_info->memoryTypeIndex].propertyFlags;
    if (device->phys_dev->external_memory_align && (mem_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
        && !find_next_struct(alloc_info->pNext, VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT))
    {
        VkMemoryHostPointerPropertiesEXT props =
        {
            .sType = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT,
        };
        uint32_t i, align = device->phys_dev->external_memory_align - 1;
        SIZE_T alloc_size = info.allocationSize;
        static int once;

        if (!once++)
            FIXME("Using VK_EXT_external_memory_host\n");

        if (NtAllocateVirtualMemory(GetCurrentProcess(), &mapping, zero_bits, &alloc_size,
                                    MEM_COMMIT, PAGE_READWRITE))
        {
            ERR("NtAllocateVirtualMemory failed\n");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        result = device->funcs.p_vkGetMemoryHostPointerPropertiesEXT(device->host_device,
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, mapping, &props);
        if (result != VK_SUCCESS)
        {
            ERR("vkGetMemoryHostPointerPropertiesEXT failed: %d\n", result);
            /* release the pre-committed mapping on the error path */
            alloc_size = 0;
            NtFreeVirtualMemory(GetCurrentProcess(), &mapping, &alloc_size, MEM_RELEASE);
            return result;
        }

        if (!(props.memoryTypeBits & (1u << info.memoryTypeIndex)))
        {
            /* If requested memory type is not allowed to use external memory,
             * try to find a supported compatible type.
             */
            uint32_t mask = mem_flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            for (i = 0; i < device->phys_dev->memory_properties.memoryTypeCount; i++)
            {
                if (!(props.memoryTypeBits & (1u << i)))
                    continue;
                if ((device->phys_dev->memory_properties.memoryTypes[i].propertyFlags & mask) != mask)
                    continue;

                TRACE("Memory type not compatible with host memory, using %u instead\n", i);
                info.memoryTypeIndex = i;
                break;
            }
            if (i == device->phys_dev->memory_properties.memoryTypeCount)
            {
                FIXME("No compatible memory type found\n");
                alloc_size = 0;
                NtFreeVirtualMemory(GetCurrentProcess(), &mapping, &alloc_size, MEM_RELEASE);
                /* clear the pointer so the freed block is not stored in memory->mapping below */
                mapping = NULL;
            }
        }

        if (props.memoryTypeBits & (1u << info.memoryTypeIndex))
        {
            host_pointer_info.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT;
            host_pointer_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
            host_pointer_info.pHostPointer = mapping;
            host_pointer_info.pNext = info.pNext;
            info.pNext = &host_pointer_info;

            info.allocationSize = (info.allocationSize + align) & ~align;
        }
    }

    if (!(memory = malloc(sizeof(*memory))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    result = device->funcs.p_vkAllocateMemory(device->host_device, &info, NULL, &memory->host_memory);
    if (result != VK_SUCCESS)
    {
        free(memory);
        return result;
    }

    memory->mapping = mapping;
    *ret = (VkDeviceMemory)(uintptr_t)memory;
    return VK_SUCCESS;
}

void wine_vkFreeMemory(VkDevice handle, VkDeviceMemory memory_handle, const VkAllocationCallbacks *allocator)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_device_memory *memory;

    if (!memory_handle)
        return;
    memory = wine_device_memory_from_handle(memory_handle);

    device->funcs.p_vkFreeMemory(device->host_device, memory->host_memory, NULL);

    if (memory->mapping)
    {
        SIZE_T alloc_size = 0;
        NtFreeVirtualMemory(GetCurrentProcess(), &memory->mapping, &alloc_size, MEM_RELEASE);
    }

    free(memory);
}

VkResult wine_vkMapMemory(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset,
                          VkDeviceSize size, VkMemoryMapFlags flags, void **data)
{
    const VkMemoryMapInfoKHR info =
    {
        .sType = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR,
        .flags = flags,
        .memory = memory,
        .offset = offset,
        .size = size,
    };

    return wine_vkMapMemory2KHR(device, &info, data);
}

VkResult wine_vkMapMemory2KHR(VkDevice handle, const VkMemoryMapInfoKHR *map_info, void **data)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_device_memory *memory = wine_device_memory_from_handle(map_info->memory);
    VkMemoryMapInfoKHR info = *map_info;
    VkResult result;

    info.memory = memory->host_memory;
    if (memory->mapping)
    {
        *data = (char *)memory->mapping + info.offset;
        TRACE("returning %p\n", *data);
        return VK_SUCCESS;
    }

    if (device->funcs.p_vkMapMemory2KHR)
    {
        result = device->funcs.p_vkMapMemory2KHR(device->host_device, &info, data);
    }
    else
    {
        assert(!info.pNext);
        result = device->funcs.p_vkMapMemory(device->host_device, info.memory, info.offset,
                                             info.size, info.flags, data);
    }

#ifdef _WIN64
    if (NtCurrentTeb()->WowTebOffset && result == VK_SUCCESS && (UINT_PTR)*data >> 32)
    {
        FIXME("returned mapping %p does not fit 32-bit pointer\n", *data);
        device->funcs.p_vkUnmapMemory(device->host_device, memory->host_memory);
        *data = NULL;
        result = VK_ERROR_OUT_OF_HOST_MEMORY;
    }
#endif

    return result;
}

void wine_vkUnmapMemory(VkDevice device, VkDeviceMemory memory)
{
    const VkMemoryUnmapInfoKHR info =
    {
        .sType = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR,
        .memory = memory,
    };

    wine_vkUnmapMemory2KHR(device, &info);
}
VkResult wine_vkUnmapMemory2KHR(VkDevice handle, const VkMemoryUnmapInfoKHR *unmap_info)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_device_memory *memory = wine_device_memory_from_handle(unmap_info->memory);
    VkMemoryUnmapInfoKHR info;

    if (memory->mapping)
        return VK_SUCCESS;

    if (!device->funcs.p_vkUnmapMemory2KHR)
    {
        assert(!unmap_info->pNext);
        device->funcs.p_vkUnmapMemory(device->host_device, memory->host_memory);
        return VK_SUCCESS;
    }

    info = *unmap_info;
    info.memory = memory->host_memory;
    return device->funcs.p_vkUnmapMemory2KHR(device->host_device, &info);
}
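/* NOTE: resources that may later be bound to host-pointer-imported memory
 * (see wine_vkAllocateMemory() above) must themselves be created with a
 * matching VkExternalMemory{Buffer,Image}CreateInfo, otherwise binding the
 * imported memory would be invalid.  Hence the two hooks below chain that
 * struct in whenever external_memory_align is set and the application did
 * not already provide one. */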
VkResult wine_vkCreateBuffer(VkDevice handle, const VkBufferCreateInfo *create_info,
                             const VkAllocationCallbacks *allocator, VkBuffer *buffer)
{
    struct wine_device *device = wine_device_from_handle(handle);
    VkExternalMemoryBufferCreateInfo external_memory_info;
    VkBufferCreateInfo info = *create_info;

    if (device->phys_dev->external_memory_align
        && !find_next_struct(info.pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO))
    {
        external_memory_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO;
        external_memory_info.pNext = info.pNext;
        external_memory_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
        info.pNext = &external_memory_info;
    }

    return device->funcs.p_vkCreateBuffer(device->host_device, &info, NULL, buffer);
}

VkResult wine_vkCreateImage(VkDevice handle, const VkImageCreateInfo *create_info,
                            const VkAllocationCallbacks *allocator, VkImage *image)
{
    struct wine_device *device = wine_device_from_handle(handle);
    VkExternalMemoryImageCreateInfo external_memory_info;
    VkImageCreateInfo info = *create_info;

    if (device->phys_dev->external_memory_align
        && !find_next_struct(info.pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO))
    {
        external_memory_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
        external_memory_info.pNext = info.pNext;
        external_memory_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
        info.pNext = &external_memory_info;
    }

    return device->funcs.p_vkCreateImage(device->host_device, &info, NULL, image);
}

static inline void adjust_max_image_count(struct wine_phys_dev *phys_dev, VkSurfaceCapabilitiesKHR *capabilities)
{
    /* Many Windows games, for example Strange Brigade, No Man's Sky, Path of Exile
     * and World War Z, do not expect that maxImageCount can be set to 0.
     * A value of 0 means that there is no limit on the number of images.
     * Nvidia reports 8 on Windows, AMD 16.
     * https://vulkan.gpuinfo.org/displayreport.php?id=9122#surface
     * https://vulkan.gpuinfo.org/displayreport.php?id=9121#surface
     */
    if ((phys_dev->instance->quirks & WINEVULKAN_QUIRK_ADJUST_MAX_IMAGE_COUNT) && !capabilities->maxImageCount)
    {
        capabilities->maxImageCount = max(capabilities->minImageCount, 16);
    }
}

VkResult wine_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice handle, VkSurfaceKHR surface_handle,
                                                        VkSurfaceCapabilitiesKHR *capabilities)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(handle);
    struct wine_surface *surface = wine_surface_from_handle(surface_handle);
    VkResult res;

    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(phys_dev->host_physical_device,
            surface->driver_surface, capabilities);

    if (res == VK_SUCCESS)
        adjust_max_image_count(phys_dev, capabilities);

    return res;
}

VkResult wine_vkGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice handle,
        const VkPhysicalDeviceSurfaceInfo2KHR *surface_info, VkSurfaceCapabilities2KHR *capabilities)
{
    struct wine_phys_dev *phys_dev = wine_phys_dev_from_handle(handle);
    struct wine_surface *surface = wine_surface_from_handle(surface_info->surface);
    VkPhysicalDeviceSurfaceInfo2KHR host_info;
    VkResult res;

    host_info.sType = surface_info->sType;
    host_info.pNext = surface_info->pNext;
    host_info.surface = surface->driver_surface;
    res = phys_dev->instance->funcs.p_vkGetPhysicalDeviceSurfaceCapabilities2KHR(phys_dev->host_physical_device,
            &host_info, capabilities);

    if (res == VK_SUCCESS)
        adjust_max_image_count(phys_dev, &capabilities->surfaceCapabilities);

    return res;
}

VkResult wine_vkCreateDebugUtilsMessengerEXT(VkInstance handle,
                                             const VkDebugUtilsMessengerCreateInfoEXT *create_info,
                                             const VkAllocationCallbacks *allocator,
                                             VkDebugUtilsMessengerEXT *messenger)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    VkDebugUtilsMessengerCreateInfoEXT wine_create_info;
    struct wine_debug_utils_messenger *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    object->instance = instance;
    object->user_callback = create_info->pfnUserCallback;
    object->user_data = create_info->pUserData;

    wine_create_info = *create_info;
    wine_create_info.pfnUserCallback = (void *) &debug_utils_callback_conversion;
    wine_create_info.pUserData = object;

    res = instance->funcs.p_vkCreateDebugUtilsMessengerEXT(instance->host_instance, &wine_create_info,
                                                           NULL, &object->host_debug_messenger);
    if (res != VK_SUCCESS)
    {
        free(object);
        return res;
    }

    WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(instance, object, object->host_debug_messenger, object);
    *messenger = wine_debug_utils_messenger_to_handle(object);

    return VK_SUCCESS;
}

void wine_vkDestroyDebugUtilsMessengerEXT(VkInstance handle, VkDebugUtilsMessengerEXT messenger,
                                          const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    struct wine_debug_utils_messenger *object;

    object = wine_debug_utils_messenger_from_handle(messenger);

    if (!object)
        return;

    instance->funcs.p_vkDestroyDebugUtilsMessengerEXT(instance->host_instance, object->host_debug_messenger, NULL);
    WINE_VK_REMOVE_HANDLE_MAPPING(instance, object);

    free(object);
}
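/* As with the debug utils messenger above, the host driver cannot call the
 * application's callback directly, since it lives on the PE side.  The
 * unix-side debug_report_callback_conversion() trampoline is registered
 * instead; it translates the arguments and reaches the application's
 * callback through KeUserModeCallback(). */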
VkResult wine_vkCreateDebugReportCallbackEXT(VkInstance handle,
                                             const VkDebugReportCallbackCreateInfoEXT *create_info,
                                             const VkAllocationCallbacks *allocator,
                                             VkDebugReportCallbackEXT *callback)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    VkDebugReportCallbackCreateInfoEXT wine_create_info;
    struct wine_debug_report_callback *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    object->instance = instance;
    object->user_callback = create_info->pfnCallback;
    object->user_data = create_info->pUserData;

    wine_create_info = *create_info;
    wine_create_info.pfnCallback = (void *) debug_report_callback_conversion;
    wine_create_info.pUserData = object;

    res = instance->funcs.p_vkCreateDebugReportCallbackEXT(instance->host_instance, &wine_create_info,
                                                           NULL, &object->host_debug_callback);
    if (res != VK_SUCCESS)
    {
        free(object);
        return res;
    }

    WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(instance, object, object->host_debug_callback, object);
    *callback = wine_debug_report_callback_to_handle(object);

    return VK_SUCCESS;
}

void wine_vkDestroyDebugReportCallbackEXT(VkInstance handle, VkDebugReportCallbackEXT callback,
                                          const VkAllocationCallbacks *allocator)
{
    struct wine_instance *instance = wine_instance_from_handle(handle);
    struct wine_debug_report_callback *object;

    object = wine_debug_report_callback_from_handle(callback);

    if (!object)
        return;

    instance->funcs.p_vkDestroyDebugReportCallbackEXT(instance->host_instance, object->host_debug_callback, NULL);
    WINE_VK_REMOVE_HANDLE_MAPPING(instance, object);

    free(object);
}

VkResult wine_vkCreateDeferredOperationKHR(VkDevice handle, const VkAllocationCallbacks *allocator,
                                           VkDeferredOperationKHR *deferredOperation)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_deferred_operation *object;
    VkResult res;

    if (allocator)
        FIXME("Support for allocation callbacks not implemented yet\n");

    if (!(object = calloc(1, sizeof(*object))))
        return VK_ERROR_OUT_OF_HOST_MEMORY;

    res = device->funcs.p_vkCreateDeferredOperationKHR(device->host_device, NULL, &object->host_deferred_operation);
    if (res != VK_SUCCESS)
    {
        free(object);
        return res;
    }

    init_conversion_context(&object->ctx);

    WINE_VK_ADD_NON_DISPATCHABLE_MAPPING(device->phys_dev->instance, object, object->host_deferred_operation, object);
    *deferredOperation = wine_deferred_operation_to_handle(object);

    return VK_SUCCESS;
}

void wine_vkDestroyDeferredOperationKHR(VkDevice handle, VkDeferredOperationKHR operation,
                                        const VkAllocationCallbacks *allocator)
{
    struct wine_device *device = wine_device_from_handle(handle);
    struct wine_deferred_operation *object;

    object = wine_deferred_operation_from_handle(operation);

    if (!object)
        return;

    device->funcs.p_vkDestroyDeferredOperationKHR(device->host_device, object->host_deferred_operation, NULL);
    WINE_VK_REMOVE_HANDLE_MAPPING(device->phys_dev->instance, object);

    free_conversion_context(&object->ctx);
    free(object);
}

#ifdef _WIN64

NTSTATUS vk_is_available_instance_function(void *arg)
{
    struct is_available_instance_function_params *params = arg;
    struct wine_instance *instance = wine_instance_from_handle(params->instance);
    return !!vk_funcs->p_vkGetInstanceProcAddr(instance->host_instance, params->name);
}

NTSTATUS vk_is_available_device_function(void *arg)
{
    struct is_available_device_function_params *params = arg;
    struct wine_device *device = wine_device_from_handle(params->device);
    return !!vk_funcs->p_vkGetDeviceProcAddr(device->host_device, params->name);
}

#endif /* _WIN64 */
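/* 32-bit variants of the helpers above: when the caller is a 32-bit PE
 * module, the parameter block carries 32-bit values, which are widened to
 * host pointers with UlongToPtr() before use. */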
NTSTATUS vk_is_available_instance_function32(void *arg)
{
    struct
    {
        UINT32 instance;
        UINT32 name;
    } *params = arg;
    struct wine_instance *instance = wine_instance_from_handle(UlongToPtr(params->instance));
    return !!vk_funcs->p_vkGetInstanceProcAddr(instance->host_instance, UlongToPtr(params->name));
}

NTSTATUS vk_is_available_device_function32(void *arg)
{
    struct
    {
        UINT32 device;
        UINT32 name;
    } *params = arg;
    struct wine_device *device = wine_device_from_handle(UlongToPtr(params->device));
    return !!vk_funcs->p_vkGetDeviceProcAddr(device->host_device, UlongToPtr(params->name));
}