788 lines
26 KiB
C++
788 lines
26 KiB
C++
#include "lve_device.hpp"
|
|
|
|
// std headers
|
|
#include <cstring>
|
|
#include <iostream>
|
|
#include <set>
|
|
#include <unordered_set>
|
|
|
|
namespace lve {
|
|
|
|
// local callback functions
|
|
// Validation-layer hook: prints every message delivered by the debug
// messenger to stderr. Returning VK_FALSE tells Vulkan not to abort the
// call that triggered the message.
static VKAPI_ATTR VkBool32 VKAPI_CALL debugCallback(
    VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
    VkDebugUtilsMessageTypeFlagsEXT messageType,
    const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
    void *pUserData) {
  const char *message = pCallbackData->pMessage;
  std::cerr << "validation layer: " << message << std::endl;
  return VK_FALSE;
}
|
|
|
|
// Loader trampoline for vkCreateDebugUtilsMessengerEXT: the extension
// entry point is not exported by the loader, so it must be fetched from
// the instance at runtime. Returns VK_ERROR_EXTENSION_NOT_PRESENT when
// the extension is unavailable.
VkResult CreateDebugUtilsMessengerEXT(
    VkInstance instance,
    const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
    const VkAllocationCallbacks *pAllocator,
    VkDebugUtilsMessengerEXT *pDebugMessenger) {
  auto proc = vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT");
  auto createFn = reinterpret_cast<PFN_vkCreateDebugUtilsMessengerEXT>(proc);
  if (createFn == nullptr) {
    return VK_ERROR_EXTENSION_NOT_PRESENT;
  }
  return createFn(instance, pCreateInfo, pAllocator, pDebugMessenger);
}
|
|
|
|
// Counterpart of CreateDebugUtilsMessengerEXT: resolves the destroy entry
// point at runtime and silently does nothing if the extension is absent.
void DestroyDebugUtilsMessengerEXT(
    VkInstance instance,
    VkDebugUtilsMessengerEXT debugMessenger,
    const VkAllocationCallbacks *pAllocator) {
  auto proc = vkGetInstanceProcAddr(instance, "vkDestroyDebugUtilsMessengerEXT");
  auto destroyFn = reinterpret_cast<PFN_vkDestroyDebugUtilsMessengerEXT>(proc);
  if (destroyFn == nullptr) {
    return;
  }
  destroyFn(instance, debugMessenger, pAllocator);
}
|
|
|
|
// class member functions
|
|
// Brings up the full device stack in dependency order: instance first,
// then the (optional) debug messenger, the presentation surface, the
// physical-device pick, the logical device, and finally the command pool.
// Each step throws on failure, so a returned init() means a usable device.
void LveDevice::init() {
  createInstance();
  setupDebugMessenger();
  createSurface();
  pickPhysicalDevice();
  createLogicalDevice();
  createCommandPool();
}
|
|
|
|
// Tears down everything init() created, in reverse creation order:
// command pool -> logical device -> debug messenger -> surface -> instance.
// The pool must go before the device that owns it; surface and messenger
// must go before the instance they were created from.
void LveDevice::cleanup() {
  vkDestroyCommandPool(device_, commandPool, nullptr);
  vkDestroyDevice(device_, nullptr);

  if (enableValidationLayers) {
    DestroyDebugUtilsMessengerEXT(instance, debugMessenger, nullptr);
  }

  vkDestroySurfaceKHR(instance, surface_, nullptr);
  vkDestroyInstance(instance, nullptr);
}
|
|
|
|
// Creates the Vulkan instance. When validation layers are enabled, they
// are requested and a debug-messenger create-info is chained on pNext so
// that even vkCreateInstance/vkDestroyInstance are covered by the
// callback. Throws std::runtime_error if layers are missing or creation
// fails.
void LveDevice::createInstance() {
  if (enableValidationLayers && !checkValidationLayerSupport()) {
    throw std::runtime_error("validation layers requested, but not available!");
  }

  VkApplicationInfo appInfo = {};
  appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
  appInfo.pApplicationName = "Thineng App";
  appInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
  appInfo.pEngineName = "No Engine";
  appInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0);
  appInfo.apiVersion = VK_API_VERSION_1_0;

  VkInstanceCreateInfo createInfo = {};
  createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
  createInfo.pApplicationInfo = &appInfo;

  // GLFW's required extensions (plus debug-utils when validating).
  auto extensions = getRequiredExtensions();
  createInfo.enabledExtensionCount = static_cast<uint32_t>(extensions.size());
  createInfo.ppEnabledExtensionNames = extensions.data();

  // Declared outside the if so it stays alive until vkCreateInstance
  // returns — createInfo.pNext points at it.
  VkDebugUtilsMessengerCreateInfoEXT debugCreateInfo;
  if (enableValidationLayers) {
    createInfo.enabledLayerCount = static_cast<uint32_t>(validationLayers.size());
    createInfo.ppEnabledLayerNames = validationLayers.data();

    populateDebugMessengerCreateInfo(debugCreateInfo);
    createInfo.pNext = (VkDebugUtilsMessengerCreateInfoEXT *)&debugCreateInfo;
  } else {
    createInfo.enabledLayerCount = 0;
    createInfo.pNext = nullptr;
  }

  if (vkCreateInstance(&createInfo, nullptr, &instance) != VK_SUCCESS) {
    throw std::runtime_error("failed to create instance!");
  }

  // Logs available vs. required extensions; throws if any are missing.
  hasGflwRequiredInstanceExtensions();
}
|
|
|
|
void LveDevice::pickPhysicalDevice() {
|
|
uint32_t deviceCount = 0;
|
|
vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
|
|
if (deviceCount == 0) {
|
|
throw std::runtime_error("failed to find GPUs with Vulkan support!");
|
|
}
|
|
std::cout << "Device count: " << deviceCount << std::endl;
|
|
std::vector<VkPhysicalDevice> devices(deviceCount);
|
|
vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
|
|
|
|
for (const auto &device : devices) {
|
|
if (isDeviceSuitable(device)) {
|
|
physicalDevice = device;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (physicalDevice == VK_NULL_HANDLE) {
|
|
throw std::runtime_error("failed to find a suitable GPU!");
|
|
}
|
|
|
|
VkPhysicalDeviceProperties pProperties;
|
|
vkGetPhysicalDeviceProperties(physicalDevice, &pProperties);
|
|
|
|
std::cout << "physical device: " << pProperties.deviceName << std::endl;
|
|
}
|
|
|
|
// Creates the logical device with one graphics queue and one present
// queue, then caches their handles. A std::set de-duplicates the family
// indices because graphics and present are often the same family, and
// Vulkan forbids requesting the same family twice.
void LveDevice::createLogicalDevice() {
  QueueFamilyIndices indices = findQueueFamilies(physicalDevice);

  std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
  std::set<uint32_t> uniqueQueueFamilies = {indices.graphicsFamily, indices.presentFamily};

  // Highest priority; must outlive the create-info structs that point at it.
  float queuePriority = 1.0f;
  for (uint32_t queueFamily : uniqueQueueFamilies) {
    VkDeviceQueueCreateInfo queueCreateInfo = {};
    queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queueCreateInfo.queueFamilyIndex = queueFamily;
    queueCreateInfo.queueCount = 1;
    queueCreateInfo.pQueuePriorities = &queuePriority;
    queueCreateInfos.push_back(queueCreateInfo);
  }

  // Only anisotropic filtering is requested; isDeviceSuitable() already
  // confirmed the physical device supports it.
  VkPhysicalDeviceFeatures deviceFeatures = {};
  deviceFeatures.samplerAnisotropy = VK_TRUE;

  VkDeviceCreateInfo createInfo = {};
  createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;

  createInfo.queueCreateInfoCount = static_cast<uint32_t>(queueCreateInfos.size());
  createInfo.pQueueCreateInfos = queueCreateInfos.data();

  createInfo.pEnabledFeatures = &deviceFeatures;
  createInfo.enabledExtensionCount = static_cast<uint32_t>(deviceExtensions.size());
  createInfo.ppEnabledExtensionNames = deviceExtensions.data();

  // not really necessary anymore because device specific validation layers
  // have been deprecated
  if (enableValidationLayers) {
    createInfo.enabledLayerCount = static_cast<uint32_t>(validationLayers.size());
    createInfo.ppEnabledLayerNames = validationLayers.data();
  } else {
    createInfo.enabledLayerCount = 0;
  }

  if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device_) != VK_SUCCESS) {
    throw std::runtime_error("failed to create logical device!");
  }

  // Queue index 0 of each family: only one queue per family was requested.
  vkGetDeviceQueue(device_, indices.graphicsFamily, 0, &graphicsQueue_);
  vkGetDeviceQueue(device_, indices.presentFamily, 0, &presentQueue_);
}
|
|
|
|
void LveDevice::createCommandPool() {
|
|
QueueFamilyIndices queueFamilyIndices = findPhysicalQueueFamilies();
|
|
|
|
VkCommandPoolCreateInfo poolInfo = {};
|
|
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
|
|
poolInfo.queueFamilyIndex = queueFamilyIndices.graphicsFamily;
|
|
poolInfo.flags =
|
|
VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
|
|
|
|
if (vkCreateCommandPool(device_, &poolInfo, nullptr, &commandPool) != VK_SUCCESS) {
|
|
throw std::runtime_error("failed to create command pool!");
|
|
}
|
|
}
|
|
|
|
// Delegates surface creation to the window wrapper, which owns the
// GLFW-specific details.
void LveDevice::createSurface() { window.createWindowSurface(instance, &surface_); }
|
|
|
|
// A device qualifies when it has both graphics and present queue families,
// supports all required device extensions, offers at least one surface
// format and one present mode, and supports anisotropic sampling.
bool LveDevice::isDeviceSuitable(VkPhysicalDevice device) {
  QueueFamilyIndices families = findQueueFamilies(device);

  bool hasExtensions = checkDeviceExtensionSupport(device);

  // Only query swap-chain details once the swap-chain extension is known
  // to exist.
  bool swapChainOk = false;
  if (hasExtensions) {
    SwapChainSupportDetails support = querySwapChainSupport(device);
    swapChainOk = !support.formats.empty() && !support.presentModes.empty();
  }

  VkPhysicalDeviceFeatures features;
  vkGetPhysicalDeviceFeatures(device, &features);

  return families.isComplete() && hasExtensions && swapChainOk && features.samplerAnisotropy;
}
|
|
|
|
// Fills in a messenger create-info that routes verbose, warning, and
// error messages of every type (general, validation, performance) to
// debugCallback. Shared by setupDebugMessenger() and by
// createInstance()'s pNext chain, so both use identical filtering.
void LveDevice::populateDebugMessengerCreateInfo(VkDebugUtilsMessengerCreateInfoEXT &createInfo) {
  createInfo = {};
  createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
  createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT |
                               VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
                               VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
  createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
                           VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
                           VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
  createInfo.pfnUserCallback = debugCallback;
  createInfo.pUserData = nullptr;  // Optional
}
|
|
|
|
void LveDevice::setupDebugMessenger() {
|
|
if (!enableValidationLayers) return;
|
|
VkDebugUtilsMessengerCreateInfoEXT createInfo;
|
|
populateDebugMessengerCreateInfo(createInfo);
|
|
if (CreateDebugUtilsMessengerEXT(instance, &createInfo, nullptr, &debugMessenger) != VK_SUCCESS) {
|
|
throw std::runtime_error("failed to set up debug messenger!");
|
|
}
|
|
}
|
|
|
|
bool LveDevice::checkValidationLayerSupport() {
|
|
uint32_t layerCount;
|
|
vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
|
|
|
|
std::vector<VkLayerProperties> availableLayers(layerCount);
|
|
vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data());
|
|
|
|
for (const char *layerName : validationLayers) {
|
|
bool layerFound = false;
|
|
|
|
for (const auto &layerProperties : availableLayers) {
|
|
if (strcmp(layerName, layerProperties.layerName) == 0) {
|
|
layerFound = true;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (!layerFound) {
|
|
return false;
|
|
}
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
std::vector<const char *> LveDevice::getRequiredExtensions() {
|
|
uint32_t glfwExtensionCount = 0;
|
|
const char **glfwExtensions;
|
|
glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
|
|
|
|
std::vector<const char *> extensions(glfwExtensions, glfwExtensions + glfwExtensionCount);
|
|
|
|
if (enableValidationLayers) {
|
|
extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
|
|
}
|
|
|
|
return extensions;
|
|
}
|
|
|
|
void LveDevice::hasGflwRequiredInstanceExtensions() {
|
|
uint32_t extensionCount = 0;
|
|
vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
|
|
std::vector<VkExtensionProperties> extensions(extensionCount);
|
|
vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.data());
|
|
|
|
std::cout << "available extensions:" << std::endl;
|
|
std::unordered_set<std::string> available;
|
|
for (const auto &extension : extensions) {
|
|
std::cout << "\t" << extension.extensionName << std::endl;
|
|
available.insert(extension.extensionName);
|
|
}
|
|
|
|
std::cout << "required extensions:" << std::endl;
|
|
auto requiredExtensions = getRequiredExtensions();
|
|
for (const auto &required : requiredExtensions) {
|
|
std::cout << "\t" << required << std::endl;
|
|
if (available.find(required) == available.end()) {
|
|
throw std::runtime_error("Missing required glfw extension");
|
|
}
|
|
}
|
|
}
|
|
|
|
// Returns true iff the device exposes every extension listed in
// deviceExtensions. Starts with the full required set and crosses off
// everything the device actually offers.
bool LveDevice::checkDeviceExtensionSupport(VkPhysicalDevice device) {
  uint32_t extensionCount;
  vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount, nullptr);

  std::vector<VkExtensionProperties> available(extensionCount);
  vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount, available.data());

  std::set<std::string> missing(deviceExtensions.begin(), deviceExtensions.end());
  for (const auto &extension : available) {
    missing.erase(extension.extensionName);
  }

  return missing.empty();
}
|
|
|
|
// challenge: use queue with VK_QUEUE_TRANSFER_BIT for staging transfer
|
|
// https://vulkan-tutorial.com/Vertex_buffers/Staging_buffer
|
|
// Finds a queue family with graphics support and one that can present to
// surface_ (they may be the same index). Stops early once both are found.
QueueFamilyIndices LveDevice::findQueueFamilies(VkPhysicalDevice device) {
  QueueFamilyIndices indices;

  uint32_t familyCount = 0;
  vkGetPhysicalDeviceQueueFamilyProperties(device, &familyCount, nullptr);

  std::vector<VkQueueFamilyProperties> families(familyCount);
  vkGetPhysicalDeviceQueueFamilyProperties(device, &familyCount, families.data());

  for (uint32_t familyIndex = 0; familyIndex < familyCount; ++familyIndex) {
    const VkQueueFamilyProperties &family = families[familyIndex];

    if (family.queueCount > 0 && (family.queueFlags & VK_QUEUE_GRAPHICS_BIT)) {
      indices.graphicsFamily = familyIndex;
      indices.graphicsFamilyHasValue = true;
    }

    // Present support is a per-surface property, queried separately from
    // the queue flags.
    VkBool32 presentSupport = false;
    vkGetPhysicalDeviceSurfaceSupportKHR(device, familyIndex, surface_, &presentSupport);
    if (family.queueCount > 0 && presentSupport) {
      indices.presentFamily = familyIndex;
      indices.presentFamilyHasValue = true;
    }

    if (indices.isComplete()) {
      break;
    }
  }

  return indices;
}
|
|
|
|
// Gathers everything needed to configure a swap chain for this device and
// surface_: capabilities, supported surface formats, and present modes.
// Uses the standard two-call count-then-fill enumeration pattern.
SwapChainSupportDetails LveDevice::querySwapChainSupport(VkPhysicalDevice device) {
  SwapChainSupportDetails details;
  vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface_, &details.capabilities);

  uint32_t count = 0;
  vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface_, &count, nullptr);
  if (count != 0) {
    details.formats.resize(count);
    vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface_, &count, details.formats.data());
  }

  count = 0;
  vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface_, &count, nullptr);
  if (count != 0) {
    details.presentModes.resize(count);
    vkGetPhysicalDeviceSurfacePresentModesKHR(
        device,
        surface_,
        &count,
        details.presentModes.data());
  }

  return details;
}
|
|
|
|
// Returns the first candidate format whose feature flags (for the given
// tiling mode) include every bit in `features`; throws if none qualifies.
VkFormat LveDevice::findSupportedFormat(
    const std::vector<VkFormat> &candidates, VkImageTiling tiling, VkFormatFeatureFlags features) {
  for (VkFormat format : candidates) {
    VkFormatProperties props;
    vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &props);

    const bool linearOk =
        tiling == VK_IMAGE_TILING_LINEAR && (props.linearTilingFeatures & features) == features;
    const bool optimalOk =
        tiling == VK_IMAGE_TILING_OPTIMAL && (props.optimalTilingFeatures & features) == features;
    if (linearOk || optimalOk) {
      return format;
    }
  }
  throw std::runtime_error("failed to find supported format!");
}
|
|
|
|
// Picks a memory type index that is both allowed by `typeFilter` (a
// bitmask from VkMemoryRequirements::memoryTypeBits) and has at least the
// requested property flags. Throws if no type satisfies both.
uint32_t LveDevice::findMemoryType(uint32_t typeFilter, VkMemoryPropertyFlags properties) {
  VkPhysicalDeviceMemoryProperties memProperties;
  vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);

  for (uint32_t index = 0; index < memProperties.memoryTypeCount; ++index) {
    const bool allowedByFilter = (typeFilter >> index) & 1u;
    const bool hasAllProperties =
        (memProperties.memoryTypes[index].propertyFlags & properties) == properties;
    if (allowedByFilter && hasAllProperties) {
      return index;
    }
  }

  throw std::runtime_error("failed to find suitable memory type!");
}
|
|
|
|
// Creates a VkBuffer of `size` bytes with the given usage, allocates
// device memory satisfying `properties`, and binds the two together.
// Results are returned through `buffer` and `bufferMemory`; throws
// std::runtime_error on any failure.
void LveDevice::createBuffer(
    VkDeviceSize size,
    VkBufferUsageFlags usage,
    VkMemoryPropertyFlags properties,
    VkBuffer &buffer,
    VkDeviceMemory &bufferMemory) {
  VkBufferCreateInfo bufferInfo{};
  bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
  bufferInfo.size = size;
  bufferInfo.usage = usage;
  bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

  // This is a generic helper, so the error messages no longer claim
  // "vertex buffer" (copy-paste leftover from the tutorial).
  if (vkCreateBuffer(device_, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
    throw std::runtime_error("failed to create buffer!");
  }

  VkMemoryRequirements memRequirements;
  vkGetBufferMemoryRequirements(device_, buffer, &memRequirements);

  VkMemoryAllocateInfo allocInfo{};
  allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
  allocInfo.allocationSize = memRequirements.size;
  allocInfo.memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties);

  // TODO there is a maximum number of memory allocations
  // maxMemoryAllocationCount for a physical device. Should create a custom
  // allocator that batches together a large number of objects at once. See
  // https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator
  if (vkAllocateMemory(device_, &allocInfo, nullptr, &bufferMemory) != VK_SUCCESS) {
    // Don't leak the buffer handle when its backing allocation fails.
    vkDestroyBuffer(device_, buffer, nullptr);
    buffer = VK_NULL_HANDLE;
    throw std::runtime_error("failed to allocate buffer memory!");
  }

  vkBindBufferMemory(device_, buffer, bufferMemory, 0);
}
|
|
|
|
// Allocates and begins a primary command buffer for a one-shot recording.
// Pair every call with endSingleTimeCommands(), which submits, waits, and
// frees the buffer. Recorded with ONE_TIME_SUBMIT.
VkCommandBuffer LveDevice::beginSingleTimeCommands() {
  // You may wish to create a separate command pool for these kinds of
  // short-lived buffers, because the implementation may be able to apply
  // memory allocation optimizations. You should use the
  // VK_COMMAND_POOL_CREATE_TRANSIENT_BIT flag during command pool
  // generation in that case.
  VkCommandBufferAllocateInfo allocInfo{};
  allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
  allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
  allocInfo.commandPool = commandPool;
  allocInfo.commandBufferCount = 1;

  VkCommandBuffer commandBuffer;
  // Previously unchecked: a failed allocation handed back an invalid
  // handle that crashed later. Fail fast, matching the rest of this file.
  if (vkAllocateCommandBuffers(device_, &allocInfo, &commandBuffer) != VK_SUCCESS) {
    throw std::runtime_error("failed to allocate single-time command buffer!");
  }

  VkCommandBufferBeginInfo beginInfo{};
  beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
  beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;

  if (vkBeginCommandBuffer(commandBuffer, &beginInfo) != VK_SUCCESS) {
    // Release the buffer so a failed begin does not leak it.
    vkFreeCommandBuffers(device_, commandPool, 1, &commandBuffer);
    throw std::runtime_error("failed to begin single-time command buffer!");
  }
  return commandBuffer;
}
|
|
|
|
// Ends, submits, and synchronously waits for a command buffer obtained
// from beginSingleTimeCommands(), then frees it. Blocks until the
// graphics queue is idle, so this is for setup/transfer work, not
// per-frame recording.
void LveDevice::endSingleTimeCommands(VkCommandBuffer commandBuffer) {
  if (vkEndCommandBuffer(commandBuffer) != VK_SUCCESS) {
    vkFreeCommandBuffers(device_, commandPool, 1, &commandBuffer);
    throw std::runtime_error("failed to record single-time command buffer!");
  }

  VkSubmitInfo submitInfo{};
  submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
  submitInfo.commandBufferCount = 1;
  submitInfo.pCommandBuffers = &commandBuffer;

  // Previously unchecked: a failed submit (e.g. VK_ERROR_DEVICE_LOST)
  // silently dropped the work. The buffer is freed on every path so no
  // outcome leaks it.
  if (vkQueueSubmit(graphicsQueue_, 1, &submitInfo, VK_NULL_HANDLE) != VK_SUCCESS) {
    vkFreeCommandBuffers(device_, commandPool, 1, &commandBuffer);
    throw std::runtime_error("failed to submit single-time command buffer!");
  }
  vkQueueWaitIdle(graphicsQueue_);

  vkFreeCommandBuffers(device_, commandPool, 1, &commandBuffer);
}
|
|
|
|
// Synchronously copies `size` bytes from the start of srcBuffer to the
// start of dstBuffer via a one-shot command buffer on the graphics queue.
void LveDevice::copyBuffer(VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size) {
  VkCommandBuffer cmd = beginSingleTimeCommands();

  VkBufferCopy region{};
  region.srcOffset = 0;  // Optional
  region.dstOffset = 0;  // Optional
  region.size = size;
  vkCmdCopyBuffer(cmd, srcBuffer, dstBuffer, 1, &region);

  endSingleTimeCommands(cmd);
}
|
|
|
|
// Copies a tightly packed pixel buffer into `layerCount` layers of mip
// level 0 of `image`. The image must already be in
// TRANSFER_DST_OPTIMAL layout; the copy runs synchronously.
void LveDevice::copyBufferToImage(
    VkBuffer buffer, VkImage image, uint32_t width, uint32_t height, uint32_t layerCount) {
  VkCommandBuffer cmd = beginSingleTimeCommands();

  VkBufferImageCopy copy{};
  // bufferRowLength / bufferImageHeight of 0 mean "tightly packed".
  copy.bufferOffset = 0;
  copy.bufferRowLength = 0;
  copy.bufferImageHeight = 0;

  copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  copy.imageSubresource.mipLevel = 0;
  copy.imageSubresource.baseArrayLayer = 0;
  copy.imageSubresource.layerCount = layerCount;

  copy.imageOffset = {0, 0, 0};
  copy.imageExtent = {width, height, 1};

  vkCmdCopyBufferToImage(cmd, buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy);

  endSingleTimeCommands(cmd);
}
|
|
|
|
// True for the two depth formats that carry a stencil aspect.
bool hasStencilComponent(VkFormat format) {
  switch (format) {
    case VK_FORMAT_D32_SFLOAT_S8_UINT:
    case VK_FORMAT_D24_UNORM_S8_UINT:
      return true;
    default:
      return false;
  }
}
|
|
|
|
// Transitions `image` between layouts with a single image memory barrier
// recorded on a synchronous one-shot command buffer. Supported
// transitions (anything else throws std::invalid_argument):
//   UNDEFINED            -> TRANSFER_DST_OPTIMAL             (upload target)
//   TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL         (sample after upload)
//   UNDEFINED            -> DEPTH_STENCIL_ATTACHMENT_OPTIMAL (fresh depth buffer)
// The barrier covers all `mipLevels` mips and `layerCount` layers.
void LveDevice::transitionImageLayout(
    VkImage image,
    VkFormat format,
    VkImageLayout oldLayout,
    VkImageLayout newLayout,
    uint32_t mipLevels,
    uint32_t layerCount) {
  // uses an image memory barrier transition image layouts and transfer queue
  // family ownership when VK_SHARING_MODE_EXCLUSIVE is used. There is an
  // equivalent buffer memory barrier to do this for buffers
  VkCommandBuffer commandBuffer = beginSingleTimeCommands();

  VkImageMemoryBarrier barrier{};
  barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
  barrier.oldLayout = oldLayout;
  barrier.newLayout = newLayout;

  // IGNORED on both sides: layout transition only, no ownership transfer.
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;

  barrier.image = image;
  barrier.subresourceRange.baseMipLevel = 0;
  barrier.subresourceRange.levelCount = mipLevels;
  barrier.subresourceRange.baseArrayLayer = 0;
  barrier.subresourceRange.layerCount = layerCount;

  // Depth-stencil targets use the depth aspect (plus stencil when the
  // format carries one); everything else is treated as a color image.
  if (newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;

    if (hasStencilComponent(format)) {
      barrier.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
    }
  } else {
    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  }

  VkPipelineStageFlags sourceStage;
  VkPipelineStageFlags destinationStage;

  // Pick access masks and pipeline stages per supported transition.
  if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
    // Fresh image about to be written by a transfer: nothing to wait on.
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;

    sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
  } else if (
      oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
      newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
    // Transfer writes must finish before fragment shaders sample the image.
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;

    sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
    destinationStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
  } else if (
      oldLayout == VK_IMAGE_LAYOUT_UNDEFINED &&
      newLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
    // New depth buffer: must be ready before the early depth test touches it.
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask =
        VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

    sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    destinationStage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
  } else {
    throw std::invalid_argument("unsupported layout transition!");
  }
  vkCmdPipelineBarrier(
      commandBuffer,
      sourceStage,
      destinationStage,
      0,
      0,
      nullptr,
      0,
      nullptr,
      1,
      &barrier);

  endSingleTimeCommands(commandBuffer);
}
|
|
|
|
void LveDevice::createImage(
|
|
uint32_t width,
|
|
uint32_t height,
|
|
uint32_t mipLevels,
|
|
VkFormat format,
|
|
VkImageTiling tiling,
|
|
VkImageUsageFlags usage,
|
|
VkMemoryPropertyFlags properties,
|
|
VkImage &image,
|
|
VkDeviceMemory &imageMemory,
|
|
VkImageCreateFlags flags,
|
|
uint32_t arrayLayers) {
|
|
VkImageCreateInfo imageInfo{};
|
|
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
|
|
imageInfo.imageType = VK_IMAGE_TYPE_2D;
|
|
imageInfo.extent.width = width;
|
|
imageInfo.extent.height = height;
|
|
imageInfo.extent.depth = 1;
|
|
imageInfo.mipLevels = mipLevels;
|
|
imageInfo.arrayLayers = arrayLayers;
|
|
imageInfo.format = format;
|
|
imageInfo.tiling = tiling;
|
|
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
|
|
imageInfo.usage = usage;
|
|
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
|
|
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
|
|
imageInfo.flags = flags;
|
|
|
|
if (vkCreateImage(device_, &imageInfo, nullptr, &image) != VK_SUCCESS) {
|
|
throw std::runtime_error("failed to create image!");
|
|
}
|
|
|
|
VkMemoryRequirements memRequirements;
|
|
vkGetImageMemoryRequirements(device_, image, &memRequirements);
|
|
|
|
VkMemoryAllocateInfo allocInfo{};
|
|
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
|
|
allocInfo.allocationSize = memRequirements.size;
|
|
allocInfo.memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties);
|
|
|
|
if (vkAllocateMemory(device_, &allocInfo, nullptr, &imageMemory) != VK_SUCCESS) {
|
|
throw std::runtime_error("failed to allocate image memory!");
|
|
}
|
|
|
|
vkBindImageMemory(device_, image, imageMemory, 0);
|
|
}
|
|
|
|
// Builds an image view over all `mipLevels` of `image`. Cube views cover
// six array layers, everything else covers a single layer.
// NOTE(review): non-cube array views would need a layerCount parameter —
// confirm callers only create 2D and cube views.
VkImageView LveDevice::createImageView(
    VkImage image,
    VkFormat format,
    VkImageAspectFlags aspectFlags,
    uint32_t mipLevels,
    VkImageViewType viewType) {
  VkImageViewCreateInfo info{};
  info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
  info.image = image;
  info.viewType = viewType;
  info.format = format;
  info.subresourceRange.aspectMask = aspectFlags;
  info.subresourceRange.baseMipLevel = 0;
  info.subresourceRange.levelCount = mipLevels;
  info.subresourceRange.baseArrayLayer = 0;
  info.subresourceRange.layerCount = viewType == VK_IMAGE_VIEW_TYPE_CUBE ? 6 : 1;

  VkImageView view;
  if (vkCreateImageView(device_, &info, nullptr, &view) != VK_SUCCESS) {
    throw std::runtime_error("failed to create texture image view!");
  }

  return view;
}
|
|
|
|
// Generates a full mip chain by repeatedly blitting each level into the
// next at half resolution. Expects every level to be in
// TRANSFER_DST_OPTIMAL on entry (the layout left by the initial upload);
// on return all levels are SHADER_READ_ONLY_OPTIMAL. Runs synchronously.
// Throws if the format cannot be blitted with linear filtering.
void LveDevice::generateMipmaps(
    VkImage image, VkFormat imageFormat, int32_t texWidth, int32_t texHeight, uint32_t mipLevels) {
  VkCommandBuffer commandBuffer = beginSingleTimeCommands();

  // Check if image format supports linear blitting
  VkFormatProperties formatProperties;
  vkGetPhysicalDeviceFormatProperties(physicalDevice, imageFormat, &formatProperties);

  if (!(formatProperties.optimalTilingFeatures &
        VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) {
    throw std::runtime_error("texture image format does not support linear blitting!");
  }

  // One barrier struct reused for every transition; only the
  // level-specific fields (baseMipLevel, layouts, access masks) change.
  VkImageMemoryBarrier barrier{};
  barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
  barrier.image = image;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  barrier.subresourceRange.baseArrayLayer = 0;
  barrier.subresourceRange.layerCount = 1;
  barrier.subresourceRange.levelCount = 1;

  int32_t mipWidth = texWidth;
  int32_t mipHeight = texHeight;

  for (uint32_t i = 1; i < mipLevels; i++) {
    // Level i-1 was just written (by the upload or the previous blit);
    // make it readable as the blit source.
    barrier.subresourceRange.baseMipLevel = i - 1;
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;

    vkCmdPipelineBarrier(
        commandBuffer,
        VK_PIPELINE_STAGE_TRANSFER_BIT,
        VK_PIPELINE_STAGE_TRANSFER_BIT,
        0,
        0,
        nullptr,
        0,
        nullptr,
        1,
        &barrier);

    // Blit level i-1 into level i at half resolution (clamped to 1).
    VkImageBlit blit{};
    blit.srcOffsets[0] = {0, 0, 0};
    blit.srcOffsets[1] = {mipWidth, mipHeight, 1};
    blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    blit.srcSubresource.mipLevel = i - 1;
    blit.srcSubresource.baseArrayLayer = 0;
    blit.srcSubresource.layerCount = 1;
    blit.dstOffsets[0] = {0, 0, 0};
    blit.dstOffsets[1] = {mipWidth > 1 ? mipWidth / 2 : 1, mipHeight > 1 ? mipHeight / 2 : 1, 1};
    blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    blit.dstSubresource.mipLevel = i;
    blit.dstSubresource.baseArrayLayer = 0;
    blit.dstSubresource.layerCount = 1;

    vkCmdBlitImage(
        commandBuffer,
        image,
        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
        image,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        1,
        &blit,
        VK_FILTER_LINEAR);

    // Level i-1 is final; hand it off to the fragment-shader stage.
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;

    vkCmdPipelineBarrier(
        commandBuffer,
        VK_PIPELINE_STAGE_TRANSFER_BIT,
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
        0,
        0,
        nullptr,
        0,
        nullptr,
        1,
        &barrier);

    if (mipWidth > 1) mipWidth /= 2;
    if (mipHeight > 1) mipHeight /= 2;
  }

  // The last level was only ever a blit destination; transition it too.
  barrier.subresourceRange.baseMipLevel = mipLevels - 1;
  barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
  barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
  barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
  barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;

  vkCmdPipelineBarrier(
      commandBuffer,
      VK_PIPELINE_STAGE_TRANSFER_BIT,
      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
      0,
      0,
      nullptr,
      0,
      nullptr,
      1,
      &barrier);

  endSingleTimeCommands(commandBuffer);
}
|
|
|
|
} // namespace lve
|