vk: extract tex/image uploading routines

This commit is contained in:
Ivan Avdeev 2023-10-12 13:39:07 -04:00
parent 4158234fb2
commit 8ba7b3649e
3 changed files with 385 additions and 439 deletions

View File

@@ -1,6 +1,10 @@
#include "vk_image.h"
#include "vk_staging.h"
#include "vk_combuf.h"
#include "vk_logs.h"
#include "xash3d_mathlib.h" // Q_max
static const VkImageUsageFlags usage_bits_implying_views =
VK_IMAGE_USAGE_SAMPLED_BIT |
VK_IMAGE_USAGE_STORAGE_BIT |
@@ -17,278 +21,6 @@ static const VkImageUsageFlags usage_bits_implying_views =
VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR;
*/
// Translate an sRGB format into its UNORM counterpart.
// Formats that are already UNORM translate to themselves; formats without a
// known UNORM sibling come back as VK_FORMAT_UNDEFINED, which callers treat
// as "no separate unorm view possible".
static VkFormat unormFormatFor(VkFormat fmt) {
	switch (fmt) {
	case VK_FORMAT_R8_SRGB:
	case VK_FORMAT_R8_UNORM:
		return VK_FORMAT_R8_UNORM;
	case VK_FORMAT_R8G8_SRGB:
	case VK_FORMAT_R8G8_UNORM:
		return VK_FORMAT_R8G8_UNORM;
	case VK_FORMAT_R8G8B8_SRGB:
	case VK_FORMAT_R8G8B8_UNORM:
		return VK_FORMAT_R8G8B8_UNORM;
	case VK_FORMAT_B8G8R8_SRGB:
	case VK_FORMAT_B8G8R8_UNORM:
		return VK_FORMAT_B8G8R8_UNORM;
	case VK_FORMAT_R8G8B8A8_SRGB:
	case VK_FORMAT_R8G8B8A8_UNORM:
		return VK_FORMAT_R8G8B8A8_UNORM;
	case VK_FORMAT_B8G8R8A8_SRGB:
	case VK_FORMAT_B8G8R8A8_UNORM:
		return VK_FORMAT_B8G8R8A8_UNORM;
	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
		return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
		return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
		return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
	case VK_FORMAT_BC2_SRGB_BLOCK:
	case VK_FORMAT_BC2_UNORM_BLOCK:
		return VK_FORMAT_BC2_UNORM_BLOCK;
	case VK_FORMAT_BC3_SRGB_BLOCK:
	case VK_FORMAT_BC3_UNORM_BLOCK:
		return VK_FORMAT_BC3_UNORM_BLOCK;
	case VK_FORMAT_BC7_SRGB_BLOCK:
	case VK_FORMAT_BC7_UNORM_BLOCK:
		return VK_FORMAT_BC7_UNORM_BLOCK;
	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
		return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
		return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
		return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
		return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
		return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
		return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
		return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
		return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
		return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
		return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
		return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
		return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
		return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
		return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
		return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
		return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
		return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
	case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
	case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
		return VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG;
	case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
	case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
		return VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG;
	case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
	case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
		return VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG;
	case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
	case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
		return VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG;
	default:
		return VK_FORMAT_UNDEFINED;
	}
}
r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create) {
const qboolean is_depth = !!(create->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
r_vk_image_t image = {0};
VkMemoryRequirements memreq;
const qboolean is_cubemap = !!(create->flags & kVkImageFlagIsCubemap);
const VkFormat unorm_format = unormFormatFor(create->format);
const qboolean create_unorm =
!!(create->flags & kVkImageFlagCreateUnormView)
&& unorm_format != VK_FORMAT_UNDEFINED
&& unorm_format != create->format;
VkImageCreateInfo ici = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.imageType = VK_IMAGE_TYPE_2D,
.extent.width = create->width,
.extent.height = create->height,
.extent.depth = 1,
.mipLevels = create->mips,
.arrayLayers = create->layers,
.format = create->format,
.tiling = create->tiling,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.usage = create->usage,
.samples = VK_SAMPLE_COUNT_1_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.flags = 0
| (is_cubemap ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0)
| (create_unorm ? VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT : 0),
};
XVK_CHECK(vkCreateImage(vk_core.device, &ici, NULL, &image.image));
image.format = ici.format;
if (create->debug_name)
SET_DEBUG_NAME(image.image, VK_OBJECT_TYPE_IMAGE, create->debug_name);
vkGetImageMemoryRequirements(vk_core.device, image.image, &memreq);
image.devmem = VK_DevMemAllocate(create->debug_name, memreq, create->memory_props ? create->memory_props : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
XVK_CHECK(vkBindImageMemory(vk_core.device, image.image, image.devmem.device_memory, image.devmem.offset));
if (create->usage & usage_bits_implying_views) {
const qboolean has_alpha = !!(create->flags & kVkImageFlagHasAlpha);
VkImageViewCreateInfo ivci = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.viewType = is_cubemap ? VK_IMAGE_VIEW_TYPE_CUBE : VK_IMAGE_VIEW_TYPE_2D,
.format = ici.format,
.image = image.image,
.subresourceRange.aspectMask = is_depth ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT,
.subresourceRange.baseMipLevel = 0,
.subresourceRange.levelCount = ici.mipLevels,
.subresourceRange.baseArrayLayer = 0,
.subresourceRange.layerCount = ici.arrayLayers,
// TODO component swizzling based on format, e.g. R8 -> RRRR
.components = (VkComponentMapping){0, 0, 0, (is_depth || has_alpha) ? 0 : VK_COMPONENT_SWIZZLE_ONE},
};
XVK_CHECK(vkCreateImageView(vk_core.device, &ivci, NULL, &image.view));
if (create->debug_name)
SET_DEBUG_NAME(image.view, VK_OBJECT_TYPE_IMAGE_VIEW, create->debug_name);
if (create_unorm) {
ivci.format = unorm_format;
XVK_CHECK(vkCreateImageView(vk_core.device, &ivci, NULL, &image.view_unorm));
if (create->debug_name)
SET_DEBUG_NAMEF(image.view_unorm, VK_OBJECT_TYPE_IMAGE_VIEW, "%s_unorm", create->debug_name);
}
}
image.width = create->width;
image.height = create->height;
image.mips = create->mips;
return image;
}
// Release the image's views (if any), the image itself, and its backing
// device memory, then zero-reset *img so stale handles can't be reused.
void R_VkImageDestroy(r_vk_image_t *img) {
	if (img->view != VK_NULL_HANDLE)
		vkDestroyImageView(vk_core.device, img->view, NULL);

	if (img->view_unorm != VK_NULL_HANDLE)
		vkDestroyImageView(vk_core.device, img->view_unorm, NULL);

	vkDestroyImage(vk_core.device, img->image, NULL);
	VK_DevMemFree(&img->devmem);

	*img = (r_vk_image_t){0};
}
// Transition the image (mip 0, layer 0, color aspect) from UNDEFINED to
// GENERAL, discarding any previous contents, and clear it to zero.
void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image) {
	const VkImageSubresourceRange subres_range = {
		.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
		.baseMipLevel = 0,
		.levelCount = 1,
		.baseArrayLayer = 0,
		.layerCount = 1,
	};

	const VkImageMemoryBarrier to_general = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		.image = image,
		.srcAccessMask = 0,
		.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
		// UNDEFINED oldLayout means the previous contents may be discarded
		.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
		.newLayout = VK_IMAGE_LAYOUT_GENERAL,
		.subresourceRange = subres_range,
	};

	vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
		0, NULL, 0, NULL, 1, &to_general);

	const VkClearColorValue clear_value = {0};
	vkCmdClearColorImage(cmdbuf, image, VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &subres_range);
}
// Blit the whole src image (mip 0, layer 0, color aspect) into the whole dst
// image with nearest filtering, then leave dst in COLOR_ATTACHMENT_OPTIMAL.
// blit_args supplies each image's previous layout/access mask and the
// pipeline stage that produced src (in_stage).
void R_VkImageBlit(VkCommandBuffer cmdbuf, const r_vkimage_blit_args *blit_args) {
{
// 1. Transition src -> TRANSFER_SRC_OPTIMAL and dst -> TRANSFER_DST_OPTIMAL
const VkImageMemoryBarrier image_barriers[] = { {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = blit_args->src.image,
.srcAccessMask = blit_args->src.srcAccessMask,
.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
.oldLayout = blit_args->src.oldLayout,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.subresourceRange =
(VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
}, {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = blit_args->dst.image,
.srcAccessMask = blit_args->dst.srcAccessMask,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = blit_args->dst.oldLayout,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.subresourceRange =
(VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
} };
vkCmdPipelineBarrier(cmdbuf,
blit_args->in_stage,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers);
}
{
// 2. Blit full extents; offsets[0] stays zero so src is scaled to dst size
VkImageBlit region = {0};
region.srcOffsets[1].x = blit_args->src.width;
region.srcOffsets[1].y = blit_args->src.height;
region.srcOffsets[1].z = 1;
region.dstOffsets[1].x = blit_args->dst.width;
region.dstOffsets[1].y = blit_args->dst.height;
region.dstOffsets[1].z = 1;
region.srcSubresource.aspectMask = region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.srcSubresource.layerCount = region.dstSubresource.layerCount = 1;
vkCmdBlitImage(cmdbuf,
blit_args->src.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
blit_args->dst.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1, &region,
VK_FILTER_NEAREST);
}
{
// 3. Make dst usable as a color attachment again
VkImageMemoryBarrier image_barriers[] = {
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = blit_args->dst.image,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
.subresourceRange =
(VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
}};
vkCmdPipelineBarrier(cmdbuf,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
0, 0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers);
}
}
uint32_t R_VkImageFormatTexelBlockSize( VkFormat format ) {
switch (format) {
case VK_FORMAT_R4G4_UNORM_PACK8:
@@ -653,3 +385,367 @@ uint32_t R_VkImageFormatTexelBlockSize( VkFormat format ) {
return 4;
}
// Maps an sRGB VkFormat to its UNORM sibling; formats that are already UNORM
// map to themselves, and unknown formats yield VK_FORMAT_UNDEFINED (meaning
// "no unorm view can be made"). Used by R_VkImageCreate when
// kVkImageFlagCreateUnormView is requested.
static VkFormat unormFormatFor(VkFormat fmt) {
switch (fmt) {
case VK_FORMAT_R8_SRGB: return VK_FORMAT_R8_UNORM;
case VK_FORMAT_R8_UNORM: return VK_FORMAT_R8_UNORM;
case VK_FORMAT_R8G8_SRGB: return VK_FORMAT_R8G8_UNORM;
case VK_FORMAT_R8G8_UNORM: return VK_FORMAT_R8G8_UNORM;
case VK_FORMAT_R8G8B8_SRGB: return VK_FORMAT_R8G8B8_UNORM;
case VK_FORMAT_R8G8B8_UNORM: return VK_FORMAT_R8G8B8_UNORM;
case VK_FORMAT_B8G8R8_SRGB: return VK_FORMAT_B8G8R8_UNORM;
case VK_FORMAT_B8G8R8_UNORM: return VK_FORMAT_B8G8R8_UNORM;
case VK_FORMAT_R8G8B8A8_SRGB: return VK_FORMAT_R8G8B8A8_UNORM;
case VK_FORMAT_R8G8B8A8_UNORM: return VK_FORMAT_R8G8B8A8_UNORM;
case VK_FORMAT_B8G8R8A8_SRGB: return VK_FORMAT_B8G8R8A8_UNORM;
case VK_FORMAT_B8G8R8A8_UNORM: return VK_FORMAT_B8G8R8A8_UNORM;
case VK_FORMAT_A8B8G8R8_SRGB_PACK32: return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
case VK_FORMAT_A8B8G8R8_UNORM_PACK32: return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
case VK_FORMAT_BC1_RGB_SRGB_BLOCK: return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
case VK_FORMAT_BC1_RGB_UNORM_BLOCK: return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
case VK_FORMAT_BC2_SRGB_BLOCK: return VK_FORMAT_BC2_UNORM_BLOCK;
case VK_FORMAT_BC2_UNORM_BLOCK: return VK_FORMAT_BC2_UNORM_BLOCK;
case VK_FORMAT_BC3_SRGB_BLOCK: return VK_FORMAT_BC3_UNORM_BLOCK;
case VK_FORMAT_BC3_UNORM_BLOCK: return VK_FORMAT_BC3_UNORM_BLOCK;
case VK_FORMAT_BC7_SRGB_BLOCK: return VK_FORMAT_BC7_UNORM_BLOCK;
case VK_FORMAT_BC7_UNORM_BLOCK: return VK_FORMAT_BC7_UNORM_BLOCK;
case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK: return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK: return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
case VK_FORMAT_ASTC_4x4_SRGB_BLOCK: return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
case VK_FORMAT_ASTC_4x4_UNORM_BLOCK: return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
case VK_FORMAT_ASTC_5x4_SRGB_BLOCK: return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
case VK_FORMAT_ASTC_5x4_UNORM_BLOCK: return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
case VK_FORMAT_ASTC_5x5_SRGB_BLOCK: return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
case VK_FORMAT_ASTC_5x5_UNORM_BLOCK: return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
case VK_FORMAT_ASTC_6x5_SRGB_BLOCK: return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
case VK_FORMAT_ASTC_6x5_UNORM_BLOCK: return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
case VK_FORMAT_ASTC_6x6_SRGB_BLOCK: return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
case VK_FORMAT_ASTC_6x6_UNORM_BLOCK: return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
case VK_FORMAT_ASTC_8x5_SRGB_BLOCK: return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
case VK_FORMAT_ASTC_8x5_UNORM_BLOCK: return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
case VK_FORMAT_ASTC_8x6_SRGB_BLOCK: return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
case VK_FORMAT_ASTC_8x6_UNORM_BLOCK: return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
case VK_FORMAT_ASTC_8x8_SRGB_BLOCK: return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
case VK_FORMAT_ASTC_8x8_UNORM_BLOCK: return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
case VK_FORMAT_ASTC_10x5_SRGB_BLOCK: return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
case VK_FORMAT_ASTC_10x5_UNORM_BLOCK: return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
case VK_FORMAT_ASTC_10x6_SRGB_BLOCK: return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
case VK_FORMAT_ASTC_10x6_UNORM_BLOCK: return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
case VK_FORMAT_ASTC_10x8_SRGB_BLOCK: return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
case VK_FORMAT_ASTC_10x8_UNORM_BLOCK: return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
case VK_FORMAT_ASTC_10x10_SRGB_BLOCK: return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
case VK_FORMAT_ASTC_10x10_UNORM_BLOCK: return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
case VK_FORMAT_ASTC_12x10_SRGB_BLOCK: return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
case VK_FORMAT_ASTC_12x10_UNORM_BLOCK: return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
case VK_FORMAT_ASTC_12x12_SRGB_BLOCK: return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK: return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG: return VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG;
case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG: return VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG;
case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG: return VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG;
case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG: return VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG;
case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG: return VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG;
case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG: return VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG;
case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG: return VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG;
case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG: return VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG;
default:
return VK_FORMAT_UNDEFINED;
}
}
// Creates a 2D (or cubemap-compatible) image with create->mips mip levels and
// create->layers array layers, allocates/binds device memory, and — when the
// usage flags imply shader or attachment access — creates an image view, plus
// an optional second UNORM-format view (kVkImageFlagCreateUnormView).
// Returns the filled-in r_vk_image_t by value.
r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create) {
const qboolean is_depth = !!(create->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
r_vk_image_t image = {0};
VkMemoryRequirements memreq;
const qboolean is_cubemap = !!(create->flags & kVkImageFlagIsCubemap);
const VkFormat unorm_format = unormFormatFor(create->format);
// A separate unorm view only makes sense when requested AND the base format
// has a distinct UNORM sibling.
const qboolean create_unorm =
!!(create->flags & kVkImageFlagCreateUnormView)
&& unorm_format != VK_FORMAT_UNDEFINED
&& unorm_format != create->format;
VkImageCreateInfo ici = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.imageType = VK_IMAGE_TYPE_2D,
.extent.width = create->width,
.extent.height = create->height,
.extent.depth = 1,
.mipLevels = create->mips,
.arrayLayers = create->layers,
.format = create->format,
.tiling = create->tiling,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.usage = create->usage,
.samples = VK_SAMPLE_COUNT_1_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
// MUTABLE_FORMAT is needed so a view with a different (unorm) format can
// be created over this image.
.flags = 0
| (is_cubemap ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0)
| (create_unorm ? VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT : 0),
};
XVK_CHECK(vkCreateImage(vk_core.device, &ici, NULL, &image.image));
image.format = ici.format;
if (create->debug_name)
SET_DEBUG_NAME(image.image, VK_OBJECT_TYPE_IMAGE, create->debug_name);
// Back the image with device memory; device-local unless the caller
// overrides memory_props.
vkGetImageMemoryRequirements(vk_core.device, image.image, &memreq);
image.devmem = VK_DevMemAllocate(create->debug_name, memreq, create->memory_props ? create->memory_props : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
XVK_CHECK(vkBindImageMemory(vk_core.device, image.image, image.devmem.device_memory, image.devmem.offset));
if (create->usage & usage_bits_implying_views) {
const qboolean has_alpha = !!(create->flags & kVkImageFlagHasAlpha);
VkImageViewCreateInfo ivci = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.viewType = is_cubemap ? VK_IMAGE_VIEW_TYPE_CUBE : VK_IMAGE_VIEW_TYPE_2D,
.format = ici.format,
.image = image.image,
.subresourceRange.aspectMask = is_depth ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT,
.subresourceRange.baseMipLevel = 0,
.subresourceRange.levelCount = ici.mipLevels,
.subresourceRange.baseArrayLayer = 0,
.subresourceRange.layerCount = ici.arrayLayers,
// TODO component swizzling based on format, e.g. R8 -> RRRR
.components = (VkComponentMapping){0, 0, 0, (is_depth || has_alpha) ? 0 : VK_COMPONENT_SWIZZLE_ONE},
};
XVK_CHECK(vkCreateImageView(vk_core.device, &ivci, NULL, &image.view));
if (create->debug_name)
SET_DEBUG_NAME(image.view, VK_OBJECT_TYPE_IMAGE_VIEW, create->debug_name);
if (create_unorm) {
// Second view over the same image, reinterpreted with the unorm format.
ivci.format = unorm_format;
XVK_CHECK(vkCreateImageView(vk_core.device, &ivci, NULL, &image.view_unorm));
if (create->debug_name)
SET_DEBUG_NAMEF(image.view_unorm, VK_OBJECT_TYPE_IMAGE_VIEW, "%s_unorm", create->debug_name);
}
}
image.width = create->width;
image.height = create->height;
image.mips = create->mips;
image.layers = create->layers;
return image;
}
// Releases the image's views (if any), the image itself, and its backing
// device memory, then zero-resets *img so stale handles cannot be reused.
void R_VkImageDestroy(r_vk_image_t *img) {
if (img->view_unorm != VK_NULL_HANDLE)
vkDestroyImageView(vk_core.device, img->view_unorm, NULL);
if (img->view != VK_NULL_HANDLE)
vkDestroyImageView(vk_core.device, img->view, NULL);
vkDestroyImage(vk_core.device, img->image, NULL);
VK_DevMemFree(&img->devmem);
*img = (r_vk_image_t){0};
}
// Transitions the image (mip 0, layer 0, color aspect) from UNDEFINED to
// GENERAL — discarding previous contents — and clears it to zero.
void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image) {
const VkImageMemoryBarrier image_barriers[] = { {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = image,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.subresourceRange = (VkImageSubresourceRange) {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
}} };
const VkClearColorValue clear_value = {0};
vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers);
// Reuses the barrier's subresource range for the clear itself.
vkCmdClearColorImage(cmdbuf, image, VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &image_barriers->subresourceRange);
}
// Blit the whole src image (mip 0, layer 0, color aspect) into the whole dst
// image with nearest filtering, then leave dst in COLOR_ATTACHMENT_OPTIMAL.
// blit_args supplies each image's previous layout/access mask and the
// pipeline stage that produced src (in_stage).
void R_VkImageBlit(VkCommandBuffer cmdbuf, const r_vkimage_blit_args *blit_args) {
{
// 1. Transition src -> TRANSFER_SRC_OPTIMAL and dst -> TRANSFER_DST_OPTIMAL
const VkImageMemoryBarrier image_barriers[] = { {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = blit_args->src.image,
.srcAccessMask = blit_args->src.srcAccessMask,
.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
.oldLayout = blit_args->src.oldLayout,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.subresourceRange =
(VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
}, {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = blit_args->dst.image,
.srcAccessMask = blit_args->dst.srcAccessMask,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = blit_args->dst.oldLayout,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.subresourceRange =
(VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
} };
vkCmdPipelineBarrier(cmdbuf,
blit_args->in_stage,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers);
}
{
// 2. Blit full extents; offsets[0] stays zero so src is scaled to dst size
VkImageBlit region = {0};
region.srcOffsets[1].x = blit_args->src.width;
region.srcOffsets[1].y = blit_args->src.height;
region.srcOffsets[1].z = 1;
region.dstOffsets[1].x = blit_args->dst.width;
region.dstOffsets[1].y = blit_args->dst.height;
region.dstOffsets[1].z = 1;
region.srcSubresource.aspectMask = region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.srcSubresource.layerCount = region.dstSubresource.layerCount = 1;
vkCmdBlitImage(cmdbuf,
blit_args->src.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
blit_args->dst.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1, &region,
VK_FILTER_NEAREST);
}
{
// 3. Make dst usable as a color attachment again
VkImageMemoryBarrier image_barriers[] = {
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = blit_args->dst.image,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
.subresourceRange =
(VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
}};
vkCmdPipelineBarrier(cmdbuf,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
0, 0, NULL, 0, NULL, COUNTOF(image_barriers), image_barriers);
}
}
// Begin a staged upload: transition every mip/layer of the image from
// UNDEFINED (previous contents discarded) to TRANSFER_DST_OPTIMAL so that
// R_VkImageUploadSlice() copies can be recorded.
void R_VkImageUploadBegin( r_vk_image_t *img ) {
	// The staging command buffer may be invalidated by any slice load, so it
	// is re-acquired here instead of being cached by the caller.
	const VkCommandBuffer cmdbuf = R_VkStagingGetCommandBuffer();

	const VkImageMemoryBarrier to_transfer_dst = {
		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		.image = img->image,
		.srcAccessMask = 0,
		.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
		.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
		.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
		.subresourceRange = (VkImageSubresourceRange) {
			.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
			.baseMipLevel = 0,
			.levelCount = img->mips,
			.baseArrayLayer = 0,
			.layerCount = img->layers,
		},
	};

	vkCmdPipelineBarrier(cmdbuf,
		VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
		VK_PIPELINE_STAGE_TRANSFER_BIT,
		0, 0, NULL, 0, NULL, 1, &to_transfer_dst);
}
// Upload `size` bytes of pixel data into one (layer, mip) slice of the image
// via the staging buffer. The image must already be in
// TRANSFER_DST_OPTIMAL (see R_VkImageUploadBegin).
void R_VkImageUploadSlice( r_vk_image_t *img, int layer, int mip, int size, const void *data ) {
	// Clamp mip dimensions to 1 so deep mips of non-square images stay valid
	const uint32_t mip_width = Q_max(1, img->width >> mip);
	const uint32_t mip_height = Q_max(1, img->height >> mip);

	const vk_staging_image_args_t args = {
		.image = img->image,
		.layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
		.size = size,
		// Staging offset must be aligned to the format's texel block size
		.alignment = R_VkImageFormatTexelBlockSize(img->format),
		.region = (VkBufferImageCopy) {
			.bufferOffset = 0,
			.bufferRowLength = 0,
			.bufferImageHeight = 0,
			.imageSubresource = (VkImageSubresourceLayers){
				.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
				.mipLevel = mip,
				.baseArrayLayer = layer,
				.layerCount = 1,
			},
			.imageExtent = (VkExtent3D){
				.width = mip_width,
				.height = mip_height,
				.depth = 1,
			},
		},
	};

	const vk_staging_region_t staging = R_VkStagingLockForImage(args);
	ASSERT(staging.ptr);
	memcpy(staging.ptr, data, size);
	R_VkStagingUnlock(staging.handle);
}
// Finish a staged upload: transition every mip/layer of the image from
// TRANSFER_DST_OPTIMAL to SHADER_READ_ONLY_OPTIMAL so shaders can sample it.
void R_VkImageUploadEnd( r_vk_image_t *img ) {
// TODO Don't change layout here. Alternatively:
// I. Attach layout metadata to the image, and request its change next time it is used.
// II. Build-in layout transfer to staging commit and do it there on commit.
const VkImageMemoryBarrier image_barrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = img->image,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.subresourceRange = (VkImageSubresourceRange) {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = img->mips,
.baseArrayLayer = 0,
.layerCount = img->layers,
}
};
// Commit is needed to make sure that all previous image loads have been submitted to cmdbuf
const VkCommandBuffer cmdbuf = R_VkStagingCommit()->cmdbuf;
vkCmdPipelineBarrier(cmdbuf,
VK_PIPELINE_STAGE_TRANSFER_BIT,
// FIXME incorrect, we also use them in compute and potentially ray tracing shaders
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0, 0, NULL, 0, NULL, 1, &image_barrier);
}

View File

@@ -12,7 +12,7 @@ typedef struct r_vk_image_s {
VkImageView view_unorm;
uint32_t width, height;
int mips;
int mips, layers;
VkFormat format;
} r_vk_image_t;
@@ -51,3 +51,7 @@ typedef struct {
void R_VkImageBlit( VkCommandBuffer cmdbuf, const r_vkimage_blit_args *blit_args );
uint32_t R_VkImageFormatTexelBlockSize( VkFormat format );
void R_VkImageUploadBegin( r_vk_image_t *img );
void R_VkImageUploadSlice( r_vk_image_t *img, int layer, int mip, int size, const void *data );
void R_VkImageUploadEnd( r_vk_image_t *img );

View File

@@ -716,103 +716,26 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
}
{
// 5.1 upload buf -> image:layout:DST
// 5.1.1 transitionToLayout(UNDEFINED -> DST)
VkImageMemoryBarrier image_barrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = tex->vk.image.image,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.subresourceRange = (VkImageSubresourceRange) {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipCount,
.baseArrayLayer = 0,
.layerCount = num_layers,
}};
R_VkImageUploadBegin(&tex->vk.image);
{
// cmdbuf may become invalidated in locks in the loops below
const VkCommandBuffer cmdbuf = R_VkStagingGetCommandBuffer();
vkCmdPipelineBarrier(cmdbuf,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, NULL, 0, NULL, 1, &image_barrier);
}
// 5.1.2 copyBufferToImage for all mip levels
for (int layer = 0; layer < num_layers; ++layer) {
for (int mip = 0; mip < mipCount; ++mip) {
const rgbdata_t *const pic = layers[layer];
byte *buf = pic->buffer;
const int width = Q_max( 1, ( pic->width >> mip ));
const int height = Q_max( 1, ( pic->height >> mip ));
const size_t mip_size = CalcImageSize( pic->type, width, height, 1 );
const uint32_t texel_block_size = R_VkImageFormatTexelBlockSize(format);
const vk_staging_image_args_t staging_args = {
.image = tex->vk.image.image,
.region = (VkBufferImageCopy) {
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = (VkImageSubresourceLayers){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = mip,
.baseArrayLayer = layer,
.layerCount = 1,
},
.imageExtent = (VkExtent3D){
.width = width,
.height = height,
.depth = 1,
},
},
.layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.size = mip_size,
.alignment = texel_block_size,
};
const vk_staging_region_t staging = R_VkStagingLockForImage(staging_args);
ASSERT(staging.ptr);
memcpy(staging.ptr, buf, mip_size);
byte *const buf = pic->buffer;
R_VkImageUploadSlice(&tex->vk.image, layer, mip, mip_size, buf);
tex->total_size += mip_size;
// Build mip in place for the next mip level
if ( mip < mipCount - 1 )
{
BuildMipMap( buf, width, height, 1, tex->flags );
}
R_VkStagingUnlock(staging.handle);
}
}
// TODO Don't change layout here. Alternatively:
// I. Attach layout metadata to the image, and request its change next time it is used.
// II. Build-in layout transfer to staging commit and do it there on commit.
const VkCommandBuffer cmdbuf = R_VkStagingCommit()->cmdbuf;
// 5.2 image:layout:DST -> image:layout:SAMPLED
// 5.2.1 transitionToLayout(DST -> SHADER_READ_ONLY)
image_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
image_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
image_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
image_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
image_barrier.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipCount,
.baseArrayLayer = 0,
.layerCount = num_layers,
};
vkCmdPipelineBarrier(cmdbuf,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // FIXME incorrect, we also use them in compute and potentially ray tracing shaders
0, 0, NULL, 0, NULL, 1, &image_barrier);
R_VkImageUploadEnd(&tex->vk.image);
}
// TODO how should we approach this:
@@ -950,7 +873,6 @@ static qboolean loadKtx2Raw( vk_texture_t *tex, const rgbdata_t* pic ) {
// FIXME has_alpha
// FIXME no supercompressionScheme
// 1. Create image
{
const r_vk_image_create_t create = {
.debug_name = tex->name,
@@ -967,98 +889,22 @@ static qboolean loadKtx2Raw( vk_texture_t *tex, const rgbdata_t* pic ) {
tex->vk.image = R_VkImageCreate(&create);
}
// 2. Prep cmdbuf, barrier, etc
{
VkImageMemoryBarrier image_barrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.image = tex->vk.image.image,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.subresourceRange = (VkImageSubresourceRange) {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = header->levelCount,
.baseArrayLayer = 0,
.layerCount = 1, // TODO cubemap
}
};
R_VkImageUploadBegin(&tex->vk.image);
{
// cmdbuf may become invalidated in locks in the loops below
const VkCommandBuffer cmdbuf = R_VkStagingGetCommandBuffer();
vkCmdPipelineBarrier(cmdbuf,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, NULL, 0, NULL, 1, &image_barrier);
}
// 3. For levels
// 3.1 upload
// TODO layers
for (int mip = 0; mip < header->levelCount; ++mip) {
const ktx2_level_t* const level = levels + mip;
const uint32_t width = Q_max(1, header->pixelWidth >> mip);
const uint32_t height = Q_max(1, header->pixelHeight >> mip);
const size_t mip_size = level->byteLength;
const uint32_t texel_block_size = R_VkImageFormatTexelBlockSize(header->vkFormat);
const void* const image_data = data + level->byteOffset;
const vk_staging_image_args_t staging_args = {
.image = tex->vk.image.image,
.region = (VkBufferImageCopy) {
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = (VkImageSubresourceLayers){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = mip,
.baseArrayLayer = 0, // TODO cubemap
.layerCount = 1,
},
.imageExtent = (VkExtent3D){
.width = width,
.height = height,
.depth = 1,
},
},
.layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.size = mip_size,
.alignment = texel_block_size,
};
// FIXME validate wrt file size
{
const vk_staging_region_t staging = R_VkStagingLockForImage(staging_args);
ASSERT(staging.ptr);
memcpy(staging.ptr, image_data, mip_size);
tex->total_size += mip_size;
R_VkStagingUnlock(staging.handle);
}
} // for levels
const int layer = 0;
R_VkImageUploadSlice(&tex->vk.image, layer, mip, mip_size, image_data);
tex->total_size += mip_size;
} // for mip levels
{
// TODO Don't change layout here. Alternatively:
// I. Attach layout metadata to the image, and request its change next time it is used.
// II. Build-in layout transfer to staging commit and do it there on commit.
const VkCommandBuffer cmdbuf = R_VkStagingCommit()->cmdbuf;
// 5.2 image:layout:DST -> image:layout:SAMPLED
// 5.2.1 transitionToLayout(DST -> SHADER_READ_ONLY)
image_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
image_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
image_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
image_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
image_barrier.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = header->levelCount,
.baseArrayLayer = 0,
.layerCount = 1, // TODO cubemap
};
vkCmdPipelineBarrier(cmdbuf,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // FIXME incorrect, we also use them in compute and potentially ray tracing shaders
0, 0, NULL, 0, NULL, 1, &image_barrier);
}
R_VkImageUploadEnd(&tex->vk.image);
}
// KTX2 textures are inaccessible from trad renderer (for now)