vk: image: make a raw unorm image view for trad renderer

Traditional renderer operates in sRGB-γ space (as everyone was in 199x).
Making vulkan textures explicitly SRGB for RT renderer breaks color for
trad renderer. Make sure trad renderer still has raw SRGB-unaware texture
sampling.

Adds an UNORM image view for all relevant SRGB textures, and uses them
exclusively for trad renderer. RT still gets proper SRGB views.

Also rename XVK_ to R_Vk for images
This commit is contained in:
Ivan Avdeev 2023-10-09 12:39:12 -04:00
parent 9b9e89adec
commit 1d2da5831e
12 changed files with 207 additions and 73 deletions

View File

@ -9,5 +9,5 @@ layout(location=1) in vec4 vColor;
layout(location = 0) out vec4 outColor;
void main() {
outColor = LINEARtoSRGB(texture(tex, vUv)) * vColor;
outColor = texture(tex, vUv) * vColor;
}

View File

@ -30,7 +30,8 @@ const float dlight_attenuation_const = 5000.;
void main() {
outColor = vec4(0.);
const vec4 tex_color = LINEARtoSRGB(texture(sTexture0, vTexture0));
const vec4 tex_color = texture(sTexture0, vTexture0);
// TODO make sure textures are premultiplied alpha
const vec4 baseColor = vColor * tex_color;
@ -38,7 +39,7 @@ void main() {
discard;
outColor.a = baseColor.a;
outColor.rgb += baseColor.rgb * LINEARtoSRGB(texture(sLightmap, vLightmapUV).rgb);
outColor.rgb += baseColor.rgb * texture(sLightmap, vLightmapUV).rgb;
for (uint i = 0; i < ubo.num_lights; ++i) {
const vec4 light_pos_r = ubo.lights[i].pos_r;

View File

@ -12,7 +12,8 @@ typedef struct descriptor_pool_s
int next_free;
//uint32_t *free_set;
VkDescriptorSet sets[MAX_TEXTURES];
// * 2 because of unorm views for trad renderer
VkDescriptorSet sets[MAX_TEXTURES * 2];
VkDescriptorSetLayout one_texture_layout;
// FIXME HOW THE F
@ -31,7 +32,7 @@ typedef union {
VkDescriptorImageInfo image;
VkDescriptorImageInfo *image_array;
VkWriteDescriptorSetAccelerationStructureKHR accel;
const struct xvk_image_s *image_object;
const struct r_vk_image_s *image_object;
} vk_descriptor_value_t;
typedef struct {

View File

@ -510,7 +510,7 @@ static qboolean canBlitFromSwapchainToFormat( VkFormat dest_format ) {
static rgbdata_t *XVK_ReadPixels( void ) {
const VkFormat dest_format = VK_FORMAT_R8G8B8A8_UNORM;
xvk_image_t dest_image;
r_vk_image_t dest_image;
const VkImage frame_image = g_frame.current.framebuffer.image;
rgbdata_t *r_shot = NULL;
qboolean blit = canBlitFromSwapchainToFormat( dest_format );
@ -525,7 +525,7 @@ static rgbdata_t *XVK_ReadPixels( void ) {
// Create destination image to blit/copy framebuffer pixels to
{
const xvk_image_create_t xic = {
const r_vk_image_create_t xic = {
.debug_name = "screenshot",
.width = vk_frame.width,
.height = vk_frame.height,
@ -534,11 +534,10 @@ static rgbdata_t *XVK_ReadPixels( void ) {
.format = dest_format,
.tiling = VK_IMAGE_TILING_LINEAR,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT,
.has_alpha = false,
.is_cubemap = false,
.flags = 0,
.memory_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
};
dest_image = XVK_ImageCreate(&xic);
dest_image = R_VkImageCreate(&xic);
}
// Make sure that all rendering ops are enqueued
@ -700,7 +699,7 @@ static rgbdata_t *XVK_ReadPixels( void ) {
}
}
XVK_ImageDestroy( &dest_image );
R_VkImageDestroy( &dest_image );
return r_shot;
}

View File

@ -1,4 +1,5 @@
#include "vk_image.h"
#include "vk_logs.h"
static const VkImageUsageFlags usage_bits_implying_views =
VK_IMAGE_USAGE_SAMPLED_BIT |
@ -16,11 +17,87 @@ static const VkImageUsageFlags usage_bits_implying_views =
VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR;
*/
xvk_image_t XVK_ImageCreate(const xvk_image_create_t *create) {
// Maps an sRGB VkFormat to its UNORM twin (same bit layout, no implicit
// sRGB decode on sampling). Formats that are already UNORM map to
// themselves; anything without a known UNORM counterpart yields
// VK_FORMAT_UNDEFINED so the caller can skip creating the extra view.
static VkFormat unormFormatFor(VkFormat fmt) {
	switch (fmt) {
	// Plain 8-bit-per-channel formats
	case VK_FORMAT_R8_SRGB:
	case VK_FORMAT_R8_UNORM:
		return VK_FORMAT_R8_UNORM;
	case VK_FORMAT_R8G8_SRGB:
	case VK_FORMAT_R8G8_UNORM:
		return VK_FORMAT_R8G8_UNORM;
	case VK_FORMAT_R8G8B8_SRGB:
	case VK_FORMAT_R8G8B8_UNORM:
		return VK_FORMAT_R8G8B8_UNORM;
	case VK_FORMAT_B8G8R8_SRGB:
	case VK_FORMAT_B8G8R8_UNORM:
		return VK_FORMAT_B8G8R8_UNORM;
	case VK_FORMAT_R8G8B8A8_SRGB:
	case VK_FORMAT_R8G8B8A8_UNORM:
		return VK_FORMAT_R8G8B8A8_UNORM;
	case VK_FORMAT_B8G8R8A8_SRGB:
	case VK_FORMAT_B8G8R8A8_UNORM:
		return VK_FORMAT_B8G8R8A8_UNORM;
	case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
	case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
		return VK_FORMAT_A8B8G8R8_UNORM_PACK32;

	// BCn (S3TC/BPTC) block-compressed formats
	case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
	case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
		return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
	case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
	case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
		return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
	case VK_FORMAT_BC2_SRGB_BLOCK:
	case VK_FORMAT_BC2_UNORM_BLOCK:
		return VK_FORMAT_BC2_UNORM_BLOCK;
	case VK_FORMAT_BC3_SRGB_BLOCK:
	case VK_FORMAT_BC3_UNORM_BLOCK:
		return VK_FORMAT_BC3_UNORM_BLOCK;
	case VK_FORMAT_BC7_SRGB_BLOCK:
	case VK_FORMAT_BC7_UNORM_BLOCK:
		return VK_FORMAT_BC7_UNORM_BLOCK;

	// ETC2 block-compressed formats
	case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
		return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
	case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
		return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
	case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
	case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
		return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;

	// ASTC block-compressed formats (all block dimensions)
	case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
	case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
		return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
	case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
	case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
		return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
	case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
		return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
	case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
		return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
	case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
		return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
	case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
		return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
	case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
		return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
	case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
	case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
		return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
	case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
		return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
	case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
		return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
	case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
		return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
	case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
	case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
		return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
	case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
	case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
		return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
	case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
	case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
		return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;

	// PVRTC block-compressed formats (VK_IMG_format_pvrtc)
	case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG:
	case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG:
		return VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG;
	case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG:
	case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG:
		return VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG;
	case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG:
	case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG:
		return VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG;
	case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG:
	case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG:
		return VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG;

	default:
		// No known UNORM counterpart; caller skips the extra view.
		return VK_FORMAT_UNDEFINED;
	}
}
r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create) {
const qboolean is_depth = !!(create->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
xvk_image_t image = {0};
r_vk_image_t image = {0};
VkMemoryRequirements memreq;
const qboolean is_cubemap = !!(create->flags & kVkImageFlagIsCubemap);
const qboolean create_unorm = !!(create->flags & kVkImageFlagCreateUnormView);
VkImageCreateInfo ici = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.imageType = VK_IMAGE_TYPE_2D,
@ -35,7 +112,9 @@ xvk_image_t XVK_ImageCreate(const xvk_image_create_t *create) {
.usage = create->usage,
.samples = VK_SAMPLE_COUNT_1_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.flags = create->is_cubemap ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0,
.flags = 0
| (is_cubemap ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0)
| (create_unorm ? VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT : 0),
};
XVK_CHECK(vkCreateImage(vk_core.device, &ici, NULL, &image.image));
@ -48,9 +127,11 @@ xvk_image_t XVK_ImageCreate(const xvk_image_create_t *create) {
XVK_CHECK(vkBindImageMemory(vk_core.device, image.image, image.devmem.device_memory, image.devmem.offset));
if (create->usage & usage_bits_implying_views) {
const VkImageViewCreateInfo ivci = {
const qboolean has_alpha = !!(create->flags & kVkImageFlagHasAlpha);
VkImageViewCreateInfo ivci = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.viewType = create->is_cubemap ? VK_IMAGE_VIEW_TYPE_CUBE : VK_IMAGE_VIEW_TYPE_2D,
.viewType = is_cubemap ? VK_IMAGE_VIEW_TYPE_CUBE : VK_IMAGE_VIEW_TYPE_2D,
.format = ici.format,
.image = image.image,
.subresourceRange.aspectMask = is_depth ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT,
@ -58,12 +139,26 @@ xvk_image_t XVK_ImageCreate(const xvk_image_create_t *create) {
.subresourceRange.levelCount = ici.mipLevels,
.subresourceRange.baseArrayLayer = 0,
.subresourceRange.layerCount = ici.arrayLayers,
.components = (VkComponentMapping){0, 0, 0, (is_depth || create->has_alpha) ? 0 : VK_COMPONENT_SWIZZLE_ONE},
// TODO component swizzling based on format, e.g. R8 -> RRRR
.components = (VkComponentMapping){0, 0, 0, (is_depth || has_alpha) ? 0 : VK_COMPONENT_SWIZZLE_ONE},
};
XVK_CHECK(vkCreateImageView(vk_core.device, &ivci, NULL, &image.view));
if (create->debug_name)
SET_DEBUG_NAME(image.view, VK_OBJECT_TYPE_IMAGE_VIEW, create->debug_name);
if (create_unorm) {
// FIXME handle same formats: reuse the same image_view, destroy it properly, etc
ivci.format = unormFormatFor(ici.format);
if (ivci.format != VK_FORMAT_UNDEFINED) {
XVK_CHECK(vkCreateImageView(vk_core.device, &ivci, NULL, &image.view_unorm));
if (create->debug_name)
SET_DEBUG_NAMEF(image.view_unorm, VK_OBJECT_TYPE_IMAGE_VIEW, "%s_unorm", create->debug_name);
} else {
WARN("There's no UNORM format for %s for image \"%s\"", R_VkFormatName(ici.format), create->debug_name);
}
}
}
image.width = create->width;
@ -73,11 +168,17 @@ xvk_image_t XVK_ImageCreate(const xvk_image_create_t *create) {
return image;
}
void XVK_ImageDestroy(xvk_image_t *img) {
vkDestroyImageView(vk_core.device, img->view, NULL);
void R_VkImageDestroy(r_vk_image_t *img) {
if (img->view_unorm != VK_NULL_HANDLE)
vkDestroyImageView(vk_core.device, img->view_unorm, NULL);
if (img->view != VK_NULL_HANDLE)
vkDestroyImageView(vk_core.device, img->view, NULL);
vkDestroyImage(vk_core.device, img->image, NULL);
VK_DevMemFree(&img->devmem);
*img = (xvk_image_t){0};
*img = (r_vk_image_t){0};
}
void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image) {

View File

@ -2,14 +2,24 @@
#include "vk_core.h"
#include "vk_devmem.h"
typedef struct xvk_image_s {
typedef struct r_vk_image_s {
vk_devmem_t devmem;
VkImage image;
VkImageView view;
// Optional, created by kVkImageFlagCreateUnormView
// Used for sRGB-γ-unaware traditional renderer
VkImageView view_unorm;
uint32_t width, height;
int mips;
} xvk_image_t;
} r_vk_image_t;
enum {
kVkImageFlagHasAlpha = (1<<0),
kVkImageFlagIsCubemap = (1<<1),
kVkImageFlagCreateUnormView = (1<<2),
};
typedef struct {
const char *debug_name;
@ -18,13 +28,12 @@ typedef struct {
VkFormat format;
VkImageTiling tiling;
VkImageUsageFlags usage;
qboolean has_alpha;
qboolean is_cubemap;
VkMemoryPropertyFlags memory_props;
} xvk_image_create_t;
uint32_t flags;
} r_vk_image_create_t;
xvk_image_t XVK_ImageCreate(const xvk_image_create_t *create);
void XVK_ImageDestroy(xvk_image_t *img);
r_vk_image_t R_VkImageCreate(const r_vk_image_create_t *create);
void R_VkImageDestroy(r_vk_image_t *img);
void R_VkImageClear(VkCommandBuffer cmdbuf, VkImage image);

View File

@ -266,10 +266,10 @@ static void drawOverlay( VkCommandBuffer cmdbuf ) {
{
vk_texture_t *texture = findTexture(g2d.batch[i].texture);
const VkPipeline pipeline = g2d.pipelines[g2d.batch[i].blending_mode];
if (texture->vk.descriptor)
if (texture->vk.descriptor_unorm)
{
vkCmdBindPipeline(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, g2d.pipeline_layout, 0, 1, &texture->vk.descriptor, 0, NULL);
vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, g2d.pipeline_layout, 0, 1, &texture->vk.descriptor_unorm, 0, NULL);
vkCmdDraw(cmdbuf, g2d.batch[i].vertex_count, 1, g2d.batch[i].vertex_offset, 0);
} // FIXME else what?
}

View File

@ -589,14 +589,14 @@ void VK_RenderEnd( VkCommandBuffer cmdbuf, qboolean draw )
if (lightmap != draw->draw.lightmap) {
lightmap = draw->draw.lightmap;
vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, g_render.pipeline_layout, 2, 1, &findTexture(lightmap)->vk.descriptor, 0, NULL);
vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, g_render.pipeline_layout, 2, 1, &findTexture(lightmap)->vk.descriptor_unorm, 0, NULL);
}
if (texture != draw->draw.texture)
{
texture = draw->draw.texture;
// TODO names/enums for binding points
vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, g_render.pipeline_layout, 1, 1, &findTexture(texture)->vk.descriptor, 0, NULL);
vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, g_render.pipeline_layout, 1, 1, &findTexture(texture)->vk.descriptor_unorm, 0, NULL);
}
// Only indexed mode is supported

View File

@ -67,7 +67,7 @@ enum {
typedef struct {
char name[64];
vk_resource_t resource;
xvk_image_t image;
r_vk_image_t image;
int refcount;
int source_index_plus_1;
} rt_resource_t;
@ -238,7 +238,7 @@ static void performTracing( vk_combuf_t *combuf, const perform_tracing_args_t* a
// Swap resources
const vk_resource_t tmp_res = res->resource;
const xvk_image_t tmp_img = res->image;
const r_vk_image_t tmp_img = res->image;
res->resource = src->resource;
res->image = src->image;
@ -346,7 +346,7 @@ static void cleanupResources(void) {
if (!res->name[0] || res->refcount || !res->image.image)
continue;
XVK_ImageDestroy(&res->image);
R_VkImageDestroy(&res->image);
res->name[0] = '\0';
}
}
@ -416,7 +416,7 @@ static void reloadMainpipe(void) {
if (create) {
if (res->image.image == VK_NULL_HANDLE) {
const xvk_image_create_t create = {
const r_vk_image_create_t create = {
.debug_name = mr->name,
.width = FRAME_WIDTH,
.height = FRAME_HEIGHT,
@ -427,10 +427,9 @@ static void reloadMainpipe(void) {
// TODO figure out how to detect this need properly. prev_dest is not defined as "output"
//.usage = VK_IMAGE_USAGE_STORAGE_BIT | (output ? VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT : 0),
.usage = VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
.has_alpha = true,
.is_cubemap = false,
.flags = kVkImageFlagHasAlpha,
};
res->image = XVK_ImageCreate(&create);
res->image = R_VkImageCreate(&create);
Q_strncpy(res->name, mr->name, sizeof(res->name));
} else {
// TODO if (mr->image_format != res->image.format) { S_ERROR and goto fail }
@ -493,7 +492,7 @@ static void reloadMainpipe(void) {
// TODO currently changing texture format is not handled. It will try to reuse existing image with the old format
// which will probably fail. To handle it we'd need to refactor this:
// 1. xvk_image_t should have a field with its current format? (or we'd also store it with the resource here)
// 1. r_vk_image_t should have a field with its current format? (or we'd also store it with the resource here)
// 2. do another loop here to detect format mismatch and recreate.
g_rtx.mainpipe = newpipe;

View File

@ -18,7 +18,7 @@ static struct {
VkImageView *image_views;
VkFramebuffer *framebuffers;
xvk_image_t depth;
r_vk_image_t depth;
uint32_t width, height;
@ -33,11 +33,10 @@ static uint32_t clamp_u32(uint32_t v, uint32_t min, uint32_t max) {
}
static void createDepthImage(int w, int h, VkFormat depth_format) {
const xvk_image_create_t xic = {
const r_vk_image_create_t xic = {
.debug_name = "depth",
.format = depth_format,
.has_alpha = false,
.is_cubemap = false,
.flags = 0,
.mips = 1,
.layers = 1,
.width = w,
@ -45,7 +44,7 @@ static void createDepthImage(int w, int h, VkFormat depth_format) {
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
};
g_swapchain.depth = XVK_ImageCreate( &xic );
g_swapchain.depth = R_VkImageCreate( &xic );
}
static void destroySwapchainAndFramebuffers( VkSwapchainKHR swapchain ) {
@ -56,7 +55,7 @@ static void destroySwapchainAndFramebuffers( VkSwapchainKHR swapchain ) {
vkDestroyFramebuffer(vk_core.device, g_swapchain.framebuffers[i], NULL);
}
XVK_ImageDestroy( &g_swapchain.depth );
R_VkImageDestroy( &g_swapchain.depth );
vkDestroySwapchainKHR(vk_core.device, swapchain, NULL);
}

View File

@ -95,7 +95,7 @@ void destroyTextures( void )
unloadSkybox();
XVK_ImageDestroy(&tglob.cubemap_placeholder.vk.image);
R_VkImageDestroy(&tglob.cubemap_placeholder.vk.image);
g_textures.stats.size_total -= tglob.cubemap_placeholder.total_size;
g_textures.stats.count--;
memset(&tglob.cubemap_placeholder, 0, sizeof(tglob.cubemap_placeholder));
@ -606,7 +606,7 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
// data = GL_ApplyFilter( data, tex->width, tex->height );
{
const xvk_image_create_t create = {
const r_vk_image_create_t create = {
.debug_name = tex->name,
.width = tex->width,
.height = tex->height,
@ -615,10 +615,12 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
.format = format,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
.has_alpha = layers[0]->flags & IMAGE_HAS_ALPHA,
.is_cubemap = cubemap,
.flags = 0
| ((layers[0]->flags & IMAGE_HAS_ALPHA) ? kVkImageFlagHasAlpha : 0)
| (cubemap ? kVkImageFlagIsCubemap : 0)
| (colorspace_hint == kColorspaceGamma ? kVkImageFlagCreateUnormView : 0),
};
tex->vk.image = XVK_ImageCreate(&create);
tex->vk.image = R_VkImageCreate(&create);
}
{
@ -724,30 +726,52 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
// TODO how should we approach this:
// - per-texture desc sets can be inconvenient if texture is used in different incompatible contexts
// - update descriptor sets in batch?
if (vk_desc.next_free != MAX_TEXTURES) {
if (vk_desc.next_free < MAX_TEXTURES-2) {
const int index = tex - vk_textures;
VkDescriptorImageInfo dii_tmp;
// FIXME handle cubemaps properly w/o this garbage. they should be the same as regular textures.
VkDescriptorImageInfo *const dii_tex = (num_layers == 1) ? tglob.dii_all_textures + index : &dii_tmp;
*dii_tex = (VkDescriptorImageInfo){
const VkDescriptorSet ds = vk_desc.sets[vk_desc.next_free++];
const VkDescriptorSet ds_unorm = colorspace_hint == kColorspaceGamma ? vk_desc.sets[vk_desc.next_free++] : VK_NULL_HANDLE;
const VkDescriptorImageInfo dii = {
.imageView = tex->vk.image.view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.sampler = pickSamplerForFlags( tex->flags ),
};
const VkWriteDescriptorSet wds[] = { {
const VkDescriptorImageInfo dii_unorm = {
.imageView = tex->vk.image.view_unorm,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.sampler = dii.sampler,
};
VkWriteDescriptorSet wds[2] = { {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = dii_tex,
.dstSet = tex->vk.descriptor = vk_desc.sets[vk_desc.next_free++],
.pImageInfo = &dii,
.dstSet = ds,
}, {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &dii_unorm,
.dstSet = ds_unorm,
}};
vkUpdateDescriptorSets(vk_core.device, ARRAYSIZE(wds), wds, 0, NULL);
vkUpdateDescriptorSets(vk_core.device, ds_unorm != VK_NULL_HANDLE ? 2 : 1 , wds, 0, NULL);
// FIXME handle cubemaps properly w/o this garbage. they should be the same as regular textures.
if (num_layers == 1) {
tglob.dii_all_textures[index] = dii;
}
tex->vk.descriptor_unorm = ds_unorm != VK_NULL_HANDLE ? ds_unorm : ds;
}
else
{
tex->vk.descriptor = VK_NULL_HANDLE;
tex->vk.descriptor_unorm = VK_NULL_HANDLE;
}
g_textures.stats.size_total += tex->total_size;
@ -891,7 +915,7 @@ static int loadKtx2( const char *name ) {
// 1. Create image
{
const xvk_image_create_t create = {
const r_vk_image_create_t create = {
.debug_name = tex->name,
.width = header->pixelWidth,
.height = header->pixelHeight,
@ -900,10 +924,10 @@ static int loadKtx2( const char *name ) {
.format = header->vkFormat,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
.has_alpha = false, // FIXME
.is_cubemap = false,
// FIXME find out if there's alpha
.flags = 0,
};
tex->vk.image = XVK_ImageCreate(&create);
tex->vk.image = R_VkImageCreate(&create);
}
// 2. Prep cmdbuf, barrier, etc
@ -1000,9 +1024,13 @@ static int loadKtx2( const char *name ) {
}
}
// KTX2 textures are inaccessible from trad renderer (for now)
tex->vk.descriptor_unorm = VK_NULL_HANDLE;
// TODO how should we approach this:
// - per-texture desc sets can be inconvenient if texture is used in different incompatible contexts
// - update descriptor sets in batch?
if (vk_desc.next_free != MAX_TEXTURES) {
const int num_layers = 1; // TODO cubemap
const int index = tex - vk_textures;
@ -1021,14 +1049,10 @@ static int loadKtx2( const char *name ) {
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = dii_tex,
.dstSet = tex->vk.descriptor = vk_desc.sets[vk_desc.next_free++],
.dstSet = vk_desc.sets[vk_desc.next_free++],
}};
vkUpdateDescriptorSets(vk_core.device, ARRAYSIZE(wds), wds, 0, NULL);
}
else
{
tex->vk.descriptor = VK_NULL_HANDLE;
}
g_textures.stats.size_total += tex->total_size;
g_textures.stats.count++;
@ -1189,7 +1213,7 @@ void VK_FreeTexture( unsigned int texnum ) {
R_VkStagingFlushSync();
XVK_CHECK(vkDeviceWaitIdle(vk_core.device));
XVK_ImageDestroy(&tex->vk.image);
R_VkImageDestroy(&tex->vk.image);
g_textures.stats.size_total -= tex->total_size;
g_textures.stats.count--;
memset(tex, 0, sizeof(*tex));
@ -1257,7 +1281,7 @@ int XVK_TextureLookupF( const char *fmt, ...) {
static void unloadSkybox( void ) {
if (tglob.skybox_cube.vk.image.image) {
XVK_ImageDestroy(&tglob.skybox_cube.vk.image);
R_VkImageDestroy(&tglob.skybox_cube.vk.image);
g_textures.stats.size_total -= tglob.skybox_cube.total_size;
g_textures.stats.count--;
memset(&tglob.skybox_cube, 0, sizeof(tglob.skybox_cube));

View File

@ -16,8 +16,8 @@ typedef struct vk_texture_s
uint texnum;
struct {
xvk_image_t image;
VkDescriptorSet descriptor;
r_vk_image_t image;
VkDescriptorSet descriptor_unorm;
} vk;
uint hashValue;
@ -48,6 +48,7 @@ typedef struct vk_textures_global_s
vk_texture_t skybox_cube;
vk_texture_t cubemap_placeholder;
// All textures descriptors in their native formats used for RT
VkDescriptorImageInfo dii_all_textures[MAX_TEXTURES];
// FIXME this should not exist, all textures should have their own samplers based on flags