Mirror of https://github.com/w23/xash3d-fwgs, synced 2024-12-15 13:41:33 +01:00
rt: reimagine staging
- make staging/prep phase use separate command buffer
- flush this command buffer early if needed
- move command pool to its own module
- move shader loading stuff to pipeline module
- cleanup lots of command buffer passing for model loading, it can use staging explicitly now
parent f93ae5de80 → commit 2c4d0846d4
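In sketch form, the frame-level flow this commit moves to (not code from the diff itself; frame_cmdbuf is a placeholder for the frame's own command buffer, everything else matches the call sites changed below):

    // Hedged sketch of the per-frame staging flow after this change.
    static void exampleFrame(VkCommandBuffer frame_cmdbuf) {
        // Frame begin: the frame-before-last is complete, so its staging ring space is freed
        R_VkStagingFrameBegin();

        // ... model/texture code locks staging regions, memcpy's data in, unlocks;
        // copy commands are recorded lazily into a separate staging command buffer ...

        // Frame end: commit pending copies and submit the staging command buffer
        // strictly before the frame's own command buffer
        const VkCommandBuffer cmdbufs[] = {
            R_VkStagingFrameEnd(), // VK_NULL_HANDLE if nothing was staged
            frame_cmdbuf,
        };
        const VkSubmitInfo subinfo = {
            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
            .commandBufferCount = cmdbufs[0] ? 2 : 1,
            .pCommandBuffers = cmdbufs[0] ? cmdbufs : cmdbufs + 1,
        };
        XVK_CHECK(vkQueueSubmit(vk_core.queue, 1, &subinfo, VK_NULL_HANDLE));
    }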
@@ -511,7 +511,7 @@ static qboolean loadBrushSurfaces( model_sizes_t sizes, const model_t *mod ) {
         // FIXME move this to rt_light_bsp and static loading
         {
             qboolean is_emissive = false;
-            vec3_t emissive;
+            vec3_t emissive = {0};
             rt_light_add_polygon_t polylight;

             if (psurf && (psurf->flags & Patch_Surface_Emissive)) {
@@ -637,7 +637,7 @@ static qboolean loadBrushSurfaces( model_sizes_t sizes, const model_t *mod ) {
     return true;
 }

-qboolean VK_BrushModelLoad( VkCommandBuffer cmdbuf, model_t *mod, qboolean map )
+qboolean VK_BrushModelLoad( model_t *mod, qboolean map )
 {
     if (mod->cache.data)
     {
@@ -667,7 +667,7 @@ qboolean VK_BrushModelLoad( VkCommandBuffer cmdbuf, model_t *mod, qboolean map )
     if (!map && sizes.emissive_surfaces)
         bmodel->render_model.polylights = Mem_Malloc(vk_core.pool, sizeof(bmodel->render_model.polylights[0]) * sizes.emissive_surfaces);

-    if (!loadBrushSurfaces(sizes, mod) || !VK_RenderModelInit(cmdbuf, &bmodel->render_model)) {
+    if (!loadBrushSurfaces(sizes, mod) || !VK_RenderModelInit(&bmodel->render_model)) {
         gEngine.Con_Printf(S_ERROR "Could not load model %s\n", mod->name);
         Mem_Free(bmodel);
         return false;
@@ -1,7 +1,7 @@
 #pragma once

 #include "xash3d_types.h"
-#include "vk_render.h"
+#include "vk_render.h" // cl_entity_t

 struct ref_viewpass_s;
 struct draw_list_s;
@@ -11,8 +11,8 @@ struct cl_entity_s;
 qboolean VK_BrushInit( void );
 void VK_BrushShutdown( void );

-qboolean VK_BrushModelLoad( VkCommandBuffer cmdbuf, struct model_s *mod, qboolean map);
-void VK_BrushModelDestroy( struct model_s *mod );
+qboolean VK_BrushModelLoad(struct model_s *mod, qboolean map);
+void VK_BrushModelDestroy(struct model_s *mod);

 void VK_BrushModelDraw( const cl_entity_t *ent, int render_mode, const matrix4x4 model );
 void VK_BrushStatsClear( void );
ref_vk/vk_commandpool.c (new file, 32 lines)
@@ -0,0 +1,32 @@
+#include "vk_commandpool.h"
+
+vk_command_pool_t R_VkCommandPoolCreate( int count ) {
+    vk_command_pool_t ret = {0};
+
+    const VkCommandPoolCreateInfo cpci = {
+        .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+        .queueFamilyIndex = 0,
+        .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+    };
+
+    VkCommandBufferAllocateInfo cbai = {
+        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+        .commandBufferCount = count,
+        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+    };
+
+    XVK_CHECK(vkCreateCommandPool(vk_core.device, &cpci, NULL, &ret.pool));
+
+    cbai.commandPool = ret.pool;
+    ret.buffers = Mem_Malloc(vk_core.pool, sizeof(VkCommandBuffer) * count);
+    ret.buffers_count = count;
+    XVK_CHECK(vkAllocateCommandBuffers(vk_core.device, &cbai, ret.buffers));
+
+    return ret;
+}
+
+void R_VkCommandPoolDestroy( vk_command_pool_t *pool ) {
+    ASSERT(pool->buffers);
+    vkDestroyCommandPool(vk_core.device, pool->pool, NULL);
+    Mem_Free(pool->buffers);
+}
ref_vk/vk_commandpool.h (new file, 10 lines)
@@ -0,0 +1,10 @@
+#include "vk_core.h"
+
+typedef struct {
+    VkCommandPool pool;
+    VkCommandBuffer *buffers;
+    int buffers_count;
+} vk_command_pool_t;
+
+vk_command_pool_t R_VkCommandPoolCreate( int count );
+void R_VkCommandPoolDestroy( vk_command_pool_t *pool );
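A minimal usage sketch for the new module (the begin/record/end steps in the middle are assumed boilerplate; everything else comes from the header above):

    vk_command_pool_t pool = R_VkCommandPoolCreate( 2 ); // pool with two primary command buffers
    VkCommandBuffer cb = pool.buffers[0];
    // ... vkBeginCommandBuffer(cb, ...), record commands, vkEndCommandBuffer(cb), submit ...
    R_VkCommandPoolDestroy( &pool ); // frees pool.buffers and destroys the VkCommandPool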
@@ -17,6 +17,7 @@
 #include "vk_descriptor.h"
 #include "vk_nv_aftermath.h"
 #include "vk_devmem.h"
+#include "vk_commandpool.h"

 // FIXME move this rt-specific stuff out
 #include "vk_light.h"
@@ -621,37 +622,6 @@ static qboolean initSurface( void )
     return true;
 }

-vk_command_pool_t R_VkCommandPoolCreate( int count ) {
-    vk_command_pool_t ret = {0};
-
-    const VkCommandPoolCreateInfo cpci = {
-        .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
-        .queueFamilyIndex = 0,
-        .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
-    };
-
-    VkCommandBufferAllocateInfo cbai = {
-        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
-        .commandBufferCount = count,
-        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
-    };
-
-    XVK_CHECK(vkCreateCommandPool(vk_core.device, &cpci, NULL, &ret.pool));
-
-    cbai.commandPool = ret.pool;
-    ret.buffers = Mem_Malloc(vk_core.pool, sizeof(VkCommandBuffer) * count);
-    ret.buffers_count = count;
-    XVK_CHECK(vkAllocateCommandBuffers(vk_core.device, &cbai, ret.buffers));
-
-    return ret;
-}
-
-void R_VkCommandPoolDestroy( vk_command_pool_t *pool ) {
-    ASSERT(pool->buffers);
-    vkDestroyCommandPool(vk_core.device, pool->pool, NULL);
-    Mem_Free(pool->buffers);
-}
-
 qboolean R_VkInit( void )
 {
     // FIXME !!!! handle initialization errors properly: destroy what has already been created
@@ -713,8 +683,6 @@ qboolean R_VkInit( void )
     if (!initSurface())
         return false;

-    vk_core.upload_pool = R_VkCommandPoolCreate( 1 );
-
     if (!VK_DevMemInit())
         return false;

@@ -812,8 +780,6 @@ void R_VkShutdown( void ) {

     VK_DevMemDestroy();

-    R_VkCommandPoolDestroy( &vk_core.upload_pool );
-
     vkDestroyDevice(vk_core.device, NULL);

 #if USE_AFTERMATH
@@ -834,37 +800,6 @@ void R_VkShutdown( void ) {
     gEngine.R_Free_Video();
 }

-VkShaderModule loadShader(const char *filename) {
-    fs_offset_t size = 0;
-    VkShaderModuleCreateInfo smci = {
-        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
-    };
-    VkShaderModule shader;
-    byte* buf = gEngine.fsapi->LoadFile( filename, &size, false);
-    uint32_t *pcode;
-
-    if (!buf)
-    {
-        gEngine.Host_Error( S_ERROR "Cannot open shader file \"%s\"\n", filename);
-    }
-
-    if ((size % 4 != 0) || (((uintptr_t)buf & 3) != 0)) {
-        gEngine.Host_Error( S_ERROR "size %zu or buf %p is not aligned to 4 bytes as required by SPIR-V/Vulkan spec", size, buf);
-    }
-
-    smci.codeSize = size;
-    //smci.pCode = (const uint32_t*)buf;
-    //memcpy(&smci.pCode, &buf, sizeof(void*));
-    memcpy(&pcode, &buf, sizeof(pcode));
-    smci.pCode = pcode;
-
-    XVK_CHECK(vkCreateShaderModule(vk_core.device, &smci, NULL, &shader));
-    SET_DEBUG_NAME(shader, VK_OBJECT_TYPE_SHADER_MODULE, filename);
-
-    Mem_Free(buf);
-    return shader;
-}
-
 VkSemaphore R_VkSemaphoreCreate( void ) {
     VkSemaphore sema;
     VkSemaphoreCreateInfo sci = {
@@ -10,18 +10,6 @@
 qboolean R_VkInit( void );
 void R_VkShutdown( void );

-typedef struct {
-    VkCommandPool pool;
-    VkCommandBuffer *buffers;
-    int buffers_count;
-} vk_command_pool_t;
-
-vk_command_pool_t R_VkCommandPoolCreate( int count );
-void R_VkCommandPoolDestroy( vk_command_pool_t *pool );
-
-// TODO load from embedded static structs
-VkShaderModule loadShader(const char *filename);
-
 VkSemaphore R_VkSemaphoreCreate( void );
 void R_VkSemaphoreDestroy(VkSemaphore sema);

@@ -64,8 +52,6 @@ typedef struct vulkan_core_s {
     VkDevice device;
     VkQueue queue;

-    vk_command_pool_t upload_pool;
-
     VkSampler default_sampler;

     unsigned int num_devices;
@@ -108,12 +94,13 @@ do { \
 } while (0)

 #if USE_AFTERMATH
-void R_VK_NV_CheckpointF(VkCommandBuffer cmdbuf, const char *fmt, ...);
+void R_Vk_NV_CheckpointF(VkCommandBuffer cmdbuf, const char *fmt, ...);
+void R_Vk_NV_Checkpoint_Dump(void);
 #define DEBUG_NV_CHECKPOINTF(cmdbuf, fmt, ...) \
     do { \
         if (vk_core.debug) { \
             R_Vk_NV_CheckpointF(cmdbuf, fmt, ##__VA_ARGS__); \
             if (0) gEngine.Con_Reportf(fmt "\n", ##__VA_ARGS__); \
         } \
     } while(0)
 #else
@@ -9,6 +9,7 @@
 #include "vk_swapchain.h"
 #include "vk_image.h"
 #include "vk_staging.h"
+#include "vk_commandpool.h"

 #include "profiler.h"
@@ -217,7 +218,7 @@ void R_BeginFrame( qboolean clearScene ) {
     ASSERT(!g_frame.current.framebuffer.framebuffer);

     waitForFrameFence();
-    R_VkStagingFrameFlip();
+    R_VkStagingFrameBegin();

     g_frame.current.framebuffer = R_VkSwapchainAcquire( g_frame.sem_framebuffer_ready[g_frame.current.index] );
     vk_frame.width = g_frame.current.framebuffer.width;
@@ -252,9 +253,6 @@ static void enqueueRendering( VkCommandBuffer cmdbuf ) {

     ASSERT(g_frame.current.phase == Phase_FrameBegan);

-    //R_VkStagingFlushSync();
-
-    R_VkStagingCommit(cmdbuf); // FIXME where and when
     VK_Render_FIXME_Barrier(cmdbuf);

     if (g_frame.rtx_enabled)
@@ -301,6 +299,11 @@ static void submit( VkCommandBuffer cmdbuf, qboolean wait ) {

     XVK_CHECK(vkEndCommandBuffer(cmdbuf));

+    const VkCommandBuffer cmdbufs[] = {
+        R_VkStagingFrameEnd(),
+        cmdbuf,
+    };
+
     {
         const VkPipelineStageFlags stageflags[] = {
             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
@@ -317,8 +320,8 @@ static void submit( VkCommandBuffer cmdbuf, qboolean wait ) {
         const VkSubmitInfo subinfo = {
             .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
             .pNext = NULL,
-            .commandBufferCount = 1,
-            .pCommandBuffers = &cmdbuf,
+            .commandBufferCount = cmdbufs[0] ? 2 : 1,
+            .pCommandBuffers = cmdbufs[0] ? cmdbufs : cmdbufs + 1,
             .waitSemaphoreCount = COUNTOF(waitophores),
             .pWaitSemaphores = waitophores,
             .pWaitDstStageMask = stageflags,
@@ -357,7 +360,8 @@ void R_EndFrame( void )
     if (g_frame.current.phase == Phase_FrameBegan) {
         const VkCommandBuffer cmdbuf = currentCommandBuffer();
         enqueueRendering( cmdbuf );
-        submit( cmdbuf, false );
+        //submit( cmdbuf, false );
+        submit( cmdbuf, true );

         vk_frame.cmdbuf = VK_NULL_HANDLE;
     }
@@ -1209,9 +1209,6 @@ vk_lights_bindings_t VK_LightsUpload( VkCommandBuffer cmdbuf ) {

     R_VkStagingUnlock( locked.handle );

-    // TODO probably should do this somewhere else
-    R_VkStagingCommit( cmdbuf );
-
     return (vk_lights_bindings_t){
         .buffer = g_lights_.buffer.buffer,
         .metadata = {
@@ -25,6 +25,38 @@ void VK_PipelineShutdown( void )
     vkDestroyPipelineCache(vk_core.device, g_pipeline_cache, NULL);
 }

+// TODO load from embedded static structs
+static VkShaderModule loadShader(const char *filename) {
+    fs_offset_t size = 0;
+    VkShaderModuleCreateInfo smci = {
+        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+    };
+    VkShaderModule shader;
+    byte* buf = gEngine.fsapi->LoadFile( filename, &size, false);
+    uint32_t *pcode;
+
+    if (!buf)
+    {
+        gEngine.Host_Error( S_ERROR "Cannot open shader file \"%s\"\n", filename);
+    }
+
+    if ((size % 4 != 0) || (((uintptr_t)buf & 3) != 0)) {
+        gEngine.Host_Error( S_ERROR "size %zu or buf %p is not aligned to 4 bytes as required by SPIR-V/Vulkan spec", size, buf);
+    }
+
+    smci.codeSize = size;
+    //smci.pCode = (const uint32_t*)buf;
+    //memcpy(&smci.pCode, &buf, sizeof(void*));
+    memcpy(&pcode, &buf, sizeof(pcode));
+    smci.pCode = pcode;
+
+    XVK_CHECK(vkCreateShaderModule(vk_core.device, &smci, NULL, &shader));
+    SET_DEBUG_NAME(shader, VK_OBJECT_TYPE_SHADER_MODULE, filename);
+
+    Mem_Free(buf);
+    return shader;
+}
+
 VkPipeline VK_PipelineGraphicsCreate(const vk_pipeline_graphics_create_info_t *ci)
 {
     VkPipeline pipeline;
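loadShader moves here verbatim from vk_core.c, now static. Its one non-obvious trick, isolated below as a hedged sketch (not commit code): the pointer is laundered through memcpy instead of being cast, presumably to keep cast-align warnings quiet, while the runtime check above already guarantees the 4-byte alignment SPIR-V requires.

    // Copies the pointer value itself, not the shader data;
    // avoids a (const uint32_t*)byte* cast after alignment was verified.
    static const uint32_t *asCodePtr(byte *buf) {
        uint32_t *pcode;
        memcpy(&pcode, &buf, sizeof(pcode));
        return pcode;
    }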
@@ -208,7 +208,7 @@ static void applyMaterialToKusok(vk_kusok_data_t* kusok, const vk_render_geometr
     }
 }

-vk_ray_model_t* VK_RayModelCreate( VkCommandBuffer cmdbuf, vk_ray_model_init_t args ) {
+vk_ray_model_t* VK_RayModelCreate( vk_ray_model_init_t args ) {
     VkAccelerationStructureGeometryKHR *geoms;
     uint32_t *geom_max_prim_counts;
     VkAccelerationStructureBuildRangeInfoKHR *geom_build_ranges;
@@ -247,6 +247,7 @@ vk_ray_model_t* VK_RayModelCreate( VkCommandBuffer cmdbuf, vk_ray_model_init_t a
     geom_max_prim_counts = Mem_Malloc(vk_core.pool, args.model->num_geometries * sizeof(*geom_max_prim_counts));
     geom_build_ranges = Mem_Calloc(vk_core.pool, args.model->num_geometries * sizeof(*geom_build_ranges));

+    /* gEngine.Con_Reportf("Loading model %s, geoms: %d\n", args.model->debug_name, args.model->num_geometries); */

     for (int i = 0; i < args.model->num_geometries; ++i) {
         vk_render_geometry_t *mg = args.model->geometries + i;
@@ -283,6 +284,15 @@ vk_ray_model_t* VK_RayModelCreate( VkCommandBuffer cmdbuf, vk_ray_model_init_t a
             .firstVertex = mg->vertex_offset,
         };

+        /* { */
+        /*     const uint32_t index_offset = mg->index_offset * sizeof(uint16_t); */
+        /*     gEngine.Con_Reportf(" g%d: vertices:[%08x, %08x) indices:[%08x, %08x)\n", */
+        /*         i, */
+        /*         mg->vertex_offset * sizeof(vk_vertex_t), (mg->vertex_offset + mg->max_vertex) * sizeof(vk_vertex_t), */
+        /*         index_offset, index_offset + mg->element_count * sizeof(uint16_t) */
+        /*         ); */
+        /* } */
+
         if (mg->material == kXVkMaterialSky) {
             kusochki[i].tex_base_color |= KUSOK_MATERIAL_FLAG_SKYBOX;
         } else {
@@ -296,7 +306,7 @@ vk_ray_model_t* VK_RayModelCreate( VkCommandBuffer cmdbuf, vk_ray_model_init_t a
     R_VkStagingUnlock(kusok_staging.handle);

     // FIXME this is definitely not the right place. We should upload everything in bulk, and only then build blases in bulk too
-    R_VkStagingCommit(cmdbuf);
+    const VkCommandBuffer cmdbuf = R_VkStagingCommit();
     {
         const VkBufferMemoryBarrier bmb[] = { {
             .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
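The consumer-side pattern this enables: whoever needs the staged data on the GPU pulls the staging command buffer via R_VkStagingCommit() and records its own barrier into it, as VK_RayModelCreate does above. A hedged sketch; the access/stage masks here are assumptions, not necessarily the exact flags in the file:

    const VkCommandBuffer cmdbuf = R_VkStagingCommit(); // non-NULL here: kusochki were just staged
    const VkBufferMemoryBarrier bmb[] = { {
        .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
        .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
        .dstAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, // assumed: BLAS build reads it
        .buffer = args.buffer,
        .size = VK_WHOLE_SIZE,
    } };
    vkCmdPipelineBarrier(cmdbuf,
        VK_PIPELINE_STAGE_TRANSFER_BIT,
        VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
        0, 0, NULL, COUNTOF(bmb), bmb, 0, NULL);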
@@ -652,7 +652,7 @@ void VK_RenderEndRTX( VkCommandBuffer cmdbuf, VkImageView img_dst_view, VkImage
     }
 }

-qboolean VK_RenderModelInit( VkCommandBuffer cmdbuf, vk_render_model_t *model ) {
+qboolean VK_RenderModelInit( vk_render_model_t *model ) {
     if (vk_core.rtx && (g_render_state.current_frame_is_ray_traced || !model->dynamic)) {
         const VkBuffer geom_buffer = R_GeometryBuffer_Get();
         // TODO runtime rtx switch: ???
@@ -660,7 +660,7 @@ qboolean VK_RenderModelInit( VkCommandBuffer cmdbuf, vk_render_model_t *model )
             .buffer = geom_buffer,
             .model = model,
         };
-        model->ray_model = VK_RayModelCreate(cmdbuf, args);
+        model->ray_model = VK_RayModelCreate(args);
         return !!model->ray_model;
     }

@@ -772,7 +772,7 @@ void VK_RenderModelDynamicCommit( void ) {

     if (g_dynamic_model.model.num_geometries > 0) {
         g_dynamic_model.model.dynamic = true;
-        VK_RenderModelInit( vk_frame.cmdbuf, &g_dynamic_model.model );
+        VK_RenderModelInit( &g_dynamic_model.model );
         VK_RenderModelDraw( NULL, &g_dynamic_model.model );
     }

@@ -84,7 +84,7 @@ typedef struct vk_render_model_s {
     int polylights_count;
 } vk_render_model_t;

-qboolean VK_RenderModelInit( VkCommandBuffer cmdbuf, vk_render_model_t* model );
+qboolean VK_RenderModelInit( vk_render_model_t* model );
 void VK_RenderModelDestroy( vk_render_model_t* model );
 void VK_RenderModelDraw( const cl_entity_t *ent, vk_render_model_t* model );

@@ -207,7 +207,6 @@ static void performTracing(VkCommandBuffer cmdbuf, const perform_tracing_args_t*

     // Upload kusochki updates
     {
-        R_VkStagingCommit(cmdbuf);
         const VkBufferMemoryBarrier bmb[] = { {
             .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
             .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
@@ -11,7 +11,7 @@ typedef struct {
     VkBuffer buffer; // TODO must be uniform for all models. Shall we read it directly from vk_render?
 } vk_ray_model_init_t;

-struct vk_ray_model_s *VK_RayModelCreate( VkCommandBuffer cmdbuf, vk_ray_model_init_t model_init );
+struct vk_ray_model_s *VK_RayModelCreate( vk_ray_model_init_t model_init );
 void VK_RayModelDestroy( struct vk_ray_model_s *model );

 void VK_RayFrameBegin( void );
@@ -181,18 +181,6 @@ void R_NewMap( void ) {
     // Need parsed map entities, and also should happen before brush model loading
     RT_LightsNewMapBegin(map);

-    // RTX map loading requires command buffer for building blases
-    if (vk_core.rtx)
-    {
-        //ASSERT(!"Not implemented");
-        const VkCommandBufferBeginInfo beginfo = {
-            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
-            .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
-        };
-
-        XVK_CHECK(vkBeginCommandBuffer(vk_core.upload_pool.buffers[0], &beginfo));
-    }
-
     // Load all models at once
     gEngine.Con_Reportf( "Num models: %d:\n", num_models );
     for( int i = 0; i < num_models; i++ )
@@ -206,7 +194,7 @@ void R_NewMap( void ) {
         if( m->type != mod_brush )
             continue;

-        if (!VK_BrushModelLoad( vk_core.upload_pool.buffers[0], m, i == 0 ))
+        if (!VK_BrushModelLoad(m, i == 0))
         {
             gEngine.Con_Printf( S_ERROR "Couldn't load model %s\n", m->name );
         }
@@ -216,28 +204,6 @@ void R_NewMap( void ) {
     // Reads surfaces from loaded brush models (must happen after all brushes are loaded)
     RT_LightsNewMapEnd(map);

-    if (!vk_core.rtx) {
-        // FIXME this is a workaround for uploading staging for non-rtx mode. In rtx mode things get naturally uploaded deep in VK_BrushModelLoad.
-        // FIXME there shouldn't be this difference. Ideally, rtx would only continue with also building BLASes, but uploading part should be the same.
-        R_VkStagingFlushSync();
-    } else {
-        R_VKStagingMarkEmpty_FIXME();
-    }
-
-    if (vk_core.rtx)
-    {
-        //ASSERT(!"Not implemented");
-        const VkSubmitInfo subinfo = {
-            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
-            .commandBufferCount = 1,
-            .pCommandBuffers = &vk_core.upload_pool.buffers[0],
-        };
-
-        XVK_CHECK(vkEndCommandBuffer(vk_core.upload_pool.buffers[0]));
-        XVK_CHECK(vkQueueSubmit(vk_core.queue, 1, &subinfo, VK_NULL_HANDLE));
-        XVK_CHECK(vkQueueWaitIdle(vk_core.queue));
-    }
-
     // TODO should we do something like VK_BrushEndLoad?
     VK_UploadLightmap();
 }
@@ -1,11 +1,13 @@
 #include "vk_staging.h"
 #include "vk_buffer.h"
 #include "alolcator.h"
+#include "vk_commandpool.h"

 #include <memory.h>

 #define DEFAULT_STAGING_SIZE (64*1024*1024)
 #define MAX_STAGING_ALLOCS (2048)
+#define MAX_CONCURRENT_FRAMES 2

 typedef struct {
     VkImage image;
@@ -33,13 +35,18 @@ static struct {

     struct {
         uint32_t offset;
-    } frames[2];
+    } frames[MAX_CONCURRENT_FRAMES];
+
+    vk_command_pool_t upload_pool;
+    VkCommandBuffer cmdbuf;
 } g_staging = {0};

 qboolean R_VkStagingInit(void) {
     if (!VK_BufferCreate("staging", &g_staging.buffer, DEFAULT_STAGING_SIZE, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
         return false;

+    g_staging.upload_pool = R_VkCommandPoolCreate( MAX_CONCURRENT_FRAMES );
+
     aloRingInit(&g_staging.ring, DEFAULT_STAGING_SIZE);

     return true;
@@ -47,20 +54,58 @@ qboolean R_VkStagingInit(void) {

 void R_VkStagingShutdown(void) {
     VK_BufferDestroy(&g_staging.buffer);
+    R_VkCommandPoolDestroy( &g_staging.upload_pool );
 }

+static void flushStagingBufferSync(void) {
+    const VkCommandBuffer cmdbuf = R_VkStagingCommit();
+    if (!cmdbuf)
+        return;
+
+    XVK_CHECK(vkEndCommandBuffer(cmdbuf));
+    g_staging.cmdbuf = VK_NULL_HANDLE;
+
+    gEngine.Con_Reportf(S_WARN "flushing staging buffer img committed=%d count=%d\n", g_staging.images.committed, g_staging.images.count);
+
+    const VkSubmitInfo subinfo = {
+        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+        .commandBufferCount = 1,
+        .pCommandBuffers = &cmdbuf,
+    };
+
+    XVK_CHECK(vkQueueSubmit(vk_core.queue, 1, &subinfo, VK_NULL_HANDLE));
+    XVK_CHECK(vkQueueWaitIdle(vk_core.queue));
+
+    g_staging.buffers.committed = g_staging.buffers.count = 0;
+    g_staging.images.committed = g_staging.images.count = 0;
+    g_staging.frames[0].offset = g_staging.frames[1].offset = ALO_ALLOC_FAILED;
+    aloRingInit(&g_staging.ring, DEFAULT_STAGING_SIZE);
+};
+
+static uint32_t allocateInRing(uint32_t size, uint32_t alignment) {
+    alignment = alignment < 1 ? 1 : alignment;
+
+    const uint32_t offset = aloRingAlloc(&g_staging.ring, size, alignment );
+    if (offset != ALO_ALLOC_FAILED)
+        return offset;
+
+    flushStagingBufferSync();
+
+    return aloRingAlloc(&g_staging.ring, size, alignment );
+}
+
 vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args) {
     if ( g_staging.buffers.count >= MAX_STAGING_ALLOCS )
-        return (vk_staging_region_t){0};
-
-    const int index = g_staging.buffers.count;
+        flushStagingBufferSync();

-    const uint32_t offset = aloRingAlloc(&g_staging.ring, args.size, args.alignment < 1 ? 1 : args.alignment );
+    const uint32_t offset = allocateInRing(args.size, args.alignment);
     if (offset == ALO_ALLOC_FAILED)
         return (vk_staging_region_t){0};
     if (g_staging.frames[1].offset == ALO_ALLOC_FAILED)
         g_staging.frames[1].offset = offset;

+    const int index = g_staging.buffers.count;
+
     g_staging.buffers.dest[index] = args.buffer;
     g_staging.buffers.copy[index] = (VkBufferCopy){
         .srcOffset = offset,
@@ -78,17 +123,17 @@ vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args) {

 vk_staging_region_t R_VkStagingLockForImage(vk_staging_image_args_t args) {
     if ( g_staging.images.count >= MAX_STAGING_ALLOCS )
-        return (vk_staging_region_t){0};
-
-    const int index = g_staging.images.count;
-    staging_image_t *const dest = g_staging.images.dest + index;
+        flushStagingBufferSync();

-    const uint32_t offset = aloRingAlloc(&g_staging.ring, args.size, args.alignment);
+    const uint32_t offset = allocateInRing(args.size, args.alignment);
     if (offset == ALO_ALLOC_FAILED)
         return (vk_staging_region_t){0};
     if (g_staging.frames[1].offset == ALO_ALLOC_FAILED)
         g_staging.frames[1].offset = offset;

+    const int index = g_staging.images.count;
+    staging_image_t *const dest = g_staging.images.dest + index;
+
     dest->image = args.image;
     dest->layout = args.layout;
     g_staging.images.copy[index] = args.region;
@@ -117,6 +162,11 @@ static void commitBuffers(VkCommandBuffer cmdbuf) {
     VkBuffer prev_buffer = VK_NULL_HANDLE;
     int first_copy = 0;
     for (int i = g_staging.buffers.committed; i < g_staging.buffers.count; i++) {
+        /* { */
+        /*     const VkBufferCopy *const copy = g_staging.buffers.copy + i; */
+        /*     gEngine.Con_Reportf(" %d: [%08llx, %08llx) => [%08llx, %08llx)\n", i, copy->srcOffset, copy->srcOffset + copy->size, copy->dstOffset, copy->dstOffset + copy->size); */
+        /* } */
+
         if (prev_buffer == g_staging.buffers.dest[i])
             continue;

@@ -132,6 +182,7 @@ static void commitBuffers(VkCommandBuffer cmdbuf) {
     }

     if (prev_buffer != VK_NULL_HANDLE) {
+        DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", prev_buffer, g_staging.buffers.count-first_copy);
         vkCmdCopyBuffer(cmdbuf, g_staging.buffer.buffer,
             prev_buffer,
             g_staging.buffers.count - first_copy, g_staging.buffers.copy + first_copy);
@@ -142,6 +193,11 @@ static void commitBuffers(VkCommandBuffer cmdbuf) {

 static void commitImages(VkCommandBuffer cmdbuf) {
     for (int i = g_staging.images.committed; i < g_staging.images.count; i++) {
+        /* { */
+        /*     const VkBufferImageCopy *const copy = g_staging.images.copy + i; */
+        /*     gEngine.Con_Reportf(" i%d: [%08llx, ?) => %p\n", i, copy->bufferOffset, g_staging.images.dest[i].image); */
+        /* } */
+
         vkCmdCopyBufferToImage(cmdbuf, g_staging.buffer.buffer,
             g_staging.images.dest[i].image,
             g_staging.images.dest[i].layout,
@@ -151,13 +207,32 @@ static void commitImages(VkCommandBuffer cmdbuf) {
     g_staging.images.committed = g_staging.images.count;
 }

-void R_VkStagingCommit(VkCommandBuffer cmdbuf) {
-    commitBuffers(cmdbuf);
-    commitImages(cmdbuf);
+VkCommandBuffer R_VkStagingGetCommandBuffer(void) {
+    if (g_staging.cmdbuf)
+        return g_staging.cmdbuf;
+
+    g_staging.cmdbuf = g_staging.upload_pool.buffers[0];
+
+    const VkCommandBufferBeginInfo beginfo = {
+        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+    };
+    XVK_CHECK(vkBeginCommandBuffer(g_staging.cmdbuf, &beginfo));
+
+    return g_staging.cmdbuf;
+}
+
+VkCommandBuffer R_VkStagingCommit(void) {
+    if (!g_staging.images.count && !g_staging.buffers.count && !g_staging.cmdbuf)
+        return VK_NULL_HANDLE;
+
+    const VkCommandBuffer cmdbuf = R_VkStagingGetCommandBuffer();
+    commitBuffers(cmdbuf);
+    commitImages(cmdbuf);
+    return cmdbuf;
 }

-void R_VkStagingFrameFlip(void) {
+void R_VkStagingFrameBegin(void) {
     if (g_staging.frames[0].offset != ALO_ALLOC_FAILED)
         aloRingFree(&g_staging.ring, g_staging.frames[0].offset);
@@ -168,39 +243,16 @@ void R_VkStagingFrameFlip(void) {
     g_staging.images.committed = g_staging.images.count = 0;
 }

-void R_VKStagingMarkEmpty_FIXME(void) {
-    g_staging.buffers.committed = g_staging.buffers.count = 0;
-    g_staging.images.committed = g_staging.images.count = 0;
-    g_staging.frames[0].offset = g_staging.frames[1].offset = ALO_ALLOC_FAILED;
-    aloRingInit(&g_staging.ring, DEFAULT_STAGING_SIZE);
-}
-
-void R_VkStagingFlushSync(void) {
-    if ( g_staging.buffers.count == g_staging.buffers.committed
-        && g_staging.images.count == g_staging.images.committed)
-        return;
-
-    {
-        // FIXME get the right one
-        const VkCommandBuffer cmdbuf = vk_core.upload_pool.buffers[0];
-
-        const VkCommandBufferBeginInfo beginfo = {
-            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
-            .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
-        };
-
-        const VkSubmitInfo subinfo = {
-            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
-            .commandBufferCount = 1,
-            .pCommandBuffers = &cmdbuf,
-        };
-
-        XVK_CHECK(vkBeginCommandBuffer(cmdbuf, &beginfo));
-        R_VkStagingCommit(cmdbuf);
-        XVK_CHECK(vkEndCommandBuffer(cmdbuf));
-        XVK_CHECK(vkQueueSubmit(vk_core.queue, 1, &subinfo, VK_NULL_HANDLE));
-        XVK_CHECK(vkQueueWaitIdle(vk_core.queue));
-
-        R_VKStagingMarkEmpty_FIXME();
-    }
+VkCommandBuffer R_VkStagingFrameEnd(void) {
+    const VkCommandBuffer cmdbuf = R_VkStagingCommit();
+    if (cmdbuf)
+        XVK_CHECK(vkEndCommandBuffer(cmdbuf));
+
+    g_staging.cmdbuf = VK_NULL_HANDLE;
+
+    const VkCommandBuffer tmp = g_staging.upload_pool.buffers[0];
+    g_staging.upload_pool.buffers[0] = g_staging.upload_pool.buffers[1];
+    g_staging.upload_pool.buffers[1] = tmp;
+
+    return cmdbuf;
 }
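Taken together, the staging command buffer now has a lazily-begun, double-buffered lifecycle. A hedged sketch of one frame's worth of calls (comments are readings of the code above, not documented guarantees):

    // First use in a frame: begins upload_pool.buffers[0] lazily.
    VkCommandBuffer cb = R_VkStagingGetCommandBuffer();
    // Records all pending vkCmdCopy* into it; VK_NULL_HANDLE when idle.
    cb = R_VkStagingCommit();
    // Ends it, swaps buffers[0] <-> buffers[1] so the next frame records into
    // the other buffer while this one is in flight, and returns it for submission
    // (vk_framectl submits it ahead of the frame command buffer).
    cb = R_VkStagingFrameEnd();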
@@ -34,13 +34,16 @@ vk_staging_region_t R_VkStagingLockForImage(vk_staging_image_args_t args);
 // Mark allocated region as ready for upload
 void R_VkStagingUnlock(staging_handle_t handle);

-// Append copy commands to command buffer and mark staging as empty
-// FIXME: it's not empty yet, as it depends on cmdbuf being actually submitted and completed
-void R_VkStagingCommit(VkCommandBuffer cmdbuf);
-void R_VkStagingFrameFlip(void);
+// Append copy commands to command buffer.
+VkCommandBuffer R_VkStagingCommit(void);

-// FIXME Remove this with proper staging
-void R_VKStagingMarkEmpty_FIXME(void);
+// Mark previous frame data as uploaded and safe to use.
+void R_VkStagingFrameBegin(void);

-// Force commit synchronously
-void R_VkStagingFlushSync(void);
+// Uploads staging contents and returns the command buffer ready to be submitted.
+// Can return NULL if there's nothing to upload.
+VkCommandBuffer R_VkStagingFrameEnd(void);
+
+// Gets the current command buffer.
+// WARNING: Can be invalidated by any of the Lock calls
+VkCommandBuffer R_VkStagingGetCommandBuffer(void);
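A minimal caller-side sketch for this header (dest_buffer, data and data_size are placeholders; field names not visible in the diff, such as region.ptr, are assumptions):

    const vk_staging_buffer_args_t args = {
        .buffer = dest_buffer, // assumption: a VkBuffer created with TRANSFER_DST usage
        .size = data_size,
        .alignment = 4,
    };
    const vk_staging_region_t region = R_VkStagingLockForBuffer(args);
    ASSERT(region.ptr); // assumption: a zeroed region signals failure
    memcpy(region.ptr, data, data_size);
    R_VkStagingUnlock(region.handle);
    // The actual vkCmdCopyBuffer is recorded later, by R_VkStagingCommit()
    // or at frame end by R_VkStagingFrameEnd().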
@@ -482,7 +482,6 @@ static void BuildMipMap( byte *in, int srcWidth, int srcHeight, int srcDepth, in
 static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers, int num_layers, qboolean cubemap) {
     const VkFormat format = VK_GetFormat(layers[0]->type);
     int mipCount = 0;
-    const VkCommandBuffer cmdbuf = vk_core.upload_pool.buffers[0];

     // TODO non-rbga textures

@@ -546,12 +545,6 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
     }

     {
-        // 5. Create/get cmdbuf for transitions
-        VkCommandBufferBeginInfo beginfo = {
-            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
-            .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
-        };
-
         // 5.1 upload buf -> image:layout:DST
         // 5.1.1 transitionToLayout(UNDEFINED -> DST)
         VkImageMemoryBarrier image_barrier = {
@@ -569,11 +562,14 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
             .layerCount = num_layers,
         }};

-        XVK_CHECK(vkBeginCommandBuffer(cmdbuf, &beginfo));
-        vkCmdPipelineBarrier(cmdbuf,
-            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
-            VK_PIPELINE_STAGE_TRANSFER_BIT,
-            0, 0, NULL, 0, NULL, 1, &image_barrier);
+        {
+            // cmdbuf may become invalidated in locks in the loops below
+            const VkCommandBuffer cmdbuf = R_VkStagingGetCommandBuffer();
+            vkCmdPipelineBarrier(cmdbuf,
+                VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+                VK_PIPELINE_STAGE_TRANSFER_BIT,
+                0, 0, NULL, 0, NULL, 1, &image_barrier);
+        }

         // 5.1.2 copyBufferToImage for all mip levels
         for (int layer = 0; layer < num_layers; ++layer) {
@@ -621,7 +617,7 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
         }
     }

-    R_VkStagingCommit(cmdbuf);
+    const VkCommandBuffer cmdbuf = R_VkStagingCommit();

     // 5.2 image:layout:DST -> image:layout:SAMPLED
     // 5.2.1 transitionToLayout(DST -> SHADER_READ_ONLY)
@@ -641,19 +637,8 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
             0, 0, NULL, 0, NULL, 1, &image_barrier);

-        XVK_CHECK(vkEndCommandBuffer(cmdbuf));
     }

-    {
-        VkSubmitInfo subinfo = {.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO};
-        subinfo.commandBufferCount = 1;
-        subinfo.pCommandBuffers = &cmdbuf;
-        XVK_CHECK(vkQueueSubmit(vk_core.queue, 1, &subinfo, VK_NULL_HANDLE));
-        XVK_CHECK(vkQueueWaitIdle(vk_core.queue));
-    }
-
-    R_VKStagingMarkEmpty_FIXME();
-
     // TODO how should we approach this:
     // - per-texture desc sets can be inconvenient if texture is used in different incompatible contexts
     // - update descriptor sets in batch?
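The "cmdbuf may become invalidated" comment above is the one sharp edge of this scheme. A hedged sketch of the hazard (args is a placeholder):

    VkCommandBuffer cmdbuf = R_VkStagingGetCommandBuffer(); // begins/returns the current cmdbuf
    // ... record barriers/copies into cmdbuf ...
    const vk_staging_region_t r = R_VkStagingLockForImage(args);
    // If the ring or the alloc tables were full, that Lock flushed synchronously:
    // the old cmdbuf was ended and submitted, so any cached handle is now stale.
    cmdbuf = R_VkStagingGetCommandBuffer(); // re-fetch before recording again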