do not overwrite staging data until we know it's been uploaded

Ivan 'provod' Avdeev, 2022-06-25 11:12:48 -07:00 (committed by Ivan Avdeev)
parent be20ae2fb0
commit f8ea93656f
7 changed files with 60 additions and 23 deletions

View File

@@ -217,6 +217,7 @@ void R_BeginFrame( qboolean clearScene ) {
 	ASSERT(!g_frame.current.framebuffer.framebuffer);
 	waitForFrameFence();
+	R_VkStagingFrameFlip();
 	g_frame.current.framebuffer = R_VkSwapchainAcquire( g_frame.sem_framebuffer_ready[g_frame.current.index] );
 	vk_frame.width = g_frame.current.framebuffer.width;
@@ -251,8 +252,10 @@ static void enqueueRendering( VkCommandBuffer cmdbuf ) {
 	ASSERT(g_frame.current.phase == Phase_FrameBegan);
 
+	//R_VkStagingFlushSync();
 	R_VkStagingCommit(cmdbuf); // FIXME where and when
-	R_VKStagingMarkEmpty_FIXME();
+	VK_Render_FIXME_Barrier(cmdbuf);
 
 	if (g_frame.rtx_enabled)
 		VK_RenderEndRTX( cmdbuf, g_frame.current.framebuffer.view, g_frame.current.framebuffer.image, g_frame.current.framebuffer.width, g_frame.current.framebuffer.height );
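The new R_VkStagingFrameFlip() call is placed deliberately after waitForFrameFence(): only once the fence of the frame slot being reused has signalled do we know that frame's staging-to-GPU copies have completed, so its staging region can be recycled. A generic sketch of that ordering, not the engine's actual frame loop (the fence array, helper name and indexing below are illustrative assumptions):

// Sketch only: generic two-frames-in-flight frame begin, NOT the engine's code.
// g_fences[] and recycle_oldest_staging_frame() stand in for waitForFrameFence()
// and R_VkStagingFrameFlip().
#include <stdint.h>
#include <vulkan/vulkan.h>

#define MAX_FRAMES_IN_FLIGHT 2

static VkFence g_fences[MAX_FRAMES_IN_FLIGHT]; // created signalled at init (not shown)

static void recycle_oldest_staging_frame(void); // e.g. R_VkStagingFrameFlip()

static void begin_frame(VkDevice device, uint32_t frame_index) {
	const VkFence fence = g_fences[frame_index % MAX_FRAMES_IN_FLIGHT];

	// 1. Block until the GPU has finished the frame that last used this slot.
	//    Only now do we know its staging->device copies have actually executed.
	vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
	vkResetFences(device, 1, &fence);

	// 2. Only now is it safe to hand that frame's staging range back to the
	//    allocator; recycling before the wait would let the CPU overwrite data
	//    the GPU may still be copying out of the staging buffer.
	recycle_oldest_staging_frame();

	// 3. Then acquire the swapchain image and start recording the new frame.
}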

View File

@@ -622,6 +622,26 @@ static uint32_t writeDlightsToUBO( void )
 	return ubo_lights_offset;
 }
 
+void VK_Render_FIXME_Barrier( VkCommandBuffer cmdbuf )
+	// FIXME
+{
+	const VkBufferMemoryBarrier bmb[] = { {
+		.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+		.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+		//.dstAccessMask = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, // FIXME
+		.dstAccessMask = VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, // FIXME
+		.buffer = g_geom.buffer.buffer,
+		.offset = 0, // FIXME
+		.size = VK_WHOLE_SIZE, // FIXME
+	} };
+	vkCmdPipelineBarrier(cmdbuf,
+		VK_PIPELINE_STAGE_TRANSFER_BIT,
+		//VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
+		//VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR,
+		VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+		0, 0, NULL, ARRAYSIZE(bmb), bmb, 0, NULL);
+}
+
 void VK_RenderEnd( VkCommandBuffer cmdbuf )
 {
 	// TODO we can sort collected draw commands for more efficient and correct rendering
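VK_Render_FIXME_Barrier orders the staging copies recorded by R_VkStagingCommit against geometry fetch: transfer writes to the geometry buffer must be made available and visible before the same command buffer's index/vertex reads (the commented-out masks are placeholders for the ray tracing path, which reads this buffer during acceleration structure builds instead). For comparison only, a sketch of the same dependency expressed with synchronization2; this is not part of the commit and assumes Vulkan 1.3 (or VK_KHR_synchronization2) headers:

// Sketch only, not part of this commit: the same transfer -> vertex-input
// dependency written with synchronization2.
#include <vulkan/vulkan.h>

static void geometryUploadBarrier2( VkCommandBuffer cmdbuf, VkBuffer geom_buffer ) {
	const VkBufferMemoryBarrier2 bmb = {
		.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
		.srcStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
		.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
		.dstStageMask = VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT,
		.dstAccessMask = VK_ACCESS_2_INDEX_READ_BIT | VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT,
		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
		.buffer = geom_buffer,
		.offset = 0,
		.size = VK_WHOLE_SIZE,
	};
	const VkDependencyInfo dep = {
		.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
		.bufferMemoryBarrierCount = 1,
		.pBufferMemoryBarriers = &bmb,
	};
	vkCmdPipelineBarrier2( cmdbuf, &dep );
}

The FIXMEs about offset and size apply to this sketch as well: VK_WHOLE_SIZE is conservative, and a tighter barrier could cover only the range written this frame.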

View File

@@ -152,3 +152,5 @@ void VK_RenderDebugLabelEnd( void );
 void VK_RenderBegin( qboolean ray_tracing );
 void VK_RenderEnd( VkCommandBuffer cmdbuf );
 void VK_RenderEndRTX( VkCommandBuffer cmdbuf, VkImageView img_dst_view, VkImage img_dst, uint32_t w, uint32_t h );
+
+void VK_Render_FIXME_Barrier( VkCommandBuffer cmdbuf );

View File

@@ -167,7 +167,6 @@ void R_NewMap( void ) {
 	if (vk_core.rtx)
 		VK_RayNewMap();
 
 	// Load light entities and patch data prior to loading map brush model
 	XVK_ParseMapEntities();

View File

@@ -1,12 +1,11 @@
 #include "vk_staging.h"
 #include "vk_buffer.h"
+#include "alolcator.h"
 
 #include <memory.h>
 
-#define DEFAULT_STAGING_SIZE (16*1024*1024)
-#define MAX_STAGING_ALLOCS (1024)
-#define ALLOC_FAILED 0xffffffffu
+#define DEFAULT_STAGING_SIZE (64*1024*1024)
+#define MAX_STAGING_ALLOCS (2048)
 
 typedef struct {
 	VkImage image;
@@ -15,7 +14,7 @@ typedef struct {
 static struct {
 	vk_buffer_t buffer;
-	uint32_t offset;
+	alo_ring_t ring;
 
 	struct {
 		VkBuffer dest[MAX_STAGING_ALLOCS];
@@ -31,12 +30,18 @@ static struct {
 		int count;
 		int committed;
 	} images;
+
+	struct {
+		uint32_t offset;
+	} frames[2];
 } g_staging = {0};
 
 qboolean R_VkStagingInit(void) {
 	if (!VK_BufferCreate("staging", &g_staging.buffer, DEFAULT_STAGING_SIZE, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))
 		return false;
 
+	aloRingInit(&g_staging.ring, DEFAULT_STAGING_SIZE);
+
 	return true;
 }
@@ -44,25 +49,17 @@ void R_VkStagingShutdown(void) {
 	VK_BufferDestroy(&g_staging.buffer);
 }
 
-static uint32_t stagingAlloc(uint32_t size, uint32_t alignment) {
-	const uint32_t offset = ALIGN_UP(g_staging.offset, alignment);
-	if ( offset + size > g_staging.buffer.size )
-		return ALLOC_FAILED;
-	g_staging.offset = offset + size;
-	return offset;
-}
-
 vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args) {
 	if ( g_staging.buffers.count >= MAX_STAGING_ALLOCS )
 		return (vk_staging_region_t){0};
 
 	const int index = g_staging.buffers.count;
 
-	const uint32_t offset = stagingAlloc(args.size, args.alignment);
-	if (offset == ALLOC_FAILED)
+	const uint32_t offset = aloRingAlloc(&g_staging.ring, args.size, args.alignment);
+	if (offset == ALO_ALLOC_FAILED)
 		return (vk_staging_region_t){0};
 
+	if (g_staging.frames[1].offset == ALO_ALLOC_FAILED)
+		g_staging.frames[1].offset = offset;
+
 	g_staging.buffers.dest[index] = args.buffer;
 	g_staging.buffers.copy[index] = (VkBufferCopy){
@@ -86,9 +83,11 @@ vk_staging_region_t R_VkStagingLockForImage(vk_staging_image_args_t args) {
 	const int index = g_staging.images.count;
 	staging_image_t *const dest = g_staging.images.dest + index;
 
-	const uint32_t offset = stagingAlloc(args.size, args.alignment);
-	if (offset == ALLOC_FAILED)
+	const uint32_t offset = aloRingAlloc(&g_staging.ring, args.size, args.alignment);
+	if (offset == ALO_ALLOC_FAILED)
 		return (vk_staging_region_t){0};
 
+	if (g_staging.frames[1].offset == ALO_ALLOC_FAILED)
+		g_staging.frames[1].offset = offset;
+
 	dest->image = args.image;
 	dest->layout = args.layout;
@@ -157,10 +156,22 @@ void R_VkStagingCommit(VkCommandBuffer cmdbuf) {
 	commitImages(cmdbuf);
 }
 
+void R_VkStagingFrameFlip(void) {
+	if (g_staging.frames[0].offset != ALO_ALLOC_FAILED)
+		aloRingFree(&g_staging.ring, g_staging.frames[0].offset);
+
+	g_staging.frames[0] = g_staging.frames[1];
+	g_staging.frames[1].offset = ALO_ALLOC_FAILED;
+	g_staging.buffers.committed = g_staging.buffers.count = 0;
+	g_staging.images.committed = g_staging.images.count = 0;
+}
+
 void R_VKStagingMarkEmpty_FIXME(void) {
 	g_staging.buffers.committed = g_staging.buffers.count = 0;
 	g_staging.images.committed = g_staging.images.count = 0;
-	g_staging.offset = 0;
+
+	g_staging.frames[0].offset = g_staging.frames[1].offset = ALO_ALLOC_FAILED;
+	aloRingInit(&g_staging.ring, DEFAULT_STAGING_SIZE);
 }
 
 void R_VkStagingFlushSync(void) {
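Taken together, the staging buffer is now a ring: aloRingAlloc hands out offsets, frames[1].offset records the first offset allocated for the frame currently being recorded, and R_VkStagingFrameFlip() (called from R_BeginFrame after the frame fence wait) releases the region owned by the frame that retired two flips ago before shifting the markers. A minimal standalone model of that bookkeeping, a sketch with its own simplified ring rather than the engine's alolcator API (no alignment, naive wrap handling):

// Standalone sketch, not the engine's alolcator: a ring with one write cursor
// and per-frame "first offset" markers, recycled one full frame later.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE (64u * 1024u * 1024u)
#define NO_OFFSET UINT32_MAX

typedef struct {
	uint32_t head, tail;      // monotonic byte counters; in-flight data lives in [tail, head)
	uint32_t frame_start[2];  // [0] = oldest in-flight frame, [1] = frame being recorded now
} staging_ring_t;

static void ring_init(staging_ring_t *r) {
	r->head = r->tail = 0;
	r->frame_start[0] = r->frame_start[1] = NO_OFFSET;
}

// Returns a byte offset into the staging buffer, or NO_OFFSET if the ring is
// full, i.e. the previous frames' data has not been retired yet.
static uint32_t ring_alloc(staging_ring_t *r, uint32_t size) {
	assert(size > 0 && size <= RING_SIZE);
	uint32_t head = r->head;
	uint32_t offset = head % RING_SIZE;
	if (offset + size > RING_SIZE) { // don't straddle the wrap point; pad to the start
		head += RING_SIZE - offset;
		offset = 0;
	}
	if (head + size - r->tail > RING_SIZE)
		return NO_OFFSET;
	if (r->frame_start[1] == NO_OFFSET)
		r->frame_start[1] = head; // remember where the current frame began
	r->head = head + size;
	return offset;
}

// Call once per frame, after waiting on the fence of the frame that is retiring:
// everything that frame wrote becomes reusable.
static void ring_frame_flip(staging_ring_t *r) {
	if (r->frame_start[0] != NO_OFFSET)
		r->tail = (r->frame_start[1] != NO_OFFSET) ? r->frame_start[1] : r->head;
	r->frame_start[0] = r->frame_start[1];
	r->frame_start[1] = NO_OFFSET;
}

int main(void) {
	staging_ring_t r;
	ring_init(&r);
	printf("frame 0: %u\n", (unsigned)ring_alloc(&r, 1024)); // 0
	ring_frame_flip(&r);                                     // frame 0 now in flight
	printf("frame 1: %u\n", (unsigned)ring_alloc(&r, 1024)); // 1024
	ring_frame_flip(&r);                                     // frame 0 retired, its bytes freed
	printf("frame 2: %u\n", (unsigned)ring_alloc(&r, 1024)); // 2048; freed space is reused after the ring wraps
	return 0;
}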

View File

@@ -37,6 +37,7 @@ void R_VkStagingUnlock(staging_handle_t handle);
 // Append copy commands to command buffer and mark staging as empty
 // FIXME: it's not empty yet, as it depends on cmdbuf being actually submitted and completed
 void R_VkStagingCommit(VkCommandBuffer cmdbuf);
+void R_VkStagingFrameFlip(void);
 
 // FIXME Remove this with proper staging
 void R_VKStagingMarkEmpty_FIXME(void);

View File

@@ -622,7 +622,6 @@ static qboolean uploadTexture(vk_texture_t *tex, rgbdata_t *const *const layers,
 	}
 
 	R_VkStagingCommit(cmdbuf);
-	R_VKStagingMarkEmpty_FIXME();
 
 	// 5.2 image:layout:DST -> image:layout:SAMPLED
 	// 5.2.1 transitionToLayout(DST -> SHADER_READ_ONLY)
@@ -653,6 +652,8 @@
 		XVK_CHECK(vkQueueWaitIdle(vk_core.queue));
 	}
 
+	R_VKStagingMarkEmpty_FIXME();
+
 	// TODO how should we approach this:
 	// - per-texture desc sets can be inconvenient if texture is used in different incompatible contexts
 	// - update descriptor sets in batch?