2022-04-28 09:16:25 +02:00
# include "vk_staging.h"
# include "vk_buffer.h"
2022-06-25 20:12:48 +02:00
# include "alolcator.h"
2022-09-11 20:38:51 +02:00
# include "vk_commandpool.h"
2023-03-18 19:36:05 +01:00
# include "profiler.h"
2023-03-23 18:49:42 +01:00
# include "r_speeds.h"
2023-04-07 20:14:41 +02:00
# include "vk_combuf.h"
2022-04-28 09:16:25 +02:00
# include <memory.h>
2023-06-13 18:39:50 +02:00
# define MODULE_NAME "staging"
2023-03-04 20:31:34 +01:00
# define DEFAULT_STAGING_SIZE (128*1024*1024)
2022-06-25 20:12:48 +02:00
# define MAX_STAGING_ALLOCS (2048)
2022-09-11 20:38:51 +02:00
# define MAX_CONCURRENT_FRAMES 2
2022-09-17 19:54:18 +02:00
# define COMMAND_BUFFER_COUNT (MAX_CONCURRENT_FRAMES + 1) // to accommodate two frames in flight plus something trying to upload data before waiting for the next frame to complete
2022-06-04 23:04:44 +02:00
2022-04-28 09:16:25 +02:00
// A pending image-upload destination: where staged bytes will be copied
// when the staging command buffer is committed.
typedef struct {
	VkImage image;
	VkImageLayout layout; // layout the image is expected to be in at copy time
	size_t size; // for stats only
} staging_image_t;
2022-04-28 09:16:25 +02:00
static struct {
vk_buffer_t buffer ;
2022-09-11 21:09:47 +02:00
r_flipping_buffer_t buffer_alloc ;
2022-06-04 23:04:44 +02:00
struct {
VkBuffer dest [ MAX_STAGING_ALLOCS ] ;
VkBufferCopy copy [ MAX_STAGING_ALLOCS ] ;
int count ;
} buffers ;
struct {
staging_image_t dest [ MAX_STAGING_ALLOCS ] ;
VkBufferImageCopy copy [ MAX_STAGING_ALLOCS ] ;
int count ;
} images ;
2022-06-25 20:12:48 +02:00
2023-04-07 20:14:41 +02:00
vk_combuf_t * combuf [ 3 ] ;
// Currently opened command buffer, ready to accept new commands
vk_combuf_t * current ;
2023-03-23 18:49:42 +01:00
struct {
2023-03-25 18:20:45 +01:00
int total_size ;
int buffers_size ;
int images_size ;
int buffer_chunks ;
int images ;
2023-03-23 18:49:42 +01:00
} stats ;
2023-04-10 18:26:37 +02:00
int buffer_upload_scope_id ;
int image_upload_scope_id ;
2022-04-28 09:16:25 +02:00
} g_staging = { 0 } ;
qboolean R_VkStagingInit ( void ) {
if ( ! VK_BufferCreate ( " staging " , & g_staging . buffer , DEFAULT_STAGING_SIZE , VK_BUFFER_USAGE_TRANSFER_SRC_BIT , VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ) )
return false ;
2023-04-07 20:14:41 +02:00
g_staging . combuf [ 0 ] = R_VkCombufOpen ( ) ;
g_staging . combuf [ 1 ] = R_VkCombufOpen ( ) ;
g_staging . combuf [ 2 ] = R_VkCombufOpen ( ) ;
2022-09-11 20:38:51 +02:00
2022-09-11 21:09:47 +02:00
R_FlippingBuffer_Init ( & g_staging . buffer_alloc , DEFAULT_STAGING_SIZE ) ;
2022-06-25 20:12:48 +02:00
2023-06-14 20:23:09 +02:00
R_SPEEDS_COUNTER ( g_staging . stats . total_size , " total_size " , kSpeedsMetricBytes ) ;
R_SPEEDS_COUNTER ( g_staging . stats . buffers_size , " buffers_size " , kSpeedsMetricBytes ) ;
R_SPEEDS_COUNTER ( g_staging . stats . images_size , " images_size " , kSpeedsMetricBytes ) ;
2023-03-23 18:49:42 +01:00
2023-06-14 20:23:09 +02:00
R_SPEEDS_COUNTER ( g_staging . stats . buffer_chunks , " buffer_chunks " , kSpeedsMetricCount ) ;
R_SPEEDS_COUNTER ( g_staging . stats . images , " images " , kSpeedsMetricCount ) ;
2023-03-23 18:49:42 +01:00
2023-04-10 18:26:37 +02:00
g_staging . buffer_upload_scope_id = R_VkGpuScope_Register ( " staging_buffers " ) ;
g_staging . image_upload_scope_id = R_VkGpuScope_Register ( " staging_images " ) ;
2022-04-28 09:16:25 +02:00
return true ;
}
// Destroy the staging buffer.
// NOTE(review): does not free the combufs opened in R_VkStagingInit and
// presumably assumes all uploads have completed — confirm against vk_combuf ownership.
void R_VkStagingShutdown(void) {
	VK_BufferDestroy(&g_staging.buffer);
}
2023-04-01 06:01:17 +02:00
// FIXME There's a severe race condition here. Submitting things manually and prematurely (before framectl had a chance to synchronize with the previous frame)
// may lead to data races and memory corruption (e.g. writing into memory that's being read in some pipeline stage still going)
2023-03-04 19:36:47 +01:00
void R_VkStagingFlushSync ( void ) {
2023-03-21 19:47:35 +01:00
APROF_SCOPE_DECLARE_BEGIN ( function , __FUNCTION__ ) ;
2023-03-18 19:36:05 +01:00
2023-04-07 20:14:41 +02:00
vk_combuf_t * combuf = R_VkStagingCommit ( ) ;
if ( ! combuf )
2023-03-18 19:36:05 +01:00
goto end ;
2022-09-11 20:38:51 +02:00
2023-04-07 20:14:41 +02:00
R_VkCombufEnd ( combuf ) ;
g_staging . current = NULL ;
2022-09-11 20:38:51 +02:00
2023-03-04 19:36:47 +01:00
//gEngine.Con_Reportf(S_WARN "flushing staging buffer img count=%d\n", g_staging.images.count);
2022-09-11 20:38:51 +02:00
2023-03-18 21:28:14 +01:00
{
const VkSubmitInfo subinfo = {
. sType = VK_STRUCTURE_TYPE_SUBMIT_INFO ,
. commandBufferCount = 1 ,
2023-04-07 20:14:41 +02:00
. pCommandBuffers = & combuf - > cmdbuf ,
2023-03-18 21:28:14 +01:00
} ;
// TODO wait for previous command buffer completion. Why: we might end up writing into the same dst
XVK_CHECK ( vkQueueSubmit ( vk_core . queue , 1 , & subinfo , VK_NULL_HANDLE ) ) ;
2023-04-07 20:14:41 +02:00
// TODO wait for fence, not this
2023-03-18 21:28:14 +01:00
XVK_CHECK ( vkQueueWaitIdle ( vk_core . queue ) ) ;
}
2022-09-11 20:38:51 +02:00
2022-09-17 19:54:18 +02:00
g_staging . buffers . count = 0 ;
g_staging . images . count = 0 ;
2022-09-11 21:09:47 +02:00
R_FlippingBuffer_Clear ( & g_staging . buffer_alloc ) ;
2023-03-18 19:36:05 +01:00
end :
2023-03-21 19:47:35 +01:00
APROF_SCOPE_END ( function ) ;
2022-09-11 20:38:51 +02:00
} ;
// Allocate `size` bytes (aligned to `alignment`, minimum 1) out of the
// staging ring. On exhaustion, force a synchronous flush and retry once;
// the caller must still check for ALO_ALLOC_FAILED.
static uint32_t allocateInRing(uint32_t size, uint32_t alignment) {
	if (alignment < 1)
		alignment = 1;

	{
		const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment);
		if (offset != ALO_ALLOC_FAILED)
			return offset;
	}

	// Ring is full: drain everything that's pending, then try again
	R_VkStagingFlushSync();
	return R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment);
}
2022-06-04 23:04:44 +02:00
// Reserve a staging region destined for a buffer copy. Returns a zeroed
// region (ptr == NULL) on allocation failure.
vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args) {
	// No room to track another copy region: flush synchronously first
	if (g_staging.buffers.count >= MAX_STAGING_ALLOCS)
		R_VkStagingFlushSync();

	const uint32_t offset = allocateInRing(args.size, args.alignment);
	if (ALO_ALLOC_FAILED == offset)
		return (vk_staging_region_t){0};

	const int index = g_staging.buffers.count++;
	g_staging.buffers.dest[index] = args.buffer;
	g_staging.buffers.copy[index] = (VkBufferCopy){
		.srcOffset = offset,
		.dstOffset = args.offset,
		.size = args.size,
	};

	return (vk_staging_region_t){
		.ptr = (char *)g_staging.buffer.mapped + offset,
		.handle = index, // buffer handles occupy [0, MAX_STAGING_ALLOCS)
	};
}
2022-06-04 23:04:44 +02:00
// Reserve a staging region destined for an image copy. Returns a zeroed
// region (ptr == NULL) on allocation failure.
vk_staging_region_t R_VkStagingLockForImage(vk_staging_image_args_t args) {
	// No room to track another copy region: flush synchronously first
	if (g_staging.images.count >= MAX_STAGING_ALLOCS)
		R_VkStagingFlushSync();

	const uint32_t offset = allocateInRing(args.size, args.alignment);
	if (ALO_ALLOC_FAILED == offset)
		return (vk_staging_region_t){0};

	const int index = g_staging.images.count++;

	staging_image_t *const dest = g_staging.images.dest + index;
	dest->image = args.image;
	dest->layout = args.layout;
	dest->size = args.size; // tracked for stats only

	VkBufferImageCopy *const copy = g_staging.images.copy + index;
	*copy = args.region;
	copy->bufferOffset += offset; // caller's offset is relative to this allocation

	return (vk_staging_region_t){
		.ptr = (char *)g_staging.buffer.mapped + offset,
		.handle = index + MAX_STAGING_ALLOCS, // image handles occupy the upper half
	};
}
2022-06-04 23:04:44 +02:00
// Release a staging region previously returned by a LockFor* function.
// Handles in [0, MAX_STAGING_ALLOCS) are buffers; [MAX_STAGING_ALLOCS, 2*MAX_STAGING_ALLOCS) are images.
void R_VkStagingUnlock(staging_handle_t handle) {
	ASSERT(handle >= 0);
	ASSERT(handle < MAX_STAGING_ALLOCS * 2);
	// FIXME mark and check ready
}
2023-04-10 18:26:37 +02:00
// Record all pending buffer-to-buffer copies into `combuf`, coalescing
// consecutive entries with the same destination buffer into a single
// vkCmdCopyBuffer call. Clears the pending-buffer list.
static void commitBuffers(vk_combuf_t *combuf) {
	if (!g_staging.buffers.count)
		return;

	// FIX: take the command buffer from the argument instead of
	// g_staging.current. They are the same for the current callers, but the
	// function is now correct regardless of which combuf it is given.
	const VkCommandBuffer cmdbuf = combuf->cmdbuf;
	const int begin_index = R_VkCombufScopeBegin(combuf, g_staging.buffer_upload_scope_id);

	// TODO better coalescing:
	// - upload once per buffer
	// - join adjacent regions
	VkBuffer prev_buffer = VK_NULL_HANDLE;
	int first_copy = 0;
	for (int i = 0; i < g_staging.buffers.count; i++) {
		/* { */
		/* 	const VkBufferCopy *const copy = g_staging.buffers.copy + i; */
		/* 	gEngine.Con_Reportf("  %d: [%08llx, %08llx) => [%08llx, %08llx)\n", i, copy->srcOffset, copy->srcOffset + copy->size, copy->dstOffset, copy->dstOffset + copy->size); */
		/* } */

		// FIX: account every copy's size, not just the first of each run;
		// previously the `continue` below skipped this for coalesced entries,
		// undercounting buffers_size.
		g_staging.stats.buffers_size += g_staging.buffers.copy[i].size;

		if (prev_buffer == g_staging.buffers.dest[i])
			continue;

		// Destination changed: flush the accumulated run [first_copy, i)
		if (prev_buffer != VK_NULL_HANDLE) {
			DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", prev_buffer, i - first_copy);
			g_staging.stats.buffer_chunks++;
			vkCmdCopyBuffer(cmdbuf, g_staging.buffer.buffer,
				prev_buffer,
				i - first_copy, g_staging.buffers.copy + first_copy);
		}

		prev_buffer = g_staging.buffers.dest[i];
		first_copy = i;
	}

	// Flush the final run
	if (prev_buffer != VK_NULL_HANDLE) {
		DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", prev_buffer, g_staging.buffers.count - first_copy);
		g_staging.stats.buffer_chunks++;
		vkCmdCopyBuffer(cmdbuf, g_staging.buffer.buffer,
			prev_buffer,
			g_staging.buffers.count - first_copy, g_staging.buffers.copy + first_copy);
	}

	g_staging.buffers.count = 0;
	R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT);
}
2023-04-10 18:26:37 +02:00
// Record all pending buffer-to-image copies into `combuf`, one
// vkCmdCopyBufferToImage per pending entry. Clears the pending-image list.
static void commitImages(vk_combuf_t *combuf) {
	if (!g_staging.images.count)
		return;

	// FIX: take the command buffer from the argument instead of
	// g_staging.current (same value today, but self-contained w.r.t. the parameter).
	const VkCommandBuffer cmdbuf = combuf->cmdbuf;
	const int begin_index = R_VkCombufScopeBegin(combuf, g_staging.image_upload_scope_id);
	for (int i = 0; i < g_staging.images.count; i++) {
		/* { */
		/* 	const VkBufferImageCopy *const copy = g_staging.images.copy + i; */
		/* 	gEngine.Con_Reportf("  i%d: [%08llx, ?) => %p\n", i, copy->bufferOffset, g_staging.images.dest[i].image); */
		/* } */

		g_staging.stats.images++;
		g_staging.stats.images_size += g_staging.images.dest[i].size;

		vkCmdCopyBufferToImage(cmdbuf, g_staging.buffer.buffer,
			g_staging.images.dest[i].image,
			g_staging.images.dest[i].layout,
			1, g_staging.images.copy + i);
	}

	g_staging.images.count = 0;
	R_VkCombufScopeEnd(combuf, begin_index, VK_PIPELINE_STAGE_TRANSFER_BIT);
}
2023-04-07 20:14:41 +02:00
// Return the currently open staging command buffer, lazily opening
// combuf[0] if none is in flight.
static vk_combuf_t *getCurrentCombuf(void) {
	if (g_staging.current)
		return g_staging.current;

	g_staging.current = g_staging.combuf[0];
	R_VkCombufBegin(g_staging.current);
	return g_staging.current;
}
2022-09-11 20:38:51 +02:00
2023-04-07 20:14:41 +02:00
VkCommandBuffer R_VkStagingGetCommandBuffer ( void ) {
return getCurrentCombuf ( ) - > cmdbuf ;
2022-09-11 20:38:51 +02:00
}
2023-04-07 20:14:41 +02:00
vk_combuf_t * R_VkStagingCommit ( void ) {
if ( ! g_staging . images . count & & ! g_staging . buffers . count & & ! g_staging . current )
2022-09-11 20:38:51 +02:00
return VK_NULL_HANDLE ;
2022-06-04 23:04:44 +02:00
2023-04-07 20:14:41 +02:00
getCurrentCombuf ( ) ;
2023-04-10 18:26:37 +02:00
commitBuffers ( g_staging . current ) ;
commitImages ( g_staging . current ) ;
2023-04-07 20:14:41 +02:00
return g_staging . current ;
2022-05-04 18:23:37 +02:00
}
2022-09-11 20:38:51 +02:00
// Begin a new frame: flip the ring allocator to its other half and discard
// any copy regions that were never committed.
void R_VkStagingFrameBegin(void) {
	R_FlippingBuffer_Flip(&g_staging.buffer_alloc);

	g_staging.buffers.count = 0;
	g_staging.images.count = 0;
}
2023-04-07 20:14:41 +02:00
vk_combuf_t * R_VkStagingFrameEnd ( void ) {
R_VkStagingCommit ( ) ;
vk_combuf_t * current = g_staging . current ;
2022-04-28 09:16:25 +02:00
2023-04-07 20:14:41 +02:00
if ( current ) {
R_VkCombufEnd ( g_staging . current ) ;
}
2022-04-28 09:16:25 +02:00
2023-04-07 20:14:41 +02:00
g_staging . current = NULL ;
vk_combuf_t * const tmp = g_staging . combuf [ 0 ] ;
g_staging . combuf [ 0 ] = g_staging . combuf [ 1 ] ;
g_staging . combuf [ 1 ] = g_staging . combuf [ 2 ] ;
g_staging . combuf [ 2 ] = tmp ;
2022-06-04 23:04:44 +02:00
2023-03-25 18:45:37 +01:00
g_staging . stats . total_size = g_staging . stats . images_size + g_staging . stats . buffers_size ;
2023-03-23 18:49:42 +01:00
2023-04-07 20:14:41 +02:00
return current ;
2022-04-28 09:16:25 +02:00
}