#include "vk_staging.h"
#include "vk_buffer.h"
#include "alolcator.h"
#include "vk_commandpool.h"

#include <memory.h>

#define DEFAULT_STAGING_SIZE (128*1024*1024)
#define MAX_STAGING_ALLOCS (2048)
#define MAX_CONCURRENT_FRAMES 2
#define COMMAND_BUFFER_COUNT (MAX_CONCURRENT_FRAMES + 1) // to accommodate two frames in flight plus something trying to upload data before waiting for the next frame to complete

typedef struct {
2022-06-04 23:04:44 +02:00
VkImage image ;
VkImageLayout layout ;
} staging_image_t ;
2022-04-28 09:16:25 +02:00
static struct {
vk_buffer_t buffer ;
2022-09-11 21:09:47 +02:00
r_flipping_buffer_t buffer_alloc ;
2022-06-04 23:04:44 +02:00
struct {
VkBuffer dest [ MAX_STAGING_ALLOCS ] ;
VkBufferCopy copy [ MAX_STAGING_ALLOCS ] ;
int count ;
} buffers ;
struct {
staging_image_t dest [ MAX_STAGING_ALLOCS ] ;
VkBufferImageCopy copy [ MAX_STAGING_ALLOCS ] ;
int count ;
} images ;
2022-06-25 20:12:48 +02:00
2022-09-11 20:38:51 +02:00
vk_command_pool_t upload_pool ;
VkCommandBuffer cmdbuf ;
2022-04-28 09:16:25 +02:00
} g_staging = { 0 } ;
qboolean R_VkStagingInit ( void ) {
if ( ! VK_BufferCreate ( " staging " , & g_staging . buffer , DEFAULT_STAGING_SIZE , VK_BUFFER_USAGE_TRANSFER_SRC_BIT , VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ) )
return false ;
2022-09-17 19:54:18 +02:00
g_staging . upload_pool = R_VkCommandPoolCreate ( COMMAND_BUFFER_COUNT ) ;
2022-09-11 20:38:51 +02:00
2022-09-11 21:09:47 +02:00
R_FlippingBuffer_Init ( & g_staging . buffer_alloc , DEFAULT_STAGING_SIZE ) ;
2022-06-25 20:12:48 +02:00
2022-04-28 09:16:25 +02:00
return true ;
}
// Destroys staging resources.
// Fix: destroy in reverse order of creation (pool was created after the
// buffer in R_VkStagingInit, so it is destroyed first here).
void R_VkStagingShutdown(void) {
	R_VkCommandPoolDestroy(&g_staging.upload_pool);
	VK_BufferDestroy(&g_staging.buffer);
}
2023-03-04 19:36:47 +01:00
void R_VkStagingFlushSync ( void ) {
2022-09-11 20:38:51 +02:00
const VkCommandBuffer cmdbuf = R_VkStagingCommit ( ) ;
if ( ! cmdbuf )
return ;
XVK_CHECK ( vkEndCommandBuffer ( cmdbuf ) ) ;
g_staging . cmdbuf = VK_NULL_HANDLE ;
2023-03-04 19:36:47 +01:00
//gEngine.Con_Reportf(S_WARN "flushing staging buffer img count=%d\n", g_staging.images.count);
2022-09-11 20:38:51 +02:00
const VkSubmitInfo subinfo = {
. sType = VK_STRUCTURE_TYPE_SUBMIT_INFO ,
. commandBufferCount = 1 ,
. pCommandBuffers = & cmdbuf ,
} ;
2022-09-11 21:09:47 +02:00
// TODO wait for previous command buffer completion. Why: we might end up writing into the same dst
2022-09-11 20:38:51 +02:00
XVK_CHECK ( vkQueueSubmit ( vk_core . queue , 1 , & subinfo , VK_NULL_HANDLE ) ) ;
XVK_CHECK ( vkQueueWaitIdle ( vk_core . queue ) ) ;
2022-09-17 19:54:18 +02:00
g_staging . buffers . count = 0 ;
g_staging . images . count = 0 ;
2022-09-11 21:09:47 +02:00
R_FlippingBuffer_Clear ( & g_staging . buffer_alloc ) ;
2022-09-11 20:38:51 +02:00
} ;
// Allocates `size` bytes with the given alignment from the staging ring.
// On failure, synchronously flushes pending uploads to reclaim space and
// retries once. Returns the byte offset into the staging buffer, or
// ALO_ALLOC_FAILED if even the retry fails.
static uint32_t allocateInRing(uint32_t size, uint32_t alignment) {
	if (alignment < 1)
		alignment = 1;

	const uint32_t offset = R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment);
	if (offset != ALO_ALLOC_FAILED)
		return offset;

	// Ring is full: drain everything to the GPU, then try again
	R_VkStagingFlushSync();

	return R_FlippingBuffer_Alloc(&g_staging.buffer_alloc, size, alignment);
}
2022-06-04 23:04:44 +02:00
// Reserves a staging region for a buffer upload. The caller writes its data
// through .ptr and then calls R_VkStagingUnlock() with .handle.
// Returns a zeroed region (.ptr == NULL) on allocation failure.
vk_staging_region_t R_VkStagingLockForBuffer(vk_staging_buffer_args_t args) {
	// Out of copy slots: drain pending uploads before taking a new one
	if (g_staging.buffers.count >= MAX_STAGING_ALLOCS)
		R_VkStagingFlushSync();

	const uint32_t offset = allocateInRing(args.size, args.alignment);
	if (offset == ALO_ALLOC_FAILED)
		return (vk_staging_region_t){0};

	const int index = g_staging.buffers.count++;

	g_staging.buffers.dest[index] = args.buffer;
	g_staging.buffers.copy[index] = (VkBufferCopy){
		.srcOffset = offset,
		.dstOffset = args.offset,
		.size = args.size,
	};

	return (vk_staging_region_t){
		.ptr = (char *)g_staging.buffer.mapped + offset,
		.handle = index,
	};
}
2022-06-04 23:04:44 +02:00
// Reserves a staging region for an image upload; see R_VkStagingLockForBuffer.
// Image handles are offset by MAX_STAGING_ALLOCS so buffers and images share
// one handle space.
vk_staging_region_t R_VkStagingLockForImage(vk_staging_image_args_t args) {
	// Out of copy slots: drain pending uploads before taking a new one
	if (g_staging.images.count >= MAX_STAGING_ALLOCS)
		R_VkStagingFlushSync();

	const uint32_t offset = allocateInRing(args.size, args.alignment);
	if (offset == ALO_ALLOC_FAILED)
		return (vk_staging_region_t){0};

	const int index = g_staging.images.count++;

	staging_image_t *const dest = g_staging.images.dest + index;
	dest->image = args.image;
	dest->layout = args.layout;

	g_staging.images.copy[index] = args.region;
	// The caller's region offset is relative to its staged data; rebase it
	// onto the ring allocation within the shared staging buffer.
	g_staging.images.copy[index].bufferOffset += offset;

	return (vk_staging_region_t){
		.ptr = (char *)g_staging.buffer.mapped + offset,
		.handle = index + MAX_STAGING_ALLOCS,
	};
}
2022-06-04 23:04:44 +02:00
// Marks a previously locked staging region as filled. Currently only
// validates that the handle is within the combined buffer+image handle range.
void R_VkStagingUnlock(staging_handle_t handle) {
	ASSERT(handle >= 0);
	ASSERT(handle < MAX_STAGING_ALLOCS * 2);

	// FIXME mark and check ready
}
2022-06-04 23:04:44 +02:00
// Records vkCmdCopyBuffer commands for all pending buffer regions, batching
// consecutive regions that target the same destination buffer into a single
// command. Clears the pending-buffers table afterwards.
// TODO better coalescing:
// - upload once per buffer
// - join adjacent regions
static void commitBuffers(VkCommandBuffer cmdbuf) {
	const int total = g_staging.buffers.count;
	int run_begin = 0;

	// Scan runs of identical destination buffers and emit one copy per run
	while (run_begin < total) {
		const VkBuffer dst = g_staging.buffers.dest[run_begin];
		int run_end = run_begin + 1;
		while (run_end < total && g_staging.buffers.dest[run_end] == dst)
			run_end++;

		if (dst != VK_NULL_HANDLE) {
			DEBUG_NV_CHECKPOINTF(cmdbuf, "staging dst_buffer=%p count=%d", dst, run_end - run_begin);
			vkCmdCopyBuffer(cmdbuf, g_staging.buffer.buffer,
				dst,
				run_end - run_begin, g_staging.buffers.copy + run_begin);
		}

		run_begin = run_end;
	}

	g_staging.buffers.count = 0;
}
// Records one vkCmdCopyBufferToImage per pending image region, then clears
// the pending-images table.
static void commitImages(VkCommandBuffer cmdbuf) {
	for (int i = 0; i < g_staging.images.count; i++) {
		const staging_image_t *const dst = g_staging.images.dest + i;

		vkCmdCopyBufferToImage(cmdbuf, g_staging.buffer.buffer,
			dst->image,
			dst->layout,
			1, g_staging.images.copy + i);
	}

	g_staging.images.count = 0;
}
2022-09-11 20:38:51 +02:00
VkCommandBuffer R_VkStagingGetCommandBuffer ( void ) {
if ( g_staging . cmdbuf )
return g_staging . cmdbuf ;
g_staging . cmdbuf = g_staging . upload_pool . buffers [ 0 ] ;
const VkCommandBufferBeginInfo beginfo = {
. sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO ,
. flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT ,
} ;
XVK_CHECK ( vkBeginCommandBuffer ( g_staging . cmdbuf , & beginfo ) ) ;
return g_staging . cmdbuf ;
}
// Records all pending staging copies into the upload command buffer and
// returns it (still recording, not ended). Returns VK_NULL_HANDLE when
// nothing is pending and no command buffer is open.
VkCommandBuffer R_VkStagingCommit(void) {
	const int has_work = g_staging.images.count || g_staging.buffers.count || g_staging.cmdbuf;
	if (!has_work)
		return VK_NULL_HANDLE;

	const VkCommandBuffer cmdbuf = R_VkStagingGetCommandBuffer();
	commitBuffers(cmdbuf);
	commitImages(cmdbuf);
	return cmdbuf;
}
2022-09-11 20:38:51 +02:00
// Begins a new frame: flips the staging ring and discards any copy regions
// that were locked but never committed last frame.
void R_VkStagingFrameBegin(void) {
	R_FlippingBuffer_Flip(&g_staging.buffer_alloc);

	g_staging.buffers.count = 0;
	g_staging.images.count = 0;
}
2022-09-11 20:38:51 +02:00
VkCommandBuffer R_VkStagingFrameEnd ( void ) {
const VkCommandBuffer cmdbuf = R_VkStagingCommit ( ) ;
if ( cmdbuf )
XVK_CHECK ( vkEndCommandBuffer ( cmdbuf ) ) ;
2022-04-28 09:16:25 +02:00
2022-09-11 20:38:51 +02:00
g_staging . cmdbuf = VK_NULL_HANDLE ;
2022-04-28 09:16:25 +02:00
2022-09-11 20:38:51 +02:00
const VkCommandBuffer tmp = g_staging . upload_pool . buffers [ 0 ] ;
g_staging . upload_pool . buffers [ 0 ] = g_staging . upload_pool . buffers [ 1 ] ;
2022-09-17 19:54:18 +02:00
g_staging . upload_pool . buffers [ 1 ] = g_staging . upload_pool . buffers [ 2 ] ;
g_staging . upload_pool . buffers [ 2 ] = tmp ;
2022-06-04 23:04:44 +02:00
2022-09-11 20:38:51 +02:00
return cmdbuf ;
2022-04-28 09:16:25 +02:00
}