vk: add block allocator draft

The intent is to manage long-vs-single-frame allocations better.
Previously, long allocations were map-long bump allocations and couldn't be
freed mid-map, as there was neither a reference to the allocated range nor a
way to actually free it.

Add a two-mode block allocator (similar to the previous debuffer alloc) that
allows making long and once allocations. Long allocations are now backed by a
"pool" allocator and return references to the allocated range.

This commit doesn't do the deallocation yet, so map changing doesn't work yet.
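
A rough usage sketch of the two modes, using the names from r_block.h added in this commit (the sizes and alignments here are made up for illustration):

	r_blocks_t blocks;
	R_BlocksCreate(&blocks, /*size*/ 1024*1024, /*once_size*/ 256*1024, /*expected_allocs*/ 64);

	// Long allocation: persists across frames; the handle references the range
	// and can be released mid-map.
	r_block_t block = R_BlockAllocLong(&blocks, 4096, /*alignment*/ 16);
	if (block.size) {
		// ... use [block.offset, block.offset + block.size) ...
		R_BlockRelease(&block);
	}

	// Once allocation: valid for the current frame only, returns just an offset.
	const uint32_t once_offset = R_BlockAllocOnce(&blocks, 256, /*alignment*/ 16);
	if (once_offset != ALO_ALLOC_FAILED) {
		// ... fill this frame's transient data at once_offset ...
	}

	// Once per frame: retire "once" allocations (double-buffered internally).
	R_BlocksClearOnce(&blocks);

	// On map change: all long blocks must have been released by this point.
	R_BlocksClearFull(&blocks);
	R_BlocksDestroy(&blocks);

The single backing region is split in R_BlocksCreate: the long pool covers [0, size - once_size) and the once ring gets the tail, which is why R_BlockAllocOnce returns its offset shifted by once.ring_offset.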
Ivan Avdeev 2023-05-25 12:12:18 -07:00
parent 9200cbfc25
commit d24961db15
17 changed files with 422 additions and 33 deletions


@@ -181,3 +181,47 @@ Funcs:
- Allocate dynamic (single-frame) kusochki[N]
- Upload geom[N] -> kusochki[N]
- Upload subset geom[ind[M]] -> kusochki[M]
# E269
RT model alloc:
- blas -- fixed
- accel buffer region -- fixed
- (scratch: once for build)
- (geoms: once for build)
- -> geometry buffer -- fixed
- kusochki[G]: geometry data -- fixed
- materials[G]: -- fixed
RT model update:
- lives in the same statically allocated blas + accel_buffer
-
RT model draw:
- mmode
- materials[G] -- can be fully static, partially dynamic, fully dynamic
- update inplace for most of dynamic things
- clone for instanced
- color
- transforms
## Blocks
### Layer 0: abstract, not backing-dependent
handle = R_BlockAlloc(int size, lifetime);
- possible block users: {accel buffer, geometry, kusochki, materials}
- lifetime
- long: map, N frames (basically everything)
- once = this frame only: sprite materials, triapi geometry/kusochki/materials
- handle: offset, size
- R_BlockAcquire/Release(handle);
- R_BlocksClearOnce(); -- frees "once" regions, checking that they are not referenced
- R_BlocksClearFull(); -- clears everything, checking that there are no external references
### Layer 1: backed by buffer
- lock = R_SmthLock(handle, size, offset)
- marks region/block as dirty (cannot be used by anything yet, prevents release, clear, etc.),
- opens a staging region for filling and uploading
- R_SmthUnlock(lock)
- remembers dirty region (for barriers)
- submits into staging queue
- ?? R_SmthBarrier -- somehow ask for the barrier struct given pipelines, etc
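
In this commit, Layer 1 is realized concretely for the geometry buffer as the R_GeometryRange* API (see vk_geometry.c further down). A condensed sketch of that lock/unlock flow, with arbitrary vertex/index counts:

	// Long-lived range; the Layer 0 handle lives inside geom_range.block_handle.
	const r_geometry_range_t geom_range = R_GeometryRangeAlloc(/*vertices*/ 128, /*indices*/ 384);
	if (geom_range.block_handle.size) {
		// Lock opens a staging region covering the range.
		const r_geometry_range_lock_t lock = R_GeometryRangeLock(&geom_range);
		// ... fill lock.vertices[0..128) and lock.indices[0..384) ...
		// Unlock submits the region into the staging queue.
		R_GeometryRangeUnlock(&lock);
		// Later, when the owner goes away:
		R_GeometryRangeFree(&geom_range);
	}
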


@@ -295,6 +295,71 @@ uint32_t aloRingAlloc(alo_ring_t* ring, uint32_t size, uint32_t alignment) {
return 0;
}
// free--><- allocated
// [a....p|q.r.]
// free->
// [a....|pq.r.]
// freeing item:
// - swap with first allocated
// [a....r|q.p.]
void aloIntPoolGrow(alo_int_pool_t *pool, int new_capacity) {
int *const new_free_list = MALLOC(sizeof(int) * new_capacity);
const int new_items = new_capacity - pool->capacity;
for (int i = 0; i < pool->free; ++i)
new_free_list[i] = pool->free_list[i];
for (int i = 0; i < new_items; ++i)
new_free_list[pool->free + i] = new_capacity - i - 1;
if (pool->free_list)
FREE(pool->free_list);
pool->free_list = new_free_list;
pool->free += new_items;
pool->capacity = new_capacity;
}
int aloIntPoolAlloc(alo_int_pool_t *pool) {
if (pool->free == 0)
return -1;
pool->free--;
return pool->free_list[pool->free];
}
void aloIntPoolFree(alo_int_pool_t *pool, int val) {
ASSERT(pool->free < pool->capacity);
ASSERT(val >= 0);
ASSERT(val < pool->capacity);
// Manage the allocated tail list
for (int i = pool->free; i < pool->capacity; ++i) {
if (pool->free_list[i] != val)
continue;
const int tmp = pool->free_list[pool->free];
pool->free_list[pool->free] = val;
pool->free_list[i] = tmp;
++pool->free;
return;
}
ASSERT(!"Item not found");
}
void aloIntPoolClear(alo_int_pool_t *pool) {
// Depends on the fact that the tail free_list contains properly maintained allocated ints
pool->free = pool->capacity;
}
void aloIntPoolDestroy(alo_int_pool_t *pool) {
if (pool->free_list)
FREE(pool->free_list);
}
#if defined(ALOLCATOR_TEST)
#include <stdio.h>
uint32_t rand_pcg32(uint32_t max) {


@@ -35,3 +35,17 @@ uint32_t aloRingAlloc(alo_ring_t* ring, uint32_t size, uint32_t alignment);
// Marks everything up-to-pos as free (expects up-to-pos to be valid)
void aloRingFree(alo_ring_t* ring, uint32_t up_to_pos);
// Integer pool/freelist
// Get integers from 0 to capacity
typedef struct alo_int_pool_s {
int *free_list;
int capacity;
int free;
} alo_int_pool_t;
void aloIntPoolGrow(alo_int_pool_t *pool, int new_capacity);
int aloIntPoolAlloc(alo_int_pool_t *pool);
void aloIntPoolFree(alo_int_pool_t *pool, int);
void aloIntPoolClear(alo_int_pool_t *pool);
void aloIntPoolDestroy(alo_int_pool_t *pool);
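
A small usage sketch of the int pool (capacity and values are illustrative):

	alo_int_pool_t pool = {0};
	aloIntPoolGrow(&pool, 4);              // pool now hands out ints in [0, 4)

	const int a = aloIntPoolAlloc(&pool);  // a free int, or -1 if the pool is exhausted
	const int b = aloIntPoolAlloc(&pool);

	aloIntPoolFree(&pool, a);              // swaps 'a' from the allocated tail back into the free part

	aloIntPoolClear(&pool);                // marks everything free again ('b' included)
	aloIntPoolDestroy(&pool);
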

ref/vk/r_block.c (new file, 116 lines)

@@ -0,0 +1,116 @@
#include "r_block.h"
#include "vk_common.h" // ASSERT
#include "vk_core.h" // vk_core.pool
typedef struct r_blocks_block_s {
int long_index;
uint32_t refcount;
} r_blocks_block_t;
// logical blocks
// <---- lifetime long -><-- once -->
// [.....................|............]
// <--- pool --><-- ring --->
// offset ? --->
int allocMetablock(r_blocks_t *blocks) {
return aloIntPoolAlloc(&blocks->blocks.freelist);
// TODO grow if needed
}
r_block_t R_BlockAllocLong(r_blocks_t *blocks, uint32_t size, uint32_t alignment) {
r_block_t ret = {
.offset = ALO_ALLOC_FAILED,
.size = 0,
.impl_ = {-1}
};
const alo_block_t ablock = aloPoolAllocate(blocks->long_pool, size, alignment);
if (ablock.offset == ALO_ALLOC_FAILED)
return ret;
const int metablock_index = allocMetablock(blocks);
if (metablock_index < 0) {
aloPoolFree(blocks->long_pool, ablock.index);
return ret;
}
ret.offset = ablock.offset;
ret.size = ablock.size;
ret.impl_.index = metablock_index;
ret.impl_.blocks = blocks;
r_blocks_block_t *metablock = blocks->blocks.storage + metablock_index;
metablock->long_index = ablock.index;
metablock->refcount = 1;
return ret;
}
uint32_t R_BlockAllocOnce(r_blocks_t *blocks, uint32_t size, uint32_t alignment) {
const uint32_t offset = R_FlippingBuffer_Alloc(&blocks->once.flipping, size, alignment);
if (offset == ALO_ALLOC_FAILED)
return ALO_ALLOC_FAILED;
return offset + blocks->once.ring_offset;
}
void R_BlocksCreate(r_blocks_t *blocks, uint32_t size, uint32_t once_size, int expected_allocs) {
memset(blocks, 0, sizeof(*blocks));
blocks->size = size;
blocks->long_pool = aloPoolCreate(size - once_size, expected_allocs, 4);
aloIntPoolGrow(&blocks->blocks.freelist, expected_allocs);
blocks->blocks.storage = Mem_Malloc(vk_core.pool, expected_allocs * sizeof(blocks->blocks.storage[0]));
blocks->once.ring_offset = size - once_size;
R_FlippingBuffer_Init(&blocks->once.flipping, once_size);
}
void R_BlockRelease(const r_block_t *block) {
r_blocks_t *const blocks = block->impl_.blocks;
ASSERT(block->impl_.index >= 0);
ASSERT(block->impl_.index < blocks->blocks.freelist.capacity);
r_blocks_block_t *const metablock = blocks->blocks.storage + block->impl_.index;
ASSERT (metablock->refcount > 0);
if (--metablock->refcount)
return;
aloPoolFree(blocks->long_pool, metablock->long_index);
aloIntPoolFree(&blocks->blocks.freelist, block->impl_.index);
}
void R_BlocksDestroy(r_blocks_t *blocks) {
for (int i = blocks->blocks.freelist.free; i < blocks->blocks.freelist.capacity; ++i) {
r_blocks_block_t *b = blocks->blocks.storage + blocks->blocks.freelist.free_list[i];
ASSERT(b->refcount == 0);
}
aloPoolDestroy(blocks->long_pool);
aloIntPoolDestroy(&blocks->blocks.freelist);
Mem_Free(blocks->blocks.storage);
}
// Clear all LifetimeOnce blocks, checking that they are not referenced by anything
void R_BlocksClearOnce(r_blocks_t *blocks) {
R_FlippingBuffer_Flip(&blocks->once.flipping);
}
// Clear all blocks, checking that they're not referenced
void R_BlocksClearFull(r_blocks_t *blocks) {
for (int i = blocks->blocks.freelist.free; i < blocks->blocks.freelist.capacity; ++i) {
r_blocks_block_t *b = blocks->blocks.storage + blocks->blocks.freelist.free_list[i];
ASSERT(b->refcount == 0);
ASSERT(b->long_index >= 0);
aloPoolFree(blocks->long_pool, b->long_index);
}
aloIntPoolClear(&blocks->blocks.freelist);
R_FlippingBuffer_Clear(&blocks->once.flipping);
}
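
r_block.h below declares R_BlockAcquire but leaves it commented out. Given the refcount already maintained by R_BlockRelease above, a minimal sketch of what it could look like (an assumption, not part of this commit):

	// Hypothetical counterpart to R_BlockRelease: take another reference on a long block.
	void R_BlockAcquire(r_block_t *block) {
		r_blocks_t *const blocks = block->impl_.blocks;
		ASSERT(block->impl_.index >= 0);
		ASSERT(block->impl_.index < blocks->blocks.freelist.capacity);
		r_blocks_block_t *const metablock = blocks->blocks.storage + block->impl_.index;
		ASSERT(metablock->refcount > 0);
		++metablock->refcount;
	}
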

ref/vk/r_block.h (new file, 48 lines)

@@ -0,0 +1,48 @@
#pragma once
#include "r_flipping.h"
#include "alolcator.h"
#include <stdint.h>
struct r_blocks_s;
typedef struct r_block_s {
uint32_t offset;
uint32_t size;
struct {
int index;
struct r_blocks_s *blocks;
} impl_;
} r_block_t;
struct r_blocks_block_s;
typedef struct r_blocks_s {
uint32_t size;
struct alo_pool_s *long_pool;
struct {
uint32_t ring_offset;
r_flipping_buffer_t flipping;
} once;
struct {
alo_int_pool_t freelist;
struct r_blocks_block_s *storage;
} blocks;
} r_blocks_t;
r_block_t R_BlockAllocLong(r_blocks_t *blocks, uint32_t size, uint32_t alignment);
uint32_t R_BlockAllocOnce(r_blocks_t *blocks, uint32_t size, uint32_t alignment);
//void R_BlockAcquire(r_block_t *block);
void R_BlockRelease(const r_block_t *block);
void R_BlocksCreate(r_blocks_t *blocks, uint32_t max_size, uint32_t once_max, int expected_allocs);
void R_BlocksDestroy(r_blocks_t *blocks);
// Clear all LifetimeOnce blocks, checking that they're not referenced
void R_BlocksClearOnce(r_blocks_t *blocks);
// Clear all blocks, checking that they're not referenced
void R_BlocksClearFull(r_blocks_t *blocks);

ref/vk/r_flipping.h (new file, 17 lines)

@@ -0,0 +1,17 @@
#pragma once
#include "alolcator.h"
typedef struct {
alo_ring_t ring;
uint32_t frame_offsets[2];
} r_flipping_buffer_t;
void R_FlippingBuffer_Init(r_flipping_buffer_t *flibuf, uint32_t size);
uint32_t R_FlippingBuffer_Alloc(r_flipping_buffer_t* flibuf, uint32_t size, uint32_t align);
// (╯°□°)╯︵ ┻━┻
void R_FlippingBuffer_Flip(r_flipping_buffer_t* flibuf);
// ┬─┬ノ( º _ ºノ)
void R_FlippingBuffer_Clear(r_flipping_buffer_t *flibuf);
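
r_flipping.h only moves these declarations out of vk_buffer.h (see that diff below); the implementation itself is not part of this commit. The per-frame usage pattern, as the geometry "once" allocations use it (sizes here are illustrative; the frame_offsets[2] pair suggests allocations are recycled with a two-frame latency):

	r_flipping_buffer_t flip;
	R_FlippingBuffer_Init(&flip, 64 * 1024);

	// During a frame:
	const uint32_t off = R_FlippingBuffer_Alloc(&flip, 256, /*align*/ 16);
	if (off != ALO_ALLOC_FAILED) {
		// ... write this frame's transient data at 'off' ...
	}

	// At end of frame: allocations from older frames become reusable.
	R_FlippingBuffer_Flip(&flip);

	// On a full reset (e.g. map change):
	R_FlippingBuffer_Clear(&flip);
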


@@ -100,7 +100,7 @@ struct ModelHeader {
};
struct Kusok {
// Geometry data
// Geometry data, static
uint index_offset;
uint vertex_offset;
uint triangles;
@@ -110,6 +110,7 @@ struct Kusok {
uint _padding0;
// Per-kusok because individual surfaces can be patched
// TODO? still move to material, or its own table? As this can be dynamic
vec3 emissive;
PAD(1)


@@ -244,7 +244,7 @@ static void R_DrawSegs( vec3_t source, vec3_t delta, float width, float scale, f
total_indices = (total_vertices - 2) * 3; // STRIP unrolled into LIST (TODO get rid of this)
ASSERT(total_vertices < UINT16_MAX );
if (!R_GeometryBufferAllocAndLock( &buffer, total_vertices, total_indices, LifetimeSingleFrame )) {
if (!R_GeometryBufferAllocOnceAndLock( &buffer, total_vertices, total_indices)) {
gEngine.Con_Printf(S_ERROR "Cannot allocate geometry for beam\n");
return;
}


@@ -132,7 +132,7 @@ static void EmitWaterPolys( const cl_entity_t *ent, const msurface_t *warp, qboo
num_indices += triangles * 3;
}
if (!R_GeometryBufferAllocAndLock( &buffer, num_vertices, num_indices, LifetimeSingleFrame )) {
if (!R_GeometryBufferAllocOnceAndLock( &buffer, num_vertices, num_indices)) {
gEngine.Con_Printf(S_ERROR "Cannot allocate geometry for %s\n", ent->model->name );
return;
}
@@ -591,18 +591,21 @@ static qboolean loadBrushSurfaces( model_sizes_t sizes, const model_t *mod ) {
vk_vertex_t *bvert = NULL;
uint16_t *bind = NULL;
uint32_t index_offset = 0;
r_geometry_buffer_lock_t buffer;
int animated_count = 0;
if (!R_GeometryBufferAllocAndLock( &buffer, sizes.num_vertices, sizes.num_indices, LifetimeLong )) {
const r_geometry_range_t geom_range = R_GeometryRangeAlloc(sizes.num_vertices, sizes.num_indices);
if (!geom_range.block_handle.size) {
gEngine.Con_Printf(S_ERROR "Cannot allocate geometry for %s\n", mod->name );
return false;
}
bvert = buffer.vertices.ptr;
bind = buffer.indices.ptr;
const r_geometry_range_lock_t geom_lock = R_GeometryRangeLock(&geom_range);
index_offset = buffer.indices.unit_offset;
bvert = geom_lock.vertices;
bind = geom_lock.indices;
index_offset = geom_range.indices.unit_offset;
// Load sorted by gl_texturenum
// TODO this does not make that much sense in vulkan (can sort later)
@@ -656,7 +659,7 @@ static qboolean loadBrushSurfaces( model_sizes_t sizes, const model_t *mod ) {
model_geometry->surf_deprecate = surf;
model_geometry->texture = tex_id;
model_geometry->vertex_offset = buffer.vertices.unit_offset;
model_geometry->vertex_offset = geom_range.vertices.unit_offset;
model_geometry->max_vertex = vertex_offset + surf->numedges;
model_geometry->index_offset = index_offset;
@@ -762,7 +765,7 @@ static qboolean loadBrushSurfaces( model_sizes_t sizes, const model_t *mod ) {
}
}
R_GeometryBufferUnlock( &buffer );
R_GeometryRangeUnlock( &geom_lock );
bmodel->render_model.dynamic_polylights = NULL;
bmodel->render_model.dynamic_polylights_count = 0;


@@ -2,6 +2,7 @@
#include "vk_core.h"
#include "vk_devmem.h"
#include "r_flipping.h"
#include "alolcator.h"
typedef struct vk_buffer_s {
@@ -17,18 +18,6 @@ void VK_BufferDestroy(vk_buffer_t *buf);
VkDeviceAddress R_VkBufferGetDeviceAddress(VkBuffer buffer);
typedef struct {
alo_ring_t ring;
uint32_t frame_offsets[2];
} r_flipping_buffer_t;
void R_FlippingBuffer_Init(r_flipping_buffer_t *flibuf, uint32_t size);
void R_FlippingBuffer_Clear(r_flipping_buffer_t *flibuf);
uint32_t R_FlippingBuffer_Alloc(r_flipping_buffer_t* flibuf, uint32_t size, uint32_t align);
void R_FlippingBuffer_Flip(r_flipping_buffer_t* flibuf);
typedef struct {
r_flipping_buffer_t dynamic;
uint32_t static_size;


@@ -13,17 +13,74 @@
#define GEOMETRY_BUFFER_SIZE (GEOMETRY_BUFFER_STATIC_SIZE + GEOMETRY_BUFFER_DYNAMIC_SIZE)
// TODO profiler counters
static struct {
vk_buffer_t buffer;
r_debuffer_t alloc;
r_blocks_t alloc;
} g_geom;
qboolean R_GeometryBufferAllocAndLock( r_geometry_buffer_lock_t *lock, int vertex_count, int index_count, r_geometry_lifetime_t lifetime ) {
r_geometry_range_t R_GeometryRangeAlloc(int vertices, int indices) {
const uint32_t vertices_size = vertices * sizeof(vk_vertex_t);
const uint32_t indices_size = indices * sizeof(uint16_t);
const uint32_t total_size = vertices_size + indices_size;
r_geometry_range_t ret = {
.block_handle = R_BlockAllocLong(&g_geom.alloc, total_size, sizeof(vk_vertex_t)),
};
if (!ret.block_handle.size)
return ret;
ret.vertices.unit_offset = ret.block_handle.offset / sizeof(vk_vertex_t);
ret.indices.unit_offset = (ret.block_handle.offset + vertices_size) / sizeof(uint16_t);
ret.vertices.count = vertices;
ret.indices.count = indices;
return ret;
}
void R_GeometryRangeFree(const r_geometry_range_t* range) {
R_BlockRelease(&range->block_handle);
}
r_geometry_range_lock_t R_GeometryRangeLock(const r_geometry_range_t *range) {
const vk_staging_buffer_args_t staging_args = {
.buffer = g_geom.buffer.buffer,
.offset = range->block_handle.offset,
.size = range->block_handle.size,
.alignment = 4,
};
const vk_staging_region_t staging = R_VkStagingLockForBuffer(staging_args);
ASSERT(staging.ptr);
const uint32_t vertices_size = range->vertices.count * sizeof(vk_vertex_t);
ASSERT( range->block_handle.offset % sizeof(vk_vertex_t) == 0 );
ASSERT( (range->block_handle.offset + vertices_size) % sizeof(uint16_t) == 0 );
return (r_geometry_range_lock_t){
.vertices = (vk_vertex_t *)staging.ptr,
.indices = (uint16_t *)((char*)staging.ptr + vertices_size),
.impl_ = {
.staging_handle = staging.handle,
},
};
}
void R_GeometryRangeUnlock(const r_geometry_range_lock_t *lock) {
R_VkStagingUnlock(lock->impl_.staging_handle);
}
qboolean R_GeometryBufferAllocOnceAndLock(r_geometry_buffer_lock_t *lock, int vertex_count, int index_count) {
const uint32_t vertices_size = vertex_count * sizeof(vk_vertex_t);
const uint32_t indices_size = index_count * sizeof(uint16_t);
const uint32_t total_size = vertices_size + indices_size;
const uint32_t offset = R_DEBuffer_Alloc(&g_geom.alloc, (lifetime == LifetimeSingleFrame) ? LifetimeDynamic : LifetimeStatic, total_size, sizeof(vk_vertex_t));
const uint32_t offset = R_BlockAllocOnce(&g_geom.alloc, total_size, sizeof(vk_vertex_t));
if (offset == ALO_ALLOC_FAILED) {
/* gEngine.Con_Printf(S_ERROR "Cannot allocate %s geometry buffer for %d vertices (%d bytes) and %d indices (%d bytes)\n", */
/* lifetime == LifetimeSingleFrame ? "dynamic" : "static", */
@@ -72,7 +129,7 @@ void R_GeometryBufferUnlock( const r_geometry_buffer_lock_t *lock ) {
}
void R_GeometryBuffer_MapClear( void ) {
R_DEBuffer_Init(&g_geom.alloc, GEOMETRY_BUFFER_STATIC_SIZE, GEOMETRY_BUFFER_DYNAMIC_SIZE);
R_BlocksClearFull(&g_geom.alloc);
}
qboolean R_GeometryBuffer_Init(void) {
@@ -83,16 +140,18 @@ qboolean R_GeometryBuffer_Init(void) {
(vk_core.rtx ? VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT : 0)))
return false;
R_GeometryBuffer_MapClear();
#define EXPECTED_ALLOCS 1024
R_BlocksCreate(&g_geom.alloc, GEOMETRY_BUFFER_SIZE, GEOMETRY_BUFFER_DYNAMIC_SIZE, EXPECTED_ALLOCS);
return true;
}
void R_GeometryBuffer_Shutdown(void) {
R_BlocksDestroy(&g_geom.alloc);
VK_BufferDestroy( &g_geom.buffer );
}
void R_GeometryBuffer_Flip(void) {
R_DEBuffer_Flip(&g_geom.alloc);
R_BlocksClearOnce(&g_geom.alloc);
}
VkBuffer R_GeometryBuffer_Get(void) {


@@ -1,5 +1,6 @@
#pragma once
#include "vk_common.h"
#include "r_block.h"
#include "vk_core.h"
#include <stdint.h>
@@ -24,6 +25,35 @@ typedef struct vk_vertex_s {
float pad4_[3];
} vk_vertex_t;
typedef struct {
struct {
int count, unit_offset;
} vertices;
struct {
int count, unit_offset;
} indices;
r_block_t block_handle;
} r_geometry_range_t;
// Allocates a range in geometry buffer with a long lifetime
r_geometry_range_t R_GeometryRangeAlloc(int vertices, int indices);
void R_GeometryRangeFree(const r_geometry_range_t*);
typedef struct {
vk_vertex_t *vertices;
uint16_t *indices;
struct {
int staging_handle;
} impl_;
} r_geometry_range_lock_t;
// Lock staging memory for uploading
r_geometry_range_lock_t R_GeometryRangeLock(const r_geometry_range_t *range);
void R_GeometryRangeUnlock(const r_geometry_range_lock_t *lock);
typedef struct {
struct {
vk_vertex_t *ptr;
@@ -47,9 +77,8 @@ typedef enum {
LifetimeSingleFrame
} r_geometry_lifetime_t;
qboolean R_GeometryBufferAllocAndLock( r_geometry_buffer_lock_t *lock, int vertex_count, int index_count, r_geometry_lifetime_t lifetime );
qboolean R_GeometryBufferAllocOnceAndLock(r_geometry_buffer_lock_t *lock, int vertex_count, int index_count);
void R_GeometryBufferUnlock( const r_geometry_buffer_lock_t *lock );
//void R_VkGeometryBufferFree( int handle );
void R_GeometryBuffer_MapClear( void ); // Free the entire buffer for a new map


@@ -388,6 +388,8 @@ qboolean RT_VkAccelInit(void) {
g_accel.tlas_geom_buffer_addr = R_VkBufferGetDeviceAddress(g_accel.tlas_geom_buffer.buffer);
R_FlippingBuffer_Init(&g_accel.tlas_geom_buffer_alloc, MAX_INSTANCES * 2);
g_accel.accels_buffer_alloc = aloPoolCreate(MAX_ACCELS_BUFFER, MAX_INSTANCES, /* why */ 256);
R_SpeedsRegisterMetric(&g_accel.stats.instances_count, "accels_instances_count", kSpeedsMetricCount);
R_SpeedsRegisterMetric(&g_accel.stats.accels_built, "accels_built", kSpeedsMetricCount);


@@ -13,6 +13,7 @@
typedef struct vk_ray_model_s {
VkAccelerationStructureKHR blas;
VkDeviceAddress blas_addr;
uint32_t kusochki_offset;
qboolean dynamic;
@@ -30,6 +31,7 @@ typedef struct Kusok vk_kusok_data_t;
typedef struct rt_draw_instance_s {
vk_ray_model_t *model_toremove;
VkDeviceAddress blas_addr;
uint32_t kusochki_offset;
matrix3x4 transform_row;


@@ -673,7 +673,7 @@ static vk_render_type_e spriteRenderModeToRenderType( int render_mode ) {
static void R_DrawSpriteQuad( const char *debug_name, mspriteframe_t *frame, vec3_t org, vec3_t v_right, vec3_t v_up, float scale, int texture, int render_mode, const vec4_t color ) {
r_geometry_buffer_lock_t buffer;
if (!R_GeometryBufferAllocAndLock( &buffer, 4, 6, LifetimeSingleFrame )) {
if (!R_GeometryBufferAllocOnceAndLock( &buffer, 4, 6)) {
gEngine.Con_Printf(S_ERROR "Cannot allocate geometry for sprite quad\n");
return;
}


@@ -1941,7 +1941,7 @@ static void R_StudioDrawNormalMesh( short *ptricmds, vec3_t *pstudionorms, float
ASSERT(num_indices > 0);
// Get buffer region for vertices and indices
if (!R_GeometryBufferAllocAndLock( &buffer, num_vertices, num_indices, LifetimeSingleFrame )) {
if (!R_GeometryBufferAllocOnceAndLock( &buffer, num_vertices, num_indices)) {
gEngine.Con_Printf(S_ERROR "Cannot allocate geometry for studio model\n");
return;
}


@@ -141,7 +141,7 @@ static void emitDynamicGeometry(int num_indices, const vec4_t color, const char*
return;
r_geometry_buffer_lock_t buffer;
if (!R_GeometryBufferAllocAndLock( &buffer, g_triapi.num_vertices, num_indices, LifetimeSingleFrame )) {
if (!R_GeometryBufferAllocOnceAndLock( &buffer, g_triapi.num_vertices, num_indices)) {
gEngine.Con_Printf(S_ERROR "Cannot allocate geometry for tri api\n");
return;
}