accel/tcg: Restrict page_collection structure to system TB maintenance

Only the system emulation part of TB maintenance uses the
page_collection structure. Restrict its declaration (and the
functions requiring it) to tb-maint.c.

Convert the 'len' argument of tb_invalidate_phys_page_fast__locked()
from signed to unsigned.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20221209093649.43738-6-philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Philippe Mathieu-Daudé 2022-12-09 10:36:49 +01:00 committed by Richard Henderson
parent f349e92e8e
commit 8112426549
2 changed files with 7 additions and 15 deletions
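
With the helpers made static, the only entry point left in this header for the fast
path is tb_invalidate_phys_range_fast(), which now drives them internally in
tb-maint.c. A minimal sketch of that call shape, using only names that appear in the
diff below; the body is an approximation for illustration, not code from this commit:

    void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                       unsigned size,
                                       uintptr_t retaddr)
    {
        /* Lock every page intersecting the written range. */
        struct page_collection *pages =
            page_collection_lock(ram_addr, ram_addr + size);

        /* Invalidate TBs on the (already locked) pages. */
        tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);

        /* Unlock and free the collection. */
        page_collection_unlock(pages);
    }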


@@ -36,16 +36,9 @@ void page_table_config_init(void);
 #endif
 
 #ifdef CONFIG_SOFTMMU
-struct page_collection;
-void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
-                                          tb_page_addr_t start, int len,
-                                          uintptr_t retaddr);
-struct page_collection *page_collection_lock(tb_page_addr_t start,
-                                             tb_page_addr_t end);
 void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                    unsigned size,
                                    uintptr_t retaddr);
-void page_collection_unlock(struct page_collection *set);
 G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
 #endif /* CONFIG_SOFTMMU */
 


@@ -513,8 +513,8 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
  * intersecting TBs.
  * Locking order: acquire locks in ascending order of page index.
  */
-struct page_collection *
-page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
+static struct page_collection *page_collection_lock(tb_page_addr_t start,
+                                                    tb_page_addr_t end)
 {
     struct page_collection *set = g_malloc(sizeof(*set));
     tb_page_addr_t index;
@@ -558,7 +558,7 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
     return set;
 }
 
-void page_collection_unlock(struct page_collection *set)
+static void page_collection_unlock(struct page_collection *set)
 {
     /* entries are unlocked and freed via page_entry_destroy */
     g_tree_destroy(set->tree);
@@ -1186,9 +1186,9 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 /*
  * Call with all @pages in the range [@start, @start + len[ locked.
  */
-void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
-                                          tb_page_addr_t start, int len,
-                                          uintptr_t retaddr)
+static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
+                                                 tb_page_addr_t start,
+                                                 unsigned len, uintptr_t ra)
 {
     PageDesc *p;
 
@@ -1198,8 +1198,7 @@ void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
     }
 
     assert_page_locked(p);
-    tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
-                                          retaddr);
+    tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
 }
 
 /*