include/hw/core: Create struct CPUJumpCache

Wrap the bare TranslationBlock pointer into a structure.
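
In short (all lines taken from the hunks below), per-vCPU lookups change from
indexing a bare array embedded in CPUState to dereferencing the new structure:

    /* before */
    TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];   /* field in CPUState */
    tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);

    /* after */
    CPUJumpCache *tb_jmp_cache;                           /* field in CPUState */
    tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);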

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2022-08-15 15:13:05 -05:00
parent 1d41a79b3c
commit a976a99a29
12 changed files with 72 additions and 28 deletions


@@ -21,6 +21,10 @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
 {
 }
 
+void tcg_flush_jmp_cache(CPUState *cpu)
+{
+}
+
 int probe_access_flags(CPUArchState *env, target_ulong addr,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t retaddr)


@@ -42,6 +42,7 @@
 #include "sysemu/replay.h"
 #include "sysemu/tcg.h"
 #include "exec/helper-proto.h"
+#include "tb-jmp-cache.h"
 #include "tb-hash.h"
 #include "tb-context.h"
 #include "internal.h"
@@ -252,7 +253,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
     tcg_debug_assert(!(cflags & CF_INVALID));
 
     hash = tb_jmp_cache_hash_func(pc);
-    tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
+    tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
 
     if (likely(tb &&
                tb->pc == pc &&
@@ -266,7 +267,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
     if (tb == NULL) {
         return NULL;
     }
-    qatomic_set(&cpu->tb_jmp_cache[hash], tb);
+    qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
     return tb;
 }
 
@@ -987,6 +988,8 @@ int cpu_exec(CPUState *cpu)
 
             tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
             if (tb == NULL) {
+                uint32_t h;
+
                 mmap_lock();
                 tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                 mmap_unlock();
@@ -994,7 +997,8 @@ int cpu_exec(CPUState *cpu)
                 /*
                  * We add the TB in the virtual pc hash table
                  * for the fast lookup
                  */
-                qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
+                h = tb_jmp_cache_hash_func(pc);
+                qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
             }
 #ifndef CONFIG_USER_ONLY


@@ -100,10 +100,11 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
 
 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
 {
-    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
+    int i, i0 = tb_jmp_cache_hash_page(page_addr);
+    CPUJumpCache *jc = cpu->tb_jmp_cache;
 
     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
-        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
+        qatomic_set(&jc->array[i0 + i].tb, NULL);
     }
 }
 
@@ -356,7 +357,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    cpu_tb_jmp_cache_clear(cpu);
+    tcg_flush_jmp_cache(cpu);
 
     if (to_clean == ALL_MMUIDX_BITS) {
         qatomic_set(&env_tlb(env)->c.full_flush_count,
@@ -785,7 +786,7 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
      * longer to clear each entry individually than it will to clear it all.
      */
     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
-        cpu_tb_jmp_cache_clear(cpu);
+        tcg_flush_jmp_cache(cpu);
         return;
     }


@@ -23,6 +23,7 @@
 #include "exec/cpu-defs.h"
 #include "exec/exec-all.h"
 #include "qemu/xxhash.h"
+#include "tb-jmp-cache.h"
 
 #ifdef CONFIG_SOFTMMU

accel/tcg/tb-jmp-cache.h  (new file, 24 lines)

@@ -0,0 +1,24 @@
+/*
+ * The per-CPU TranslationBlock jump cache.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef ACCEL_TCG_TB_JMP_CACHE_H
+#define ACCEL_TCG_TB_JMP_CACHE_H
+
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
+/*
+ * Accessed in parallel; all accesses to 'tb' must be atomic.
+ */
+struct CPUJumpCache {
+    struct {
+        TranslationBlock *tb;
+    } array[TB_JMP_CACHE_SIZE];
+};
+
+#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
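
For orientation, a minimal usage sketch mirroring the tb_lookup()/cpu_exec()
hunks above; the helper names jc_lookup()/jc_insert() are hypothetical, and
tb_jmp_cache_hash_func() comes from "tb-hash.h", not from this header:

    /* Hypothetical helpers; assumes "qemu/atomic.h" and "tb-hash.h" are included. */
    static inline TranslationBlock *jc_lookup(CPUState *cpu, target_ulong pc)
    {
        uint32_t h = tb_jmp_cache_hash_func(pc);

        /* Readers race with writers, so every access to 'tb' must be atomic. */
        return qatomic_rcu_read(&cpu->tb_jmp_cache->array[h].tb);
    }

    static inline void jc_insert(CPUState *cpu, target_ulong pc, TranslationBlock *tb)
    {
        uint32_t h = tb_jmp_cache_hash_func(pc);

        qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
    }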


@@ -58,6 +58,7 @@
 #include "sysemu/tcg.h"
 #include "qapi/error.h"
 #include "hw/core/tcg-cpu-ops.h"
+#include "tb-jmp-cache.h"
 #include "tb-hash.h"
 #include "tb-context.h"
 #include "internal.h"
@@ -967,7 +968,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     }
 
     CPU_FOREACH(cpu) {
-        cpu_tb_jmp_cache_clear(cpu);
+        tcg_flush_jmp_cache(cpu);
     }
 
     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
@@ -1187,8 +1188,9 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
     CPU_FOREACH(cpu) {
-        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
-            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
+        CPUJumpCache *jc = cpu->tb_jmp_cache;
+        if (qatomic_read(&jc->array[h].tb) == tb) {
+            qatomic_set(&jc->array[h].tb, NULL);
         }
     }
 
@@ -2443,6 +2445,26 @@ int page_unprotect(target_ulong address, uintptr_t pc)
 }
 #endif /* CONFIG_USER_ONLY */
 
+/*
+ * Called by generic code at e.g. cpu reset after cpu creation,
+ * therefore we must be prepared to allocate the jump cache.
+ */
+void tcg_flush_jmp_cache(CPUState *cpu)
+{
+    CPUJumpCache *jc = cpu->tb_jmp_cache;
+
+    if (likely(jc)) {
+        for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+            qatomic_set(&jc->array[i].tb, NULL);
+        }
+    } else {
+        /* This should happen once during realize, and thus never race. */
+        jc = g_new0(CPUJumpCache, 1);
+        jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
+        assert(jc == NULL);
+    }
+}
+
 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
 void tcg_flush_softmmu_tlb(CPUState *cs)
 {
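
A note on the else branch of tcg_flush_jmp_cache() above: publishing the freshly
allocated cache with qatomic_xchg() and asserting that the old value was NULL lets
the code detect a racing allocation rather than silently leak one. Under the
single-threaded realize/reset path the comment describes, a plainer (hypothetical,
not what the patch does) version would be:

    assert(cpu->tb_jmp_cache == NULL);
    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);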


@@ -137,8 +137,7 @@ static void cpu_common_reset(DeviceState *dev)
     cpu->cflags_next_tb = -1;
 
     if (tcg_enabled()) {
-        cpu_tb_jmp_cache_clear(cpu);
-
+        tcg_flush_jmp_cache(cpu);
         tcg_flush_softmmu_tlb(cpu);
     }
 }


@@ -38,6 +38,7 @@ void cpu_list_unlock(void);
 unsigned int cpu_list_generation_id_get(void);
 
 void tcg_flush_softmmu_tlb(CPUState *cs);
+void tcg_flush_jmp_cache(CPUState *cs);
 
 void tcg_iommu_init_notifier_list(CPUState *cpu);
 void tcg_iommu_free_notifier_list(CPUState *cpu);


@@ -236,9 +236,6 @@ struct kvm_run;
 struct hax_vcpu_state;
 struct hvf_vcpu_state;
 
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
-
 /* work queue */
 
 /* The union type allows passing of 64 bit target pointers on 32 bit
@@ -369,8 +366,7 @@ struct CPUState {
     CPUArchState *env_ptr;
     IcountDecr *icount_decr_ptr;
 
-    /* Accessed in parallel; all accesses must be atomic */
-    TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
+    CPUJumpCache *tb_jmp_cache;
 
     struct GDBRegisterState *gdb_regs;
     int gdb_num_regs;
@@ -456,15 +452,6 @@ extern CPUTailQ cpus;
 
 extern __thread CPUState *current_cpu;
 
-static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
-{
-    unsigned int i;
-
-    for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
-        qatomic_set(&cpu->tb_jmp_cache[i], NULL);
-    }
-}
-
 /**
  * qemu_tcg_mttcg_enabled:
  * Check whether we are running MultiThread TCG or not.


@@ -41,6 +41,7 @@ typedef struct CoMutex CoMutex;
 typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
 typedef struct CPUAddressSpace CPUAddressSpace;
 typedef struct CPUArchState CPUArchState;
+typedef struct CPUJumpCache CPUJumpCache;
 typedef struct CPUState CPUState;
 typedef struct CPUTLBEntryFull CPUTLBEntryFull;
 typedef struct DeviceListener DeviceListener;


@@ -56,7 +56,7 @@ struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
 static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
 {
     bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
-    cpu_tb_jmp_cache_clear(cpu);
+    tcg_flush_jmp_cache(cpu);
 }
 
 static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)


@@ -65,7 +65,7 @@ static void trace_event_synchronize_vcpu_state_dynamic(
 {
     bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed,
                 CPU_TRACE_DSTATE_MAX_EVENTS);
-    cpu_tb_jmp_cache_clear(vcpu);
+    tcg_flush_jmp_cache(vcpu);
 }
 
 void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,