exec-all: extract tb->tc_* into a separate struct tb_tc

In preparation for adding tc.size, which will let us keep track of
TBs using the binary search tree implementation from glib.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Emilio G. Cota, 2017-07-12 00:08:21 -04:00; committed by Richard Henderson
parent 6eb062abd6
commit e7e168f413
5 changed files with 33 additions and 25 deletions
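For context on where this is headed: the commit message points at tracking TBs with glib's binary search tree (GTree), keyed on the translated-code range of each block. The following is a minimal, self-contained sketch of that idea and is not part of this commit — the size field, the tb_tc_cmp comparator, and the tree setup are assumptions about the follow-up work, not QEMU code.

/* Sketch only (not part of this commit): index TBs by host-code range with
 * a glib GTree, assuming a future tc.size field.
 * Build (assumption): gcc sketch.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

struct tb_tc {
    void *ptr;       /* pointer to the translated code */
    uint8_t *search; /* pointer to search data */
    size_t size;     /* assumed addition: size of the translated code */
};

/* Order keys by host address; a key with size == 0 acts as a probe and
 * matches the stored range [ptr, ptr + size) that contains it. */
static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;
    uintptr_t pa = (uintptr_t)a->ptr;
    uintptr_t pb = (uintptr_t)b->ptr;

    if (a->size == 0 && pb <= pa && pa < pb + b->size) {
        return 0;
    }
    if (b->size == 0 && pa <= pb && pb < pa + a->size) {
        return 0;
    }
    return pa < pb ? -1 : pa > pb ? 1 : 0;
}

int main(void)
{
    static uint8_t code[128];   /* stands in for a block of generated code */
    struct tb_tc tc = { .ptr = code, .search = NULL, .size = sizeof(code) };
    GTree *tree = g_tree_new(tb_tc_cmp);

    /* At translation time the TB would be inserted under its tb_tc key;
     * here the key doubles as the value to keep the example small. */
    g_tree_insert(tree, &tc, &tc);

    /* Look up by an arbitrary host PC inside the block, which is what
     * tb_find_pc() currently does with a binary search over tbs[]. */
    struct tb_tc probe = { .ptr = code + 42, .size = 0 };
    printf("found: %p\n", g_tree_lookup(tree, &probe));

    g_tree_destroy(tree);
    return 0;
}

The point of the extraction is visible here: once ptr (and later size) live in one struct, &tb->tc can be handed to the tree as a self-contained key instead of passing the fields around separately.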

@@ -143,11 +143,11 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
     uintptr_t ret;
     TranslationBlock *last_tb;
     int tb_exit;
-    uint8_t *tb_ptr = itb->tc_ptr;
+    uint8_t *tb_ptr = itb->tc.ptr;

     qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                            "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
-                           itb->tc_ptr, cpu->cpu_index, itb->pc,
+                           itb->tc.ptr, cpu->cpu_index, itb->pc,
                            lookup_symbol(itb->pc));

 #if defined(DEBUG_DISAS)
@@ -179,7 +179,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
         qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                                "Stopped execution of TB chain before %p ["
                                TARGET_FMT_lx "] %s\n",
-                               last_tb->tc_ptr, last_tb->pc,
+                               last_tb->tc.ptr, last_tb->pc,
                                lookup_symbol(last_tb->pc));
         if (cc->synchronize_from_tb) {
             cc->synchronize_from_tb(cpu, last_tb);
@@ -334,7 +334,7 @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
 {
     if (TCG_TARGET_HAS_direct_jump) {
         uintptr_t offset = tb->jmp_target_arg[n];
-        uintptr_t tc_ptr = (uintptr_t)tb->tc_ptr;
+        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
         tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
     } else {
         tb->jmp_target_arg[n] = addr;
@@ -354,11 +354,11 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                            "Linking TBs %p [" TARGET_FMT_lx
                            "] index %d -> %p [" TARGET_FMT_lx "]\n",
-                           tb->tc_ptr, tb->pc, n,
-                           tb_next->tc_ptr, tb_next->pc);
+                           tb->tc.ptr, tb->pc, n,
+                           tb_next->tc.ptr, tb_next->pc);

     /* patch the native jump address */
-    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
+    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

     /* add in TB jmp circular list */
     tb->jmp_list_next[n] = tb_next->jmp_list_first;

@@ -157,9 +157,9 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     }
     qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
                            "Chain %p [%d: " TARGET_FMT_lx "] %s\n",
-                           tb->tc_ptr, cpu->cpu_index, pc,
+                           tb->tc.ptr, cpu->cpu_index, pc,
                            lookup_symbol(pc));
-    return tb->tc_ptr;
+    return tb->tc.ptr;
 }

 void HELPER(exit_atomic)(CPUArchState *env)

@@ -260,7 +260,7 @@ static target_long decode_sleb128(uint8_t **pp)
    which comes from the host pc of the end of the code implementing the insn.

    Each line of the table is encoded as sleb128 deltas from the previous
-   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
+   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
    That is, the first column is seeded with the guest pc, the last column
    with the host pc, and the middle columns with zeros.  */

@@ -270,7 +270,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
     uint8_t *p = block;
     int i, j, n;

-    tb->tc_search = block;
+    tb->tc.search = block;

     for (i = 0, n = tb->icount; i < n; ++i) {
         target_ulong prev;
@@ -305,9 +305,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                      uintptr_t searched_pc)
 {
     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
-    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
+    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
     CPUArchState *env = cpu->env_ptr;
-    uint8_t *p = tb->tc_search;
+    uint8_t *p = tb->tc.search;
     int i, j, num_insns = tb->icount;
 #ifdef CONFIG_PROFILER
     int64_t ti = profile_getclock();
@@ -858,7 +858,7 @@ void tb_free(TranslationBlock *tb)
         tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
         size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);

-        tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
+        tcg_ctx.code_gen_ptr = tb->tc.ptr - struct_size;
         tcg_ctx.tb_ctx.nb_tbs--;
     }
 }
@@ -1059,7 +1059,7 @@ static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
    another TB */
 static inline void tb_reset_jump(TranslationBlock *tb, int n)
 {
-    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
+    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
     tb_set_jmp_target(tb, n, addr);
 }

@@ -1288,7 +1288,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     }

     gen_code_buf = tcg_ctx.code_gen_ptr;
-    tb->tc_ptr = gen_code_buf;
+    tb->tc.ptr = gen_code_buf;
     tb->pc = pc;
     tb->cs_base = cs_base;
     tb->flags = flags;
@@ -1307,7 +1307,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     gen_intermediate_code(cpu, tb);
     tcg_ctx.cpu = NULL;

-    trace_translate_block(tb, tb->pc, tb->tc_ptr);
+    trace_translate_block(tb, tb->pc, tb->tc.ptr);

     /* generate machine code */
     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
@@ -1354,11 +1354,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         qemu_log_lock();
         qemu_log("OUT: [size=%d]\n", gen_code_size);
         if (tcg_ctx.data_gen_ptr) {
-            size_t code_size = tcg_ctx.data_gen_ptr - tb->tc_ptr;
+            size_t code_size = tcg_ctx.data_gen_ptr - tb->tc.ptr;
             size_t data_size = gen_code_size - code_size;
             size_t i;

-            log_disas(tb->tc_ptr, code_size);
+            log_disas(tb->tc.ptr, code_size);

             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                 if (sizeof(tcg_target_ulong) == 8) {
@@ -1372,7 +1372,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                 }
             }
         } else {
-            log_disas(tb->tc_ptr, gen_code_size);
+            log_disas(tb->tc.ptr, gen_code_size);
         }
         qemu_log("\n");
         qemu_log_flush();
@@ -1699,7 +1699,7 @@ static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
     while (m_min <= m_max) {
         m = (m_min + m_max) >> 1;
         tb = tcg_ctx.tb_ctx.tbs[m];
-        v = (uintptr_t)tb->tc_ptr;
+        v = (uintptr_t)tb->tc.ptr;
         if (v == tc_ptr) {
             return tb;
         } else if (tc_ptr < v) {
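(The binary search above relies on tcg_ctx.tb_ctx.tbs[] being ordered by tc.ptr, which holds only because blocks are carved out of the code buffer in address order. With a future tc.size field, a range-keyed tree lookup along the lines of the sketch after the commit header could answer the same "which TB contains this host pc?" question without depending on that array ordering.)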

@@ -303,6 +303,14 @@ static inline void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
 #define CODE_GEN_AVG_BLOCK_SIZE 150
 #endif

+/*
+ * Translation Cache-related fields of a TB.
+ */
+struct tb_tc {
+    void *ptr;    /* pointer to the translated code */
+    uint8_t *search;  /* pointer to search data */
+};
+
 struct TranslationBlock {
     target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
     target_ulong cs_base; /* CS base for this block */
@@ -321,8 +329,8 @@ struct TranslationBlock {
     /* Per-vCPU dynamic tracing state used to generate this TB */
     uint32_t trace_vcpu_dstate;

-    void *tc_ptr;    /* pointer to the translated code */
-    uint8_t *tc_search;  /* pointer to search data */
+    struct tb_tc tc;
+
     /* original tb when cflags has CF_NOCACHE */
     struct TranslationBlock *orig_tb;
     /* first and second physical page containing code. The lower bit
@@ -2836,8 +2836,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)

     tcg_reg_alloc_start(s);

-    s->code_buf = tb->tc_ptr;
-    s->code_ptr = tb->tc_ptr;
+    s->code_buf = tb->tc.ptr;
+    s->code_ptr = tb->tc.ptr;

 #ifdef TCG_TARGET_NEED_LDST_LABELS
     s->ldst_labels = NULL;