accel/tcg: remove CF_NOCACHE and special cases

Now that we no longer generate CF_NOCACHE blocks, we can remove a bunch
of the special-case handling for them. While we are at it, we can also
remove the now-unused tb->orig_tb field and save a few bytes on the TB
structure.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210213130325.14781-20-alex.bennee@linaro.org>
Alex Bennée 2021-02-13 13:03:21 +00:00
parent 873d64ac30
commit c4afb3456c
2 changed files with 15 additions and 39 deletions
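
For context, the special casing deleted here existed because CF_NOCACHE
translations were never entered into the TB hash table and had to be
invalidated and freed by hand after running once (see the removed blocks
below). Once the compile flags (instruction count, CF_LAST_IO and so on)
are part of the lookup key, a one-shot style translation is just another
cached block. The following is a rough, self-contained sketch of that
keying idea; the CF_* values mirror the defines in the exec-all.h hunk,
but the hash itself is a toy stand-in, not QEMU's tb_hash_func():

#include <stdint.h>
#include <stdio.h>

/* Toy model only: the constants mirror the CF_* defines visible in the
 * exec-all.h hunk below; the hash is an invented stand-in. */
#define CF_COUNT_MASK 0x00007fff
#define CF_LAST_IO    0x00008000

static uint64_t toy_tb_key(uint64_t phys_pc, uint64_t pc,
                           uint32_t flags, uint32_t cflags)
{
    uint64_t h = phys_pc * 0x9e3779b97f4a7c15ULL;
    h ^= pc + (h << 6) + (h >> 2);
    h ^= flags;
    h ^= cflags;                 /* compile flags are part of the key */
    return h;
}

int main(void)
{
    uint32_t normal   = 0;                                 /* plain translation */
    uint32_t one_insn = (1 & CF_COUNT_MASK) | CF_LAST_IO;  /* io-recompile style */

    /* Same guest pc, different cflags, hence different cache slots: both
     * translations can coexist in the shared hash table. */
    printf("normal   key: %016llx\n",
           (unsigned long long)toy_tb_key(0x1000, 0x400000, 0, normal));
    printf("one-insn key: %016llx\n",
           (unsigned long long)toy_tb_key(0x1000, 0x400000, 0, one_insn));
    return 0;
}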

accel/tcg/translate-all.c

@@ -410,12 +410,6 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
         TranslationBlock *tb = tcg_tb_lookup(host_pc);
         if (tb) {
             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
-            if (tb_cflags(tb) & CF_NOCACHE) {
-                /* one-shot translation, invalidate it immediately */
-                tb_phys_invalidate(tb, -1);
-                tcg_tb_remove(tb);
-                tb_destroy(tb);
-            }
             return true;
         }
     }
@@ -1634,8 +1628,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
                      tb->trace_vcpu_dstate);
-    if (!(tb->cflags & CF_NOCACHE) &&
-        !qht_remove(&tb_ctx.htable, tb, h)) {
+    if (!qht_remove(&tb_ctx.htable, tb, h)) {
         return;
     }
@@ -1796,6 +1789,8 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
 {
     PageDesc *p;
     PageDesc *p2 = NULL;
+    void *existing_tb = NULL;
+    uint32_t h;

     assert_memory_lock();
@@ -1815,25 +1810,20 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
         tb->page_addr[1] = -1;
     }

-    if (!(tb->cflags & CF_NOCACHE)) {
-        void *existing_tb = NULL;
-        uint32_t h;
-
-        /* add in the hash table */
-        h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
-                         tb->trace_vcpu_dstate);
-        qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
-
-        /* remove TB from the page(s) if we couldn't insert it */
-        if (unlikely(existing_tb)) {
-            tb_page_remove(p, tb);
-            invalidate_page_bitmap(p);
-            if (p2) {
-                tb_page_remove(p2, tb);
-                invalidate_page_bitmap(p2);
-            }
-            tb = existing_tb;
-        }
+    /* add in the hash table */
+    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+                     tb->trace_vcpu_dstate);
+    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
+
+    /* remove TB from the page(s) if we couldn't insert it */
+    if (unlikely(existing_tb)) {
+        tb_page_remove(p, tb);
+        invalidate_page_bitmap(p);
+        if (p2) {
+            tb_page_remove(p2, tb);
+            invalidate_page_bitmap(p2);
+        }
+        tb = existing_tb;
     }

     if (p2 && p2 != p) {
@@ -1906,7 +1896,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->cs_base = cs_base;
     tb->flags = flags;
     tb->cflags = cflags;
-    tb->orig_tb = NULL;
     tb->trace_vcpu_dstate = *cpu->trace_dstate;
     tcg_ctx->tb_cflags = cflags;
 tb_overflow:
@@ -2445,16 +2434,6 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     /* Generate a new TB executing the I/O insn.  */
     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

-    if (tb_cflags(tb) & CF_NOCACHE) {
-        if (tb->orig_tb) {
-            /* Invalidate original TB if this TB was generated in
-             * cpu_exec_nocache() */
-            tb_phys_invalidate(tb->orig_tb, -1);
-        }
-        tcg_tb_remove(tb);
-        tb_destroy(tb);
-    }
-
     qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                            "cpu_io_recompile: rewound execution of TB to "
                            TARGET_FMT_lx "\n", tb->pc);
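
The cpu_io_recompile() hunk above shows the mechanism that made
CF_NOCACHE redundant: instead of building a throw-away block, the
function records the desired compile flags in cpu->cflags_next_tb and
lets the normal translation path pick them up when it generates the next
TB. A cut-down sketch of that handshake follows; only the field and flag
names come from the diff, the surrounding types are invented for
illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: a toy model of the cflags_next_tb handshake. */
#define CF_COUNT_MASK 0x00007fff
#define CF_LAST_IO    0x00008000

struct toy_cpu {
    uint32_t cflags_next_tb;     /* (uint32_t)-1 means "no request pending" */
};

static uint32_t curr_cflags(void)
{
    return 0;                    /* default compile flags in this toy model */
}

/* What the hunk above does: request a TB that executes exactly n insns
 * and flags the last one as a possible I/O access. */
static void request_io_recompile(struct toy_cpu *cpu, unsigned n)
{
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | (n & CF_COUNT_MASK);
}

/* What the execution loop does before translating the next block. */
static uint32_t consume_next_cflags(struct toy_cpu *cpu)
{
    uint32_t cflags = cpu->cflags_next_tb;
    if (cflags == (uint32_t)-1) {
        cflags = curr_cflags();              /* nothing pending */
    } else {
        cpu->cflags_next_tb = (uint32_t)-1;  /* one-shot: consume the request */
    }
    return cflags;
}

int main(void)
{
    struct toy_cpu cpu = { .cflags_next_tb = (uint32_t)-1 };

    printf("normal block:        cflags=%#x\n", consume_next_cflags(&cpu));
    request_io_recompile(&cpu, 1);
    printf("after I/O recompile: cflags=%#x\n", consume_next_cflags(&cpu));
    printf("next block again:    cflags=%#x\n", consume_next_cflags(&cpu));
    return 0;
}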

include/exec/exec-all.h

@@ -454,7 +454,6 @@ struct TranslationBlock {
     uint32_t cflags;    /* compile flags */
 #define CF_COUNT_MASK  0x00007fff
 #define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
-#define CF_NOCACHE     0x00010000 /* To be freed after execution */
 #define CF_USE_ICOUNT  0x00020000
 #define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
 #define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
@@ -469,8 +468,6 @@ struct TranslationBlock {
     struct tb_tc tc;

-    /* original tb when cflags has CF_NOCACHE */
-    struct TranslationBlock *orig_tb;
     /* first and second physical page containing code. The lower bit
        of the pointer tells the index in page_next[].
        The list is protected by the TB's page('s) lock(s) */