diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index eebe97dabb..adb711963b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -239,12 +239,9 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
 {
-    if (addr == (tlb_entry->addr_read &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_write &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_code &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (tlb_hit_page(tlb_entry->addr_read, addr) ||
+        tlb_hit_page(tlb_entry->addr_write, addr) ||
+        tlb_hit_page(tlb_entry->addr_code, addr)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
     }
 }
@@ -1046,8 +1043,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
@@ -1091,8 +1087,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Check TLB entry and enforce page permissions. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
index c47591c970..badbf14880 100644
--- a/accel/tcg/softmmu_template.h
+++ b/accel/tcg/softmmu_template.h
@@ -123,8 +123,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -191,8 +190,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -286,8 +284,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -322,7 +319,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
@@ -364,8 +361,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -400,7 +396,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 7338f57062..117d2fbbca 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -339,6 +339,29 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
                         | TLB_RECHECK)
 
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
 #endif /* !CONFIG_USER_ONLY */
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index 5de8c8a5af..0f2cb717b1 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -422,8 +422,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
         g_assert_not_reached();
     }
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         return NULL;
     }
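
The new helpers fold TLB_INVALID_MASK into the page comparison, so an invalidated entry can never compare equal to a page-aligned address. Below is a minimal standalone sketch of that logic; the page size, the flag value, and the example addresses are illustrative assumptions, not the values QEMU derives for any particular target.

/* Standalone sketch of the tlb_hit()/tlb_hit_page() test added in cpu-all.h.
 * TARGET_PAGE_BITS and TLB_INVALID_MASK are assumed values for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;

#define TARGET_PAGE_BITS 12                      /* assumed 4 KiB pages */
#define TARGET_PAGE_MASK (~(target_ulong)((1 << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK (1 << 0)                /* assumed flag bit in the low page bits */

/* Same shape as the helpers introduced above. */
static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}

int main(void)
{
    target_ulong valid_entry   = 0x40000000;                     /* page base, no flags */
    target_ulong invalid_entry = 0x40000000 | TLB_INVALID_MASK;  /* same page, invalidated */

    /* A valid entry hits any address inside its page... */
    printf("valid hit:   %d\n", tlb_hit(valid_entry, 0x40000123));   /* prints 1 */
    /* ...but because TLB_INVALID_MASK stays in the comparison, an invalidated
     * entry never equals a page-aligned address, so it always misses. */
    printf("invalid hit: %d\n", tlb_hit(invalid_entry, 0x40000123)); /* prints 0 */
    return 0;
}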