cputlb: Add functions for flushing TLB for a single MMU index

Guest CPU TLB maintenance operations may be sufficiently
specialized to only need to flush TLB entries corresponding
to a particular MMU index. Implement cputlb functions for
this, to avoid the inefficiency of flushing TLB entries
which we don't need to.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Message-id: 1439548879-1972-2-git-send-email-peter.maydell@linaro.org
Peter Maydell 2015-08-25 15:45:09 +01:00
parent 14db7fe09a
commit d7a74a9d4a
2 changed files with 144 additions and 0 deletions
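
For orientation, here is a sketch of how a caller might use the two new functions. It is not part of this commit; the MMU index values (0 and 1) and the variables cs and page_addr are placeholders rather than anything a real target defines:

    /* Flush all TLB entries for two specific MMU indexes; the variadic
     * list of indexes must be terminated with a negative value. */
    tlb_flush_by_mmuidx(cs, 0, 1, -1);

    /* Flush a single guest virtual page, but only for MMU index 0. */
    tlb_flush_page_by_mmuidx(cs, page_addr, 0, -1);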


@@ -69,6 +69,47 @@ void tlb_flush(CPUState *cpu, int flush_global)
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush_by_mmuidx:");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

#if defined(DEBUG_TLB)
        printf(" %d", mmu_idx);
#endif
        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

#if defined(DEBUG_TLB)
    printf("\n");
#endif
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;

    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &

@@ -121,6 +162,62 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

#if defined(DEBUG_TLB)
    printf("tlb_flush_page_by_mmuidx: " TARGET_FMT_lx, addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf(" forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

#if defined(DEBUG_TLB)
        printf(" %d", mmu_idx);
#endif
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

#if defined(DEBUG_TLB)
    printf("\n");
#endif
    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)


@@ -96,8 +96,46 @@ bool qemu_in_vcpu_thread(void);
void cpu_reload_memory_map(CPUState *cpu);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
/**
* tlb_flush_page:
* @cpu: CPU whose TLB should be flushed
* @addr: virtual address of page to be flushed
*
* Flush one page from the TLB of the specified CPU, for all
* MMU indexes.
*/
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
* @flush_global: ignored
*
* Flush the entire TLB for the specified CPU.
* The flush_global flag is in theory an indicator of whether the whole
* TLB should be flushed, or only those entries not marked global.
* In practice QEMU does not implement any global/not global flag for
* TLB entries, and the argument is ignored.
*/
void tlb_flush(CPUState *cpu, int flush_global);
/**
* tlb_flush_page_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
* @addr: virtual address of page to be flushed
* @...: list of MMU indexes to flush, terminated by a negative value
*
* Flush one page from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
* tlb_flush_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
* @...: list of MMU indexes to flush, terminated by a negative value
*
* Flush all entries from the TLB of the specified CPU, for the specified
* MMU indexes.
*/
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);

@@ -115,6 +153,15 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}

static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}

#endif

#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
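
Usage note: the intended callers are target-specific TLB maintenance helpers, which can now flush only the translation regimes a guest operation actually names. A minimal hypothetical sketch follows; the helper names, the MY_MMU_IDX_* constants and the kernel/user regime split are illustrative assumptions, not code from any QEMU target:

    /* Guest asked to invalidate one virtual address, but only for the
     * kernel translation regime: flush just that MMU index and leave
     * user-mode TLB entries (and any other regimes) intact. */
    static void my_tlbi_vaddr_kernel(CPUState *cs, target_ulong vaddr)
    {
        tlb_flush_page_by_mmuidx(cs, vaddr, MY_MMU_IDX_KERNEL, -1);
    }

    /* Guest asked to invalidate all entries for both regimes. */
    static void my_tlbi_all(CPUState *cs)
    {
        tlb_flush_by_mmuidx(cs, MY_MMU_IDX_KERNEL, MY_MMU_IDX_USER, -1);
    }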