accel/tcg: Inline tb_flush_jmp_cache

This function has two users, who use it incompatibly.
In tlb_flush_page_by_mmuidx_async_0, when flushing a
single page, we need to flush exactly two pages.
In tlb_flush_range_by_mmuidx_async_0, when flushing a
range of pages, we need to flush N+1 pages.

This avoids double-flushing of jmp cache pages in a range.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent
93b996161b
commit
1d41a79b3c
|
@@ -107,14 +107,6 @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
     }
 }
 
-static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
-{
-    /* Discard jump cache entries for any tb which might potentially
-       overlap the flushed page.  */
-    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
-    tb_jmp_cache_clear_page(cpu, addr);
-}
-
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
  * @desc: The CPUTLBDesc portion of the TLB
@@ -541,7 +533,12 @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);
 
-    tb_flush_jmp_cache(cpu, addr);
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed page, which includes the previous.
+     */
+    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+    tb_jmp_cache_clear_page(cpu, addr);
 }
 
 /**
@@ -792,8 +789,14 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
         return;
     }
 
-    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
-        tb_flush_jmp_cache(cpu, d.addr + i);
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed pages, which includes the previous.
+     */
+    d.addr -= TARGET_PAGE_SIZE;
+    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
+        tb_jmp_cache_clear_page(cpu, d.addr);
+        d.addr += TARGET_PAGE_SIZE;
     }
 }
 
Loading…
Reference in New Issue