cputlb: Pass retaddr to tb_invalidate_phys_page_fast
Rather than rely on cpu->mem_io_pc, pass retaddr down directly.

Within tb_invalidate_phys_page_range__locked, the is_cpu_write_access
parameter is non-zero exactly when retaddr would be non-zero, so that
is a simple replacement.

Recognize that current_tb_not_found is true only when mem_io_pc
(and now retaddr) are also non-zero, so remove a redundant test.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent ce9f5e2792
commit 5a7c27bb8a
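For orientation, here is a minimal standalone sketch of the shape of the change. This is not QEMU code; all names below are illustrative stand-ins. The host return address that the memory path already holds (retaddr) is now threaded down as an explicit argument instead of being parked in cpu->mem_io_pc and read back later.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-CPU field the old code used as a side channel. */
struct fake_cpu {
    uintptr_t mem_io_pc;
};

/* Old shape: the callee reads the faulting PC back out of CPU state. */
static void invalidate_old(struct fake_cpu *cpu)
{
    printf("old: faulting pc = %#lx\n", (unsigned long)cpu->mem_io_pc);
}

/* New shape: the faulting PC arrives as a plain parameter. */
static void invalidate_new(uintptr_t retaddr)
{
    printf("new: faulting pc = %#lx\n", (unsigned long)retaddr);
}

int main(void)
{
    uintptr_t retaddr = 0x1234;          /* fake host return address */
    struct fake_cpu cpu = { .mem_io_pc = retaddr };

    invalidate_old(&cpu);                /* before: value goes via cpu state */
    invalidate_new(retaddr);             /* after: value passed directly */
    return 0;
}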
@@ -1094,11 +1094,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
         struct page_collection *pages
             = page_collection_lock(ram_addr, ram_addr + size);
-
-        /* We require mem_io_pc in tb_invalidate_phys_page_range. */
-        cpu->mem_io_pc = retaddr;
-
-        tb_invalidate_phys_page_fast(pages, ram_addr, size);
+        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
         page_collection_unlock(pages);
     }
@@ -1889,7 +1889,7 @@ static void
 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                       PageDesc *p, tb_page_addr_t start,
                                       tb_page_addr_t end,
-                                      int is_cpu_write_access)
+                                      uintptr_t retaddr)
 {
     TranslationBlock *tb;
     tb_page_addr_t tb_start, tb_end;
@@ -1897,9 +1897,9 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 #ifdef TARGET_HAS_PRECISE_SMC
     CPUState *cpu = current_cpu;
     CPUArchState *env = NULL;
-    int current_tb_not_found = is_cpu_write_access;
+    bool current_tb_not_found = retaddr != 0;
+    bool current_tb_modified = false;
     TranslationBlock *current_tb = NULL;
-    int current_tb_modified = 0;
     target_ulong current_pc = 0;
     target_ulong current_cs_base = 0;
     uint32_t current_flags = 0;
@@ -1931,24 +1931,21 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
         if (!(tb_end <= start || tb_start >= end)) {
 #ifdef TARGET_HAS_PRECISE_SMC
             if (current_tb_not_found) {
-                current_tb_not_found = 0;
-                current_tb = NULL;
-                if (cpu->mem_io_pc) {
-                    /* now we have a real cpu fault */
-                    current_tb = tcg_tb_lookup(cpu->mem_io_pc);
-                }
+                current_tb_not_found = false;
+                /* now we have a real cpu fault */
+                current_tb = tcg_tb_lookup(retaddr);
             }
             if (current_tb == tb &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
-                /* If we are modifying the current TB, we must stop
-                   its execution. We could be more precise by checking
-                   that the modification is after the current PC, but it
-                   would require a specialized function to partially
-                   restore the CPU state */
-
-                current_tb_modified = 1;
-                cpu_restore_state_from_tb(cpu, current_tb,
-                                          cpu->mem_io_pc, true);
+                /*
+                 * If we are modifying the current TB, we must stop
+                 * its execution. We could be more precise by checking
+                 * that the modification is after the current PC, but it
+                 * would require a specialized function to partially
+                 * restore the CPU state.
+                 */
+                current_tb_modified = true;
+                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                      &current_flags);
             }
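The first half of this hunk is where the "redundant test" from the commit message disappears. A small illustrative sketch of the invariant it relies on, again with stand-in names rather than the real QEMU functions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void lookup_current_tb(uintptr_t retaddr)
{
    /* The flag is derived from retaddr itself ... */
    bool current_tb_not_found = retaddr != 0;

    if (current_tb_not_found) {
        /* ... so inside this block retaddr is non-zero by construction,
           and the old inner 'if (cpu->mem_io_pc)' test has nothing to do. */
        printf("look up TB at %#lx\n", (unsigned long)retaddr);
    }
}

int main(void)
{
    lookup_current_tb(0);        /* not a CPU write: no lookup attempted */
    lookup_current_tb(0x1234);   /* CPU write: lookup with a known-good PC */
    return 0;
}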
@@ -2042,7 +2039,8 @@ void tb_invalidate_phys_range(target_ulong start, target_ulong end)
  * Call with all @pages in the range [@start, @start + len[ locked.
  */
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len)
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr)
 {
     PageDesc *p;
@@ -2069,7 +2067,8 @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
         }
     } else {
     do_invalidate:
-        tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
+        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
+                                              retaddr);
     }
 }
 #else
@@ -27,7 +27,8 @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
                                              tb_page_addr_t end);
 void page_collection_unlock(struct page_collection *set);
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len);
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr);
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
 void tb_check_watchpoint(CPUState *cpu);