cputlb: Pass retaddr to tb_invalidate_phys_page_fast

Rather than rely on cpu->mem_io_pc, pass retaddr down directly.

Within tb_invalidate_phys_page_range__locked, the is_cpu_write_access
parameter is non-zero exactly when retaddr would be non-zero, so that
is a simple replacement.

Recognize that current_tb_not_found is true only when mem_io_pc
(and now retaddr) are also non-zero, so remove a redundant test.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson <richard.henderson@linaro.org>, 2019-09-21 20:16:09 -07:00
commit 5a7c27bb8a (parent ce9f5e2792)
3 changed files with 22 additions and 26 deletions
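
As background for the flag-to-pointer swap described in the message above, here is a standalone toy sketch (illustrative names only, not QEMU code) of the invariant it relies on: retaddr is non-zero exactly when the invalidation comes from a CPU write, so the value itself can stand in for the old is_cpu_write_access flag, and the nested test on mem_io_pc becomes redundant.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model: retaddr doubles as the old is_cpu_write_access flag. */
    static void invalidate(uintptr_t retaddr)
    {
        bool current_tb_not_found = retaddr != 0;   /* replaces the int flag */

        if (current_tb_not_found) {
            /*
             * A real CPU fault: retaddr is guaranteed non-zero here, so
             * the old nested "if (cpu->mem_io_pc)" test was redundant.
             */
            printf("CPU write at host pc %#" PRIxPTR ": look up current TB\n",
                   retaddr);
        } else {
            printf("not a CPU write: no TB lookup needed\n");
        }
    }

    int main(void)
    {
        invalidate(0);           /* e.g. an invalidation not driven by a store */
        invalidate(0x4000cafe);  /* e.g. a guest store hitting code memory */
        return 0;
    }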

--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1094,11 +1094,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
         struct page_collection *pages
             = page_collection_lock(ram_addr, ram_addr + size);
-
-        /* We require mem_io_pc in tb_invalidate_phys_page_range.  */
-        cpu->mem_io_pc = retaddr;
-
-        tb_invalidate_phys_page_fast(pages, ram_addr, size);
+        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
         page_collection_unlock(pages);
     }
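
For contrast, a minimal standalone sketch (hypothetical names; g_mem_io_pc stands in for cpu->mem_io_pc) of the two calling styles the hunk above switches between: stashing the return address in shared state versus passing it as an argument.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t g_mem_io_pc;   /* stand-in for cpu->mem_io_pc */

    /* Old style: callee reads a value the caller stashed in shared state. */
    static void invalidate_via_global(uintptr_t addr)
    {
        printf("old: addr %#" PRIxPTR ", pc from global %#" PRIxPTR "\n",
               addr, g_mem_io_pc);
    }

    /* New style: the return address travels as an explicit parameter. */
    static void invalidate_via_param(uintptr_t addr, uintptr_t retaddr)
    {
        printf("new: addr %#" PRIxPTR ", pc from argument %#" PRIxPTR "\n",
               addr, retaddr);
    }

    int main(void)
    {
        uintptr_t retaddr = 0x4000cafe;

        g_mem_io_pc = retaddr;                   /* old: stash, then call */
        invalidate_via_global(0x1000);

        invalidate_via_param(0x1000, retaddr);   /* new: no hidden state */
        return 0;
    }

Passing the value explicitly also removes the hidden ordering requirement that the deleted comment ("We require mem_io_pc in tb_invalidate_phys_page_range") had to document.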

--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1889,7 +1889,7 @@ static void
 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                       PageDesc *p, tb_page_addr_t start,
                                       tb_page_addr_t end,
-                                      int is_cpu_write_access)
+                                      uintptr_t retaddr)
 {
     TranslationBlock *tb;
     tb_page_addr_t tb_start, tb_end;
@@ -1897,9 +1897,9 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 #ifdef TARGET_HAS_PRECISE_SMC
     CPUState *cpu = current_cpu;
     CPUArchState *env = NULL;
-    int current_tb_not_found = is_cpu_write_access;
+    bool current_tb_not_found = retaddr != 0;
+    bool current_tb_modified = false;
     TranslationBlock *current_tb = NULL;
-    int current_tb_modified = 0;
     target_ulong current_pc = 0;
     target_ulong current_cs_base = 0;
     uint32_t current_flags = 0;
@@ -1931,24 +1931,21 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
         if (!(tb_end <= start || tb_start >= end)) {
 #ifdef TARGET_HAS_PRECISE_SMC
             if (current_tb_not_found) {
-                current_tb_not_found = 0;
-                current_tb = NULL;
-                if (cpu->mem_io_pc) {
-                    /* now we have a real cpu fault */
-                    current_tb = tcg_tb_lookup(cpu->mem_io_pc);
-                }
+                current_tb_not_found = false;
+                /* now we have a real cpu fault */
+                current_tb = tcg_tb_lookup(retaddr);
             }
             if (current_tb == tb &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
-                /* If we are modifying the current TB, we must stop
-                   its execution. We could be more precise by checking
-                   that the modification is after the current PC, but it
-                   would require a specialized function to partially
-                   restore the CPU state */
-
-                current_tb_modified = 1;
-                cpu_restore_state_from_tb(cpu, current_tb,
-                                          cpu->mem_io_pc, true);
+                /*
+                 * If we are modifying the current TB, we must stop
+                 * its execution. We could be more precise by checking
+                 * that the modification is after the current PC, but it
+                 * would require a specialized function to partially
+                 * restore the CPU state.
+                 */
+                current_tb_modified = true;
+                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                      &current_flags);
             }
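
A rough standalone model (toy types; toy_tb_lookup stands in for tcg_tb_lookup, and the CF_COUNT_MASK rationale in the comment is this editor's reading, not stated in the patch) of the precise-SMC decision in the hunk above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CF_COUNT_MASK 0x0000ffff   /* toy value; low bits = insn count */

    struct toy_tb {
        uint32_t cflags;
    };

    /* Stand-in for tcg_tb_lookup(): map a host pc back to its TB. */
    static struct toy_tb *toy_tb_lookup(uintptr_t retaddr, struct toy_tb *writer)
    {
        return retaddr ? writer : NULL;
    }

    static bool write_modifies_current_tb(struct toy_tb *tb, uintptr_t retaddr,
                                          struct toy_tb *writer)
    {
        struct toy_tb *current_tb = toy_tb_lookup(retaddr, writer);

        /*
         * Same shape as the check above: stop execution only when the
         * invalidated TB is the one doing the writing, and it executes
         * more than one instruction (a single-insn TB cannot go on to
         * run the code it just overwrote).
         */
        return current_tb == tb && (current_tb->cflags & CF_COUNT_MASK) != 1;
    }

    int main(void)
    {
        struct toy_tb tb = { .cflags = 8 };     /* pretend: 8 guest insns */
        struct toy_tb other = { .cflags = 8 };

        printf("self-write: %d\n", write_modifies_current_tb(&tb, 0x4000cafe, &tb));
        printf("other tb:   %d\n", write_modifies_current_tb(&tb, 0x4000cafe, &other));
        printf("no retaddr: %d\n", write_modifies_current_tb(&tb, 0, &tb));
        return 0;
    }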
@@ -2042,7 +2039,8 @@ void tb_invalidate_phys_range(target_ulong start, target_ulong end)
  * Call with all @pages in the range [@start, @start + len[ locked.
  */
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len)
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr)
 {
     PageDesc *p;
@@ -2069,7 +2067,8 @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
         }
     } else {
     do_invalidate:
-        tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
+        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
+                                              retaddr);
     }
 }
 #else

--- a/include/exec/translate-all.h
+++ b/include/exec/translate-all.h
@@ -27,7 +27,8 @@ struct page_collection *page_collection_lock(tb_page_addr_t start,
                                              tb_page_addr_t end);
 void page_collection_unlock(struct page_collection *set);
 void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len);
+                                  tb_page_addr_t start, int len,
+                                  uintptr_t retaddr);
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
 void tb_check_watchpoint(CPUState *cpu);
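
Putting the new declaration to use, a hypothetical caller (modelled on the notdirty_write hunk in the first file; not compilable outside the QEMU tree) would now look like:

    #include "exec/translate-all.h"

    /* Hypothetical helper: invalidate TBs covering a just-written range,
     * forwarding the writer's host return address. */
    static void invalidate_written_code(tb_page_addr_t ram_addr, int size,
                                        uintptr_t retaddr)
    {
        struct page_collection *pages =
            page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }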