target/hppa: Always report one page to tlb_set_page

No need to trigger the large_page_mask code unnecessarily.
Drop the now unused HPPATLBEntry.page_size field.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2023-10-27 01:09:21 -07:00
parent 66866cc74f
commit f8cda28b8d
2 changed files with 12 additions and 4 deletions

View File

@@ -179,15 +179,16 @@ typedef struct HPPATLBEntry {
IntervalTreeNode itree;
target_ureg pa;
unsigned entry_valid : 1;
unsigned u : 1;
unsigned t : 1;
unsigned d : 1;
unsigned b : 1;
unsigned page_size : 4;
unsigned ar_type : 3;
unsigned ar_pl1 : 2;
unsigned ar_pl2 : 2;
unsigned entry_valid : 1;
unsigned access_id : 16;
} HPPATLBEntry;

View File

@@ -268,9 +268,16 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
phys & TARGET_PAGE_MASK, size, type, mmu_idx);
/* Success! Store the translation into the QEMU TLB. */
/*
* Success! Store the translation into the QEMU TLB.
* Note that we always install a single-page entry, because that
* is what works best with softmmu -- anything else will trigger
* the large page protection mask. We do not require this,
* because we record the large page here in the hppa tlb.
*/
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0));
prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}