target/arm: Use probe_access_full for MTE
The CPUTLBEntryFull structure now stores the original pte attributes, as
well as the physical address.  Therefore, we no longer need a separate
bit in MemTxAttrs, nor do we need to walk the tree of memory regions.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
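The mechanism is easy to model outside QEMU. The sketch below is illustrative only (the type and names are stand-ins, not QEMU's internals): a TLB fill caches the page's physical address and MAIR-format PTE attributes, and a later probe decides "is this page MTE-Tagged?" by comparing the cached attributes against 0xf0, the MAIR encoding for Tagged Normal Write-Back memory, with no MemTxAttrs bit and no memory-region walk.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-in for CPUTLBEntryFull, filled once per TLB miss. */
    typedef struct {
        uint64_t phys_addr;   /* physical address of the page */
        uint8_t  pte_attrs;   /* MAIR-format attributes taken from the PTE */
    } FullEntryModel;

    /* 0xf0 is the MAIR encoding for Tagged Normal Write-Back memory. */
    #define MAIR_ATTR_TAGGED 0xf0

    /* A probe answers "is this page tagged?" from the cached entry alone. */
    static bool page_is_tagged(const FullEntryModel *full)
    {
        return full->pte_attrs == MAIR_ATTR_TAGGED;
    }

    int main(void)
    {
        FullEntryModel full = { .phys_addr = 0x40000000, .pte_attrs = 0xf0 };
        assert(page_is_tagged(&full));
        return 0;
    }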
parent 24d18d5d7e
commit b8967ddf39
@@ -3400,7 +3400,6 @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
  * generic target bits directly.
  */
 #define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-#define arm_tlb_mte_tagged(x) (typecheck_memtxattrs(x)->target_tlb_bit1)
 
 /*
  * AArch64 usage of the PAGE_TARGET_* bits for linux-user.

@@ -105,10 +105,9 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                       TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
     return tags + index;
 #else
-    uintptr_t index;
     CPUTLBEntryFull *full;
+    MemTxAttrs attrs;
     int in_page, flags;
-    ram_addr_t ptr_ra;
     hwaddr ptr_paddr, tag_paddr, xlat;
     MemoryRegion *mr;
     ARMASIdx tag_asi;

@@ -124,30 +123,12 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      * valid.  Indicate to probe_access_flags no-fault, then assert that
      * we received a valid page.
      */
-    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
-                               ra == 0, &host, ra);
+    flags = probe_access_full(env, ptr, ptr_access, ptr_mmu_idx,
+                              ra == 0, &host, &full, ra);
     assert(!(flags & TLB_INVALID_MASK));
 
-    /*
-     * Find the CPUTLBEntryFull for ptr.  This *must* be present in the TLB
-     * because we just found the mapping.
-     * TODO: Perhaps there should be a cputlb helper that returns a
-     * matching tlb entry + iotlb entry.
-     */
-    index = tlb_index(env, ptr_mmu_idx, ptr);
-# ifdef CONFIG_DEBUG_TCG
-    {
-        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
-        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
-                                   ? entry->addr_read
-                                   : tlb_addr_write(entry));
-        g_assert(tlb_hit(comparator, ptr));
-    }
-# endif
-    full = &env_tlb(env)->d[ptr_mmu_idx].fulltlb[index];
-
     /* If the virtual page MemAttr != Tagged, access unchecked. */
-    if (!arm_tlb_mte_tagged(&full->attrs)) {
+    if (full->pte_attrs != 0xf0) {
         return NULL;
     }
 

@@ -162,6 +143,14 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
         return NULL;
     }
 
+    /*
+     * Remember these values across the second lookup below,
+     * which may invalidate this pointer via tlb resize.
+     */
+    ptr_paddr = full->phys_addr;
+    attrs = full->attrs;
+    full = NULL;
+
     /*
      * The Normal memory access can extend to the next page.  E.g. a single
      * 8-byte access to the last byte of a page will check only the last

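The `full = NULL;` added above is worth dwelling on: `full` points into the TLB's dynamically sized table, and the second probe further down can trigger a TLB resize that reallocates that table. The hazard is the familiar realloc one, sketched here in standalone C (names illustrative, error handling minimal):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { uint64_t phys_addr; } Entry;

    int main(void)
    {
        size_t n = 8;
        Entry *table = calloc(n, sizeof(*table));
        if (!table) {
            return 1;
        }

        /* Copy the fields we need out of the entry *before* anything that
         * can resize the table, then stop using the pointer. */
        const Entry *e = &table[3];
        uint64_t saved_paddr = e->phys_addr;
        e = NULL;

        /* A resize may move the whole table; the old pointer would dangle. */
        table = realloc(table, 2 * n * sizeof(*table));

        (void)saved_paddr;
        (void)e;
        free(table);
        return 0;
    }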
@@ -170,9 +159,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
      */
     in_page = -(ptr | TARGET_PAGE_MASK);
     if (unlikely(ptr_size > in_page)) {
-        void *ignore;
-        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
-                                    ptr_mmu_idx, ra == 0, &ignore, ra);
+        flags |= probe_access_full(env, ptr + in_page, ptr_access,
+                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
         assert(!(flags & TLB_INVALID_MASK));
     }
 

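As context for this hunk, `in_page = -(ptr | TARGET_PAGE_MASK)` computes the bytes remaining from `ptr` to the end of its page: ORing in the page mask sets every bit above the page offset, so the two's-complement negation leaves `page_size - (ptr & ~TARGET_PAGE_MASK)`. A standalone check, assuming 4 KiB pages:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_BITS 12
    #define PAGE_SIZE (1ull << PAGE_BITS)
    #define PAGE_MASK (~(PAGE_SIZE - 1))   /* 0xffff...f000 */

    int main(void)
    {
        uint64_t ptr = 0x12345ff8;
        uint64_t in_page = -(ptr | PAGE_MASK);   /* bytes left in the page */
        assert(in_page == 8);
        assert(in_page == PAGE_SIZE - (ptr & (PAGE_SIZE - 1)));
        return 0;
    }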
@@ -180,33 +168,17 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     if (unlikely(flags & TLB_WATCHPOINT)) {
         int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
         assert(ra != 0);
-        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
-                             full->attrs, wp, ra);
+        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
     }
 
-    /*
-     * Find the physical address within the normal mem space.
-     * The memory region lookup must succeed because TLB_MMIO was
-     * not set in the cputlb lookup above.
-     */
-    mr = memory_region_from_host(host, &ptr_ra);
-    tcg_debug_assert(mr != NULL);
-    tcg_debug_assert(memory_region_is_ram(mr));
-    ptr_paddr = ptr_ra;
-    do {
-        ptr_paddr += mr->addr;
-        mr = mr->container;
-    } while (mr);
-
     /* Convert to the physical address in tag space. */
     tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
 
     /* Look up the address in tag space. */
-    tag_asi = full->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
+    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
     tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
     mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
-                                 tag_access == MMU_DATA_STORE,
-                                 full->attrs);
+                                 tag_access == MMU_DATA_STORE, attrs);
 
     /*
      * Note that @mr will never be NULL.  If there is nothing in the address

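The `tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1)` context line encodes MTE's tag-storage density: one 4-bit tag covers a 16-byte granule, and two tags pack into each byte of tag memory, so a tag address is the data address divided by 32. A quick standalone check:

    #include <assert.h>
    #include <stdint.h>

    #define LOG2_TAG_GRANULE 4          /* one tag per 16 data bytes */

    static uint64_t tag_paddr_for(uint64_t ptr_paddr)
    {
        /* Two 4-bit tags per byte of tag storage: divide by 16 * 2 = 32. */
        return ptr_paddr >> (LOG2_TAG_GRANULE + 1);
    }

    int main(void)
    {
        assert(tag_paddr_for(0)  == 0);
        assert(tag_paddr_for(32) == 1);   /* granules 2..3 -> second tag byte */
        assert(tag_paddr_for(64) == 2);
        return 0;
    }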
@@ -5351,8 +5351,19 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
      */
     addr = useronly_clean_ptr(addr);
 
+#ifdef CONFIG_USER_ONLY
     flags = probe_access_flags(env, addr, access_type, mmu_idx, nofault,
                                &info->host, retaddr);
+    memset(&info->attrs, 0, sizeof(info->attrs));
+    /* Require both ANON and MTE; see allocation_tag_mem(). */
+    info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE);
+#else
+    CPUTLBEntryFull *full;
+    flags = probe_access_full(env, addr, access_type, mmu_idx, nofault,
+                              &info->host, &full, retaddr);
+    info->attrs = full->attrs;
+    info->tagged = full->pte_attrs == 0xf0;
+#endif
     info->flags = flags;
 
     if (flags & TLB_INVALID_MASK) {

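The rewritten sve_probe_page gives the new `tagged` flag one meaning with two derivations: under CONFIG_USER_ONLY a page counts as tagged when it was mapped with both PAGE_ANON and PAGE_MTE, while under system emulation it counts as tagged when the cached PTE attributes carry the MAIR Tagged encoding. Restated as standalone predicates (flag values here are illustrative, not QEMU's):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-ins for QEMU's page flags and MAIR encoding. */
    #define PAGE_ANON 0x1
    #define PAGE_MTE  0x2
    #define MAIR_ATTR_TAGGED 0xf0

    /* user-only: tagging was requested at mmap time */
    static bool tagged_user(int flags)
    {
        return (flags & PAGE_ANON) && (flags & PAGE_MTE);
    }

    /* system emulation: tagging comes from the stage-1 PTE attributes */
    static bool tagged_sysemu(uint8_t pte_attrs)
    {
        return pte_attrs == MAIR_ATTR_TAGGED;
    }

    int main(void)
    {
        assert(tagged_user(PAGE_ANON | PAGE_MTE));
        assert(!tagged_user(PAGE_ANON));
        assert(tagged_sysemu(0xf0) && !tagged_sysemu(0x44));
        return 0;
    }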
@@ -5362,33 +5373,6 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
 
     /* Ensure that info->host[] is relative to addr, not addr + mem_off. */
     info->host -= mem_off;
-
-#ifdef CONFIG_USER_ONLY
-    memset(&info->attrs, 0, sizeof(info->attrs));
-    /* Require both MAP_ANON and PROT_MTE -- see allocation_tag_mem. */
-    arm_tlb_mte_tagged(&info->attrs) =
-        (flags & PAGE_ANON) && (flags & PAGE_MTE);
-#else
-    /*
-     * Find the iotlbentry for addr and return the transaction attributes.
-     * This *must* be present in the TLB because we just found the mapping.
-     */
-    {
-        uintptr_t index = tlb_index(env, mmu_idx, addr);
-
-# ifdef CONFIG_DEBUG_TCG
-        CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-        target_ulong comparator = (access_type == MMU_DATA_LOAD
-                                   ? entry->addr_read
-                                   : tlb_addr_write(entry));
-        g_assert(tlb_hit(comparator, addr));
-# endif
-
-        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-        info->attrs = full->attrs;
-    }
-#endif
-
     return true;
 }
 

@@ -5617,7 +5601,7 @@ void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
     intptr_t mem_off, reg_off, reg_last;
 
     /* Process the page only if MemAttr == Tagged. */
-    if (arm_tlb_mte_tagged(&info->page[0].attrs)) {
+    if (info->page[0].tagged) {
         mem_off = info->mem_off_first[0];
         reg_off = info->reg_off_first[0];
         reg_last = info->reg_off_split;

@@ -5638,7 +5622,7 @@ void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
     }
 
     mem_off = info->mem_off_first[1];
-    if (mem_off >= 0 && arm_tlb_mte_tagged(&info->page[1].attrs)) {
+    if (mem_off >= 0 && info->page[1].tagged) {
         reg_off = info->reg_off_first[1];
         reg_last = info->reg_off_last[1];
 

@@ -6017,7 +6001,7 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr,
      * Disable MTE checking if the Tagged bit is not set.  Since TBI must
      * be set within MTEDESC for MTE, !mtedesc => !mte_active.
      */
-    if (!arm_tlb_mte_tagged(&info.page[0].attrs)) {
+    if (!info.page[0].tagged) {
         mtedesc = 0;
     }
 

@@ -6568,7 +6552,7 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                 cpu_check_watchpoint(env_cpu(env), addr, msize,
                                      info.attrs, BP_MEM_READ, retaddr);
             }
-            if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+            if (mtedesc && info.tagged) {
                 mte_check(env, mtedesc, addr, retaddr);
             }
             if (unlikely(info.flags & TLB_MMIO)) {

@@ -6585,7 +6569,7 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                          msize, info.attrs,
                                          BP_MEM_READ, retaddr);
                 }
-                if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+                if (mtedesc && info.tagged) {
                     mte_check(env, mtedesc, addr, retaddr);
                 }
                 tlb_fn(env, &scratch, reg_off, addr, retaddr);

@@ -6786,9 +6770,7 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                      (env_cpu(env), addr, msize) & BP_MEM_READ)) {
                     goto fault;
                 }
-                if (mtedesc &&
-                    arm_tlb_mte_tagged(&info.attrs) &&
-                    !mte_probe(env, mtedesc, addr)) {
+                if (mtedesc && info.tagged && !mte_probe(env, mtedesc, addr)) {
                     goto fault;
                 }
 

@@ -6974,7 +6956,7 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm,
                                      info.attrs, BP_MEM_WRITE, retaddr);
             }
 
-            if (mtedesc && arm_tlb_mte_tagged(&info.attrs)) {
+            if (mtedesc && info.tagged) {
                 mte_check(env, mtedesc, addr, retaddr);
             }
         }

@@ -134,6 +134,7 @@ typedef struct {
     void *host;
     int flags;
     MemTxAttrs attrs;
+    bool tagged;
 } SVEHostPage;
 
 bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,

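With the `tagged` field added to SVEHostPage, the decision is made once at probe time and every later MTE check reduces to a cheap flag test, as the hunks above show. A minimal sketch of that split (types illustrative, not QEMU's):

    #include <assert.h>
    #include <stdbool.h>

    typedef struct {
        bool tagged;   /* decided once at probe time */
    } PageModel;

    /* sve_probe_page() analogue: record the decision with the page. */
    static void probe(PageModel *page, bool pte_says_tagged)
    {
        page->tagged = pte_says_tagged;
    }

    /* per-access analogue: one flag test instead of decoding attributes */
    static bool needs_mte_check(const PageModel *page, unsigned mtedesc)
    {
        return mtedesc != 0 && page->tagged;
    }

    int main(void)
    {
        PageModel page;
        probe(&page, true);
        assert(needs_mte_check(&page, 1));
        assert(!needs_mte_check(&page, 0));
        return 0;
    }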
@@ -231,10 +231,6 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
             res.f.phys_addr &= TARGET_PAGE_MASK;
             address &= TARGET_PAGE_MASK;
         }
-        /* Notice and record tagged memory. */
-        if (cpu_isar_feature(aa64_mte, cpu) && res.cacheattrs.attrs == 0xf0) {
-            arm_tlb_mte_tagged(&res.f.attrs) = true;
-        }
 
         res.f.pte_attrs = res.cacheattrs.attrs;
         res.f.shareability = res.cacheattrs.shareability;