target/arm: Export sve contiguous ldst support functions

Export all of the support functions for performing bulk
fault analysis on a set of elements at contiguous addresses
controlled by a predicate.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220607203306.657998-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 0b68112b39 (parent d1ce862602)
Richard Henderson, 2022-06-08 19:38:58 +01:00; committed by Peter Maydell
2 changed files with 111 additions and 70 deletions
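The functions exported below are meant to be used together by the contiguous load/store helpers. As a rough sketch only — the function name example_cont_load, its signature, and its body are illustrative assumptions modelled on the existing contiguous-load helpers in sve_helper.c, not code from this patch — a caller composes the exported pieces roughly like this:

/*
 * Illustrative sketch of the caller pattern for the exported functions.
 * Assumes the usual QEMU target/arm headers (qemu/osdep.h, cpu.h,
 * sve_ldst_internal.h); not part of this commit.
 */
static void example_cont_load(CPUARMState *env, void *vd, uint64_t *vg,
                              target_ulong addr, intptr_t reg_max,
                              int esz, int msize, uintptr_t retaddr)
{
    SVEContLdSt info;

    /* Find the active elements and how they map onto (up to) two pages. */
    if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, msize)) {
        /* Predicate all false: a load simply zeroes the destination. */
        memset(vd, 0, reg_max);
        return;
    }

    /* Probe the page(s); FAULT_ALL lets any invalid page fault normally. */
    sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, retaddr);

    /* Watchpoint checks for the active elements (empty stub for user-only). */
    sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, msize,
                              BP_MEM_READ, retaddr);

    /*
     * A real helper then copies the active elements from
     * info.page[0].host / info.page[1].host into vd, handling any
     * element that straddles the boundary via info.mem_off_split.
     */
}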

target/arm/sve_helper.c

@@ -5341,15 +5341,8 @@ static intptr_t find_next_active(uint64_t *vg, intptr_t reg_off,
  * exit via page fault exception.
  */
 
-typedef struct {
-    void *host;
-    int flags;
-    MemTxAttrs attrs;
-} SVEHostPage;
-
-static bool sve_probe_page(SVEHostPage *info, bool nofault,
-                           CPUARMState *env, target_ulong addr,
-                           int mem_off, MMUAccessType access_type,
-                           int mmu_idx, uintptr_t retaddr)
+bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
+                    target_ulong addr, int mem_off, MMUAccessType access_type,
+                    int mmu_idx, uintptr_t retaddr)
 {
     int flags;
@@ -5406,59 +5399,13 @@ static bool sve_probe_page(SVEHostPage *info, bool nofault,
     return true;
 }
 
-/*
- * Analyse contiguous data, protected by a governing predicate.
- */
-
-typedef enum {
-    FAULT_NO,
-    FAULT_FIRST,
-    FAULT_ALL,
-} SVEContFault;
-
-typedef struct {
-    /*
-     * First and last element wholly contained within the two pages.
-     * mem_off_first[0] and reg_off_first[0] are always set >= 0.
-     * reg_off_last[0] may be < 0 if the first element crosses pages.
-     * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
-     * are set >= 0 only if there are complete elements on a second page.
-     *
-     * The reg_off_* offsets are relative to the internal vector register.
-     * The mem_off_first offset is relative to the memory address; the
-     * two offsets are different when a load operation extends, a store
-     * operation truncates, or for multi-register operations.
-     */
-    int16_t mem_off_first[2];
-    int16_t reg_off_first[2];
-    int16_t reg_off_last[2];
-
-    /*
-     * One element that is misaligned and spans both pages,
-     * or -1 if there is no such active element.
-     */
-    int16_t mem_off_split;
-    int16_t reg_off_split;
-
-    /*
-     * The byte offset at which the entire operation crosses a page boundary.
-     * Set >= 0 if and only if the entire operation spans two pages.
-     */
-    int16_t page_split;
-
-    /* TLB data for the two pages. */
-    SVEHostPage page[2];
-} SVEContLdSt;
-
 /*
  * Find first active element on each page, and a loose bound for the
  * final element on each page.  Identify any single element that spans
  * the page boundary.  Return true if there are any active elements.
  */
-static bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr,
-                                   uint64_t *vg, intptr_t reg_max,
-                                   int esz, int msize)
+bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg,
+                            intptr_t reg_max, int esz, int msize)
 {
     const int esize = 1 << esz;
     const uint64_t pg_mask = pred_esz_masks[esz];
@@ -5548,7 +5495,7 @@ static bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr,
  * Control the generation of page faults with @fault.  Return false if
  * there is no work to do, which can only happen with @fault == FAULT_NO.
  */
-static bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
-                                CPUARMState *env, target_ulong addr,
-                                MMUAccessType access_type, uintptr_t retaddr)
+bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
+                         CPUARMState *env, target_ulong addr,
+                         MMUAccessType access_type, uintptr_t retaddr)
 {
@@ -5606,12 +5553,12 @@ static bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
     return have_work;
 }
 
-static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
-                                      uint64_t *vg, target_ulong addr,
-                                      int esize, int msize, int wp_access,
-                                      uintptr_t retaddr)
+#ifndef CONFIG_USER_ONLY
+void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
+                               uint64_t *vg, target_ulong addr,
+                               int esize, int msize, int wp_access,
+                               uintptr_t retaddr)
 {
-#ifndef CONFIG_USER_ONLY
     intptr_t mem_off, reg_off, reg_last;
     int flags0 = info->page[0].flags;
     int flags1 = info->page[1].flags;
@@ -5667,10 +5614,10 @@ static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
             } while (reg_off & 63);
         } while (reg_off <= reg_last);
     }
-#endif
 }
+#endif
 
-static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
-                                    uint64_t *vg, target_ulong addr, int esize,
-                                    int msize, uint32_t mtedesc, uintptr_t ra)
+void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
+                             uint64_t *vg, target_ulong addr, int esize,
+                             int msize, uint32_t mtedesc, uintptr_t ra)
 {

target/arm/sve_ldst_internal.h

@@ -124,4 +124,98 @@ DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq)
 #undef DO_LD_PRIM_2
 #undef DO_ST_PRIM_2
 
+/*
+ * Resolve the guest virtual address to info->host and info->flags.
+ * If @nofault, return false if the page is invalid, otherwise
+ * exit via page fault exception.
+ */
+typedef struct {
+    void *host;
+    int flags;
+    MemTxAttrs attrs;
+} SVEHostPage;
+
+bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
+                    target_ulong addr, int mem_off, MMUAccessType access_type,
+                    int mmu_idx, uintptr_t retaddr);
+
+/*
+ * Analyse contiguous data, protected by a governing predicate.
+ */
+typedef enum {
+    FAULT_NO,
+    FAULT_FIRST,
+    FAULT_ALL,
+} SVEContFault;
+
+typedef struct {
+    /*
+     * First and last element wholly contained within the two pages.
+     * mem_off_first[0] and reg_off_first[0] are always set >= 0.
+     * reg_off_last[0] may be < 0 if the first element crosses pages.
+     * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
+     * are set >= 0 only if there are complete elements on a second page.
+     *
+     * The reg_off_* offsets are relative to the internal vector register.
+     * The mem_off_first offset is relative to the memory address; the
+     * two offsets are different when a load operation extends, a store
+     * operation truncates, or for multi-register operations.
+     */
+    int16_t mem_off_first[2];
+    int16_t reg_off_first[2];
+    int16_t reg_off_last[2];
+
+    /*
+     * One element that is misaligned and spans both pages,
+     * or -1 if there is no such active element.
+     */
+    int16_t mem_off_split;
+    int16_t reg_off_split;
+
+    /*
+     * The byte offset at which the entire operation crosses a page boundary.
+     * Set >= 0 if and only if the entire operation spans two pages.
+     */
+    int16_t page_split;
+
+    /* TLB data for the two pages. */
+    SVEHostPage page[2];
+} SVEContLdSt;
+
+/*
+ * Find first active element on each page, and a loose bound for the
+ * final element on each page.  Identify any single element that spans
+ * the page boundary.  Return true if there are any active elements.
+ */
+bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg,
+                            intptr_t reg_max, int esz, int msize);
+
+/*
+ * Resolve the guest virtual addresses to info->page[].
+ * Control the generation of page faults with @fault.  Return false if
+ * there is no work to do, which can only happen with @fault == FAULT_NO.
+ */
+bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
+                         CPUARMState *env, target_ulong addr,
+                         MMUAccessType access_type, uintptr_t retaddr);
+
+#ifdef CONFIG_USER_ONLY
+static inline void
+sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
+                          target_ulong addr, int esize, int msize,
+                          int wp_access, uintptr_t retaddr)
+{ }
+#else
+void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
+                               uint64_t *vg, target_ulong addr,
+                               int esize, int msize, int wp_access,
+                               uintptr_t retaddr);
+#endif
+
+void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
+                             target_ulong addr, int esize, int msize,
+                             uint32_t mtedesc, uintptr_t ra);
+
 #endif /* TARGET_ARM_SVE_LDST_INTERNAL_H */
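
To make the SVEContLdSt field comments concrete, consider an illustrative decomposition (numbers invented for this example, derived only from the field descriptions above): four 8-byte elements (reg_max = 32, esize = msize = 8), an all-true predicate, and an addr placed so the page boundary falls 20 bytes into the access. Then page_split = 20; the element at byte offset 16 straddles the boundary, so mem_off_split = reg_off_split = 16; the whole elements on the first page give mem_off_first[0] = reg_off_first[0] = 0 and reg_off_last[0] = 8; and the single whole element on the second page gives mem_off_first[1] = reg_off_first[1] = reg_off_last[1] = 24. The CONFIG_USER_ONLY variant of sve_cont_ldst_watchpoints is deliberately an empty static inline, so call sites can invoke it unconditionally while only system-mode builds provide, and need, the real implementation.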