target/arm: Export sve contiguous ldst support functions
Export all of the support functions for performing bulk fault analysis
on a set of elements at contiguous addresses controlled by a predicate.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220607203306.657998-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent d1ce862602
commit 0b68112b39
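For context, the exported helpers are intended to be called in sequence by a contiguous load/store helper: analyse the predicate, probe the page or pages touched, then report watchpoints and MTE faults before any data is moved. The sketch below is illustrative only; the wrapper name, its parameter list, and the final copy step are invented for this example and are not code from the patch (the real callers live in sve_helper.c).

/*
 * Illustrative caller -- not part of this patch.  Assumes the usual
 * target/arm build environment plus the header exported below.
 */
#include "sve_ldst_internal.h"

static void example_cont_load(CPUARMState *env, uint64_t *vg,
                              target_ulong addr, intptr_t reg_max,
                              int esz, int msize, uint32_t mtedesc,
                              uintptr_t retaddr)
{
    SVEContLdSt info;

    /* Find the active elements; nothing to do if the predicate is false. */
    if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, msize)) {
        return;
    }

    /* Probe the one or two pages covered; FAULT_ALL faults eagerly. */
    sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, retaddr);

    /* Report watchpoints and MTE failures before any register is changed. */
    sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, msize,
                              BP_MEM_READ, retaddr);
    if (mtedesc) {
        sve_cont_ldst_mte_check(&info, env, vg, addr, 1 << esz, msize,
                                mtedesc, retaddr);
    }

    /*
     * ...then copy data via info.page[0].host / info.page[1].host, handling
     * any element that spans the page boundary (info.mem_off_split) on its own.
     */
}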
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -5341,16 +5341,9 @@ static intptr_t find_next_active(uint64_t *vg, intptr_t reg_off,
  * exit via page fault exception.
  */
 
-typedef struct {
-    void *host;
-    int flags;
-    MemTxAttrs attrs;
-} SVEHostPage;
-
-static bool sve_probe_page(SVEHostPage *info, bool nofault,
-                           CPUARMState *env, target_ulong addr,
-                           int mem_off, MMUAccessType access_type,
-                           int mmu_idx, uintptr_t retaddr)
+bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
+                    target_ulong addr, int mem_off, MMUAccessType access_type,
+                    int mmu_idx, uintptr_t retaddr)
 {
     int flags;
 
@@ -5406,59 +5399,13 @@ static bool sve_probe_page(SVEHostPage *info, bool nofault,
     return true;
 }
 
-
-/*
- * Analyse contiguous data, protected by a governing predicate.
- */
-
-typedef enum {
-    FAULT_NO,
-    FAULT_FIRST,
-    FAULT_ALL,
-} SVEContFault;
-
-typedef struct {
-    /*
-     * First and last element wholly contained within the two pages.
-     * mem_off_first[0] and reg_off_first[0] are always set >= 0.
-     * reg_off_last[0] may be < 0 if the first element crosses pages.
-     * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
-     * are set >= 0 only if there are complete elements on a second page.
-     *
-     * The reg_off_* offsets are relative to the internal vector register.
-     * The mem_off_first offset is relative to the memory address; the
-     * two offsets are different when a load operation extends, a store
-     * operation truncates, or for multi-register operations.
-     */
-    int16_t mem_off_first[2];
-    int16_t reg_off_first[2];
-    int16_t reg_off_last[2];
-
-    /*
-     * One element that is misaligned and spans both pages,
-     * or -1 if there is no such active element.
-     */
-    int16_t mem_off_split;
-    int16_t reg_off_split;
-
-    /*
-     * The byte offset at which the entire operation crosses a page boundary.
-     * Set >= 0 if and only if the entire operation spans two pages.
-     */
-    int16_t page_split;
-
-    /* TLB data for the two pages. */
-    SVEHostPage page[2];
-} SVEContLdSt;
-
 /*
  * Find first active element on each page, and a loose bound for the
  * final element on each page. Identify any single element that spans
  * the page boundary. Return true if there are any active elements.
  */
-static bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr,
-                                   uint64_t *vg, intptr_t reg_max,
-                                   int esz, int msize)
+bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg,
+                            intptr_t reg_max, int esz, int msize)
 {
     const int esize = 1 << esz;
     const uint64_t pg_mask = pred_esz_masks[esz];
@@ -5548,9 +5495,9 @@ static bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr,
  * Control the generation of page faults with @fault. Return false if
  * there is no work to do, which can only happen with @fault == FAULT_NO.
  */
-static bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
-                                CPUARMState *env, target_ulong addr,
-                                MMUAccessType access_type, uintptr_t retaddr)
+bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
+                         CPUARMState *env, target_ulong addr,
+                         MMUAccessType access_type, uintptr_t retaddr)
 {
     int mmu_idx = cpu_mmu_index(env, false);
     int mem_off = info->mem_off_first[0];
@@ -5606,12 +5553,12 @@ static bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
     return have_work;
 }
 
-static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
-                                      uint64_t *vg, target_ulong addr,
-                                      int esize, int msize, int wp_access,
-                                      uintptr_t retaddr)
-{
 #ifndef CONFIG_USER_ONLY
+void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
+                               uint64_t *vg, target_ulong addr,
+                               int esize, int msize, int wp_access,
+                               uintptr_t retaddr)
+{
     intptr_t mem_off, reg_off, reg_last;
     int flags0 = info->page[0].flags;
     int flags1 = info->page[1].flags;
@@ -5667,12 +5614,12 @@ static void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
             } while (reg_off & 63);
         } while (reg_off <= reg_last);
     }
-#endif
 }
+#endif
 
-static void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
-                                    uint64_t *vg, target_ulong addr, int esize,
-                                    int msize, uint32_t mtedesc, uintptr_t ra)
+void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
+                             uint64_t *vg, target_ulong addr, int esize,
+                             int msize, uint32_t mtedesc, uintptr_t ra)
 {
     intptr_t mem_off, reg_off, reg_last;
 
--- a/target/arm/sve_ldst_internal.h
+++ b/target/arm/sve_ldst_internal.h
@@ -124,4 +124,98 @@ DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq)
 #undef DO_LD_PRIM_2
 #undef DO_ST_PRIM_2
 
+/*
+ * Resolve the guest virtual address to info->host and info->flags.
+ * If @nofault, return false if the page is invalid, otherwise
+ * exit via page fault exception.
+ */
+
+typedef struct {
+    void *host;
+    int flags;
+    MemTxAttrs attrs;
+} SVEHostPage;
+
+bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
+                    target_ulong addr, int mem_off, MMUAccessType access_type,
+                    int mmu_idx, uintptr_t retaddr);
+
+/*
+ * Analyse contiguous data, protected by a governing predicate.
+ */
+
+typedef enum {
+    FAULT_NO,
+    FAULT_FIRST,
+    FAULT_ALL,
+} SVEContFault;
+
+typedef struct {
+    /*
+     * First and last element wholly contained within the two pages.
+     * mem_off_first[0] and reg_off_first[0] are always set >= 0.
+     * reg_off_last[0] may be < 0 if the first element crosses pages.
+     * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
+     * are set >= 0 only if there are complete elements on a second page.
+     *
+     * The reg_off_* offsets are relative to the internal vector register.
+     * The mem_off_first offset is relative to the memory address; the
+     * two offsets are different when a load operation extends, a store
+     * operation truncates, or for multi-register operations.
+     */
+    int16_t mem_off_first[2];
+    int16_t reg_off_first[2];
+    int16_t reg_off_last[2];
+
+    /*
+     * One element that is misaligned and spans both pages,
+     * or -1 if there is no such active element.
+     */
+    int16_t mem_off_split;
+    int16_t reg_off_split;
+
+    /*
+     * The byte offset at which the entire operation crosses a page boundary.
+     * Set >= 0 if and only if the entire operation spans two pages.
+     */
+    int16_t page_split;
+
+    /* TLB data for the two pages. */
+    SVEHostPage page[2];
+} SVEContLdSt;
+
+/*
+ * Find first active element on each page, and a loose bound for the
+ * final element on each page. Identify any single element that spans
+ * the page boundary. Return true if there are any active elements.
+ */
+bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg,
+                            intptr_t reg_max, int esz, int msize);
+
+/*
+ * Resolve the guest virtual addresses to info->page[].
+ * Control the generation of page faults with @fault. Return false if
+ * there is no work to do, which can only happen with @fault == FAULT_NO.
+ */
+bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
+                         CPUARMState *env, target_ulong addr,
+                         MMUAccessType access_type, uintptr_t retaddr);
+
+#ifdef CONFIG_USER_ONLY
+static inline void
+sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
+                          target_ulong addr, int esize, int msize,
+                          int wp_access, uintptr_t retaddr)
+{ }
+#else
+void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
+                               uint64_t *vg, target_ulong addr,
+                               int esize, int msize, int wp_access,
+                               uintptr_t retaddr);
+#endif
+
+void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
+                             target_ulong addr, int esize, int msize,
+                             uint32_t mtedesc, uintptr_t ra);
+
 #endif /* TARGET_ARM_SVE_LDST_INTERNAL_H */
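How the recorded offsets are consumed once probing succeeds is not part of this patch, but a minimal sketch may help when reading the struct comments above. It assumes msize == esize (no extension on load), a usable host pointer for page[0], and the usual includes; the function and variable names are invented for this example.

/* Illustrative consumer of SVEContLdSt -- not part of this patch. */
static void example_copy_first_page(void *vd, uint64_t *vg,
                                    const SVEContLdSt *info,
                                    int esz, int msize)
{
    const int esize = 1 << esz;
    intptr_t reg_off = info->reg_off_first[0];
    intptr_t reg_last = info->reg_off_last[0];
    intptr_t mem_off = info->mem_off_first[0];

    /* reg_last < 0 means the first element crosses the page boundary. */
    while (reg_off <= reg_last) {
        uint64_t pg = vg[reg_off >> 6];    /* one predicate bit per byte */
        do {
            if ((pg >> (reg_off & 63)) & 1) {
                /* Copy one active element from host memory to the register. */
                memcpy((char *)vd + reg_off,
                       (char *)info->page[0].host + mem_off, msize);
            }
            reg_off += esize;
            mem_off += msize;
        } while (reg_off <= reg_last && (reg_off & 63));
    }
}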