target/arm: Use SVEContLdSt for multi-register contiguous loads

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200508154359.7494-14-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Richard Henderson 2020-05-08 08:43:53 -07:00 committed by Peter Maydell
parent 4bcc3f0ff8
commit 5c9b8458a0
1 changed file with 79 additions and 144 deletions

View File

@ -4449,27 +4449,28 @@ static inline bool test_host_page(void *host)
} }
/* /*
 * Common helper for all contiguous one-register predicated loads. * Common helper for all contiguous 1,2,3,4-register predicated loads.
*/ */
static inline QEMU_ALWAYS_INLINE static inline QEMU_ALWAYS_INLINE
void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr, void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint32_t desc, const uintptr_t retaddr, uint32_t desc, const uintptr_t retaddr,
const int esz, const int msz, const int esz, const int msz, const int N,
sve_ldst1_host_fn *host_fn, sve_ldst1_host_fn *host_fn,
sve_ldst1_tlb_fn *tlb_fn) sve_ldst1_tlb_fn *tlb_fn)
{ {
const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5); const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
void *vd = &env->vfp.zregs[rd];
const intptr_t reg_max = simd_oprsz(desc); const intptr_t reg_max = simd_oprsz(desc);
intptr_t reg_off, reg_last, mem_off; intptr_t reg_off, reg_last, mem_off;
SVEContLdSt info; SVEContLdSt info;
void *host; void *host;
int flags; int flags, i;
/* Find the active elements. */ /* Find the active elements. */
if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, 1 << msz)) { if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, N << msz)) {
/* The entire predicate was false; no load occurs. */ /* The entire predicate was false; no load occurs. */
memset(vd, 0, reg_max); for (i = 0; i < N; ++i) {
memset(&env->vfp.zregs[(rd + i) & 31], 0, reg_max);
}
return; return;
} }
@ -4477,7 +4478,7 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, retaddr); sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, retaddr);
/* Handle watchpoints for all active elements. */ /* Handle watchpoints for all active elements. */
sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, 1 << msz, sve_cont_ldst_watchpoints(&info, env, vg, addr, 1 << esz, N << msz,
BP_MEM_READ, retaddr); BP_MEM_READ, retaddr);
/* TODO: MTE check. */ /* TODO: MTE check. */
@ -4493,9 +4494,8 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
* which for ARM will raise SyncExternal. Perform the load * which for ARM will raise SyncExternal. Perform the load
* into scratch memory to preserve register state until the end. * into scratch memory to preserve register state until the end.
*/ */
ARMVectorReg scratch; ARMVectorReg scratch[4] = { };
memset(&scratch, 0, reg_max);
mem_off = info.mem_off_first[0]; mem_off = info.mem_off_first[0];
reg_off = info.reg_off_first[0]; reg_off = info.reg_off_first[0];
reg_last = info.reg_off_last[1]; reg_last = info.reg_off_last[1];
@ -4510,21 +4510,29 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint64_t pg = vg[reg_off >> 6]; uint64_t pg = vg[reg_off >> 6];
do { do {
if ((pg >> (reg_off & 63)) & 1) { if ((pg >> (reg_off & 63)) & 1) {
tlb_fn(env, &scratch, reg_off, addr + mem_off, retaddr); for (i = 0; i < N; ++i) {
tlb_fn(env, &scratch[i], reg_off,
addr + mem_off + (i << msz), retaddr);
}
} }
reg_off += 1 << esz; reg_off += 1 << esz;
mem_off += 1 << msz; mem_off += N << msz;
} while (reg_off & 63); } while (reg_off & 63);
} while (reg_off <= reg_last); } while (reg_off <= reg_last);
memcpy(vd, &scratch, reg_max); for (i = 0; i < N; ++i) {
memcpy(&env->vfp.zregs[(rd + i) & 31], &scratch[i], reg_max);
}
return; return;
#endif #endif
} }
/* The entire operation is in RAM, on valid pages. */ /* The entire operation is in RAM, on valid pages. */
memset(vd, 0, reg_max); for (i = 0; i < N; ++i) {
memset(&env->vfp.zregs[(rd + i) & 31], 0, reg_max);
}
mem_off = info.mem_off_first[0]; mem_off = info.mem_off_first[0];
reg_off = info.reg_off_first[0]; reg_off = info.reg_off_first[0];
reg_last = info.reg_off_last[0]; reg_last = info.reg_off_last[0];
@ -4534,10 +4542,13 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint64_t pg = vg[reg_off >> 6]; uint64_t pg = vg[reg_off >> 6];
do { do {
if ((pg >> (reg_off & 63)) & 1) { if ((pg >> (reg_off & 63)) & 1) {
host_fn(vd, reg_off, host + mem_off); for (i = 0; i < N; ++i) {
host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
host + mem_off + (i << msz));
}
} }
reg_off += 1 << esz; reg_off += 1 << esz;
mem_off += 1 << msz; mem_off += N << msz;
} while (reg_off <= reg_last && (reg_off & 63)); } while (reg_off <= reg_last && (reg_off & 63));
} }
@ -4547,7 +4558,11 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
*/ */
mem_off = info.mem_off_split; mem_off = info.mem_off_split;
if (unlikely(mem_off >= 0)) { if (unlikely(mem_off >= 0)) {
tlb_fn(env, vd, info.reg_off_split, addr + mem_off, retaddr); reg_off = info.reg_off_split;
for (i = 0; i < N; ++i) {
tlb_fn(env, &env->vfp.zregs[(rd + i) & 31], reg_off,
addr + mem_off + (i << msz), retaddr);
}
} }
mem_off = info.mem_off_first[1]; mem_off = info.mem_off_first[1];
@ -4560,10 +4575,13 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
uint64_t pg = vg[reg_off >> 6]; uint64_t pg = vg[reg_off >> 6];
do { do {
if ((pg >> (reg_off & 63)) & 1) { if ((pg >> (reg_off & 63)) & 1) {
host_fn(vd, reg_off, host + mem_off); for (i = 0; i < N; ++i) {
host_fn(&env->vfp.zregs[(rd + i) & 31], reg_off,
host + mem_off + (i << msz));
}
} }
reg_off += 1 << esz; reg_off += 1 << esz;
mem_off += 1 << msz; mem_off += N << msz;
} while (reg_off & 63); } while (reg_off & 63);
} while (reg_off <= reg_last); } while (reg_off <= reg_last);
} }
@ -4573,7 +4591,7 @@ void sve_ld1_r(CPUARMState *env, uint64_t *vg, const target_ulong addr,
void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \ target_ulong addr, uint32_t desc) \
{ \ { \
sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, 0, \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, \
sve_##NAME##_host, sve_##NAME##_tlb); \ sve_##NAME##_host, sve_##NAME##_tlb); \
} }
@ -4581,159 +4599,76 @@ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \
void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \ void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \ target_ulong addr, uint32_t desc) \
{ \ { \
sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
} \ } \
void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \ void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \
target_ulong addr, uint32_t desc) \ target_ulong addr, uint32_t desc) \
{ \ { \
sve_ld1_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \
sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
} }
DO_LD1_1(ld1bb, 0) DO_LD1_1(ld1bb, MO_8)
DO_LD1_1(ld1bhu, 1) DO_LD1_1(ld1bhu, MO_16)
DO_LD1_1(ld1bhs, 1) DO_LD1_1(ld1bhs, MO_16)
DO_LD1_1(ld1bsu, 2) DO_LD1_1(ld1bsu, MO_32)
DO_LD1_1(ld1bss, 2) DO_LD1_1(ld1bss, MO_32)
DO_LD1_1(ld1bdu, 3) DO_LD1_1(ld1bdu, MO_64)
DO_LD1_1(ld1bds, 3) DO_LD1_1(ld1bds, MO_64)
DO_LD1_2(ld1hh, 1, 1) DO_LD1_2(ld1hh, MO_16, MO_16)
DO_LD1_2(ld1hsu, 2, 1) DO_LD1_2(ld1hsu, MO_32, MO_16)
DO_LD1_2(ld1hss, 2, 1) DO_LD1_2(ld1hss, MO_32, MO_16)
DO_LD1_2(ld1hdu, 3, 1) DO_LD1_2(ld1hdu, MO_64, MO_16)
DO_LD1_2(ld1hds, 3, 1) DO_LD1_2(ld1hds, MO_64, MO_16)
DO_LD1_2(ld1ss, 2, 2) DO_LD1_2(ld1ss, MO_32, MO_32)
DO_LD1_2(ld1sdu, 3, 2) DO_LD1_2(ld1sdu, MO_64, MO_32)
DO_LD1_2(ld1sds, 3, 2) DO_LD1_2(ld1sds, MO_64, MO_32)
DO_LD1_2(ld1dd, 3, 3) DO_LD1_2(ld1dd, MO_64, MO_64)
#undef DO_LD1_1 #undef DO_LD1_1
#undef DO_LD1_2 #undef DO_LD1_2
/*
* Common helpers for all contiguous 2,3,4-register predicated loads.
*/
static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
uint32_t desc, int size, uintptr_t ra,
sve_ldst1_tlb_fn *tlb_fn)
{
const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
ARMVectorReg scratch[2] = { };
for (i = 0; i < oprsz; ) {
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
tlb_fn(env, &scratch[0], i, addr, ra);
tlb_fn(env, &scratch[1], i, addr + size, ra);
}
i += size, pg >>= size;
addr += 2 * size;
} while (i & 15);
}
/* Wait until all exceptions have been raised to write back. */
memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
}
static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
uint32_t desc, int size, uintptr_t ra,
sve_ldst1_tlb_fn *tlb_fn)
{
const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
ARMVectorReg scratch[3] = { };
for (i = 0; i < oprsz; ) {
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
tlb_fn(env, &scratch[0], i, addr, ra);
tlb_fn(env, &scratch[1], i, addr + size, ra);
tlb_fn(env, &scratch[2], i, addr + 2 * size, ra);
}
i += size, pg >>= size;
addr += 3 * size;
} while (i & 15);
}
/* Wait until all exceptions have been raised to write back. */
memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz);
}
static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
uint32_t desc, int size, uintptr_t ra,
sve_ldst1_tlb_fn *tlb_fn)
{
const unsigned rd = extract32(desc, SIMD_DATA_SHIFT + MEMOPIDX_SHIFT, 5);
intptr_t i, oprsz = simd_oprsz(desc);
ARMVectorReg scratch[4] = { };
for (i = 0; i < oprsz; ) {
uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3));
do {
if (pg & 1) {
tlb_fn(env, &scratch[0], i, addr, ra);
tlb_fn(env, &scratch[1], i, addr + size, ra);
tlb_fn(env, &scratch[2], i, addr + 2 * size, ra);
tlb_fn(env, &scratch[3], i, addr + 3 * size, ra);
}
i += size, pg >>= size;
addr += 4 * size;
} while (i & 15);
}
/* Wait until all exceptions have been raised to write back. */
memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
memcpy(&env->vfp.zregs[(rd + 1) & 31], &scratch[1], oprsz);
memcpy(&env->vfp.zregs[(rd + 2) & 31], &scratch[2], oprsz);
memcpy(&env->vfp.zregs[(rd + 3) & 31], &scratch[3], oprsz);
}
#define DO_LDN_1(N) \ #define DO_LDN_1(N) \
void QEMU_FLATTEN HELPER(sve_ld##N##bb_r) \ void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \
(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ target_ulong addr, uint32_t desc) \
{ \ { \
sve_ld##N##_r(env, vg, addr, desc, 1, GETPC(), sve_ld1bb_tlb); \ sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, \
sve_ld1bb_host, sve_ld1bb_tlb); \
} }
#define DO_LDN_2(N, SUFF, SIZE) \ #define DO_LDN_2(N, SUFF, ESZ) \
void QEMU_FLATTEN HELPER(sve_ld##N##SUFF##_le_r) \ void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \
(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ target_ulong addr, uint32_t desc) \
{ \ { \
sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \
sve_ld1##SUFF##_le_tlb); \ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \
} \ } \
void QEMU_FLATTEN HELPER(sve_ld##N##SUFF##_be_r) \ void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \
(CPUARMState *env, void *vg, target_ulong addr, uint32_t desc) \ target_ulong addr, uint32_t desc) \
{ \ { \
sve_ld##N##_r(env, vg, addr, desc, SIZE, GETPC(), \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \
sve_ld1##SUFF##_be_tlb); \ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \
} }
DO_LDN_1(2) DO_LDN_1(2)
DO_LDN_1(3) DO_LDN_1(3)
DO_LDN_1(4) DO_LDN_1(4)
DO_LDN_2(2, hh, 2) DO_LDN_2(2, hh, MO_16)
DO_LDN_2(3, hh, 2) DO_LDN_2(3, hh, MO_16)
DO_LDN_2(4, hh, 2) DO_LDN_2(4, hh, MO_16)
DO_LDN_2(2, ss, 4) DO_LDN_2(2, ss, MO_32)
DO_LDN_2(3, ss, 4) DO_LDN_2(3, ss, MO_32)
DO_LDN_2(4, ss, 4) DO_LDN_2(4, ss, MO_32)
DO_LDN_2(2, dd, 8) DO_LDN_2(2, dd, MO_64)
DO_LDN_2(3, dd, 8) DO_LDN_2(3, dd, MO_64)
DO_LDN_2(4, dd, 8) DO_LDN_2(4, dd, MO_64)
#undef DO_LDN_1 #undef DO_LDN_1
#undef DO_LDN_2 #undef DO_LDN_2