target/arm: Implement MVE incrementing/decrementing dup insns
Implement the MVE incrementing/decrementing dup insns VIDUP, VDDUP,
VIWDUP and VDWDUP. These fill the elements of a vector with
successively incrementing values, starting at the offset specified in
a general purpose register. The final value of the offset is written
back to this register. The wrapping variants take a second general
purpose register which specifies the point where the count should
wrap back to 0.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
commit 395b92d50e (parent c1bd78cb06)
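
To make the semantics concrete before the diff itself, here is a minimal
scalar model of the wrapping byte variant (a hypothetical standalone
sketch, not QEMU code; all 16 lanes assumed active):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical scalar model of VIWDUP.8: write the offset to each
     * byte lane, stepping by imm and wrapping to 0 when the count
     * reaches the wrap value taken from Rm. */
    static uint32_t viwdup8_model(uint8_t *vec, uint32_t offset,
                                  uint32_t wrap, uint32_t imm)
    {
        for (int e = 0; e < 16; e++) {
            vec[e] = offset;
            offset += imm;
            if (offset == wrap) {
                offset = 0;
            }
        }
        return offset;  /* final value, written back to Rn */
    }

    int main(void)
    {
        uint8_t q[16];
        /* offset 0, wrap 8, step 2: lanes 0,2,4,6,0,2,4,6,... final Rn = 0 */
        uint32_t rn = viwdup8_model(q, 0, 8, 2);
        for (int e = 0; e < 16; e++) {
            printf("%d ", q[e]);
        }
        printf("-> Rn = %u\n", rn);
        return 0;
    }
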
@@ -35,6 +35,18 @@ DEF_HELPER_FLAGS_3(mve_vstrh_w, TCG_CALL_NO_WG, void, env, ptr, i32)

DEF_HELPER_FLAGS_3(mve_vdup, TCG_CALL_NO_WG, void, env, ptr, i32)

DEF_HELPER_FLAGS_4(mve_vidupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
DEF_HELPER_FLAGS_4(mve_viduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
DEF_HELPER_FLAGS_4(mve_vidupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)

DEF_HELPER_FLAGS_5(mve_viwdupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
DEF_HELPER_FLAGS_5(mve_viwduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
DEF_HELPER_FLAGS_5(mve_viwdupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)

DEF_HELPER_FLAGS_5(mve_vdwdupb, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
DEF_HELPER_FLAGS_5(mve_vdwduph, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)
DEF_HELPER_FLAGS_5(mve_vdwdupw, TCG_CALL_NO_WG, i32, env, ptr, i32, i32, i32)

DEF_HELPER_FLAGS_3(mve_vclsb, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vclsh, TCG_CALL_NO_WG, void, env, ptr, ptr)
DEF_HELPER_FLAGS_3(mve_vclsw, TCG_CALL_NO_WG, void, env, ptr, ptr)
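
Unlike the earlier void helpers, these declarations return i32 so the
updated offset can flow back to the translator. Via QEMU's helper-head
macro machinery, the first DEF_HELPER_FLAGS_4 line above should expand
to roughly this prototype (a sketch, not literal generated code):

    uint32_t helper_mve_vidupb(CPUARMState *env, void *vd,
                               uint32_t offset, uint32_t imm);
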
@@ -35,6 +35,8 @@
&2scalar qd qn rm size
&1imm qd imm cmode op
&2shift qd qm shift size
&vidup qd rn size imm
&viwdup qd rn rm size imm

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)

@@ -259,6 +261,29 @@ VDUP 1110 1110 1 1 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=0
VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 1 1 0000 @vdup size=1
VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2

# Incrementing and decrementing dup

# VIDUP, VDDUP format immediate: 1 << (immh:imml)
%imm_vidup 7:1 0:1 !function=vidup_imm

# VIDUP, VDDUP registers: Rm bits [3:1] from insn, bit 0 is 1;
# Rn bits [3:1] from insn, bit 0 is 0
%vidup_rm 1:3 !function=times_2_plus_1
%vidup_rn 17:3 !function=times_2

@vidup  .... .... . . size:2 .... .... .... .... .... \
        qd=%qd imm=%imm_vidup rn=%vidup_rn &vidup
@viwdup .... .... . . size:2 .... .... .... .... .... \
        qd=%qd imm=%imm_vidup rm=%vidup_rm rn=%vidup_rn &viwdup
{
  VIDUP  1110 1110 0 . .. ... 1 ... 0 1111 . 110 111 . @vidup
  VIWDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 ... . @viwdup
}
{
  VDDUP  1110 1110 0 . .. ... 1 ... 1 1111 . 110 111 . @vidup
  VDWDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 ... . @viwdup
}
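
In the overlap groups above, the VIDUP/VDDUP rows pin insn bits [3:1]
to 0b111, the encoding that would otherwise decode as Rm == 15, so the
non-wrapping forms win before the wrapping patterns are tried. A rough
sketch of how the named fields map to registers and the immediate
(hypothetical helper mirroring %vidup_rm, %vidup_rn and %imm_vidup):

    #include <stdint.h>

    /* Hypothetical sketch: Rm = insn[3:1]:1, Rn = insn[19:17]:0,
     * imm = 1 << (insn[7]:insn[0]). */
    static void decode_vidup_fields(uint32_t insn, int *rn, int *rm, int *imm)
    {
        *rm = ((insn >> 1) & 7) * 2 + 1;   /* %vidup_rm: times_2_plus_1 */
        *rn = ((insn >> 17) & 7) * 2;      /* %vidup_rn: times_2 */
        *imm = 1 << ((((insn >> 7) & 1) << 1) | (insn & 1)); /* %imm_vidup */
    }
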

# multiply-add long dual accumulate
# rdahi: bits [3:1] from insn, bit 0 is 1
# rdalo: bits [3:1] from insn, bit 0 is 0
@@ -1695,3 +1695,66 @@ uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift)
{
    return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF);
}

#define DO_VIDUP(OP, ESIZE, TYPE, FN)                           \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd,       \
                              uint32_t offset, uint32_t imm)    \
    {                                                           \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)], offset, mask);           \
            offset = FN(offset, imm);                           \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return offset;                                          \
    }

#define DO_VIWDUP(OP, ESIZE, TYPE, FN)                          \
    uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd,       \
                              uint32_t offset, uint32_t wrap,   \
                              uint32_t imm)                     \
    {                                                           \
        TYPE *d = vd;                                           \
        uint16_t mask = mve_element_mask(env);                  \
        unsigned e;                                             \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
            mergemask(&d[H##ESIZE(e)], offset, mask);           \
            offset = FN(offset, wrap, imm);                     \
        }                                                       \
        mve_advance_vpt(env);                                   \
        return offset;                                          \
    }

#define DO_VIDUP_ALL(OP, FN)                    \
    DO_VIDUP(OP##b, 1, int8_t, FN)              \
    DO_VIDUP(OP##h, 2, int16_t, FN)             \
    DO_VIDUP(OP##w, 4, int32_t, FN)

#define DO_VIWDUP_ALL(OP, FN)                   \
    DO_VIWDUP(OP##b, 1, int8_t, FN)             \
    DO_VIWDUP(OP##h, 2, int16_t, FN)            \
    DO_VIWDUP(OP##w, 4, int32_t, FN)
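
Expanded for the byte case, DO_VIDUP yields mve_vidupb. The per-lane
predication comes from mve_element_mask() and mergemask(); note that
the offset advances for every element whether or not the lane is
written. A simplified standalone model of that behaviour (stand-ins
for the QEMU internals, ignoring the H1() host byte-order adjustment
and the env/VPT bookkeeping):

    #include <stdint.h>

    /* Simplified stand-in for QEMU's mergemask(): commit the result
     * byte only if the predicate bit for this lane is set. */
    static void mergemask_b(uint8_t *d, uint8_t r, uint16_t mask)
    {
        if (mask & 1) {
            *d = r;
        }
    }

    /* Model of the expanded mve_vidupb loop. */
    static uint32_t vidupb_model(uint8_t *d, uint32_t offset,
                                 uint32_t imm, uint16_t mask)
    {
        for (unsigned e = 0; e < 16; e++, mask >>= 1) {
            mergemask_b(&d[e], offset, mask); /* written only if active */
            offset += imm;                    /* FN is DO_ADD for VIDUP */
        }
        return offset;  /* advances even across inactive lanes */
    }
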

static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    offset += imm;
    if (offset == wrap) {
        offset = 0;
    }
    return offset;
}

static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
{
    if (offset == 0) {
        offset = wrap;
    }
    offset -= imm;
    return offset;
}

DO_VIDUP_ALL(vidup, DO_ADD)
DO_VIWDUP_ALL(viwdup, do_add_wrap)
DO_VIWDUP_ALL(vdwdup, do_sub_wrap)
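
Note that do_add_wrap() resets to 0 only when the incremented offset
compares exactly equal to the wrap value, and do_sub_wrap() reloads the
wrap value before decrementing past 0. A quick standalone demonstration
of both directions (reusing the two functions above in a hypothetical
test harness):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
    {
        offset += imm;
        if (offset == wrap) {
            offset = 0;
        }
        return offset;
    }

    static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm)
    {
        if (offset == 0) {
            offset = wrap;
        }
        offset -= imm;
        return offset;
    }

    int main(void)
    {
        uint32_t up = 0, down = 0;
        for (int i = 0; i < 6; i++) {
            printf("up %u  down %u\n", up, down);
            up = do_add_wrap(up, 6, 2);      /* 0, 2, 4, 0, 2, 4 */
            down = do_sub_wrap(down, 6, 2);  /* 0, 4, 2, 0, 4, 2 */
        }
        return 0;
    }
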
@@ -25,6 +25,11 @@
#include "translate.h"
#include "translate-a32.h"

static inline int vidup_imm(DisasContext *s, int x)
{
    return 1 << x;
}

/* Include the generated decoder */
#include "decode-mve.c.inc"
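
Since the decode field is the two-bit immh:imml value, vidup_imm()
yields steps of 1, 2, 4 or 8 for encoded values 0 through 3 — the only
immediates these insns can encode.
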
@@ -36,6 +41,8 @@ typedef void MVEGenTwoOpShiftFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenOneOpImmFn(TCGv_ptr, TCGv_ptr, TCGv_i64);
typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);

/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -1059,3 +1066,116 @@ static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
    mve_update_eci(s);
    return true;
}

static bool do_vidup(DisasContext *s, arg_vidup *a, MVEGenVIDUPFn *fn)
{
    TCGv_ptr qd;
    TCGv_i32 rn;

    /*
     * Vector Increment/Decrement and Duplicate (VIDUP, VDDUP).
     * This fills the vector with elements of successively increasing
     * or decreasing values, starting from Rn.
     */
    if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
        return false;
    }
    if (a->size == MO_64) {
        /* size 0b11 is another encoding */
        return false;
    }
    if (!mve_eci_check(s) || !vfp_access_check(s)) {
        return true;
    }

    qd = mve_qreg_ptr(a->qd);
    rn = load_reg(s, a->rn);
    fn(rn, cpu_env, qd, rn, tcg_constant_i32(a->imm));
    store_reg(s, a->rn, rn);
    tcg_temp_free_ptr(qd);
    mve_update_eci(s);
    return true;
}
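
Note the call pattern: rn is passed both as the helper's offset
argument and as the destination for its i32 return value, so the one
temp obtained from load_reg() ends up holding the updated offset that
store_reg() then writes back to Rn.
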

static bool do_viwdup(DisasContext *s, arg_viwdup *a, MVEGenVIWDUPFn *fn)
{
    TCGv_ptr qd;
    TCGv_i32 rn, rm;

    /*
     * Vector Increment/Decrement with Wrap and Duplicate (VIWDUP, VDWDUP).
     * This fills the vector with elements of successively increasing
     * or decreasing values, starting from Rn. Rm specifies a point where
     * the count wraps back around to 0. The updated offset is written back
     * to Rn.
     */
    if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
        return false;
    }
    if (!fn || a->rm == 13 || a->rm == 15) {
        /*
         * size 0b11 is another encoding; Rm == 13 is UNPREDICTABLE;
         * Rm == 15 is VIDUP, VDDUP.
         */
        return false;
    }
    if (!mve_eci_check(s) || !vfp_access_check(s)) {
        return true;
    }

    qd = mve_qreg_ptr(a->qd);
    rn = load_reg(s, a->rn);
    rm = load_reg(s, a->rm);
    fn(rn, cpu_env, qd, rn, rm, tcg_constant_i32(a->imm));
    store_reg(s, a->rn, rn);
    tcg_temp_free_ptr(qd);
    tcg_temp_free_i32(rm);
    mve_update_eci(s);
    return true;
}

static bool trans_VIDUP(DisasContext *s, arg_vidup *a)
{
    static MVEGenVIDUPFn * const fns[] = {
        gen_helper_mve_vidupb,
        gen_helper_mve_viduph,
        gen_helper_mve_vidupw,
        NULL,
    };
    return do_vidup(s, a, fns[a->size]);
}

static bool trans_VDDUP(DisasContext *s, arg_vidup *a)
{
    static MVEGenVIDUPFn * const fns[] = {
        gen_helper_mve_vidupb,
        gen_helper_mve_viduph,
        gen_helper_mve_vidupw,
        NULL,
    };
    /* VDDUP is just like VIDUP but with a negative immediate */
    a->imm = -a->imm;
    return do_vidup(s, a, fns[a->size]);
}
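
Negating the immediate works because the helpers do all offset
arithmetic on uint32_t, where adding -imm is the same as subtracting
imm modulo 2^32. A quick sanity check of that identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t offset = 10;
        uint32_t imm = -(uint32_t)4;  /* VDDUP: VIDUP with the step negated */
        offset += imm;                /* same bits as offset - 4 */
        assert(offset == 6);
        return 0;
    }
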

static bool trans_VIWDUP(DisasContext *s, arg_viwdup *a)
{
    static MVEGenVIWDUPFn * const fns[] = {
        gen_helper_mve_viwdupb,
        gen_helper_mve_viwduph,
        gen_helper_mve_viwdupw,
        NULL,
    };
    return do_viwdup(s, a, fns[a->size]);
}

static bool trans_VDWDUP(DisasContext *s, arg_viwdup *a)
{
    static MVEGenVIWDUPFn * const fns[] = {
        gen_helper_mve_vdwdupb,
        gen_helper_mve_vdwduph,
        gen_helper_mve_vdwdupw,
        NULL,
    };
    return do_viwdup(s, a, fns[a->size]);
}