target/arm: Use asimd_imm_const for A64 decode

The A64 AdvSIMD modified-immediate grouping uses almost the same
constant encoding that A32 Neon does; reuse asimd_imm_const() (to
which we add the AArch64-specific case for cmode 15 op 1) instead of
reimplementing it all.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-5-peter.maydell@linaro.org
Peter Maydell 2021-06-28 14:58:21 +01:00
parent dfd66bc0f3
commit 2c0286dba4
3 changed files with 24 additions and 82 deletions
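For context, a minimal standalone sketch of the shared AdvSIMDExpandImm() replication scheme that asimd_imm_const() implements for the simple cmode groups (illustration only, not part of the patch; the helper name replicate32 is made up): the 8-bit immediate is placed at a byte position selected by cmode and then replicated across the 64-bit constant.

    #include <inttypes.h>
    #include <stdio.h>

    /* Duplicate a 32-bit pattern into both halves of a 64-bit constant */
    static uint64_t replicate32(uint32_t x)
    {
        return ((uint64_t)x << 32) | x;
    }

    int main(void)
    {
        uint32_t imm8 = 0xab;
        /* cmode 0-1: Replicate(Zeros(24):imm8, 2) */
        printf("%016" PRIx64 "\n", replicate32(imm8));        /* 000000ab000000ab */
        /* cmode 6-7: Replicate(imm8:Zeros(24), 2) */
        printf("%016" PRIx64 "\n", replicate32(imm8 << 24));  /* ab000000ab000000 */
        return 0;
    }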

target/arm/translate-a64.c

@@ -8190,8 +8190,6 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
 {
     int rd = extract32(insn, 0, 5);
     int cmode = extract32(insn, 12, 4);
-    int cmode_3_1 = extract32(cmode, 1, 3);
-    int cmode_0 = extract32(cmode, 0, 1);
     int o2 = extract32(insn, 11, 1);
     uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
     bool is_neg = extract32(insn, 29, 1);
@@ -8210,83 +8208,13 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
         return;
     }
 
-    /* See AdvSIMDExpandImm() in ARM ARM */
-    switch (cmode_3_1) {
-    case 0: /* Replicate(Zeros(24):imm8, 2) */
-    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
-    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
-    case 3: /* Replicate(imm8:Zeros(24), 2) */
-    {
-        int shift = cmode_3_1 * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 32);
-        break;
-    }
-    case 4: /* Replicate(Zeros(8):imm8, 4) */
-    case 5: /* Replicate(imm8:Zeros(8), 4) */
-    {
-        int shift = (cmode_3_1 & 0x1) * 8;
-        imm = bitfield_replicate(abcdefgh << shift, 16);
-        break;
-    }
-    case 6:
-        if (cmode_0) {
-            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
-            imm = (abcdefgh << 16) | 0xffff;
-        } else {
-            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
-            imm = (abcdefgh << 8) | 0xff;
-        }
-        imm = bitfield_replicate(imm, 32);
-        break;
-    case 7:
-        if (!cmode_0 && !is_neg) {
-            imm = bitfield_replicate(abcdefgh, 8);
-        } else if (!cmode_0 && is_neg) {
-            int i;
-            imm = 0;
-            for (i = 0; i < 8; i++) {
-                if ((abcdefgh) & (1 << i)) {
-                    imm |= 0xffULL << (i * 8);
-                }
-            }
-        } else if (cmode_0) {
-            if (is_neg) {
-                imm = (abcdefgh & 0x3f) << 48;
-                if (abcdefgh & 0x80) {
-                    imm |= 0x8000000000000000ULL;
-                }
-                if (abcdefgh & 0x40) {
-                    imm |= 0x3fc0000000000000ULL;
-                } else {
-                    imm |= 0x4000000000000000ULL;
-                }
-            } else {
-                if (o2) {
-                    /* FMOV (vector, immediate) - half-precision */
-                    imm = vfp_expand_imm(MO_16, abcdefgh);
-                    /* now duplicate across the lanes */
-                    imm = bitfield_replicate(imm, 16);
-                } else {
-                    imm = (abcdefgh & 0x3f) << 19;
-                    if (abcdefgh & 0x80) {
-                        imm |= 0x80000000;
-                    }
-                    if (abcdefgh & 0x40) {
-                        imm |= 0x3e000000;
-                    } else {
-                        imm |= 0x40000000;
-                    }
-                    imm |= (imm << 32);
-                }
-            }
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    if (cmode_3_1 != 7 && is_neg) {
-        imm = ~imm;
+    if (cmode == 15 && o2 && !is_neg) {
+        /* FMOV (vector, immediate) - half-precision */
+        imm = vfp_expand_imm(MO_16, abcdefgh);
+        /* now duplicate across the lanes */
+        imm = bitfield_replicate(imm, 16);
+    } else {
+        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
     }
 
     if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
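The surviving check on cmode in that last context line separates plain moves from the per-element logical forms. A quick sanity-check sketch (illustration only, not part of the patch) enumerating which cmode values the condition routes to each path:

    #include <stdio.h>

    int main(void)
    {
        /* cmodes 1, 3, 5, 7 (32-bit) and 9, 11 (16-bit) are the ORR/BIC forms */
        for (int cmode = 0; cmode < 16; cmode++) {
            int orr_bic = ((cmode & 0x9) == 0x1) || ((cmode & 0xd) == 0x9);
            printf("cmode %2d: %s\n", cmode,
                   orr_bic ? "ORR/BIC into existing value"
                           : "move constant into whole register");
        }
        return 0;
    }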

target/arm/translate.c

@@ -121,8 +121,8 @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
     case 14:
         if (op) {
             /*
-             * This is the only case where the top and bottom 32 bits
-             * of the encoded constant differ.
+             * This and cmode == 15 op == 1 are the only cases where
+             * the top and bottom 32 bits of the encoded constant differ.
              */
             uint64_t imm64 = 0;
             int n;
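The case named in the updated comment, cmode == 14 with op == 1, is the byte-mask constant: each bit of the 8-bit immediate selects 0xff or 0x00 for one byte of the 64-bit result. A standalone restatement (illustration only, not part of the patch; helper name is made up):

    #include <inttypes.h>
    #include <stdio.h>

    /* Restates the cmode == 14, op == 1 byte-mask expansion */
    static uint64_t expand_cmode14_op1(uint32_t imm)
    {
        uint64_t imm64 = 0;
        for (int n = 0; n < 8; n++) {
            if (imm & (1 << n)) {
                imm64 |= 0xffULL << (n * 8);
            }
        }
        return imm64;
    }

    int main(void)
    {
        /* imm8 = 0xa5 = 0b10100101: bytes 0, 2, 5 and 7 become 0xff */
        printf("%016" PRIx64 "\n", expand_cmode14_op1(0xa5)); /* ff00ff0000ff00ff */
        return 0;
    }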
@@ -137,6 +137,19 @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
         imm |= (imm << 8) | (imm << 16) | (imm << 24);
         break;
     case 15:
+        if (op) {
+            /* Reserved encoding for AArch32; valid for AArch64 */
+            uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;
+            if (imm & 0x80) {
+                imm64 |= 0x8000000000000000ULL;
+            }
+            if (imm & 0x40) {
+                imm64 |= 0x3fc0000000000000ULL;
+            } else {
+                imm64 |= 0x4000000000000000ULL;
+            }
+            return imm64;
+        }
         imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
             | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
         break;
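To see what the new branch produces, a standalone restatement with a worked example (illustration only, not part of the patch; helper name is made up): imm8 == 0x70 has a == 0, b == 1, cdefgh == 110000, which expands to 0x3ff0000000000000, i.e. the double-precision constant 1.0 used by fmov v0.2d, #1.0.

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    /* Restates the cmode == 15, op == 1 expansion added above */
    static uint64_t expand_cmode15_op1(uint32_t imm)
    {
        uint64_t imm64 = (uint64_t)(imm & 0x3f) << 48;   /* cdefgh */
        if (imm & 0x80) {
            imm64 |= 0x8000000000000000ULL;              /* sign bit a */
        }
        if (imm & 0x40) {
            imm64 |= 0x3fc0000000000000ULL;              /* NOT(b):Replicate(b,8), b == 1 */
        } else {
            imm64 |= 0x4000000000000000ULL;              /* NOT(b):Replicate(b,8), b == 0 */
        }
        return imm64;
    }

    int main(void)
    {
        uint64_t bits = expand_cmode15_op1(0x70);
        double d;
        memcpy(&d, &bits, sizeof(d));
        printf("%016" PRIx64 " = %g\n", bits, d);        /* 3ff0000000000000 = 1 */
        return 0;
    }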

target/arm/translate.h

@@ -540,7 +540,8 @@ static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
  * VMVN and VBIC (when cmode < 14 && op == 1).
  *
  * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
- * callers must catch this.
+ * callers must catch this; we return the 64-bit constant value defined
+ * for AArch64.
  *
  * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
  * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
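Given that contract, an AArch32 caller is still expected to reject the reserved encoding itself before calling the helper. A hedged usage sketch (illustrative fragment with made-up argument names, not taken from the patch):

    /*
     * In a Neon 1-reg-and-modified-immediate decode function:
     * refuse the AArch32-reserved case, then expand the constant.
     */
    if (a->cmode == 15 && a->op == 1) {
        return false;   /* reserved on AArch32: caller must catch this */
    }
    imm = asimd_imm_const(a->imm, a->cmode, a->op);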