Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20200506' into staging

Add tcg_gen_gvec_dup_imm
Misc tcg patches

# gpg: Signature made Wed 06 May 2020 19:23:43 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20200506:
  tcg: Fix integral argument type to tcg_gen_rot[rl]i_i{32,64}
  tcg: Add load_dest parameter to GVecGen2
  tcg: Improve vector tail clearing
  tcg: Add tcg_gen_gvec_dup_tl
  tcg: Remove tcg_gen_gvec_dup{8,16,32,64}i
  tcg: Use tcg_gen_gvec_dup_imm in logical simplifications
  target/arm: Use tcg_gen_gvec_dup_imm
  target/ppc: Use tcg_gen_gvec_dup_imm
  target/s390x: Use tcg_gen_gvec_dup_imm
  tcg: Add tcg_gen_gvec_dup_imm

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 609dd53df5
Merged by: Peter Maydell <peter.maydell@linaro.org>, 2020-05-07 09:45:54 +01:00
10 changed files with 166 additions and 139 deletions
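
The heart of the series is the replacement of the four size-specific duplicate-immediate expanders with a single entry point that takes the element size explicitly. A minimal before/after sketch, using only signatures that appear in the diffs below (dofs/oprsz/maxsz are the usual destination offset and operation/maximum sizes):

    /* Before: one expander per element size. */
    tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0x55);
    tcg_gen_gvec_dup64i(dofs, oprsz, maxsz, dup_const(MO_16, imm));

    /* After: one expander; the element size is an explicit MO_* argument,
     * and replication of the immediate happens inside do_dup(). */
    tcg_gen_gvec_dup_imm(MO_8, dofs, oprsz, maxsz, 0x55);
    tcg_gen_gvec_dup_imm(MO_16, dofs, oprsz, maxsz, imm);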

@@ -109,6 +109,8 @@ typedef struct {
uint8_t vece;
/* Prefer i64 to v64. */
bool prefer_i64;
/* Load dest as a 2nd source operand. */
bool load_dest;
} GVecGen2;
typedef struct {
@@ -313,15 +315,18 @@ void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t s, uint32_t m);
void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t s,
uint32_t m, uint64_t imm);
void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
uint32_t m, TCGv_i32);
void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
uint32_t m, TCGv_i64);
void tcg_gen_gvec_dup8i(uint32_t dofs, uint32_t s, uint32_t m, uint8_t x);
void tcg_gen_gvec_dup16i(uint32_t dofs, uint32_t s, uint32_t m, uint16_t x);
void tcg_gen_gvec_dup32i(uint32_t dofs, uint32_t s, uint32_t m, uint32_t x);
void tcg_gen_gvec_dup64i(uint32_t dofs, uint32_t s, uint32_t m, uint64_t x);
#if TARGET_LONG_BITS == 64
# define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i64
#else
# define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i32
#endif
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz);
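
The new load_dest flag changes the contract of a GVecGen2 element function: when set, the expanders (see the tcg-op-gvec.c hunks further down) preload the old destination value into the output operand before calling fni, so two-operand ops can accumulate into their destination. A sketch with hypothetical names (gen_acc_i64 and g_acc are illustrative, not part of the patch):

    /* With .load_dest set, d arrives preloaded with the old destination. */
    static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a)
    {
        tcg_gen_add_i64(d, d, a);    /* d = old_dest + src */
    }

    static const GVecGen2 g_acc = {
        .fni8 = gen_acc_i64,
        .vece = MO_64,
        .load_dest = true,
    };
    /* tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g_acc); */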

@@ -297,9 +297,9 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_ctpop_i32(TCGv_i32 a1, TCGv_i32 a2);
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
unsigned int ofs, unsigned int len);
void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
@@ -493,9 +493,9 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_ctpop_i64(TCGv_i64 a1, TCGv_i64 a2);
void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
unsigned int ofs, unsigned int len);
void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
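
With the count type now signed (and the asserts in tcg/tcg-op.c, at the end of this series, widened to reject negative values explicitly), callers that compute a complemented rotate count get a loud failure instead of unsigned wraparound. An illustrative helper, not part of the patch, showing the usual identity between the two directions:

    /* rotr(x, n) == rotl(x, 32 - n) for 0 < n < 32. */
    static void gen_rotri_via_rotli(TCGv_i32 dst, TCGv_i32 src, int32_t n)
    {
        tcg_debug_assert(n > 0 && n < 32);
        tcg_gen_rotli_i32(dst, src, 32 - n);
    }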

@@ -502,7 +502,7 @@ static void clear_vec_high(DisasContext *s, bool is_q, int rd)
tcg_temp_free_i64(tcg_zero);
}
if (vsz > 16) {
tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
tcg_gen_gvec_dup_imm(MO_64, ofs + 16, vsz - 16, vsz - 16, 0);
}
}
@@ -7785,8 +7785,8 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
/* MOVI or MVNI, with MVNI negation handled above. */
tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
vec_full_reg_size(s), imm);
tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
vec_full_reg_size(s), imm);
} else {
/* ORR or BIC, with BIC negation to AND handled above. */
if (is_neg) {
@@ -10214,8 +10214,8 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
if (is_u) {
if (shift == 8 << size) {
/* Shift count the same size as element size produces zero. */
tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
is_q ? 16 : 8, vec_full_reg_size(s), 0);
tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
is_q ? 16 : 8, vec_full_reg_size(s), 0);
} else {
gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
}
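
Note on the two hunks above: when the replicated immediate is zero, every element size produces the same all-zero pattern, so clear_vec_high can use MO_64 while handle_vec_simd_shri passes the instruction's own size; both are correct.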

@@ -177,7 +177,7 @@ static bool do_mov_z(DisasContext *s, int rd, int rn)
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
unsigned vsz = vec_full_reg_size(s);
tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), vsz, vsz, word);
tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}
/* Invoke a vector expander on two Pregs. */
@@ -1453,7 +1453,7 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
unsigned oprsz = size_for_gvec(setsz / 8);
if (oprsz * 8 == setsz) {
tcg_gen_gvec_dup64i(ofs, oprsz, maxsz, word);
tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
goto done;
}
}
@@ -2044,7 +2044,7 @@ static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
} else {
tcg_gen_gvec_dup64i(dofs, vsz, vsz, 0);
tcg_gen_gvec_dup_imm(esz, dofs, vsz, vsz, 0);
}
}
return true;
@@ -3260,9 +3260,7 @@ static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
/* Decode the VFP immediate. */
imm = vfp_expand_imm(a->esz, a->imm);
imm = dup_const(a->esz, imm);
tcg_gen_gvec_dup64i(dofs, vsz, vsz, imm);
tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
}
return true;
}
@@ -3276,7 +3274,7 @@ static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
unsigned vsz = vec_full_reg_size(s);
int dofs = vec_full_reg_offset(s, a->rd);
tcg_gen_gvec_dup64i(dofs, vsz, vsz, dup_const(a->esz, a->imm));
tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
}
return true;
}
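
The trans_FDUP and trans_DUP_i hunks rely on the new expander performing the per-element replication itself, i.e. on the equivalence (sketch, with esz and imm as in the code above):

    /* old */ tcg_gen_gvec_dup64i(dofs, vsz, vsz, dup_const(esz, imm));
    /* new */ tcg_gen_gvec_dup_imm(esz, dofs, vsz, vsz, imm);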

@@ -5209,7 +5209,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
MIN(shift, (8 << size) - 1),
vec_size, vec_size);
} else if (shift >= 8 << size) {
tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
tcg_gen_gvec_dup_imm(MO_8, rd_ofs, vec_size,
vec_size, 0);
} else {
tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
@@ -5260,7 +5261,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
* architecturally valid and results in zero.
*/
if (shift >= 8 << size) {
tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
tcg_gen_gvec_dup_imm(size, rd_ofs,
vec_size, vec_size, 0);
} else {
tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
@@ -5606,7 +5608,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
tcg_temp_free_i64(t64);
} else {
tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
tcg_gen_gvec_dup_imm(MO_32, reg_ofs, vec_size,
vec_size, imm);
}
}
}

@@ -1035,21 +1035,25 @@ GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)
#define GEN_VXFORM_DUPI(name, tcg_op, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
int simm; \
if (unlikely(!ctx->altivec_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
simm = SIMM5(ctx->opcode); \
tcg_op(avr_full_offset(rD(ctx->opcode)), 16, 16, simm); \
static void gen_vsplti(DisasContext *ctx, int vece)
{
int simm;
if (unlikely(!ctx->altivec_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VPU);
return;
}
GEN_VXFORM_DUPI(vspltisb, tcg_gen_gvec_dup8i, 6, 12);
GEN_VXFORM_DUPI(vspltish, tcg_gen_gvec_dup16i, 6, 13);
GEN_VXFORM_DUPI(vspltisw, tcg_gen_gvec_dup32i, 6, 14);
simm = SIMM5(ctx->opcode);
tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
}
#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }
GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);
#define GEN_VXFORM_NOA(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
@@ -1559,7 +1563,7 @@ GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
#undef GEN_VXFORM_DUPI
#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED
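
For clarity, the replacement macro expands to a one-line trampoline per instruction; e.g. GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12) produces:

    static void gen_vspltisb(DisasContext *ctx) { gen_vsplti(ctx, MO_8); }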

@@ -1579,7 +1579,7 @@ static void gen_xxspltib(DisasContext *ctx)
return;
}
}
tcg_gen_gvec_dup8i(vsr_full_offset(rt), 16, 16, uim8);
tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(rt), 16, 16, uim8);
}
static void gen_xxsldwi(DisasContext *ctx)

@@ -231,8 +231,8 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
#define gen_gvec_mov(v1, v2) \
tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
16)
#define gen_gvec_dup64i(v1, c) \
tcg_gen_gvec_dup64i(vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_dup_imm(es, v1, c) \
tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c);
#define gen_gvec_fn_2(fn, es, v1, v2) \
tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
16, 16)
@@ -316,31 +316,6 @@ static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
tcg_temp_free_i64(cl);
}
static void gen_gvec_dupi(uint8_t es, uint8_t reg, uint64_t c)
{
switch (es) {
case ES_8:
tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, c);
break;
case ES_16:
tcg_gen_gvec_dup16i(vec_full_reg_offset(reg), 16, 16, c);
break;
case ES_32:
tcg_gen_gvec_dup32i(vec_full_reg_offset(reg), 16, 16, c);
break;
case ES_64:
gen_gvec_dup64i(reg, c);
break;
default:
g_assert_not_reached();
}
}
static void zero_vec(uint8_t reg)
{
tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, 0);
}
static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
uint64_t b)
{
@@ -396,8 +371,8 @@ static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
* Masks for both 64 bit elements of the vector are the same.
* Trust tcg to produce a good constant loading.
*/
gen_gvec_dup64i(get_field(s, v1),
generate_byte_mask(i2 & 0xff));
gen_gvec_dup_imm(ES_64, get_field(s, v1),
generate_byte_mask(i2 & 0xff));
} else {
TCGv_i64 t = tcg_temp_new_i64();
@@ -432,7 +407,7 @@ static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
}
}
gen_gvec_dupi(es, get_field(s, v1), mask);
gen_gvec_dup_imm(es, get_field(s, v1), mask);
return DISAS_NEXT;
}
@@ -585,7 +560,7 @@ static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
t = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
zero_vec(get_field(s, v1));
gen_gvec_dup_imm(es, get_field(s, v1), 0);
write_vec_element_i64(t, get_field(s, v1), enr, es);
tcg_temp_free_i64(t);
return DISAS_NEXT;
@@ -892,7 +867,7 @@ static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
gen_gvec_dupi(es, get_field(s, v1), data);
gen_gvec_dup_imm(es, get_field(s, v1), data);
return DISAS_NEXT;
}
@@ -1372,7 +1347,7 @@ static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
read_vec_element_i32(tmp, get_field(s, v2), i, ES_32);
tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
}
zero_vec(get_field(s, v1));
gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);
tcg_temp_free_i32(tmp);
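
The removed gen_gvec_dupi() dispatcher (and zero_vec()) become unnecessary because the element size can be passed straight through; the direct gen_gvec_dup_imm(es, ...) calls above rely on the s390x ES_* element-size constants having the same values as the corresponding MO_* constants.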

@@ -326,11 +326,34 @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
in units of LNSZ. This limits the expansion of inline code. */
static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
{
if (oprsz % lnsz == 0) {
uint32_t lnct = oprsz / lnsz;
return lnct >= 1 && lnct <= MAX_UNROLL;
uint32_t q, r;
if (oprsz < lnsz) {
return false;
}
return false;
q = oprsz / lnsz;
r = oprsz % lnsz;
tcg_debug_assert((r & 7) == 0);
if (lnsz < 16) {
/* For sizes below 16, accept no remainder. */
if (r != 0) {
return false;
}
} else {
/*
* Recall that ARM SVE allows vector sizes that are not a
* power of 2, but always a multiple of 16. The intent is
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
* In addition, expand_clr needs to handle a multiple of 8.
* Thus we can handle the tail with one more operation per
* diminishing power of 2.
*/
q += ctpop32(r);
}
return q <= MAX_UNROLL;
}
static void expand_clr(uint32_t dofs, uint32_t maxsz);
@@ -402,22 +425,31 @@ static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
uint32_t size, bool prefer_i64)
{
if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
/*
* Recall that ARM SVE allows vector sizes that are not a
* power of 2, but always a multiple of 16. The intent is
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
* It is hard to imagine a case in which v256 is supported
* but v128 is not, but check anyway.
*/
if (tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece)
&& (size % 32 == 0
|| tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) {
return TCG_TYPE_V256;
}
/*
* Recall that ARM SVE allows vector sizes that are not a
* power of 2, but always a multiple of 16. The intent is
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
* It is hard to imagine a case in which v256 is supported
* but v128 is not, but check anyway.
* In addition, expand_clr needs to handle a multiple of 8.
*/
if (TCG_TARGET_HAS_v256 &&
check_size_impl(size, 32) &&
tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece) &&
(!(size & 16) ||
(TCG_TARGET_HAS_v128 &&
tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) &&
(!(size & 8) ||
(TCG_TARGET_HAS_v64 &&
tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
return TCG_TYPE_V256;
}
if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
&& tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece)) {
if (TCG_TARGET_HAS_v128 &&
check_size_impl(size, 16) &&
tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece) &&
(!(size & 8) ||
(TCG_TARGET_HAS_v64 &&
tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
return TCG_TYPE_V128;
}
if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
@@ -432,6 +464,18 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
{
uint32_t i = 0;
tcg_debug_assert(oprsz >= 8);
/*
* This may be expand_clr for the tail of an operation, e.g.
* oprsz == 8 && maxsz == 64. The first 8 bytes of this store
* are misaligned wrt the maximum vector size, so do that first.
*/
if (dofs & 8) {
tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
i += 8;
}
switch (type) {
case TCG_TYPE_V256:
/*
@@ -619,17 +663,22 @@ static void expand_clr(uint32_t dofs, uint32_t maxsz)
/* Expand OPSZ bytes worth of two-operand operations using i32 elements. */
static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
void (*fni)(TCGv_i32, TCGv_i32))
bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
tcg_gen_ld_i32(t0, cpu_env, aofs + i);
fni(t0, t0);
tcg_gen_st_i32(t0, cpu_env, dofs + i);
if (load_dest) {
tcg_gen_ld_i32(t1, cpu_env, dofs + i);
}
fni(t1, t0);
tcg_gen_st_i32(t1, cpu_env, dofs + i);
}
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
}
static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
@@ -749,17 +798,22 @@ static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
/* Expand OPSZ bytes worth of two-operand operations using i64 elements. */
static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
void (*fni)(TCGv_i64, TCGv_i64))
bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
{
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
tcg_gen_ld_i64(t0, cpu_env, aofs + i);
fni(t0, t0);
tcg_gen_st_i64(t0, cpu_env, dofs + i);
if (load_dest) {
tcg_gen_ld_i64(t1, cpu_env, dofs + i);
}
fni(t1, t0);
tcg_gen_st_i64(t1, cpu_env, dofs + i);
}
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
}
static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
@@ -880,17 +934,23 @@ static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
/* Expand OPSZ bytes worth of two-operand operations using host vectors. */
static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t tysz, TCGType type,
bool load_dest,
void (*fni)(unsigned, TCGv_vec, TCGv_vec))
{
TCGv_vec t0 = tcg_temp_new_vec(type);
TCGv_vec t1 = tcg_temp_new_vec(type);
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
tcg_gen_ld_vec(t0, cpu_env, aofs + i);
fni(vece, t0, t0);
tcg_gen_st_vec(t0, cpu_env, dofs + i);
if (load_dest) {
tcg_gen_ld_vec(t1, cpu_env, dofs + i);
}
fni(vece, t1, t0);
tcg_gen_st_vec(t1, cpu_env, dofs + i);
}
tcg_temp_free_vec(t0);
tcg_temp_free_vec(t1);
}
/* Expand OPSZ bytes worth of two-vector operands and an immediate operand
@@ -1044,7 +1104,8 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
some = QEMU_ALIGN_DOWN(oprsz, 32);
expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv);
expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
g->load_dest, g->fniv);
if (some == oprsz) {
break;
}
@@ -1054,17 +1115,19 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
maxsz -= some;
/* fallthru */
case TCG_TYPE_V128:
expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv);
expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
g->load_dest, g->fniv);
break;
case TCG_TYPE_V64:
expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv);
expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
g->load_dest, g->fniv);
break;
case 0:
if (g->fni8 && check_size_impl(oprsz, 8)) {
expand_2_i64(dofs, aofs, oprsz, g->fni8);
expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
expand_2_i32(dofs, aofs, oprsz, g->fni4);
expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
} else {
assert(g->fno != NULL);
tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
@@ -1541,32 +1604,11 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
}
}
void tcg_gen_gvec_dup64i(uint32_t dofs, uint32_t oprsz,
uint32_t maxsz, uint64_t x)
void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
uint32_t maxsz, uint64_t x)
{
check_size_align(oprsz, maxsz, dofs);
do_dup(MO_64, dofs, oprsz, maxsz, NULL, NULL, x);
}
void tcg_gen_gvec_dup32i(uint32_t dofs, uint32_t oprsz,
uint32_t maxsz, uint32_t x)
{
check_size_align(oprsz, maxsz, dofs);
do_dup(MO_32, dofs, oprsz, maxsz, NULL, NULL, x);
}
void tcg_gen_gvec_dup16i(uint32_t dofs, uint32_t oprsz,
uint32_t maxsz, uint16_t x)
{
check_size_align(oprsz, maxsz, dofs);
do_dup(MO_16, dofs, oprsz, maxsz, NULL, NULL, x);
}
void tcg_gen_gvec_dup8i(uint32_t dofs, uint32_t oprsz,
uint32_t maxsz, uint8_t x)
{
check_size_align(oprsz, maxsz, dofs);
do_dup(MO_8, dofs, oprsz, maxsz, NULL, NULL, x);
do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
}
void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -2319,7 +2361,7 @@ void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
};
if (aofs == bofs) {
tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
} else {
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
}
@@ -2336,7 +2378,7 @@ void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
};
if (aofs == bofs) {
tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
} else {
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
}
@@ -2353,7 +2395,7 @@ void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
};
if (aofs == bofs) {
tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
} else {
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
}
@@ -2404,7 +2446,7 @@ void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
};
if (aofs == bofs) {
tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
} else {
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
}
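
The reworked check_size_impl() is the subtle part of the tail-clearing change; a worked example of its arithmetic, assuming MAX_UNROLL is the expander's small unroll limit (4 in tcg-op-gvec.c):

    /* oprsz = 80, lnsz = 32: q = 2, r = 16, ctpop32(16) = 1 -> q = 3, OK:
     *   two 32-byte ops plus one 16-byte tail op.
     * oprsz = 24, lnsz = 16: q = 1, r = 8, ctpop32(8) = 1 -> q = 2, OK:
     *   one 16-byte op plus an 8-byte tail (the expand_clr case).
     * oprsz = 12, lnsz = 16: oprsz < lnsz -> false.
     */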

@@ -540,9 +540,9 @@ void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
}
}
void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
tcg_debug_assert(arg2 < 32);
tcg_debug_assert(arg2 >= 0 && arg2 < 32);
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
@@ -580,9 +580,9 @@ void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
}
}
void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
tcg_debug_assert(arg2 < 32);
tcg_debug_assert(arg2 >= 0 && arg2 < 32);
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
@@ -1962,9 +1962,9 @@ void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
}
}
void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
tcg_debug_assert(arg2 < 64);
tcg_debug_assert(arg2 >= 0 && arg2 < 64);
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i64(ret, arg1);
@@ -2001,9 +2001,9 @@ void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
}
}
void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
tcg_debug_assert(arg2 < 64);
tcg_debug_assert(arg2 >= 0 && arg2 < 64);
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i64(ret, arg1);
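
A closing observation on the widened bounds check, which applies equally to all four rotate helpers above:

    tcg_debug_assert(arg2 >= 0 && arg2 < 64);
    /* arg2 == 0  : taken by the fast path (plain mov);
     * arg2 == 64 : rejected -- a full-width rotate must be written as 0;
     * arg2 <  0  : rejected explicitly now that the count is signed,
     *              instead of wrapping to a huge unsigned value and
     *              tripping the upper bound only as a side effect. */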