target/loongarch: Implement xvaddw/xvsubw

This patch includes:
- XVADDW{EV/OD}.{H.B/W.H/D.W/Q.D}[U];
- XVSUBW{EV/OD}.{H.B/W.H/D.W/Q.D}[U];
- XVADDW{EV/OD}.{H.BU.B/W.HU.H/D.WU.W/Q.DU.D}.

Signed-off-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230914022645.1151356-21-gaosong@loongson.cn>
This commit is contained in:
parent
64cf6b99d7
commit
85995f076a
@ -1782,6 +1782,49 @@ INSN_LASX(xvhsubw_wu_hu, vvv)
|
||||
INSN_LASX(xvhsubw_du_wu, vvv)
|
||||
INSN_LASX(xvhsubw_qu_du, vvv)
|
||||
|
||||
INSN_LASX(xvaddwev_h_b, vvv)
|
||||
INSN_LASX(xvaddwev_w_h, vvv)
|
||||
INSN_LASX(xvaddwev_d_w, vvv)
|
||||
INSN_LASX(xvaddwev_q_d, vvv)
|
||||
INSN_LASX(xvaddwod_h_b, vvv)
|
||||
INSN_LASX(xvaddwod_w_h, vvv)
|
||||
INSN_LASX(xvaddwod_d_w, vvv)
|
||||
INSN_LASX(xvaddwod_q_d, vvv)
|
||||
INSN_LASX(xvsubwev_h_b, vvv)
|
||||
INSN_LASX(xvsubwev_w_h, vvv)
|
||||
INSN_LASX(xvsubwev_d_w, vvv)
|
||||
INSN_LASX(xvsubwev_q_d, vvv)
|
||||
INSN_LASX(xvsubwod_h_b, vvv)
|
||||
INSN_LASX(xvsubwod_w_h, vvv)
|
||||
INSN_LASX(xvsubwod_d_w, vvv)
|
||||
INSN_LASX(xvsubwod_q_d, vvv)
|
||||
|
||||
INSN_LASX(xvaddwev_h_bu, vvv)
|
||||
INSN_LASX(xvaddwev_w_hu, vvv)
|
||||
INSN_LASX(xvaddwev_d_wu, vvv)
|
||||
INSN_LASX(xvaddwev_q_du, vvv)
|
||||
INSN_LASX(xvaddwod_h_bu, vvv)
|
||||
INSN_LASX(xvaddwod_w_hu, vvv)
|
||||
INSN_LASX(xvaddwod_d_wu, vvv)
|
||||
INSN_LASX(xvaddwod_q_du, vvv)
|
||||
INSN_LASX(xvsubwev_h_bu, vvv)
|
||||
INSN_LASX(xvsubwev_w_hu, vvv)
|
||||
INSN_LASX(xvsubwev_d_wu, vvv)
|
||||
INSN_LASX(xvsubwev_q_du, vvv)
|
||||
INSN_LASX(xvsubwod_h_bu, vvv)
|
||||
INSN_LASX(xvsubwod_w_hu, vvv)
|
||||
INSN_LASX(xvsubwod_d_wu, vvv)
|
||||
INSN_LASX(xvsubwod_q_du, vvv)
|
||||
|
||||
INSN_LASX(xvaddwev_h_bu_b, vvv)
|
||||
INSN_LASX(xvaddwev_w_hu_h, vvv)
|
||||
INSN_LASX(xvaddwev_d_wu_w, vvv)
|
||||
INSN_LASX(xvaddwev_q_du_d, vvv)
|
||||
INSN_LASX(xvaddwod_h_bu_b, vvv)
|
||||
INSN_LASX(xvaddwod_w_hu_h, vvv)
|
||||
INSN_LASX(xvaddwod_d_wu_w, vvv)
|
||||
INSN_LASX(xvaddwod_q_du_d, vvv)
|
||||
|
||||
INSN_LASX(xvreplgr2vr_b, vr)
|
||||
INSN_LASX(xvreplgr2vr_h, vr)
|
||||
INSN_LASX(xvreplgr2vr_w, vr)
|
||||
|
@ -550,6 +550,10 @@ TRANS(vaddwev_h_b, LSX, gvec_vvv, MO_8, do_vaddwev_s)
|
||||
TRANS(vaddwev_w_h, LSX, gvec_vvv, MO_16, do_vaddwev_s)
|
||||
TRANS(vaddwev_d_w, LSX, gvec_vvv, MO_32, do_vaddwev_s)
|
||||
TRANS(vaddwev_q_d, LSX, gvec_vvv, MO_64, do_vaddwev_s)
|
||||
TRANS(xvaddwev_h_b, LASX, gvec_xxx, MO_8, do_vaddwev_s)
|
||||
TRANS(xvaddwev_w_h, LASX, gvec_xxx, MO_16, do_vaddwev_s)
|
||||
TRANS(xvaddwev_d_w, LASX, gvec_xxx, MO_32, do_vaddwev_s)
|
||||
TRANS(xvaddwev_q_d, LASX, gvec_xxx, MO_64, do_vaddwev_s)
|
||||
|
||||
static void gen_vaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
|
||||
{
|
||||
@ -629,6 +633,11 @@ TRANS(vaddwod_h_b, LSX, gvec_vvv, MO_8, do_vaddwod_s)
|
||||
TRANS(vaddwod_w_h, LSX, gvec_vvv, MO_16, do_vaddwod_s)
|
||||
TRANS(vaddwod_d_w, LSX, gvec_vvv, MO_32, do_vaddwod_s)
|
||||
TRANS(vaddwod_q_d, LSX, gvec_vvv, MO_64, do_vaddwod_s)
|
||||
TRANS(xvaddwod_h_b, LASX, gvec_xxx, MO_8, do_vaddwod_s)
|
||||
TRANS(xvaddwod_w_h, LASX, gvec_xxx, MO_16, do_vaddwod_s)
|
||||
TRANS(xvaddwod_d_w, LASX, gvec_xxx, MO_32, do_vaddwod_s)
|
||||
TRANS(xvaddwod_q_d, LASX, gvec_xxx, MO_64, do_vaddwod_s)
|
||||
|
||||
|
||||
static void gen_vsubwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
|
||||
{
|
||||
@ -712,6 +721,10 @@ TRANS(vsubwev_h_b, LSX, gvec_vvv, MO_8, do_vsubwev_s)
|
||||
TRANS(vsubwev_w_h, LSX, gvec_vvv, MO_16, do_vsubwev_s)
|
||||
TRANS(vsubwev_d_w, LSX, gvec_vvv, MO_32, do_vsubwev_s)
|
||||
TRANS(vsubwev_q_d, LSX, gvec_vvv, MO_64, do_vsubwev_s)
|
||||
TRANS(xvsubwev_h_b, LASX, gvec_xxx, MO_8, do_vsubwev_s)
|
||||
TRANS(xvsubwev_w_h, LASX, gvec_xxx, MO_16, do_vsubwev_s)
|
||||
TRANS(xvsubwev_d_w, LASX, gvec_xxx, MO_32, do_vsubwev_s)
|
||||
TRANS(xvsubwev_q_d, LASX, gvec_xxx, MO_64, do_vsubwev_s)
|
||||
|
||||
static void gen_vsubwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
|
||||
{
|
||||
@ -791,6 +804,10 @@ TRANS(vsubwod_h_b, LSX, gvec_vvv, MO_8, do_vsubwod_s)
|
||||
TRANS(vsubwod_w_h, LSX, gvec_vvv, MO_16, do_vsubwod_s)
|
||||
TRANS(vsubwod_d_w, LSX, gvec_vvv, MO_32, do_vsubwod_s)
|
||||
TRANS(vsubwod_q_d, LSX, gvec_vvv, MO_64, do_vsubwod_s)
|
||||
TRANS(xvsubwod_h_b, LASX, gvec_xxx, MO_8, do_vsubwod_s)
|
||||
TRANS(xvsubwod_w_h, LASX, gvec_xxx, MO_16, do_vsubwod_s)
|
||||
TRANS(xvsubwod_d_w, LASX, gvec_xxx, MO_32, do_vsubwod_s)
|
||||
TRANS(xvsubwod_q_d, LASX, gvec_xxx, MO_64, do_vsubwod_s)
|
||||
|
||||
static void gen_vaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
|
||||
{
|
||||
@ -866,6 +883,10 @@ TRANS(vaddwev_h_bu, LSX, gvec_vvv, MO_8, do_vaddwev_u)
|
||||
TRANS(vaddwev_w_hu, LSX, gvec_vvv, MO_16, do_vaddwev_u)
|
||||
TRANS(vaddwev_d_wu, LSX, gvec_vvv, MO_32, do_vaddwev_u)
|
||||
TRANS(vaddwev_q_du, LSX, gvec_vvv, MO_64, do_vaddwev_u)
|
||||
TRANS(xvaddwev_h_bu, LASX, gvec_xxx, MO_8, do_vaddwev_u)
|
||||
TRANS(xvaddwev_w_hu, LASX, gvec_xxx, MO_16, do_vaddwev_u)
|
||||
TRANS(xvaddwev_d_wu, LASX, gvec_xxx, MO_32, do_vaddwev_u)
|
||||
TRANS(xvaddwev_q_du, LASX, gvec_xxx, MO_64, do_vaddwev_u)
|
||||
|
||||
static void gen_vaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
|
||||
{
|
||||
@ -945,6 +966,10 @@ TRANS(vaddwod_h_bu, LSX, gvec_vvv, MO_8, do_vaddwod_u)
|
||||
TRANS(vaddwod_w_hu, LSX, gvec_vvv, MO_16, do_vaddwod_u)
|
||||
TRANS(vaddwod_d_wu, LSX, gvec_vvv, MO_32, do_vaddwod_u)
|
||||
TRANS(vaddwod_q_du, LSX, gvec_vvv, MO_64, do_vaddwod_u)
|
||||
TRANS(xvaddwod_h_bu, LASX, gvec_xxx, MO_8, do_vaddwod_u)
|
||||
TRANS(xvaddwod_w_hu, LASX, gvec_xxx, MO_16, do_vaddwod_u)
|
||||
TRANS(xvaddwod_d_wu, LASX, gvec_xxx, MO_32, do_vaddwod_u)
|
||||
TRANS(xvaddwod_q_du, LASX, gvec_xxx, MO_64, do_vaddwod_u)
|
||||
|
||||
static void gen_vsubwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
|
||||
{
|
||||
@ -1020,6 +1045,10 @@ TRANS(vsubwev_h_bu, LSX, gvec_vvv, MO_8, do_vsubwev_u)
|
||||
TRANS(vsubwev_w_hu, LSX, gvec_vvv, MO_16, do_vsubwev_u)
|
||||
TRANS(vsubwev_d_wu, LSX, gvec_vvv, MO_32, do_vsubwev_u)
|
||||
TRANS(vsubwev_q_du, LSX, gvec_vvv, MO_64, do_vsubwev_u)
|
||||
TRANS(xvsubwev_h_bu, LASX, gvec_xxx, MO_8, do_vsubwev_u)
|
||||
TRANS(xvsubwev_w_hu, LASX, gvec_xxx, MO_16, do_vsubwev_u)
|
||||
TRANS(xvsubwev_d_wu, LASX, gvec_xxx, MO_32, do_vsubwev_u)
|
||||
TRANS(xvsubwev_q_du, LASX, gvec_xxx, MO_64, do_vsubwev_u)
|
||||
|
||||
static void gen_vsubwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
|
||||
{
|
||||
@ -1099,6 +1128,10 @@ TRANS(vsubwod_h_bu, LSX, gvec_vvv, MO_8, do_vsubwod_u)
|
||||
TRANS(vsubwod_w_hu, LSX, gvec_vvv, MO_16, do_vsubwod_u)
|
||||
TRANS(vsubwod_d_wu, LSX, gvec_vvv, MO_32, do_vsubwod_u)
|
||||
TRANS(vsubwod_q_du, LSX, gvec_vvv, MO_64, do_vsubwod_u)
|
||||
TRANS(xvsubwod_h_bu, LASX, gvec_xxx, MO_8, do_vsubwod_u)
|
||||
TRANS(xvsubwod_w_hu, LASX, gvec_xxx, MO_16, do_vsubwod_u)
|
||||
TRANS(xvsubwod_d_wu, LASX, gvec_xxx, MO_32, do_vsubwod_u)
|
||||
TRANS(xvsubwod_q_du, LASX, gvec_xxx, MO_64, do_vsubwod_u)
|
||||
|
||||
static void gen_vaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
|
||||
{
|
||||
@ -1182,6 +1215,10 @@ TRANS(vaddwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vaddwev_u_s)
|
||||
TRANS(vaddwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vaddwev_u_s)
|
||||
TRANS(vaddwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vaddwev_u_s)
|
||||
TRANS(vaddwev_q_du_d, LSX, gvec_vvv, MO_64, do_vaddwev_u_s)
|
||||
TRANS(xvaddwev_h_bu_b, LASX, gvec_xxx, MO_8, do_vaddwev_u_s)
|
||||
TRANS(xvaddwev_w_hu_h, LASX, gvec_xxx, MO_16, do_vaddwev_u_s)
|
||||
TRANS(xvaddwev_d_wu_w, LASX, gvec_xxx, MO_32, do_vaddwev_u_s)
|
||||
TRANS(xvaddwev_q_du_d, LASX, gvec_xxx, MO_64, do_vaddwev_u_s)
|
||||
|
||||
static void gen_vaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
|
||||
{
|
||||
@ -1262,6 +1299,10 @@ TRANS(vaddwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vaddwod_u_s)
|
||||
TRANS(vaddwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vaddwod_u_s)
|
||||
TRANS(vaddwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vaddwod_u_s)
|
||||
TRANS(vaddwod_q_du_d, LSX, gvec_vvv, MO_64, do_vaddwod_u_s)
|
||||
TRANS(xvaddwod_h_bu_b, LSX, gvec_xxx, MO_8, do_vaddwod_u_s)
|
||||
TRANS(xvaddwod_w_hu_h, LSX, gvec_xxx, MO_16, do_vaddwod_u_s)
|
||||
TRANS(xvaddwod_d_wu_w, LSX, gvec_xxx, MO_32, do_vaddwod_u_s)
|
||||
TRANS(xvaddwod_q_du_d, LSX, gvec_xxx, MO_64, do_vaddwod_u_s)
|
||||
|
||||
static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
|
||||
void (*gen_shr_vec)(unsigned, TCGv_vec,
|
||||
|
@ -1361,6 +1361,51 @@ xvhsubw_wu_hu 0111 01000101 10101 ..... ..... ..... @vvv
|
||||
xvhsubw_du_wu 0111 01000101 10110 ..... ..... ..... @vvv
|
||||
xvhsubw_qu_du 0111 01000101 10111 ..... ..... ..... @vvv
|
||||
|
||||
xvaddwev_h_b 0111 01000001 11100 ..... ..... ..... @vvv
|
||||
xvaddwev_w_h 0111 01000001 11101 ..... ..... ..... @vvv
|
||||
xvaddwev_d_w 0111 01000001 11110 ..... ..... ..... @vvv
|
||||
xvaddwev_q_d 0111 01000001 11111 ..... ..... ..... @vvv
|
||||
xvaddwod_h_b 0111 01000010 00100 ..... ..... ..... @vvv
|
||||
xvaddwod_w_h 0111 01000010 00101 ..... ..... ..... @vvv
|
||||
xvaddwod_d_w 0111 01000010 00110 ..... ..... ..... @vvv
|
||||
xvaddwod_q_d 0111 01000010 00111 ..... ..... ..... @vvv
|
||||
|
||||
xvsubwev_h_b 0111 01000010 00000 ..... ..... ..... @vvv
|
||||
xvsubwev_w_h 0111 01000010 00001 ..... ..... ..... @vvv
|
||||
xvsubwev_d_w 0111 01000010 00010 ..... ..... ..... @vvv
|
||||
xvsubwev_q_d 0111 01000010 00011 ..... ..... ..... @vvv
|
||||
xvsubwod_h_b 0111 01000010 01000 ..... ..... ..... @vvv
|
||||
xvsubwod_w_h 0111 01000010 01001 ..... ..... ..... @vvv
|
||||
xvsubwod_d_w 0111 01000010 01010 ..... ..... ..... @vvv
|
||||
xvsubwod_q_d 0111 01000010 01011 ..... ..... ..... @vvv
|
||||
|
||||
xvaddwev_h_bu 0111 01000010 11100 ..... ..... ..... @vvv
|
||||
xvaddwev_w_hu 0111 01000010 11101 ..... ..... ..... @vvv
|
||||
xvaddwev_d_wu 0111 01000010 11110 ..... ..... ..... @vvv
|
||||
xvaddwev_q_du 0111 01000010 11111 ..... ..... ..... @vvv
|
||||
xvaddwod_h_bu 0111 01000011 00100 ..... ..... ..... @vvv
|
||||
xvaddwod_w_hu 0111 01000011 00101 ..... ..... ..... @vvv
|
||||
xvaddwod_d_wu 0111 01000011 00110 ..... ..... ..... @vvv
|
||||
xvaddwod_q_du 0111 01000011 00111 ..... ..... ..... @vvv
|
||||
|
||||
xvsubwev_h_bu 0111 01000011 00000 ..... ..... ..... @vvv
|
||||
xvsubwev_w_hu 0111 01000011 00001 ..... ..... ..... @vvv
|
||||
xvsubwev_d_wu 0111 01000011 00010 ..... ..... ..... @vvv
|
||||
xvsubwev_q_du 0111 01000011 00011 ..... ..... ..... @vvv
|
||||
xvsubwod_h_bu 0111 01000011 01000 ..... ..... ..... @vvv
|
||||
xvsubwod_w_hu 0111 01000011 01001 ..... ..... ..... @vvv
|
||||
xvsubwod_d_wu 0111 01000011 01010 ..... ..... ..... @vvv
|
||||
xvsubwod_q_du 0111 01000011 01011 ..... ..... ..... @vvv
|
||||
|
||||
xvaddwev_h_bu_b 0111 01000011 11100 ..... ..... ..... @vvv
|
||||
xvaddwev_w_hu_h 0111 01000011 11101 ..... ..... ..... @vvv
|
||||
xvaddwev_d_wu_w 0111 01000011 11110 ..... ..... ..... @vvv
|
||||
xvaddwev_q_du_d 0111 01000011 11111 ..... ..... ..... @vvv
|
||||
xvaddwod_h_bu_b 0111 01000100 00000 ..... ..... ..... @vvv
|
||||
xvaddwod_w_hu_h 0111 01000100 00001 ..... ..... ..... @vvv
|
||||
xvaddwod_d_wu_w 0111 01000100 00010 ..... ..... ..... @vvv
|
||||
xvaddwod_q_du_d 0111 01000100 00011 ..... ..... ..... @vvv
|
||||
|
||||
xvreplgr2vr_b 0111 01101001 11110 00000 ..... ..... @vr
|
||||
xvreplgr2vr_h 0111 01101001 11110 00001 ..... ..... @vr
|
||||
xvreplgr2vr_w 0111 01101001 11110 00010 ..... ..... @vr
|
||||
|
@ -106,133 +106,173 @@ void HELPER(vhsubw_qu_du)(void *vd, void *vj, void *vk, uint32_t desc)
|
||||
}
|
||||
|
||||
/*
 * Emit a helper that combines the even-indexed elements of Vj and Vk
 * (element accessor E2, each BIT/2 bits wide) with DO_OP after
 * widening them to the destination element type E1 (BIT bits).
 * The loop count comes from the descriptor's operand size, so one
 * helper serves both 128-bit (LSX) and 256-bit (LASX) operands.
 */
#define DO_EVEN(NAME, BIT, E1, E2, DO_OP)                            \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)       \
{                                                                    \
    int i;                                                           \
    VReg *Vd = (VReg *)vd;                                           \
    VReg *Vj = (VReg *)vj;                                           \
    VReg *Vk = (VReg *)vk;                                           \
    typedef __typeof(Vd->E1(0)) TD;                                  \
    int oprsz = simd_oprsz(desc);                                    \
                                                                     \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                        \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i));     \
    }                                                                \
}
|
||||
|
||||
/*
 * Same as DO_EVEN, but operates on the odd-indexed source elements
 * (2 * i + 1) of Vj and Vk, widening each to type E1 before applying
 * DO_OP.  Loop count is derived from simd_oprsz(desc) so the helper
 * is size-agnostic (LSX/LASX).
 */
#define DO_ODD(NAME, BIT, E1, E2, DO_OP)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)           \
{                                                                        \
    int i;                                                               \
    VReg *Vd = (VReg *)vd;                                               \
    VReg *Vj = (VReg *)vj;                                               \
    VReg *Vk = (VReg *)vk;                                               \
    typedef __typeof(Vd->E1(0)) TD;                                      \
    int oprsz = simd_oprsz(desc);                                        \
                                                                         \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                            \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
    }                                                                    \
}
|
||||
|
||||
/*
 * vaddwev.q.d: widen the even-indexed signed 64-bit elements of Vj
 * and Vk to 128 bits and add them.  Iterates per 128-bit lane so the
 * same helper handles LSX (one lane) and LASX (two lanes).
 */
void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        /* int128_makes64: sign-extend the 64-bit source to 128 bits. */
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}
|
||||
|
||||
DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
|
||||
DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
|
||||
DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)
|
||||
|
||||
/*
 * vaddwod.q.d: widen the odd-indexed signed 64-bit elements of Vj
 * and Vk to 128 bits and add them, one result per 128-bit lane.
 */
void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}
|
||||
|
||||
DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
|
||||
DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
|
||||
DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)
|
||||
|
||||
/*
 * vsubwev.q.d: widen the even-indexed signed 64-bit elements of Vj
 * and Vk to 128 bits and subtract (Vj - Vk), one result per lane.
 */
void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}
|
||||
|
||||
DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
|
||||
DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
|
||||
DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)
|
||||
|
||||
/*
 * vsubwod.q.d: widen the odd-indexed signed 64-bit elements of Vj
 * and Vk to 128 bits and subtract (Vj - Vk), one result per lane.
 */
void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}
|
||||
|
||||
DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
|
||||
DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
|
||||
DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)
|
||||
|
||||
/*
 * vaddwev.q.du: widen the even-indexed unsigned 64-bit elements of
 * Vj and Vk to 128 bits and add them, one result per 128-bit lane.
 */
void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        /* int128_make64: zero-extend the 64-bit source to 128 bits. */
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
                              int128_make64(Vk->UD(2 * i)));
    }
}
|
||||
|
||||
DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
|
||||
DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
|
||||
DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)
|
||||
|
||||
/*
 * vaddwod.q.du: widen the odd-indexed unsigned 64-bit elements of
 * Vj and Vk to 128 bits and add them, one result per 128-bit lane.
 */
void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i + 1)));
    }
}
|
||||
|
||||
DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
|
||||
DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
|
||||
DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)
|
||||
|
||||
/*
 * vsubwev.q.du: widen the even-indexed unsigned 64-bit elements of
 * Vj and Vk to 128 bits and subtract (Vj - Vk), one result per lane.
 */
void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i)),
                              int128_make64(Vk->UD(2 * i)));
    }
}
|
||||
|
||||
DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
|
||||
DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
|
||||
DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)
|
||||
|
||||
/*
 * vsubwod.q.du: widen the odd-indexed unsigned 64-bit elements of
 * Vj and Vk to 128 bits and subtract (Vj - Vk), one result per lane.
 */
void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i + 1)));
    }
}
|
||||
|
||||
DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
|
||||
@ -240,7 +280,7 @@ DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
|
||||
DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)
|
||||
|
||||
#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
|
||||
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
|
||||
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
|
||||
{ \
|
||||
int i; \
|
||||
VReg *Vd = (VReg *)vd; \
|
||||
@ -248,13 +288,15 @@ void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
|
||||
VReg *Vk = (VReg *)vk; \
|
||||
typedef __typeof(Vd->ES1(0)) TDS; \
|
||||
typedef __typeof(Vd->EU1(0)) TDU; \
|
||||
for (i = 0; i < LSX_LEN/BIT; i++) { \
|
||||
int oprsz = simd_oprsz(desc); \
|
||||
\
|
||||
for (i = 0; i < oprsz / (BIT / 8); i++) { \
|
||||
Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i) ,(TDS)Vk->ES2(2 * i)); \
|
||||
} \
|
||||
}
|
||||
|
||||
#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
|
||||
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
|
||||
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
|
||||
{ \
|
||||
int i; \
|
||||
VReg *Vd = (VReg *)vd; \
|
||||
@ -262,33 +304,43 @@ void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
|
||||
VReg *Vk = (VReg *)vk; \
|
||||
typedef __typeof(Vd->ES1(0)) TDS; \
|
||||
typedef __typeof(Vd->EU1(0)) TDU; \
|
||||
for (i = 0; i < LSX_LEN/BIT; i++) { \
|
||||
int oprsz = simd_oprsz(desc); \
|
||||
\
|
||||
for (i = 0; i < oprsz / (BIT / 8); i++) { \
|
||||
Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
|
||||
} \
|
||||
}
|
||||
|
||||
/*
 * vaddwev.q.du.d: mixed-signedness widening add of the even-indexed
 * 64-bit elements — Vj is treated as unsigned (zero-extended) and Vk
 * as signed (sign-extended), one 128-bit result per lane.
 */
void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}
|
||||
|
||||
DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
|
||||
DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
|
||||
DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)
|
||||
|
||||
/*
 * vaddwod.q.du.d: mixed-signedness widening add of the odd-indexed
 * 64-bit elements — Vj is treated as unsigned (zero-extended) and Vk
 * as signed (sign-extended), one 128-bit result per lane.
 */
void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}
|
||||
|
||||
DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
|
||||
|
Loading…
Reference in New Issue
Block a user