arm: Auto-vectorization for MVE: vshr

This patch enables MVE vshr instructions for auto-vectorization.  New
MVE patterns are introduced that take a vector of constants as second
operand, with all elements equal.

The existing mve_vshrq_n_<supf><mode> is kept, as it takes a single
immediate as second operand, and is used by arm_mve.h.

The vashr<mode>3 and vlshr<mode>3 expanders are moved from neon.md to
vec-common.md and updated to rely on the normal expansion scheme to
generate shifts by immediate.
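
With these patterns in place, a plain C loop shifting right by a
constant can be vectorized directly.  A minimal sketch (hypothetical
example, not part of the patch; function name and flags are
illustrative, e.g. -O3 -march=armv8.1-m.main+mve -mfloat-abi=hard):

#include <stdint.h>

/* The vectorizer presents the constant 5 as a vector of identical
   immediates, which the new mve_vshrq_n_s<mode>_imm pattern matches.  */
void shift_by_imm (int32_t *dest, int32_t *a, int n)
{
  int i;
  for (i = 0; i < n; i++)
    dest[i] = a[i] >> 5;	/* Expected to become vshr.s32 qN, qM, #5.  */
}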

2020-12-03  Christophe Lyon  <christophe.lyon@linaro.org>

	gcc/
	* config/arm/mve.md (mve_vshrq_n_s<mode>_imm): New entry.
	(mve_vshrq_n_u<mode>_imm): Likewise.
	* config/arm/neon.md (vashr<mode>3, vlshr<mode>3): Move to ...
	* config/arm/vec-common.md: ... here.

	gcc/testsuite/
	* gcc.target/arm/simd/mve-vshr.c: Add tests for vshr.

gcc/config/arm/mve.md
@@ -763,6 +763,7 @@
;;
;; [vshrq_n_s, vshrq_n_u])
;;
;; Version that takes an immediate as operand 2.
(define_insn "mve_vshrq_n_<supf><mode>"
[
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
@ -775,6 +776,39 @@
[(set_attr "type" "mve_move")
])
;; Versions that take constant vectors as operand 2 (with all elements
;; equal).
(define_insn "mve_vshrq_n_s<mode>_imm"
[
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
(ashiftrt:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
(match_operand:MVE_2 2 "imm_for_neon_rshift_operand" "i")))
]
"TARGET_HAVE_MVE"
{
return neon_output_shift_immediate ("vshr", 's', &operands[2],
<MODE>mode,
VALID_NEON_QREG_MODE (<MODE>mode),
true);
}
[(set_attr "type" "mve_move")
])
(define_insn "mve_vshrq_n_u<mode>_imm"
[
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
(lshiftrt:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
(match_operand:MVE_2 2 "imm_for_neon_rshift_operand" "i")))
]
"TARGET_HAVE_MVE"
{
return neon_output_shift_immediate ("vshr", 'u', &operands[2],
<MODE>mode,
VALID_NEON_QREG_MODE (<MODE>mode),
true);
}
[(set_attr "type" "mve_move")
])
;;
;; [vcvtq_n_from_f_s, vcvtq_n_from_f_u])
;;
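
The existing mve_vshrq_n_<supf><mode> pattern kept above remains the
entry point for the arm_mve.h intrinsics, which pass the shift amount
as a plain immediate rather than a constant vector.  A minimal sketch
of that route, using the real vshrq_n_s32 intrinsic (the function name
here is illustrative, not from the patch):

#include <arm_mve.h>

/* Shift each lane of a right by 5; this maps to the intrinsic pattern,
   not to the new _imm patterns used by the vectorizer.  */
int32x4_t intrinsic_shift (int32x4_t a)
{
  return vshrq_n_s32 (a, 5);
}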

gcc/config/arm/neon.md
@@ -899,40 +899,6 @@
[(set_attr "type" "neon_shift_reg<q>")]
)
(define_expand "vashr<mode>3"
[(set (match_operand:VDQIW 0 "s_register_operand")
(ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
(match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
"TARGET_NEON"
{
if (s_register_operand (operands[2], <MODE>mode))
{
rtx neg = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_neg<mode>2 (neg, operands[2]));
emit_insn (gen_ashl<mode>3_signed (operands[0], operands[1], neg));
}
else
emit_insn (gen_vashr<mode>3_imm (operands[0], operands[1], operands[2]));
DONE;
})
(define_expand "vlshr<mode>3"
[(set (match_operand:VDQIW 0 "s_register_operand")
(lshiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
(match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
"TARGET_NEON"
{
if (s_register_operand (operands[2], <MODE>mode))
{
rtx neg = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neon_neg<mode>2 (neg, operands[2]));
emit_insn (gen_ashl<mode>3_unsigned (operands[0], operands[1], neg));
}
else
emit_insn (gen_vlshr<mode>3_imm (operands[0], operands[1], operands[2]));
DONE;
})
;; 64-bit shifts
;; This pattern loads a 32-bit shift count into a 64-bit NEON register,

gcc/config/arm/vec-common.md
@@ -259,4 +259,40 @@
{
  emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], operands[2]));
  DONE;
})

;; When operand 2 is an immediate, use the normal expansion to match
;; gen_vashr<mode>3_imm for Neon and gen_mve_vshrq_n_s<mode>_imm for
;; MVE.
(define_expand "vashr<mode>3"
[(set (match_operand:VDQIW 0 "s_register_operand")
(ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
(match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
"ARM_HAVE_<MODE>_ARITH"
{
if (s_register_operand (operands[2], <MODE>mode))
{
rtx neg = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neg<mode>2 (neg, operands[2]));
emit_insn (gen_mve_vshlq_s<mode> (operands[0], operands[1], neg));
DONE;
}
})
;; When operand 2 is an immediate, use the normal expansion to match
;; gen_vashr<mode>3_imm for Neon and gen_mve_vshrq_n_u<mode>_imm for
;; MVE.
(define_expand "vlshr<mode>3"
[(set (match_operand:VDQIW 0 "s_register_operand")
(lshiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
(match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
"ARM_HAVE_<MODE>_ARITH"
{
if (s_register_operand (operands[2], <MODE>mode))
{
rtx neg = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neg<mode>2 (neg, operands[2]));
emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], neg));
DONE;
}
})
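
When the shift amount is not a compile-time constant, both expanders
take the register path above: the count is negated and the operation
is emitted as a vshl (a left shift by a negative amount), since there
is no right-shift-by-register instruction.  A minimal sketch of a loop
taking that path (hypothetical example, not part of the patch):

#include <stdint.h>

/* Per-element variable shift: expected to expand to a vector negate
   of b[] followed by a vshl, rather than a vshr.  */
void shift_by_reg (uint16_t *dest, uint16_t *a, uint16_t *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    dest[i] = a[i] >> b[i];
}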

gcc/testsuite/gcc.target/arm/simd/mve-vshr.c
@@ -0,0 +1,59 @@
/* { dg-do assemble } */
/* { dg-require-effective-target arm_v8_1m_mve_ok } */
/* { dg-add-options arm_v8_1m_mve } */
/* { dg-additional-options "-O3" } */

#include <stdint.h>

#define FUNC(SIGN, TYPE, BITS, NB, OP, NAME) \
  void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a, TYPE##BITS##_t *b) { \
    int i; \
    for (i=0; i<NB; i++) { \
      dest[i] = a[i] OP b[i]; \
    } \
  }

#define FUNC_IMM(SIGN, TYPE, BITS, NB, OP, NAME) \
  void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a) { \
    int i; \
    for (i=0; i<NB; i++) { \
      dest[i] = a[i] OP 5; \
    } \
  }
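
/* For reference, FUNC_IMM(s, int, 32, 4, >>, vshrimm) above expands
   (modulo whitespace) to

     void test_vshrimm_s32x4 (int32_t * __restrict__ dest, int32_t *a) {
       int i;
       for (i=0; i<4; i++) {
	 dest[i] = a[i] >> 5;
       }
     }

   which, at -O3 with MVE enabled, is expected to become a single
   vshr.s32 by #5 across the four lanes.  */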
/* 64-bit vectors.  */
FUNC(s, int, 32, 2, >>, vshr)
FUNC(u, uint, 32, 2, >>, vshr)
FUNC(s, int, 16, 4, >>, vshr)
FUNC(u, uint, 16, 4, >>, vshr)
FUNC(s, int, 8, 8, >>, vshr)
FUNC(u, uint, 8, 8, >>, vshr)

/* 128-bit vectors.  */
FUNC(s, int, 32, 4, >>, vshr)
FUNC(u, uint, 32, 4, >>, vshr)
FUNC(s, int, 16, 8, >>, vshr)
FUNC(u, uint, 16, 8, >>, vshr)
FUNC(s, int, 8, 16, >>, vshr)
FUNC(u, uint, 8, 16, >>, vshr)

/* 64-bit vectors.  */
FUNC_IMM(s, int, 32, 2, >>, vshrimm)
FUNC_IMM(u, uint, 32, 2, >>, vshrimm)
FUNC_IMM(s, int, 16, 4, >>, vshrimm)
FUNC_IMM(u, uint, 16, 4, >>, vshrimm)
FUNC_IMM(s, int, 8, 8, >>, vshrimm)
FUNC_IMM(u, uint, 8, 8, >>, vshrimm)

/* 128-bit vectors.  */
FUNC_IMM(s, int, 32, 4, >>, vshrimm)
FUNC_IMM(u, uint, 32, 4, >>, vshrimm)
FUNC_IMM(s, int, 16, 8, >>, vshrimm)
FUNC_IMM(u, uint, 16, 8, >>, vshrimm)
FUNC_IMM(s, int, 8, 16, >>, vshrimm)
FUNC_IMM(u, uint, 8, 16, >>, vshrimm)

/* MVE has only 128-bit vectors, so we can vectorize only half of the
   functions above.  */
/* { dg-final { scan-assembler-times {vshr.s[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */
/* { dg-final { scan-assembler-times {vshr.u[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */