gcc: Add vec_select -> subreg RTL simplification
Add a new RTL simplification for the case of a VEC_SELECT selecting
the low part of a vector. The simplification returns a SUBREG.

The primary goal of this patch is to enable better combinations of
Neon RTL patterns - specifically allowing generation of
'write-to-high-half' narrowing instructions.

Adding this RTL simplification means that the expected results for a
number of tests need to be updated:
* aarch64 Neon: Update the scan-assembler regex for intrinsics tests
  to expect a scalar register instead of lane 0 of a vector.
* aarch64 SVE: Likewise.
* arm MVE: Use lane 1 instead of lane 0 for lane-extraction intrinsics
  tests (as the move instructions get optimized away for lane 0).

This patch also adds new code generation tests to
narrow_high_combine.c to verify the benefit of this RTL
simplification.

gcc/ChangeLog:

2021-06-08  Jonathan Wright  <jonathan.wright@arm.com>

        * combine.c (combine_simplify_rtx): Add vec_select -> subreg
        simplification.
        * config/aarch64/aarch64.md (*zero_extend<SHORT:mode><GPI:mode>2_aarch64):
        Add Neon to general purpose register case for zero-extend
        pattern.
        * config/arm/vfp.md (*arm_movsi_vfp): Remove "*" from *t -> r
        case to prevent some cases opting to go through memory.
        * cse.c (fold_rtx): Add vec_select -> subreg simplification.
        * rtl.c (rtvec_series_p): Define predicate to determine
        whether a vector contains a linear series of integers.
        * rtl.h (rtvec_series_p): Define.
        * rtlanal.c (vec_series_lowpart_p): Define predicate to
        determine if a vector selection is equivalent to the low part
        of the vector.
        * rtlanal.h (vec_series_lowpart_p): Define.
        * simplify-rtx.c (simplify_context::simplify_binary_operation_1):
        Add vec_select -> subreg simplification.

gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/extract_zero_extend.c: Remove dump scan
        for RTL pattern match.
        * gcc.target/aarch64/narrow_high_combine.c: Add new tests.
        * gcc.target/aarch64/simd/vmulx_laneq_f64_1.c: Update
        scan-assembler regex to look for a scalar register instead of
        lane 0 of a vector.
        * gcc.target/aarch64/simd/vmulxd_laneq_f64_1.c: Likewise.
        * gcc.target/aarch64/simd/vmulxs_lane_f32_1.c: Likewise.
        * gcc.target/aarch64/simd/vmulxs_laneq_f32_1.c: Likewise.
        * gcc.target/aarch64/simd/vqdmlalh_lane_s16.c: Likewise.
        * gcc.target/aarch64/simd/vqdmlals_lane_s32.c: Likewise.
        * gcc.target/aarch64/simd/vqdmlslh_lane_s16.c: Likewise.
        * gcc.target/aarch64/simd/vqdmlsls_lane_s32.c: Likewise.
        * gcc.target/aarch64/simd/vqdmullh_lane_s16.c: Likewise.
        * gcc.target/aarch64/simd/vqdmullh_laneq_s16.c: Likewise.
        * gcc.target/aarch64/simd/vqdmulls_lane_s32.c: Likewise.
        * gcc.target/aarch64/simd/vqdmulls_laneq_s32.c: Likewise.
        * gcc.target/aarch64/sve/dup_lane_1.c: Likewise.
        * gcc.target/aarch64/sve/extract_1.c: Likewise.
        * gcc.target/aarch64/sve/extract_2.c: Likewise.
        * gcc.target/aarch64/sve/extract_3.c: Likewise.
        * gcc.target/aarch64/sve/extract_4.c: Likewise.
        * gcc.target/aarch64/sve/live_1.c: Update scan-assembler regex
        cases to look for 'b' and 'h' registers instead of 'w'.
        * gcc.target/arm/crypto-vsha1cq_u32.c: Update scan-assembler
        regex to reflect lane 0 vector extractions being simplified to
        scalar register moves.
        * gcc.target/arm/crypto-vsha1h_u32.c: Likewise.
        * gcc.target/arm/crypto-vsha1mq_u32.c: Likewise.
        * gcc.target/arm/crypto-vsha1pq_u32.c: Likewise.
        * gcc.target/arm/mve/intrinsics/vgetq_lane_f16.c: Extract lane
        1 as the moves for lane 0 now get optimized away.
        * gcc.target/arm/mve/intrinsics/vgetq_lane_f32.c: Likewise.
        * gcc.target/arm/mve/intrinsics/vgetq_lane_s16.c: Likewise.
        * gcc.target/arm/mve/intrinsics/vgetq_lane_s32.c: Likewise.
        * gcc.target/arm/mve/intrinsics/vgetq_lane_s8.c: Likewise.
        * gcc.target/arm/mve/intrinsics/vgetq_lane_u16.c: Likewise.
        * gcc.target/arm/mve/intrinsics/vgetq_lane_u32.c: Likewise.
        * gcc.target/arm/mve/intrinsics/vgetq_lane_u8.c: Likewise.
parent 60aee15bb7
commit 8695bf78da
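For illustration (not part of the commit itself), the kind of source this targets is a Neon narrowing intrinsic whose result feeds the high half of a 128-bit vector, as exercised by the new narrow_high_combine.c tests further down. A minimal sketch, with an illustrative function name:

#include <arm_neon.h>

/* Sketch only: with the vec_select -> subreg simplification, combine is
   expected to match the 'write-to-high-half' patterns here, so this pair
   of narrowing additions should assemble to addhn followed by addhn2
   rather than needing an extra register move (compare the
   scan-assembler-times directives added to narrow_high_combine.c).  */
int8x16_t
addhn_high_sketch (int16x8_t a, int16x8_t b, int16x8_t c)
{
  return vcombine_s8 (vaddhn_s16 (a, c), vaddhn_s16 (b, c));
}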
gcc/combine.c

@@ -90,6 +90,7 @@ along with GCC; see the file COPYING3.  If not see
#include "rtl-iter.h"
#include "print-rtl.h"
#include "function-abi.h"
#include "rtlanal.h"

/* Number of attempts to combine instructions in this function.  */

@@ -6276,6 +6277,19 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
				  - 1,
				  0));
      break;
    case VEC_SELECT:
      {
	rtx trueop0 = XEXP (x, 0);
	mode = GET_MODE (trueop0);
	rtx trueop1 = XEXP (x, 1);
	/* If we select a low-part subreg, return that.  */
	if (vec_series_lowpart_p (GET_MODE (x), mode, trueop1))
	  {
	    rtx new_rtx = lowpart_subreg (GET_MODE (x), trueop0, mode);
	    if (new_rtx != NULL_RTX)
	      return new_rtx;
	  }
      }

    default:
      break;
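A minimal sketch of the transformation added above (the same form is added to cse.c and simplify-rtx.c below), assuming a little-endian target and a V4SI operand; the register number is illustrative:

  (vec_select:V2SI (reg:V4SI 97)
                   (parallel [(const_int 0) (const_int 1)]))

becomes, once vec_series_lowpart_p confirms the selection is the low part of the operand,

  (subreg:V2SI (reg:V4SI 97) 0)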
gcc/config/aarch64/aarch64.md

@@ -1884,15 +1884,16 @@
)

(define_insn "*zero_extend<SHORT:mode><GPI:mode>2_aarch64"
  [(set (match_operand:GPI 0 "register_operand" "=r,r,w")
        (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,m")))]
  [(set (match_operand:GPI 0 "register_operand" "=r,r,w,r")
        (zero_extend:GPI (match_operand:SHORT 1 "nonimmediate_operand" "r,m,m,w")))]
  ""
  "@
   and\t%<GPI:w>0, %<GPI:w>1, <SHORT:short_mask>
   ldr<SHORT:size>\t%w0, %1
   ldr\t%<SHORT:size>0, %1"
  [(set_attr "type" "logic_imm,load_4,f_loads")
   (set_attr "arch" "*,*,fp")]
   ldr\t%<SHORT:size>0, %1
   umov\t%w0, %1.<SHORT:size>[0]"
  [(set_attr "type" "logic_imm,load_4,f_loads,neon_to_gp")
   (set_attr "arch" "*,*,fp,fp")]
)

(define_expand "<optab>qihi2"
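A hedged illustration of why the extra alternative helps: after the new simplification, a zero-extended lane-0 extract can reach this pattern with its operand still in a SIMD register, and the added w -> r alternative lets it be emitted as a single umov. The extract_zero_extend.c test below checks for this shape (umov with no masking and). Sketch only, with an illustrative function name:

#include <arm_neon.h>

/* Expected (not guaranteed here) to compile to a single
     umov	w0, v0.b[0]
   with no following 'and', per the updated extract_zero_extend.c scans.  */
unsigned int
lane0_zext_sketch (uint8x16_t a)
{
  return vgetq_lane_u8 (a, 0);
}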
gcc/config/arm/vfp.md

@@ -224,7 +224,7 @@
;; problems because small constants get converted into adds.
(define_insn "*arm_movsi_vfp"
  [(set (match_operand:SI 0 "nonimmediate_operand" "=rk,r,r,r,rk,m ,*t,r,*t,*t, *Uv")
      (match_operand:SI 1 "general_operand" "rk, I,K,j,mi,rk,r,*t,*t,*Uvi,*t"))]
      (match_operand:SI 1 "general_operand" "rk, I,K,j,mi,rk,r,t,*t,*Uvi,*t"))]
  "TARGET_ARM && TARGET_HARD_FLOAT
   && (   s_register_operand (operands[0], SImode)
       || s_register_operand (operands[1], SImode))"
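For context, the constraint tweak matters when a 32-bit value sits in a VFP/Neon register and is then wanted in a core register, which is exactly what a lane-0 extract turns into once it is just a subreg. A hedged sketch, with an illustrative function name:

#include <arm_neon.h>

/* Sketch only: ideally a single 'vmov rN, sM' rather than a round trip
   through memory, which is what dropping the '*' from the 't'
   alternative is intended to let the register allocator choose.  */
uint32_t
lane0_to_core_sketch (uint32x2_t a)
{
  return vget_lane_u32 (a, 0);
}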
gcc/cse.c

@@ -43,6 +43,7 @@ along with GCC; see the file COPYING3.  If not see
#include "rtl-iter.h"
#include "regs.h"
#include "function-abi.h"
#include "rtlanal.h"

/* The basic idea of common subexpression elimination is to go
   through the code, keeping a record of expressions that would

@@ -3171,6 +3172,19 @@ fold_rtx (rtx x, rtx_insn *insn)
      if (NO_FUNCTION_CSE && CONSTANT_P (XEXP (XEXP (x, 0), 0)))
	return x;
      break;
    case VEC_SELECT:
      {
	rtx trueop0 = XEXP (x, 0);
	mode = GET_MODE (trueop0);
	rtx trueop1 = XEXP (x, 1);
	/* If we select a low-part subreg, return that.  */
	if (vec_series_lowpart_p (GET_MODE (x), mode, trueop1))
	  {
	    rtx new_rtx = lowpart_subreg (GET_MODE (x), trueop0, mode);
	    if (new_rtx != NULL_RTX)
	      return new_rtx;
	  }
      }

    /* Anything else goes through the loop below.  */
    default:
gcc/rtl.c

@@ -736,6 +736,21 @@ rtvec_all_equal_p (const_rtvec vec)
    }
}

/* Return true if VEC contains a linear series of integers
   { START, START+1, START+2, ... }.  */

bool
rtvec_series_p (rtvec vec, int start)
{
  for (int i = 0; i < GET_NUM_ELEM (vec); i++)
    {
      rtx x = RTVEC_ELT (vec, i);
      if (!CONST_INT_P (x) || INTVAL (x) != i + start)
	return false;
    }
  return true;
}

/* Return an indication of which type of insn should have X as a body.
   In generator files, this can be UNKNOWN if the answer is only known
   at (GCC) runtime.  Otherwise the value is CODE_LABEL, INSN, CALL_INSN
gcc/rtl.h

@@ -2996,6 +2996,7 @@ extern unsigned int rtx_size (const_rtx);
extern rtx shallow_copy_rtx (const_rtx CXX_MEM_STAT_INFO);
extern int rtx_equal_p (const_rtx, const_rtx);
extern bool rtvec_all_equal_p (const_rtvec);
extern bool rtvec_series_p (rtvec, int);

/* Return true if X is a vector constant with a duplicated element value.  */
gcc/rtlanal.c

@@ -6940,3 +6940,22 @@ register_asm_p (const_rtx x)
	  && DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (x))
	  && DECL_REGISTER (REG_EXPR (x)));
}

/* Return true if, for all OP of mode OP_MODE:

     (vec_select:RESULT_MODE OP SEL)

   is equivalent to the lowpart RESULT_MODE of OP.  */

bool
vec_series_lowpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel)
{
  int nunits;
  if (GET_MODE_NUNITS (op_mode).is_constant (&nunits)
      && targetm.can_change_mode_class (op_mode, result_mode, ALL_REGS))
    {
      int offset = BYTES_BIG_ENDIAN ? nunits - XVECLEN (sel, 0) : 0;
      return rtvec_series_p (XVEC (sel, 0), offset);
    }
  return false;
}
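A small usage sketch of the two new predicates (illustrative values, not taken from the patch): for a selection of the two low lanes of a four-element vector, the selector is a PARALLEL of the constants 0 and 1, so the series starts at 0 and, on a little-endian target, vec_series_lowpart_p accepts it:

  /* Sketch only (GCC-internal RTL).  */
  rtx sel = gen_rtx_PARALLEL (VOIDmode,
			      gen_rtvec (2, const0_rtx, const1_rtx));
  /* rtvec_series_p (XVEC (sel, 0), 0) is true: the elements are 0, 1.  */
  /* On big-endian, vec_series_lowpart_p instead requires the series to
     start at nunits - XVECLEN (sel, 0), matching how the lowpart is
     numbered there.  */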
gcc/rtlanal.h

@@ -331,4 +331,7 @@ inline vec_rtx_properties_base::~vec_rtx_properties_base ()
   collecting the references a second time.  */
using vec_rtx_properties = growing_rtx_properties<vec_rtx_properties_base>;

bool
vec_series_lowpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel);

#endif
gcc/simplify-rtx.c

@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3.  If not see
#include "selftest.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"
#include "rtlanal.h"

/* Simplification and canonicalization of RTL.  */

@@ -4201,6 +4202,15 @@ simplify_context::simplify_binary_operation_1 (rtx_code code,
	    return trueop0;
	  }

	/* If we select a low-part subreg, return that.  */
	if (vec_series_lowpart_p (mode, GET_MODE (trueop0), trueop1))
	  {
	    rtx new_rtx = lowpart_subreg (mode, trueop0,
					  GET_MODE (trueop0));
	    if (new_rtx != NULL_RTX)
	      return new_rtx;
	  }

	/* If we build {a,b} then permute it, build the result directly.  */
	if (XVECLEN (trueop1, 0) == 2
	    && CONST_INT_P (XVECEXP (trueop1, 0, 0))
gcc/testsuite/gcc.target/aarch64/extract_zero_extend.c

@@ -70,12 +70,3 @@ foo_siv4hi (siv4hi a)

/* { dg-final { scan-assembler-times "umov\\t" 8 } } */
/* { dg-final { scan-assembler-not "and\\t" } } */

/* { dg-final { scan-rtl-dump "aarch64_get_lane_zero_extenddiv16qi" "final" } } */
/* { dg-final { scan-rtl-dump "aarch64_get_lane_zero_extenddiv8qi" "final" } } */
/* { dg-final { scan-rtl-dump "aarch64_get_lane_zero_extenddiv8hi" "final" } } */
/* { dg-final { scan-rtl-dump "aarch64_get_lane_zero_extenddiv4hi" "final" } } */
/* { dg-final { scan-rtl-dump "aarch64_get_lane_zero_extendsiv16qi" "final" } } */
/* { dg-final { scan-rtl-dump "aarch64_get_lane_zero_extendsiv8qi" "final" } } */
/* { dg-final { scan-rtl-dump "aarch64_get_lane_zero_extendsiv8hi" "final" } } */
/* { dg-final { scan-rtl-dump "aarch64_get_lane_zero_extendsiv4hi" "final" } } */
gcc/testsuite/gcc.target/aarch64/narrow_high_combine.c

@@ -4,122 +4,228 @@

#include <arm_neon.h>

#define TEST_ARITH(name, rettype, rmwtype, intype, fs, rs) \
  rettype test_ ## name ## _ ## fs ## _high_combine \
#define TEST_1_ARITH(name, rettype, rmwtype, intype, fs, rs) \
  rettype test_1_ ## name ## _ ## fs ## _high_combine \
		(rmwtype a, intype b, intype c) \
	{ \
		return vcombine_ ## rs (a, name ## _ ## fs (b, c)); \
	}

TEST_ARITH (vaddhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_ARITH (vaddhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_ARITH (vaddhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_ARITH (vaddhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_ARITH (vaddhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_ARITH (vaddhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_ARITH (vaddhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_ARITH (vaddhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_ARITH (vaddhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_ARITH (vaddhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_ARITH (vaddhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_ARITH (vaddhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_ARITH (vraddhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_ARITH (vraddhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_ARITH (vraddhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_ARITH (vraddhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_ARITH (vraddhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_ARITH (vraddhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_ARITH (vraddhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_ARITH (vraddhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_ARITH (vraddhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_ARITH (vraddhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_ARITH (vraddhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_ARITH (vraddhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_ARITH (vsubhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_ARITH (vsubhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_ARITH (vsubhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_ARITH (vsubhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_ARITH (vsubhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_ARITH (vsubhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_ARITH (vsubhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_ARITH (vsubhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_ARITH (vsubhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_ARITH (vsubhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_ARITH (vsubhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_ARITH (vsubhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_ARITH (vrsubhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_ARITH (vrsubhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_ARITH (vrsubhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_ARITH (vrsubhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_ARITH (vrsubhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_ARITH (vrsubhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_ARITH (vrsubhn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_ARITH (vrsubhn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_ARITH (vrsubhn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_ARITH (vrsubhn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_ARITH (vrsubhn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_ARITH (vrsubhn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

#define TEST_SHIFT(name, rettype, rmwtype, intype, fs, rs) \
  rettype test_ ## name ## _ ## fs ## _high_combine \
#define TEST_2_ARITH(name, rettype, intype, fs, rs) \
  rettype test_2_ ## name ## _ ## fs ## _high_combine \
		(intype a, intype b, intype c) \
	{ \
		return vcombine_ ## rs (name ## _ ## fs (a, c), \
					name ## _ ## fs (b, c)); \
	}

TEST_2_ARITH (vaddhn, int8x16_t, int16x8_t, s16, s8)
TEST_2_ARITH (vaddhn, int16x8_t, int32x4_t, s32, s16)
TEST_2_ARITH (vaddhn, int32x4_t, int64x2_t, s64, s32)
TEST_2_ARITH (vaddhn, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_ARITH (vaddhn, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_ARITH (vaddhn, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_ARITH (vraddhn, int8x16_t, int16x8_t, s16, s8)
TEST_2_ARITH (vraddhn, int16x8_t, int32x4_t, s32, s16)
TEST_2_ARITH (vraddhn, int32x4_t, int64x2_t, s64, s32)
TEST_2_ARITH (vraddhn, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_ARITH (vraddhn, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_ARITH (vraddhn, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_ARITH (vsubhn, int8x16_t, int16x8_t, s16, s8)
TEST_2_ARITH (vsubhn, int16x8_t, int32x4_t, s32, s16)
TEST_2_ARITH (vsubhn, int32x4_t, int64x2_t, s64, s32)
TEST_2_ARITH (vsubhn, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_ARITH (vsubhn, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_ARITH (vsubhn, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_ARITH (vrsubhn, int8x16_t, int16x8_t, s16, s8)
TEST_2_ARITH (vrsubhn, int16x8_t, int32x4_t, s32, s16)
TEST_2_ARITH (vrsubhn, int32x4_t, int64x2_t, s64, s32)
TEST_2_ARITH (vrsubhn, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_ARITH (vrsubhn, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_ARITH (vrsubhn, uint32x4_t, uint64x2_t, u64, u32)

#define TEST_1_SHIFT(name, rettype, rmwtype, intype, fs, rs) \
  rettype test_1_ ## name ## _ ## fs ## _high_combine \
		(rmwtype a, intype b) \
	{ \
		return vcombine_ ## rs (a, name ## _ ## fs (b, 4)); \
	}

TEST_SHIFT (vshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_SHIFT (vshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_SHIFT (vshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_SHIFT (vshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_SHIFT (vshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_SHIFT (vshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_SHIFT (vshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_SHIFT (vshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_SHIFT (vshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_SHIFT (vshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_SHIFT (vshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_SHIFT (vshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_SHIFT (vrshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_SHIFT (vrshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_SHIFT (vrshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_SHIFT (vrshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_SHIFT (vrshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_SHIFT (vrshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_SHIFT (vrshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_SHIFT (vrshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_SHIFT (vrshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_SHIFT (vrshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_SHIFT (vrshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_SHIFT (vrshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_SHIFT (vqshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_SHIFT (vqshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_SHIFT (vqshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_SHIFT (vqshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_SHIFT (vqshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_SHIFT (vqshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_SHIFT (vqshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_SHIFT (vqshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_SHIFT (vqshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_SHIFT (vqshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_SHIFT (vqshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_SHIFT (vqshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_SHIFT (vqrshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_SHIFT (vqrshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_SHIFT (vqrshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_SHIFT (vqrshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_SHIFT (vqrshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_SHIFT (vqrshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_SHIFT (vqrshrn_n, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_SHIFT (vqrshrn_n, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_SHIFT (vqrshrn_n, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_SHIFT (vqrshrn_n, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_SHIFT (vqrshrn_n, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_SHIFT (vqrshrn_n, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_SHIFT (vqshrun_n, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
TEST_SHIFT (vqshrun_n, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
TEST_SHIFT (vqshrun_n, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)
TEST_1_SHIFT (vqshrun_n, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
TEST_1_SHIFT (vqshrun_n, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
TEST_1_SHIFT (vqshrun_n, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)

TEST_SHIFT (vqrshrun_n, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
TEST_SHIFT (vqrshrun_n, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
TEST_SHIFT (vqrshrun_n, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)
TEST_1_SHIFT (vqrshrun_n, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
TEST_1_SHIFT (vqrshrun_n, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
TEST_1_SHIFT (vqrshrun_n, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)

#define TEST_UNARY(name, rettype, rmwtype, intype, fs, rs) \
  rettype test_ ## name ## _ ## fs ## _high_combine \
#define TEST_2_SHIFT(name, rettype, intype, fs, rs) \
  rettype test_2_ ## name ## _ ## fs ## _high_combine \
		(intype a, intype b) \
	{ \
		return vcombine_ ## rs (name ## _ ## fs (a, 4), \
					name ## _ ## fs (b, 4)); \
	}

TEST_2_SHIFT (vshrn_n, int8x16_t, int16x8_t, s16, s8)
TEST_2_SHIFT (vshrn_n, int16x8_t, int32x4_t, s32, s16)
TEST_2_SHIFT (vshrn_n, int32x4_t, int64x2_t, s64, s32)
TEST_2_SHIFT (vshrn_n, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_SHIFT (vshrn_n, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_SHIFT (vshrn_n, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_SHIFT (vrshrn_n, int8x16_t, int16x8_t, s16, s8)
TEST_2_SHIFT (vrshrn_n, int16x8_t, int32x4_t, s32, s16)
TEST_2_SHIFT (vrshrn_n, int32x4_t, int64x2_t, s64, s32)
TEST_2_SHIFT (vrshrn_n, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_SHIFT (vrshrn_n, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_SHIFT (vrshrn_n, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_SHIFT (vqshrn_n, int8x16_t, int16x8_t, s16, s8)
TEST_2_SHIFT (vqshrn_n, int16x8_t, int32x4_t, s32, s16)
TEST_2_SHIFT (vqshrn_n, int32x4_t, int64x2_t, s64, s32)
TEST_2_SHIFT (vqshrn_n, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_SHIFT (vqshrn_n, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_SHIFT (vqshrn_n, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_SHIFT (vqrshrn_n, int8x16_t, int16x8_t, s16, s8)
TEST_2_SHIFT (vqrshrn_n, int16x8_t, int32x4_t, s32, s16)
TEST_2_SHIFT (vqrshrn_n, int32x4_t, int64x2_t, s64, s32)
TEST_2_SHIFT (vqrshrn_n, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_SHIFT (vqrshrn_n, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_SHIFT (vqrshrn_n, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_SHIFT (vqshrun_n, uint8x16_t, int16x8_t, s16, u8)
TEST_2_SHIFT (vqshrun_n, uint16x8_t, int32x4_t, s32, u16)
TEST_2_SHIFT (vqshrun_n, uint32x4_t, int64x2_t, s64, u32)

TEST_2_SHIFT (vqrshrun_n, uint8x16_t, int16x8_t, s16, u8)
TEST_2_SHIFT (vqrshrun_n, uint16x8_t, int32x4_t, s32, u16)
TEST_2_SHIFT (vqrshrun_n, uint32x4_t, int64x2_t, s64, u32)

#define TEST_1_UNARY(name, rettype, rmwtype, intype, fs, rs) \
  rettype test_1_ ## name ## _ ## fs ## _high_combine \
		(rmwtype a, intype b) \
	{ \
		return vcombine_ ## rs (a, name ## _ ## fs (b)); \
	}

TEST_UNARY (vmovn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_UNARY (vmovn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_UNARY (vmovn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_UNARY (vmovn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_UNARY (vmovn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_UNARY (vmovn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_UNARY (vmovn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_UNARY (vmovn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_UNARY (vmovn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_UNARY (vmovn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_UNARY (vmovn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_UNARY (vmovn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_UNARY (vqmovn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_UNARY (vqmovn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_UNARY (vqmovn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_UNARY (vqmovn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_UNARY (vqmovn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_UNARY (vqmovn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)
TEST_1_UNARY (vqmovn, int8x16_t, int8x8_t, int16x8_t, s16, s8)
TEST_1_UNARY (vqmovn, int16x8_t, int16x4_t, int32x4_t, s32, s16)
TEST_1_UNARY (vqmovn, int32x4_t, int32x2_t, int64x2_t, s64, s32)
TEST_1_UNARY (vqmovn, uint8x16_t, uint8x8_t, uint16x8_t, u16, u8)
TEST_1_UNARY (vqmovn, uint16x8_t, uint16x4_t, uint32x4_t, u32, u16)
TEST_1_UNARY (vqmovn, uint32x4_t, uint32x2_t, uint64x2_t, u64, u32)

TEST_UNARY (vqmovun, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
TEST_UNARY (vqmovun, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
TEST_UNARY (vqmovun, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)
TEST_1_UNARY (vqmovun, uint8x16_t, uint8x8_t, int16x8_t, s16, u8)
TEST_1_UNARY (vqmovun, uint16x8_t, uint16x4_t, int32x4_t, s32, u16)
TEST_1_UNARY (vqmovun, uint32x4_t, uint32x2_t, int64x2_t, s64, u32)

/* { dg-final { scan-assembler-times "\\taddhn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tsubhn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\trsubhn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\traddhn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\trshrn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tshrn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tsqshrun2\\tv" 3} } */
/* { dg-final { scan-assembler-times "\\tsqrshrun2\\tv" 3} } */
/* { dg-final { scan-assembler-times "\\tsqshrn2\\tv" 3} } */
/* { dg-final { scan-assembler-times "\\tuqshrn2\\tv" 3} } */
/* { dg-final { scan-assembler-times "\\tsqrshrn2\\tv" 3} } */
/* { dg-final { scan-assembler-times "\\tuqrshrn2\\tv" 3} } */
/* { dg-final { scan-assembler-times "\\txtn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tuqxtn2\\tv" 3} } */
/* { dg-final { scan-assembler-times "\\tsqxtn2\\tv" 3} } */
/* { dg-final { scan-assembler-times "\\tsqxtun2\\tv" 3} } */
#define TEST_2_UNARY(name, rettype, intype, fs, rs) \
  rettype test_2_ ## name ## _ ## fs ## _high_combine \
		(intype a, intype b) \
	{ \
		return vcombine_ ## rs (name ## _ ## fs (a), \
					name ## _ ## fs (b)); \
	}

TEST_2_UNARY (vmovn, int8x16_t, int16x8_t, s16, s8)
TEST_2_UNARY (vmovn, int16x8_t, int32x4_t, s32, s16)
TEST_2_UNARY (vmovn, int32x4_t, int64x2_t, s64, s32)
TEST_2_UNARY (vmovn, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_UNARY (vmovn, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_UNARY (vmovn, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_UNARY (vqmovn, int8x16_t, int16x8_t, s16, s8)
TEST_2_UNARY (vqmovn, int16x8_t, int32x4_t, s32, s16)
TEST_2_UNARY (vqmovn, int32x4_t, int64x2_t, s64, s32)
TEST_2_UNARY (vqmovn, uint8x16_t, uint16x8_t, u16, u8)
TEST_2_UNARY (vqmovn, uint16x8_t, uint32x4_t, u32, u16)
TEST_2_UNARY (vqmovn, uint32x4_t, uint64x2_t, u64, u32)

TEST_2_UNARY (vqmovun, uint8x16_t, int16x8_t, s16, u8)
TEST_2_UNARY (vqmovun, uint16x8_t, int32x4_t, s32, u16)
TEST_2_UNARY (vqmovun, uint32x4_t, int64x2_t, s64, u32)

/* { dg-final { scan-assembler-times "\\taddhn2\\tv" 12} } */
/* { dg-final { scan-assembler-times "\\tsubhn2\\tv" 12} } */
/* { dg-final { scan-assembler-times "\\trsubhn2\\tv" 12} } */
/* { dg-final { scan-assembler-times "\\traddhn2\\tv" 12} } */
/* { dg-final { scan-assembler-times "\\trshrn2\\tv" 12} } */
/* { dg-final { scan-assembler-times "\\tshrn2\\tv" 12} } */
/* { dg-final { scan-assembler-times "\\tsqshrun2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tsqrshrun2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tsqshrn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tuqshrn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tsqrshrn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tuqrshrn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\txtn2\\tv" 12} } */
/* { dg-final { scan-assembler-times "\\tuqxtn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tsqxtn2\\tv" 6} } */
/* { dg-final { scan-assembler-times "\\tsqxtun2\\tv" 6} } */
gcc/testsuite/gcc.target/aarch64/simd/vmulx_laneq_f64_1.c

@@ -72,5 +72,5 @@ main (void)
  set_and_test_case3 ();
  return 0;
}
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[dD\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[dD\]\\\[1\\\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vmulxd_laneq_f64_1.c

@@ -58,5 +58,5 @@ main (void)
  set_and_test_case3 ();
  return 0;
}
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[dD\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[dD\]\\\[1\\\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vmulxs_lane_f32_1.c

@@ -57,5 +57,5 @@ main (void)
  set_and_test_case3 ();
  return 0;
}
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[1\\\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vmulxs_laneq_f32_1.c

@@ -79,7 +79,7 @@ main (void)
  set_and_test_case3 ();
  return 0;
}
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[1\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[2\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[3\\\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vqdmlalh_lane_s16.c

@@ -11,4 +11,4 @@ t_vqdmlalh_lane_s16 (int32_t a, int16_t b, int16x4_t c)
  return vqdmlalh_lane_s16 (a, b, c, 0);
}

/* { dg-final { scan-assembler-times "sqdmlal\[ \t\]+\[sS\]\[0-9\]+, ?\[hH\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[hH\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "sqdmlal\[ \t\]+\[sS\]\[0-9\]+, ?\[hH\]\[0-9\]+, ?\[hH\]\[0-9\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vqdmlals_lane_s32.c

@@ -11,4 +11,4 @@ t_vqdmlals_lane_s32 (int64_t a, int32_t b, int32x2_t c)
  return vqdmlals_lane_s32 (a, b, c, 0);
}

/* { dg-final { scan-assembler-times "sqdmlal\[ \t\]+\[dD\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "sqdmlal\[ \t\]+\[dD\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vqdmlslh_lane_s16.c

@@ -11,4 +11,4 @@ t_vqdmlslh_lane_s16 (int32_t a, int16_t b, int16x4_t c)
  return vqdmlslh_lane_s16 (a, b, c, 0);
}

/* { dg-final { scan-assembler-times "sqdmlsl\[ \t\]+\[sS\]\[0-9\]+, ?\[hH\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[hH\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "sqdmlsl\[ \t\]+\[sS\]\[0-9\]+, ?\[hH\]\[0-9\]+, ?\[hH\]\[0-9\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vqdmlsls_lane_s32.c

@@ -11,4 +11,4 @@ t_vqdmlsls_lane_s32 (int64_t a, int32_t b, int32x2_t c)
  return vqdmlsls_lane_s32 (a, b, c, 0);
}

/* { dg-final { scan-assembler-times "sqdmlsl\[ \t\]+\[dD\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "sqdmlsl\[ \t\]+\[dD\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vqdmullh_lane_s16.c

@@ -11,4 +11,4 @@ t_vqdmullh_lane_s16 (int16_t a, int16x4_t b)
  return vqdmullh_lane_s16 (a, b, 0);
}

/* { dg-final { scan-assembler-times "sqdmull\[ \t\]+\[sS\]\[0-9\]+, ?\[hH\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[hH\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "sqdmull\[ \t\]+\[sS\]\[0-9\]+, ?\[hH\]\[0-9\]+, ?\[hH\]\[0-9\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vqdmullh_laneq_s16.c

@@ -11,4 +11,4 @@ t_vqdmullh_laneq_s16 (int16_t a, int16x8_t b)
  return vqdmullh_laneq_s16 (a, b, 0);
}

/* { dg-final { scan-assembler-times "sqdmull\[ \t\]+\[sS\]\[0-9\]+, ?\[hH\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[hH\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "sqdmull\[ \t\]+\[sS\]\[0-9\]+, ?\[hH\]\[0-9\]+, ?\[hH\]\[0-9\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vqdmulls_lane_s32.c

@@ -11,4 +11,4 @@ t_vqdmulls_lane_s32 (int32_t a, int32x2_t b)
  return vqdmulls_lane_s32 (a, b, 0);
}

/* { dg-final { scan-assembler-times "sqdmull\[ \t\]+\[dD\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "sqdmull\[ \t\]+\[dD\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]\n" 1 } } */

gcc/testsuite/gcc.target/aarch64/simd/vqdmulls_laneq_s32.c

@@ -11,4 +11,4 @@ t_vqdmulls_laneq_s32 (int32_t a, int32x4_t b)
  return vqdmulls_laneq_s32 (a, b, 0);
}

/* { dg-final { scan-assembler-times "sqdmull\[ \t\]+\[dD\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[vV\]\[0-9\]+\.\[sS\]\\\[0\\\]\n" 1 } } */
/* { dg-final { scan-assembler-times "sqdmull\[ \t\]+\[dD\]\[0-9\]+, ?\[sS\]\[0-9\]+, ?\[sS\]\[0-9\]\n" 1 } } */
gcc/testsuite/gcc.target/aarch64/sve/dup_lane_1.c

@@ -56,15 +56,27 @@ TEST_ALL (DUP_LANE)

/* { dg-final { scan-assembler-not {\ttbl\t} } } */

/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.d, z[0-9]+\.d\[0\]} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.d, d[0-9]} 2 {
     target { aarch64_little_endian } } } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.d, z[0-9]+\.d\[0\]} 2 {
     target { aarch64_big_endian } } } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.d, z[0-9]+\.d\[2\]} 2 } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.d, z[0-9]+\.d\[3\]} 2 } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.s, z[0-9]+\.s\[0\]} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.s, s[0-9]} 2 {
     target { aarch64_little_endian } } } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.s, z[0-9]+\.s\[0\]} 2 {
     target { aarch64_big_endian } } } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.s, z[0-9]+\.s\[5\]} 2 } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.s, z[0-9]+\.s\[7\]} 2 } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.h, z[0-9]+\.h\[0\]} 2 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.h, h[0-9]} 2 {
     target { aarch64_little_endian } } } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.h, z[0-9]+\.h\[0\]} 2 {
     target { aarch64_big_endian } } } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.h, z[0-9]+\.h\[6\]} 2 } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.h, z[0-9]+\.h\[15\]} 2 } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.b, z[0-9]+\.b\[0\]} 1 } } */
/* { dg-final { scan-assembler-times {\tmov\tz[0-9]+\.b, b[0-9]} 1 {
     target { aarch64_little_endian } } } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.b, z[0-9]+\.b\[0\]} 1 {
     target { aarch64_big_endian } } } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.b, z[0-9]+\.b\[19\]} 1 } } */
/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.b, z[0-9]+\.b\[31\]} 1 } } */
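The updated scans above reflect that, on little-endian, broadcasting lane 0 no longer needs an indexed dup: lane 0 is already the low part of the vector register, so a mov from the corresponding scalar register suffices. A hedged, fixed-length sketch of the kind of function the test builds through its own TEST_ALL machinery (types, options and the exact assembly are illustrative, not taken from the test):

/* Sketch only: compiled with SVE enabled and a fixed vector length
   (e.g. -msve-vector-bits=256), duplicating lane 0 is expected to become
   a 'mov z0.d, d0'-style move on little-endian, while big-endian keeps
   'dup z0.d, z0.d[0]', matching the target selectors in the scans.  */
typedef long long v4di __attribute__ ((vector_size (32)));

v4di
dup_lane0_sketch (v4di x)
{
  v4di mask = { 0, 0, 0, 0 };
  return __builtin_shuffle (x, mask);
}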
gcc/testsuite/gcc.target/aarch64/sve/extract_1.c

@@ -56,7 +56,7 @@ typedef _Float16 vnx8hf __attribute__((vector_size (32)));

TEST_ALL (EXTRACT)

/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d\[0\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmov\tx[0-9]+, d[0-9]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d\[1\]\n} 1 } } */
/* { dg-final { scan-assembler-not {\tdup\td[0-9]+, v[0-9]+\.d\[0\]\n} } } */
/* { dg-final { scan-assembler-times {\tdup\td[0-9]+, v[0-9]+\.d\[1\]\n} 1 } } */
@@ -64,7 +64,7 @@ TEST_ALL (EXTRACT)
/* { dg-final { scan-assembler-times {\tlastb\tx[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\td[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */

/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[0\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmov\tw[0-9]+, s[0-9]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[1\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[3\]\n} 1 } } */
/* { dg-final { scan-assembler-not {\tdup\ts[0-9]+, v[0-9]+\.s\[0\]\n} } } */

gcc/testsuite/gcc.target/aarch64/sve/extract_2.c

@@ -56,7 +56,7 @@ typedef _Float16 vnx16hf __attribute__((vector_size (64)));

TEST_ALL (EXTRACT)

/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d\[0\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmov\tx[0-9]+, d[0-9]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d\[1\]\n} 1 } } */
/* { dg-final { scan-assembler-not {\tdup\td[0-9]+, v[0-9]+\.d\[0\]\n} } } */
/* { dg-final { scan-assembler-times {\tdup\td[0-9]+, v[0-9]+\.d\[1\]\n} 1 } } */
@@ -64,7 +64,7 @@ TEST_ALL (EXTRACT)
/* { dg-final { scan-assembler-times {\tlastb\tx[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\td[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */

/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[0\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmov\tw[0-9]+, s[0-9]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[1\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[3\]\n} 1 } } */
/* { dg-final { scan-assembler-not {\tdup\ts[0-9]+, v[0-9]+\.s\[0\]\n} } } */

gcc/testsuite/gcc.target/aarch64/sve/extract_3.c

@@ -77,7 +77,7 @@ typedef _Float16 vnx32hf __attribute__((vector_size (128)));

TEST_ALL (EXTRACT)

/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d\[0\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmov\tx[0-9]+, d[0-9]\n} 5 } } */
/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d\[1\]\n} 1 } } */
/* { dg-final { scan-assembler-not {\tdup\td[0-9]+, v[0-9]+\.d\[0\]\n} } } */
/* { dg-final { scan-assembler-times {\tdup\td[0-9]+, v[0-9]+\.d\[1\]\n} 1 } } */
@@ -86,7 +86,7 @@ TEST_ALL (EXTRACT)
/* { dg-final { scan-assembler-times {\tlastb\tx[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\td[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */

/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[0\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmov\tw[0-9]+, s[0-9]\n} 5 } } */
/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[1\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[3\]\n} 1 } } */
/* { dg-final { scan-assembler-not {\tdup\ts[0-9]+, v[0-9]+\.s\[0\]\n} } } */

gcc/testsuite/gcc.target/aarch64/sve/extract_4.c

@@ -84,7 +84,7 @@ typedef _Float16 v128hf __attribute__((vector_size (256)));

TEST_ALL (EXTRACT)

/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d\[0\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmov\tx[0-9]+, d[0-9]\n} 6 } } */
/* { dg-final { scan-assembler-times {\tumov\tx[0-9]+, v[0-9]+\.d\[1\]\n} 1 } } */
/* { dg-final { scan-assembler-not {\tdup\td[0-9]+, v[0-9]+\.d\[0\]\n} } } */
/* { dg-final { scan-assembler-times {\tdup\td[0-9]+, v[0-9]+\.d\[1\]\n} 1 } } */
@@ -93,7 +93,7 @@ TEST_ALL (EXTRACT)
/* { dg-final { scan-assembler-times {\tlastb\tx[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\td[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */

/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[0\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tfmov\tw[0-9]+, s[0-9]\n} 6 } } */
/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[1\]\n} 1 } } */
/* { dg-final { scan-assembler-times {\tumov\tw[0-9]+, v[0-9]+\.s\[3\]\n} 1 } } */
/* { dg-final { scan-assembler-not {\tdup\ts[0-9]+, v[0-9]+\.s\[0\]\n} } } */
gcc/testsuite/gcc.target/aarch64/sve/live_1.c

@@ -32,10 +32,9 @@ TEST_ALL (EXTRACT_LAST)
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7].s, } 4 } } */
/* { dg-final { scan-assembler-times {\twhilelo\tp[0-7].d, } 4 } } */

/* { dg-final { scan-assembler-times {\tlastb\tw[0-9]+, p[0-7], z[0-9]+\.b\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\tw[0-9]+, p[0-7], z[0-9]+\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\tb[0-9]+, p[0-7], z[0-9]+\.b\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\th[0-9]+, p[0-7], z[0-9]+\.h\n} 2 } } */
/* { dg-final { scan-assembler-times {\tlastb\tw[0-9]+, p[0-7], z[0-9]+\.s\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\tx[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\th[0-9]+, p[0-7], z[0-9]+\.h\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\ts[0-9]+, p[0-7], z[0-9]+\.s\n} 1 } } */
/* { dg-final { scan-assembler-times {\tlastb\td[0-9]+, p[0-7], z[0-9]+\.d\n} 1 } } */
gcc/testsuite/gcc.target/arm/crypto-vsha1cq_u32.c

@@ -31,5 +31,5 @@ uint32_t foo (void)
TEST_SHA1C_VEC_SELECT (GET_LANE)

/* { dg-final { scan-assembler-times {sha1c.32\tq[0-9]+, q[0-9]+} 5 } } */
/* { dg-final { scan-assembler-times {vdup.32\tq[0-9]+, r[0-9]+} 3 } } */
/* { dg-final { scan-assembler-times {vmov.32\tr[0-9]+, d[0-9]+\[[0-9]+\]+} 4 } } */
/* { dg-final { scan-assembler-times {vdup.32\tq[0-9]+, r[0-9]+} 4 } } */
/* { dg-final { scan-assembler-times {vmov.32\tr[0-9]+, d[0-9]+\[[0-9]+\]+} 3 } } */

gcc/testsuite/gcc.target/arm/crypto-vsha1h_u32.c

@@ -27,5 +27,5 @@ uint32_t foo (void)
TEST_SHA1H_VEC_SELECT (GET_LANE)

/* { dg-final { scan-assembler-times {sha1h.32\tq[0-9]+, q[0-9]+} 5 } } */
/* { dg-final { scan-assembler-times {vdup.32\tq[0-9]+, r[0-9]+} 3 } } */
/* { dg-final { scan-assembler-times {vmov.32\tr[0-9]+, d[0-9]+\[[0-9]+\]+} 8 } } */
/* { dg-final { scan-assembler-times {vdup.32\tq[0-9]+, r[0-9]+} 4 } } */
/* { dg-final { scan-assembler-times {vmov.32\tr[0-9]+, d[0-9]+\[[0-9]+\]+} 3 } } */

gcc/testsuite/gcc.target/arm/crypto-vsha1mq_u32.c

@@ -31,5 +31,5 @@ uint32_t foo (void)
TEST_SHA1M_VEC_SELECT (GET_LANE)

/* { dg-final { scan-assembler-times {sha1m.32\tq[0-9]+, q[0-9]+} 5 } } */
/* { dg-final { scan-assembler-times {vdup.32\tq[0-9]+, r[0-9]+} 3 } } */
/* { dg-final { scan-assembler-times {vmov.32\tr[0-9]+, d[0-9]+\[[0-9]+\]+} 4 } } */
/* { dg-final { scan-assembler-times {vdup.32\tq[0-9]+, r[0-9]+} 4 } } */
/* { dg-final { scan-assembler-times {vmov.32\tr[0-9]+, d[0-9]+\[[0-9]+\]+} 3 } } */

gcc/testsuite/gcc.target/arm/crypto-vsha1pq_u32.c

@@ -31,5 +31,5 @@ uint32_t foo (void)
TEST_SHA1P_VEC_SELECT (GET_LANE)

/* { dg-final { scan-assembler-times {sha1p.32\tq[0-9]+, q[0-9]+} 5 } } */
/* { dg-final { scan-assembler-times {vdup.32\tq[0-9]+, r[0-9]+} 3 } } */
/* { dg-final { scan-assembler-times {vmov.32\tr[0-9]+, d[0-9]+\[[0-9]+\]+} 4 } } */
/* { dg-final { scan-assembler-times {vdup.32\tq[0-9]+, r[0-9]+} 4 } } */
/* { dg-final { scan-assembler-times {vmov.32\tr[0-9]+, d[0-9]+\[[0-9]+\]+} 3 } } */
gcc/testsuite/gcc.target/arm/mve/intrinsics/vgetq_lane_f16.c

@@ -8,7 +8,7 @@
float16_t
foo (float16x8_t a)
{
  return vgetq_lane_f16 (a, 0);
  return vgetq_lane_f16 (a, 1);
}

/* { dg-final { scan-assembler "vmov.u16" } } */
@@ -16,7 +16,7 @@ foo (float16x8_t a)
float16_t
foo1 (float16x8_t a)
{
  return vgetq_lane (a, 0);
  return vgetq_lane (a, 1);
}

/* { dg-final { scan-assembler "vmov.u16" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vgetq_lane_f32.c

@@ -8,7 +8,7 @@
float32_t
foo (float32x4_t a)
{
  return vgetq_lane_f32 (a, 0);
  return vgetq_lane_f32 (a, 1);
}

/* { dg-final { scan-assembler "vmov.32" } } */
@@ -16,7 +16,7 @@ foo (float32x4_t a)
float32_t
foo1 (float32x4_t a)
{
  return vgetq_lane (a, 0);
  return vgetq_lane (a, 1);
}

/* { dg-final { scan-assembler "vmov.32" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vgetq_lane_s16.c

@@ -8,7 +8,7 @@
int16_t
foo (int16x8_t a)
{
  return vgetq_lane_s16 (a, 0);
  return vgetq_lane_s16 (a, 1);
}

/* { dg-final { scan-assembler "vmov.s16" } } */
@@ -16,7 +16,7 @@ foo (int16x8_t a)
int16_t
foo1 (int16x8_t a)
{
  return vgetq_lane (a, 0);
  return vgetq_lane (a, 1);
}

/* { dg-final { scan-assembler "vmov.s16" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vgetq_lane_s32.c

@@ -8,7 +8,7 @@
int32_t
foo (int32x4_t a)
{
  return vgetq_lane_s32 (a, 0);
  return vgetq_lane_s32 (a, 1);
}

/* { dg-final { scan-assembler "vmov.32" } } */
@@ -16,7 +16,7 @@ foo (int32x4_t a)
int32_t
foo1 (int32x4_t a)
{
  return vgetq_lane (a, 0);
  return vgetq_lane (a, 1);
}

/* { dg-final { scan-assembler "vmov.32" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vgetq_lane_s8.c

@@ -8,7 +8,7 @@
int8_t
foo (int8x16_t a)
{
  return vgetq_lane_s8 (a, 0);
  return vgetq_lane_s8 (a, 1);
}

/* { dg-final { scan-assembler "vmov.s8" } } */
@@ -16,7 +16,7 @@ foo (int8x16_t a)
int8_t
foo1 (int8x16_t a)
{
  return vgetq_lane (a, 0);
  return vgetq_lane (a, 1);
}

/* { dg-final { scan-assembler "vmov.s8" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vgetq_lane_u16.c

@@ -8,7 +8,7 @@
uint16_t
foo (uint16x8_t a)
{
  return vgetq_lane_u16 (a, 0);
  return vgetq_lane_u16 (a, 1);
}

/* { dg-final { scan-assembler "vmov.u16" } } */
@@ -16,7 +16,7 @@ foo (uint16x8_t a)
uint16_t
foo1 (uint16x8_t a)
{
  return vgetq_lane (a, 0);
  return vgetq_lane (a, 1);
}

/* { dg-final { scan-assembler "vmov.u16" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vgetq_lane_u32.c

@@ -8,7 +8,7 @@
uint32_t
foo (uint32x4_t a)
{
  return vgetq_lane_u32 (a, 0);
  return vgetq_lane_u32 (a, 1);
}

/* { dg-final { scan-assembler "vmov.32" } } */
@@ -16,7 +16,7 @@ foo (uint32x4_t a)
uint32_t
foo1 (uint32x4_t a)
{
  return vgetq_lane (a, 0);
  return vgetq_lane (a, 1);
}

/* { dg-final { scan-assembler "vmov.32" } } */

gcc/testsuite/gcc.target/arm/mve/intrinsics/vgetq_lane_u8.c

@@ -8,7 +8,7 @@
uint8_t
foo (uint8x16_t a)
{
  return vgetq_lane_u8 (a, 0);
  return vgetq_lane_u8 (a, 1);
}

/* { dg-final { scan-assembler "vmov.u8" } } */
@@ -16,7 +16,7 @@ foo (uint8x16_t a)
uint8_t
foo1 (uint8x16_t a)
{
  return vgetq_lane (a, 0);
  return vgetq_lane (a, 1);
}

/* { dg-final { scan-assembler "vmov.u8" } } */