[AArch64] Fix ICE on non-constant indices to __builtin_aarch64_im_lane_boundsi

gcc/:

	* config/aarch64/aarch64-builtins.c (aarch64_types_binopv_qualifiers,
	TYPES_BINOPV): Delete.
	(enum aarch64_builtins): Add AARCH64_SIMD_BUILTIN_LANE_CHECK and
	AARCH64_SIMD_PATTERN_START.
	(aarch64_init_simd_builtins): Register
	__builtin_aarch64_im_lane_boundsi; use AARCH64_SIMD_PATTERN_START.
	(aarch64_simd_expand_builtin): Handle AARCH64_SIMD_BUILTIN_LANE_CHECK;
	use AARCH64_SIMD_PATTERN_START.

	* config/aarch64/aarch64-simd.md (aarch64_im_lane_boundsi): Delete.
	* config/aarch64/aarch64-simd-builtins.def (im_lane_bound): Delete.

	* config/aarch64/arm_neon.h (__AARCH64_LANE_CHECK): New.
	(__aarch64_vget_lane_f64, __aarch64_vget_lane_s64,
	__aarch64_vget_lane_u64, __aarch64_vset_lane_any, vdupd_lane_f64,
	vdupd_lane_s64, vdupd_lane_u64, vext_f32, vext_f64, vext_p8, vext_p16,
	vext_s8, vext_s16, vext_s32, vext_s64, vext_u8, vext_u16, vext_u32,
	vext_u64, vextq_f32, vextq_f64, vextq_p8, vextq_p16, vextq_s8,
	vextq_s16, vextq_s32, vextq_s64, vextq_u8, vextq_u16, vextq_u32,
	vextq_u64, vmulq_lane_f64): Use __AARCH64_LANE_CHECK.

gcc/testsuite/:

	* gcc.target/aarch64/simd/vset_lane_s16_const_1.c: New test.

From-SVN: r218532
Author:    Alan Lawrence <alan.lawrence@arm.com>
Committed: Alan Lawrence, 2014-12-09 19:52:22 +0000
Commit:    661fce82a6 (parent 2310e29f49)

7 changed files with 104 additions and 56 deletions
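
In user terms, a minimal reproducer sketch (mine, not part of the patch; the new test below exercises the same path via vset_lane_s16): before this change, any intrinsic that routed its lane index through __builtin_aarch64_im_lane_boundsi crashed the compiler when the index was not a compile-time constant, because the checking define_expand demanded immediate operands; it now gets an ordinary diagnostic:

#include <arm_neon.h>

int16x4_t
rotate (int16x4_t a, int16x4_t b, int n)
{
  /* n is not a constant expression: formerly an ICE in the
     aarch64_im_lane_boundsi expander, now the error
     "lane index must be a constant immediate".  */
  return vext_s16 (a, b, n);
}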

gcc/ChangeLog

@@ -1,3 +1,26 @@
+2014-12-09  Alan Lawrence  <alan.lawrence@arm.com>
+
+	* config/aarch64/aarch64-builtins.c (aarch64_types_binopv_qualifiers,
+	TYPES_BINOPV): Delete.
+	(enum aarch64_builtins): Add AARCH64_SIMD_BUILTIN_LANE_CHECK and
+	AARCH64_SIMD_PATTERN_START.
+	(aarch64_init_simd_builtins): Register
+	__builtin_aarch64_im_lane_boundsi; use AARCH64_SIMD_PATTERN_START.
+	(aarch64_simd_expand_builtin): Handle AARCH64_SIMD_BUILTIN_LANE_CHECK;
+	use AARCH64_SIMD_PATTERN_START.
+
+	* config/aarch64/aarch64-simd.md (aarch64_im_lane_boundsi): Delete.
+	* config/aarch64/aarch64-simd-builtins.def (im_lane_bound): Delete.
+
+	* config/aarch64/arm_neon.h (__AARCH64_LANE_CHECK): New.
+	(__aarch64_vget_lane_f64, __aarch64_vget_lane_s64,
+	__aarch64_vget_lane_u64, __aarch64_vset_lane_any, vdupd_lane_f64,
+	vdupd_lane_s64, vdupd_lane_u64, vext_f32, vext_f64, vext_p8, vext_p16,
+	vext_s8, vext_s16, vext_s32, vext_s64, vext_u8, vext_u16, vext_u32,
+	vext_u64, vextq_f32, vextq_f64, vextq_p8, vextq_p16, vextq_s8,
+	vextq_s16, vextq_s32, vextq_s64, vextq_u8, vextq_u16, vextq_u32,
+	vextq_u64, vmulq_lane_f64): Use __AARCH64_LANE_CHECK.
+
 2014-12-09  Alan Lawrence  <alan.lawrence@arm.com>
 
 	* config/aarch64/arm_neon.h (__AARCH64_NUM_LANES, __aarch64_lane *2):

gcc/config/aarch64/aarch64-builtins.c

@@ -141,10 +141,6 @@ aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_none, qualifier_none, qualifier_maybe_immediate };
 #define TYPES_BINOP (aarch64_types_binop_qualifiers)
-static enum aarch64_type_qualifiers
-aarch64_types_binopv_qualifiers[SIMD_MAX_BUILTIN_ARGS]
-  = { qualifier_void, qualifier_none, qualifier_none };
-#define TYPES_BINOPV (aarch64_types_binopv_qualifiers)
 static enum aarch64_type_qualifiers
 aarch64_types_binopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_unsigned, qualifier_unsigned, qualifier_unsigned };
 #define TYPES_BINOPU (aarch64_types_binopu_qualifiers)
@@ -342,9 +338,12 @@ enum aarch64_builtins
   AARCH64_BUILTIN_SET_FPSR,
 
   AARCH64_SIMD_BUILTIN_BASE,
+  AARCH64_SIMD_BUILTIN_LANE_CHECK,
 #include "aarch64-simd-builtins.def"
-  AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_BUILTIN_BASE
-			      + ARRAY_SIZE (aarch64_simd_builtin_data),
+  /* The first enum element which is based on an insn_data pattern.  */
+  AARCH64_SIMD_PATTERN_START = AARCH64_SIMD_BUILTIN_LANE_CHECK + 1,
+  AARCH64_SIMD_BUILTIN_MAX = AARCH64_SIMD_PATTERN_START
+			      + ARRAY_SIZE (aarch64_simd_builtin_data) - 1,
   AARCH64_CRC32_BUILTIN_BASE,
   AARCH64_CRC32_BUILTINS
   AARCH64_CRC32_BUILTIN_MAX,
@@ -685,7 +684,7 @@ aarch64_init_simd_builtin_scalar_types (void)
 static void
 aarch64_init_simd_builtins (void)
 {
-  unsigned int i, fcode = AARCH64_SIMD_BUILTIN_BASE + 1;
+  unsigned int i, fcode = AARCH64_SIMD_PATTERN_START;
 
   aarch64_init_simd_builtin_types ();
@@ -695,6 +694,15 @@ aarch64_init_simd_builtins (void)
      system.  */
   aarch64_init_simd_builtin_scalar_types ();
 
+  tree lane_check_fpr = build_function_type_list (void_type_node,
+                                                  intSI_type_node,
+                                                  intSI_type_node,
+                                                  NULL);
+  aarch64_builtin_decls[AARCH64_SIMD_BUILTIN_LANE_CHECK] =
+      add_builtin_function ("__builtin_aarch64_im_lane_boundsi", lane_check_fpr,
+                            AARCH64_SIMD_BUILTIN_LANE_CHECK, BUILT_IN_MD,
+                            NULL, NULL_TREE);
+
   for (i = 0; i < ARRAY_SIZE (aarch64_simd_builtin_data); i++, fcode++)
     {
       bool print_type_signature_p = false;
@@ -974,8 +982,20 @@ aarch64_simd_expand_args (rtx target, int icode, int have_retval,
 rtx
 aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
 {
+  if (fcode == AARCH64_SIMD_BUILTIN_LANE_CHECK)
+    {
+      tree nlanes = CALL_EXPR_ARG (exp, 0);
+      gcc_assert (TREE_CODE (nlanes) == INTEGER_CST);
+      rtx lane_idx = expand_normal (CALL_EXPR_ARG (exp, 1));
+      if (CONST_INT_P (lane_idx))
+	aarch64_simd_lane_bounds (lane_idx, 0, TREE_INT_CST_LOW (nlanes), exp);
+      else
+	error ("%Klane index must be a constant immediate", exp);
+      /* Don't generate any RTL.  */
+      return const0_rtx;
+    }
   aarch64_simd_builtin_datum *d =
-		&aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)];
+		&aarch64_simd_builtin_data[fcode - AARCH64_SIMD_PATTERN_START];
   enum insn_code icode = d->code;
   builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS];
   int num_args = insn_data[d->code].n_operands;
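
The bounds check now happens when the call is expanded to RTL, so a non-constant index reaches the error () path instead of faulting in an expander that requires immediate operands. A sketch of the behaviour, calling the internal builtin directly (my example; user code normally reaches it only through the arm_neon.h macros):

void
check (int idx)
{
  /* Argument 0 is the lane count, which must be a literal constant per
     the gcc_assert above; argument 1 is the index under test.  With a
     non-constant idx this now produces "lane index must be a constant
     immediate" at the call site rather than an ICE.  */
  __builtin_aarch64_im_lane_boundsi (4, idx);
}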

gcc/config/aarch64/aarch64-simd-builtins.def

@@ -397,5 +397,3 @@
   VAR1 (BINOPP, crypto_pmull, 0, di)
   VAR1 (BINOPP, crypto_pmull, 0, v2di)
 
-  /* Meta-op to check lane bounds of immediate in aarch64_expand_builtin. */
-  VAR1 (BINOPV, im_lane_bound, 0, si)

gcc/config/aarch64/aarch64-simd.md

@@ -4596,19 +4596,6 @@
   [(set_attr "type" "neon_ext<q>")]
 )
 
-;; This exists solely to check the arguments to the corresponding __builtin.
-;; Used where we want an error for out-of-range indices which would otherwise
-;; be silently wrapped (e.g. the mask to a __builtin_shuffle).
-(define_expand "aarch64_im_lane_boundsi"
-  [(match_operand:SI 0 "immediate_operand" "i")
-   (match_operand:SI 1 "immediate_operand" "i")]
-  "TARGET_SIMD"
-{
-  aarch64_simd_lane_bounds (operands[0], 0, INTVAL (operands[1]), NULL);
-  DONE;
-}
-)
-
 (define_insn "aarch64_rev<REVERSE:rev_op><mode>"
   [(set (match_operand:VALL 0 "register_operand" "=w")
        (unspec:VALL [(match_operand:VALL 1 "register_operand" "w")]
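
The deleted comment alludes to silently-wrapped indices. As an illustration of why an explicit check is wanted (my example, not from the patch): GCC reduces __builtin_shuffle mask elements modulo the number of selectable elements, so an out-of-range constant would otherwise be accepted without complaint:

#include <arm_neon.h>

int16x4_t
pick (int16x4_t a, int16x4_t b)
{
  /* Two 4-lane inputs give 8 selectable elements; mask values are taken
     modulo 8, so the out-of-range 9 quietly selects element 1 instead
     of raising an error.  */
  return __builtin_shuffle (a, b, (int16x4_t) {9, 0, 1, 2});
}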

gcc/config/aarch64/arm_neon.h

@@ -436,7 +436,7 @@ typedef struct poly16x8x4_t
   __aarch64_vget_lane_any (v2sf, , , __a, __b)
 #define __aarch64_vget_lane_f64(__a, __b) __extension__  \
   ({  \
-    __builtin_aarch64_im_lane_boundsi (__b, 1);  \
+    __AARCH64_LANE_CHECK (__a, __b);  \
     __a[0];  \
   })

@@ -453,7 +453,7 @@ typedef struct poly16x8x4_t
   __aarch64_vget_lane_any (v2si, , ,__a, __b)
 #define __aarch64_vget_lane_s64(__a, __b) __extension__  \
   ({  \
-    __builtin_aarch64_im_lane_boundsi (__b, 1);  \
+    __AARCH64_LANE_CHECK (__a, __b);  \
     __a[0];  \
   })

@@ -465,7 +465,7 @@ typedef struct poly16x8x4_t
   __aarch64_vget_lane_any (v2si, (uint32_t), (int32x2_t), __a, __b)
 #define __aarch64_vget_lane_u64(__a, __b) __extension__  \
   ({  \
-    __builtin_aarch64_im_lane_boundsi (__b, 1);  \
+    __AARCH64_LANE_CHECK (__a, __b);  \
     __a[0];  \
   })
@@ -607,6 +607,8 @@ typedef struct poly16x8x4_t
 /* Internal macro for lane indices. */
 
 #define __AARCH64_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
+#define __AARCH64_LANE_CHECK(__vec, __idx)  \
+  __builtin_aarch64_im_lane_boundsi (__AARCH64_NUM_LANES (__vec), __idx)
 
 /* For big-endian, GCC's vector indices are the opposite way around
    to the architectural lane indices used by Neon intrinsics. */

@@ -621,8 +623,7 @@ typedef struct poly16x8x4_t
 #define __aarch64_vset_lane_any(__elem, __vec, __index)  \
   __extension__  \
   ({  \
-    __builtin_aarch64_im_lane_boundsi (__index,  \
-                                       __AARCH64_NUM_LANES (__vec));  \
+    __AARCH64_LANE_CHECK (__vec, __index);  \
     __vec[__aarch64_lane (__vec, __index)] = __elem;  \
     __vec;  \
   })
@@ -14764,21 +14765,21 @@ vdups_lane_u32 (uint32x2_t __a, const int __b)
 __extension__ static __inline float64_t __attribute__ ((__always_inline__))
 vdupd_lane_f64 (float64x1_t __a, const int __b)
 {
-  __builtin_aarch64_im_lane_boundsi (__b, 1);
+  __AARCH64_LANE_CHECK (__a, __b);
   return __a[0];
 }
 
 __extension__ static __inline int64_t __attribute__ ((__always_inline__))
 vdupd_lane_s64 (int64x1_t __a, const int __b)
 {
-  __builtin_aarch64_im_lane_boundsi (__b, 1);
+  __AARCH64_LANE_CHECK (__a, __b);
   return __a[0];
 }
 
 __extension__ static __inline uint64_t __attribute__ ((__always_inline__))
 vdupd_lane_u64 (uint64x1_t __a, const int __b)
 {
-  __builtin_aarch64_im_lane_boundsi (__b, 1);
+  __AARCH64_LANE_CHECK (__a, __b);
   return __a[0];
 }
@@ -14863,7 +14864,7 @@ vdupd_laneq_u64 (uint64x2_t __a, const int __b)
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
 vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
 #else

@@ -14874,14 +14875,14 @@ vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c)
 __extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
 vext_f64 (float64x1_t __a, float64x1_t __b, __const int __c)
 {
+  __AARCH64_LANE_CHECK (__a, __c);
   /* The only possible index to the assembler instruction returns element 0. */
-  __builtin_aarch64_im_lane_boundsi (__c, 1);
   return __a;
 }
 
 __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
 vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
@@ -14894,7 +14895,7 @@ vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c)
 __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
 vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
                             (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});

@@ -14906,7 +14907,7 @@ vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});

@@ -14919,7 +14920,7 @@ vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
                             (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});

@@ -14931,7 +14932,7 @@ vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c)
 __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
 vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
 #else

@@ -14942,15 +14943,15 @@ vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c)
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
 vext_s64 (int64x1_t __a, int64x1_t __b, __const int __c)
 {
+  __AARCH64_LANE_CHECK (__a, __c);
   /* The only possible index to the assembler instruction returns element 0. */
-  __builtin_aarch64_im_lane_boundsi (__c, 1);
   return __a;
 }
 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
 vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});

@@ -14963,7 +14964,7 @@ vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c)
 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
 vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
                             (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});

@@ -14975,7 +14976,7 @@ vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c)
 __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
 vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
 #else

@@ -14986,15 +14987,15 @@ vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c)
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vext_u64 (uint64x1_t __a, uint64x1_t __b, __const int __c)
 {
+  __AARCH64_LANE_CHECK (__a, __c);
   /* The only possible index to the assembler instruction returns element 0. */
-  __builtin_aarch64_im_lane_boundsi (__c, 1);
   return __a;
 }
 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
 vextq_f32 (float32x4_t __a, float32x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
                             (uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});

@@ -15006,7 +15007,7 @@ vextq_f32 (float32x4_t __a, float32x4_t __b, __const int __c)
 __extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
 vextq_f64 (float64x2_t __a, float64x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
 #else

@@ -15017,7 +15018,7 @@ vextq_f64 (float64x2_t __a, float64x2_t __b, __const int __c)
 __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
 vextq_p8 (poly8x16_t __a, poly8x16_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 16);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x16_t)
       {16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,

@@ -15032,7 +15033,7 @@ vextq_p8 (poly8x16_t __a, poly8x16_t __b, __const int __c)
 __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
 vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint16x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});

@@ -15045,7 +15046,7 @@ vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c)
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 16);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint8x16_t)
       {16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,

@@ -15060,7 +15061,7 @@ vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vextq_s16 (int16x8_t __a, int16x8_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 8);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint16x8_t)
       {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});

@@ -15073,7 +15074,7 @@ vextq_s16 (int16x8_t __a, int16x8_t __b, __const int __c)
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vextq_s32 (int32x4_t __a, int32x4_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 4);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a,
                             (uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});

@@ -15085,7 +15086,7 @@ vextq_s32 (int32x4_t __a, int32x4_t __b, __const int __c)
 __extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
 vextq_s64 (int64x2_t __a, int64x2_t __b, __const int __c)
 {
-  __builtin_aarch64_im_lane_boundsi (__c, 2);
+  __AARCH64_LANE_CHECK (__a, __c);
 #ifdef __AARCH64EB__
   return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
 #else
@ -15096,7 +15097,7 @@ vextq_s64 (int64x2_t __a, int64x2_t __b, __const int __c)
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vextq_u8 (uint8x16_t __a, uint8x16_t __b, __const int __c)
{
__builtin_aarch64_im_lane_boundsi (__c, 16);
__AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
return __builtin_shuffle (__b, __a, (uint8x16_t)
{16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,
@ -15111,7 +15112,7 @@ vextq_u8 (uint8x16_t __a, uint8x16_t __b, __const int __c)
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vextq_u16 (uint16x8_t __a, uint16x8_t __b, __const int __c)
{
__builtin_aarch64_im_lane_boundsi (__c, 8);
__AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
return __builtin_shuffle (__b, __a, (uint16x8_t)
{8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
@ -15124,7 +15125,7 @@ vextq_u16 (uint16x8_t __a, uint16x8_t __b, __const int __c)
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vextq_u32 (uint32x4_t __a, uint32x4_t __b, __const int __c)
{
__builtin_aarch64_im_lane_boundsi (__c, 4);
__AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
return __builtin_shuffle (__b, __a,
(uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
@ -15136,7 +15137,7 @@ vextq_u32 (uint32x4_t __a, uint32x4_t __b, __const int __c)
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vextq_u64 (uint64x2_t __a, uint64x2_t __b, __const int __c)
{
__builtin_aarch64_im_lane_boundsi (__c, 2);
__AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
#else
@@ -18965,7 +18966,7 @@ vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane)
 __extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
 vmulq_lane_f64 (float64x2_t __a, float64x1_t __b, const int __lane)
 {
-  __builtin_aarch64_im_lane_boundsi (__lane, 1);
+  __AARCH64_LANE_CHECK (__a, __lane);
   return __a * __b[0];
 }
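
For reference, a sketch (not part of the patch) of how the new macro expands for a 4-lane vector; the lane count is now derived from the vector argument's type instead of being hard-coded at each call site. Note that __AARCH64_LANE_CHECK is an internal arm_neon.h macro, used here only to show the expansion:

#include <arm_neon.h>

void
f (int16x4_t v)
{
  /* After preprocessing this becomes
     __builtin_aarch64_im_lane_boundsi (sizeof (v) / sizeof (v[0]), 3),
     i.e. __builtin_aarch64_im_lane_boundsi (4, 3).  The index 3 is in
     range, so no diagnostic is emitted, and the builtin generates no
     RTL at all.  */
  __AARCH64_LANE_CHECK (v, 3);
}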

gcc/testsuite/ChangeLog

@@ -1,3 +1,7 @@
+2014-12-09  Alan Lawrence  <alan.lawrence@arm.com>
+
+	* gcc.target/aarch64/simd/vset_lane_s16_const_1.c: New test.
+
 2014-12-09  Alan Lawrence  <alan.lawrence@arm.com>
 
 	* gcc.target/aarch64/vld1_lane-o0.c: New test.

gcc/testsuite/gcc.target/aarch64/simd/vset_lane_s16_const_1.c

@@ -0,0 +1,15 @@
+/* Test error message when passing a non-constant value in as a lane index. */
+
+/* { dg-do assemble } */
+/* { dg-options "-std=c99" } */
+
+#include <arm_neon.h>
+
+int
+main (int argc, char **argv)
+{
+  int16x4_t in = vcreate_s16 (0xdeadbeef00000000ULL);
+  /* { dg-error "must be a constant immediate" "" { target *-*-* } 0 } */
+  int16x4_t out = vset_lane_s16 (65535, in, argc);
+  return vget_lane_s16 (out, 0);
+}