Segregate sparc's handling of vector vs. non-vector modes.

gcc/
	* config/sparc/sparc-modes.def: Add single entry vector modes for
	DImode and SImode.
	* config/sparc/sparc.md (V32, V32I, V64, V64I, V64N8): Delete
	mode iterators.
	(mov<V32:mode>): Revert to plain SFmode pattern.
	(*movsf_insn): Likewise.
	(mov<V64:mode>): Revert to plain DFmode pattern.
	(*movdf_insn_sp32): Likewise.
	(*movdf_insn_sp32_v9): Likewise.
	(*movdf_insn_sp64): Likewise.
	(V64 mode splitters): Likewise.
	(addsi3): Remove VIS alternatives.
	(subsi3): Likewise.
	(and<V64I:mode>3): Revert to DImode only pattern.
	(and<V64I:mode>3_sp32): Likewise.
	(*and<V64I:mode>3_sp64): Likewise.
	(and<V32I:mode>3): Likewise.
	(*and_not_<V64I:mode>_sp32): Likewise.
	(*and_not_<V64I:mode>_sp64): Likewise.
	(*and_not_<V32I:mode>): Likewise.
	(ior<V64I:mode>3): Likewise.
	(*ior<V64I:mode>3_sp32): Likewise.
	(*ior<V64I:mode>3_sp64): Likewise.
	(ior<V32I:mode>3): Likewise.
	(*or_not_<V64I:mode>_sp32): Likewise.
	(*or_not_<V64I:mode>_sp64): Likewise.
	(*or_not_<V32I:mode>): Likewise.
	(xor<V64I:mode>3): Likewise.
	(*xor<V64I:mode>3_sp32): Likewise.
	(*xor<V64I:mode>3_sp64): Likewise.
	(xor<V32I:mode>3): Likewise.
	(V64I mode splitters): Likewise.
	(*xor_not_<V64I:mode>_sp32): Likewise.
	(*xor_not_<V64I:mode>_sp64): Likewise.
	(*xor_not_<V32I:mode>): Likewise.
	(one_cmpl<V64I:mode>2): Likewise.
	(*one_cmpl<V64I:mode>2_sp32): Likewise.
	(*one_cmpl<V64I:mode>2_sp64): Likewise.
	(one_cmpl<V32I:mode>2): Likewise.
	(VM32, VM64, VMALL): New mode iterators.
	(vbits, vconstr, vfptype): New mode attributes.
	(mov<VMALL:mode>): New expander.
	(*mov<VM32:mode>_insn): New insn.
	(*mov<VM64:mode>_insn_sp64): New insn.
	(*mov<VM64:mode>_insn_sp32): New insn, and associated splitter
	specifically for the register to memory case.
	(vec_init<mode>): New expander.
	(VADDSUB): New mode iterator.
	(<plusminus_insn>v2si3, <plusminus_insn>v2hi3): Remove and replace
	with...
	(<plusminus_insn><mode>3): New consolidated pattern.
	(VL): New mode iterator for logical operations.
	(vlsuf): New mode attribute.
	(vlop): New code iterator.
	(vlinsn, vlninsn): New code attributes.
	(<code><mode>3): New insn for non-negated vector logical ops.
	(*not_<code><mode>3): Likewise for negated variants.
	(*nand<mode>_vis): New insn.
	(vlnotop): New code iterator.
	(*<code>_not1<mode>_vis, *<code>_not2<mode>_vis): New insns.
	(one_cmpl<mode>2): New insn.
	(faligndata<V64I:mode>_vis): Rewrite to use VM64 iterator.
	(bshuffle<VM64:mode>_vis): Likewise.
	(v<vis3_shift_patname><mode>3): Use GCM mode iterator.
	(fp<plusminus_insn>64_vis): Use V1DI mode.
	(VASS mode iterator): Use V1SI not SI mode.
	* config/sparc/sparc.c (sparc_vis_init_builtins): Account for
	single-entry vector mode changes.
	(sparc_expand_builtin): Likewise.
	(sparc_expand_vector_init): New function.
	* config/sparc/sparc-protos.h (sparc_expand_vector_init): Declare.

gcc/testsuite/

	* gcc.target/sparc/fand.c: Remove __LP64__ ifdefs and expect
	all operations to emit VIS instructions.
	* gcc.target/sparc/fandnot.c: Likewise.
	* gcc.target/sparc/fnot.c: Likewise.
	* gcc.target/sparc/for.c: Likewise.
	* gcc.target/sparc/fornot.c: Likewise.
	* gcc.target/sparc/fxnor.c: Likewise.
	* gcc.target/sparc/fxor.c: Likewise.
	* gcc.target/sparc/combined-1.c: Revert change to use -O2, no longer
	needed.

From-SVN: r180112
Commit e00560c290 (parent b2c4e3157f) by David S. Miller, 2011-10-17 22:50:29 +00:00
14 changed files with 529 additions and 495 deletions
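As an illustration of the user-visible effect (an editor's sketch modeled on the gcc.target/sparc/fand.c test below, not part of the commit): once vector and non-vector modes are segregated, logical operations on GNU C vector types are expected to use the VIS instructions on both 32-bit and 64-bit targets, which is why the tests below drop their __LP64__ ifdefs and double their expected scan counts.

/* Editor's sketch only -- compile with something like -O -mcpu=ultrasparc -mvis.  */
typedef short vec16 __attribute__ ((vector_size (8)));

vec16 vand (vec16 a, vec16 b)
{
  /* Expected to compile to a single "fand" VIS instruction on -m32 and -m64 alike.  */
  return a & b;
}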

gcc/ChangeLog

@ -1,3 +1,77 @@
2011-10-17 David S. Miller <davem@davemloft.net>
* config/sparc/sparc-modes.def: Add single entry vector modes for
DImode and SImode.
* config/sparc/sparc.md (V32, V32I, V64, V64I, V64N8): Delete
mode iterators.
(mov<V32:mode>): Revert to plain SFmode pattern.
(*movsf_insn): Likewise.
(mov<V64:mode>): Revert to plain DFmode pattern.
(*movdf_insn_sp32): Likewise.
(*movdf_insn_sp32_v9): Likewise.
(*movdf_insn_sp64): Likewise.
(V64 mode splitters): Likewise.
(addsi3): Remove VIS alternatives.
(subsi3): Likewise.
(and<V64I:mode>3): Revert to DImode only pattern.
(and<V64I:mode>3_sp32): Likewise.
(*and<V64I:mode>3_sp64): Likewise.
(and<V32I:mode>3): Likewise.
(*and_not_<V64I:mode>_sp32): Likewise.
(*and_not_<V64I:mode>_sp64): Likewise.
(*and_not_<V32I:mode>): Likewise.
(ior<V64I:mode>3): Likewise.
(*ior<V64I:mode>3_sp32): Likewise.
(*ior<V64I:mode>3_sp64): Likewise.
(ior<V32I:mode>3): Likewise.
(*or_not_<V64I:mode>_sp32): Likewise.
(*or_not_<V64I:mode>_sp64): Likewise.
(*or_not_<V32I:mode>): Likewise.
(xor<V64I:mode>3): Likewise.
(*xor<V64I:mode>3_sp32): Likewise.
(*xor<V64I:mode>3_sp64): Likewise.
(xor<V32I:mode>3): Likewise.
(V64I mode splitters): Likewise.
(*xor_not_<V64I:mode>_sp32): Likewise.
(*xor_not_<V64I:mode>_sp64): Likewise.
(*xor_not_<V32I:mode>): Likewise.
(one_cmpl<V64I:mode>2): Likewise.
(*one_cmpl<V64I:mode>2_sp32): Likewise.
(*one_cmpl<V64I:mode>2_sp64): Likewise.
(one_cmpl<V32I:mode>2): Likewise.
(VM32, VM64, VMALL): New mode iterators.
(vbits, vconstr, vfptype): New mode attributes.
(mov<VMALL:mode>): New expander.
(*mov<VM32:mode>_insn): New insn.
(*mov<VM64:mode>_insn_sp64): New insn.
(*mov<VM64:mode>_insn_sp32): New insn, and associated splitter
specifically for the register to memory case.
(vec_init<mode>): New expander.
(VADDSUB): New mode iterator.
(<plusminus_insn>v2si3, <plusminus_insn>v2hi3): Remove and replace
with...
(<plusminus_insn><mode>3): New consolidated pattern.
(VL): New mode iterator for logical operations.
(vlsuf): New mode attribute.
(vlop): New code iterator.
(vlinsn, vlninsn): New code attributes.
(<code><mode>3): New insn for non-negated vector logical ops.
(*not_<code><mode>3): Likewise for negated variants.
(*nand<mode>_vis): New insn.
(vlnotop): New code iterator.
(*<code>_not1<mode>_vis, *<code>_not2<mode>_vis): New insns.
(one_cmpl<mode>2): New insn.
(faligndata<V64I:mode>_vis): Rewrite to use VM64 iterator.
(bshuffle<VM64:mode>_vis): Likewise.
(v<vis3_shift_patname><mode>3): Use GCM mode iterator.
(fp<plusminus_insn>64_vis): Use V1DI mode.
(VASS mode iterator): Use V1SI not SI mode.
* config/sparc/sparc.c (sparc_vis_init_builtins): Account for
single-entry vector mode changes.
(sparc_expand_builtin): Likewise.
(sparc_expand_vector_init): New function.
* config/sparc/sparc-protos.h (sparc_expand_vector_init): Declare.
2011-10-17 Kai Tietz <ktietz@redhat.com>
* fold-const.c (simple_operand_p_2): New function.

gcc/config/sparc/sparc-modes.def

@ -45,4 +45,6 @@ CC_MODE (CCFPE);
/* Vector modes. */
VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */
VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
VECTOR_MODES (INT, 4); /* V4QI V2HI */
VECTOR_MODES (INT, 4); /* V4QI V2HI */
VECTOR_MODE (INT, DI, 1); /* V1DI */
VECTOR_MODE (INT, SI, 1); /* V1SI */

gcc/config/sparc/sparc-protos.h

@ -106,6 +106,7 @@ extern int sparc_check_64 (rtx, rtx);
extern rtx gen_df_reg (rtx, int);
extern void sparc_expand_compare_and_swap_12 (rtx, rtx, rtx, rtx);
extern const char *output_v8plus_mult (rtx, rtx *, const char *);
extern void sparc_expand_vector_init (rtx, rtx);
#endif /* RTX_CODE */
#endif /* __SPARC_PROTOS_H__ */

gcc/config/sparc/sparc.c

@ -9403,7 +9403,7 @@ sparc_vis_init_builtins (void)
v8qi_ftype_v8qi_v8qi);
def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
v2si_ftype_v2si_v2si);
def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatav1di_vis,
di_ftype_di_di);
def_builtin ("__builtin_vis_write_gsr", CODE_FOR_wrgsr_vis,
@ -9539,7 +9539,7 @@ sparc_vis_init_builtins (void)
v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpadd32", CODE_FOR_addv2si3,
v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addsi3,
def_builtin_const ("__builtin_vis_fpadd32s", CODE_FOR_addv1si3,
v1si_ftype_v1si_v1si);
def_builtin_const ("__builtin_vis_fpsub16", CODE_FOR_subv4hi3,
v4hi_ftype_v4hi_v4hi);
@ -9547,7 +9547,7 @@ sparc_vis_init_builtins (void)
v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpsub32", CODE_FOR_subv2si3,
v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subsi3,
def_builtin_const ("__builtin_vis_fpsub32s", CODE_FOR_subv1si3,
v1si_ftype_v1si_v1si);
/* Three-dimensional array addressing. */
@ -9585,7 +9585,7 @@ sparc_vis_init_builtins (void)
v8qi_ftype_v8qi_v8qi);
def_builtin ("__builtin_vis_bshufflev2si", CODE_FOR_bshufflev2si_vis,
v2si_ftype_v2si_v2si);
def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshuffledi_vis,
def_builtin ("__builtin_vis_bshuffledi", CODE_FOR_bshufflev1di_vis,
di_ftype_di_di);
}
@ -9654,11 +9654,11 @@ sparc_vis_init_builtins (void)
v2hi_ftype_v2hi_v2hi);
def_builtin_const ("__builtin_vis_fpadds32", CODE_FOR_ssaddv2si3,
v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddsi3,
def_builtin_const ("__builtin_vis_fpadds32s", CODE_FOR_ssaddv1si3,
v1si_ftype_v1si_v1si);
def_builtin_const ("__builtin_vis_fpsubs32", CODE_FOR_sssubv2si3,
v2si_ftype_v2si_v2si);
def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubsi3,
def_builtin_const ("__builtin_vis_fpsubs32s", CODE_FOR_sssubv1si3,
v1si_ftype_v1si_v1si);
if (TARGET_ARCH64)
@ -9748,6 +9748,13 @@ sparc_expand_builtin (tree exp, rtx target,
insn_op = &insn_data[icode].operand[idx];
op[arg_count] = expand_normal (arg);
if (insn_op->mode == V1DImode
&& GET_MODE (op[arg_count]) == DImode)
op[arg_count] = gen_lowpart (V1DImode, op[arg_count]);
else if (insn_op->mode == V1SImode
&& GET_MODE (op[arg_count]) == SImode)
op[arg_count] = gen_lowpart (V1SImode, op[arg_count]);
if (! (*insn_data[icode].operand[idx].predicate) (op[arg_count],
insn_op->mode))
op[arg_count] = copy_to_mode_reg (insn_op->mode, op[arg_count]);
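A usage sketch of what the conversion above enables (editor's example, not from the patch; the "long long" spelling of the 64-bit argument type is an assumption): builtins such as __builtin_vis_faligndatadi keep their scalar di_ftype_di_di interface even though the underlying insn now operates on V1DImode, because sparc_expand_builtin re-views the scalar operands with gen_lowpart before the insn predicates are checked.

/* Hypothetical usage, requires -mvis; the exact C type of the 64-bit
   arguments is assumed, not taken from the patch.  */
long long align_merge (long long a, long long b)
{
  /* Scalar DImode operands are re-viewed as V1DImode internally.  */
  return __builtin_vis_faligndatadi (a, b);
}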
@ -11060,4 +11067,34 @@ output_v8plus_mult (rtx insn, rtx *operands, const char *name)
}
}
void
sparc_expand_vector_init (rtx target, rtx vals)
{
enum machine_mode mode = GET_MODE (target);
enum machine_mode inner_mode = GET_MODE_INNER (mode);
int n_elts = GET_MODE_NUNITS (mode);
int i, n_var = 0;
rtx mem;
for (i = 0; i < n_elts; i++)
{
rtx x = XVECEXP (vals, 0, i);
if (!CONSTANT_P (x))
n_var++;
}
if (n_var == 0)
{
emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
return;
}
mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
for (i = 0; i < n_elts; i++)
emit_move_insn (adjust_address_nv (mem, inner_mode,
i * GET_MODE_SIZE (inner_mode)),
XVECEXP (vals, 0, i));
emit_move_insn (target, mem);
}
#include "gt-sparc.h"

gcc/config/sparc/sparc.md (diff not shown: too large to display)

gcc/testsuite/ChangeLog

@ -1,3 +1,16 @@
2011-10-17 David S. Miller <davem@davemloft.net>
* gcc.target/sparc/fand.c: Remove __LP64__ ifdefs and expect
all operations to emit VIS instructions.
* gcc.target/sparc/fandnot.c: Likewise.
* gcc.target/sparc/fnot.c: Likewise.
* gcc.target/sparc/for.c: Likewise.
* gcc.target/sparc/fornot.c: Likewise.
* gcc.target/sparc/fxnor.c: Likewise.
* gcc.target/sparc/fxor.c: Likewise.
* gcc.target/sparc/combined-1.c: Revert change to use -O2, no longer
needed.
2011-10-17 Jakub Jelinek <jakub@redhat.com>
* gcc.c-torture/execute/vshuf-v16hi.c: New test.

gcc/testsuite/gcc.target/sparc/combined-1.c

@ -1,5 +1,5 @@
/* { dg-do compile } */
/* { dg-options "-O2 -mcpu=ultrasparc -mvis" } */
/* { dg-options "-O -mcpu=ultrasparc -mvis" } */
typedef short vec16 __attribute__((vector_size(8)));
typedef int vec32 __attribute__((vector_size(8)));

gcc/testsuite/gcc.target/sparc/fand.c

@ -12,13 +12,10 @@ vec8 fun8(void)
return foo1_8 () & foo2_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2(vec8 a, vec8 b)
{
return a & b;
}
#endif
extern vec16 foo1_16(void);
extern vec16 foo2_16(void);
@ -28,13 +25,10 @@ vec16 fun16(void)
return foo1_16 () & foo2_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2(vec16 a, vec16 b)
{
return a & b;
}
#endif
extern vec32 foo1_32(void);
extern vec32 foo2_32(void);
@ -44,12 +38,9 @@ vec32 fun32(void)
return foo1_32 () & foo2_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2(vec32 a, vec32 b)
{
return a & b;
}
#endif
/* { dg-final { scan-assembler-times "fand\t%" 3 } } */
/* { dg-final { scan-assembler-times "fand\t%" 6 } } */

gcc/testsuite/gcc.target/sparc/fandnot.c

@ -12,13 +12,10 @@ vec8 fun8(void)
return ~foo1_8 () & foo2_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2(vec8 a, vec8 b)
{
return ~a & b;
}
#endif
extern vec16 foo1_16(void);
extern vec16 foo2_16(void);
@ -28,13 +25,10 @@ vec16 fun16(void)
return ~foo1_16 () & foo2_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2(vec16 a, vec16 b)
{
return ~a & b;
}
#endif
extern vec32 foo1_32(void);
extern vec32 foo2_32(void);
@ -44,13 +38,10 @@ vec32 fun32(void)
return ~foo1_32 () & foo2_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2(vec32 a, vec32 b)
{
return ~a & b;
}
#endif
/* This should be transformed into ~b & a. */
@ -59,38 +50,29 @@ vec8 fun8b(void)
return foo1_8 () & ~foo2_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2b(vec8 a, vec8 b)
{
return a & ~b;
}
#endif
vec16 fun16b(void)
{
return foo1_16 () & ~foo2_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2b(vec16 a, vec16 b)
{
return a & ~b;
}
#endif
vec32 fun32b(void)
{
return foo1_32 () & ~foo2_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2b(vec32 a, vec32 b)
{
return a & ~b;
}
#endif
/* { dg-final { scan-assembler-times "fandnot1\t%" 6 } } */
/* { dg-final { scan-assembler-times "fandnot1\t%" 12 } } */

gcc/testsuite/gcc.target/sparc/fnot.c

@ -12,13 +12,10 @@ vec8 fun8(void)
return ~foo1_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2(vec8 a)
{
foo2_8 (~a);
}
#endif
extern vec16 foo1_16(void);
extern void foo2_16(vec16);
@ -29,13 +26,10 @@ vec16 fun16(void)
return ~foo1_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2(vec16 a)
{
foo2_16 (~a);
}
#endif
extern vec32 foo1_32(void);
extern void foo2_32(vec32);
@ -45,12 +39,9 @@ vec32 fun32(void)
return ~foo1_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2(vec32 a)
{
foo2_32 (~a);
}
#endif
/* { dg-final { scan-assembler-times "fnot1\t%" 3 } } */
/* { dg-final { scan-assembler-times "fnot1\t%" 6 } } */

gcc/testsuite/gcc.target/sparc/for.c

@ -12,13 +12,10 @@ vec8 fun8(void)
return foo1_8 () | foo2_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2(vec8 a, vec8 b)
{
return a | b;
}
#endif
extern vec16 foo1_16(void);
extern vec16 foo2_16(void);
@ -28,13 +25,10 @@ vec16 fun16(void)
return foo1_16 () | foo2_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2(vec16 a, vec16 b)
{
return a | b;
}
#endif
extern vec32 foo1_32(void);
extern vec32 foo2_32(void);
@ -44,12 +38,9 @@ vec32 fun32(void)
return foo1_32 () | foo2_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2(vec32 a, vec32 b)
{
return a | b;
}
#endif
/* { dg-final { scan-assembler-times "for\t%" 3 } } */
/* { dg-final { scan-assembler-times "for\t%" 6 } } */

gcc/testsuite/gcc.target/sparc/fornot.c

@ -12,13 +12,10 @@ vec8 fun8(void)
return ~foo1_8 () | foo2_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2(vec8 a, vec8 b)
{
return ~a | b;
}
#endif
extern vec16 foo1_16(void);
extern vec16 foo2_16(void);
@ -28,13 +25,10 @@ vec16 fun16(void)
return ~foo1_16 () | foo2_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2(vec16 a, vec16 b)
{
return ~a | b;
}
#endif
extern vec32 foo1_32(void);
extern vec32 foo2_32(void);
@ -44,14 +38,10 @@ vec32 fun32(void)
return ~foo1_32 () | foo2_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2(vec32 a, vec32 b)
{
return ~a | b;
}
#endif
/* This should be transformed into ~b | a. */
vec8 fun8b(void)
@ -59,38 +49,29 @@ vec8 fun8b(void)
return foo1_8 () | ~foo2_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2b(vec8 a, vec8 b)
{
return a | ~b;
}
#endif
vec16 fun16b(void)
{
return foo1_16 () | ~foo2_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2b(vec16 a, vec16 b)
{
return a | ~b;
}
#endif
vec32 fun32b(void)
{
return foo1_32 () | ~foo2_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2b(vec32 a, vec32 b)
{
return a | ~b;
}
#endif
/* { dg-final { scan-assembler-times "fornot1\t%" 6 } } */
/* { dg-final { scan-assembler-times "fornot1\t%" 12 } } */

gcc/testsuite/gcc.target/sparc/fxnor.c

@ -12,13 +12,10 @@ vec8 fun8(void)
return ~(foo1_8 () ^ foo2_8 ());
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2(vec8 a, vec8 b)
{
return ~(a ^ b);
}
#endif
extern vec16 foo1_16(void);
extern vec16 foo2_16(void);
@ -28,13 +25,10 @@ vec16 fun16(void)
return ~(foo1_16 () ^ foo2_16 ());
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2(vec16 a, vec16 b)
{
return ~(a ^ b);
}
#endif
extern vec32 foo1_32(void);
extern vec32 foo2_32(void);
@ -44,13 +38,10 @@ vec32 fun32(void)
return ~(foo1_32 () ^ foo2_32 ());
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2(vec32 a, vec32 b)
{
return ~(a ^ b);
}
#endif
/* This should be transformed into ~(b ^ a). */
@ -59,38 +50,29 @@ vec8 fun8b(void)
return foo1_8 () ^ ~foo2_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2b(vec8 a, vec8 b)
{
return a ^ ~b;
}
#endif
vec16 fun16b(void)
{
return foo1_16 () ^ ~foo2_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2b(vec16 a, vec16 b)
{
return a ^ ~b;
}
#endif
vec32 fun32b(void)
{
return foo1_32 () ^ ~foo2_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2b(vec32 a, vec32 b)
{
return a ^ ~b;
}
#endif
/* { dg-final { scan-assembler-times "fxnor\t%" 6 } } */
/* { dg-final { scan-assembler-times "fxnor\t%" 12 } } */

gcc/testsuite/gcc.target/sparc/fxor.c

@ -12,13 +12,10 @@ vec8 fun8(void)
return foo1_8 () ^ foo2_8 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec8 fun8_2(vec8 a, vec8 b)
{
return a ^ b;
}
#endif
extern vec16 foo1_16(void);
extern vec16 foo2_16(void);
@ -28,13 +25,10 @@ vec16 fun16(void)
return foo1_16 () ^ foo2_16 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec16 fun16_2(vec16 a, vec16 b)
{
return a ^ b;
}
#endif
extern vec32 foo1_32(void);
extern vec32 foo2_32(void);
@ -44,12 +38,9 @@ vec32 fun32(void)
return foo1_32 () ^ foo2_32 ();
}
#ifndef __LP64__
/* Test the 32-bit splitter. */
vec32 fun32_2(vec32 a, vec32 b)
{
return a ^ b;
}
#endif
/* { dg-final { scan-assembler-times "fxor\t%" 3 } } */
/* { dg-final { scan-assembler-times "fxor\t%" 6 } } */