diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 0184dbd5553..3aba416fef0 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,29 @@
+2014-01-30  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+
+	* config/rs6000/rs6000.c (rs6000_expand_vector_init): Use
+	gen_vsx_xxspltw_v4sf_direct instead of gen_vsx_xxspltw_v4sf;
+	remove element index adjustment for endian (now handled in vsx.md
+	and altivec.md).
+	(altivec_expand_vec_perm_const): Use
+	gen_altivec_vsplt[bhw]_direct instead of gen_altivec_vsplt[bhw].
+	* config/rs6000/vsx.md (UNSPEC_VSX_XXSPLTW): New unspec.
+	(vsx_xxspltw_<mode>): Adjust element index for little endian.
+	* config/rs6000/altivec.md (altivec_vspltb): Divide into a
+	define_expand and a new define_insn *altivec_vspltb_internal;
+	adjust for -maltivec=be on a little endian target.
+	(altivec_vspltb_direct): New.
+	(altivec_vsplth): Divide into a define_expand and a new
+	define_insn *altivec_vsplth_internal; adjust for -maltivec=be on a
+	little endian target.
+	(altivec_vsplth_direct): New.
+	(altivec_vspltw): Divide into a define_expand and a new
+	define_insn *altivec_vspltw_internal; adjust for -maltivec=be on a
+	little endian target.
+	(altivec_vspltw_direct): New.
+	(altivec_vspltsf): Divide into a define_expand and a new
+	define_insn *altivec_vspltsf_internal; adjust for -maltivec=be on
+	a little endian target.
+
 2014-01-30  Richard Biener  <rguenther@suse.de>
 
 	PR tree-optimization/59993
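The altivec.md changes that follow implement the splat adjustment as two index reflections that cancel for -maltivec=be. A minimal standalone sketch of how they compose, not part of the patch (the function and parameter names are invented for illustration; the patch applies the same logic to RTL operands):

#include <stdio.h>
#include <stdbool.h>

/* The immediate that ends up in "vspltb %0,%1,<imm>" for a 16-byte
   splat.  The define_expand reflects the index only for LE with
   -maltivec=be; the define_insn reflects it for any LE target, so
   the two reflections cancel in the -maltivec=be case.  */
static int
emitted_splat_immediate (int user_index, bool bytes_big_endian,
                         bool vector_elt_order_big)
{
  int idx = user_index;

  /* Mirror of define_expand "altivec_vspltb".  */
  if (!bytes_big_endian && vector_elt_order_big)
    idx = 15 - idx;

  /* Mirror of define_insn "*altivec_vspltb_internal".  */
  if (!bytes_big_endian)
    idx = 15 - idx;

  return idx;
}

int
main (void)
{
  printf ("BE:              %d\n", emitted_splat_immediate (3, true, true));   /* 3 */
  printf ("true LE:         %d\n", emitted_splat_immediate (3, false, false)); /* 12 */
  printf ("LE -maltivec=be: %d\n", emitted_splat_immediate (3, false, true));  /* 3 */
  return 0;
}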
diff --git a/gcc/config/rs6000/altivec.md b/gcc/config/rs6000/altivec.md
index 57e8adae950..aacd3fbc0d6 100644
--- a/gcc/config/rs6000/altivec.md
+++ b/gcc/config/rs6000/altivec.md
@@ -1600,44 +1600,187 @@
   "vsumsws %0,%1,%2"
   [(set_attr "type" "veccomplex")])
 
-(define_insn "altivec_vspltb"
+(define_expand "altivec_vspltb"
+  [(match_operand:V16QI 0 "register_operand" "")
+   (match_operand:V16QI 1 "register_operand" "")
+   (match_operand:QI 2 "u5bit_cint_operand" "")]
+  "TARGET_ALTIVEC"
+{
+  rtvec v;
+  rtx x;
+
+  /* Special handling for LE with -maltivec=be.  We have to reflect
+     the actual selected index for the splat in the RTL.  */
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    operands[2] = GEN_INT (15 - INTVAL (operands[2]));
+
+  v = gen_rtvec (1, operands[2]);
+  x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
+  x = gen_rtx_VEC_DUPLICATE (V16QImode, x);
+  emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+  DONE;
+})
+
+(define_insn "*altivec_vspltb_internal"
   [(set (match_operand:V16QI 0 "register_operand" "=v")
         (vec_duplicate:V16QI
          (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
                         (parallel
                          [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
   "TARGET_ALTIVEC"
+{
+  /* For true LE, this adjusts the selected index.  For LE with
+     -maltivec=be, this reverses what was done in the define_expand
+     because the instruction already has big-endian bias.  */
+  if (!BYTES_BIG_ENDIAN)
+    operands[2] = GEN_INT (15 - INTVAL (operands[2]));
+
+  return "vspltb %0,%1,%2";
+}
+  [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vspltb_direct"
+  [(set (match_operand:V16QI 0 "register_operand" "=v")
+        (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+                       (match_operand:QI 2 "u5bit_cint_operand" "i")]
+                      UNSPEC_VSPLT_DIRECT))]
+  "TARGET_ALTIVEC"
   "vspltb %0,%1,%2"
   [(set_attr "type" "vecperm")])
 
-(define_insn "altivec_vsplth"
+(define_expand "altivec_vsplth"
+  [(match_operand:V8HI 0 "register_operand" "")
+   (match_operand:V8HI 1 "register_operand" "")
+   (match_operand:QI 2 "u5bit_cint_operand" "")]
+  "TARGET_ALTIVEC"
+{
+  rtvec v;
+  rtx x;
+
+  /* Special handling for LE with -maltivec=be.  We have to reflect
+     the actual selected index for the splat in the RTL.  */
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    operands[2] = GEN_INT (7 - INTVAL (operands[2]));
+
+  v = gen_rtvec (1, operands[2]);
+  x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
+  x = gen_rtx_VEC_DUPLICATE (V8HImode, x);
+  emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+  DONE;
+})
+
+(define_insn "*altivec_vsplth_internal"
   [(set (match_operand:V8HI 0 "register_operand" "=v")
         (vec_duplicate:V8HI
          (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
                         (parallel
                          [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
   "TARGET_ALTIVEC"
+{
+  /* For true LE, this adjusts the selected index.  For LE with
+     -maltivec=be, this reverses what was done in the define_expand
+     because the instruction already has big-endian bias.  */
+  if (!BYTES_BIG_ENDIAN)
+    operands[2] = GEN_INT (7 - INTVAL (operands[2]));
+
+  return "vsplth %0,%1,%2";
+}
+  [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsplth_direct"
+  [(set (match_operand:V8HI 0 "register_operand" "=v")
+        (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+                      (match_operand:QI 2 "u5bit_cint_operand" "i")]
+                     UNSPEC_VSPLT_DIRECT))]
+  "TARGET_ALTIVEC"
   "vsplth %0,%1,%2"
   [(set_attr "type" "vecperm")])
 
-(define_insn "altivec_vspltw"
+(define_expand "altivec_vspltw"
+  [(match_operand:V4SI 0 "register_operand" "")
+   (match_operand:V4SI 1 "register_operand" "")
+   (match_operand:QI 2 "u5bit_cint_operand" "")]
+  "TARGET_ALTIVEC"
+{
+  rtvec v;
+  rtx x;
+
+  /* Special handling for LE with -maltivec=be.  We have to reflect
+     the actual selected index for the splat in the RTL.  */
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    operands[2] = GEN_INT (3 - INTVAL (operands[2]));
+
+  v = gen_rtvec (1, operands[2]);
+  x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
+  x = gen_rtx_VEC_DUPLICATE (V4SImode, x);
+  emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+  DONE;
+})
+
+(define_insn "*altivec_vspltw_internal"
   [(set (match_operand:V4SI 0 "register_operand" "=v")
         (vec_duplicate:V4SI
          (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
                         (parallel
                          [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
   "TARGET_ALTIVEC"
+{
+  /* For true LE, this adjusts the selected index.  For LE with
+     -maltivec=be, this reverses what was done in the define_expand
+     because the instruction already has big-endian bias.  */
+  if (!BYTES_BIG_ENDIAN)
+    operands[2] = GEN_INT (3 - INTVAL (operands[2]));
+
+  return "vspltw %0,%1,%2";
+}
+  [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vspltw_direct"
+  [(set (match_operand:V4SI 0 "register_operand" "=v")
+        (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+                      (match_operand:QI 2 "u5bit_cint_operand" "i")]
+                     UNSPEC_VSPLT_DIRECT))]
+  "TARGET_ALTIVEC"
   "vspltw %0,%1,%2"
   [(set_attr "type" "vecperm")])
 
-(define_insn "altivec_vspltsf"
+(define_expand "altivec_vspltsf"
+  [(match_operand:V4SF 0 "register_operand" "")
+   (match_operand:V4SF 1 "register_operand" "")
+   (match_operand:QI 2 "u5bit_cint_operand" "")]
+  "TARGET_ALTIVEC"
+{
+  rtvec v;
+  rtx x;
+
+  /* Special handling for LE with -maltivec=be.  We have to reflect
+     the actual selected index for the splat in the RTL.  */
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    operands[2] = GEN_INT (3 - INTVAL (operands[2]));
+
+  v = gen_rtvec (1, operands[2]);
+  x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v));
+  x = gen_rtx_VEC_DUPLICATE (V4SFmode, x);
+  emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+  DONE;
+})
+
+(define_insn "*altivec_vspltsf_internal"
   [(set (match_operand:V4SF 0 "register_operand" "=v")
         (vec_duplicate:V4SF
          (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
                         (parallel
                          [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
   "VECTOR_UNIT_ALTIVEC_P (V4SFmode)"
-  "vspltw %0,%1,%2"
+{
+  /* For true LE, this adjusts the selected index.  For LE with
+     -maltivec=be, this reverses what was done in the define_expand
+     because the instruction already has big-endian bias.  */
+  if (!BYTES_BIG_ENDIAN)
+    operands[2] = GEN_INT (3 - INTVAL (operands[2]));
+
+  return "vspltw %0,%1,%2";
+}
   [(set_attr "type" "vecperm")])
 
 (define_insn "altivec_vspltis<VI_char>"
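For reference, the user-visible semantics the patterns above implement, as a small illustrative C fragment (it assumes the usual GCC behavior that a vector initializer is laid out in memory order, with the first element at the lowest address):

#include <altivec.h>

/* For v = {0,1,2,3}, vec_splat (v, 0) selects a word according to
   the element-numbering convention only:
     BE:               element 0 = lowest-addressed word  -> {0,0,0,0}
     true LE:          element 0 = lowest-addressed word  -> {0,0,0,0}
     LE -maltivec=be:  element 0 = highest-addressed word -> {3,3,3,3}  */
vector unsigned int
splat_element_zero (vector unsigned int v)
{
  return vec_splat (v, 0);
}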
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 8753e16030b..ec5f5bf1bea 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -5485,7 +5485,7 @@ rs6000_expand_vector_init (rtx target, rtx vals)
 		     : gen_vsx_xscvdpsp_scalar (freg, sreg));
 
 	  emit_insn (cvt);
-	  emit_insn (gen_vsx_xxspltw_v4sf (target, freg, const0_rtx));
+	  emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg, const0_rtx));
 	}
       else
 	{
@@ -5522,11 +5522,9 @@
 					 gen_rtx_SET (VOIDmode, target, mem),
 					 x)));
 
-      field = (BYTES_BIG_ENDIAN ? const0_rtx
-	       : GEN_INT (GET_MODE_NUNITS (mode) - 1));
       x = gen_rtx_VEC_SELECT (inner_mode, target,
 			      gen_rtx_PARALLEL (VOIDmode,
-						gen_rtvec (1, field)));
+						gen_rtvec (1, const0_rtx)));
       emit_insn (gen_rtx_SET (VOIDmode, target,
 			      gen_rtx_VEC_DUPLICATE (mode, x)));
       return;
@@ -29980,7 +29978,7 @@
 	{
 	  if (!BYTES_BIG_ENDIAN)
 	    elt = 15 - elt;
-	  emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
+	  emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
 	  return true;
 	}
@@ -29993,8 +29991,8 @@
 	{
 	  int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
 	  x = gen_reg_rtx (V8HImode);
-	  emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
-					 GEN_INT (field)));
+	  emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
+						GEN_INT (field)));
 	  emit_move_insn (target, gen_lowpart (V16QImode, x));
 	  return true;
 	}
@@ -30012,8 +30010,8 @@
 	{
 	  int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
 	  x = gen_reg_rtx (V4SImode);
-	  emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
-					 GEN_INT (field)));
+	  emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
+						GEN_INT (field)));
 	  emit_move_insn (target, gen_lowpart (V16QImode, x));
 	  return true;
 	}
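The _direct patterns exist because altivec_expand_vec_perm_const above computes the hardware lane itself and must not have the insn's little-endian adjustment applied a second time. A standalone restatement of that caller-side lane computation (hedged sketch; elt is the big-endian byte index of the splatted element, as in the code above):

#include <stdbool.h>

/* Lane immediates passed to the vsplth/vspltw _direct patterns in
   altivec_expand_vec_perm_const.  */
static int
vsplth_direct_field (int elt, bool bytes_big_endian)
{
  return bytes_big_endian ? elt / 2 : 7 - elt / 2;
}

static int
vspltw_direct_field (int elt, bool bytes_big_endian)
{
  return bytes_big_endian ? elt / 4 : 3 - elt / 4;
}

For example, vspltw_direct_field (4, false) yields 2: byte 4 starts big-endian word 1, which the little-endian hardware counts as word 2.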
diff --git a/gcc/config/rs6000/vsx.md b/gcc/config/rs6000/vsx.md
index 5a80fe4ccd1..c6d558ad615 100644
--- a/gcc/config/rs6000/vsx.md
+++ b/gcc/config/rs6000/vsx.md
@@ -213,6 +213,7 @@
    UNSPEC_VSX_ROUND_I
    UNSPEC_VSX_ROUND_IC
    UNSPEC_VSX_SLDWI
+   UNSPEC_VSX_XXSPLTW
   ])
 
 ;; VSX moves
@@ -1751,6 +1752,20 @@
 	  (parallel
 	   [(match_operand:QI 2 "u5bit_cint_operand" "i,i")]))))]
   "VECTOR_MEM_VSX_P (<MODE>mode)"
+{
+  if (!BYTES_BIG_ENDIAN)
+    operands[2] = GEN_INT (3 - INTVAL (operands[2]));
+
+  return "xxspltw %x0,%x1,%2";
+}
+  [(set_attr "type" "vecperm")])
+
+(define_insn "vsx_xxspltw_<mode>_direct"
+  [(set (match_operand:VSX_W 0 "vsx_register_operand" "=wf,?wa")
+        (unspec:VSX_W [(match_operand:VSX_W 1 "vsx_register_operand" "wf,wa")
+                       (match_operand:QI 2 "u5bit_cint_operand" "i,i")]
+                      UNSPEC_VSX_XXSPLTW))]
+  "VECTOR_MEM_VSX_P (<MODE>mode)"
   "xxspltw %x0,%x1,%2"
   [(set_attr "type" "vecperm")])
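This also explains the rs6000_expand_vector_init hunk earlier: after the scalar conversion the value sits in a fixed hardware word, so the caller wants immediate 0 emitted verbatim rather than endian-adjusted. A sketch of the immediate each variant now prints (illustrative; in the patch the adjustment happens in the insn's output code):

#include <stdbool.h>

/* The immediate finally printed in "xxspltw %x0,%x1,<imm>".  */
static int
xxspltw_immediate (int idx, bool direct, bool bytes_big_endian)
{
  if (!direct && !bytes_big_endian)
    idx = 3 - idx;   /* vsx_xxspltw_<mode> adjusts for little endian.  */
  return idx;        /* vsx_xxspltw_<mode>_direct emits it unchanged.  */
}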
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 6c22615181a..d8d8c076bda 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,12 @@
+2014-01-30  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+
+	* gcc.dg/vmx/splat.c: New.
+	* gcc.dg/vmx/splat-vsx.c: New.
+	* gcc.dg/vmx/splat-be-order.c: New.
+	* gcc.dg/vmx/splat-vsx-be-order.c: New.
+	* gcc.dg/vmx/eg-5.c: Remove special casing for little endian.
+	* gcc.dg/vmx/sn7153.c: Add special casing for little endian.
+
 2014-01-30  Richard Biener  <rguenther@suse.de>
 
 	PR tree-optimization/59993
diff --git a/gcc/testsuite/gcc.dg/vmx/eg-5.c b/gcc/testsuite/gcc.dg/vmx/eg-5.c
index eb43e846b79..eb4b4e02836 100644
--- a/gcc/testsuite/gcc.dg/vmx/eg-5.c
+++ b/gcc/testsuite/gcc.dg/vmx/eg-5.c
@@ -6,19 +6,10 @@ matvecmul4 (vector float c0, vector float c1, vector float c2,
 {
   /* Set result to a vector of f32 0's */
   vector float result = ((vector float){0.,0.,0.,0.});
-
-#ifdef __LITTLE_ENDIAN__
-  result = vec_madd (c0, vec_splat (v, 3), result);
-  result = vec_madd (c1, vec_splat (v, 2), result);
-  result = vec_madd (c2, vec_splat (v, 1), result);
-  result = vec_madd (c3, vec_splat (v, 0), result);
-#else
   result = vec_madd (c0, vec_splat (v, 0), result);
   result = vec_madd (c1, vec_splat (v, 1), result);
   result = vec_madd (c2, vec_splat (v, 2), result);
   result = vec_madd (c3, vec_splat (v, 3), result);
-#endif
-
   return result;
 }
diff --git a/gcc/testsuite/gcc.dg/vmx/sn7153.c b/gcc/testsuite/gcc.dg/vmx/sn7153.c
index a498a862006..2381a891cd3 100644
--- a/gcc/testsuite/gcc.dg/vmx/sn7153.c
+++ b/gcc/testsuite/gcc.dg/vmx/sn7153.c
@@ -34,7 +34,11 @@ main()
 
 void validate_sat()
 {
+#ifdef __LITTLE_ENDIAN__
+  if (vec_any_ne(vec_splat(vec_mfvscr(), 0), ((vector unsigned short){1,1,1,1,1,1,1,1})))
+#else
   if (vec_any_ne(vec_splat(vec_mfvscr(), 7), ((vector unsigned short){1,1,1,1,1,1,1,1})))
+#endif
     {
       union {vector unsigned short v; unsigned short s[8];} u;
       u.v = vec_mfvscr();
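The sn7153.c change tracks the same renumbering for vec_mfvscr: the VSCR's SAT flag lives in the halfword that big-endian numbering calls element 7 and little-endian numbering calls element 0. An equivalent way to read it, as an illustrative sketch mirroring the union the test already uses for its debug output:

#include <altivec.h>

static unsigned short
saturation_halfword (void)
{
  union { vector unsigned short v; unsigned short s[8]; } u;
  u.v = vec_mfvscr ();
#ifdef __LITTLE_ENDIAN__
  return u.s[0];   /* SAT halfword is element 0 in LE numbering.  */
#else
  return u.s[7];   /* The same halfword is element 7 in BE numbering.  */
#endif
}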
diff --git a/gcc/testsuite/gcc.dg/vmx/splat-be-order.c b/gcc/testsuite/gcc.dg/vmx/splat-be-order.c
new file mode 100644
index 00000000000..e265ae4be20
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/splat-be-order.c
@@ -0,0 +1,59 @@
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
+
+#include "harness.h"
+
+static void test()
+{
+  /* Input vectors.  */
+  vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector unsigned short vus = {0,1,2,3,4,5,6,7};
+  vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
+  vector unsigned int vui = {0,1,2,3};
+  vector signed int vsi = {-2,-1,0,1};
+  vector float vf = {-2.0,-1.0,0.0,1.0};
+
+  /* Result vectors.  */
+  vector unsigned char vucr;
+  vector signed char vscr;
+  vector unsigned short vusr;
+  vector signed short vssr;
+  vector unsigned int vuir;
+  vector signed int vsir;
+  vector float vfr;
+
+  /* Expected result vectors.  */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned char vucer = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
+  vector signed char vscer = {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
+  vector unsigned short vuser = {0,0,0,0,0,0,0,0};
+  vector signed short vsser = {3,3,3,3,3,3,3,3};
+  vector unsigned int vuier = {1,1,1,1};
+  vector signed int vsier = {-2,-2,-2,-2};
+  vector float vfer = {0.0,0.0,0.0,0.0};
+#else
+  vector unsigned char vucer = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+  vector signed char vscer = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+  vector unsigned short vuser = {7,7,7,7,7,7,7,7};
+  vector signed short vsser = {-4,-4,-4,-4,-4,-4,-4,-4};
+  vector unsigned int vuier = {2,2,2,2};
+  vector signed int vsier = {1,1,1,1};
+  vector float vfer = {-1.0,-1.0,-1.0,-1.0};
+#endif
+
+  vucr = vec_splat (vuc, 1);
+  vscr = vec_splat (vsc, 8);
+  vusr = vec_splat (vus, 7);
+  vssr = vec_splat (vss, 0);
+  vuir = vec_splat (vui, 2);
+  vsir = vec_splat (vsi, 3);
+  vfr = vec_splat (vf, 1);
+
+  check (vec_all_eq (vucr, vucer), "vuc");
+  check (vec_all_eq (vscr, vscer), "vsc");
+  check (vec_all_eq (vusr, vuser), "vus");
+  check (vec_all_eq (vssr, vsser), "vss");
+  check (vec_all_eq (vuir, vuier), "vui");
+  check (vec_all_eq (vsir, vsier), "vsi");
+  check (vec_all_eq (vfr, vfer ), "vf");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/splat-vsx-be-order.c b/gcc/testsuite/gcc.dg/vmx/splat-vsx-be-order.c
new file mode 100644
index 00000000000..cd389bd0f66
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/splat-vsx-be-order.c
@@ -0,0 +1,37 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static void test()
+{
+  /* Input vectors.  */
+  vector unsigned int vui = {0,1,2,3};
+  vector signed int vsi = {-2,-1,0,1};
+  vector float vf = {-2.0,-1.0,0.0,1.0};
+
+  /* Result vectors.  */
+  vector unsigned int vuir;
+  vector signed int vsir;
+  vector float vfr;
+
+  /* Expected result vectors.  */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  vector unsigned int vuier = {1,1,1,1};
+  vector signed int vsier = {-2,-2,-2,-2};
+  vector float vfer = {0.0,0.0,0.0,0.0};
+#else
+  vector unsigned int vuier = {2,2,2,2};
+  vector signed int vsier = {1,1,1,1};
+  vector float vfer = {-1.0,-1.0,-1.0,-1.0};
+#endif
+
+  vuir = vec_splat (vui, 2);
+  vsir = vec_splat (vsi, 3);
+  vfr = vec_splat (vf, 1);
+
+  check (vec_all_eq (vuir, vuier), "vui");
+  check (vec_all_eq (vsir, vsier), "vsi");
+  check (vec_all_eq (vfr, vfer ), "vf");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/splat-vsx.c b/gcc/testsuite/gcc.dg/vmx/splat-vsx.c
new file mode 100644
index 00000000000..5a6e7dfe46c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/splat-vsx.c
@@ -0,0 +1,31 @@
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-maltivec -mabi=altivec -std=gnu99 -mvsx" } */
+
+#include "harness.h"
+
+static void test()
+{
+  /* Input vectors.  */
+  vector unsigned int vui = {0,1,2,3};
+  vector signed int vsi = {-2,-1,0,1};
+  vector float vf = {-2.0,-1.0,0.0,1.0};
+
+  /* Result vectors.  */
+  vector unsigned int vuir;
+  vector signed int vsir;
+  vector float vfr;
+
+  /* Expected result vectors.  */
+  vector unsigned int vuier = {2,2,2,2};
+  vector signed int vsier = {1,1,1,1};
+  vector float vfer = {-1.0,-1.0,-1.0,-1.0};
+
+  vuir = vec_splat (vui, 2);
+  vsir = vec_splat (vsi, 3);
+  vfr = vec_splat (vf, 1);
+
+  check (vec_all_eq (vuir, vuier), "vui");
+  check (vec_all_eq (vsir, vsier), "vsi");
+  check (vec_all_eq (vfr, vfer ), "vf");
+}
diff --git a/gcc/testsuite/gcc.dg/vmx/splat.c b/gcc/testsuite/gcc.dg/vmx/splat.c
new file mode 100644
index 00000000000..e45974ac910
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vmx/splat.c
@@ -0,0 +1,47 @@
+#include "harness.h"
+
+static void test()
+{
+  /* Input vectors.  */
+  vector unsigned char vuc = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+  vector signed char vsc = {-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7};
+  vector unsigned short vus = {0,1,2,3,4,5,6,7};
+  vector signed short vss = {-4,-3,-2,-1,0,1,2,3};
+  vector unsigned int vui = {0,1,2,3};
+  vector signed int vsi = {-2,-1,0,1};
+  vector float vf = {-2.0,-1.0,0.0,1.0};
+
+  /* Result vectors.  */
+  vector unsigned char vucr;
+  vector signed char vscr;
+  vector unsigned short vusr;
+  vector signed short vssr;
+  vector unsigned int vuir;
+  vector signed int vsir;
+  vector float vfr;
+
+  /* Expected result vectors.  */
+  vector unsigned char vucer = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
+  vector signed char vscer = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+  vector unsigned short vuser = {7,7,7,7,7,7,7,7};
+  vector signed short vsser = {-4,-4,-4,-4,-4,-4,-4,-4};
+  vector unsigned int vuier = {2,2,2,2};
+  vector signed int vsier = {1,1,1,1};
+  vector float vfer = {-1.0,-1.0,-1.0,-1.0};
+
+  vucr = vec_splat (vuc, 1);
+  vscr = vec_splat (vsc, 8);
+  vusr = vec_splat (vus, 7);
+  vssr = vec_splat (vss, 0);
+  vuir = vec_splat (vui, 2);
+  vsir = vec_splat (vsi, 3);
+  vfr = vec_splat (vf, 1);
+
+  check (vec_all_eq (vucr, vucer), "vuc");
+  check (vec_all_eq (vscr, vscer), "vsc");
+  check (vec_all_eq (vusr, vuser), "vus");
+  check (vec_all_eq (vssr, vsser), "vss");
+  check (vec_all_eq (vuir, vuier), "vui");
+  check (vec_all_eq (vsir, vsier), "vsi");
+  check (vec_all_eq (vfr, vfer ), "vf");
+}
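All the expected-result tables in the new tests follow a single rule: with big-endian element numbering in effect on a little-endian target, splatting element idx of an n-element vector picks stored element n-1-idx; otherwise it picks element idx directly. A scalar reference model of that rule (illustrative only; not part of the harness):

#include <stdbool.h>

/* Scalar model of vec_splat: be_order_on_le corresponds to compiling
   with -maltivec=be for a little-endian target.  */
static void
reference_splat_u32 (const unsigned int *in, unsigned int *out,
                     int n, int idx, bool be_order_on_le)
{
  int stored = be_order_on_le ? (n - 1) - idx : idx;
  for (int i = 0; i < n; i++)
    out[i] = in[stored];
}

Applied to {0,1,2,3} with idx 2 and be_order_on_le set, this yields {1,1,1,1}, matching vuier in the little-endian branch of splat-vsx-be-order.c above.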