vector.md (mov<mode>): Emit permuted move sequences for LE VSX loads and stores at expand time.

gcc:

2013-10-07  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

	* config/rs6000/vector.md (mov<mode>): Emit permuted move
	sequences for LE VSX loads and stores at expand time.
	* config/rs6000/rs6000-protos.h (rs6000_emit_le_vsx_move): New
	prototype.
	* config/rs6000/rs6000.c (rs6000_const_vec): New.
	(rs6000_gen_le_vsx_permute): New.
	(rs6000_emit_le_vsx_load): New.
	(rs6000_emit_le_vsx_store): New.
	(rs6000_emit_le_vsx_move): New.
	* config/rs6000/vsx.md (*vsx_le_perm_load_v2di): New.
	(*vsx_le_perm_load_v4si): New.
	(*vsx_le_perm_load_v8hi): New.
	(*vsx_le_perm_load_v16qi): New.
	(*vsx_le_perm_store_v2di): New.
	(*vsx_le_perm_store_v4si): New.
	(*vsx_le_perm_store_v8hi): New.
	(*vsx_le_perm_store_v16qi): New.
	(*vsx_xxpermdi2_le_<mode>): New.
	(*vsx_xxpermdi4_le_<mode>): New.
	(*vsx_xxpermdi8_le_V8HI): New.
	(*vsx_xxpermdi16_le_V16QI): New.
	(*vsx_lxvd2x2_le_<mode>): New.
	(*vsx_lxvd2x4_le_<mode>): New.
	(*vsx_lxvd2x8_le_V8HI): New.
	(*vsx_lxvd2x16_le_V16QI): New.
	(*vsx_stxvd2x2_le_<mode>): New.
	(*vsx_stxvd2x4_le_<mode>): New.
	(*vsx_stxvd2x8_le_V8HI): New.
	(*vsx_stxvd2x16_le_V16QI): New.

gcc/testsuite:

2013-10-07  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

	* gcc.target/powerpc/pr43154.c: Skip for ppc64 little endian.
	* gcc.target/powerpc/fusion.c: Likewise.

From-SVN: r203246
Author:    Bill Schmidt <wschmidt@linux.vnet.ibm.com>, 2013-10-07 12:56:08 +00:00
Committer: William Schmidt
Commit:    0cf686948b (parent 9520e1eb27)
8 changed files with 530 additions and 1 deletion
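Editor's illustration (not part of the patch): on little-endian PowerPC, lxvd2x and stxvd2x transfer the two 64-bit halves of a vector in the opposite order from the vector's element (array) order; this is the "element endianness" issue the patch works around by expanding each LE VSX load or store into the memory access plus a doubleword-swap permute. The hedged C sketch below, using a hypothetical swap_halves_v4si helper that mirrors the {2, 3, 0, 1} selector rs6000_const_vec builds for V4SImode, shows why applying the swap twice restores element order, which is what allows redundant pairs of register permutes to be optimized away.

#include <stdio.h>

/* Hypothetical helper (illustration only): apply the halves-swapping
   selector {2, 3, 0, 1} that rs6000_const_vec builds for V4SImode.  */
static void
swap_halves_v4si (const int src[4], int dst[4])
{
  static const int sel[4] = { 2, 3, 0, 1 };
  for (int i = 0; i < 4; i++)
    dst[i] = src[sel[i]];
}

int
main (void)
{
  int mem[4] = { 10, 11, 12, 13 };
  int after_lxvd2x[4];    /* what an LE lxvd2x is modelled as producing */
  int after_xxpermdi[4];  /* after the extra permute the patch emits    */

  swap_halves_v4si (mem, after_lxvd2x);
  swap_halves_v4si (after_lxvd2x, after_xxpermdi);

  for (int i = 0; i < 4; i++)
    printf ("%d ", after_xxpermdi[i]);   /* prints: 10 11 12 13 */
  printf ("\n");
  return 0;
}

A store is the mirror image (permute into a temporary register, then stxvd2x), so a vector copied straight from memory to memory ends up with two adjacent register permutes that cancel.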

gcc/ChangeLog

@@ -1,3 +1,35 @@
2013-10-07  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

	* config/rs6000/vector.md (mov<mode>): Emit permuted move
	sequences for LE VSX loads and stores at expand time.
	* config/rs6000/rs6000-protos.h (rs6000_emit_le_vsx_move): New
	prototype.
	* config/rs6000/rs6000.c (rs6000_const_vec): New.
	(rs6000_gen_le_vsx_permute): New.
	(rs6000_emit_le_vsx_load): New.
	(rs6000_emit_le_vsx_store): New.
	(rs6000_emit_le_vsx_move): New.
	* config/rs6000/vsx.md (*vsx_le_perm_load_v2di): New.
	(*vsx_le_perm_load_v4si): New.
	(*vsx_le_perm_load_v8hi): New.
	(*vsx_le_perm_load_v16qi): New.
	(*vsx_le_perm_store_v2di): New.
	(*vsx_le_perm_store_v4si): New.
	(*vsx_le_perm_store_v8hi): New.
	(*vsx_le_perm_store_v16qi): New.
	(*vsx_xxpermdi2_le_<mode>): New.
	(*vsx_xxpermdi4_le_<mode>): New.
	(*vsx_xxpermdi8_le_V8HI): New.
	(*vsx_xxpermdi16_le_V16QI): New.
	(*vsx_lxvd2x2_le_<mode>): New.
	(*vsx_lxvd2x4_le_<mode>): New.
	(*vsx_lxvd2x8_le_V8HI): New.
	(*vsx_lxvd2x16_le_V16QI): New.
	(*vsx_stxvd2x2_le_<mode>): New.
	(*vsx_stxvd2x4_le_<mode>): New.
	(*vsx_stxvd2x8_le_V8HI): New.
	(*vsx_stxvd2x16_le_V16QI): New.

2013-10-07  Renlin Li  <Renlin.Li@arm.com>

	* config/arm/arm-cores.def (cortex-a53): Use cortex tuning.

gcc/config/rs6000/rs6000-protos.h

@@ -122,6 +122,7 @@ extern rtx rs6000_longcall_ref (rtx);
extern void rs6000_fatal_bad_address (rtx);
extern rtx create_TOC_reference (rtx, rtx);
extern void rs6000_split_multireg_move (rtx, rtx);
extern void rs6000_emit_le_vsx_move (rtx, rtx, enum machine_mode);
extern void rs6000_emit_move (rtx, rtx, enum machine_mode);
extern rtx rs6000_secondary_memory_needed_rtx (enum machine_mode);
extern rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode,

gcc/config/rs6000/rs6000.c

@@ -7665,6 +7665,106 @@ rs6000_eliminate_indexed_memrefs (rtx operands[2])
copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
storage operation by swapping the two halves of a vector. */
static rtvec
rs6000_const_vec (enum machine_mode mode)
{
int i, subparts;
rtvec v;
switch (mode)
{
case V2DFmode:
case V2DImode:
subparts = 2;
break;
case V4SFmode:
case V4SImode:
subparts = 4;
break;
case V8HImode:
subparts = 8;
break;
case V16QImode:
subparts = 16;
break;
default:
gcc_unreachable();
}
v = rtvec_alloc (subparts);
for (i = 0; i < subparts / 2; ++i)
RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
for (i = subparts / 2; i < subparts; ++i)
RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
return v;
}
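/* Editor's illustration (not part of the patch): the selector vectors
   produced above are {1,0} for V2DF/V2DI, {2,3,0,1} for V4SF/V4SI,
   {4,5,6,7,0,1,2,3} for V8HI and {8..15,0..7} for V16QI, i.e. the two
   64-bit halves of the vector are exchanged in every case.  */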
/* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
for a VSX load or store operation. */
rtx
rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
{
rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
return gen_rtx_VEC_SELECT (mode, source, par);
}
/* Emit a little-endian load from vector memory location SOURCE to VSX
register DEST in mode MODE. The load is done with two permuting
insns that represent an lxvd2x and xxpermdi. */
void
rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
{
rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
rtx permute_mem = rs6000_gen_le_vsx_permute (source, mode);
rtx permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));
}
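/* Editor's note (illustrative, not part of the patch): once the vsx.md
   patterns added below have matched, the first SET above becomes the
   lxvd2x itself (which loads the doublewords swapped on LE) and the
   second becomes an "xxpermdi %x0,%x1,%x1,2" that swaps them back into
   element order, so an LE load costs one extra permute unless the
   permute pair can be removed.  */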
/* Emit a little-endian store to vector memory location DEST from VSX
register SOURCE in mode MODE. The store is done with two permuting
insns that represent an xxpermdi and an stxvd2x. */
void
rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
{
rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
rtx permute_src = rs6000_gen_le_vsx_permute (source, mode);
rtx permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));
}
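/* Editor's note (illustrative, not part of the patch): the store is the
   mirror image of the load above: the first SET becomes an xxpermdi
   doubleword swap in a register and the second becomes the stxvd2x that
   writes the swapped value to memory.  */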
/* Emit a sequence representing a little-endian VSX load or store,
moving data from SOURCE to DEST in mode MODE. This is done
separately from rs6000_emit_move to ensure it is called only
during expand. LE VSX loads and stores introduced later are
handled with a split. The expand-time RTL generation allows
us to optimize away redundant pairs of register-permutes. */
void
rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
{
gcc_assert (!BYTES_BIG_ENDIAN
&& VECTOR_MEM_VSX_P (mode)
&& mode != TImode
&& (MEM_P (source) ^ MEM_P (dest)));
if (MEM_P (source))
{
gcc_assert (REG_P (dest));
rs6000_emit_le_vsx_load (dest, source, mode);
}
else
{
if (!REG_P (source))
source = force_reg (mode, source);
rs6000_emit_le_vsx_store (dest, source, mode);
}
}
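/* Editor's note (illustrative, not part of the patch): emitting the
   permutes at expand time exposes them to the RTL optimizers, so, for
   example, a vector that is loaded and then stored unchanged carries two
   adjacent register swaps that cancel, leaving just lxvd2x + stxvd2x.
   This is the "redundant pairs of register-permutes" case mentioned in
   the comment above.  */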
/* Emit a move from SOURCE to DEST in mode MODE. */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)

gcc/config/rs6000/vector.md

@@ -88,7 +88,8 @@
(smax "smax")])
;; Vector move instructions.
;; Vector move instructions. Little-endian VSX loads and stores require
;; special handling to circumvent "element endianness."
(define_expand "mov<mode>"
[(set (match_operand:VEC_M 0 "nonimmediate_operand" "")
(match_operand:VEC_M 1 "any_operand" ""))]
@@ -104,6 +105,15 @@
&& !vlogical_operand (operands[1], <MODE>mode))
operands[1] = force_reg (<MODE>mode, operands[1]);
}
if (!BYTES_BIG_ENDIAN
&& VECTOR_MEM_VSX_P (<MODE>mode)
&& <MODE>mode != TImode
&& (memory_operand (operands[0], <MODE>mode)
^ memory_operand (operands[1], <MODE>mode)))
{
rs6000_emit_le_vsx_move (operands[0], operands[1], <MODE>mode);
DONE;
}
})
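;; Editor's note (illustrative, not part of the patch): the new test above
;; fires only when exactly one operand is a memory operand, i.e. for a
;; plain VSX load or store on a little-endian target, and hands the move
;; to rs6000_emit_le_vsx_move so the permuting sequence is generated at
;; expand time rather than left to a later split.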
;; Generic vector floating point load/store instructions. These will match

gcc/config/rs6000/vsx.md

@@ -216,6 +216,238 @@
])
;; VSX moves
;; The patterns for LE permuted loads and stores come before the general
;; VSX moves so they match first.
(define_insn_and_split "*vsx_le_perm_load_v2di"
[(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
(match_operand:V2DI 1 "memory_operand" "Z"))]
"!BYTES_BIG_ENDIAN && TARGET_VSX"
"#"
"!BYTES_BIG_ENDIAN && TARGET_VSX"
[(set (match_dup 2)
(vec_select:V2DI
(match_dup 1)
(parallel [(const_int 1) (const_int 0)])))
(set (match_dup 0)
(vec_select:V2DI
(match_dup 2)
(parallel [(const_int 1) (const_int 0)])))]
"
{
operands[2] = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (operands[0])
: operands[0];
}
"
[(set_attr "type" "vecload")
(set_attr "length" "8")])
(define_insn_and_split "*vsx_le_perm_load_v4si"
[(set (match_operand:V4SI 0 "vsx_register_operand" "=wa")
(match_operand:V4SI 1 "memory_operand" "Z"))]
"!BYTES_BIG_ENDIAN && TARGET_VSX"
"#"
"!BYTES_BIG_ENDIAN && TARGET_VSX"
[(set (match_dup 2)
(vec_select:V4SI
(match_dup 1)
(parallel [(const_int 2) (const_int 3)
(const_int 0) (const_int 1)])))
(set (match_dup 0)
(vec_select:V4SI
(match_dup 2)
(parallel [(const_int 2) (const_int 3)
(const_int 0) (const_int 1)])))]
"
{
operands[2] = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (operands[0])
: operands[0];
}
"
[(set_attr "type" "vecload")
(set_attr "length" "8")])
(define_insn_and_split "*vsx_le_perm_load_v8hi"
[(set (match_operand:V8HI 0 "vsx_register_operand" "=wa")
(match_operand:V8HI 1 "memory_operand" "Z"))]
"!BYTES_BIG_ENDIAN && TARGET_VSX"
"#"
"!BYTES_BIG_ENDIAN && TARGET_VSX"
[(set (match_dup 2)
(vec_select:V8HI
(match_dup 1)
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)])))
(set (match_dup 0)
(vec_select:V8HI
(match_dup 2)
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)])))]
"
{
operands[2] = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (operands[0])
: operands[0];
}
"
[(set_attr "type" "vecload")
(set_attr "length" "8")])
(define_insn_and_split "*vsx_le_perm_load_v16qi"
[(set (match_operand:V16QI 0 "vsx_register_operand" "=wa")
(match_operand:V16QI 1 "memory_operand" "Z"))]
"!BYTES_BIG_ENDIAN && TARGET_VSX"
"#"
"!BYTES_BIG_ENDIAN && TARGET_VSX"
[(set (match_dup 2)
(vec_select:V16QI
(match_dup 1)
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))
(set (match_dup 0)
(vec_select:V16QI
(match_dup 2)
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
"
{
operands[2] = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (operands[0])
: operands[0];
}
"
[(set_attr "type" "vecload")
(set_attr "length" "8")])
(define_insn_and_split "*vsx_le_perm_store_v2di"
[(set (match_operand:V2DI 0 "memory_operand" "=Z")
(match_operand:V2DI 1 "vsx_register_operand" "+wa"))]
"!BYTES_BIG_ENDIAN && TARGET_VSX"
"#"
"!BYTES_BIG_ENDIAN && TARGET_VSX"
[(set (match_dup 2)
(vec_select:V2DI
(match_dup 1)
(parallel [(const_int 1) (const_int 0)])))
(set (match_dup 0)
(vec_select:V2DI
(match_dup 2)
(parallel [(const_int 1) (const_int 0)])))]
"
{
operands[2] = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (operands[1])
: operands[1];
}
"
[(set_attr "type" "vecstore")
(set_attr "length" "8")])
(define_insn_and_split "*vsx_le_perm_store_v4si"
[(set (match_operand:V4SI 0 "memory_operand" "=Z")
(match_operand:V4SI 1 "vsx_register_operand" "+wa"))]
"!BYTES_BIG_ENDIAN && TARGET_VSX"
"#"
"!BYTES_BIG_ENDIAN && TARGET_VSX"
[(set (match_dup 2)
(vec_select:V4SI
(match_dup 1)
(parallel [(const_int 2) (const_int 3)
(const_int 0) (const_int 1)])))
(set (match_dup 0)
(vec_select:V4SI
(match_dup 2)
(parallel [(const_int 2) (const_int 3)
(const_int 0) (const_int 1)])))]
"
{
operands[2] = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (operands[1])
: operands[1];
}
"
[(set_attr "type" "vecstore")
(set_attr "length" "8")])
(define_insn_and_split "*vsx_le_perm_store_v8hi"
[(set (match_operand:V8HI 0 "memory_operand" "=Z")
(match_operand:V8HI 1 "vsx_register_operand" "+wa"))]
"!BYTES_BIG_ENDIAN && TARGET_VSX"
"#"
"!BYTES_BIG_ENDIAN && TARGET_VSX"
[(set (match_dup 2)
(vec_select:V8HI
(match_dup 1)
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)])))
(set (match_dup 0)
(vec_select:V8HI
(match_dup 2)
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)])))]
"
{
operands[2] = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (operands[1])
: operands[1];
}
"
[(set_attr "type" "vecstore")
(set_attr "length" "8")])
(define_insn_and_split "*vsx_le_perm_store_v16qi"
[(set (match_operand:V16QI 0 "memory_operand" "=Z")
(match_operand:V16QI 1 "vsx_register_operand" "+wa"))]
"!BYTES_BIG_ENDIAN && TARGET_VSX"
"#"
"!BYTES_BIG_ENDIAN && TARGET_VSX"
[(set (match_dup 2)
(vec_select:V16QI
(match_dup 1)
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))
(set (match_dup 0)
(vec_select:V16QI
(match_dup 2)
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
"
{
operands[2] = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (operands[1])
: operands[1];
}
"
[(set_attr "type" "vecstore")
(set_attr "length" "8")])
(define_insn "*vsx_mov<mode>"
[(set (match_operand:VSX_M 0 "nonimmediate_operand" "=Z,<VSr>,<VSr>,?Z,?wa,?wa,wQ,?&r,??Y,??r,??r,<VSr>,?wa,*r,v,wZ, v")
(match_operand:VSX_M 1 "input_operand" "<VSr>,Z,<VSr>,wa,Z,wa,r,wQ,r,Y,r,j,j,j,W,v,wZ"))]
@@ -978,6 +1210,153 @@
"xxpermdi %x0,%x1,%x2,0"
[(set_attr "type" "vecperm")])
;; xxpermdi for little endian loads and stores. We need several of
;; these since the form of the PARALLEL differs by mode.
(define_insn "*vsx_xxpermdi2_le_<mode>"
[(set (match_operand:VSX_D 0 "vsx_register_operand" "=wa")
(vec_select:VSX_D
(match_operand:VSX_D 1 "vsx_register_operand" "wa")
(parallel [(const_int 1) (const_int 0)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (<MODE>mode)"
"xxpermdi %x0,%x1,%x1,2"
[(set_attr "type" "vecperm")])
(define_insn "*vsx_xxpermdi4_le_<mode>"
[(set (match_operand:VSX_W 0 "vsx_register_operand" "=wa")
(vec_select:VSX_W
(match_operand:VSX_W 1 "vsx_register_operand" "wa")
(parallel [(const_int 2) (const_int 3)
(const_int 0) (const_int 1)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (<MODE>mode)"
"xxpermdi %x0,%x1,%x1,2"
[(set_attr "type" "vecperm")])
(define_insn "*vsx_xxpermdi8_le_V8HI"
[(set (match_operand:V8HI 0 "vsx_register_operand" "=wa")
(vec_select:V8HI
(match_operand:V8HI 1 "vsx_register_operand" "wa")
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (V8HImode)"
"xxpermdi %x0,%x1,%x1,2"
[(set_attr "type" "vecperm")])
(define_insn "*vsx_xxpermdi16_le_V16QI"
[(set (match_operand:V16QI 0 "vsx_register_operand" "=wa")
(vec_select:V16QI
(match_operand:V16QI 1 "vsx_register_operand" "wa")
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (V16QImode)"
"xxpermdi %x0,%x1,%x1,2"
[(set_attr "type" "vecperm")])
;; lxvd2x for little endian loads. We need several of
;; these since the form of the PARALLEL differs by mode.
(define_insn "*vsx_lxvd2x2_le_<mode>"
[(set (match_operand:VSX_D 0 "vsx_register_operand" "=wa")
(vec_select:VSX_D
(match_operand:VSX_D 1 "memory_operand" "Z")
(parallel [(const_int 1) (const_int 0)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (<MODE>mode)"
"lxvd2x %x0,%y1"
[(set_attr "type" "vecload")])
(define_insn "*vsx_lxvd2x4_le_<mode>"
[(set (match_operand:VSX_W 0 "vsx_register_operand" "=wa")
(vec_select:VSX_W
(match_operand:VSX_W 1 "memory_operand" "Z")
(parallel [(const_int 2) (const_int 3)
(const_int 0) (const_int 1)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (<MODE>mode)"
"lxvd2x %x0,%y1"
[(set_attr "type" "vecload")])
(define_insn "*vsx_lxvd2x8_le_V8HI"
[(set (match_operand:V8HI 0 "vsx_register_operand" "=wa")
(vec_select:V8HI
(match_operand:V8HI 1 "memory_operand" "Z")
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (V8HImode)"
"lxvd2x %x0,%y1"
[(set_attr "type" "vecload")])
(define_insn "*vsx_lxvd2x16_le_V16QI"
[(set (match_operand:V16QI 0 "vsx_register_operand" "=wa")
(vec_select:V16QI
(match_operand:V16QI 1 "memory_operand" "Z")
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (V16QImode)"
"lxvd2x %x0,%y1"
[(set_attr "type" "vecload")])
;; stxvd2x for little endian stores. We need several of
;; these since the form of the PARALLEL differs by mode.
(define_insn "*vsx_stxvd2x2_le_<mode>"
[(set (match_operand:VSX_D 0 "memory_operand" "=Z")
(vec_select:VSX_D
(match_operand:VSX_D 1 "vsx_register_operand" "wa")
(parallel [(const_int 1) (const_int 0)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (<MODE>mode)"
"stxvd2x %x1,%y0"
[(set_attr "type" "vecstore")])
(define_insn "*vsx_stxvd2x4_le_<mode>"
[(set (match_operand:VSX_W 0 "memory_operand" "=Z")
(vec_select:VSX_W
(match_operand:VSX_W 1 "vsx_register_operand" "wa")
(parallel [(const_int 2) (const_int 3)
(const_int 0) (const_int 1)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (<MODE>mode)"
"stxvd2x %x1,%y0"
[(set_attr "type" "vecstore")])
(define_insn "*vsx_stxvd2x8_le_V8HI"
[(set (match_operand:V8HI 0 "memory_operand" "=Z")
(vec_select:V8HI
(match_operand:V8HI 1 "vsx_register_operand" "wa")
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (V8HImode)"
"stxvd2x %x1,%y0"
[(set_attr "type" "vecstore")])
(define_insn "*vsx_stxvd2x16_le_V16QI"
[(set (match_operand:V16QI 0 "memory_operand" "=Z")
(vec_select:V16QI
(match_operand:V16QI 1 "vsx_register_operand" "wa")
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)
(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
"!BYTES_BIG_ENDIAN && VECTOR_MEM_VSX_P (V16QImode)"
"stxvd2x %x1,%y0"
[(set_attr "type" "vecstore")])
;; Set the element of a V2DI/V2DF mode
(define_insn "vsx_set_<mode>"
[(set (match_operand:VSX_D 0 "vsx_register_operand" "=wd,?wa")

gcc/testsuite/ChangeLog

@@ -1,3 +1,8 @@
2013-10-07  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

	* gcc.target/powerpc/pr43154.c: Skip for ppc64 little endian.
	* gcc.target/powerpc/fusion.c: Likewise.

2013-10-07  Andreas Krebbel  <Andreas.Krebbel@de.ibm.com>

	* gcc.target/s390/htm-nofloat-2.c: New testcase.

gcc/testsuite/gcc.target/powerpc/fusion.c

@@ -1,5 +1,6 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
/* { dg-skip-if "" { powerpc*le-*-* } { "*" } { "" } } */
/* { dg-require-effective-target powerpc_p8vector_ok } */
/* { dg-options "-mcpu=power7 -mtune=power8 -O3" } */

gcc/testsuite/gcc.target/powerpc/pr43154.c

@@ -1,5 +1,6 @@
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
/* { dg-skip-if "" { powerpc*le-*-* } { "*" } { "" } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* { dg-options "-O2 -mcpu=power7" } */