i386.md (isa): Add x64 and nox64.

* config/i386/i386.md (isa): Add x64 and nox64.
	(enabled): Define x64 for TARGET_64BIT and nox64 for !TARGET_64BIT.
	(*pushtf): Enable *roF alternative for x64 isa only.
	(*pushxf): Merge with *pushxf_nointeger.  Use Yx*r constraint. Set
	mode attribute of integer alternatives to DImode for TARGET_64BIT.
	(*pushdf): Merge with *pushdf_rex64.  Use x64 and nox64 isa attributes.
	(*movtf_internal): Merge from *movtf_internal_rex64 and
	*movtf_internal_sse.  Use x64 and nox64 isa attributes.
	(*movxf_internal): Merge with *movxf_internal_rex64.  Use x64 and
	nox64 isa attributes.
	(*movdf_internal): Merge with *movdf_internal_rex64.  Use x64 and
	nox64 isa attributes.
	* config/i386/constraints.md (Yd): Do not set for TARGET_64BIT.
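
The merges above rely on the per-alternative "enabled" machinery: each
constraint alternative of an insn can carry an "isa" tag, and any
alternative whose "enabled" attribute evaluates to 0 on the current
target is ignored by the register allocator.  As a rough sketch of the
idiom (a hypothetical pattern, not code from this commit):

	;; Alternative 0 is always available, alternative 1 is only
	;; considered when !TARGET_64BIT, alternative 2 only when
	;; TARGET_64BIT holds.
	(define_insn "*sketch"
	  [(set (match_operand:DF 0 "nonimmediate_operand" "=x,o,r")
		(match_operand:DF 1 "general_operand"      "x,F,r"))]
	  ""
	  "#"
	  [(set_attr "isa" "*,nox64,x64")])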

From-SVN: r196757
Author: Uros Bizjak <ubizjak@gmail.com>
Date:   2013-03-17 20:33:40 +01:00 (committed by Uros Bizjak)
parent 88b97037e2
commit 286fb67781
3 changed files with 128 additions and 330 deletions

gcc/ChangeLog
@@ -1,3 +1,19 @@
+2013-03-17  Uros Bizjak  <ubizjak@gmail.com>
+
+	* config/i386/i386.md (isa): Add x64 and nox64.
+	(enabled): Define x64 for TARGET_64BIT and nox64 for !TARGET_64BIT.
+	(*pushtf): Enable *roF alternative for x64 isa only.
+	(*pushxf): Merge with *pushxf_nointeger.  Use Yx*r constraint.  Set
+	mode attribute of integer alternatives to DImode for TARGET_64BIT.
+	(*pushdf): Merge with *pushdf_rex64.  Use x64 and nox64 isa attributes.
+	(*movtf_internal): Merge from *movtf_internal_rex64 and
+	*movtf_internal_sse.  Use x64 and nox64 isa attributes.
+	(*movxf_internal): Merge with *movxf_internal_rex64.  Use x64 and
+	nox64 isa attributes.
+	(*movdf_internal): Merge with *movdf_internal_rex64.  Use x64 and
+	nox64 isa attributes.
+	* config/i386/constraints.md (Yd): Do not set for TARGET_64BIT.
+
 2013-03-17  Uros Bizjak  <ubizjak@gmail.com>
 
 	* config/alpha/alpha.c (TARGET_LRA_P): New define.

gcc/config/i386/constraints.md
@@ -116,8 +116,7 @@
  "@internal Any integer register when zero extensions with AND are disabled.")
 
 (define_register_constraint "Yd"
- "(TARGET_64BIT
-   || (TARGET_INTEGER_DFMODE_MOVES && optimize_function_for_speed_p (cfun)))
+ "TARGET_INTEGER_DFMODE_MOVES && optimize_function_for_speed_p (cfun)
   ? GENERAL_REGS : NO_REGS"
  "@internal Any integer register when integer DFmode moves are enabled.")

gcc/config/i386/i386.md
@@ -651,12 +651,14 @@
 (define_attr "movu" "0,1" (const_string "0"))
 
 ;; Used to control the "enabled" attribute on a per-instruction basis.
-(define_attr "isa" "base,sse2,sse2_noavx,sse3,sse4,sse4_noavx,noavx,avx,
-		    avx2,noavx2,bmi2,fma4,fma"
+(define_attr "isa" "base,x64,nox64,sse2,sse2_noavx,sse3,sse4,sse4_noavx,
+		    noavx,avx,avx2,noavx2,bmi2,fma4,fma"
   (const_string "base"))
 
 (define_attr "enabled" ""
-  (cond [(eq_attr "isa" "sse2") (symbol_ref "TARGET_SSE2")
+  (cond [(eq_attr "isa" "x64") (symbol_ref "TARGET_64BIT")
+	 (eq_attr "isa" "nox64") (symbol_ref "!TARGET_64BIT")
+	 (eq_attr "isa" "sse2") (symbol_ref "TARGET_SSE2")
	 (eq_attr "isa" "sse2_noavx")
	   (symbol_ref "TARGET_SSE2 && !TARGET_AVX")
	 (eq_attr "isa" "sse3") (symbol_ref "TARGET_SSE3")
@@ -2582,16 +2584,17 @@
 ;; Floating point push instructions.
 
 (define_insn "*pushtf"
-  [(set (match_operand:TF 0 "push_operand" "=<,<,<")
-	(match_operand:TF 1 "general_no_elim_operand" "x,Fo,*r"))]
-  "TARGET_SSE"
+  [(set (match_operand:TF 0 "push_operand" "=<,<")
+	(match_operand:TF 1 "general_no_elim_operand" "x,*roF"))]
+  "TARGET_64BIT || TARGET_SSE"
 {
   /* This insn should be already split before reg-stack.  */
   gcc_unreachable ();
 }
-  [(set_attr "type" "multi")
-   (set_attr "unit" "sse,*,*")
-   (set_attr "mode" "TF,SI,SI")])
+  [(set_attr "isa" "*,x64")
+   (set_attr "type" "multi")
+   (set_attr "unit" "sse,*")
+   (set_attr "mode" "TF,DI")])
 
 ;; %%% Kill this when call knows how to work this out.
 (define_split
@@ -2603,33 +2606,21 @@
 (define_insn "*pushxf"
   [(set (match_operand:XF 0 "push_operand" "=<,<")
-	(match_operand:XF 1 "general_no_elim_operand" "f,ro"))]
-  "optimize_function_for_speed_p (cfun)"
+	(match_operand:XF 1 "general_no_elim_operand" "f,Yx*roF"))]
+  ""
 {
   /* This insn should be already split before reg-stack.  */
   gcc_unreachable ();
 }
   [(set_attr "type" "multi")
    (set_attr "unit" "i387,*")
-   (set_attr "mode" "XF,SI")])
-
-;; Size of pushxf is 3 (for sub) + 2 (for fstp) + memory operand size.
-;; Size of pushxf using integer instructions is 3+3*memory operand size
-;; Pushing using integer instructions is longer except for constants
-;; and direct memory references (assuming that any given constant is pushed
-;; only once, but this ought to be handled elsewhere).
-
-(define_insn "*pushxf_nointeger"
-  [(set (match_operand:XF 0 "push_operand" "=<,<")
-	(match_operand:XF 1 "general_no_elim_operand" "f,*rFo"))]
-  "optimize_function_for_size_p (cfun)"
-{
-  /* This insn should be already split before reg-stack.  */
-  gcc_unreachable ();
-}
-  [(set_attr "type" "multi")
-   (set_attr "unit" "i387,*")
-   (set_attr "mode" "XF,SI")])
+   (set (attr "mode")
+	(cond [(eq_attr "alternative" "1")
+		 (if_then_else (match_test "TARGET_64BIT")
+		   (const_string "DI")
+		   (const_string "SI"))
+	      ]
+	      (const_string "XF")))])
 
 ;; %%% Kill this when call knows how to work this out.
 (define_split
@@ -2640,34 +2631,18 @@
   (set (mem:XF (reg:P SP_REG)) (match_dup 1))]
   "operands[2] = GEN_INT (-GET_MODE_SIZE (XFmode));")
 
-(define_insn "*pushdf_rex64"
-  [(set (match_operand:DF 0 "push_operand" "=<,<,<")
-	(match_operand:DF 1 "general_no_elim_operand" "f,Yd*rFm,x"))]
-  "TARGET_64BIT"
-{
-  /* This insn should be already split before reg-stack.  */
-  gcc_unreachable ();
-}
-  [(set_attr "type" "multi")
-   (set_attr "unit" "i387,*,*")
-   (set_attr "mode" "DF,DI,DF")])
-
-;; Size of pushdf is 3 (for sub) + 2 (for fstp) + memory operand size.
-;; Size of pushdf using integer instructions is 2+2*memory operand size
-;; On the average, pushdf using integers can be still shorter.
-
 (define_insn "*pushdf"
-  [(set (match_operand:DF 0 "push_operand" "=<,<,<")
-	(match_operand:DF 1 "general_no_elim_operand" "f,Yd*rFo,x"))]
-  "!TARGET_64BIT"
+  [(set (match_operand:DF 0 "push_operand" "=<,<,<,<")
+	(match_operand:DF 1 "general_no_elim_operand" "f,Yd*roF,rmF,x"))]
+  ""
 {
   /* This insn should be already split before reg-stack.  */
   gcc_unreachable ();
 }
-  [(set_attr "isa" "*,*,sse2")
+  [(set_attr "isa" "*,nox64,x64,sse2")
    (set_attr "type" "multi")
-   (set_attr "unit" "i387,*,*")
-   (set_attr "mode" "DF,DI,DF")])
+   (set_attr "unit" "i387,*,*,sse")
+   (set_attr "mode" "DF,SI,DI,DF")])
 
 ;; %%% Kill this when call knows how to work this out.
 (define_split
@@ -2692,7 +2667,7 @@
 (define_insn "*pushsf"
   [(set (match_operand:SF 0 "push_operand" "=<,<,<")
-	(match_operand:SF 1 "general_no_elim_operand" "f,rFm,x"))]
+	(match_operand:SF 1 "general_no_elim_operand" "f,rmF,x"))]
   "!TARGET_64BIT"
 {
   /* Anything else should be already split before reg-stack.  */
@@ -2736,10 +2711,7 @@
 (define_expand "movtf"
   [(set (match_operand:TF 0 "nonimmediate_operand")
	(match_operand:TF 1 "nonimmediate_operand"))]
   "TARGET_64BIT || TARGET_SSE"
-{
-  ix86_expand_move (TFmode, operands);
-  DONE;
-})
+  "ix86_expand_move (TFmode, operands); DONE;")
 
 (define_expand "mov<mode>"
   [(set (match_operand:X87MODEF 0 "nonimmediate_operand")
@@ -2747,73 +2719,10 @@
   ""
   "ix86_expand_move (<MODE>mode, operands); DONE;")
 
-(define_insn "*movtf_internal_rex64"
+(define_insn "*movtf_internal"
   [(set (match_operand:TF 0 "nonimmediate_operand" "=x,x ,m,?*r ,!o")
	(match_operand:TF 1 "general_operand"      "C ,xm,x,*roF,*rC"))]
-  "TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))
-   && (!can_create_pseudo_p ()
-       || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
-       || GET_CODE (operands[1]) != CONST_DOUBLE
-       || (optimize_function_for_size_p (cfun)
-	   && standard_sse_constant_p (operands[1])
-	   && !memory_operand (operands[0], TFmode))
-       || (!TARGET_MEMORY_MISMATCH_STALL
-	   && memory_operand (operands[0], TFmode)))"
-{
-  switch (which_alternative)
-    {
-    case 0:
-      return standard_sse_constant_opcode (insn, operands[1]);
-    case 1:
-    case 2:
-      /* Handle misaligned load/store since we
-	 don't have movmisaligntf pattern.  */
-      if (misaligned_operand (operands[0], TFmode)
-	  || misaligned_operand (operands[1], TFmode))
-	{
-	  if (get_attr_mode (insn) == MODE_V4SF)
-	    return "%vmovups\t{%1, %0|%0, %1}";
-	  else
-	    return "%vmovdqu\t{%1, %0|%0, %1}";
-	}
-      else
-	{
-	  if (get_attr_mode (insn) == MODE_V4SF)
-	    return "%vmovaps\t{%1, %0|%0, %1}";
-	  else
-	    return "%vmovdqa\t{%1, %0|%0, %1}";
-	}
-    case 3:
-    case 4:
-      return "#";
-    default:
-      gcc_unreachable ();
-    }
-}
-  [(set_attr "type" "sselog1,ssemov,ssemov,*,*")
-   (set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,*,*")
-   (set (attr "mode")
-	(cond [(eq_attr "alternative" "3,4")
-		 (const_string "DI")
-	       (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
-		 (const_string "V4SF")
-	       (and (eq_attr "alternative" "2")
-		    (match_test "TARGET_SSE_TYPELESS_STORES"))
-		 (const_string "V4SF")
-	       (match_test "TARGET_AVX")
-		 (const_string "TI")
-	       (ior (not (match_test "TARGET_SSE2"))
-		    (match_test "optimize_function_for_size_p (cfun)"))
-		 (const_string "V4SF")
-	      ]
-	      (const_string "TI")))])
-
-(define_insn "*movtf_internal_sse"
-  [(set (match_operand:TF 0 "nonimmediate_operand" "=x,x ,m")
-	(match_operand:TF 1 "general_operand"      "C ,xm,x"))]
-  "TARGET_SSE && !TARGET_64BIT
+  "(TARGET_64BIT || TARGET_SSE)
   && !(MEM_P (operands[0]) && MEM_P (operands[1]))
   && (!can_create_pseudo_p ()
      || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
@@ -2847,14 +2756,22 @@
	  else
	    return "%vmovdqa\t{%1, %0|%0, %1}";
	}
+    case 3:
+    case 4:
+      return "#";
     default:
       gcc_unreachable ();
     }
 }
-  [(set_attr "type" "sselog1,ssemov,ssemov")
-   (set_attr "prefix" "maybe_vex")
+  [(set_attr "isa" "*,*,*,x64,x64")
+   (set_attr "type" "sselog1,ssemov,ssemov,*,*")
+   (set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,*,*")
   (set (attr "mode")
-	(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+	(cond [(eq_attr "alternative" "3,4")
+		 (const_string "DI")
+	       (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
	         (const_string "V4SF")
	       (and (eq_attr "alternative" "2")
		    (match_test "TARGET_SSE_TYPELESS_STORES"))
@@ -2867,44 +2784,13 @@
	      ]
	      (const_string "TI")))])
 
-(define_insn "*movxf_internal_rex64"
-  [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m,f,?Yx*r ,!o")
-	(match_operand:XF 1 "general_operand"      "fm,f,G,Yx*roF,Yx*rC"))]
-  "TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))
-   && (!can_create_pseudo_p ()
-       || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
-       || GET_CODE (operands[1]) != CONST_DOUBLE
-       || (optimize_function_for_size_p (cfun)
-	   && standard_80387_constant_p (operands[1]) > 0
-	   && !memory_operand (operands[0], XFmode))
-       || (!TARGET_MEMORY_MISMATCH_STALL
-	   && memory_operand (operands[0], XFmode)))"
-{
-  switch (which_alternative)
-    {
-    case 0:
-    case 1:
-      return output_387_reg_move (insn, operands);
-    case 2:
-      return standard_80387_constant_opcode (operands[1]);
-    case 3:
-    case 4:
-      return "#";
-    default:
-      gcc_unreachable ();
-    }
-}
-  [(set_attr "type" "fmov,fmov,fmov,multi,multi")
-   (set_attr "mode" "XF,XF,XF,SI,SI")])
-
-;; Possible store forwarding (partial memory) stall in alternative 4.
+;; Possible store forwarding (partial memory) stall in alternatives 4 and 5.
 
 (define_insn "*movxf_internal"
-  [(set (match_operand:XF 0 "nonimmediate_operand" "=f,m,f,?Yx*r ,!o")
-	(match_operand:XF 1 "general_operand"      "fm,f,G,Yx*roF,Yx*rF"))]
-  "!TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))
+  [(set (match_operand:XF 0 "nonimmediate_operand"
+	  "=f,m,f,?Yx*r ,!o   ,!o")
+	(match_operand:XF 1 "general_operand"
+	  "fm,f,G,Yx*roF,Yx*rF,Yx*rC"))]
+  "!(MEM_P (operands[0]) && MEM_P (operands[1]))
   && (!can_create_pseudo_p ()
      || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
     || GET_CODE (operands[1]) != CONST_DOUBLE
@@ -2925,166 +2811,30 @@
     case 3:
     case 4:
+    case 5:
       return "#";
     default:
       gcc_unreachable ();
     }
 }
-  [(set_attr "type" "fmov,fmov,fmov,multi,multi")
-   (set_attr "mode" "XF,XF,XF,SI,SI")])
-
-(define_insn "*movdf_internal_rex64"
-  [(set (match_operand:DF 0 "nonimmediate_operand"
-	  "=Yf*f,m ,Yf*f,?r,?m,?r,?r,x,x,x,m,Yi,r")
-	(match_operand:DF 1 "general_operand"
-	  "Yf*fm,Yf*f,G ,rm,rC,C ,F ,C,x,m,x,r ,Yi"))]
-  "TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))
-   && (!can_create_pseudo_p ()
-       || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
-       || GET_CODE (operands[1]) != CONST_DOUBLE
-       || (optimize_function_for_size_p (cfun)
-	   && ((!(TARGET_SSE2 && TARGET_SSE_MATH)
-		&& standard_80387_constant_p (operands[1]) > 0)
-	       || (TARGET_SSE2 && TARGET_SSE_MATH
-		   && standard_sse_constant_p (operands[1]))))
-       || memory_operand (operands[0], DFmode))"
-{
-  switch (which_alternative)
-    {
-    case 0:
-    case 1:
-      return output_387_reg_move (insn, operands);
-    case 2:
-      return standard_80387_constant_opcode (operands[1]);
-    case 3:
-    case 4:
-      return "mov{q}\t{%1, %0|%0, %1}";
-    case 5:
-      return "mov{l}\t{%1, %k0|%k0, %1}";
-    case 6:
-      return "movabs{q}\t{%1, %0|%0, %1}";
-    case 7:
-      return standard_sse_constant_opcode (insn, operands[1]);
-    case 8:
-    case 9:
-    case 10:
-      switch (get_attr_mode (insn))
-	{
-	case MODE_V2DF:
-	  return "%vmovapd\t{%1, %0|%0, %1}";
-	case MODE_V4SF:
-	  return "%vmovaps\t{%1, %0|%0, %1}";
-	case MODE_DI:
-	  return "%vmovq\t{%1, %0|%0, %1}";
-	case MODE_DF:
-	  if (TARGET_AVX && REG_P (operands[0]) && REG_P (operands[1]))
-	    return "vmovsd\t{%1, %0, %0|%0, %0, %1}";
-	  return "%vmovsd\t{%1, %0|%0, %1}";
-	case MODE_V1DF:
-	  return "%vmovlpd\t{%1, %d0|%d0, %1}";
-	case MODE_V2SF:
-	  return "%vmovlps\t{%1, %d0|%d0, %1}";
-	default:
-	  gcc_unreachable ();
-	}
-    case 11:
-    case 12:
-      /* Handle broken assemblers that require movd instead of movq.  */
-      return "%vmovd\t{%1, %0|%0, %1}";
-    default:
-      gcc_unreachable();
-    }
-}
-  [(set (attr "type")
-	(cond [(eq_attr "alternative" "0,1,2")
-		 (const_string "fmov")
-	       (eq_attr "alternative" "3,4,5,6")
-		 (const_string "imov")
-	       (eq_attr "alternative" "7")
-		 (const_string "sselog1")
-	      ]
-	      (const_string "ssemov")))
-   (set (attr "modrm")
-     (if_then_else
-       (and (eq_attr "alternative" "6") (eq_attr "type" "imov"))
-	 (const_string "0")
-	 (const_string "*")))
-   (set (attr "length_immediate")
-     (if_then_else
-       (and (eq_attr "alternative" "6") (eq_attr "type" "imov"))
-	 (const_string "8")
-	 (const_string "*")))
-   (set (attr "prefix")
-     (if_then_else (eq_attr "alternative" "0,1,2,3,4,5,6")
-       (const_string "orig")
-       (const_string "maybe_vex")))
-   (set (attr "prefix_data16")
-     (if_then_else (eq_attr "mode" "V1DF")
-       (const_string "1")
-       (const_string "*")))
-   (set (attr "mode")
-	(cond [(eq_attr "alternative" "0,1,2")
-		 (const_string "DF")
-	       (eq_attr "alternative" "3,4,6,11,12")
-		 (const_string "DI")
-	       (eq_attr "alternative" "5")
-		 (const_string "SI")
-	       /* xorps is one byte shorter for !TARGET_AVX.  */
-	       (eq_attr "alternative" "7")
-		 (cond [(match_test "TARGET_AVX")
-			  (const_string "V2DF")
-			(match_test "optimize_function_for_size_p (cfun)")
-			  (const_string "V4SF")
-			(match_test "TARGET_SSE_LOAD0_BY_PXOR")
-			  (const_string "TI")
-		       ]
-		       (const_string "V2DF"))
-	       /* For architectures resolving dependencies on
-		  whole SSE registers use APD move to break dependency
-		  chains, otherwise use short move to avoid extra work.
-		  movaps encodes one byte shorter for !TARGET_AVX.  */
-	       (eq_attr "alternative" "8")
-		 (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
-			  (const_string "V4SF")
-			(match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
-			  (const_string "V2DF")
-			(match_test "TARGET_AVX")
-			  (const_string "DF")
-			(match_test "optimize_function_for_size_p (cfun)")
-			  (const_string "V4SF")
-		       ]
-		       (const_string "DF"))
-	       /* For architectures resolving dependencies on register
-		  parts we may avoid extra work to zero out upper part
-		  of register.  */
-	       (eq_attr "alternative" "9")
-		 (if_then_else
-		   (match_test "TARGET_SSE_SPLIT_REGS")
-		   (const_string "V1DF")
-		   (const_string "DF"))
-	      ]
-	      (const_string "DF")))])
+  [(set_attr "isa" "*,*,*,*,nox64,x64")
+   (set_attr "type" "fmov,fmov,fmov,multi,multi,multi")
+   (set (attr "mode")
+	(cond [(eq_attr "alternative" "3,4,5")
+		 (if_then_else (match_test "TARGET_64BIT")
+		   (const_string "DI")
+		   (const_string "SI"))
+	      ]
+	      (const_string "XF")))])
 
 ;; Possible store forwarding (partial memory) stall in alternative 4.
 
 (define_insn "*movdf_internal"
   [(set (match_operand:DF 0 "nonimmediate_operand"
-	  "=Yf*f,m ,Yf*f,?Yd*r ,!o ,x,x,x,m,*x,*x,*x,m")
+	  "=Yf*f,m ,Yf*f,?Yd*r ,!o ,?r,?m,?r,?r,x,x,x,m,*x,*x,*x,m ,Yi,r")
	(match_operand:DF 1 "general_operand"
-	  "Yf*fm,Yf*f,G ,Yd*roF,Yd*rF,C,x,m,x,C ,*x,m ,*x"))]
-  "!TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))
+	  "Yf*fm,Yf*f,G ,Yd*roF,Yd*rF,rm,rC,C ,F ,C,x,m,x,C ,*x,m ,*x,r ,Yi"))]
+  "!(MEM_P (operands[0]) && MEM_P (operands[1]))
   && (!can_create_pseudo_p ()
      || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
     || GET_CODE (operands[1]) != CONST_DOUBLE
@@ -3094,7 +2844,7 @@
	       || (TARGET_SSE2 && TARGET_SSE_MATH
		   && standard_sse_constant_p (operands[1])))
	   && !memory_operand (operands[0], DFmode))
-       || (!TARGET_MEMORY_MISMATCH_STALL
+       || ((TARGET_64BIT || !TARGET_MEMORY_MISMATCH_STALL)
	   && memory_operand (operands[0], DFmode)))"
 {
   switch (which_alternative)
@@ -3111,15 +2861,25 @@
       return "#";
     case 5:
+    case 6:
+      return "mov{q}\t{%1, %0|%0, %1}";
+    case 7:
+      return "mov{l}\t{%1, %k0|%k0, %1}";
+    case 8:
+      return "movabs{q}\t{%1, %0|%0, %1}";
     case 9:
+    case 13:
       return standard_sse_constant_opcode (insn, operands[1]);
-    case 6:
-    case 7:
-    case 8:
     case 10:
     case 11:
     case 12:
+    case 14:
+    case 15:
+    case 16:
       switch (get_attr_mode (insn))
	{
	case MODE_V2DF:
@@ -3141,25 +2901,45 @@
	default:
	  gcc_unreachable ();
	}
+    case 17:
+    case 18:
+      /* Handle broken assemblers that require movd instead of movq.  */
+      return "%vmovd\t{%1, %0|%0, %1}";
     default:
       gcc_unreachable ();
     }
 }
   [(set (attr "isa")
-	(if_then_else (eq_attr "alternative" "5,6,7,8")
+	(cond [(eq_attr "alternative" "3,4")
+		 (const_string "nox64")
+	       (eq_attr "alternative" "5,6,7,8,17,18")
+		 (const_string "x64")
+	       (eq_attr "alternative" "9,10,11,12")
	         (const_string "sse2")
+	      ]
	      (const_string "*")))
   (set (attr "type")
	(cond [(eq_attr "alternative" "0,1,2")
		 (const_string "fmov")
	       (eq_attr "alternative" "3,4")
		 (const_string "multi")
-	       (eq_attr "alternative" "5,9")
+	       (eq_attr "alternative" "5,6,7,8")
+		 (const_string "imov")
+	       (eq_attr "alternative" "9,13")
		 (const_string "sselog1")
	      ]
	      (const_string "ssemov")))
+   (set (attr "modrm")
+     (if_then_else (eq_attr "alternative" "8")
+       (const_string "0")
+       (const_string "*")))
+   (set (attr "length_immediate")
+     (if_then_else (eq_attr "alternative" "8")
+       (const_string "8")
+       (const_string "*")))
   (set (attr "prefix")
-     (if_then_else (eq_attr "alternative" "0,1,2,3,4")
+     (if_then_else (eq_attr "alternative" "0,1,2,3,4,5,6,7,8")
       (const_string "orig")
       (const_string "maybe_vex")))
   (set (attr "prefix_data16")
@@ -3167,21 +2947,16 @@
       (const_string "1")
       (const_string "*")))
   (set (attr "mode")
-	(cond [(eq_attr "alternative" "0,1,2")
-		 (const_string "DF")
-	       (eq_attr "alternative" "3,4")
+	(cond [(eq_attr "alternative" "3,4,7")
		 (const_string "SI")
-	       /* For SSE1, we have many fewer alternatives.  */
-	       (not (match_test "TARGET_SSE2"))
-		 (if_then_else
-		   (eq_attr "alternative" "5,6,9,10")
-		   (const_string "V4SF")
-		   (const_string "V2SF"))
+	       (eq_attr "alternative" "5,6,8,17,18")
+		 (const_string "DI")
	       /* xorps is one byte shorter for !TARGET_AVX.  */
-	       (eq_attr "alternative" "5,9")
-		 (cond [(match_test "TARGET_AVX")
+	       (eq_attr "alternative" "9,13")
+		 (cond [(not (match_test "TARGET_SSE2"))
+			  (const_string "V4SF")
+			(match_test "TARGET_AVX")
			  (const_string "V2DF")
			(match_test "optimize_function_for_size_p (cfun)")
			  (const_string "V4SF")
@@ -3195,8 +2970,9 @@
		  chains, otherwise use short move to avoid extra work.
		  movaps encodes one byte shorter for !TARGET_AVX.  */
-	       (eq_attr "alternative" "6,10")
-		 (cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
+	       (eq_attr "alternative" "10,14")
+		 (cond [(ior (not (match_test "TARGET_SSE2"))
+			     (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL"))
			  (const_string "V4SF")
			(match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
			  (const_string "V2DF")
@@ -3210,11 +2986,18 @@
	       /* For architectures resolving dependencies on register
		  parts we may avoid extra work to zero out upper part
		  of register.  */
-	       (eq_attr "alternative" "7,11")
-		 (if_then_else
-		   (match_test "TARGET_SSE_SPLIT_REGS")
-		   (const_string "V1DF")
-		   (const_string "DF"))
+	       (eq_attr "alternative" "11,15")
+		 (cond [(not (match_test "TARGET_SSE2"))
+			  (const_string "V2SF")
+			(match_test "TARGET_SSE_SPLIT_REGS")
+			  (const_string "V1DF")
+		       ]
+		       (const_string "DF"))
+	       (eq_attr "alternative" "12,16")
+		 (if_then_else (match_test "TARGET_SSE2")
+		   (const_string "DF")
+		   (const_string "V2SF"))
	      ]
	      (const_string "DF")))])