From a720b48e18470367633194fff2107ba0b131d3bb Mon Sep 17 00:00:00 2001
From: "H.J. Lu"
Date: Tue, 12 Nov 2013 13:52:08 +0000
Subject: [PATCH] Turn on SSE unaligned load and store for Haswell

	PR target/59088
	* config/i386/x86-tune.def (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL):
	Set for m_HASWELL.
	(X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL): Set for m_HASWELL.

From-SVN: r204701
---
 gcc/ChangeLog                | 7 +++++++
 gcc/config/i386/x86-tune.def | 4 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 3cdf24779c6..6667b33ea2d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2013-11-12  H.J. Lu
+
+	PR target/59088
+	* config/i386/x86-tune.def (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL):
+	Set for m_HASWELL.
+	(X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL): Set for m_HASWELL.
+
 2013-11-12  H.J. Lu
 
 	PR target/59084
diff --git a/gcc/config/i386/x86-tune.def b/gcc/config/i386/x86-tune.def
index 54867d2f31f..4c13c3a0ec6 100644
--- a/gcc/config/i386/x86-tune.def
+++ b/gcc/config/i386/x86-tune.def
@@ -318,12 +318,12 @@ DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill",
 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads instead
    of a sequence loading registers by parts.  */
 DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
-          m_COREI7 | m_COREI7_AVX | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM | m_GENERIC)
+          m_COREI7 | m_COREI7_AVX | m_HASWELL | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM | m_GENERIC)
 
 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores instead
    of a sequence loading registers by parts.  */
 DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
-          m_COREI7 | m_COREI7_AVX | m_BDVER | m_SLM | m_GENERIC)
+          m_COREI7 | m_COREI7_AVX | m_HASWELL | m_BDVER | m_SLM | m_GENERIC)
 
 /* Use packed single precision instructions where posisble.  I.e. movups instead
    of movupd.  */
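
Editor's note (not part of the patch): below is a minimal sketch of the kind of
source these tuning bits influence.  The function and flags shown are
illustrative assumptions, not taken from the commit; the exact instruction
selection depends on alignment information and other tuning settings.

/* Sketch: a vectorizable loop over pointers whose alignment is unknown at
   compile time.  When the i386 backend considers
   X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL / X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL
   set for the selected tuning (e.g. -O2 -ftree-vectorize -mtune=haswell after
   this change), it may emit single movups-style unaligned vector moves instead
   of splitting each 128-bit access into two half-register moves.  */

void
add_arrays (float *a, const float *b, const float *c, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = b[i] + c[i];
}

One quick way to inspect the effect is to compare the assembly produced with
-mtune=haswell before and after the change (for example via gcc -S -O2
-ftree-vectorize) and look at how the unaligned vector loads and stores in the
vectorized loop body are expanded.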