diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index b0ebfbac70e..b2b6aa5b567 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2014-07-31  James Greenhalgh  <james.greenhalgh@arm.com>
+
+	* config/aarch64/aarch64-builtins.c
+	(aarch64_gimple_fold_builtin): Don't fold reduction operations for
+	BYTES_BIG_ENDIAN.
+
 2014-07-31  James Greenhalgh  <james.greenhalgh@arm.com>
 
 	* config/aarch64/aarch64.c (aarch64_simd_vect_par_cnst_half): Vary
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index fee17ecf637..58db77e91c6 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -1383,6 +1383,20 @@ aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
   tree call = gimple_call_fn (stmt);
   tree fndecl;
   gimple new_stmt = NULL;
+
+  /* The operations folded below are reduction operations.  These are
+     defined to leave their result in the 0'th element (from the perspective
+     of GCC).  The architectural instruction we are folding will leave the
+     result in the 0'th element (from the perspective of the architecture).
+     For big-endian systems, these perspectives are not aligned.
+
+     It is therefore wrong to perform this fold on big-endian.  There
+     are some tricks we could play with shuffling, but the mid-end is
+     inconsistent in the way it treats reduction operations, so we will
+     end up in difficulty.  Until we fix the ambiguity - just bail out.  */
+  if (BYTES_BIG_ENDIAN)
+    return false;
+
   if (call)
     {
       fndecl = gimple_call_fndecl (stmt);
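
Not part of the patch: a minimal standalone sketch of the reference semantics
the guarded fold must preserve, written against GCC's generic vector extension.
The function name reduce_add and the test values are illustrative assumptions,
not code from this patch or from aarch64-builtins.c.

/* reduce_add computes the add-reduction in GCC's own element numbering:
   v[0] is GCC's 0'th element regardless of endianness.  The fold that
   this patch disables for BYTES_BIG_ENDIAN assumed the hardware result
   (ADDV writes architectural lane 0) could be read back as GCC's
   element 0; on big-endian AArch64, GCC's element 0 maps to the
   opposite end of the register from architectural lane 0, so that
   extraction would read the wrong lane.  */

#include <stdint.h>
#include <stdio.h>

typedef int32_t v4si __attribute__ ((vector_size (16)));

static int32_t
reduce_add (v4si v)
{
  int32_t sum = 0;
  for (int i = 0; i < 4; i++)
    sum += v[i];	/* GCC element numbering, endian-neutral.  */
  return sum;
}

int
main (void)
{
  v4si v = {1, 2, 3, 4};
  printf ("%d\n", reduce_add (v));	/* Prints 10 on either endianness.  */
  return 0;
}

Any correct fold of a reduction builtin has to match this scalar loop on both
endiannesses; bailing out on big-endian keeps the builtin expanding through
the backend patterns, which already account for the lane-numbering mismatch.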