Revert "[ARM] Fix PR85434: spilling of stack protector guard's address on ARM"

This reverts commit r263245.

From-SVN: r263252
Thomas Preud'homme 2018-08-02 11:16:05 +00:00
parent 0016d8d91c
commit a8b2130aee
11 changed files with 36 additions and 462 deletions

gcc/ChangeLog

@@ -89,44 +89,6 @@
(process_file): Move functions processing to
process_all_functions.
2018-08-02 Thomas Preud'homme <thomas.preudhomme@linaro.org>
PR target/85434
* target-insns.def (stack_protect_combined_set): Define new standard
pattern name.
(stack_protect_combined_test): Likewise.
* cfgexpand.c (stack_protect_prologue): Try new
stack_protect_combined_set pattern first.
* function.c (stack_protect_epilogue): Try new
stack_protect_combined_test pattern first.
* config/arm/arm.c (require_pic_register): Add pic_reg and compute_now
parameters to control which register to use as PIC register and force
reloading PIC register respectively. Insert in the stream of insns if
possible.
(legitimize_pic_address): Expose above new parameters in prototype and
adapt recursive calls accordingly.
(arm_legitimize_address): Adapt to new legitimize_pic_address
prototype.
(thumb_legitimize_address): Likewise.
(arm_emit_call_insn): Adapt to new require_pic_register prototype.
* config/arm/arm-protos.h (legitimize_pic_address): Adapt to prototype
change.
* config/arm/arm.md (movsi expander): Adapt to legitimize_pic_address
prototype change.
(stack_protect_combined_set): New insn_and_split pattern.
(stack_protect_set): New insn pattern.
(stack_protect_combined_test): New insn_and_split pattern.
(stack_protect_test): New insn pattern.
* config/arm/unspecs.md (UNSPEC_SP_SET): New unspec.
(UNSPEC_SP_TEST): Likewise.
* doc/md.texi (stack_protect_combined_set): Document new standard
pattern name.
(stack_protect_set): Clarify that the operand for guard's address is
legal.
(stack_protect_combined_test): Document new standard pattern name.
(stack_protect_test): Clarify that the operand for guard's address is
legal.
2018-08-02 David Malcolm <dmalcolm@redhat.com>
* dumpfile.c (dump_user_location_t::dump_user_location_t): Add

gcc/cfgexpand.c

@@ -6105,18 +6105,8 @@ stack_protect_prologue (void)
{
tree guard_decl = targetm.stack_protect_guard ();
rtx x, y;
struct expand_operand ops[2];
x = expand_normal (crtl->stack_protect_guard);
create_fixed_operand (&ops[0], x);
create_fixed_operand (&ops[1], DECL_RTL (guard_decl));
/* Allow the target to compute the address of Y and copy it to X without
leaking Y into a register. This combined address + copy pattern allows
the target to prevent spilling of any intermediate results by splitting
the pattern after register allocation. */
if (maybe_expand_insn (targetm.code_for_stack_protect_combined_set, 2, ops))
return;
if (guard_decl)
y = expand_normal (guard_decl);
else

gcc/config/arm/arm-protos.h

@@ -67,7 +67,7 @@ extern int const_ok_for_dimode_op (HOST_WIDE_INT, enum rtx_code);
extern int arm_split_constant (RTX_CODE, machine_mode, rtx,
HOST_WIDE_INT, rtx, rtx, int);
extern int legitimate_pic_operand_p (rtx);
extern rtx legitimize_pic_address (rtx, machine_mode, rtx, rtx, bool);
extern rtx legitimize_pic_address (rtx, machine_mode, rtx);
extern rtx legitimize_tls_address (rtx, rtx);
extern bool arm_legitimate_address_p (machine_mode, rtx, bool);
extern int arm_legitimate_address_outer_p (machine_mode, rtx, RTX_CODE, int);

gcc/config/arm/arm.c

@@ -7369,26 +7369,20 @@ legitimate_pic_operand_p (rtx x)
}
/* Record that the current function needs a PIC register. Initialize
cfun->machine->pic_reg if we have not already done so.
A new pseudo register is used for the PIC register if possible, otherwise
PIC_REG must be non-NULL and is used instead. COMPUTE_NOW forces the PIC
register to be loaded, regardless of whether it was loaded previously. */
cfun->machine->pic_reg if we have not already done so. */
static void
require_pic_register (rtx pic_reg, bool compute_now)
require_pic_register (void)
{
/* A lot of the logic here is made obscure by the fact that this
routine gets called as part of the rtx cost estimation process.
We don't want those calls to affect any assumptions about the real
function; and further, we can't call entry_of_function() until we
start the real expansion process. */
if (!crtl->uses_pic_offset_table || compute_now)
if (!crtl->uses_pic_offset_table)
{
gcc_assert (can_create_pseudo_p ()
|| (pic_reg != NULL_RTX && GET_MODE (pic_reg) == Pmode));
gcc_assert (can_create_pseudo_p ());
if (arm_pic_register != INVALID_REGNUM
&& can_create_pseudo_p ()
&& !(TARGET_THUMB1 && arm_pic_register > LAST_LO_REGNUM))
{
if (!cfun->machine->pic_reg)
@@ -7405,8 +7399,7 @@ require_pic_register (rtx pic_reg, bool compute_now)
rtx_insn *seq, *insn;
if (!cfun->machine->pic_reg)
cfun->machine->pic_reg =
can_create_pseudo_p () ? gen_reg_rtx (Pmode) : pic_reg;
cfun->machine->pic_reg = gen_reg_rtx (Pmode);
/* Play games to avoid marking the function as needing pic
if we are being called as part of the cost-estimation
@@ -7417,8 +7410,7 @@ require_pic_register (rtx pic_reg, bool compute_now)
start_sequence ();
if (TARGET_THUMB1 && arm_pic_register != INVALID_REGNUM
&& arm_pic_register > LAST_LO_REGNUM
&& can_create_pseudo_p ())
&& arm_pic_register > LAST_LO_REGNUM)
emit_move_insn (cfun->machine->pic_reg,
gen_rtx_REG (Pmode, arm_pic_register));
else
@@ -7435,29 +7427,15 @@ require_pic_register (rtx pic_reg, bool compute_now)
we can't yet emit instructions directly in the final
insn stream. Queue the insns on the entry edge; they will
be committed after everything else is expanded. */
if (currently_expanding_to_rtl)
insert_insn_on_edge (seq,
single_succ_edge
(ENTRY_BLOCK_PTR_FOR_FN (cfun)));
else
emit_insn (seq);
insert_insn_on_edge (seq,
single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
}
}
}
}
/* Legitimize PIC load to ORIG into REG. If REG is NULL, a new pseudo is
created to hold the result of the load. If not NULL, PIC_REG indicates
which register to use as PIC register, otherwise it is decided by register
allocator. COMPUTE_NOW forces the PIC register to be loaded at the current
location in the instruction stream, regardless of whether it was loaded
previously.
Returns the register REG into which the PIC load is performed. */
rtx
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg, rtx pic_reg,
bool compute_now)
legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
{
if (GET_CODE (orig) == SYMBOL_REF
|| GET_CODE (orig) == LABEL_REF)
@@ -7491,7 +7469,7 @@ legitimize_pic_address (rtx orig, machine_mode mode, rtx reg, rtx pic_reg,
rtx mem;
/* If this function doesn't have a pic register, create one now. */
require_pic_register (pic_reg, compute_now);
require_pic_register ();
pat = gen_calculate_pic_address (reg, cfun->machine->pic_reg, orig);
@@ -7542,11 +7520,9 @@ legitimize_pic_address (rtx orig, machine_mode mode, rtx reg, rtx pic_reg,
gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg,
pic_reg, compute_now);
base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
base == reg ? 0 : reg, pic_reg,
compute_now);
base == reg ? 0 : reg);
if (CONST_INT_P (offset))
{
@@ -8731,8 +8707,7 @@ arm_legitimize_address (rtx x, rtx orig_x, machine_mode mode)
{
/* We need to find and carefully transform any SYMBOL and LABEL
references; so go back to the original address expression. */
rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX, NULL_RTX,
false /*compute_now*/);
rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
if (new_x != orig_x)
x = new_x;
@@ -8800,8 +8775,7 @@ thumb_legitimize_address (rtx x, rtx orig_x, machine_mode mode)
{
/* We need to find and carefully transform any SYMBOL and LABEL
references; so go back to the original address expression. */
rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX, NULL_RTX,
false /*compute_now*/);
rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
if (new_x != orig_x)
x = new_x;
@@ -18085,7 +18059,7 @@ arm_emit_call_insn (rtx pat, rtx addr, bool sibcall)
? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
: !SYMBOL_REF_LOCAL_P (addr)))
{
require_pic_register (NULL_RTX, false /*compute_now*/);
require_pic_register ();
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), cfun->machine->pic_reg);
}

gcc/config/arm/arm.md

@@ -6021,8 +6021,7 @@
operands[1] = legitimize_pic_address (operands[1], SImode,
(!can_create_pseudo_p ()
? operands[0]
: NULL_RTX), NULL_RTX,
false /*compute_now*/);
: 0));
}
"
)
@@ -8635,95 +8634,6 @@
(set_attr "conds" "clob")]
)
;; Named patterns for stack smashing protection.
(define_insn_and_split "stack_protect_combined_set"
[(set (match_operand:SI 0 "memory_operand" "=m")
(unspec:SI [(match_operand:SI 1 "memory_operand" "X")]
UNSPEC_SP_SET))
(match_scratch:SI 2 "=r")
(match_scratch:SI 3 "=r")]
""
"#"
"reload_completed"
[(parallel [(set (match_dup 0) (unspec:SI [(mem:SI (match_dup 2))]
UNSPEC_SP_SET))
(clobber (match_dup 2))])]
"
{
rtx addr = XEXP (operands[1], 0);
if (flag_pic)
{
/* Force recomputing of the GOT base now. */
operands[1] = legitimize_pic_address (addr, SImode, operands[2],
operands[3], true /*compute_now*/);
}
else
{
if (!address_operand (addr, SImode))
operands[1] = force_const_mem (SImode, addr);
emit_move_insn (operands[2], operands[1]);
}
}"
)
(define_insn "stack_protect_set"
[(set (match_operand:SI 0 "memory_operand" "=m")
(unspec:SI [(mem:SI (match_operand:SI 1 "register_operand" "r"))]
UNSPEC_SP_SET))
(clobber (match_dup 1))]
""
"ldr\\t%1, [%1]\;str\\t%1, %0\;mov\t%1,0"
[(set_attr "length" "12")
(set_attr "type" "multiple")])
(define_insn_and_split "stack_protect_combined_test"
[(set (pc)
(if_then_else
(eq (match_operand:SI 0 "memory_operand" "m")
(unspec:SI [(match_operand:SI 1 "memory_operand" "X")]
UNSPEC_SP_TEST))
(label_ref (match_operand 2))
(pc)))
(match_scratch:SI 3 "=r")
(match_scratch:SI 4 "=r")]
""
"#"
"reload_completed"
[(const_int 0)]
{
rtx eq, addr;
addr = XEXP (operands[1], 0);
if (flag_pic)
{
/* Force recomputing of the GOT base now. */
operands[1] = legitimize_pic_address (addr, SImode, operands[3],
operands[4],
true /*compute_now*/);
}
else
{
if (!address_operand (addr, SImode))
operands[1] = force_const_mem (SImode, addr);
emit_move_insn (operands[3], operands[1]);
}
emit_insn (gen_stack_protect_test (operands[4], operands[0], operands[3]));
eq = gen_rtx_EQ (VOIDmode, operands[4], const0_rtx);
emit_jump_insn (gen_cbranchsi4 (eq, operands[4], const0_rtx, operands[2]));
DONE;
})
(define_insn "stack_protect_test"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand:SI 1 "memory_operand" "m")
(mem:SI (match_operand:SI 2 "register_operand" "r"))]
UNSPEC_SP_TEST))
(clobber (match_dup 2))]
""
"ldr\t%0, [%2]\;ldr\t%2, %1\;eor\t%0, %2, %0"
[(set_attr "length" "12")
(set_attr "type" "multiple")])
(define_expand "casesi"
[(match_operand:SI 0 "s_register_operand" "") ; index to jump on
(match_operand:SI 1 "const_int_operand" "") ; lower bound

gcc/config/arm/unspecs.md

@@ -86,9 +86,6 @@
UNSPEC_PROBE_STACK ; Probe stack memory reference
UNSPEC_NONSECURE_MEM ; Represent non-secure memory in ARMv8-M with
; security extension
UNSPEC_SP_SET ; Represent the setting of stack protector's canary
UNSPEC_SP_TEST ; Represent the testing of stack protector's canary
; against the guard.
])
(define_c_enum "unspec" [

gcc/doc/md.texi

@@ -7388,61 +7388,22 @@ builtins.
The get/set patterns have a single output/input operand respectively,
with @var{mode} intended to be @code{Pmode}.
@cindex @code{stack_protect_combined_set} instruction pattern
@item @samp{stack_protect_combined_set}
This pattern, if defined, moves a @code{ptr_mode} value from an address
whose declaration RTX is given in operand 1 to the memory in operand 0
without leaving the value in a register afterward. If several
instructions are needed by the target to perform the operation (e.g. to
load the address from a GOT entry then load the @code{ptr_mode} value
and finally store it), it is the backend's responsibility to ensure no
intermediate result gets spilled. This is to avoid leaking the value
some place that an attacker might use to rewrite the stack guard slot
after having clobbered it.
If this pattern is not defined, then the address declaration is
expanded first in the standard way and a @code{stack_protect_set}
pattern is then generated to move the value from that address to the
address in operand 0.
@cindex @code{stack_protect_set} instruction pattern
@item @samp{stack_protect_set}
This pattern, if defined, moves a @code{ptr_mode} value from the valid
memory location in operand 1 to the memory in operand 0 without leaving
the value in a register afterward. This is to avoid leaking the value
some place that an attacker might use to rewrite the stack guard slot
after having clobbered it.
Note: on targets whose addressing modes do not allow loading directly
from the stack guard's address, the address is first expanded in a
standard way, which could cause some spills.
This pattern, if defined, moves a @code{ptr_mode} value from the memory
in operand 1 to the memory in operand 0 without leaving the value in
a register afterward. This is to avoid leaking the value some place
that an attacker might use to rewrite the stack guard slot after
having clobbered it.
If this pattern is not defined, then a plain move pattern is generated.
@cindex @code{stack_protect_combined_test} instruction pattern
@item @samp{stack_protect_combined_test}
This pattern, if defined, compares a @code{ptr_mode} value from an
address whose declaration RTX is given in operand 1 with the memory in
operand 0 without leaving the value in a register afterward and
branches to operand 2 if the values were equal. If several
instructions are needed by the target to perform the operation (e.g. to
load the address from a GOT entry then load the @code{ptr_mode} value
and finally compare it), it is the backend's responsibility to ensure no
intermediate result gets spilled. This is to avoid leaking the value
some place that an attacker might use to rewrite the stack guard slot
after having clobbered it.
If this pattern is not defined, then the address declaration is
expanded first in the standard way and a @code{stack_protect_test}
pattern is then generated to compare the value from that address to the
value at the memory in operand 0.
@cindex @code{stack_protect_test} instruction pattern
@item @samp{stack_protect_test}
This pattern, if defined, compares a @code{ptr_mode} value from the
valid memory location in operand 1 with the memory in operand 0 without
leaving the value in a register afterward and branches to operand 2 if
the values were equal.
memory in operand 1 with the memory in operand 0 without leaving the
value in a register afterward and branches to operand 2 if the values
were equal.
If this pattern is not defined, then a plain compare pattern and
conditional branch pattern is used.
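
To make concrete when these patterns fire, here is a minimal illustrative C function (the function name and flags are only an example, not part of the patch): compiled with -fstack-protector-strong, the local array makes the function protected, so the compiler expands a guard store in the prologue and a guard check in the epilogue, trying the combined set/test patterns first when the target defines them.

#include <string.h>

/* Illustrative only: with -fstack-protector-strong, the local array
   makes this function protected, so a stack_protect_set (or
   stack_protect_combined_set) is expanded in the prologue and a
   stack_protect_test (or stack_protect_combined_test) in the
   epilogue.  */
int
copy_and_sum (const char *s)
{
  char buf[64];
  int i, sum = 0;

  strncpy (buf, s, sizeof buf - 1);
  buf[sizeof buf - 1] = '\0';
  for (i = 0; buf[i] != '\0'; i++)
    sum += buf[i];
  return sum;
}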

gcc/function.c

@@ -4893,33 +4893,20 @@ stack_protect_epilogue (void)
rtx_code_label *label = gen_label_rtx ();
rtx x, y;
rtx_insn *seq;
struct expand_operand ops[3];
x = expand_normal (crtl->stack_protect_guard);
create_fixed_operand (&ops[0], x);
create_fixed_operand (&ops[1], DECL_RTL (guard_decl));
create_fixed_operand (&ops[2], label);
/* Allow the target to compute the address of Y and compare it with X without
leaking Y into a register. This combined address + compare pattern allows
the target to prevent spilling of any intermediate results by splitting
the pattern after register allocation. */
if (!maybe_expand_jump_insn (targetm.code_for_stack_protect_combined_test,
3, ops))
{
if (guard_decl)
y = expand_normal (guard_decl);
else
y = const0_rtx;
if (guard_decl)
y = expand_normal (guard_decl);
else
y = const0_rtx;
/* Allow the target to compare Y with X without leaking either into
a register. */
if (targetm.have_stack_protect_test ()
&& ((seq = targetm.gen_stack_protect_test (x, y, label))
!= NULL_RTX))
emit_insn (seq);
else
emit_cmp_and_jump_insns (x, y, EQ, NULL_RTX, ptr_mode, 1, label);
}
/* Allow the target to compare Y with X without leaking either into
a register. */
if (targetm.have_stack_protect_test ()
&& ((seq = targetm.gen_stack_protect_test (x, y, label)) != NULL_RTX))
emit_insn (seq);
else
emit_cmp_and_jump_insns (x, y, EQ, NULL_RTX, ptr_mode, 1, label);
/* The noreturn predictor has been moved to the tree level. The rtl-level
predictors estimate this branch about 20%, which isn't enough to get

gcc/target-insns.def

@@ -96,9 +96,7 @@ DEF_TARGET_INSN (sibcall_value, (rtx x0, rtx x1, rtx opt2, rtx opt3,
DEF_TARGET_INSN (simple_return, (void))
DEF_TARGET_INSN (split_stack_prologue, (void))
DEF_TARGET_INSN (split_stack_space_check, (rtx x0, rtx x1))
DEF_TARGET_INSN (stack_protect_combined_set, (rtx x0, rtx x1))
DEF_TARGET_INSN (stack_protect_set, (rtx x0, rtx x1))
DEF_TARGET_INSN (stack_protect_combined_test, (rtx x0, rtx x1, rtx x2))
DEF_TARGET_INSN (stack_protect_test, (rtx x0, rtx x1, rtx x2))
DEF_TARGET_INSN (store_multiple, (rtx x0, rtx x1, rtx x2))
DEF_TARGET_INSN (tablejump, (rtx x0, rtx x1))

gcc/testsuite/ChangeLog

@@ -15,11 +15,6 @@
PR target/86014
* gcc.target/aarch64/ldp_stp_13.c: New test.
2018-08-02 Thomas Preud'homme <thomas.preudhomme@linaro.org>
PR target/85434
* gcc.target/arm/pr85434.c: New test.
2018-08-01 Martin Sebor <msebor@redhat.com>
PR tree-optimization/86650

gcc/testsuite/gcc.target/arm/pr85434.c

@@ -1,200 +0,0 @@
/* { dg-do compile } */
/* { dg-require-effective-target fstack_protector }*/
/* { dg-require-effective-target fpic }*/
/* { dg-additional-options "-Os -fpic -fstack-protector-strong" } */
#include <stddef.h>
#include <stdint.h>
static const unsigned char base64_enc_map[64] =
{
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', '+', '/'
};
#define BASE64_SIZE_T_MAX ( (size_t) -1 ) /* SIZE_T_MAX is not standard */
void doSmth(void *x);
#include <string.h>
void check(int n) {
if (!(n % 2 && n % 3 && n % 5)) {
__asm__ ( "add r8, r8, #1;" );
}
}
uint32_t test(
uint32_t a1,
uint32_t a2,
size_t a3,
size_t a4,
size_t a5,
size_t a6)
{
uint32_t nResult = 0;
uint8_t* h = 0L;
uint8_t X[128];
uint8_t mac[64];
size_t len;
doSmth(&a1);
doSmth(&a2);
doSmth(&a3);
doSmth(&a4);
doSmth(&a5);
doSmth(&a6);
if (a1 && a2 && a3 && a4 && a5 && a6) {
nResult = 1;
h = (void*)X;
len = sizeof(X);
memset(X, a2, len);
len -= 64;
memcpy(mac ,X, len);
*(h + len) = a6;
{
unsigned char *dst = X;
size_t dlen = a3;
size_t *olen = &a6;
const unsigned char *src = mac;
size_t slen = a4;
size_t i, n;
int C1, C2, C3;
unsigned char *p;
if( slen == 0 )
{
*olen = 0;
return( 0 );
}
n = slen / 3 + ( slen % 3 != 0 );
if( n > ( BASE64_SIZE_T_MAX - 1 ) / 4 )
{
*olen = BASE64_SIZE_T_MAX;
return( 0 );
}
n *= 4;
if( ( dlen < n + 1 ) || ( NULL == dst ) )
{
*olen = n + 1;
return( 0 );
}
n = ( slen / 3 ) * 3;
for( i = 0, p = dst; i < n; i += 3 )
{
C1 = *src++;
C2 = *src++;
C3 = *src++;
check(i);
*p++ = base64_enc_map[(C1 >> 2) & 0x3F];
*p++ = base64_enc_map[(((C1 & 3) << 4) + (C2 >> 4)) & 0x3F];
*p++ = base64_enc_map[(((C2 & 15) << 2) + (C3 >> 6)) & 0x3F];
*p++ = base64_enc_map[C3 & 0x3F];
}
if( i < slen )
{
C1 = *src++;
C2 = ( ( i + 1 ) < slen ) ? *src++ : 0;
*p++ = base64_enc_map[(C1 >> 2) & 0x3F];
*p++ = base64_enc_map[(((C1 & 3) << 4) + (C2 >> 4)) & 0x3F];
if( ( i + 1 ) < slen )
*p++ = base64_enc_map[((C2 & 15) << 2) & 0x3F];
else *p++ = '=';
*p++ = '=';
}
*olen = p - dst;
*p = 0;
}
__asm__ ("mov r8, %0;" : "=r" ( nResult ));
}
else
{
nResult = 2;
}
doSmth(X);
doSmth(mac);
return nResult;
}
/* The pattern below catches sequences of instructions that were generated
for ARM and Thumb-2 before the fix for this PR. They are of the form:
ldr rX, <offset from sp or fp>
<optional non ldr instructions>
ldr rY, <offset from sp or fp>
ldr rZ, [rX]
<optional non ldr instructions>
cmp rY, rZ
<optional non cmp instructions>
bl __stack_chk_fail
Ideally the optional blocks would check that the various rX, rY and rZ
registers are not set, but back references are illegal in Tcl lookahead
expressions, which prevents the only construct able to negate a regexp
from using back references to those registers. Instead we use the
heuristic of allowing non-ldr/cmp instructions, on the assumptions that
(i) they are not part of the stack protector sequence and (ii) they
would only be scheduled here if they do not conflict with the registers
the stack protector uses.
Note on the regexp logic:
Non-X instructions (where X is ldr or cmp) are allowed by matching some
non-newline spaces, followed by something which is not X, followed by an
alphanumeric character, then anything but a newline, ended by a newline,
the whole thing repeated an undetermined number of times. The
alphanumeric character forces the match of the negative lookahead for X
to happen only after all the initial spaces, and thus to check the
mnemonic; this prevents it from matching one of the initial spaces.  */
/* { dg-final { scan-assembler-not {ldr[ \t]+([^,]+), \[(?:sp|fp)[^]]*\](?:\n[ \t]+(?!ldr)\w[^\n]*)*\n[ \t]+ldr[ \t]+([^,]+), \[(?:sp|fp)[^]]*\]\n[ \t]+ldr[ \t]+([^,]+), \[\1\](?:\n[ \t]+(?!ldr)\w[^\n]*)*\n[ \t]+cmp[ \t]+\2, \3(?:\n[ \t]+(?!cmp)\w[^\n]*)*\n[ \t]+bl[ \t]+__stack_chk_fail} } } */
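/* A hedged illustration of the lookahead trick used above (the register
   names and instructions are invented for the example, not taken from
   real compiler output); it can be run directly in tclsh:

     set re {ldr[ \t]+[^\n]*(?:\n[ \t]+(?!ldr)\w[^\n]*)*\n[ \t]+cmp}
     set asm "\tldr r0, \[sp\]\n\tmov r1, #0\n\tcmp r0, r1"
     puts [regexp $re $asm]   ;# prints 1: the mov line is accepted by
                              ;# the (?!ldr) branch, so the match still
                              ;# reaches the cmp

   Without the (?:...)* group the same input would not match, because
   the mov instruction sits between the ldr and the cmp.  */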
/* Likewise for Thumb-1 sequences of instructions prior to the fix for this PR
which had the form:
ldr rS, <offset from sp or fp>
<optional non ldr instructions>
ldr rT, <PC relative offset>
<optional non ldr instructions>
ldr rX, [rS, rT]
<optional non ldr instructions>
ldr rY, <offset from sp or fp>
ldr rZ, [rX]
<optional non ldr instructions>
cmp rY, rZ
<optional non cmp instructions>
bl __stack_chk_fail
Note on the regexp logic:
The PC-relative offset is checked by looking for a source operand that
does not contain [ or ].  */
/* { dg-final { scan-assembler-not {ldr[ \t]+([^,]+), \[(?:sp|fp)[^]]*\](?:\n[ \t]+(?!ldr)\w[^\n]*)*\n[ \t]+ldr[ \t]+([^,]+), [^][\n]*(?:\n[ \t]+(?!ldr)\w[^\n]*)*\n[ \t]+ldr[ \t]+([^,]+), \[\1, \2\](?:\n[ \t]+(?!ldr)\w[^\n]*)*\n[ \t]+ldr[ \t]+([^,]+), \[(?:sp|fp)[^]]*\]\n[ \t]+ldr[ \t]+([^,]+), \[\3\](?:\n[ \t]+(?!ldr)\w[^\n]*)*\n[ \t]+cmp[ \t]+\4, \5(?:\n[ \t]+(?!cmp)\w[^\n]*)*\n[ \t]+bl[ \t]+__stack_chk_fail} } } */