Minor formatting changes.

Add definitions of ASM_SPEC and SUBTARGET_EXTRA_ASM_SPEC to arm.h if not
already defined.

From-SVN: r32810
Author: Nick Clifton <nickc@cygnus.com>
Date: 2000-03-29 19:15:36 +00:00 (committed by Nick Clifton)
Parent: 0194e877a3
Commit: 6354dc9bf8
3 changed files with 145 additions and 141 deletions
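The change uses GCC's standard target-header idiom: a subtarget header included ahead of arm.h may supply its own assembler spec fragment, and arm.h now provides fallback definitions only when nothing was defined earlier. A minimal sketch of the pattern (illustration only, not part of the commit; the -mfoo option and the subtarget fragment are invented):

  /* Hypothetical subtarget header, included before arm.h.  */
  #define SUBTARGET_EXTRA_ASM_SPEC "%{mfoo:-mfoo}"

  /* arm.h supplies an empty fragment only if the subtarget defined none.  */
  #ifndef SUBTARGET_EXTRA_ASM_SPEC
  #define SUBTARGET_EXTRA_ASM_SPEC
  #endif

  /* The fragment is spliced onto ASM_SPEC by macro expansion followed by
     string-literal concatenation; an empty fragment leaves ASM_SPEC as the
     bare literal.  */
  #ifndef ASM_SPEC
  #define ASM_SPEC "%{mbig-endian:-EB} " SUBTARGET_EXTRA_ASM_SPEC
  #endif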

gcc/ChangeLog

@@ -1,3 +1,10 @@
+2000-03-29 Nick Clifton <nickc@cygnus.com>
+
+* config/arm/arm.c: Minor formatting changes.
+* config/arm/arm.h (SUBTARGET_EXTRA_ASM_SPEC): Define if not
+already defined.
+(ASM_SPEC): Define if not already defined.
+
2000-03-29 Zack Weinberg <zack@wolery.cumb.org>

* cppfiles.c (cpp_read_file): Don't pass zero-length string to

gcc/config/arm/arm.c

@@ -64,7 +64,7 @@ static int eliminate_lr2ip PARAMS ((rtx *));
static char * shift_op PARAMS ((rtx, HOST_WIDE_INT *));
static int pattern_really_clobbers_lr PARAMS ((rtx));
static int function_really_clobbers_lr PARAMS ((rtx));
-static rtx emit_multi_reg_push PARAMS ((int));
+static rtx emit_multi_reg_push PARAMS ((int));
static rtx emit_sfm PARAMS ((int, int));
static enum arm_cond_code get_arm_condition_code PARAMS ((rtx));
static int const_ok_for_op PARAMS ((HOST_WIDE_INT, enum rtx_code));
@@ -76,23 +76,23 @@ static int max_insns_skipped = 5;
extern FILE * asm_out_file;
-/* True if we are currently building a constant table. */
+/* True if we are currently building a constant table. */
int making_const_table;
/* Define the information needed to generate branch insns. This is
-stored from the compare operation. */
+stored from the compare operation. */
rtx arm_compare_op0, arm_compare_op1;
-/* What type of floating point are we tuning for? */
+/* What type of floating point are we tuning for? */
enum floating_point_type arm_fpu;
-/* What type of floating point instructions are available? */
+/* What type of floating point instructions are available? */
enum floating_point_type arm_fpu_arch;
-/* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
+/* What program mode is the cpu running in? 26-bit mode or 32-bit mode. */
enum prog_mode_type arm_prgmode;
-/* Set by the -mfp=... option */
+/* Set by the -mfp=... option. */
const char * target_fp_name = NULL;
/* Used to parse -mstructure_size_boundary command line option. */
@@ -125,10 +125,10 @@ static int tune_flags = 0;
/* Nonzero if this is an "M" variant of the processor. */
int arm_fast_multiply = 0;
-/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+/* Nonzero if this chip supports the ARM Architecture 4 extensions. */
int arm_arch4 = 0;
-/* Nonzero if this chip supports the ARM Architecture 5 extensions */
+/* Nonzero if this chip supports the ARM Architecture 5 extensions. */
int arm_arch5 = 0;
/* Nonzero if this chip can benefit from load scheduling. */
@@ -153,11 +153,11 @@ const char * arm_pic_register_string = NULL;
int arm_pic_register = 9;
/* Set to one if we think that lr is only saved because of subroutine calls,
-but all of these can be `put after' return insns */
+but all of these can be `put after' return insns. */
int lr_save_eliminated;
/* Set to 1 when a return insn is output, this means that the epilogue
-is not needed. */
+is not needed. */
static int return_used_this_function;
/* Set to 1 after arm_reorg has started. Reset to start at the start of
@@ -184,7 +184,7 @@ static enum arm_cond_code get_arm_condition_code ();
#define streq(string1, string2) (strcmp (string1, string2) == 0)
-/* Initialization code */
+/* Initialization code. */
struct processors
{
@@ -308,7 +308,7 @@ arm_override_options ()
/* If we have been given an architecture and a processor
make sure that they are compatible. We only generate
a warning though, and we prefer the CPU over the
-architecture. */
+architecture. */
if (insn_flags != 0 && (insn_flags ^ sel->flags))
warning ("switch -mcpu=%s conflicts with -march= switch",
ptr->string);
@@ -604,8 +604,7 @@ arm_add_gc_roots ()
}
-/* Return 1 if it is possible to return using a single instruction */
+/* Return 1 if it is possible to return using a single instruction. */
int
use_return_insn (iscond)
int iscond;
@@ -636,7 +635,7 @@ use_return_insn (iscond)
}
/* Can't be done if any of the FPU regs are pushed, since this also
-requires an insn */
+requires an insn. */
for (regno = 16; regno < 24; regno++)
if (regs_ever_live[regno] && ! call_used_regs[regno])
return 0;
@@ -680,7 +679,7 @@ const_ok_for_arm (i)
return FALSE;
}
-/* Return true if I is a valid constant for the operation CODE. */
+/* Return true if I is a valid constant for the operation CODE. */
static int
const_ok_for_op (i, code)
HOST_WIDE_INT i;
@@ -895,7 +894,7 @@ arm_gen_constant (code, mode, val, target, source, subtargets, generate)
abort ();
}
-/* If we can do it in one insn get out quickly */
+/* If we can do it in one insn get out quickly. */
if (const_ok_for_arm (val)
|| (can_negate_initial && const_ok_for_arm (-val))
|| (can_invert && const_ok_for_arm (~val)))
@@ -908,10 +907,8 @@ arm_gen_constant (code, mode, val, target, source, subtargets, generate)
return 1;
}
/* Calculate a few attributes that may be useful for specific
-optimizations. */
+optimizations. */
for (i = 31; i >= 0; i--)
{
if ((remainder & (1 << i)) == 0)
@@ -993,7 +990,7 @@ arm_gen_constant (code, mode, val, target, source, subtargets, generate)
temp1 = remainder & 0xffff0000UL;
temp2 = remainder & 0x0000ffff;
-/* Overlaps outside this range are best done using other methods. */
+/* Overlaps outside this range are best done using other methods. */
for (i = 9; i < 24; i++)
{
if ((((temp2 | (temp2 << i)) & 0xffffffffUL) == remainder)
@@ -1016,7 +1013,7 @@ arm_gen_constant (code, mode, val, target, source, subtargets, generate)
}
}
-/* Don't duplicate cases already considered. */
+/* Don't duplicate cases already considered. */
for (i = 17; i < 24; i++)
{
if (((temp1 | (temp1 >> i)) == remainder)
@@ -1227,7 +1224,7 @@ arm_gen_constant (code, mode, val, target, source, subtargets, generate)
We start by looking for the largest block of zeros that are aligned on
a 2-bit boundary, we then fill up the temps, wrapping around to the
top of the word when we drop off the bottom.
-In the worst case this code should produce no more than four insns. */
+In the worst case this code should produce no more than four insns. */
{
int best_start = 0;
int best_consecutive_zeros = 0;
@@ -1254,7 +1251,7 @@ arm_gen_constant (code, mode, val, target, source, subtargets, generate)
/* Now start emitting the insns, starting with the one with the highest
bit set: we do this so that the smallest number will be emitted last;
-this is more likely to be combinable with addressing insns. */
+this is more likely to be combinable with addressing insns. */
i = best_start;
do
{
@@ -1466,8 +1463,7 @@ arm_return_in_memory (type)
return 0;
}
-/* XXX Not sure what should be done for
-other aggregates so put them in memory. */
+/* Put other aggregates in memory. */
return 1;
}
@@ -1648,8 +1644,8 @@ arm_encode_call_attribute (decl, flag)
char flag;
{
const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
-int len = strlen (str);
-char * newstr;
+int len = strlen (str);
+char * newstr;
if (TREE_CODE (decl) != FUNCTION_DECL)
return;
@@ -1809,8 +1805,7 @@ legitimize_pic_address (orig, mode, reg)
#ifdef AOF_ASSEMBLER
/* The AOF assembler can generate relocations for these directly, and
-understands that the PIC register has to be added into the offset.
-*/
+understands that the PIC register has to be added into the offset. */
insn = emit_insn (gen_pic_load_addr_based (reg, orig));
#else
if (subregs)
@@ -2103,7 +2098,7 @@ arm_rtx_costs (x, code)
case MULT:
/* There is no point basing this on the tuning, since it is always the
-fast variant if it exists at all */
+fast variant if it exists at all. */
if (arm_fast_multiply && mode == DImode
&& (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
&& (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
@@ -2120,7 +2115,8 @@ arm_rtx_costs (x, code)
& (unsigned HOST_WIDE_INT) 0xffffffffUL);
int add_cost = const_ok_for_arm (i) ? 4 : 8;
int j;
-/* Tune as appropriate */
+/* Tune as appropriate. */
int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
for (j = 0; i && j < 32; j += booth_unit_size)
@@ -2206,7 +2202,7 @@ arm_adjust_cost (insn, link, dep, cost)
{
rtx i_pat, d_pat;
-/* XXX This is not strictly true for the FPA. */
+/* XXX This is not strictly true for the FPA. */
if (REG_NOTE_KIND(link) == REG_DEP_ANTI
|| REG_NOTE_KIND(link) == REG_DEP_OUTPUT)
return 0;
@@ -2219,7 +2215,7 @@ arm_adjust_cost (insn, link, dep, cost)
/* This is a load after a store, there is no conflict if the load reads
from a cached area. Assume that loads from the stack, and from the
constant pool are cached, and that others will miss. This is a
-hack. */
+hack. */
if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat), 0))
|| reg_mentioned_p (stack_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
@@ -2232,7 +2228,7 @@ arm_adjust_cost (insn, link, dep, cost)
return cost;
}
-/* This code has been fixed for cross compilation. */
+/* This code has been fixed for cross compilation. */
static int fpa_consts_inited = 0;
@@ -2259,7 +2255,7 @@ init_fpa_table ()
fpa_consts_inited = 1;
}
-/* Return TRUE if rtx X is a valid immediate FPU constant. */
+/* Return TRUE if rtx X is a valid immediate FPU constant. */
int
const_double_rtx_ok_for_fpu (x)
@@ -2282,7 +2278,7 @@ const_double_rtx_ok_for_fpu (x)
return 0;
}
-/* Return TRUE if rtx X is a valid immediate FPU constant. */
+/* Return TRUE if rtx X is a valid immediate FPU constant. */
int
neg_const_double_rtx_ok_for_fpu (x)
@@ -2384,20 +2380,20 @@ bad_signed_byte_operand (op, mode)
op = XEXP (op, 0);
-/* A sum of anything more complex than reg + reg or reg + const is bad */
+/* A sum of anything more complex than reg + reg or reg + const is bad. */
if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
&& (! s_register_operand (XEXP (op, 0), VOIDmode)
|| (! s_register_operand (XEXP (op, 1), VOIDmode)
&& GET_CODE (XEXP (op, 1)) != CONST_INT)))
return 1;
-/* Big constants are also bad */
+/* Big constants are also bad. */
if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
&& (INTVAL (XEXP (op, 1)) > 0xff
|| -INTVAL (XEXP (op, 1)) > 0xff))
return 1;
-/* Everything else is good, or can will automatically be made so. */
+/* Everything else is good, or can will automatically be made so. */
return 0;
}
@@ -2634,7 +2630,7 @@ soft_df_operand (op, mode)
}
}
-/* Return TRUE for valid index operands. */
+/* Return TRUE for valid index operands. */
int
index_operand (op, mode)
@@ -2648,7 +2644,7 @@ index_operand (op, mode)
/* Return TRUE for valid shifts by a constant. This also accepts any
power of two on the (somewhat overly relaxed) assumption that the
-shift operator in this case was a mult. */
+shift operator in this case was a mult. */
int
const_shift_operand (op, mode)
@@ -2696,7 +2692,7 @@ logical_binary_operator (x, mode)
}
}
-/* Return TRUE for shift operators. */
+/* Return TRUE for shift operators. */
int
shift_operator (x, mode)
@@ -2717,15 +2713,16 @@ shift_operator (x, mode)
}
}
-int equality_operator (x, mode)
+/* Return TRUE if x is EQ or NE. */
+int
+equality_operator (x, mode)
rtx x;
enum machine_mode mode ATTRIBUTE_UNUSED;
{
return GET_CODE (x) == EQ || GET_CODE (x) == NE;
}
-/* Return TRUE for SMIN SMAX UMIN UMAX operators. */
+/* Return TRUE for SMIN SMAX UMIN UMAX operators. */
int
minmax_operator (x, mode)
rtx x;
@@ -2739,11 +2736,8 @@ minmax_operator (x, mode)
return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
}
-/* return TRUE if x is EQ or NE */
/* Return TRUE if this is the condition code register, if we aren't given
-a mode, accept any class CCmode register */
+a mode, accept any class CCmode register. */
int
cc_register (x, mode)
rtx x;
@@ -2765,7 +2759,6 @@ cc_register (x, mode)
/* Return TRUE if this is the condition code register, if we aren't given
a mode, accept any class CCmode register which indicates a dominance
expression. */
int
dominant_cc_register (x, mode)
rtx x;
@@ -2867,8 +2860,7 @@ minmax_code (x)
abort ();
}
-/* Return 1 if memory locations are adjacent */
+/* Return 1 if memory locations are adjacent. */
int
adjacent_mem_locations (a, b)
rtx a, b;
@@ -2903,7 +2895,7 @@ adjacent_mem_locations (a, b)
}
/* Return 1 if OP is a load multiple operation. It is known to be
-parallel and the first section will be tested. */
+parallel and the first section will be tested. */
int
load_multiple_operation (op, mode)
@@ -2920,13 +2912,13 @@ load_multiple_operation (op, mode)
|| GET_CODE (XVECEXP (op, 0, 0)) != SET)
return 0;
-/* Check to see if this might be a write-back */
+/* Check to see if this might be a write-back. */
if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
{
i++;
base = 1;
-/* Now check it more carefully */
+/* Now check it more carefully. */
if (GET_CODE (SET_DEST (elt)) != REG
|| GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
|| REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
@@ -2958,7 +2950,7 @@ load_multiple_operation (op, mode)
if (GET_CODE (elt) != SET
|| GET_CODE (SET_DEST (elt)) != REG
|| GET_MODE (SET_DEST (elt)) != SImode
-|| REGNO (SET_DEST (elt)) != dest_regno + i - base
+|| REGNO (SET_DEST (elt)) != (unsigned int)(dest_regno + i - base)
|| GET_CODE (SET_SRC (elt)) != MEM
|| GET_MODE (SET_SRC (elt)) != SImode
|| GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
@@ -2972,8 +2964,7 @@ load_multiple_operation (op, mode)
}
/* Return 1 if OP is a store multiple operation. It is known to be
-parallel and the first section will be tested. */
+parallel and the first section will be tested. */
int
store_multiple_operation (op, mode)
rtx op;
@@ -2989,13 +2980,13 @@ store_multiple_operation (op, mode)
|| GET_CODE (XVECEXP (op, 0, 0)) != SET)
return 0;
-/* Check to see if this might be a write-back */
+/* Check to see if this might be a write-back. */
if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
{
i++;
base = 1;
-/* Now check it more carefully */
+/* Now check it more carefully. */
if (GET_CODE (SET_DEST (elt)) != REG
|| GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
|| REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
@@ -3027,7 +3018,7 @@ store_multiple_operation (op, mode)
if (GET_CODE (elt) != SET
|| GET_CODE (SET_SRC (elt)) != REG
|| GET_MODE (SET_SRC (elt)) != SImode
-|| REGNO (SET_SRC (elt)) != src_regno + i - base
+|| REGNO (SET_SRC (elt)) != (unsigned int)(src_regno + i - base)
|| GET_CODE (SET_DEST (elt)) != MEM
|| GET_MODE (SET_DEST (elt)) != SImode
|| GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
@@ -3103,7 +3094,7 @@ load_multiple_sequence (operands, nops, regs, base, load_offset)
}
else
{
-if (base_reg != REGNO (reg))
+if (base_reg != (int) REGNO (reg))
/* Not addressed from the same base register. */
return 0;
@@ -3338,7 +3329,7 @@ store_multiple_sequence (operands, nops, regs, base, load_offset)
}
else
{
-if (base_reg != REGNO (reg))
+if (base_reg != (int) REGNO (reg))
/* Not addressed from the same base register. */
return 0;
@@ -3513,8 +3504,7 @@ arm_naked_function_p (func)
return a != NULL_TREE;
}
-/* Routines for use in generating RTL */
+/* Routines for use in generating RTL. */
rtx
arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
in_struct_p, scalar_p)
@@ -3729,7 +3719,7 @@ arm_gen_movstrqi (operands)
if (part_bytes_reg == NULL)
abort ();
-/* The bytes we want are in the top end of the word */
+/* The bytes we want are in the top end of the word. */
emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
GEN_INT (8 * (4 - last_bytes))));
part_bytes_reg = tmp;
@@ -4040,7 +4030,7 @@ arm_reload_in_hi (operands)
base = find_replacement (&XEXP (ref, 0));
}
else
-/* The slot is out of range, or was dressed up in a SUBREG */
+/* The slot is out of range, or was dressed up in a SUBREG. */
base = reg_equiv_address[REGNO (ref)];
}
else
@@ -4057,13 +4047,13 @@ arm_reload_in_hi (operands)
}
else if (GET_CODE (base) == PLUS)
{
-/* The addend must be CONST_INT, or we would have dealt with it above */
+/* The addend must be CONST_INT, or we would have dealt with it above. */
HOST_WIDE_INT hi, lo;
offset += INTVAL (XEXP (base, 1));
base = XEXP (base, 0);
-/* Rework the address into a legal sequence of insns */
+/* Rework the address into a legal sequence of insns. */
/* Valid range for lo is -4095 -> 4095 */
lo = (offset >= 0
? (offset & 0xfff)
@@ -4087,7 +4077,7 @@ arm_reload_in_hi (operands)
rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
/* Get the base address; addsi3 knows how to handle constants
-that require more than one insn */
+that require more than one insn. */
emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
base = base_plus;
offset = lo;
@@ -4158,7 +4148,7 @@ arm_reload_out_hi (operands)
base = find_replacement (&XEXP (ref, 0));
}
else
-/* The slot is out of range, or was dressed up in a SUBREG */
+/* The slot is out of range, or was dressed up in a SUBREG. */
base = reg_equiv_address[REGNO (ref)];
}
else
@@ -4203,13 +4193,13 @@ arm_reload_out_hi (operands)
}
else if (GET_CODE (base) == PLUS)
{
-/* The addend must be CONST_INT, or we would have dealt with it above */
+/* The addend must be CONST_INT, or we would have dealt with it above. */
HOST_WIDE_INT hi, lo;
offset += INTVAL (XEXP (base, 1));
base = XEXP (base, 0);
-/* Rework the address into a legal sequence of insns */
+/* Rework the address into a legal sequence of insns. */
/* Valid range for lo is -4095 -> 4095 */
lo = (offset >= 0
? (offset & 0xfff)
@@ -4259,7 +4249,7 @@ arm_reload_out_hi (operands)
}
/* Get the base address; addsi3 knows how to handle constants
-that require more than one insn */
+that require more than one insn. */
emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
base = base_plus;
offset = lo;
@@ -4342,9 +4332,9 @@ arm_reload_out_hi (operands)
been inserted, the insns are then modified to reference the
relevant entry in the pool.
-Possible enhancements to the alogorithm (not implemented) are:
+Possible enhancements to the algorithm (not implemented) are:
-1) ARM instructions (but not thumb) can use negative offsets, so we
+1) ARM instructions (but not Thumb) can use negative offsets, so we
could reference back to a previous pool rather than forwards to a
new one. For large functions this may reduce the number of pools
required.
@@ -4352,9 +4342,7 @@ arm_reload_out_hi (operands)
2) For some processors and object formats, there may be benefit in
aligning the pools to the start of cache lines; this alignment
would need to be taken into account when calculating addressability
-of a pool.
-*/
+of a pool. */
typedef struct
{
@@ -4366,7 +4354,6 @@ typedef struct
/* The maximum number of constants that can fit into one pool, since
the pc relative range is 0...4092 bytes and constants are at least 4
bytes long. */
#define MAX_MINIPOOL_SIZE (4092/4)
static minipool_node minipool_vector[MAX_MINIPOOL_SIZE];
static int minipool_size;
@@ -4402,7 +4389,7 @@ add_minipool_constant (x, mode)
}
}
-/* Need a new one */
+/* Need a new one. */
minipool_vector[minipool_size].next_offset = GET_MODE_SIZE (mode);
offset = 0;
if (minipool_size == 0)
@@ -4417,7 +4404,7 @@ add_minipool_constant (x, mode)
return offset;
}
-/* Output the literal table */
+/* Output the literal table. */
static void
dump_minipool (scan)
rtx scan;
@@ -4471,7 +4458,7 @@ find_barrier (from, max_count)
if (GET_CODE (from) == BARRIER)
found_barrier = from;
-/* Count the length of this insn */
+/* Count the length of this insn. */
if (GET_CODE (from) == JUMP_INSN
&& JUMP_LABEL (from) != 0
&& ((tmp = next_real_insn (JUMP_LABEL (from)))
@@ -4587,7 +4574,7 @@ push_minipool_fix (insn, address, loc, mode, value)
if (fix->range == 0)
abort ();
-/* Add it to the chain of fixes */
+/* Add it to the chain of fixes. */
fix->next = NULL;
if (minipool_fix_head != NULL)
minipool_fix_tail->next = fix;
@@ -4604,10 +4591,10 @@ note_invalid_constants (insn, address)
{
int opno;
-/* Extract the operands of the insn */
+/* Extract the operands of the insn. */
extract_insn(insn);
-/* Find the alternative selected */
+/* Find the alternative selected. */
if (! constrain_operands (1))
fatal_insn_not_found (insn);
@@ -4616,7 +4603,7 @@ note_invalid_constants (insn, address)
for (opno = 0; opno < recog_data.n_operands; opno++)
{
-/* Things we need to fix can only occur in inputs */
+/* Things we need to fix can only occur in inputs. */
if (recog_data.operand_type[opno] != OP_IN)
continue;
@@ -4788,7 +4775,7 @@ arm_reorg (first)
/* If the rtx is the correct value then return the string of the number.
In this way we can ensure that valid double constants are generated even
-when cross compiling. */
+when cross compiling. */
char *
fp_immediate_constant (x)
rtx x;
@@ -4857,13 +4844,13 @@ print_multi_reg (stream, instr, reg, mask, hat)
fprintf (stream, "}%s\n", hat ? "^" : "");
}
-/* Output a 'call' insn. */
+/* Output a 'call' insn. */
char *
output_call (operands)
rtx * operands;
{
-/* Handle calls to lr using ip (which may be clobbered in subr anyway). */
+/* Handle calls to lr using ip (which may be clobbered in subr anyway). */
if (REGNO (operands[0]) == LR_REGNUM)
{
@@ -4901,7 +4888,7 @@ eliminate_lr2ip (x)
}
return 0;
default:
-/* Scan through the sub-elements and change any references there */
+/* Scan through the sub-elements and change any references there. */
fmt = GET_RTX_FORMAT (code);
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
@@ -4915,15 +4902,14 @@ eliminate_lr2ip (x)
}
}
-/* Output a 'call' insn that is a reference in memory. */
+/* Output a 'call' insn that is a reference in memory. */
char *
output_call_mem (operands)
rtx * operands;
{
-operands[0] = copy_rtx (operands[0]); /* Be ultra careful */
-/* Handle calls using lr by using ip (which may be clobbered in subr anyway).
-*/
+operands[0] = copy_rtx (operands[0]); /* Be ultra careful. */
+/* Handle calls using lr by using ip (which may be clobbered in subr anyway). */
if (eliminate_lr2ip (&operands[0]))
output_asm_insn ("mov%?\t%|ip, %|lr", operands);
@@ -4997,7 +4983,7 @@ char *
output_mov_long_double_arm_from_arm (operands)
rtx * operands;
{
-/* We have to be careful here because the two might overlap */
+/* We have to be careful here because the two might overlap. */
int dest_start = REGNO (operands[0]);
int src_start = REGNO (operands[1]);
rtx ops[2];
@@ -5092,7 +5078,7 @@ output_move_double (operands)
if (reg1 == IP_REGNUM)
abort ();
-/* Ensure the second source is not overwritten */
+/* Ensure the second source is not overwritten. */
if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
output_asm_insn ("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
else
@@ -5145,7 +5131,7 @@ output_move_double (operands)
operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
}
#else
-/* Sign extend the intval into the high-order word */
+/* Sign extend the intval into the high-order word. */
if (WORDS_BIG_ENDIAN)
{
otherops[1] = operands[1];
@@ -5167,7 +5153,7 @@ output_move_double (operands)
break;
case PRE_INC:
-abort (); /* Should never happen now */
+abort (); /* Should never happen now. */
break;
case PRE_DEC:
@@ -5179,7 +5165,7 @@ output_move_double (operands)
break;
case POST_DEC:
-abort (); /* Should never happen now */
+abort (); /* Should never happen now. */
break;
case LABEL_REF:
@@ -5242,7 +5228,7 @@ output_move_double (operands)
}
}
else
-abort (); /* Constraints should prevent this */
+abort (); /* Constraints should prevent this. */
}
else if (code0 == MEM && code1 == REG)
{
@@ -5256,7 +5242,7 @@ output_move_double (operands)
break;
case PRE_INC:
-abort (); /* Should never happen now */
+abort (); /* Should never happen now. */
break;
case PRE_DEC:
@@ -5268,7 +5254,7 @@ output_move_double (operands)
break;
case POST_DEC:
-abort (); /* Should never happen now */
+abort (); /* Should never happen now. */
break;
case PLUS:
@@ -5331,13 +5317,13 @@ output_mov_immediate (operands)
return "";
}
-/* If all else fails, make it out of ORRs or BICs as appropriate. */
+/* If all else fails, make it out of ORRs or BICs as appropriate. */
for (i=0; i < 32; i++)
if (n & 1 << i)
n_ones++;
-if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
+if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
output_multi_immediate(operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
~n);
else
@@ -5393,14 +5379,14 @@ output_multi_immediate (operands, instr1, instr2, immed_op, n)
if (n == 0)
{
operands[immed_op] = const0_rtx;
-output_asm_insn (instr1, operands); /* Quick and easy output */
+output_asm_insn (instr1, operands); /* Quick and easy output. */
}
else
{
int i;
char *instr = instr1;
-/* Note that n is never zero here (which would give no output) */
+/* Note that n is never zero here (which would give no output). */
for (i = 0; i < 32; i += 2)
{
if (n & (3 << i))
@@ -5453,7 +5439,7 @@ arithmetic_instr (op, shift_first_arg)
for the operation code. The returned result should not be overwritten.
OP is the rtx code of the shift.
On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
-shift. */
+shift. */
static char *
shift_op (op, amountp)
@@ -5528,8 +5514,7 @@ shift_op (op, amountp)
}
-/* Obtain the shift from the POWER of two. */
+/* Obtain the shift from the POWER of two. */
static HOST_WIDE_INT
int_log2 (power)
HOST_WIDE_INT power;
@@ -5635,8 +5620,7 @@ output_ascii_pseudo_op (stream, p, len)
NOTE: This code does not check for side-effect expressions in a SET_SRC:
such a check should not be needed because these only update an existing
value within a register; the register must still be set elsewhere within
-the function. */
+the function. */
static int
pattern_really_clobbers_lr (x)
rtx x;
@@ -5715,7 +5699,7 @@ function_really_clobbers_lr (first)
case CALL_INSN:
/* Don't yet know how to handle those calls that are not to a
-SYMBOL_REF */
+SYMBOL_REF. */
if (GET_CODE (PATTERN (insn)) != PARALLEL)
abort ();
@@ -5734,7 +5718,7 @@ function_really_clobbers_lr (first)
return 1;
break;
-default: /* Don't recognize it, be safe */
+default: /* Don't recognize it, be safe. */
return 1;
}
@@ -5748,11 +5732,12 @@ function_really_clobbers_lr (first)
if ((next = next_nonnote_insn (insn)) == NULL)
return 1;
-/* No need to worry about lr if the call never returns */
+/* No need to worry about lr if the call never returns. */
if (GET_CODE (next) == BARRIER)
break;
-if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE
+if (GET_CODE (next) == INSN
+&& GET_CODE (PATTERN (next)) == USE
&& (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
&& (REGNO (SET_DEST (XVECEXP (PATTERN (insn), 0, 0)))
== REGNO (XEXP (PATTERN (next), 0))))
@@ -5769,7 +5754,7 @@ function_really_clobbers_lr (first)
}
}
-/* We have reached the end of the chain so lr was _not_ clobbered */
+/* We have reached the end of the chain so lr was _not_ clobbered. */
return 0;
}
@@ -5908,12 +5893,12 @@ output_return_instruction (operand, really_return, reverse)
Such functions never return, and many memory cycles can be saved
by not storing register values that will never be needed again.
This optimization was added to speed up context switching in a
-kernel application. */
+kernel application. */
int
arm_volatile_func ()
{
-return (optimize > 0 && current_function_nothrow
+return (optimize > 0
+&& current_function_nothrow
&& TREE_THIS_VOLATILE (current_function_decl));
}
@@ -5969,8 +5954,7 @@ arm_poke_function_name (stream, name)
no stack frame requirement and no live registers execpt for `lr'. If we
can guarantee that by making all function calls into tail calls and that
lr is not clobbered in any other way, then there is no need to push lr
-onto the stack. */
+onto the stack. */
void
output_func_prologue (f, frame_size)
FILE * f;
@@ -5984,7 +5968,7 @@ output_func_prologue (f, frame_size)
int store_arg_regs = 0;
if (arm_ccfsm_state || arm_target_insn)
-abort (); /* Sanity check */
+abort (); /* Sanity check. */
if (arm_naked_function_p (current_function_decl))
return;
@@ -6026,11 +6010,10 @@ output_func_prologue (f, frame_size)
if (live_regs_mask)
{
-/* if a di mode load/store multiple is used, and the base register
+/* If a di mode load/store multiple is used, and the base register
is r3, then r4 can become an ever live register without lr
doing so, in this case we need to push lr as well, or we
-will fail to get a proper return. */
+will fail to get a proper return. */
live_regs_mask |= 1 << LR_REGNUM;
lr_save_eliminated = 0;
@@ -6050,7 +6033,7 @@ arm_output_epilogue ()
{
int reg;
int live_regs_mask = 0;
-/* If we need this, then it will always be at least this much */
+/* If we need this, then it will always be at least this much. */
int floats_offset = 12;
rtx operands[3];
int frame_size = get_frame_size ();
@@ -6112,7 +6095,7 @@ arm_output_epilogue ()
{
floats_offset += 12;
-/* We can't unstack more than four registers at once */
+/* We can't unstack more than four registers at once. */
if (start_reg - reg == 3)
{
asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
@@ -6224,7 +6207,7 @@ arm_output_epilogue ()
{
if (live_regs_mask || regs_ever_live[LR_REGNUM])
{
-/* Restore the integer regs, and the return address into lr */
+/* Restore the integer regs, and the return address into lr. */
if (! lr_save_eliminated)
live_regs_mask |= 1 << LR_REGNUM;
@@ -6234,12 +6217,12 @@ arm_output_epilogue ()
if (current_function_pretend_args_size)
{
-/* Unwind the pre-pushed regs */
+/* Unwind the pre-pushed regs. */
operands[0] = operands[1] = stack_pointer_rtx;
operands[2] = GEN_INT (current_function_pretend_args_size);
output_add_immediate (operands);
}
-/* And finally, go home */
+/* And finally, go home. */
if (TARGET_INTERWORK)
asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
else if (TARGET_APCS_32)
@@ -6270,7 +6253,6 @@ output_func_epilogue (frame_size)
Unfortunately, since this insn does not reflect very well the actual
semantics of the operation, we need to annotate the insn for the benefit
of DWARF2 frame unwind information. */
static rtx
emit_multi_reg_push (mask)
int mask;
@@ -6459,7 +6441,6 @@ arm_expand_prologue ()
}
/* And now the floating point regs. */
if (! volatile_func)
{
if (arm_fpu_arch == FP_SOFT2)
@@ -6676,7 +6657,7 @@ arm_print_operand (stream, x, code)
else if (GET_CODE (x) == CONST_DOUBLE)
fprintf (stream, "#%s", fp_immediate_constant (x));
else if (GET_CODE (x) == NEG)
-abort (); /* This should never happen now. */
+abort (); /* This should never happen now. */
else
{
fputc ('#', stream);
@@ -6846,7 +6827,7 @@ arm_final_prescan_insn (insn)
out what the conditions are when the jump isn't taken. */
int jump_clobbers = 0;
-/* If we start with a return insn, we only succeed if we find another one. */
+/* If we start with a return insn, we only succeed if we find another one. */
int seeking_return = 0;
/* START_INSN will hold the insn from where we start looking. This is the
@@ -7002,7 +6983,7 @@ arm_final_prescan_insn (insn)
/* Succeed if the following insn is the target label.
Otherwise fail.
If return insns are used then the last insn in a function
-will be a barrier. */
+will be a barrier. */
this_insn = next_nonnote_insn (this_insn);
if (this_insn && this_insn == label)
{
@@ -7190,7 +7171,7 @@ arm_strip_name_encoding (const char * name)
}
#ifdef AOF_ASSEMBLER
-/* Special functions only needed when producing AOF syntax assembler. */
+/* Special functions only needed when producing AOF syntax assembler. */
rtx aof_pic_label = NULL_RTX;
struct pic_chain

gcc/config/arm/arm.h

@@ -250,6 +250,22 @@ Unrecognized value in TARGET_CPU_DEFAULT.
#define SUBTARGET_CPP_SPEC ""
#endif
+#ifndef SUBTARGET_EXTRA_ASM_SPEC
+#define SUBTARGET_EXTRA_ASM_SPEC
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "\
+%{mbig-endian:-EB} \
+%{mcpu=*:-m%*} \
+%{march=*:-m%*} \
+%{mapcs-*:-mapcs-%*} \
+%{matpcs:-matpcs} \
+%{mapcs-float:-mfloat} \
+%{msoft-float:-mno-fpu} \
+%{mthumb-interwork:-mthumb-interwork} \
+" SUBTARGET_EXTRA_ASM_SPEC
+#endif
/* Run-time Target Specification. */
#ifndef TARGET_VERSION
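For illustration only (not part of the diff): in driver specs, each %{option:replacement} clause emits its replacement when the corresponding command-line option is present, and %* copies the text matched by the * in the option pattern. So a hypothetical invocation such as

  gcc -mbig-endian -mcpu=arm9tdmi -mthumb-interwork -c foo.c

should pass roughly

  -EB -marm9tdmi -mthumb-interwork

through to the assembler, plus whatever SUBTARGET_EXTRA_ASM_SPEC contributes.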
@@ -2100,7 +2116,7 @@ extern struct rtx_def * arm_compare_op1;
do \
{ \
if (TARGET_POKE_FUNCTION_NAME) \
-arm_poke_function_name (STREAM, NAME); \
+arm_poke_function_name (STREAM, (char *) NAME); \
} \
while (0)
@@ -2240,7 +2256,7 @@ extern struct rtx_def * arm_compare_op1;
do \
{ \
int mi_delta = (DELTA); \
-const char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+const char * mi_op = mi_delta < 0 ? "sub" : "add"; \
int shift = 0; \
int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
? 1 : 0); \