df-problems.c (df_set_note): Avoid C++ keywords.

	* df-problems.c (df_set_note): Avoid C++ keywords.
	* df-scan.c (df_ref_change_reg_with_loc_1): Likewise.
	* dse.c (record_store, remove_useless_values): Likewise.
	* emit-rtl.c (gen_reg_rtx, update_reg_offset, gen_rtx_REG_offset,
	gen_reg_rtx_offset, operand_subword, change_address_1,
	change_address, adjust_address_1, offset_address,
	widen_memory_access, emit_copy_of_insn_after): Likewise.
	* explow.c (round_push, allocate_dynamic_stack_space): Likewise.
	* fwprop.c (should_replace_address, propagate_rtx_1,
	propagate_rtx, try_fwprop_subst, forward_propagate_and_simplify):
	Likewise.
	* gcse.c (cprop_jump, find_implicit_sets, bypass_block,
	gcse_emit_move_after, update_ld_motion_stores): Likewise.
	* lcm.c (compute_insert_delete, pre_edge_lcm,
	compute_rev_insert_delete, pre_edge_rev_lcm): Likewise.
	* lower-subreg.c (resolve_reg_notes): Likewise.
	* mode-switching.c (optimize_mode_switching): Likewise.

From-SVN: r137848
Author: Kaveh R. Ghazi (committed by Kaveh Ghazi)
Date: 2008-07-15 17:51:00 +00:00
Parent: 32e9fa4804
Commit: 6056428918
11 changed files with 210 additions and 190 deletions
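
For context, the renames in this patch are mechanical: identifiers that happen to be C++ keywords ('new', 'delete', 'this') are replaced with neutral names ('new_rtx', 'new_size', 'del', 'curr', and so on), and neighbouring names such as 'old' are sometimes renamed alongside them for symmetry, so that these files can also be compiled with a C++ compiler. The short sketch below is illustrative only; it is not code from the patch, and all identifiers in it are invented. It shows the kind of failure the renames avoid.

/* Illustrative sketch (not from the patch): "new" is an ordinary
   identifier in C but a reserved keyword in C++, so code using it
   cannot be built with a C++ compiler.  Renaming the variable is
   enough to make the same logic acceptable to both languages.  */
#include <stdlib.h>

struct node { struct node *next; int value; };

/* Before such a rename (rejected by a C++ compiler):
     struct node *new = malloc (sizeof *new);                      */

/* After the rename, valid as both C and C++.  */
static struct node *
prepend (struct node *old_node, int value)
{
  struct node *new_node = (struct node *) malloc (sizeof *new_node);

  if (new_node == NULL)
    return old_node;

  new_node->value = value;
  new_node->next = old_node;
  return new_node;
}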

ChangeLog

@ -1,3 +1,23 @@
2008-07-15 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
* df-problems.c (df_set_note): Avoid C++ keywords.
* df-scan.c (df_ref_change_reg_with_loc_1): Likewise.
* dse.c (record_store, remove_useless_values): Likewise.
* emit-rtl.c (gen_reg_rtx, update_reg_offset, gen_rtx_REG_offset,
gen_reg_rtx_offset, operand_subword, change_address_1,
change_address, adjust_address_1, offset_address,
widen_memory_access, emit_copy_of_insn_after): Likewise.
* explow.c (round_push, allocate_dynamic_stack_space): Likewise.
* fwprop.c (should_replace_address, propagate_rtx_1,
propagate_rtx, try_fwprop_subst, forward_propagate_and_simplify):
Likewise.
* gcse.c (cprop_jump, find_implicit_sets, bypass_block,
gcse_emit_move_after, update_ld_motion_stores): Likewise.
* lcm.c (compute_insert_delete, pre_edge_lcm,
compute_rev_insert_delete, pre_edge_rev_lcm): Likewise.
* lower-subreg.c (resolve_reg_notes): Likewise.
* mode-switching.c (optimize_mode_switching): Likewise.
2008-07-15 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
* bt-load.c (add_btr_def, migrate_btr_def,

df-problems.c

@ -3170,24 +3170,24 @@ df_kill_notes (rtx insn, rtx *old_dead_notes, rtx *old_unused_notes)
static inline rtx
df_set_note (enum reg_note note_type, rtx insn, rtx old, rtx reg)
{
rtx this = old;
rtx curr = old;
rtx prev = NULL;
while (this)
if (XEXP (this, 0) == reg)
while (curr)
if (XEXP (curr, 0) == reg)
{
if (prev)
XEXP (prev, 1) = XEXP (this, 1);
XEXP (prev, 1) = XEXP (curr, 1);
else
old = XEXP (this, 1);
XEXP (this, 1) = REG_NOTES (insn);
REG_NOTES (insn) = this;
old = XEXP (curr, 1);
XEXP (curr, 1) = REG_NOTES (insn);
REG_NOTES (insn) = curr;
return old;
}
else
{
prev = this;
this = XEXP (this, 1);
prev = curr;
curr = XEXP (curr, 1);
}
/* Did not find the note. */

df-scan.c

@ -1887,10 +1887,10 @@ df_insn_change_bb (rtx insn, basic_block new_bb)
/* Helper function for df_ref_change_reg_with_loc. */
static void
df_ref_change_reg_with_loc_1 (struct df_reg_info *old, struct df_reg_info *new,
df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df, struct df_reg_info *new_df,
int new_regno, rtx loc)
{
struct df_ref *the_ref = old->reg_chain;
struct df_ref *the_ref = old_df->reg_chain;
while (the_ref)
{
@ -1908,18 +1908,18 @@ df_ref_change_reg_with_loc_1 (struct df_reg_info *old, struct df_reg_info *new,
if (prev_ref)
prev_ref->next_reg = next_ref;
else
old->reg_chain = next_ref;
old_df->reg_chain = next_ref;
if (next_ref)
next_ref->prev_reg = prev_ref;
old->n_refs--;
old_df->n_refs--;
/* Put the ref into the new regno chain. */
the_ref->prev_reg = NULL;
the_ref->next_reg = new->reg_chain;
if (new->reg_chain)
new->reg_chain->prev_reg = the_ref;
new->reg_chain = the_ref;
new->n_refs++;
the_ref->next_reg = new_df->reg_chain;
if (new_df->reg_chain)
new_df->reg_chain->prev_reg = the_ref;
new_df->reg_chain = the_ref;
new_df->n_refs++;
df_set_bb_dirty (DF_REF_BB (the_ref));
/* Need to resort the record that the ref was in because the

dse.c

@ -1295,7 +1295,7 @@ record_store (rtx body, bb_info_t bb_info)
{
insn_info_t next = ptr->next_local_store;
store_info_t s_info = ptr->store_rec;
bool delete = true;
bool del = true;
/* Skip the clobbers. We delete the active insn if this insn
shadows the set. To have been put on the active list, it
@ -1304,7 +1304,7 @@ record_store (rtx body, bb_info_t bb_info)
s_info = s_info->next;
if (s_info->alias_set != spill_alias_set)
delete = false;
del = false;
else if (s_info->alias_set)
{
struct clear_alias_mode_holder *entry
@ -1317,7 +1317,7 @@ record_store (rtx body, bb_info_t bb_info)
if ((GET_MODE (mem) == GET_MODE (s_info->mem))
&& (GET_MODE (mem) == entry->mode))
{
delete = true;
del = true;
s_info->positions_needed = (unsigned HOST_WIDE_INT) 0;
}
if (dump_file)
@ -1352,9 +1352,9 @@ record_store (rtx body, bb_info_t bb_info)
/* An insn can be deleted if every position of every one of
its s_infos is zero. */
if (s_info->positions_needed != (unsigned HOST_WIDE_INT) 0)
delete = false;
del = false;
if (delete)
if (del)
{
insn_info_t insn_to_delete = ptr;
@ -2080,7 +2080,7 @@ remove_useless_values (cselib_val *base)
while (insn_info)
{
store_info_t store_info = insn_info->store_rec;
bool delete = false;
bool del = false;
/* If ANY of the store_infos match the cselib group that is
being deleted, then the insn can not be deleted. */
@ -2089,13 +2089,13 @@ remove_useless_values (cselib_val *base)
if ((store_info->group_id == -1)
&& (store_info->cse_base == base))
{
delete = true;
del = true;
break;
}
store_info = store_info->next;
}
if (delete)
if (del)
{
if (last)
last->next_local_store = insn_info->next_local_store;

emit-rtl.c

@ -890,12 +890,12 @@ gen_reg_rtx (enum machine_mode mode)
if (reg_rtx_no == crtl->emit.regno_pointer_align_length)
{
int old_size = crtl->emit.regno_pointer_align_length;
char *new;
char *tmp;
rtx *new1;
new = XRESIZEVEC (char, crtl->emit.regno_pointer_align, old_size * 2);
memset (new + old_size, 0, old_size);
crtl->emit.regno_pointer_align = (unsigned char *) new;
tmp = XRESIZEVEC (char, crtl->emit.regno_pointer_align, old_size * 2);
memset (tmp + old_size, 0, old_size);
crtl->emit.regno_pointer_align = (unsigned char *) tmp;
new1 = GGC_RESIZEVEC (rtx, regno_reg_rtx, old_size * 2);
memset (new1 + old_size, 0, old_size * sizeof (rtx));
@ -913,9 +913,9 @@ gen_reg_rtx (enum machine_mode mode)
to the REG_OFFSET. */
static void
update_reg_offset (rtx new, rtx reg, int offset)
update_reg_offset (rtx new_rtx, rtx reg, int offset)
{
REG_ATTRS (new) = get_reg_attrs (REG_EXPR (reg),
REG_ATTRS (new_rtx) = get_reg_attrs (REG_EXPR (reg),
REG_OFFSET (reg) + offset);
}
@ -926,10 +926,10 @@ rtx
gen_rtx_REG_offset (rtx reg, enum machine_mode mode, unsigned int regno,
int offset)
{
rtx new = gen_rtx_REG (mode, regno);
rtx new_rtx = gen_rtx_REG (mode, regno);
update_reg_offset (new, reg, offset);
return new;
update_reg_offset (new_rtx, reg, offset);
return new_rtx;
}
/* Generate a new pseudo-register with the same attributes as REG, but
@ -938,10 +938,10 @@ gen_rtx_REG_offset (rtx reg, enum machine_mode mode, unsigned int regno,
rtx
gen_reg_rtx_offset (rtx reg, enum machine_mode mode, int offset)
{
rtx new = gen_reg_rtx (mode);
rtx new_rtx = gen_reg_rtx (mode);
update_reg_offset (new, reg, offset);
return new;
update_reg_offset (new_rtx, reg, offset);
return new_rtx;
}
/* Adjust REG in-place so that it has mode MODE. It is assumed that the
@ -1372,18 +1372,18 @@ operand_subword (rtx op, unsigned int offset, int validate_address, enum machine
/* Form a new MEM at the requested address. */
if (MEM_P (op))
{
rtx new = adjust_address_nv (op, word_mode, offset * UNITS_PER_WORD);
rtx new_rtx = adjust_address_nv (op, word_mode, offset * UNITS_PER_WORD);
if (! validate_address)
return new;
return new_rtx;
else if (reload_completed)
{
if (! strict_memory_address_p (word_mode, XEXP (new, 0)))
if (! strict_memory_address_p (word_mode, XEXP (new_rtx, 0)))
return 0;
}
else
return replace_equiv_address (new, XEXP (new, 0));
return replace_equiv_address (new_rtx, XEXP (new_rtx, 0));
}
/* Rest can be handled by simplify_subreg. */
@ -1834,7 +1834,7 @@ set_mem_size (rtx mem, rtx size)
static rtx
change_address_1 (rtx memref, enum machine_mode mode, rtx addr, int validate)
{
rtx new;
rtx new_rtx;
gcc_assert (MEM_P (memref));
if (mode == VOIDmode)
@ -1856,9 +1856,9 @@ change_address_1 (rtx memref, enum machine_mode mode, rtx addr, int validate)
if (rtx_equal_p (addr, XEXP (memref, 0)) && mode == GET_MODE (memref))
return memref;
new = gen_rtx_MEM (mode, addr);
MEM_COPY_ATTRIBUTES (new, memref);
return new;
new_rtx = gen_rtx_MEM (mode, addr);
MEM_COPY_ATTRIBUTES (new_rtx, memref);
return new_rtx;
}
/* Like change_address_1 with VALIDATE nonzero, but we are not saying in what
@ -1867,31 +1867,31 @@ change_address_1 (rtx memref, enum machine_mode mode, rtx addr, int validate)
rtx
change_address (rtx memref, enum machine_mode mode, rtx addr)
{
rtx new = change_address_1 (memref, mode, addr, 1), size;
enum machine_mode mmode = GET_MODE (new);
rtx new_rtx = change_address_1 (memref, mode, addr, 1), size;
enum machine_mode mmode = GET_MODE (new_rtx);
unsigned int align;
size = mmode == BLKmode ? 0 : GEN_INT (GET_MODE_SIZE (mmode));
align = mmode == BLKmode ? BITS_PER_UNIT : GET_MODE_ALIGNMENT (mmode);
/* If there are no changes, just return the original memory reference. */
if (new == memref)
if (new_rtx == memref)
{
if (MEM_ATTRS (memref) == 0
|| (MEM_EXPR (memref) == NULL
&& MEM_OFFSET (memref) == NULL
&& MEM_SIZE (memref) == size
&& MEM_ALIGN (memref) == align))
return new;
return new_rtx;
new = gen_rtx_MEM (mmode, XEXP (memref, 0));
MEM_COPY_ATTRIBUTES (new, memref);
new_rtx = gen_rtx_MEM (mmode, XEXP (memref, 0));
MEM_COPY_ATTRIBUTES (new_rtx, memref);
}
MEM_ATTRS (new)
MEM_ATTRS (new_rtx)
= get_mem_attrs (MEM_ALIAS_SET (memref), 0, 0, size, align, mmode);
return new;
return new_rtx;
}
/* Return a memory reference like MEMREF, but with its mode changed
@ -1905,7 +1905,7 @@ adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset,
int validate, int adjust)
{
rtx addr = XEXP (memref, 0);
rtx new;
rtx new_rtx;
rtx memoffset = MEM_OFFSET (memref);
rtx size = 0;
unsigned int memalign = MEM_ALIGN (memref);
@ -1934,7 +1934,7 @@ adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset,
addr = plus_constant (addr, offset);
}
new = change_address_1 (memref, mode, addr, validate);
new_rtx = change_address_1 (memref, mode, addr, validate);
/* Compute the new values of the memory attributes due to this adjustment.
We add the offsets and update the alignment. */
@ -1950,17 +1950,17 @@ adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset,
(unsigned HOST_WIDE_INT) (offset & -offset) * BITS_PER_UNIT);
/* We can compute the size in a number of ways. */
if (GET_MODE (new) != BLKmode)
size = GEN_INT (GET_MODE_SIZE (GET_MODE (new)));
if (GET_MODE (new_rtx) != BLKmode)
size = GEN_INT (GET_MODE_SIZE (GET_MODE (new_rtx)));
else if (MEM_SIZE (memref))
size = plus_constant (MEM_SIZE (memref), -offset);
MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref),
memoffset, size, memalign, GET_MODE (new));
MEM_ATTRS (new_rtx) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref),
memoffset, size, memalign, GET_MODE (new_rtx));
/* At some point, we should validate that this offset is within the object,
if all the appropriate values are known. */
return new;
return new_rtx;
}
/* Return a memory reference like MEMREF, but with its mode changed
@ -1983,9 +1983,9 @@ adjust_automodify_address_1 (rtx memref, enum machine_mode mode, rtx addr,
rtx
offset_address (rtx memref, rtx offset, unsigned HOST_WIDE_INT pow2)
{
rtx new, addr = XEXP (memref, 0);
rtx new_rtx, addr = XEXP (memref, 0);
new = simplify_gen_binary (PLUS, Pmode, addr, offset);
new_rtx = simplify_gen_binary (PLUS, Pmode, addr, offset);
/* At this point we don't know _why_ the address is invalid. It
could have secondary memory references, multiplies or anything.
@ -1994,28 +1994,28 @@ offset_address (rtx memref, rtx offset, unsigned HOST_WIDE_INT pow2)
being able to recognize the magic around pic_offset_table_rtx.
This stuff is fragile, and is yet another example of why it is
bad to expose PIC machinery too early. */
if (! memory_address_p (GET_MODE (memref), new)
if (! memory_address_p (GET_MODE (memref), new_rtx)
&& GET_CODE (addr) == PLUS
&& XEXP (addr, 0) == pic_offset_table_rtx)
{
addr = force_reg (GET_MODE (addr), addr);
new = simplify_gen_binary (PLUS, Pmode, addr, offset);
new_rtx = simplify_gen_binary (PLUS, Pmode, addr, offset);
}
update_temp_slot_address (XEXP (memref, 0), new);
new = change_address_1 (memref, VOIDmode, new, 1);
update_temp_slot_address (XEXP (memref, 0), new_rtx);
new_rtx = change_address_1 (memref, VOIDmode, new_rtx, 1);
/* If there are no changes, just return the original memory reference. */
if (new == memref)
return new;
if (new_rtx == memref)
return new_rtx;
/* Update the alignment to reflect the offset. Reset the offset, which
we don't know. */
MEM_ATTRS (new)
MEM_ATTRS (new_rtx)
= get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref), 0, 0,
MIN (MEM_ALIGN (memref), pow2 * BITS_PER_UNIT),
GET_MODE (new));
return new;
GET_MODE (new_rtx));
return new_rtx;
}
/* Return a memory reference like MEMREF, but with its address changed to
@ -2048,14 +2048,14 @@ replace_equiv_address_nv (rtx memref, rtx addr)
rtx
widen_memory_access (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset)
{
rtx new = adjust_address_1 (memref, mode, offset, 1, 1);
tree expr = MEM_EXPR (new);
rtx memoffset = MEM_OFFSET (new);
rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1);
tree expr = MEM_EXPR (new_rtx);
rtx memoffset = MEM_OFFSET (new_rtx);
unsigned int size = GET_MODE_SIZE (mode);
/* If there are no changes, just return the original memory reference. */
if (new == memref)
return new;
if (new_rtx == memref)
return new_rtx;
/* If we don't know what offset we were at within the expression, then
we can't know if we've overstepped the bounds. */
@ -2117,10 +2117,10 @@ widen_memory_access (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset)
/* The widened memory may alias other stuff, so zap the alias set. */
/* ??? Maybe use get_alias_set on any remaining expression. */
MEM_ATTRS (new) = get_mem_attrs (0, expr, memoffset, GEN_INT (size),
MEM_ALIGN (new), mode);
MEM_ATTRS (new_rtx) = get_mem_attrs (0, expr, memoffset, GEN_INT (size),
MEM_ALIGN (new_rtx), mode);
return new;
return new_rtx;
}
/* Return a newly created CODE_LABEL rtx with a unique label number. */
@ -5464,27 +5464,27 @@ init_emit_once (int line_numbers)
rtx
emit_copy_of_insn_after (rtx insn, rtx after)
{
rtx new, link;
rtx new_rtx, link;
switch (GET_CODE (insn))
{
case INSN:
new = emit_insn_after (copy_insn (PATTERN (insn)), after);
new_rtx = emit_insn_after (copy_insn (PATTERN (insn)), after);
break;
case JUMP_INSN:
new = emit_jump_insn_after (copy_insn (PATTERN (insn)), after);
new_rtx = emit_jump_insn_after (copy_insn (PATTERN (insn)), after);
break;
case CALL_INSN:
new = emit_call_insn_after (copy_insn (PATTERN (insn)), after);
new_rtx = emit_call_insn_after (copy_insn (PATTERN (insn)), after);
if (CALL_INSN_FUNCTION_USAGE (insn))
CALL_INSN_FUNCTION_USAGE (new)
CALL_INSN_FUNCTION_USAGE (new_rtx)
= copy_insn (CALL_INSN_FUNCTION_USAGE (insn));
SIBLING_CALL_P (new) = SIBLING_CALL_P (insn);
RTL_CONST_CALL_P (new) = RTL_CONST_CALL_P (insn);
RTL_PURE_CALL_P (new) = RTL_PURE_CALL_P (insn);
RTL_LOOPING_CONST_OR_PURE_CALL_P (new)
SIBLING_CALL_P (new_rtx) = SIBLING_CALL_P (insn);
RTL_CONST_CALL_P (new_rtx) = RTL_CONST_CALL_P (insn);
RTL_PURE_CALL_P (new_rtx) = RTL_PURE_CALL_P (insn);
RTL_LOOPING_CONST_OR_PURE_CALL_P (new_rtx)
= RTL_LOOPING_CONST_OR_PURE_CALL_P (insn);
break;
@ -5493,14 +5493,14 @@ emit_copy_of_insn_after (rtx insn, rtx after)
}
/* Update LABEL_NUSES. */
mark_jump_label (PATTERN (new), new, 0);
mark_jump_label (PATTERN (new_rtx), new_rtx, 0);
INSN_LOCATOR (new) = INSN_LOCATOR (insn);
INSN_LOCATOR (new_rtx) = INSN_LOCATOR (insn);
/* If the old insn is frame related, then so is the new one. This is
primarily needed for IA-64 unwind info which marks epilogue insns,
which may be duplicated by the basic block reordering code. */
RTX_FRAME_RELATED_P (new) = RTX_FRAME_RELATED_P (insn);
RTX_FRAME_RELATED_P (new_rtx) = RTX_FRAME_RELATED_P (insn);
/* Copy all REG_NOTES except REG_LABEL_OPERAND since mark_jump_label
will make them. REG_LABEL_TARGETs are created there too, but are
@ -5509,14 +5509,14 @@ emit_copy_of_insn_after (rtx insn, rtx after)
if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND)
{
if (GET_CODE (link) == EXPR_LIST)
add_reg_note (new, REG_NOTE_KIND (link),
add_reg_note (new_rtx, REG_NOTE_KIND (link),
copy_insn_1 (XEXP (link, 0)));
else
add_reg_note (new, REG_NOTE_KIND (link), XEXP (link, 0));
add_reg_note (new_rtx, REG_NOTE_KIND (link), XEXP (link, 0));
}
INSN_CODE (new) = INSN_CODE (insn);
return new;
INSN_CODE (new_rtx) = INSN_CODE (insn);
return new_rtx;
}
static GTY((deletable)) rtx hard_reg_clobbers [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

explow.c

@ -874,10 +874,10 @@ round_push (rtx size)
if (GET_CODE (size) == CONST_INT)
{
HOST_WIDE_INT new = (INTVAL (size) + align - 1) / align * align;
HOST_WIDE_INT new_size = (INTVAL (size) + align - 1) / align * align;
if (INTVAL (size) != new)
size = GEN_INT (new);
if (INTVAL (size) != new_size)
size = GEN_INT (new_size);
}
else
{
@ -1136,10 +1136,10 @@ allocate_dynamic_stack_space (rtx size, rtx target, int known_align)
if (GET_CODE (size) == CONST_INT)
{
HOST_WIDE_INT new = INTVAL (size) / align * align;
HOST_WIDE_INT new_size = INTVAL (size) / align * align;
if (INTVAL (size) != new)
size = GEN_INT (new);
if (INTVAL (size) != new_size)
size = GEN_INT (new_size);
}
else
{

fwprop.c

@ -184,26 +184,26 @@ canonicalize_address (rtx x)
for a memory access in the given MODE. */
static bool
should_replace_address (rtx old, rtx new, enum machine_mode mode)
should_replace_address (rtx old_rtx, rtx new_rtx, enum machine_mode mode)
{
int gain;
if (rtx_equal_p (old, new) || !memory_address_p (mode, new))
if (rtx_equal_p (old_rtx, new_rtx) || !memory_address_p (mode, new_rtx))
return false;
/* Copy propagation is always ok. */
if (REG_P (old) && REG_P (new))
if (REG_P (old_rtx) && REG_P (new_rtx))
return true;
/* Prefer the new address if it is less expensive. */
gain = address_cost (old, mode) - address_cost (new, mode);
gain = address_cost (old_rtx, mode) - address_cost (new_rtx, mode);
/* If the addresses have equivalent cost, prefer the new address
if it has the highest `rtx_cost'. That has the potential of
eliminating the most insns without additional costs, and it
is the same that cse.c used to do. */
if (gain == 0)
gain = rtx_cost (new, SET) - rtx_cost (old, SET);
gain = rtx_cost (new_rtx, SET) - rtx_cost (old_rtx, SET);
return (gain > 0);
}
@ -244,7 +244,7 @@ enum {
that is because there is no simplify_gen_* function for LO_SUM). */
static bool
propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
propagate_rtx_1 (rtx *px, rtx old_rtx, rtx new_rtx, int flags)
{
rtx x = *px, tem = NULL_RTX, op0, op1, op2;
enum rtx_code code = GET_CODE (x);
@ -265,9 +265,9 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
/* If X is OLD_RTX, return NEW_RTX. But not if replacing only within an
address, and we are *not* inside one. */
if (x == old)
if (x == old_rtx)
{
*px = new;
*px = new_rtx;
return can_appear;
}
@ -277,7 +277,7 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
case RTX_UNARY:
op0 = XEXP (x, 0);
op_mode = GET_MODE (op0);
valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
if (op0 == XEXP (x, 0))
return true;
tem = simplify_gen_unary (code, mode, op0, op_mode);
@ -287,8 +287,8 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
case RTX_COMM_ARITH:
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
valid_ops &= propagate_rtx_1 (&op1, old, new, flags);
valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return true;
tem = simplify_gen_binary (code, mode, op0, op1);
@ -299,8 +299,8 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
op0 = XEXP (x, 0);
op1 = XEXP (x, 1);
op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
valid_ops &= propagate_rtx_1 (&op1, old, new, flags);
valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return true;
tem = simplify_gen_relational (code, mode, op_mode, op0, op1);
@ -312,9 +312,9 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
op1 = XEXP (x, 1);
op2 = XEXP (x, 2);
op_mode = GET_MODE (op0);
valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
valid_ops &= propagate_rtx_1 (&op1, old, new, flags);
valid_ops &= propagate_rtx_1 (&op2, old, new, flags);
valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
valid_ops &= propagate_rtx_1 (&op2, old_rtx, new_rtx, flags);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
return true;
if (op_mode == VOIDmode)
@ -327,7 +327,7 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
if (code == SUBREG)
{
op0 = XEXP (x, 0);
valid_ops &= propagate_rtx_1 (&op0, old, new, flags);
valid_ops &= propagate_rtx_1 (&op0, old_rtx, new_rtx, flags);
if (op0 == XEXP (x, 0))
return true;
tem = simplify_gen_subreg (mode, op0, GET_MODE (SUBREG_REG (x)),
@ -336,7 +336,7 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
break;
case RTX_OBJ:
if (code == MEM && x != new)
if (code == MEM && x != new_rtx)
{
rtx new_op0;
op0 = XEXP (x, 0);
@ -346,7 +346,7 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
return true;
op0 = new_op0 = targetm.delegitimize_address (op0);
valid_ops &= propagate_rtx_1 (&new_op0, old, new,
valid_ops &= propagate_rtx_1 (&new_op0, old_rtx, new_rtx,
flags | PR_CAN_APPEAR);
/* Dismiss transformation that we do not want to carry on. */
@ -359,7 +359,7 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
canonicalize_address (new_op0);
/* Copy propagations are always ok. Otherwise check the costs. */
if (!(REG_P (old) && REG_P (new))
if (!(REG_P (old_rtx) && REG_P (new_rtx))
&& !should_replace_address (op0, new_op0, GET_MODE (x)))
return true;
@ -374,8 +374,8 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
/* The only simplification we do attempts to remove references to op0
or make it constant -- in both cases, op0's invalidity will not
make the result invalid. */
propagate_rtx_1 (&op0, old, new, flags | PR_CAN_APPEAR);
valid_ops &= propagate_rtx_1 (&op1, old, new, flags);
propagate_rtx_1 (&op0, old_rtx, new_rtx, flags | PR_CAN_APPEAR);
valid_ops &= propagate_rtx_1 (&op1, old_rtx, new_rtx, flags);
if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
return true;
@ -393,9 +393,9 @@ propagate_rtx_1 (rtx *px, rtx old, rtx new, int flags)
else if (code == REG)
{
if (rtx_equal_p (x, old))
if (rtx_equal_p (x, old_rtx))
{
*px = new;
*px = new_rtx;
return can_appear;
}
}
@ -438,23 +438,23 @@ varying_mem_p (rtx *body, void *data ATTRIBUTE_UNUSED)
Otherwise, we accept simplifications that have a lower or equal cost. */
static rtx
propagate_rtx (rtx x, enum machine_mode mode, rtx old, rtx new)
propagate_rtx (rtx x, enum machine_mode mode, rtx old_rtx, rtx new_rtx)
{
rtx tem;
bool collapsed;
int flags;
if (REG_P (new) && REGNO (new) < FIRST_PSEUDO_REGISTER)
if (REG_P (new_rtx) && REGNO (new_rtx) < FIRST_PSEUDO_REGISTER)
return NULL_RTX;
flags = 0;
if (REG_P (new) || CONSTANT_P (new))
if (REG_P (new_rtx) || CONSTANT_P (new_rtx))
flags |= PR_CAN_APPEAR;
if (!for_each_rtx (&new, varying_mem_p, NULL))
if (!for_each_rtx (&new_rtx, varying_mem_p, NULL))
flags |= PR_HANDLE_MEM;
tem = x;
collapsed = propagate_rtx_1 (&tem, old, copy_rtx (new), flags);
collapsed = propagate_rtx_1 (&tem, old_rtx, copy_rtx (new_rtx), flags);
if (tem == x || !collapsed)
return NULL_RTX;
@ -717,7 +717,7 @@ update_df (rtx insn, rtx *loc, struct df_ref **use_rec, enum df_ref_type type,
performed. */
static bool
try_fwprop_subst (struct df_ref *use, rtx *loc, rtx new, rtx def_insn, bool set_reg_equal)
try_fwprop_subst (struct df_ref *use, rtx *loc, rtx new_rtx, rtx def_insn, bool set_reg_equal)
{
rtx insn = DF_REF_INSN (use);
enum df_ref_type type = DF_REF_TYPE (use);
@ -731,11 +731,11 @@ try_fwprop_subst (struct df_ref *use, rtx *loc, rtx new, rtx def_insn, bool set_
fprintf (dump_file, "\nIn insn %d, replacing\n ", INSN_UID (insn));
print_inline_rtx (dump_file, *loc, 2);
fprintf (dump_file, "\n with ");
print_inline_rtx (dump_file, new, 2);
print_inline_rtx (dump_file, new_rtx, 2);
fprintf (dump_file, "\n");
}
validate_unshare_change (insn, loc, new, true);
validate_unshare_change (insn, loc, new_rtx, true);
if (!verify_changes (0))
{
if (dump_file)
@ -766,7 +766,7 @@ try_fwprop_subst (struct df_ref *use, rtx *loc, rtx new, rtx def_insn, bool set_
num_changes++;
df_ref_remove (use);
if (!CONSTANT_P (new))
if (!CONSTANT_P (new_rtx))
{
struct df_insn_info *insn_info = DF_INSN_INFO_GET (def_insn);
update_df (insn, loc, DF_INSN_INFO_USES (insn_info), type, flags);
@ -784,11 +784,11 @@ try_fwprop_subst (struct df_ref *use, rtx *loc, rtx new, rtx def_insn, bool set_
if (dump_file)
fprintf (dump_file, " Setting REG_EQUAL note\n");
set_unique_reg_note (insn, REG_EQUAL, copy_rtx (new));
set_unique_reg_note (insn, REG_EQUAL, copy_rtx (new_rtx));
/* ??? Is this still necessary if we add the note through
set_unique_reg_note? */
if (!CONSTANT_P (new))
if (!CONSTANT_P (new_rtx))
{
struct df_insn_info *insn_info = DF_INSN_INFO_GET (def_insn);
update_df (insn, loc, DF_INSN_INFO_USES (insn_info),
@ -845,7 +845,7 @@ forward_propagate_and_simplify (struct df_ref *use, rtx def_insn, rtx def_set)
{
rtx use_insn = DF_REF_INSN (use);
rtx use_set = single_set (use_insn);
rtx src, reg, new, *loc;
rtx src, reg, new_rtx, *loc;
bool set_reg_equal;
enum machine_mode mode;
@ -888,10 +888,10 @@ forward_propagate_and_simplify (struct df_ref *use, rtx def_insn, rtx def_set)
if (x != src)
{
rtx note = find_reg_note (use_insn, REG_EQUAL, NULL_RTX);
rtx old = note ? XEXP (note, 0) : SET_SRC (use_set);
rtx new = simplify_replace_rtx (old, src, x);
if (old != new)
set_unique_reg_note (use_insn, REG_EQUAL, copy_rtx (new));
rtx old_rtx = note ? XEXP (note, 0) : SET_SRC (use_set);
rtx new_rtx = simplify_replace_rtx (old_rtx, src, x);
if (old_rtx != new_rtx)
set_unique_reg_note (use_insn, REG_EQUAL, copy_rtx (new_rtx));
}
return false;
}
@ -923,12 +923,12 @@ forward_propagate_and_simplify (struct df_ref *use, rtx def_insn, rtx def_set)
else
mode = GET_MODE (*loc);
new = propagate_rtx (*loc, mode, reg, src);
new_rtx = propagate_rtx (*loc, mode, reg, src);
if (!new)
if (!new_rtx)
return false;
return try_fwprop_subst (use, loc, new, def_insn, set_reg_equal);
return try_fwprop_subst (use, loc, new_rtx, def_insn, set_reg_equal);
}

gcse.c

@ -2791,7 +2791,7 @@ find_avail_set (int regno, rtx insn)
static int
cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
{
rtx new, set_src, note_src;
rtx new_rtx, set_src, note_src;
rtx set = pc_set (jump);
rtx note = find_reg_equal_equiv_note (jump);
@ -2823,22 +2823,22 @@ cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
else
setcc = NULL_RTX;
new = simplify_replace_rtx (set_src, from, src);
new_rtx = simplify_replace_rtx (set_src, from, src);
/* If no simplification can be made, then try the next register. */
if (rtx_equal_p (new, SET_SRC (set)))
if (rtx_equal_p (new_rtx, SET_SRC (set)))
return 0;
/* If this is now a no-op delete it, otherwise this must be a valid insn. */
if (new == pc_rtx)
if (new_rtx == pc_rtx)
delete_insn (jump);
else
{
/* Ensure the value computed inside the jump insn to be equivalent
to one computed by setcc. */
if (setcc && modified_in_p (new, setcc))
if (setcc && modified_in_p (new_rtx, setcc))
return 0;
if (! validate_unshare_change (jump, &SET_SRC (set), new, 0))
if (! validate_unshare_change (jump, &SET_SRC (set), new_rtx, 0))
{
/* When (some) constants are not valid in a comparison, and there
are two registers to be replaced by constants before the entire
@ -2849,8 +2849,8 @@ cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
we need to attach a note to the branch itself to make this
optimization work. */
if (!rtx_equal_p (new, note_src))
set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new));
if (!rtx_equal_p (new_rtx, note_src))
set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new_rtx));
return 0;
}
@ -2881,7 +2881,7 @@ cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
/* If a conditional jump has been changed into unconditional jump, remove
the jump and make the edge fallthru - this is always called in
cfglayout mode. */
if (new != pc_rtx && simplejump_p (jump))
if (new_rtx != pc_rtx && simplejump_p (jump))
{
edge e;
edge_iterator ei;
@ -3306,7 +3306,7 @@ find_implicit_sets (void)
{
basic_block bb, dest;
unsigned int count;
rtx cond, new;
rtx cond, new_rtx;
count = 0;
FOR_EACH_BB (bb)
@ -3327,9 +3327,9 @@ find_implicit_sets (void)
if (dest && single_pred_p (dest)
&& dest != EXIT_BLOCK_PTR)
{
new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
new_rtx = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
XEXP (cond, 1));
implicit_sets[dest->index] = new;
implicit_sets[dest->index] = new_rtx;
if (dump_file)
{
fprintf(dump_file, "Implicit set of reg %d in ",
@ -3539,7 +3539,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump)
unsigned int regno = REGNO (reg_used->reg_rtx);
basic_block dest, old_dest;
struct expr *set;
rtx src, new;
rtx src, new_rtx;
if (regno >= max_gcse_regno)
continue;
@ -3560,7 +3560,7 @@ bypass_block (basic_block bb, rtx setcc, rtx jump)
SET_DEST (PATTERN (setcc)),
SET_SRC (PATTERN (setcc)));
new = simplify_replace_rtx (src, reg_used->reg_rtx,
new_rtx = simplify_replace_rtx (src, reg_used->reg_rtx,
SET_SRC (set->expr));
/* Jump bypassing may have already placed instructions on
@ -3568,14 +3568,14 @@ bypass_block (basic_block bb, rtx setcc, rtx jump)
has instructions associated with it, as these insns won't
get executed if the incoming edge is redirected. */
if (new == pc_rtx)
if (new_rtx == pc_rtx)
{
edest = FALLTHRU_EDGE (bb);
dest = edest->insns.r ? NULL : edest->dest;
}
else if (GET_CODE (new) == LABEL_REF)
else if (GET_CODE (new_rtx) == LABEL_REF)
{
dest = BLOCK_FOR_INSN (XEXP (new, 0));
dest = BLOCK_FOR_INSN (XEXP (new_rtx, 0));
/* Don't bypass edges containing instructions. */
edest = find_edge (bb, dest);
if (edest && edest->insns.r)
@ -4336,7 +4336,7 @@ pre_insert_copies (void)
static rtx
gcse_emit_move_after (rtx src, rtx dest, rtx insn)
{
rtx new;
rtx new_rtx;
rtx set = single_set (insn), set2;
rtx note;
rtx eqv;
@ -4344,20 +4344,20 @@ gcse_emit_move_after (rtx src, rtx dest, rtx insn)
/* This should never fail since we're creating a reg->reg copy
we've verified to be valid. */
new = emit_insn_after (gen_move_insn (dest, src), insn);
new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
/* Note the equivalence for local CSE pass. */
set2 = single_set (new);
set2 = single_set (new_rtx);
if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
return new;
return new_rtx;
if ((note = find_reg_equal_equiv_note (insn)))
eqv = XEXP (note, 0);
else
eqv = SET_SRC (set);
set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
return new;
return new_rtx;
}
/* Delete redundant computations.
@ -5384,7 +5384,7 @@ update_ld_motion_stores (struct expr * expr)
rtx pat = PATTERN (insn);
rtx src = SET_SRC (pat);
rtx reg = expr->reaching_reg;
rtx copy, new;
rtx copy, new_rtx;
/* If we've already copied it, continue. */
if (expr->reaching_reg == src)
@ -5400,8 +5400,8 @@ update_ld_motion_stores (struct expr * expr)
}
copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat)));
new = emit_insn_before (copy, insn);
record_one_set (REGNO (reg), new);
new_rtx = emit_insn_before (copy, insn);
record_one_set (REGNO (reg), new_rtx);
SET_SRC (pat) = reg;
df_insn_rescan (insn);

lcm.c

@ -350,13 +350,13 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
static void
compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
sbitmap *later, sbitmap *laterin, sbitmap *insert,
sbitmap *delete)
sbitmap *del)
{
int x;
basic_block bb;
FOR_EACH_BB (bb)
sbitmap_difference (delete[bb->index], antloc[bb->index],
sbitmap_difference (del[bb->index], antloc[bb->index],
laterin[bb->index]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
@ -377,7 +377,7 @@ compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc,
struct edge_list *
pre_edge_lcm (int n_exprs, sbitmap *transp,
sbitmap *avloc, sbitmap *antloc, sbitmap *kill,
sbitmap **insert, sbitmap **delete)
sbitmap **insert, sbitmap **del)
{
sbitmap *antin, *antout, *earliest;
sbitmap *avin, *avout;
@ -450,8 +450,8 @@ pre_edge_lcm (int n_exprs, sbitmap *transp,
sbitmap_vector_free (earliest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
*delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete);
*del = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_insert_delete (edge_list, antloc, later, laterin, *insert, *del);
sbitmap_vector_free (laterin);
sbitmap_vector_free (later);
@ -460,7 +460,7 @@ pre_edge_lcm (int n_exprs, sbitmap *transp,
if (dump_file)
{
dump_sbitmap_vector (dump_file, "pre_insert_map", "", *insert, num_edges);
dump_sbitmap_vector (dump_file, "pre_delete_map", "", *delete,
dump_sbitmap_vector (dump_file, "pre_delete_map", "", *del,
last_basic_block);
}
#endif
@ -684,13 +684,13 @@ compute_nearerout (struct edge_list *edge_list, sbitmap *farthest,
static void
compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
sbitmap *nearer, sbitmap *nearerout,
sbitmap *insert, sbitmap *delete)
sbitmap *insert, sbitmap *del)
{
int x;
basic_block bb;
FOR_EACH_BB (bb)
sbitmap_difference (delete[bb->index], st_avloc[bb->index],
sbitmap_difference (del[bb->index], st_avloc[bb->index],
nearerout[bb->index]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
@ -711,7 +711,7 @@ compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc,
struct edge_list *
pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
sbitmap *st_avloc, sbitmap *st_antloc, sbitmap *kill,
sbitmap **insert, sbitmap **delete)
sbitmap **insert, sbitmap **del)
{
sbitmap *st_antin, *st_antout;
sbitmap *st_avout, *st_avin, *farthest;
@ -790,9 +790,9 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
sbitmap_vector_free (farthest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
*delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
*del = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
*insert, *delete);
*insert, *del);
sbitmap_vector_free (nearerout);
sbitmap_vector_free (nearer);
@ -801,7 +801,7 @@ pre_edge_rev_lcm (int n_exprs, sbitmap *transp,
if (dump_file)
{
dump_sbitmap_vector (dump_file, "pre_insert_map", "", *insert, num_edges);
dump_sbitmap_vector (dump_file, "pre_delete_map", "", *delete,
dump_sbitmap_vector (dump_file, "pre_delete_map", "", *del,
last_basic_block);
}
#endif

lower-subreg.c

@ -577,7 +577,7 @@ resolve_reg_notes (rtx insn)
pnote = &REG_NOTES (insn);
while (*pnote != NULL_RTX)
{
bool delete = false;
bool del = false;
note = *pnote;
switch (REG_NOTE_KIND (note))
@ -585,14 +585,14 @@ resolve_reg_notes (rtx insn)
case REG_DEAD:
case REG_UNUSED:
if (resolve_reg_p (XEXP (note, 0)))
delete = true;
del = true;
break;
default:
break;
}
if (delete)
if (del)
*pnote = XEXP (note, 1);
else
pnote = &XEXP (note, 1);

mode-switching.c

@ -585,7 +585,7 @@ optimize_mode_switching (void)
for (i = 0; i < max_num_modes; i++)
{
int current_mode[N_ENTITIES];
sbitmap *delete;
sbitmap *del;
sbitmap *insert;
/* Set the anticipatable and computing arrays. */
@ -612,7 +612,7 @@ optimize_mode_switching (void)
FOR_EACH_BB (bb)
sbitmap_not (kill[bb->index], transp[bb->index]);
edge_list = pre_edge_lcm (n_entities, transp, comp, antic,
kill, &insert, &delete);
kill, &insert, &del);
for (j = n_entities - 1; j >= 0; j--)
{
@ -663,7 +663,7 @@ optimize_mode_switching (void)
}
FOR_EACH_BB_REVERSE (bb)
if (TEST_BIT (delete[bb->index], j))
if (TEST_BIT (del[bb->index], j))
{
make_preds_opaque (bb, j);
/* Cancel the 'deleted' mode set. */
@ -671,7 +671,7 @@ optimize_mode_switching (void)
}
}
sbitmap_vector_free (delete);
sbitmap_vector_free (del);
sbitmap_vector_free (insert);
clear_aux_for_edges ();
free_edge_list (edge_list);