make several functions in the scheduler take rtx_insn *

gcc/ChangeLog:

2015-05-02  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>

	* haifa-sched.c: Change the type of some variables to rtx_insn *.
	* sched-deps.c: Likewise.
	* sched-int.h: Likewise.
	* sched-rgn.c: Likewise.
	* sel-sched.c: Likewise.

From-SVN: r222737
Trevor Saunders, 2015-05-02 21:05:54 +00:00 (committed by Trevor Saunders)
commit 90831096a7, parent 0bd5850c9b
6 changed files with 40 additions and 32 deletions
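The motivation: rtx is GCC's generic pointer to any RTL expression (rtx_def *), while rtx_insn is a subclass of rtx_def that can only represent instructions. Narrowing parameters and variables from rtx to rtx_insn * moves "is this actually an insn?" checking from run time to compile time; the diffs below are the mechanical consequence. A minimal stand-alone sketch of the idiom, using hypothetical stand-in types rather than GCC's real declarations from rtl.h:

/* Stand-alone sketch; rtx_def/rtx_insn here are hypothetical stand-ins
   for the real hierarchy in gcc/rtl.h.  */
#include <cstdio>

struct rtx_def { int code; };        /* any RTL expression */
typedef rtx_def *rtx;
struct rtx_insn : rtx_def { };       /* RTL expressions that are insns */

static void remove_old (rtx) { }          /* pre-patch style */
static void remove_new (rtx_insn *) { }   /* post-patch style */

int main ()
{
  rtx_insn insn = {};
  rtx_def reg = {};          /* some non-insn RTL object */

  remove_old (&insn);        /* ok */
  remove_old (&reg);         /* also compiles: a latent type error */

  remove_new (&insn);        /* ok */
  /* remove_new (&reg); */   /* rejected at compile time */
  std::printf ("ok\n");
  return 0;
}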

gcc/ChangeLog

@@ -1,3 +1,11 @@
+2015-05-02  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
+
+	* haifa-sched.c: Change the type of some variables to rtx_insn *.
+	* sched-deps.c: Likewise.
+	* sched-int.h: Likewise.
+	* sched-rgn.c: Likewise.
+	* sel-sched.c: Likewise.
+
 2015-05-02  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>
 
 	to rtx_insn *.

gcc/haifa-sched.c

@@ -881,7 +881,7 @@ static int early_queue_to_ready (state_t, struct ready_list *);
 /* The following functions are used to implement multi-pass scheduling
    on the first cycle.  */
 static rtx_insn *ready_remove (struct ready_list *, int);
-static void ready_remove_insn (rtx);
+static void ready_remove_insn (rtx_insn *);
 static void fix_inter_tick (rtx_insn *, rtx_insn *);
 static int fix_tick_ready (rtx_insn *);
@@ -894,7 +894,7 @@ static void extend_h_i_d (void);
 static void init_h_i_d (rtx_insn *);
 static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
 static void generate_recovery_code (rtx_insn *);
-static void process_insn_forw_deps_be_in_spec (rtx, rtx_insn *, ds_t);
+static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
 static void begin_speculative_block (rtx_insn *);
 static void add_to_speculative_block (rtx_insn *);
 static void init_before_recovery (basic_block *);
@@ -1390,7 +1390,7 @@ static rtx_insn *last_scheduled_insn;
    block, or the prev_head of the scheduling block.  Used by
    rank_for_schedule, so that insns independent of the last scheduled
    insn will be preferred over dependent instructions.  */
-static rtx last_nondebug_scheduled_insn;
+static rtx_insn *last_nondebug_scheduled_insn;
 
 /* Pointer that iterates through the list of unscheduled insns if we
    have a dbg_cnt enabled.  It always points at an insn prior to the
@@ -1598,7 +1598,7 @@ contributes_to_priority_p (dep_t dep)
 /* Compute the number of nondebug deps in list LIST for INSN.  */
 static int
-dep_list_size (rtx insn, sd_list_types_def list)
+dep_list_size (rtx_insn *insn, sd_list_types_def list)
 {
   sd_iterator_def sd_it;
   dep_t dep;
@@ -2787,7 +2787,7 @@ rank_for_schedule (const void *x, const void *y)
     {
       dep_t dep1;
       dep_t dep2;
-      rtx last = last_nondebug_scheduled_insn;
+      rtx_insn *last = last_nondebug_scheduled_insn;
 
       /* Classify the instructions into three classes:
          1) Data dependent on last schedule insn.
@@ -3032,7 +3032,7 @@ ready_remove (struct ready_list *ready, int index)
 /* Remove INSN from the ready list.  */
 static void
-ready_remove_insn (rtx insn)
+ready_remove_insn (rtx_insn *insn)
 {
   int i;
@@ -3287,7 +3287,7 @@ sched_setup_bb_reg_pressure_info (basic_block bb, rtx_insn *after)
    only be scheduled once their control dependency is resolved.  */
 static void
-check_clobbered_conditions (rtx insn)
+check_clobbered_conditions (rtx_insn *insn)
 {
   HARD_REG_SET t;
   int i;
@@ -4309,7 +4309,7 @@ struct haifa_saved_data
   state_t curr_state;
 
   rtx_insn *last_scheduled_insn;
-  rtx last_nondebug_scheduled_insn;
+  rtx_insn *last_nondebug_scheduled_insn;
   rtx_insn *nonscheduled_insns_begin;
   int cycle_issued_insns;
@@ -4339,7 +4339,7 @@ static struct haifa_saved_data *backtrack_queue;
 /* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
    to SET_P.  */
 static void
-mark_backtrack_feeds (rtx insn, int set_p)
+mark_backtrack_feeds (rtx_insn *insn, int set_p)
 {
   sd_iterator_def sd_it;
   dep_t dep;
@@ -4485,7 +4485,7 @@ undo_replacements_for_backtrack (struct haifa_saved_data *save)
    queued nowhere.  */
 static void
-unschedule_insns_until (rtx insn)
+unschedule_insns_until (rtx_insn *insn)
 {
   auto_vec<rtx_insn *> recompute_vec;
@@ -5133,7 +5133,7 @@ queue_to_ready (struct ready_list *ready)
 {
   rtx_insn *insn;
   rtx_insn_list *link;
-  rtx skip_insn;
+  rtx_insn *skip_insn;
 
   q_ptr = NEXT_Q (q_ptr);
@@ -5142,7 +5142,7 @@ queue_to_ready (struct ready_list *ready)
        nonscheduled insn.  */
     skip_insn = first_nonscheduled_insn ();
   else
-    skip_insn = NULL_RTX;
+    skip_insn = NULL;
 
   /* Add all pending insns that can be scheduled without stalls to the
      ready list.  */
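The NULL_RTX to NULL substitution in this hunk (and again in schedule_block below) falls out of the type change: rtl.h defines NULL_RTX as (rtx) 0, and C++ will not implicitly convert the base-class pointer rtx to the subclass pointer rtx_insn *, not even for a null value, so the plain null constant is used instead. A stand-alone illustration with the same stand-in types as the sketch above:

#include <cstddef>

struct rtx_def { int code; };
typedef rtx_def *rtx;
struct rtx_insn : rtx_def { };

#define NULL_RTX (rtx) 0    /* matches the definition in gcc/rtl.h */

int main ()
{
  rtx_insn *skip_insn;
  /* skip_insn = NULL_RTX; */  /* error: no implicit rtx -> rtx_insn * */
  skip_insn = NULL;            /* a null constant converts to any pointer */
  return skip_insn == NULL ? 0 : 1;
}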
@@ -5237,7 +5237,7 @@ queue_to_ready (struct ready_list *ready)
    addition) depending on user flags and target hooks.  */
 static bool
-ok_for_early_queue_removal (rtx insn)
+ok_for_early_queue_removal (rtx_insn *insn)
 {
   if (targetm.sched.is_costly_dependence)
     {
@@ -6467,7 +6467,7 @@ schedule_block (basic_block *target_bb, state_t init_state)
   /* We start inserting insns after PREV_HEAD.  */
   last_scheduled_insn = prev_head;
-  last_nondebug_scheduled_insn = NULL_RTX;
+  last_nondebug_scheduled_insn = NULL;
   nonscheduled_insns_begin = NULL;
 
   gcc_assert ((NOTE_P (last_scheduled_insn)
@@ -7799,7 +7799,7 @@ generate_recovery_code (rtx_insn *insn)
    Tries to add speculative dependencies of type FS between instructions
    in deps_list L and TWIN.  */
 static void
-process_insn_forw_deps_be_in_spec (rtx insn, rtx_insn *twin, ds_t fs)
+process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
 {
   sd_iterator_def sd_it;
   dep_t dep;

gcc/sched-deps.c

@@ -503,7 +503,7 @@ static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
 static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					   rtx_insn_list **, int, enum reg_note,
					   bool);
-static void delete_all_dependences (rtx);
+static void delete_all_dependences (rtx_insn *);
 static void chain_to_prev_insn (rtx_insn *);
 static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
@@ -1621,7 +1621,7 @@ add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
    occurrences removed.  */
 static int
-remove_from_dependence_list (rtx insn, rtx_insn_list **listp)
+remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
 {
   int removed = 0;
@@ -1642,7 +1642,7 @@ remove_from_dependence_list (rtx insn, rtx_insn_list **listp)
 /* Same as above, but process two lists at once.  */
 static int
-remove_from_both_dependence_lists (rtx insn,
+remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
 {
@@ -1667,7 +1667,7 @@ remove_from_both_dependence_lists (rtx insn,
 /* Clear all dependencies for an insn.  */
 static void
-delete_all_dependences (rtx insn)
+delete_all_dependences (rtx_insn *insn)
 {
   sd_iterator_def sd_it;
   dep_t dep;
@@ -2211,7 +2211,7 @@ mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
 /* Set up reg pressure info related to INSN.  */
 void
-init_insn_reg_pressure_info (rtx insn)
+init_insn_reg_pressure_info (rtx_insn *insn)
 {
   int i, len;
   enum reg_class cl;
@@ -3531,7 +3531,7 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
 /* FIXME: Why can't this function just use flags_from_decl_or_type and
    test for ECF_NORETURN?  */
 static bool
-call_may_noreturn_p (rtx insn)
+call_may_noreturn_p (rtx_insn *insn)
 {
   rtx call;
@@ -3594,7 +3594,7 @@ call_may_noreturn_p (rtx insn)
    instruction of that group.  */
 static bool
-chain_to_prev_insn_p (rtx insn)
+chain_to_prev_insn_p (rtx_insn *insn)
 {
   rtx prev, x;
@@ -3844,7 +3844,7 @@ sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
 /* Helper for sched_free_deps ().
    Delete INSN's (RESOLVED_P) backward dependencies.  */
 static void
-delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
+delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
 {
   sd_iterator_def sd_it;
   dep_t dep;

gcc/sched-int.h

@@ -1345,7 +1345,7 @@ extern void init_deps_global (void);
 extern void finish_deps_global (void);
 extern void deps_analyze_insn (struct deps_desc *, rtx_insn *);
 extern void remove_from_deps (struct deps_desc *, rtx_insn *);
-extern void init_insn_reg_pressure_info (rtx);
+extern void init_insn_reg_pressure_info (rtx_insn *);
 
 extern dw_t get_dep_weak (ds_t, ds_t);
 extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
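The header has to change in lock-step with the definition in sched-deps.c above, and this matters more under C++ than it did under C: a parameter-type mismatch does not merely shadow the old declaration, it introduces a distinct overload, so callers matching the stale extern compile and then fail at link time. A small stand-alone demonstration, with a hypothetical init_info standing in for init_insn_reg_pressure_info:

struct rtx_def { int code; };
typedef rtx_def *rtx;
struct rtx_insn : rtx_def { };

extern void init_info (rtx);      /* stale header declaration */
void init_info (rtx_insn *) { }   /* updated definition: a different
                                     overload, not a redefinition */

int main ()
{
  rtx_insn insn = {};
  init_info (&insn);          /* resolves to the rtx_insn * overload; links */
  /* init_info ((rtx) &insn); */
  /* would compile against the stale extern but fail to link, which is
     how a missed header update gets caught */
  return 0;
}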

gcc/sched-rgn.c

@@ -240,10 +240,10 @@ static edgeset *ancestor_edges;
 static int check_live_1 (int, rtx);
 static void update_live_1 (int, rtx);
 static int is_pfree (rtx, int, int);
-static int find_conditional_protection (rtx, int);
+static int find_conditional_protection (rtx_insn *, int);
 static int is_conditionally_protected (rtx, int, int);
 static int is_prisky (rtx, int, int);
-static int is_exception_free (rtx, int, int);
+static int is_exception_free (rtx_insn *, int, int);
 static bool sets_likely_spilled (rtx);
 static void sets_likely_spilled_1 (rtx, const_rtx, void *);
@@ -1841,7 +1841,7 @@ check_live (rtx_insn *insn, int src)
    block src to trg.  */
 static void
-update_live (rtx insn, int src)
+update_live (rtx_insn *insn, int src)
 {
   /* Find the registers set by instruction.  */
   if (GET_CODE (PATTERN (insn)) == SET
@@ -1882,7 +1882,7 @@ set_spec_fed (rtx load_insn)
    branch depending on insn, that guards the speculative load.  */
 static int
-find_conditional_protection (rtx insn, int load_insn_bb)
+find_conditional_protection (rtx_insn *insn, int load_insn_bb)
 {
   sd_iterator_def sd_it;
   dep_t dep;
@@ -2042,7 +2042,7 @@ is_prisky (rtx load_insn, int bb_src, int bb_trg)
    and 0 otherwise.  */
 static int
-is_exception_free (rtx insn, int bb_src, int bb_trg)
+is_exception_free (rtx_insn *insn, int bb_src, int bb_trg)
 {
   int insn_class = haifa_classify_insn (insn);

gcc/sel-sched.c

@@ -614,7 +614,7 @@ advance_one_cycle (fence_t fence)
 /* Returns true when SUCC in a fallthru bb of INSN, possibly
    skipping empty basic blocks.  */
 static bool
-in_fallthru_bb_p (rtx insn, rtx succ)
+in_fallthru_bb_p (rtx_insn *insn, rtx succ)
 {
   basic_block bb = BLOCK_FOR_INSN (insn);
   edge e;
@@ -1853,7 +1853,7 @@ create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn)
 /* True when INSN is a "regN = regN" copy.  */
 static bool
-identical_copy_p (rtx insn)
+identical_copy_p (rtx_insn *insn)
 {
   rtx lhs, rhs, pat;
@@ -5830,7 +5830,7 @@ move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams)
 /* Track bookkeeping copies created, insns scheduled, and blocks for
    rescheduling when INSN is found by move_op.  */
 static void
-track_scheduled_insns_and_blocks (rtx insn)
+track_scheduled_insns_and_blocks (rtx_insn *insn)
 {
   /* Even if this insn can be a copy that will be removed during current move_op,
      we still need to count it as an originator.  */
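Call sites that still hold only an rtx can no longer pass it straight into the functions converted here; in GCC the narrowing is done explicitly with the checked casts from is-a.h, as_a <rtx_insn *> (x) when x must be an insn and dyn_cast <rtx_insn *> (x) when it may not be. A rough stand-alone sketch of that pattern, with a hypothetical code check standing in for the real predicate:

#include <cassert>

struct rtx_def { int code; };
typedef rtx_def *rtx;
struct rtx_insn : rtx_def { };

enum { REG = 0, INSN = 1 };   /* hypothetical code values */

/* Checked downcast in the spirit of as_a <rtx_insn *>: assert the
   run-time property, then cast.  */
static rtx_insn *
as_insn (rtx x)
{
  assert (x->code == INSN);
  return static_cast<rtx_insn *> (x);
}

static void track_insn (rtx_insn *) { }

int main ()
{
  rtx_insn insn = {};
  insn.code = INSN;
  rtx x = &insn;             /* boundary code often has only the base type */
  track_insn (as_insn (x));  /* explicit, checked narrowing */
  return 0;
}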