recog_memoized works on an rtx_insn *
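
The underlying change: recog_memoized and the extract_insn family now take
an rtx_insn * rather than a plain rtx, so callers must prove they hold a
real instruction.  A minimal sketch of the resulting idiom, in GCC's C++
(the declaration is paraphrased from the recog.h entry in the ChangeLog
below, not quoted from the source):

    /* recog.h: the recognizer now requires an insn, not a bare rtx.  */
    static inline int recog_memoized (rtx_insn *insn);

    /* Caller side (cf. config/arc/arc.c below): PATTERN () still yields
       an rtx, so a SEQUENCE is converted back to insns with a checked
       cast before being handed to the recognizer.  */
    if (GET_CODE (PATTERN (pred)) == SEQUENCE)
      pred = as_a <rtx_sequence *> (PATTERN (pred))->insn (1);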

gcc/ChangeLog:
2014-09-09  David Malcolm  <dmalcolm@redhat.com>

	* caller-save.c (rtx saveinsn): Strengthen this variable from rtx
	to rtx_insn *.
	(restinsn): Likewise.
	* config/aarch64/aarch64-protos.h (aarch64_simd_attr_length_move):
	Likewise for param.
	* config/aarch64/aarch64.c (aarch64_simd_attr_length_move):
	Likewise.
	* config/arc/arc-protos.h (arc_adjust_insn_length): Likewise for
	first param.
	(arc_hazard): Likewise for both params.
	* config/arc/arc.c (arc600_corereg_hazard): Likewise, adding
	checked casts to rtx_sequence * and uses of the insn method for
	type-safety.
	(arc_hazard): Strengthen both params from rtx to rtx_insn *.
	(arc_adjust_insn_length): Likewise for param "insn".
	(struct insn_length_parameters_s): Likewise for first param of
	"get_variants" callback field.
	(arc_get_insn_variants): Likewise for first param and local
	"inner".  Replace a check of GET_CODE with a dyn_cast to
	rtx_sequence *, using methods for type-safety and clarity.
	* config/arc/arc.h (ADJUST_INSN_LENGTH): Use casts to
	rtx_sequence * and uses of the insn method for type-safety when
	invoking arc_adjust_insn_length.
	* config/arm/arm-protos.h (arm_attr_length_move_neon): Likewise
	for param.
	(arm_address_offset_is_imm): Likewise.
	(struct tune_params): Likewise for params 1 and 3 of the
	"sched_adjust_cost" callback field.
	* config/arm/arm.c (cortex_a9_sched_adjust_cost): Likewise for
	params 1 and 3 ("insn" and "dep").
	(xscale_sched_adjust_cost): Likewise.
	(fa726te_sched_adjust_cost): Likewise.
	(cortexa7_older_only): Likewise for param "insn".
	(cortexa7_younger): Likewise.
	(arm_attr_length_move_neon): Likewise.
	(arm_address_offset_is_imm): Likewise.
	* config/avr/avr-protos.h (avr_notice_update_cc): Likewise.
	* config/avr/avr.c (avr_notice_update_cc): Likewise.
	* config/bfin/bfin.c (hwloop_pattern_reg): Likewise.
	(workaround_speculation): Likewise for local "last_condjump".
	* config/c6x/c6x.c (shadow_p): Likewise for param "insn".
	(shadow_or_blockage_p): Likewise.
	(get_unit_reqs): Likewise.
	(get_unit_operand_masks): Likewise.
	(c6x_registers_update): Likewise.
	(returning_call_p): Likewise.
	(can_use_callp): Likewise.
	(convert_to_callp): Likewise.
	(find_last_same_clock): Likewise for local "t".
	(reorg_split_calls): Likewise for local "shadow".
	(hwloop_pattern_reg): Likewise for param "insn".
	* config/frv/frv-protos.h (frv_final_prescan_insn): Likewise.
	* config/frv/frv.c (frv_final_prescan_insn): Likewise.
	(frv_extract_membar): Likewise.
	(frv_optimize_membar_local): Strengthen param "last_membar" from
	rtx * to rtx_insn **.
	(frv_optimize_membar_global): Strengthen param "membar" from rtx
	to rtx_insn *.
	(frv_optimize_membar): Strengthen local "last_membar" from rtx *
	to rtx_insn **.
	* config/ia64/ia64-protos.h (ia64_st_address_bypass_p): Strengthen
	both params from rtx to rtx_insn *.
	(ia64_ld_address_bypass_p): Likewise.
	* config/ia64/ia64.c (ia64_safe_itanium_class): Likewise for param
	"insn".
	(ia64_safe_type): Likewise.
	(group_barrier_needed): Likewise.
	(safe_group_barrier_needed): Likewise.
	(ia64_single_set): Likewise.
	(is_load_p): Likewise.
	(record_memory_reference): Likewise.
	(get_mode_no_for_insn): Likewise.
	(important_for_bundling_p): Likewise.
	(unknown_for_bundling_p): Likewise.
	(ia64_st_address_bypass_p): Likewise for both params.
	(ia64_ld_address_bypass_p): Likewise.
	(expand_vselect): Introduce new local rtx_insn * "insn", using it
	in place of rtx "x" after the emit_insn call.
	* config/i386/i386-protos.h (x86_extended_QIreg_mentioned_p):
	Strengthen param from rtx to rtx_insn *.
	(ix86_agi_dependent): Likewise for both params.
	(ix86_attr_length_immediate_default): Likewise for param 1.
	(ix86_attr_length_address_default): Likewise for param.
	(ix86_attr_length_vex_default): Likewise for param 1.
	* config/i386/i386.c (ix86_attr_length_immediate_default):
	Likewise for param "insn".
	(ix86_attr_length_address_default): Likewise.
	(ix86_attr_length_vex_default): Likewise.
	(ix86_agi_dependent): Likewise for both params.
	(x86_extended_QIreg_mentioned_p): Likewise for param "insn".
	(vselect_insn): Likewise for this variable.
	* config/m68k/m68k-protos.h (m68k_sched_attr_opx_type): Likewise
	for param 1.
	(m68k_sched_attr_opy_type): Likewise.
	* config/m68k/m68k.c (sched_get_operand): Likewise.
	(sched_attr_op_type): Likewise.
	(m68k_sched_attr_opx_type): Likewise.
	(m68k_sched_attr_opy_type): Likewise.
	(sched_get_reg_operand): Likewise.
	(sched_get_mem_operand): Likewise.
	(m68k_sched_address_bypass_p): Likewise for both params.
	(sched_get_indexed_address_scale): Likewise.
	(m68k_sched_indexed_address_bypass_p): Likewise.
	* config/m68k/m68k.h (m68k_sched_address_bypass_p): Likewise.
	(m68k_sched_indexed_address_bypass_p): Likewise.
	* config/mep/mep.c (mep_jmp_return_reorg): Strengthen locals
	"label", "ret" from rtx to rtx_insn *, adding a checked cast and
	removing another.
	* config/mips/mips-protos.h (mips_linked_madd_p): Strengthen both
	params from rtx to rtx_insn *.
	(mips_fmadd_bypass): Likewise.
	* config/mips/mips.c (mips_fmadd_bypass): Likewise.
	(mips_linked_madd_p): Likewise.
	(mips_macc_chains_last_hilo): Likewise for this variable.
	(mips_macc_chains_record): Likewise for param.
	(vr4130_last_insn): Likewise for this variable.
	(vr4130_swap_insns_p): Likewise for both params.
	(mips_ls2_variable_issue): Likewise for param.
	(mips_need_noat_wrapper_p): Likewise for param "insn".
	(mips_expand_vselect): Add a new local rtx_insn * "insn", using it
	in place of "x" after the emit_insn.
	* config/pa/pa-protos.h (pa_fpstore_bypass_p): Strengthen both
	params from rtx to rtx_insn *.
	* config/pa/pa.c (pa_fpstore_bypass_p): Likewise.
	(pa_combine_instructions): Introduce local "par" for the result of
	gen_rtx_PARALLEL, moving the decl and usage of new_rtx to after the
	call to make_insn_raw.
	(pa_can_combine_p): Strengthen param "new_rtx" from rtx to rtx_insn *.
	* config/rl78/rl78.c (insn_ok_now): Likewise for param "insn".
	(rl78_alloc_physical_registers_op1): Likewise.
	(rl78_alloc_physical_registers_op2): Likewise.
	(rl78_alloc_physical_registers_ro1): Likewise.
	(rl78_alloc_physical_registers_cmp): Likewise.
	(rl78_alloc_physical_registers_umul): Likewise.
	(rl78_alloc_address_registers_macax): Likewise.
	(rl78_alloc_physical_registers): Likewise for locals "insn", "curr".
	* config/s390/predicates.md (execute_operation): Likewise for
	local "insn".
	* config/s390/s390-protos.h (s390_agen_dep_p): Likewise for both
	params.
	* config/s390/s390.c (s390_safe_attr_type): Likewise for param.
	(addr_generation_dependency_p): Likewise for param "insn".
	(s390_agen_dep_p): Likewise for both params.
	(s390_fpload_toreg): Likewise for param "insn".
	* config/sh/sh-protos.h (sh_loop_align): Likewise for param.
	* config/sh/sh.c (sh_loop_align): Likewise for param and local
	"next".
	* config/sh/sh.md (define_peephole2): Likewise for local "insn2".
	* config/sh/sh_treg_combine.cc
	(sh_treg_combine::make_inv_ccreg_insn): Likewise for return type
	and local "i".
	(sh_treg_combine::try_eliminate_cstores): Likewise for local "i".
	* config/stormy16/stormy16.c (combine_bnp): Likewise for locals
	"and_insn", "load", "shift".
	* config/tilegx/tilegx.c (match_pcrel_step2): Likewise for param
	"insn".
	* final.c (final_scan_insn): Introduce local rtx_insn * "other"
	for XEXP (note, 0) of the REG_CC_SETTER note.
	(cleanup_subreg_operands): Strengthen param "insn" from rtx to
	rtx_insn *, eliminating a checked cast made redundant by this.
	* gcse.c (process_insert_insn): Strengthen local "insn" from rtx
	to rtx_insn *.
	* genattr.c (main): When writing out the prototype to
	const_num_delay_slots, strengthen the param from rtx to
	rtx_insn *.
	* genattrtab.c (write_const_num_delay_slots): Likewise when
	writing out the implementation of const_num_delay_slots.
	* hw-doloop.h (struct hw_doloop_hooks): Strengthen the param
	"insn" of callback field "end_pattern_reg" from rtx to rtx_insn *.
	* ifcvt.c (noce_emit_store_flag): Eliminate local rtx "tmp" in
	favor of new rtx locals "src" and "set" and new local rtx_insn *
	"insn" and "seq".
	(noce_emit_move_insn): Strengthen locals "seq" and "insn" from rtx
	to rtx_insn *.
	(noce_emit_cmove): Eliminate local rtx "tmp" in favor of new rtx
	locals "cond", "if_then_else", "set" and new rtx_insn * locals
	"insn" and "seq".
	(noce_try_cmove_arith): Strengthen locals "insn_a" and "insn_b",
	"last" from rtx to rtx_insn *.  Likewise for a local "tmp",
	renaming to "tmp_insn".  Eliminate the other local rtx "tmp" from
	the top-level scope, replacing with new more tightly-scoped rtx
	locals "reg", "pat", "mem" and rtx_insn * "insn", "copy_of_a",
	"new_insn", "copy_of_insn_b", and make local rtx "set" more
	tightly-scoped.
	* ira-int.h (ira_setup_alts): Strengthen param "insn" from rtx to
	rtx_insn *.
	* ira.c (setup_prohibited_mode_move_regs): Likewise for local
	"move_insn".
	(ira_setup_alts): Likewise for param "insn".
	* lra-constraints.c (emit_inc): Likewise for local "add_insn".
	* lra.c (emit_add3_insn): Split local rtx "insn" in two, an rtx
	and an rtx_insn *.
	(lra_emit_add): Eliminate top-level local rtx "insn" in favor of
	new more-tightly scoped rtx locals "add3_insn", "insn",
	"add2_insn" and rtx_insn * "move_insn".
	* postreload-gcse.c (eliminate_partially_redundant_load): Add
	checked cast on result of gen_move_insn when invoking
	extract_insn.
	* recog.c (insn_invalid_p): Strengthen param "insn" from rtx to
	rtx_insn *.
	(verify_changes): Add a checked cast on "object" when invoking
	insn_invalid_p.
	(extract_insn_cached): Strengthen param "insn" from rtx to
	rtx_insn *.
	(extract_constrain_insn_cached): Likewise.
	(extract_insn): Likewise.
	* recog.h (insn_invalid_p): Likewise for param 1.
	(recog_memoized): Likewise for param.
	(extract_insn): Likewise.
	(extract_constrain_insn_cached): Likewise.
	(extract_insn_cached): Likewise.
	* reload.c (can_reload_into): Likewise for local "test_insn".
	* reload.h (cleanup_subreg_operands): Likewise for param.
	* reload1.c (emit_insn_if_valid_for_reload): Rename param from
	"insn" to "pat", reintroducing "insn" as an rtx_insn * on the
	result of emit_insn.  Remove a checked cast made redundant by this
	change.
	* sel-sched-ir.c (sel_insn_rtx_cost): Strengthen param "insn" from
	rtx to rtx_insn *.
	* sel-sched.c (get_reg_class): Likewise.

From-SVN: r215087
David Malcolm <dmalcolm@redhat.com>, 2014-09-09 16:34:56 +0000
commit 647d790d2f (parent b677236af0)
55 changed files with 486 additions and 252 deletions
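
Two cast idioms recur in the hunks that follow: as_a <rtx_sequence *>,
which asserts that the conversion is valid, and dyn_cast <rtx_sequence *>,
which returns null on mismatch and so folds the old GET_CODE test into the
cast itself.  A condensed sketch of both, adapted from the arc.c hunks
(process_delay_slot is a hypothetical consumer, not part of this patch):

    /* Pattern already known to be a SEQUENCE: assert-checked cast.  */
    rtx_insn *inner = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);

    /* Pattern that may or may not be a SEQUENCE: test and cast in one
       step; the body is skipped when the dyn_cast yields null.  */
    if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
      process_delay_slot (seq->insn (1));  /* hypothetical consumer */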

gcc/caller-save.c

@@ -103,8 +103,8 @@ static GTY(()) rtx savepat;
static GTY(()) rtx restpat;
static GTY(()) rtx test_reg;
static GTY(()) rtx test_mem;
static GTY(()) rtx saveinsn;
static GTY(()) rtx restinsn;
static GTY(()) rtx_insn *saveinsn;
static GTY(()) rtx_insn *restinsn;
/* Return the INSN_CODE used to save register REG in mode MODE. */
static int

gcc/config/aarch64/aarch64-protos.h

@@ -225,7 +225,7 @@ enum machine_mode aarch64_hard_regno_caller_save_mode (unsigned, unsigned,
enum machine_mode);
int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
int aarch64_simd_attr_length_move (rtx);
int aarch64_simd_attr_length_move (rtx_insn *);
int aarch64_uxt_size (int, HOST_WIDE_INT);
rtx aarch64_final_eh_return_addr (void);
rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);

gcc/config/aarch64/aarch64.c

@@ -8009,7 +8009,7 @@ aarch64_simd_disambiguate_copy (rtx *operands, rtx *dest,
/* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
one of VSTRUCT modes: OI, CI or XI. */
int
aarch64_simd_attr_length_move (rtx insn)
aarch64_simd_attr_length_move (rtx_insn *insn)
{
enum machine_mode mode;

gcc/config/arc/arc-protos.h

@@ -78,9 +78,9 @@ struct secondary_reload_info;
extern int arc_register_move_cost (enum machine_mode, enum reg_class,
enum reg_class);
extern rtx disi_highpart (rtx);
extern int arc_adjust_insn_length (rtx, int, bool);
extern int arc_adjust_insn_length (rtx_insn *, int, bool);
extern int arc_corereg_hazard (rtx, rtx);
extern int arc_hazard (rtx, rtx);
extern int arc_hazard (rtx_insn *, rtx_insn *);
extern int arc_write_ext_corereg (rtx);
extern rtx gen_acc1 (void);
extern rtx gen_acc2 (void);

gcc/config/arc/arc.c

@@ -7739,7 +7739,7 @@ arc600_corereg_hazard_1 (rtx *xp, void *data)
between PRED and SUCC to prevent a hazard. */
static int
arc600_corereg_hazard (rtx pred, rtx succ)
arc600_corereg_hazard (rtx_insn *pred, rtx_insn *succ)
{
if (!TARGET_ARC600)
return 0;
@@ -7752,9 +7752,9 @@ arc600_corereg_hazard (rtx pred, rtx succ)
if (recog_memoized (succ) == CODE_FOR_doloop_begin_i)
return 0;
if (GET_CODE (PATTERN (pred)) == SEQUENCE)
pred = XVECEXP (PATTERN (pred), 0, 1);
pred = as_a <rtx_sequence *> (PATTERN (pred))->insn (1);
if (GET_CODE (PATTERN (succ)) == SEQUENCE)
succ = XVECEXP (PATTERN (succ), 0, 0);
succ = as_a <rtx_sequence *> (PATTERN (succ))->insn (0);
if (recog_memoized (pred) == CODE_FOR_mulsi_600
|| recog_memoized (pred) == CODE_FOR_umul_600
|| recog_memoized (pred) == CODE_FOR_mac_600
@@ -7773,7 +7773,7 @@ arc600_corereg_hazard (rtx pred, rtx succ)
between PRED and SUCC to prevent a hazard. */
int
arc_hazard (rtx pred, rtx succ)
arc_hazard (rtx_insn *pred, rtx_insn *succ)
{
if (!TARGET_ARC600)
return 0;
@@ -7793,7 +7793,7 @@ arc_hazard (rtx pred, rtx succ)
/* Return length adjustment for INSN. */
int
arc_adjust_insn_length (rtx insn, int len, bool)
arc_adjust_insn_length (rtx_insn *insn, int len, bool)
{
if (!INSN_P (insn))
return len;
@@ -7889,7 +7889,7 @@ typedef struct insn_length_parameters_s
int align_unit_log;
int align_base_log;
int max_variants;
int (*get_variants) (rtx, int, bool, bool, insn_length_variant_t *);
int (*get_variants) (rtx_insn *, int, bool, bool, insn_length_variant_t *);
} insn_length_parameters_t;
static void
@@ -7897,7 +7897,7 @@ arc_insn_length_parameters (insn_length_parameters_t *ilp) ATTRIBUTE_UNUSED;
#endif
static int
arc_get_insn_variants (rtx insn, int len, bool, bool target_p,
arc_get_insn_variants (rtx_insn *insn, int len, bool, bool target_p,
insn_length_variant_t *ilv)
{
if (!NONDEBUG_INSN_P (insn))
@@ -7907,15 +7907,15 @@ arc_get_insn_variants (rtx insn, int len, bool, bool target_p,
get_variants mechanism, so turn this off for now. */
if (optimize_size)
return 0;
if (GET_CODE (PATTERN (insn)) == SEQUENCE)
if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
{
/* The interaction of a short delay slot insn with a short branch is
too weird for shorten_branches to piece together, so describe the
entire SEQUENCE. */
rtx pat, inner;
rtx_insn *inner;
if (TARGET_UPSIZE_DBR
&& get_attr_length (XVECEXP ((pat = PATTERN (insn)), 0, 1)) <= 2
&& (((type = get_attr_type (inner = XVECEXP (pat, 0, 0)))
&& get_attr_length (XVECEXP (pat, 0, 1)) <= 2
&& (((type = get_attr_type (inner = pat->insn (0)))
== TYPE_UNCOND_BRANCH)
|| type == TYPE_BRANCH)
&& get_attr_delay_slot_filled (inner) == DELAY_SLOT_FILLED_YES)

gcc/config/arc/arc.h

@@ -1657,12 +1657,12 @@ extern enum arc_function_type arc_compute_function_type (struct function *);
((LENGTH) \
= (GET_CODE (PATTERN (X)) == SEQUENCE \
? ((LENGTH) \
+ arc_adjust_insn_length (XVECEXP (PATTERN (X), 0, 0), \
+ arc_adjust_insn_length (as_a <rtx_sequence *> (PATTERN (X))->insn (0), \
get_attr_length (XVECEXP (PATTERN (X), \
0, 0)), \
true) \
- get_attr_length (XVECEXP (PATTERN (X), 0, 0)) \
+ arc_adjust_insn_length (XVECEXP (PATTERN (X), 0, 1), \
+ arc_adjust_insn_length (as_a <rtx_sequence *> (PATTERN (X))->insn (1), \
get_attr_length (XVECEXP (PATTERN (X), \
0, 1)), \
true) \

gcc/config/arm/arm-protos.h

@@ -136,8 +136,8 @@ extern const char *output_move_quad (rtx *);
extern int arm_count_output_move_double_insns (rtx *);
extern const char *output_move_vfp (rtx *operands);
extern const char *output_move_neon (rtx *operands);
extern int arm_attr_length_move_neon (rtx);
extern int arm_address_offset_is_imm (rtx);
extern int arm_attr_length_move_neon (rtx_insn *);
extern int arm_address_offset_is_imm (rtx_insn *);
extern const char *output_add_immediate (rtx *);
extern const char *arithmetic_instr (rtx, int);
extern void output_ascii_pseudo_op (FILE *, const unsigned char *, int);
@@ -253,7 +253,7 @@ struct tune_params
{
bool (*rtx_costs) (rtx, RTX_CODE, RTX_CODE, int *, bool);
const struct cpu_cost_table *insn_extra_cost;
bool (*sched_adjust_cost) (rtx, rtx, rtx, int *);
bool (*sched_adjust_cost) (rtx_insn *, rtx, rtx_insn *, int *);
int constant_limit;
/* Maximum number of instructions to conditionalise. */
int max_insns_skipped;

gcc/config/arm/arm.c

@@ -255,9 +255,9 @@ static void arm_asm_trampoline_template (FILE *);
static void arm_trampoline_init (rtx, tree, rtx);
static rtx arm_trampoline_adjust_address (rtx);
static rtx arm_pic_static_addr (rtx orig, rtx reg);
static bool cortex_a9_sched_adjust_cost (rtx, rtx, rtx, int *);
static bool xscale_sched_adjust_cost (rtx, rtx, rtx, int *);
static bool fa726te_sched_adjust_cost (rtx, rtx, rtx, int *);
static bool cortex_a9_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int *);
static bool xscale_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int *);
static bool fa726te_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int *);
static bool arm_array_mode_supported_p (enum machine_mode,
unsigned HOST_WIDE_INT);
static enum machine_mode arm_preferred_simd_mode (enum machine_mode);
@@ -11440,7 +11440,7 @@ arm_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
/* Adjust cost hook for XScale. */
static bool
xscale_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
xscale_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int * cost)
{
/* Some true dependencies can have a higher cost depending
on precisely how certain input operands are used. */
@@ -11501,7 +11501,7 @@ xscale_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
/* Adjust cost hook for Cortex A9. */
static bool
cortex_a9_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
cortex_a9_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int * cost)
{
switch (REG_NOTE_KIND (link))
{
@@ -11574,7 +11574,7 @@ cortex_a9_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
/* Adjust cost hook for FA726TE. */
static bool
fa726te_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
fa726te_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int * cost)
{
/* For FA726TE, true dependency on CPSR (i.e. set cond followed by predicated)
have penalty of 3. */
@@ -11743,7 +11743,7 @@ arm_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
/* Return true if and only if this insn can dual-issue only as older. */
static bool
cortexa7_older_only (rtx insn)
cortexa7_older_only (rtx_insn *insn)
{
if (recog_memoized (insn) < 0)
return false;
@@ -11795,7 +11795,7 @@ cortexa7_older_only (rtx insn)
/* Return true if and only if this insn can dual-issue as younger. */
static bool
cortexa7_younger (FILE *file, int verbose, rtx insn)
cortexa7_younger (FILE *file, int verbose, rtx_insn *insn)
{
if (recog_memoized (insn) < 0)
{
@@ -18649,7 +18649,7 @@ output_move_neon (rtx *operands)
/* Compute and return the length of neon_mov<mode>, where <mode> is
one of VSTRUCT modes: EI, OI, CI or XI. */
int
arm_attr_length_move_neon (rtx insn)
arm_attr_length_move_neon (rtx_insn *insn)
{
rtx reg, mem, addr;
int load;
@@ -18700,7 +18700,7 @@ arm_attr_length_move_neon (rtx insn)
return zero. */
int
arm_address_offset_is_imm (rtx insn)
arm_address_offset_is_imm (rtx_insn *insn)
{
rtx mem, addr;

gcc/config/avr/avr-protos.h

@@ -101,7 +101,7 @@ extern const char* output_reload_inhi (rtx*, rtx, int*);
extern const char* output_reload_insisf (rtx*, rtx, int*);
extern const char* avr_out_reload_inpsi (rtx*, rtx, int*);
extern const char* avr_out_lpm (rtx_insn *, rtx*, int*);
extern void avr_notice_update_cc (rtx body, rtx insn);
extern void avr_notice_update_cc (rtx body, rtx_insn *insn);
extern int reg_unused_after (rtx_insn *insn, rtx reg);
extern int _reg_unused_after (rtx_insn *insn, rtx reg);
extern int avr_jump_mode (rtx x, rtx_insn *insn);

gcc/config/avr/avr.c

@@ -2290,7 +2290,7 @@ avr_print_operand (FILE *file, rtx x, int code)
/* Update the condition code in the INSN. */
void
avr_notice_update_cc (rtx body ATTRIBUTE_UNUSED, rtx insn)
avr_notice_update_cc (rtx body ATTRIBUTE_UNUSED, rtx_insn *insn)
{
rtx set;
enum attr_cc cc = get_attr_cc (insn);

gcc/config/bfin/bfin.c

@@ -3865,7 +3865,7 @@ hwloop_fail (hwloop_info loop)
loop counter. Otherwise, return NULL_RTX. */
static rtx
hwloop_pattern_reg (rtx insn)
hwloop_pattern_reg (rtx_insn *insn)
{
rtx reg;
@@ -4287,7 +4287,7 @@ static void
workaround_speculation (void)
{
rtx_insn *insn, *next;
rtx last_condjump = NULL_RTX;
rtx_insn *last_condjump = NULL;
int cycles_since_jump = INT_MAX;
int delay_added = 0;

gcc/config/c6x/c6x.c

@@ -2978,7 +2978,7 @@ shadow_type_p (enum attr_type type)
/* Return true iff INSN is a shadow pattern. */
static bool
shadow_p (rtx insn)
shadow_p (rtx_insn *insn)
{
if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
return false;
@@ -2987,7 +2987,7 @@ shadow_p (rtx insn)
/* Return true iff INSN is a shadow or blockage pattern. */
static bool
shadow_or_blockage_p (rtx insn)
shadow_or_blockage_p (rtx_insn *insn)
{
enum attr_type type;
if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
@@ -3227,7 +3227,7 @@ unit_req_factor (enum unitreqs r)
instructions reservation, e.g. UNIT_REQ_DL. REQ2 is used to either
describe a cross path, or for loads/stores, the T unit. */
static int
get_unit_reqs (rtx insn, int *req1, int *side1, int *req2, int *side2)
get_unit_reqs (rtx_insn *insn, int *req1, int *side1, int *req2, int *side2)
{
enum attr_units units;
enum attr_cross cross;
@@ -3362,7 +3362,8 @@ res_mii (unit_req_table reqs)
found by get_unit_reqs. Return true if we did this successfully, false
if we couldn't identify what to do with INSN. */
static bool
get_unit_operand_masks (rtx insn, unsigned int *pmask1, unsigned int *pmask2)
get_unit_operand_masks (rtx_insn *insn, unsigned int *pmask1,
unsigned int *pmask2)
{
enum attr_op_pattern op_pat;
@@ -4046,7 +4047,7 @@ c6x_mark_reg_written (rtx reg, int cycles)
next cycle. */
static bool
c6x_registers_update (rtx insn)
c6x_registers_update (rtx_insn *insn)
{
enum attr_cross cross;
enum attr_dest_regfile destrf;
@@ -4749,7 +4750,7 @@ emit_nop_after (int cycles, rtx after)
placed. */
static bool
returning_call_p (rtx insn)
returning_call_p (rtx_insn *insn)
{
if (CALL_P (insn))
return (!SIBLING_CALL_P (insn)
@@ -4764,7 +4765,7 @@ returning_call_p (rtx insn)
/* Determine whether INSN's pattern can be converted to use callp. */
static bool
can_use_callp (rtx insn)
can_use_callp (rtx_insn *insn)
{
int icode = recog_memoized (insn);
if (!TARGET_INSNS_64PLUS
@@ -4780,7 +4781,7 @@ can_use_callp (rtx insn)
/* Convert the pattern of INSN, which must be a CALL_INSN, into a callp. */
static void
convert_to_callp (rtx insn)
convert_to_callp (rtx_insn *insn)
{
rtx lab;
extract_insn (insn);
@@ -4835,7 +4836,7 @@ static rtx
find_last_same_clock (rtx insn)
{
rtx retval = insn;
rtx t = next_real_insn (insn);
rtx_insn *t = next_real_insn (insn);
while (t && GET_MODE (t) != TImode)
{
@@ -4942,7 +4943,8 @@ reorg_split_calls (rtx *call_labels)
/* Find the first insn of the next execute packet. If it
is the shadow insn corresponding to this call, we may
use a CALLP insn. */
rtx shadow = next_nonnote_nondebug_insn (last_same_clock);
rtx_insn *shadow =
next_nonnote_nondebug_insn (last_same_clock);
if (CALL_P (shadow)
&& insn_get_clock (shadow) == this_clock + 5)
@@ -5413,7 +5415,7 @@ conditionalize_after_sched (void)
loop counter. Otherwise, return NULL_RTX. */
static rtx
hwloop_pattern_reg (rtx insn)
hwloop_pattern_reg (rtx_insn *insn)
{
rtx pat, reg;

gcc/config/frv/frv-protos.h

@@ -45,7 +45,7 @@ extern rtx frv_return_addr_rtx (int, rtx);
extern rtx frv_index_memory (rtx, enum machine_mode, int);
extern const char *frv_asm_output_opcode
(FILE *, const char *);
extern void frv_final_prescan_insn (rtx, rtx *, int);
extern void frv_final_prescan_insn (rtx_insn *, rtx *, int);
extern void frv_emit_move (enum machine_mode, rtx, rtx);
extern int frv_emit_movsi (rtx, rtx);
extern const char *output_move_single (rtx *, rtx);

gcc/config/frv/frv.c

@@ -2402,7 +2402,7 @@ frv_asm_output_opcode (FILE *f, const char *ptr)
function is not called for asm insns. */
void
frv_final_prescan_insn (rtx insn, rtx *opvec,
frv_final_prescan_insn (rtx_insn *insn, rtx *opvec,
int noperands ATTRIBUTE_UNUSED)
{
if (INSN_P (insn))
@@ -7788,7 +7788,7 @@ frv_io_union (struct frv_io *x, const struct frv_io *y)
membar instruction INSN. */
static void
frv_extract_membar (struct frv_io *io, rtx insn)
frv_extract_membar (struct frv_io *io, rtx_insn *insn)
{
extract_insn (insn);
io->type = (enum frv_io_type) INTVAL (recog_data.operand[2]);
@@ -7867,7 +7867,7 @@ frv_io_handle_use (rtx *x, void *data)
static void
frv_optimize_membar_local (basic_block bb, struct frv_io *next_io,
rtx *last_membar)
rtx_insn **last_membar)
{
HARD_REG_SET used_regs;
rtx next_membar, set;
@@ -8001,7 +8001,7 @@ frv_optimize_membar_local (basic_block bb, struct frv_io *next_io,
static void
frv_optimize_membar_global (basic_block bb, struct frv_io *first_io,
rtx membar)
rtx_insn *membar)
{
struct frv_io this_io, next_io;
edge succ;
@@ -8047,11 +8047,11 @@ frv_optimize_membar (void)
{
basic_block bb;
struct frv_io *first_io;
rtx *last_membar;
rtx_insn **last_membar;
compute_bb_for_insn ();
first_io = XCNEWVEC (struct frv_io, last_basic_block_for_fn (cfun));
last_membar = XCNEWVEC (rtx, last_basic_block_for_fn (cfun));
last_membar = XCNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
FOR_EACH_BB_FN (bb, cfun)
frv_optimize_membar_local (bb, &first_io[bb->index],

gcc/config/i386/i386-protos.h

@@ -50,7 +50,7 @@ extern int standard_sse_constant_p (rtx);
extern const char *standard_sse_constant_opcode (rtx, rtx);
extern bool symbolic_reference_mentioned_p (rtx);
extern bool extended_reg_mentioned_p (rtx);
extern bool x86_extended_QIreg_mentioned_p (rtx);
extern bool x86_extended_QIreg_mentioned_p (rtx_insn *);
extern bool x86_extended_reg_mentioned_p (rtx);
extern bool x86_maybe_negate_const_int (rtx *, enum machine_mode);
extern enum machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx);
@@ -100,7 +100,7 @@ extern void ix86_split_lea_for_addr (rtx_insn *, rtx[], enum machine_mode);
extern bool ix86_lea_for_add_ok (rtx_insn *, rtx[]);
extern bool ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high);
extern bool ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn);
extern bool ix86_agi_dependent (rtx set_insn, rtx use_insn);
extern bool ix86_agi_dependent (rtx_insn *set_insn, rtx_insn *use_insn);
extern void ix86_expand_unary_operator (enum rtx_code, enum machine_mode,
rtx[]);
extern rtx ix86_build_const_vector (enum machine_mode, bool, rtx);
@@ -145,9 +145,9 @@ extern void ix86_split_idivmod (enum machine_mode, rtx[], bool);
extern bool ix86_emit_cfi ();
extern rtx assign_386_stack_local (enum machine_mode, enum ix86_stack_slot);
extern int ix86_attr_length_immediate_default (rtx, bool);
extern int ix86_attr_length_address_default (rtx);
extern int ix86_attr_length_vex_default (rtx, bool, bool);
extern int ix86_attr_length_immediate_default (rtx_insn *, bool);
extern int ix86_attr_length_address_default (rtx_insn *);
extern int ix86_attr_length_vex_default (rtx_insn *, bool, bool);
extern enum machine_mode ix86_fp_compare_mode (enum rtx_code);

gcc/config/i386/i386.c

@@ -25212,7 +25212,7 @@ memory_address_length (rtx addr, bool lea)
/* Compute default value for "length_immediate" attribute. When SHORTFORM
is set, expect that insn have 8bit immediate alternative. */
int
ix86_attr_length_immediate_default (rtx insn, bool shortform)
ix86_attr_length_immediate_default (rtx_insn *insn, bool shortform)
{
int len = 0;
int i;
@@ -25271,7 +25271,7 @@ ix86_attr_length_immediate_default (rtx insn, bool shortform)
/* Compute default value for "length_address" attribute. */
int
ix86_attr_length_address_default (rtx insn)
ix86_attr_length_address_default (rtx_insn *insn)
{
int i;
@@ -25317,7 +25317,8 @@ ix86_attr_length_address_default (rtx insn)
2 or 3 byte VEX prefix and 1 opcode byte. */
int
ix86_attr_length_vex_default (rtx insn, bool has_0f_opcode, bool has_vex_w)
ix86_attr_length_vex_default (rtx_insn *insn, bool has_0f_opcode,
bool has_vex_w)
{
int i;
@@ -25440,7 +25441,7 @@ ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
SET_INSN. */
bool
ix86_agi_dependent (rtx set_insn, rtx use_insn)
ix86_agi_dependent (rtx_insn *set_insn, rtx_insn *use_insn)
{
int i;
extract_insn_cached (use_insn);
@@ -39464,7 +39465,7 @@ ix86_reorg (void)
/* Return nonzero when QImode register that must be represented via REX prefix
is used. */
bool
x86_extended_QIreg_mentioned_p (rtx insn)
x86_extended_QIreg_mentioned_p (rtx_insn *insn)
{
int i;
extract_insn_cached (insn);
@@ -42499,7 +42500,7 @@ ix86_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
insn, so that expand_vselect{,_vconcat} doesn't have to create a fresh
insn every time. */
static GTY(()) rtx vselect_insn;
static GTY(()) rtx_insn *vselect_insn;
/* Initialize vselect_insn. */

gcc/config/ia64/ia64-protos.h

@@ -24,8 +24,8 @@ extern enum unwind_info_type ia64_except_unwind_info (struct gcc_options *);
extern int bundling_p;
#ifdef RTX_CODE
extern int ia64_st_address_bypass_p (rtx, rtx);
extern int ia64_ld_address_bypass_p (rtx, rtx);
extern int ia64_st_address_bypass_p (rtx_insn *, rtx_insn *);
extern int ia64_ld_address_bypass_p (rtx_insn *, rtx_insn *);
extern int ia64_produce_address_p (rtx);
extern rtx ia64_expand_move (rtx, rtx);

gcc/config/ia64/ia64.c

@@ -281,8 +281,8 @@ static int get_max_pos (state_t);
static int get_template (state_t, int);
static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
static bool important_for_bundling_p (rtx);
static bool unknown_for_bundling_p (rtx);
static bool important_for_bundling_p (rtx_insn *);
static bool unknown_for_bundling_p (rtx_insn *);
static void bundling (FILE *, int, rtx_insn *, rtx_insn *);
static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
@@ -6050,11 +6050,11 @@ ia64_init_machine_status (void)
return ggc_cleared_alloc<machine_function> ();
}
static enum attr_itanium_class ia64_safe_itanium_class (rtx);
static enum attr_type ia64_safe_type (rtx);
static enum attr_itanium_class ia64_safe_itanium_class (rtx_insn *);
static enum attr_type ia64_safe_type (rtx_insn *);
static enum attr_itanium_class
ia64_safe_itanium_class (rtx insn)
ia64_safe_itanium_class (rtx_insn *insn)
{
if (recog_memoized (insn) >= 0)
return get_attr_itanium_class (insn);
@@ -6065,7 +6065,7 @@ ia64_safe_itanium_class (rtx insn)
}
static enum attr_type
ia64_safe_type (rtx insn)
ia64_safe_type (rtx_insn *insn)
{
if (recog_memoized (insn) >= 0)
return get_attr_type (insn);
@@ -6191,8 +6191,8 @@ static void update_set_flags (rtx, struct reg_flags *);
static int set_src_needs_barrier (rtx, struct reg_flags, int);
static int rtx_needs_barrier (rtx, struct reg_flags, int);
static void init_insn_group_barriers (void);
static int group_barrier_needed (rtx);
static int safe_group_barrier_needed (rtx);
static int group_barrier_needed (rtx_insn *);
static int safe_group_barrier_needed (rtx_insn *);
static int in_safe_group_barrier;
/* Update *RWS for REGNO, which is being written by the current instruction,
@@ -6820,7 +6820,7 @@ init_insn_group_barriers (void)
include the effects of INSN as a side-effect. */
static int
group_barrier_needed (rtx insn)
group_barrier_needed (rtx_insn *insn)
{
rtx pat;
int need_barrier = 0;
@@ -6929,7 +6929,7 @@ group_barrier_needed (rtx insn)
/* Like group_barrier_needed, but do not clobber the current state. */
static int
safe_group_barrier_needed (rtx insn)
safe_group_barrier_needed (rtx_insn *insn)
{
int saved_first_instruction;
int t;
@@ -7123,7 +7123,7 @@ static char mem_ops_in_group[4];
/* Number of current processor cycle (from scheduler's point of view). */
static int current_cycle;
static rtx ia64_single_set (rtx);
static rtx ia64_single_set (rtx_insn *);
static void ia64_emit_insn_before (rtx, rtx);
/* Map a bundle number to its pseudo-op. */
@@ -7146,7 +7146,7 @@ ia64_issue_rate (void)
/* Helper function - like single_set, but look inside COND_EXEC. */
static rtx
ia64_single_set (rtx insn)
ia64_single_set (rtx_insn *insn)
{
rtx x = PATTERN (insn), ret;
if (GET_CODE (x) == COND_EXEC)
@@ -7331,7 +7331,7 @@ ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
/* Return TRUE if INSN is a load (either normal or speculative, but not a
speculation check), FALSE otherwise. */
static bool
is_load_p (rtx insn)
is_load_p (rtx_insn *insn)
{
enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
@@ -7345,7 +7345,7 @@ is_load_p (rtx insn)
Itanium 2 Reference Manual for Software Development and Optimization,
6.7.3.1). */
static void
record_memory_reference (rtx insn)
record_memory_reference (rtx_insn *insn)
{
enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
@@ -7963,7 +7963,7 @@ ia64_set_sched_flags (spec_info_t spec_info)
/* If INSN is an appropriate load return its mode.
Return -1 otherwise. */
static int
get_mode_no_for_insn (rtx insn)
get_mode_no_for_insn (rtx_insn *insn)
{
rtx reg, mem, mode_rtx;
int mode_no;
@@ -8905,7 +8905,7 @@ get_template (state_t state, int pos)
/* True when INSN is important for bundling. */
static bool
important_for_bundling_p (rtx insn)
important_for_bundling_p (rtx_insn *insn)
{
return (INSN_P (insn)
&& ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
@@ -8928,7 +8928,7 @@ get_next_important_insn (rtx_insn *insn, rtx_insn *tail)
/* True when INSN is unknown, but important, for bundling. */
static bool
unknown_for_bundling_p (rtx insn)
unknown_for_bundling_p (rtx_insn *insn)
{
return (INSN_P (insn)
&& ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
@@ -9516,7 +9516,7 @@ ia64_dfa_pre_cycle_insn (void)
ld) produces address for CONSUMER (of type st or stf). */
int
ia64_st_address_bypass_p (rtx producer, rtx consumer)
ia64_st_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
{
rtx dest, reg, mem;
@@ -9540,7 +9540,7 @@ ia64_st_address_bypass_p (rtx producer, rtx consumer)
ld) produces address for CONSUMER (of type ld or fld). */
int
ia64_ld_address_bypass_p (rtx producer, rtx consumer)
ia64_ld_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
{
rtx dest, src, reg, mem;
@@ -11212,10 +11212,10 @@ expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
x = gen_rtx_SET (VOIDmode, target, x);
x = emit_insn (x);
if (recog_memoized (x) < 0)
rtx_insn *insn = emit_insn (x);
if (recog_memoized (insn) < 0)
{
remove_insn (x);
remove_insn (insn);
return false;
}
return true;

gcc/config/m68k/m68k-protos.h

@@ -78,8 +78,8 @@ extern rtx m68k_unwrap_symbol (rtx, bool);
extern enum attr_cpu m68k_sched_cpu;
extern enum attr_mac m68k_sched_mac;
extern enum attr_opx_type m68k_sched_attr_opx_type (rtx, int);
extern enum attr_opy_type m68k_sched_attr_opy_type (rtx, int);
extern enum attr_opx_type m68k_sched_attr_opx_type (rtx_insn *, int);
extern enum attr_opy_type m68k_sched_attr_opy_type (rtx_insn *, int);
extern enum attr_size m68k_sched_attr_size (rtx);
extern enum attr_op_mem m68k_sched_attr_op_mem (rtx);
#endif /* HAVE_ATTR_cpu */

gcc/config/m68k/m68k.c

@@ -5414,7 +5414,7 @@ sched_address_type (enum machine_mode mode, rtx addr_rtx)
/* Return X or Y (depending on OPX_P) operand of INSN. */
static rtx
sched_get_operand (rtx insn, bool opx_p)
sched_get_operand (rtx_insn *insn, bool opx_p)
{
int i;
@@ -5437,7 +5437,7 @@ sched_get_operand (rtx insn, bool opx_p)
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
If ADDRESS_P is true, return type of memory location operand refers to. */
static enum attr_op_type
sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
{
rtx op;
@@ -5556,7 +5556,7 @@ sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
Return type of INSN's operand X.
If ADDRESS_P is true, return type of memory location operand refers to. */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx insn, int address_p)
m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
{
switch (sched_attr_op_type (insn, true, address_p != 0))
{
@@ -5599,7 +5599,7 @@ m68k_sched_attr_opx_type (rtx insn, int address_p)
Return type of INSN's operand Y.
If ADDRESS_P is true, return type of memory location operand refers to. */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx insn, int address_p)
m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
{
switch (sched_attr_op_type (insn, false, address_p != 0))
{
@@ -6289,7 +6289,7 @@ m68k_sched_dfa_post_advance_cycle (void)
/* Return X or Y (depending on OPX_P) operand of INSN,
if it is an integer register, or NULL overwise. */
static rtx
sched_get_reg_operand (rtx insn, bool opx_p)
sched_get_reg_operand (rtx_insn *insn, bool opx_p)
{
rtx op = NULL;
@@ -6338,7 +6338,7 @@ sched_mem_operand_p (rtx insn, bool opx_p)
/* Return X or Y (depending on OPX_P) operand of INSN,
if it is a MEM, or NULL overwise. */
static rtx
sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
{
bool opx_p;
bool opy_p;
@@ -6371,7 +6371,7 @@ sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
/* Return non-zero if PRO modifies register used as part of
address in CON. */
int
m68k_sched_address_bypass_p (rtx pro, rtx con)
m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
{
rtx pro_x;
rtx con_mem_read;
@@ -6393,7 +6393,7 @@ m68k_sched_address_bypass_p (rtx pro, rtx con)
if PRO modifies register used as index in CON,
return scale of indexed memory access in CON. Return zero overwise. */
static int
sched_get_indexed_address_scale (rtx pro, rtx con)
sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
{
rtx reg;
rtx mem;
@@ -6422,7 +6422,7 @@ sched_get_indexed_address_scale (rtx pro, rtx con)
/* Return non-zero if PRO modifies register used
as index with scale 2 or 4 in CON. */
int
m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
{
gcc_assert (sched_cfv4_bypass_data.pro == NULL
&& sched_cfv4_bypass_data.con == NULL

gcc/config/m68k/m68k.h

@@ -974,7 +974,7 @@ extern M68K_CONST_METHOD m68k_const_method (HOST_WIDE_INT);
extern void m68k_emit_move_double (rtx [2]);
extern int m68k_sched_address_bypass_p (rtx, rtx);
extern int m68k_sched_indexed_address_bypass_p (rtx, rtx);
extern int m68k_sched_address_bypass_p (rtx_insn *, rtx_insn *);
extern int m68k_sched_indexed_address_bypass_p (rtx_insn *, rtx_insn *);
#define CPU_UNITS_QUERY 1

gcc/config/mep/mep.c

@@ -5638,20 +5638,19 @@ mep_reorg_erepeat (rtx_insn *insns)
static void
mep_jmp_return_reorg (rtx_insn *insns)
{
rtx_insn *insn;
rtx label, ret;
rtx_insn *insn, *label, *ret;
int ret_code;
for (insn = insns; insn; insn = NEXT_INSN (insn))
if (simplejump_p (insn))
{
/* Find the fist real insn the jump jumps to. */
label = ret = JUMP_LABEL (insn);
label = ret = safe_as_a <rtx_insn *> (JUMP_LABEL (insn));
while (ret
&& (NOTE_P (ret)
|| LABEL_P (ret)
|| GET_CODE (PATTERN (ret)) == USE))
ret = NEXT_INSN (as_a <rtx_insn *> (ret));
ret = NEXT_INSN (ret);
if (ret)
{

gcc/config/mips/mips-protos.h

@@ -299,7 +299,7 @@ extern unsigned int mips_sync_loop_insns (rtx, rtx *);
extern const char *mips_output_division (const char *, rtx *);
extern const char *mips_output_probe_stack_range (rtx, rtx);
extern unsigned int mips_hard_regno_nregs (int, enum machine_mode);
extern bool mips_linked_madd_p (rtx, rtx);
extern bool mips_linked_madd_p (rtx_insn *, rtx_insn *);
extern bool mips_store_data_bypass_p (rtx, rtx);
extern int mips_dspalu_bypass_p (rtx, rtx);
extern rtx mips_prefetch_cookie (rtx, rtx);
@@ -315,7 +315,7 @@ extern bool mips16e_save_restore_pattern_p (rtx, HOST_WIDE_INT,
extern bool mask_low_and_shift_p (enum machine_mode, rtx, rtx, int);
extern int mask_low_and_shift_len (enum machine_mode, rtx, rtx);
extern bool and_operands_ok (enum machine_mode, rtx, rtx);
extern bool mips_fmadd_bypass (rtx, rtx);
extern bool mips_fmadd_bypass (rtx_insn *, rtx_insn *);
union mips_gen_fn_ptrs
{

gcc/config/mips/mips.c

@@ -13032,7 +13032,7 @@ mips_output_division (const char *division, rtx *operands)
madd.s a, dst, b, c */
bool
mips_fmadd_bypass (rtx out_insn, rtx in_insn)
mips_fmadd_bypass (rtx_insn *out_insn, rtx_insn *in_insn)
{
int dst_reg, src_reg;
@@ -13055,7 +13055,7 @@ mips_fmadd_bypass (rtx out_insn, rtx in_insn)
instruction and if OUT_INSN assigns to the accumulator operand. */
bool
mips_linked_madd_p (rtx out_insn, rtx in_insn)
mips_linked_madd_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
enum attr_accum_in accum_in;
int accum_in_opnum;
@@ -13364,13 +13364,13 @@ mips_maybe_swap_ready (rtx_insn **ready, int pos1, int pos2, int limit)
/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
that may clobber hi or lo. */
static rtx mips_macc_chains_last_hilo;
static rtx_insn *mips_macc_chains_last_hilo;
/* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
been scheduled, updating mips_macc_chains_last_hilo appropriately. */
static void
mips_macc_chains_record (rtx insn)
mips_macc_chains_record (rtx_insn *insn)
{
if (get_attr_may_clobber_hilo (insn))
mips_macc_chains_last_hilo = insn;
@@ -13403,7 +13403,7 @@ mips_macc_chains_reorder (rtx_insn **ready, int nready)
}
/* The last instruction to be scheduled. */
static rtx vr4130_last_insn;
static rtx_insn *vr4130_last_insn;
/* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
points to an rtx that is initially an instruction. Nullify the rtx
@@ -13441,7 +13441,7 @@ vr4130_true_reg_dependence_p (rtx insn)
alignment than (INSN1, INSN2). See 4130.md for more details. */
static bool
vr4130_swap_insns_p (rtx insn1, rtx insn2)
vr4130_swap_insns_p (rtx_insn *insn1, rtx_insn *insn2)
{
sd_iterator_def sd_it;
dep_t dep;
@@ -13637,7 +13637,7 @@ mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
/* Update round-robin counters for ALU1/2 and FALU1/2. */
static void
mips_ls2_variable_issue (rtx insn)
mips_ls2_variable_issue (rtx_insn *insn)
{
if (mips_ls2.alu1_turn_p)
{
@@ -17567,7 +17567,7 @@ mips_at_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
INSN has NOPERANDS operands, stored in OPVEC. */
static bool
mips_need_noat_wrapper_p (rtx insn, rtx *opvec, int noperands)
mips_need_noat_wrapper_p (rtx_insn *insn, rtx *opvec, int noperands)
{
int i;
@@ -18186,6 +18186,7 @@ mips_expand_vselect (rtx target, rtx op0,
const unsigned char *perm, unsigned nelt)
{
rtx rperm[MAX_VECT_LEN], x;
rtx_insn *insn;
unsigned i;
for (i = 0; i < nelt; ++i)
@@ -18195,10 +18196,10 @@ mips_expand_vselect (rtx target, rtx op0,
x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
x = gen_rtx_SET (VOIDmode, target, x);
x = emit_insn (x);
if (recog_memoized (x) < 0)
insn = emit_insn (x);
if (recog_memoized (insn) < 0)
{
remove_insn (x);
remove_insn (insn);
return false;
}
return true;

gcc/config/pa/pa-protos.h

@@ -64,7 +64,7 @@ extern int pa_emit_move_sequence (rtx *, enum machine_mode, rtx);
extern int pa_emit_hpdiv_const (rtx *, int);
extern int pa_is_function_label_plus_const (rtx);
extern int pa_jump_in_call_delay (rtx_insn *);
extern int pa_fpstore_bypass_p (rtx, rtx);
extern int pa_fpstore_bypass_p (rtx_insn *, rtx_insn *);
extern int pa_attr_length_millicode_call (rtx_insn *);
extern int pa_attr_length_call (rtx_insn *, int);
extern int pa_attr_length_indirect_call (rtx_insn *);

gcc/config/pa/pa.c

@@ -57,7 +57,7 @@ along with GCC; see the file COPYING3. If not see
/* Return nonzero if there is a bypass for the output of
OUT_INSN and the fp store IN_INSN. */
int
pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
enum machine_mode store_mode;
enum machine_mode other_mode;
@@ -99,7 +99,8 @@ static bool hppa_rtx_costs (rtx, int, int, int, int *, bool);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx_insn *, rtx_insn *, int, rtx, rtx, rtx);
static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
@@ -8996,7 +8997,6 @@ static void
pa_combine_instructions (void)
{
rtx_insn *anchor;
rtx new_rtx;
/* This can get expensive since the basic algorithm is on the
order of O(n^2) (or worse). Only do it for -O2 or higher
@@ -9008,8 +9008,8 @@ pa_combine_instructions (void)
may be combined with "floating" insns. As the name implies,
"anchor" instructions don't move, while "floating" insns may
move around. */
new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
new_rtx = make_insn_raw (new_rtx);
rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
rtx_insn *new_rtx = make_insn_raw (par);
for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
{
@@ -9178,7 +9178,7 @@ pa_combine_instructions (void)
}
static int
pa_can_combine_p (rtx new_rtx, rtx_insn *anchor, rtx_insn *floater,
pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
int reversed, rtx dest,
rtx src1, rtx src2)
{

gcc/config/rl78/rl78.c

@@ -2145,7 +2145,7 @@ rl78_es_base (rtx addr)
carefully to ensure that all the constraint information is accurate
for the newly matched insn. */
static bool
insn_ok_now (rtx insn)
insn_ok_now (rtx_insn *insn)
{
rtx pattern = PATTERN (insn);
int i;
@@ -2617,7 +2617,7 @@ move_to_de (int opno, rtx before)
/* Devirtualize an insn of the form (SET (op) (unop (op))). */
static void
rl78_alloc_physical_registers_op1 (rtx insn)
rl78_alloc_physical_registers_op1 (rtx_insn *insn)
{
/* op[0] = func op[1] */
@@ -2696,7 +2696,7 @@ has_constraint (unsigned int opnum, enum constraint_num constraint)
/* Devirtualize an insn of the form (SET (op) (binop (op) (op))). */
static void
rl78_alloc_physical_registers_op2 (rtx insn)
rl78_alloc_physical_registers_op2 (rtx_insn *insn)
{
rtx prev;
rtx first;
@@ -2850,7 +2850,7 @@ rl78_alloc_physical_registers_op2 (rtx insn)
/* Devirtualize an insn of the form SET (PC) (MEM/REG). */
static void
rl78_alloc_physical_registers_ro1 (rtx insn)
rl78_alloc_physical_registers_ro1 (rtx_insn *insn)
{
OP (0) = transcode_memory_rtx (OP (0), BC, insn);
@@ -2863,7 +2863,7 @@ rl78_alloc_physical_registers_ro1 (rtx insn)
/* Devirtualize a compare insn. */
static void
rl78_alloc_physical_registers_cmp (rtx insn)
rl78_alloc_physical_registers_cmp (rtx_insn *insn)
{
int tmp_id;
rtx saved_op1;
@@ -2956,7 +2956,7 @@ rl78_alloc_physical_registers_cmp (rtx insn)
/* Like op2, but AX = A * X. */
static void
rl78_alloc_physical_registers_umul (rtx insn)
rl78_alloc_physical_registers_umul (rtx_insn *insn)
{
rtx prev = prev_nonnote_nondebug_insn (insn);
rtx first;
@ -3020,7 +3020,7 @@ rl78_alloc_physical_registers_umul (rtx insn)
}
static void
rl78_alloc_address_registers_macax (rtx insn)
rl78_alloc_address_registers_macax (rtx_insn *insn)
{
int which, op;
bool replace_in_op0 = false;
@ -3085,7 +3085,7 @@ rl78_alloc_physical_registers (void)
registers. At this point, we need to assign physical registers
to the vitual ones, and copy in/out as needed. */
rtx insn, curr;
rtx_insn *insn, *curr;
enum attr_valloc valloc_method;
for (insn = get_insns (); insn; insn = curr)
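
The last rl78 hunk is the payoff for the helper-signature changes above it: once the walk variables are rtx_insn *, every helper can take the stronger type with no casts. A minimal sketch of such a strengthened walk, assuming the NEXT_INSN of this era (walk_insns is an illustrative name only):

  static void
  walk_insns (void)
  {
    rtx_insn *insn, *curr;
    for (insn = get_insns (); insn; insn = curr)
      {
        curr = NEXT_INSN (insn);   /* stays within rtx_insn *  */
        if (INSN_P (insn))
          recog_memoized (insn);   /* type-checks: wants an rtx_insn *  */
      }
  }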


@@ -374,7 +374,7 @@
 (match_code "parallel")
 {
 rtx pattern = op;
-rtx insn;
+rtx_insn *insn;
 int icode;
 /* This is redundant but since this predicate is evaluated


@@ -103,7 +103,7 @@ extern void print_operand_address (FILE *, rtx);
 extern void print_operand (FILE *, rtx, int);
 extern void s390_output_pool_entry (rtx, enum machine_mode, unsigned int);
 extern int s390_label_align (rtx);
-extern int s390_agen_dep_p (rtx, rtx);
+extern int s390_agen_dep_p (rtx_insn *, rtx_insn *);
 extern rtx_insn *s390_load_got (void);
 extern rtx s390_get_thread_pointer (void);
 extern void s390_emit_tpf_eh_return (rtx);


@@ -1693,7 +1693,7 @@ const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
 /* Return attribute type of insn. */
 static enum attr_type
-s390_safe_attr_type (rtx insn)
+s390_safe_attr_type (rtx_insn *insn)
 {
 if (recog_memoized (insn) >= 0)
 return get_attr_type (insn);
@@ -5753,7 +5753,7 @@ reg_used_in_mem_p (int regno, rtx x)
 used by instruction INSN to address memory. */
 static bool
-addr_generation_dependency_p (rtx dep_rtx, rtx insn)
+addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
 {
 rtx target, pat;
@@ -5793,7 +5793,7 @@ addr_generation_dependency_p (rtx dep_rtx, rtx insn)
 /* Return 1, if dep_insn sets register used in insn in the agen unit. */
 int
-s390_agen_dep_p (rtx dep_insn, rtx insn)
+s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
 {
 rtx dep_rtx = PATTERN (dep_insn);
 int i;
@@ -11405,7 +11405,7 @@ s390_reorg (void)
 /* Return true if INSN is a fp load insn writing register REGNO. */
 static inline bool
-s390_fpload_toreg (rtx insn, unsigned int regno)
+s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
 {
 rtx set;
 enum attr_type flag = s390_safe_attr_type (insn);
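
s390_safe_attr_type shows the guard idiom that motivates the signature change: recog_memoized both caches INSN_CODE and reports whether the insn is recognizable, so attribute queries are made only on insns that actually have a code. A minimal sketch of the idiom (safe_attr_type is an illustrative name; TYPE_NONE is the fallback the s390 port uses):

  static enum attr_type
  safe_attr_type (rtx_insn *insn)
  {
    /* recog_memoized returns the insn code, or -1 if INSN cannot be
       recognized; get_attr_type may only be called in the former case.  */
    if (recog_memoized (insn) >= 0)
      return get_attr_type (insn);
    return TYPE_NONE;
  }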


@@ -104,7 +104,7 @@ extern const char *output_far_jump (rtx_insn *, rtx);
 extern rtx sfunc_uses_reg (rtx);
 extern int barrier_align (rtx_insn *);
-extern int sh_loop_align (rtx);
+extern int sh_loop_align (rtx_insn *);
 extern bool fp_zero_operand (rtx);
 extern bool fp_one_operand (rtx);
 extern rtx get_fpscr_rtx (void);


@@ -5965,9 +5965,9 @@ barrier_align (rtx_insn *barrier_or_label)
 Applying loop alignment to small constant or switch tables is a waste
 of space, so we suppress this too. */
 int
-sh_loop_align (rtx label)
+sh_loop_align (rtx_insn *label)
 {
-rtx next = label;
+rtx_insn *next = label;
 if (! optimize || optimize_size)
 return 0;


@@ -1566,7 +1566,8 @@
 [(set (match_dup 0) (match_dup 3))
 (set (match_dup 4) (match_dup 5))]
 {
-rtx set1, set2, insn2;
+rtx set1, set2;
+rtx_insn *insn2;
 rtx replacements[4];
 /* We want to replace occurrences of operands[0] with operands[1] and


@@ -563,7 +563,7 @@ private:
 rtx make_not_reg_insn (rtx dst_reg, rtx src_reg) const;
 // Create an insn rtx that inverts the ccreg.
-rtx make_inv_ccreg_insn (void) const;
+rtx_insn *make_inv_ccreg_insn (void) const;
 // Adds the specified insn to the set of modified or newly added insns that
 // might need splitting at the end of the pass.
@@ -899,13 +899,13 @@ sh_treg_combine::make_not_reg_insn (rtx dst_reg, rtx src_reg) const
 return i;
 }
-rtx
+rtx_insn *
 sh_treg_combine::make_inv_ccreg_insn (void) const
 {
 start_sequence ();
-rtx i = emit_insn (gen_rtx_SET (VOIDmode, m_ccreg,
-gen_rtx_fmt_ee (XOR, GET_MODE (m_ccreg),
-m_ccreg, const1_rtx)));
+rtx_insn *i = emit_insn (gen_rtx_SET (VOIDmode, m_ccreg,
+gen_rtx_fmt_ee (XOR, GET_MODE (m_ccreg),
+m_ccreg, const1_rtx)));
 end_sequence ();
 return i;
 }
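
make_inv_ccreg_insn works because emit_insn already returns the insn it creates; emitting into a detached sequence therefore yields an rtx_insn * with no cast at all. A minimal sketch of that sequence idiom (emit_detached_insn is an illustrative name, not code from the patch):

  static rtx_insn *
  emit_detached_insn (rtx pat)
  {
    start_sequence ();               /* divert emission into a fresh sequence */
    rtx_insn *i = emit_insn (pat);   /* emit_insn hands back the new insn */
    end_sequence ();                 /* restore the previous sequence */
    return i;                        /* the insn is not on the main chain */
  }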
@@ -1222,7 +1222,7 @@ sh_treg_combine::try_eliminate_cstores (cbranch_trace& trace,
 // invert the ccreg as a replacement for one of them.
 if (cstore_count != 0 && inv_cstore_count != 0)
 {
-rtx i = make_inv_ccreg_insn ();
+rtx_insn *i = make_inv_ccreg_insn ();
 if (recog_memoized (i) < 0)
 {
 log_msg ("failed to match ccreg inversion insn:\n");


@@ -2393,10 +2393,11 @@ combine_bnp (rtx_insn *insn)
 {
 int insn_code, regno, need_extend;
 unsigned int mask;
-rtx cond, reg, and_insn, load, qireg, mem;
+rtx cond, reg, qireg, mem;
+rtx_insn *and_insn, *load;
 enum machine_mode load_mode = QImode;
 enum machine_mode and_mode = QImode;
-rtx shift = NULL_RTX;
+rtx_insn *shift = NULL;
 insn_code = recog_memoized (insn);
 if (insn_code != CODE_FOR_cbranchhi
@@ -2501,7 +2502,7 @@ combine_bnp (rtx_insn *insn)
 if (reg_mentioned_p (reg, shift)
 || (! NOTE_P (shift) && ! NONJUMP_INSN_P (shift)))
 {
-shift = NULL_RTX;
+shift = NULL;
 break;
 }
 }


@@ -4594,7 +4594,7 @@ replace_mov_pcrel_step1 (rtx_insn *insn)
 /* Returns true if INSN is the second instruction of a pc-relative
 address compuatation. */
 static bool
-match_pcrel_step2 (rtx insn)
+match_pcrel_step2 (rtx_insn *insn)
 {
 rtx unspec;
 rtx addr;


@@ -2491,7 +2491,8 @@ final_scan_insn (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED,
 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
 if (note)
 {
-NOTICE_UPDATE_CC (PATTERN (XEXP (note, 0)), XEXP (note, 0));
+rtx_insn *other = as_a <rtx_insn *> (XEXP (note, 0));
+NOTICE_UPDATE_CC (PATTERN (other), other);
 cc_prev_status = cc_status;
 }
 }
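
This hunk introduces the other recurring tool of the patch: as_a <rtx_insn *>, the checked cast used where an rtx is known, but not statically proven, to be an insn. A minimal sketch of how it behaves (setter_from_note is an illustrative name; in checked builds as_a asserts the dynamic kind):

  static rtx_insn *
  setter_from_note (rtx note)
  {
    /* XEXP gives back a plain rtx; as_a <rtx_insn *> verifies it is
       really an insn before returning the stronger pointer type.  */
    return as_a <rtx_insn *> (XEXP (note, 0));
  }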
@@ -3103,7 +3104,7 @@ notice_source_line (rtx_insn *insn, bool *is_stmt)
 directly to the desired hard register. */
 void
-cleanup_subreg_operands (rtx insn)
+cleanup_subreg_operands (rtx_insn *insn)
 {
 int i;
 bool changed = false;
@@ -3139,7 +3140,7 @@ cleanup_subreg_operands (rtx insn)
 *recog_data.dup_loc[i] = walk_alter_subreg (recog_data.dup_loc[i], &changed);
 }
 if (changed)
-df_insn_rescan (as_a <rtx_insn *> (insn));
+df_insn_rescan (insn);
 }
 /* If X is a SUBREG, try to replace it with a REG or a MEM, based on


@@ -2142,7 +2142,7 @@ process_insert_insn (struct expr *expr)
 insn will be recognized (this also adds any needed CLOBBERs). */
 else
 {
-rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
+rtx_insn *insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
 if (insn_invalid_p (insn, false))
 gcc_unreachable ();


@@ -178,7 +178,7 @@ main (int argc, char **argv)
 {
 printf ("extern int num_delay_slots (rtx);\n");
 printf ("extern int eligible_for_delay (rtx_insn *, int, rtx_insn *, int);\n\n");
-printf ("extern int const_num_delay_slots (rtx);\n\n");
+printf ("extern int const_num_delay_slots (rtx_insn *);\n\n");
 have_delay = 1;
 }


@@ -4694,7 +4694,7 @@ write_const_num_delay_slots (FILE *outf)
 if (attr)
 {
-fprintf (outf, "int\nconst_num_delay_slots (rtx insn)\n");
+fprintf (outf, "int\nconst_num_delay_slots (rtx_insn *insn)\n");
 fprintf (outf, "{\n");
 fprintf (outf, " switch (recog_memoized (insn))\n");
 fprintf (outf, " {\n");


@@ -140,7 +140,7 @@ struct hw_doloop_hooks
 /* Examine INSN. If it is a suitable doloop_end pattern, return the
 iteration register, which should be a single hard register.
 Otherwise, return NULL_RTX. */
-rtx (*end_pattern_reg) (rtx insn);
+rtx (*end_pattern_reg) (rtx_insn *insn);
 /* Optimize LOOP. The target should perform any additional analysis
 (e.g. checking that the loop isn't too long), and then perform
 its transformations. Return true if successful, false if the
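
Strengthening a callback field ripples out to every target that fills in the hook, which is why the bfin and c6x changes travel with this one. A sketch of what a conforming hook might look like, under assumptions about the doloop_end shape (example_pattern_reg is an illustrative name, not a function in the tree):

  static rtx
  example_pattern_reg (rtx_insn *insn)
  {
    rtx pat = PATTERN (insn);
    /* Suppose the port's doloop_end wraps its counter SET in a PARALLEL.  */
    if (GET_CODE (pat) == PARALLEL)
      pat = XVECEXP (pat, 0, 0);
    if (GET_CODE (pat) == SET && REG_P (SET_DEST (pat)))
      return SET_DEST (pat);      /* the iteration register */
    return NULL_RTX;              /* not a suitable doloop_end */
  }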


@@ -861,20 +861,18 @@ noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep,
 if ((if_info->cond_earliest == if_info->jump || cond_complex)
 && (normalize == 0 || STORE_FLAG_VALUE == normalize))
 {
-rtx tmp;
-tmp = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
+rtx src = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
 XEXP (cond, 1));
-tmp = gen_rtx_SET (VOIDmode, x, tmp);
+rtx set = gen_rtx_SET (VOIDmode, x, src);
 start_sequence ();
-tmp = emit_insn (tmp);
+rtx_insn *insn = emit_insn (set);
-if (recog_memoized (tmp) >= 0)
+if (recog_memoized (insn) >= 0)
 {
-tmp = get_insns ();
+rtx_insn *seq = get_insns ();
 end_sequence ();
-emit_insn (tmp);
+emit_insn (seq);
 if_info->cond_earliest = if_info->jump;
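
The ifcvt.c hunks all untangle one overloaded "tmp" into variables whose types say what they hold: plain rtx for patterns, rtx_insn * for emitted insns and sequences. A minimal sketch of the emit-and-validate shape the code above follows (emit_if_recognizable is an illustrative name):

  static rtx_insn *
  emit_if_recognizable (rtx set)
  {
    start_sequence ();
    rtx_insn *insn = emit_insn (set);
    if (recog_memoized (insn) >= 0)
      {
        rtx_insn *seq = get_insns ();   /* the whole pending sequence */
        end_sequence ();
        return seq;                     /* caller re-emits it for real */
      }
    end_sequence ();
    return NULL;                        /* target cannot match this set */
  }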
@@ -906,7 +904,8 @@ noce_emit_move_insn (rtx x, rtx y)
 if (GET_CODE (x) != STRICT_LOW_PART)
 {
-rtx seq, insn, target;
+rtx_insn *seq, *insn;
+rtx target;
 optab ot;
 start_sequence ();
@@ -1417,20 +1416,19 @@ noce_emit_cmove (struct noce_if_info *if_info, rtx x, enum rtx_code code,
 if (if_info->cond_earliest == if_info->jump)
 {
-rtx tmp;
-tmp = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
-tmp = gen_rtx_IF_THEN_ELSE (GET_MODE (x), tmp, vtrue, vfalse);
-tmp = gen_rtx_SET (VOIDmode, x, tmp);
+rtx cond = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
+rtx if_then_else = gen_rtx_IF_THEN_ELSE (GET_MODE (x),
+cond, vtrue, vfalse);
+rtx set = gen_rtx_SET (VOIDmode, x, if_then_else);
 start_sequence ();
-tmp = emit_insn (tmp);
+rtx_insn *insn = emit_insn (set);
-if (recog_memoized (tmp) >= 0)
+if (recog_memoized (insn) >= 0)
 {
-tmp = get_insns ();
+rtx_insn *seq = get_insns ();
 end_sequence ();
-emit_insn (tmp);
+emit_insn (seq);
 return x;
 }
@@ -1563,11 +1561,12 @@ noce_try_cmove_arith (struct noce_if_info *if_info)
 rtx b = if_info->b;
 rtx x = if_info->x;
 rtx orig_a, orig_b;
-rtx insn_a, insn_b;
-rtx tmp, target;
+rtx_insn *insn_a, *insn_b;
+rtx target;
 int is_mem = 0;
 int insn_cost;
 enum rtx_code code;
+rtx_insn *ifcvt_seq;
 /* A conditional move from two memory sources is equivalent to a
 conditional on their addresses followed by a load. Don't do this
@@ -1637,9 +1636,11 @@ noce_try_cmove_arith (struct noce_if_info *if_info)
 if (reversep)
 {
+rtx tmp;
+rtx_insn *tmp_insn;
 code = reversed_comparison_code (if_info->cond, if_info->jump);
 tmp = a, a = b, b = tmp;
-tmp = insn_a, insn_a = insn_b, insn_b = tmp;
+tmp_insn = insn_a, insn_a = insn_b, insn_b = tmp_insn;
 }
 }
@@ -1654,44 +1655,46 @@ noce_try_cmove_arith (struct noce_if_info *if_info)
 This is of course not possible in the IS_MEM case. */
 if (! general_operand (a, GET_MODE (a)))
 {
-rtx set;
+rtx_insn *insn;
 if (is_mem)
 {
-tmp = gen_reg_rtx (GET_MODE (a));
-tmp = emit_insn (gen_rtx_SET (VOIDmode, tmp, a));
+rtx reg = gen_reg_rtx (GET_MODE (a));
+insn = emit_insn (gen_rtx_SET (VOIDmode, reg, a));
 }
 else if (! insn_a)
 goto end_seq_and_fail;
 else
 {
 a = gen_reg_rtx (GET_MODE (a));
-tmp = copy_rtx (insn_a);
-set = single_set (tmp);
+rtx_insn *copy_of_a = as_a <rtx_insn *> (copy_rtx (insn_a));
+rtx set = single_set (copy_of_a);
 SET_DEST (set) = a;
-tmp = emit_insn (PATTERN (tmp));
+insn = emit_insn (PATTERN (copy_of_a));
 }
-if (recog_memoized (tmp) < 0)
+if (recog_memoized (insn) < 0)
 goto end_seq_and_fail;
 }
 if (! general_operand (b, GET_MODE (b)))
 {
-rtx set, last;
+rtx pat;
+rtx_insn *last;
+rtx_insn *new_insn;
 if (is_mem)
 {
-tmp = gen_reg_rtx (GET_MODE (b));
-tmp = gen_rtx_SET (VOIDmode, tmp, b);
+rtx reg = gen_reg_rtx (GET_MODE (b));
+pat = gen_rtx_SET (VOIDmode, reg, b);
 }
 else if (! insn_b)
 goto end_seq_and_fail;
 else
 {
 b = gen_reg_rtx (GET_MODE (b));
-tmp = copy_rtx (insn_b);
-set = single_set (tmp);
+rtx_insn *copy_of_insn_b = as_a <rtx_insn *> (copy_rtx (insn_b));
+rtx set = single_set (copy_of_insn_b);
 SET_DEST (set) = b;
-tmp = PATTERN (tmp);
+pat = PATTERN (copy_of_insn_b);
 }
 /* If insn to set up A clobbers any registers B depends on, try to
@@ -1700,14 +1703,14 @@ noce_try_cmove_arith (struct noce_if_info *if_info)
 last = get_last_insn ();
 if (last && modified_in_p (orig_b, last))
 {
-tmp = emit_insn_before (tmp, get_insns ());
-if (modified_in_p (orig_a, tmp))
+new_insn = emit_insn_before (pat, get_insns ());
+if (modified_in_p (orig_a, new_insn))
 goto end_seq_and_fail;
 }
 else
-tmp = emit_insn (tmp);
+new_insn = emit_insn (pat);
-if (recog_memoized (tmp) < 0)
+if (recog_memoized (new_insn) < 0)
 goto end_seq_and_fail;
 }
@@ -1720,29 +1723,30 @@ noce_try_cmove_arith (struct noce_if_info *if_info)
 /* If we're handling a memory for above, emit the load now. */
 if (is_mem)
 {
-tmp = gen_rtx_MEM (GET_MODE (if_info->x), target);
+rtx mem = gen_rtx_MEM (GET_MODE (if_info->x), target);
 /* Copy over flags as appropriate. */
 if (MEM_VOLATILE_P (if_info->a) || MEM_VOLATILE_P (if_info->b))
-MEM_VOLATILE_P (tmp) = 1;
+MEM_VOLATILE_P (mem) = 1;
 if (MEM_ALIAS_SET (if_info->a) == MEM_ALIAS_SET (if_info->b))
-set_mem_alias_set (tmp, MEM_ALIAS_SET (if_info->a));
-set_mem_align (tmp,
+set_mem_alias_set (mem, MEM_ALIAS_SET (if_info->a));
+set_mem_align (mem,
 MIN (MEM_ALIGN (if_info->a), MEM_ALIGN (if_info->b)));
 gcc_assert (MEM_ADDR_SPACE (if_info->a) == MEM_ADDR_SPACE (if_info->b));
-set_mem_addr_space (tmp, MEM_ADDR_SPACE (if_info->a));
+set_mem_addr_space (mem, MEM_ADDR_SPACE (if_info->a));
-noce_emit_move_insn (if_info->x, tmp);
+noce_emit_move_insn (if_info->x, mem);
 }
 else if (target != x)
 noce_emit_move_insn (x, target);
-tmp = end_ifcvt_sequence (if_info);
-if (!tmp)
+ifcvt_seq = end_ifcvt_sequence (if_info);
+if (!ifcvt_seq)
 return FALSE;
-emit_insn_before_setloc (tmp, if_info->jump, INSN_LOCATION (if_info->insn_a));
+emit_insn_before_setloc (ifcvt_seq, if_info->jump,
+INSN_LOCATION (if_info->insn_a));
 return TRUE;
 end_seq_and_fail:


@@ -960,7 +960,7 @@ extern void ira_print_disposition (FILE *);
 extern void ira_debug_disposition (void);
 extern void ira_debug_allocno_classes (void);
 extern void ira_init_register_move_cost (enum machine_mode);
-extern void ira_setup_alts (rtx insn, HARD_REG_SET &alts);
+extern void ira_setup_alts (rtx_insn *insn, HARD_REG_SET &alts);
 extern int ira_get_dup_out_num (int op_num, HARD_REG_SET &alts);
 /* ira-build.c */


@@ -1737,7 +1737,8 @@ static void
 setup_prohibited_mode_move_regs (void)
 {
 int i, j;
-rtx test_reg1, test_reg2, move_pat, move_insn;
+rtx test_reg1, test_reg2, move_pat;
+rtx_insn *move_insn;
 if (ira_prohibited_mode_move_regs_initialized_p)
 return;
@@ -1773,7 +1774,7 @@ setup_prohibited_mode_move_regs (void)
 /* Setup possible alternatives in ALTS for INSN. */
 void
-ira_setup_alts (rtx insn, HARD_REG_SET &alts)
+ira_setup_alts (rtx_insn *insn, HARD_REG_SET &alts)
 {
 /* MAP nalt * nop -> start of constraints for given operand and
 alternative */


@@ -3000,7 +3000,7 @@ emit_inc (enum reg_class new_rclass, rtx in, rtx value, int inc_amount)
 || GET_CODE (value) == POST_MODIFY);
 rtx_insn *last;
 rtx inc;
-rtx add_insn;
+rtx_insn *add_insn;
 int code;
 rtx real_in = in == value ? incloc : in;
 rtx result;


@@ -253,13 +253,12 @@ static rtx
 emit_add3_insn (rtx x, rtx y, rtx z)
 {
 rtx_insn *last;
-rtx insn;
 last = get_last_insn ();
 if (have_addptr3_insn (x, y, z))
 {
-insn = gen_addptr3_insn (x, y, z);
+rtx insn = gen_addptr3_insn (x, y, z);
 /* If the target provides an "addptr" pattern it hopefully does
 for a reason. So falling back to the normal add would be
@@ -269,12 +268,12 @@ emit_add3_insn (rtx x, rtx y, rtx z)
 return insn;
 }
-insn = emit_insn (gen_rtx_SET (VOIDmode, x,
-gen_rtx_PLUS (GET_MODE (y), y, z)));
+rtx_insn *insn = emit_insn (gen_rtx_SET (VOIDmode, x,
+gen_rtx_PLUS (GET_MODE (y), y, z)));
 if (recog_memoized (insn) < 0)
 {
 delete_insns_since (last);
-insn = NULL_RTX;
+insn = NULL;
 }
 return insn;
 }
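
emit_add3_insn is the cleanest statement of the tentative-emit pattern that recurs through the rest of the lra.c hunks: emit, ask recog_memoized, and roll back with delete_insns_since on failure. A minimal sketch (try_emit is an illustrative name):

  static rtx_insn *
  try_emit (rtx pat)
  {
    rtx_insn *last = get_last_insn ();  /* rollback point */
    rtx_insn *insn = emit_insn (pat);
    if (recog_memoized (insn) < 0)
      {
        delete_insns_since (last);      /* discard the unmatched insn */
        return NULL;
      }
    return insn;
  }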
@@ -310,14 +309,13 @@ void
 lra_emit_add (rtx x, rtx y, rtx z)
 {
 int old;
-rtx insn;
 rtx_insn *last;
 rtx a1, a2, base, index, disp, scale, index_scale;
 bool ok_p;
-insn = emit_add3_insn (x, y, z);
+rtx add3_insn = emit_add3_insn (x, y, z);
 old = max_reg_num ();
-if (insn != NULL_RTX)
+if (add3_insn != NULL)
 ;
 else
 {
@@ -368,7 +366,7 @@ lra_emit_add (rtx x, rtx y, rtx z)
 adding the address segment to register. */
 lra_assert (x != y && x != z);
 emit_move_insn (x, y);
-insn = emit_add2_insn (x, z);
+rtx insn = emit_add2_insn (x, z);
 lra_assert (insn != NULL_RTX);
 }
 else
@@ -380,7 +378,7 @@ lra_emit_add (rtx x, rtx y, rtx z)
 /* Generate x = index_scale; x = x + base. */
 lra_assert (index_scale != NULL_RTX && base != NULL_RTX);
 emit_move_insn (x, index_scale);
-insn = emit_add2_insn (x, base);
+rtx insn = emit_add2_insn (x, base);
 lra_assert (insn != NULL_RTX);
 }
 else if (scale == NULL_RTX)
@@ -388,20 +386,20 @@ lra_emit_add (rtx x, rtx y, rtx z)
 /* Try x = base + disp. */
 lra_assert (base != NULL_RTX);
 last = get_last_insn ();
-insn = emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base),
-base, disp));
-if (recog_memoized (insn) < 0)
+rtx_insn *move_insn =
+emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base), base, disp));
+if (recog_memoized (move_insn) < 0)
 {
 delete_insns_since (last);
 /* Generate x = disp; x = x + base. */
 emit_move_insn (x, disp);
-insn = emit_add2_insn (x, base);
-lra_assert (insn != NULL_RTX);
+rtx add2_insn = emit_add2_insn (x, base);
+lra_assert (add2_insn != NULL_RTX);
 }
 /* Generate x = x + index. */
 if (index != NULL_RTX)
 {
-insn = emit_add2_insn (x, index);
+rtx insn = emit_add2_insn (x, index);
 lra_assert (insn != NULL_RTX);
 }
 }
@@ -409,11 +407,11 @@ lra_emit_add (rtx x, rtx y, rtx z)
 {
 /* Try x = index_scale; x = x + disp; x = x + base. */
 last = get_last_insn ();
-insn = emit_move_insn (x, index_scale);
+rtx_insn *move_insn = emit_move_insn (x, index_scale);
 ok_p = false;
-if (recog_memoized (insn) >= 0)
+if (recog_memoized (move_insn) >= 0)
 {
-insn = emit_add2_insn (x, disp);
+rtx insn = emit_add2_insn (x, disp);
 if (insn != NULL_RTX)
 {
 insn = emit_add2_insn (x, base);
@@ -426,7 +424,7 @@ lra_emit_add (rtx x, rtx y, rtx z)
 delete_insns_since (last);
 /* Generate x = disp; x = x + base; x = x + index_scale. */
 emit_move_insn (x, disp);
-insn = emit_add2_insn (x, base);
+rtx insn = emit_add2_insn (x, base);
 lra_assert (insn != NULL_RTX);
 insn = emit_add2_insn (x, index_scale);
 lra_assert (insn != NULL_RTX);


@@ -1003,8 +1003,9 @@ eliminate_partially_redundant_load (basic_block bb, rtx_insn *insn,
 /* Make sure we can generate a move from register avail_reg to
 dest. */
-extract_insn (gen_move_insn (copy_rtx (dest),
-copy_rtx (avail_reg)));
+extract_insn (as_a <rtx_insn *> (
+gen_move_insn (copy_rtx (dest),
+copy_rtx (avail_reg))));
 if (! constrain_operands (1)
 || reg_killed_on_edge (avail_reg, pred)
 || reg_used_on_edge (dest, pred))


@@ -316,7 +316,7 @@ canonicalize_change_group (rtx insn, rtx x)
 Otherwise the changes will take effect immediately. */
 int
-insn_invalid_p (rtx insn, bool in_group)
+insn_invalid_p (rtx_insn *insn, bool in_group)
 {
 rtx pat = PATTERN (insn);
 int num_clobbers = 0;
@@ -424,7 +424,7 @@ verify_changes (int num)
 }
 else if (DEBUG_INSN_P (object))
 continue;
-else if (insn_invalid_p (object, true))
+else if (insn_invalid_p (as_a <rtx_insn *> (object), true))
 {
 rtx pat = PATTERN (object);
@@ -2100,7 +2100,7 @@ get_enabled_alternatives (rtx insn)
 valid information. This is used primary by gen_attr infrastructure that
 often does extract insn again and again. */
 void
-extract_insn_cached (rtx insn)
+extract_insn_cached (rtx_insn *insn)
 {
 if (recog_data.insn == insn && INSN_CODE (insn) >= 0)
 return;
@@ -2111,7 +2111,7 @@ extract_insn_cached (rtx insn)
 /* Do cached extract_insn, constrain_operands and complain about failures.
 Used by insn_attrtab. */
 void
-extract_constrain_insn_cached (rtx insn)
+extract_constrain_insn_cached (rtx_insn *insn)
 {
 extract_insn_cached (insn);
 if (which_alternative == -1
@@ -2132,7 +2132,7 @@ constrain_operands_cached (int strict)
 /* Analyze INSN and fill in recog_data. */
 void
-extract_insn (rtx insn)
+extract_insn (rtx_insn *insn)
 {
 int i;
 int icode;


@@ -89,7 +89,7 @@ extern int asm_operand_ok (rtx, const char *, const char **);
 extern bool validate_change (rtx, rtx *, rtx, bool);
 extern bool validate_unshare_change (rtx, rtx *, rtx, bool);
 extern bool canonicalize_change_group (rtx insn, rtx x);
-extern int insn_invalid_p (rtx, bool);
+extern int insn_invalid_p (rtx_insn *, bool);
 extern int verify_changes (int);
 extern void confirm_change_group (void);
 extern int apply_change_group (void);
@@ -128,14 +128,14 @@ extern bool mode_dependent_address_p (rtx, addr_space_t);
 extern int recog (rtx, rtx, int *);
 #ifndef GENERATOR_FILE
-static inline int recog_memoized (rtx insn);
+static inline int recog_memoized (rtx_insn *insn);
 #endif
 extern void add_clobbers (rtx, int);
 extern int added_clobbers_hard_reg_p (int);
 extern void insn_extract (rtx);
-extern void extract_insn (rtx);
-extern void extract_constrain_insn_cached (rtx);
-extern void extract_insn_cached (rtx);
+extern void extract_insn (rtx_insn *);
+extern void extract_constrain_insn_cached (rtx_insn *);
+extern void extract_insn_cached (rtx_insn *);
 extern void preprocess_constraints (int, int, const char **,
 operand_alternative *);
 extern const operand_alternative *preprocess_insn_constraints (int);
@@ -163,7 +163,7 @@ extern int if_test_bypass_p (rtx, rtx);
 through this one. */
 static inline int
-recog_memoized (rtx insn)
+recog_memoized (rtx_insn *insn)
 {
 if (INSN_CODE (insn) < 0)
 INSN_CODE (insn) = recog (PATTERN (insn), insn, 0);
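
This recog.h hunk is the heart of the change: once recog_memoized demands an rtx_insn *, handing it anything weaker becomes a compile-time error instead of a latent crash inside PATTERN. A minimal sketch of what that buys a caller (insn_is_recognizable is an illustrative name):

  static bool
  insn_is_recognizable (rtx_insn *insn)
  {
    /* recog_memoized caches the code in INSN_CODE on success;
       a plain rtx argument would no longer compile here.  */
    return recog_memoized (insn) >= 0;
  }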


@@ -885,7 +885,8 @@ reload_inner_reg_of_subreg (rtx x, enum machine_mode mode, bool output)
 static int
 can_reload_into (rtx in, int regno, enum machine_mode mode)
 {
-rtx dst, test_insn;
+rtx dst;
+rtx_insn *test_insn;
 int r = 0;
 struct recog_data_d save_recog_data;


@@ -446,7 +446,7 @@ extern void setup_save_areas (void);
 extern void save_call_clobbered_regs (void);
 /* Replace (subreg (reg)) with the appropriate (reg) for any operands. */
-extern void cleanup_subreg_operands (rtx);
+extern void cleanup_subreg_operands (rtx_insn *);
 /* Debugging support. */
 extern void debug_reload_to_stream (FILE *);


@@ -8562,12 +8562,12 @@ emit_reload_insns (struct insn_chain *chain)
 Return the emitted insn if valid, else return NULL. */
 static rtx_insn *
-emit_insn_if_valid_for_reload (rtx insn)
+emit_insn_if_valid_for_reload (rtx pat)
 {
 rtx_insn *last = get_last_insn ();
 int code;
-insn = emit_insn (insn);
+rtx_insn *insn = emit_insn (pat);
 code = recog_memoized (insn);
 if (code >= 0)
@@ -8577,7 +8577,7 @@ emit_insn_if_valid_for_reload (rtx insn)
 validity determination, i.e., the way it would after reload has
 completed. */
 if (constrain_operands (1))
-return as_a <rtx_insn *> (insn);
+return insn;
 }
 delete_insns_since (last);


@@ -1309,7 +1309,7 @@ vinsn_cond_branch_p (vinsn_t vi)
 /* Return latency of INSN. */
 static int
-sel_insn_rtx_cost (rtx insn)
+sel_insn_rtx_cost (rtx_insn *insn)
 {
 int cost;


@@ -990,7 +990,7 @@ vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
 Code adopted from regrename.c::build_def_use. */
 static enum reg_class
-get_reg_class (rtx insn)
+get_reg_class (rtx_insn *insn)
 {
 int i, n_ops;