[multiple changes]

2011-03-28  Vladimir Makarov  <vmakarov@redhat.com>

	* ira-color.c (update_left_conflict_sizes_p): Don't assume that
	conflict object hard regset nodes have intersecting hard reg sets.
	
	* regmove.c (regmove_optimize): Move ira_set_pseudo_classes call
	after regstat_init_n_sets_and_refs.

	* ira.c: Add more comments at the top.
	(setup_stack_reg_pressure_class, setup_pressure_classes):
	Add comments how we compute the register pressure classes.
	(setup_allocno_and_important_classes): Add more comments.
	(setup_class_translate_array, reorder_important_classes)
	(setup_reg_class_relations): Add comments.

	* ira-emit.c: Add 2011 to the Copyright line.  Add comments at the
	start of the file.

	* ira-color.c: Add 2011 to the Copyright line.
	(assign_hard_reg): Add more comments.
	(improve_allocation): Ditto.

	* ira-costs.c: Add 2011 to the Copyright line.
	(setup_cost_classes, setup_regno_cost_classes_by_aclass): Add more
	comments.
	(setup_regno_cost_classes_by_mode): Ditto.

	Initial patches from ira-improv branch:

	2010-08-13  Vladimir Makarov  <vmakarov@redhat.com>

	* ira-build.c (ira_create_object): Remove initialization of
	OBJECT_PROFITABLE_HARD_REGS.  Initialize OBJECT_ADD_DATA.
	(ira_create_allocno): Remove initialization of
	ALLOCNO_MEM_OPTIMIZED_DEST, ALLOCNO_MEM_OPTIMIZED_DEST_P,
	ALLOCNO_SOMEWHERE_RENAMED_P, ALLOCNO_CHILD_RENAMED_P,
	ALLOCNO_IN_GRAPH_P, ALLOCNO_MAY_BE_SPILLED_P, ALLOCNO_COLORABLE_P,
	ALLOCNO_NEXT_BUCKET_ALLOCNO, ALLOCNO_PREV_BUCKET_ALLOCNO,
	ALLOCNO_FIRST_COALESCED_ALLOCNO, ALLOCNO_NEXT_COALESCED_ALLOCNO.
	Initialize ALLOCNO_ADD_DATA.
	(copy_info_to_removed_store_destinations): Use ALLOCNO_EMIT_DATA
	and allocno_emit_reg instead of ALLOCNO_MEM_OPTIMIZED_DEST_P and
	ALLOCNO_REG.
	(ira_flattening): Ditto.  Use ALLOCNO_EMIT_DATA instead of
	ALLOCNO_MEM_OPTIMIZED_DEST and ALLOCNO_SOMEWHERE_RENAMED_P.

	* ira.c (ira_reallocate): Remove.
	(setup_pressure_classes): Call
	ira_init_register_move_cost_if_necessary.  Use
	ira_register_move_cost instead of ira_get_register_move_cost.
	(setup_allocno_assignment_flags): Use ALLOCNO_EMIT_DATA.
	(ira): Call ira_initiate_emit_data and ira_finish_emit_data.

	* ira-color.c: Use ALLOCNO_COLOR_DATA instead of
	ALLOCNO_IN_GRAPH_P, ALLOCNO_MAY_BE_SPILLED_P, ALLOCNO_COLORABLE_P,
	ALLOCNO_AVAILABLE_REGS_NUM, ALLOCNO_NEXT_BUCKET_ALLOCNO,
	ALLOCNO_PREV_BUCKET_ALLOCNO, ALLOCNO_TEMP.  Use OBJECT_COLOR_DATA
	instead of OBJECT_PROFITABLE_HARD_REGS, OBJECT_HARD_REGS_NODE,
	OBJECT_HARD_REGS_SUBNODES_START, OBJECT_HARD_REGS_SUBNODES_NUM.
	Fix formatting.
	(object_hard_regs_t, object_hard_regs_node_t): Move from
	ira-int.h.
	(struct object_hard_regs, struct object_hard_regs_node): Ditto.
	(struct allocno_color_data): New.
	(allocno_color_data_t): New typedef.
	(allocno_color_data): New definition.
	(ALLOCNO_COLOR_DATA): New macro.
	(struct object_color_data): New.
	(object_color_data_t): New typedef.
	(object_color_data): New definition.
	(OBJECT_COLOR_DATA): New macro.
	(update_copy_costs, calculate_allocno_spill_cost): Call
	ira_init_register_move_cost_if_necessary.  Use
	ira_register_move_cost instead of ira_get_register_move_cost.
	(move_spill_restore, update_curr_costs): Ditto.
	(allocno_spill_priority): Make it inline.
	(color_pass): Allocate and free allocno_color_data and
	object_color_data.
	(struct coalesce_data, coalesce_data_t): New.
	(allocno_coalesce_data): New definition.
	(ALLOCNO_COALESCE_DATA): New macro.
	(merge_allocnos, coalesced_allocno_conflict_p): Use
	ALLOCNO_COALESCE_DATA instead of ALLOCNO_FIRST_COALESCED_ALLOCNO,
	ALLOCNO_NEXT_COALESCED_ALLOCNO, ALLOCNO_TEMP.
	(coalesce_allocnos): Ditto.
	(setup_coalesced_allocno_costs_and_nums): Ditto.
	(collect_spilled_coalesced_allocnos): Ditto.
	(slot_coalesced_allocno_live_ranges_intersect_p): Ditto.
	(setup_slot_coalesced_allocno_live_ranges): Ditto.
	(coalesce_spill_slots): Ditto.
	(ira_sort_regnos_for_alter_reg): Ditto.  Allocate, initialize and
	free allocno_coalesce_data.
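
	The color and coalesce entries above follow one pattern: the pass
	allocates its own per-allocno record and reaches it through the
	allocno's new add_data hook.  A minimal sketch in C; the field
	layout is an assumption, while the structure, typedef, and macro
	names come from this log (ira_allocno_t and ALLOCNO_ADD_DATA are
	the ira-int.h declarations the series introduces):

	struct allocno_color_data
	{
	  unsigned int in_graph_p : 1;       /* was ALLOCNO_IN_GRAPH_P */
	  unsigned int may_be_spilled_p : 1; /* was ALLOCNO_MAY_BE_SPILLED_P */
	  unsigned int colorable_p : 1;      /* was ALLOCNO_COLORABLE_P */
	  int available_regs_num;            /* was ALLOCNO_AVAILABLE_REGS_NUM */
	  int temp;                          /* was ALLOCNO_TEMP */
	  ira_allocno_t next_bucket_allocno; /* was ALLOCNO_NEXT_BUCKET_ALLOCNO */
	  ira_allocno_t prev_bucket_allocno; /* was ALLOCNO_PREV_BUCKET_ALLOCNO */
	};
	typedef struct allocno_color_data *allocno_color_data_t;

	/* The accessor simply casts the generic per-allocno hook.  */
	#define ALLOCNO_COLOR_DATA(a) \
	  ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))

	color_pass allocates one such record per allocno before coloring a
	region and frees it afterwards; coalesce_data (reached through
	ALLOCNO_COALESCE_DATA) and object_color_data follow the same shape.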

	* ira-conflicts.c: Fix formatting.
	(process_regs_for_copy): Call
	ira_init_register_move_cost_if_necessary.  Use
	ira_register_move_cost instead of ira_get_register_move_cost.
	(build_object_conflicts): Optimize.

	* ira-costs.c (record_reg_classes): Optimize.  Call
	ira_init_register_move_cost_if_necessary.  Use
	ira_register_move_cost, ira_may_move_in_cost, and
	ira_may_move_out_cost instead of ira_get_register_move_cost and
	ira_get_may_move_cost.
	(record_address_regs): Ditto.
	(scan_one_insn): Optimize.
	(find_costs_and_classes): Optimize.
	(process_bb_node_for_hard_reg_moves): Call
	ira_init_register_move_cost_if_necessary.  Use
	ira_register_move_cost instead of ira_get_register_move_cost.

	* ira-emit.c: Use allocno_emit_reg, ALLOCNO_EMIT_DATA instead of
	ALLOCNO_REG, ALLOCNO_CHILD_RENAMED_P, ALLOCNO_MEM_OPTIMIZED_DEST,
	ALLOCNO_MEM_OPTIMIZED_DEST_P, and ALLOCNO_SOMEWHERE_RENAMED_P.
	(ira_allocno_emit_data, void_p, new_allocno_emit_data_vec): New
	definitions.
	(ira_initiate_emit_data, ira_finish_emit_data)
	(create_new_allocno): New functions.
	(modify_move_list): Call create_new_allocno instead of
	ira_create_allocno.
	(emit_move_list): Call ira_init_register_move_cost_if_necessary.
	Use ira_register_move_cost instead of ira_get_register_move_cost.

	* ira-int.h: Fix some comments.
	(object_hard_regs_t, object_hard_regs_node_t): Move
	to ira-color.c.
	(struct object_hard_regs, struct object_hard_regs_node):
	Ditto.
	(struct ira_object): Remove profitable_hard_regs, hard_regs_node,
	hard_regs_subnodes_start, hard_regs_subnodes_num.  Add new member
	add_data.
	(struct ira_allocno): Make mode and aclass a bitfield.  Move other
	bitfields after mode.  Make hard_regno a short int.  Remove
	first_coalesced_allocno and
	next_coalesced_allocno.  Move mem_optimized_dest_p,
	somewhere_renamed_p, child_renamed_p, reg, and mem_optimized_dest
	into struct ira_emit_data.  Remove in_graph_p, may_be_spilled_p,
	available_regs_num, next_bucket_allocno, prev_bucket_allocno,
	temp, colorable_p.  Add new member add_data.
	(ALLOCNO_IN_GRAPH_P, ALLOCNO_MAY_BE_SPILLED_P): Remove.
	(ALLOCNO_COLORABLE_P, ALLOCNO_AVAILABLE_REGS_NUM): Remove.
	(ALLOCNO_NEXT_BUCKET_ALLOCNO, ALLOCNO_PREV_BUCKET_ALLOCNO): Remove.
	(ALLOCNO_TEMP, ALLOCNO_FIRST_COALESCED_ALLOCNO): Remove.
	(ALLOCNO_NEXT_COALESCED_ALLOCNO): Remove.
	(ALLOCNO_ADD_DATA): New macro.
	(ira_emit_data_t): New typedef.
	(struct ira_emit_data): New.  Move mem_optimized_dest_p,
	somewhere_renamed_p, child_renamed_p, reg, mem_optimized_dest
	from struct ira_allocno.
	(ALLOCNO_EMIT_DATA): New macro.
	(ira_allocno_emit_data, allocno_emit_reg): New.
	(OBJECT_PROFITABLE_HARD_REGS, OBJECT_HARD_REGS_NODE): Remove.
	(OBJECT_HARD_REGS_SUBNODES_START, OBJECT_HARD_REGS_SUBNODES_NUM):
	Remove.
	(OBJECT_ADD_DATA): New macro.
	(ira_reallocate): Remove.
	(ira_initiate_emit_data, ira_finish_emit_data): New.
	(ira_get_register_move_cost, ira_get_may_move_cost): Remove.
	(ira_init_register_move_cost_if_necessary): New.
	(ira_object_conflict_iter_next): Merge into
	ira_object_conflict_iter_cond.
	(FOR_EACH_OBJECT_CONFLICT): Don't use
	ira_object_conflict_iter_next.
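
	The ira-int.h entries above split the emit-only allocno fields into
	a separate structure.  A sketch of what that interface looks like;
	the field types, array-based storage, and the use of the existing
	ALLOCNO_NUM macro are assumptions, while the structure, macro, and
	accessor names come from the log:

	struct ira_emit_data
	{
	  unsigned char mem_optimized_dest_p : 1;
	  unsigned char somewhere_renamed_p : 1;
	  unsigned char child_renamed_p : 1;
	  rtx reg;                           /* was ALLOCNO_REG */
	  ira_allocno_t mem_optimized_dest;  /* was ALLOCNO_MEM_OPTIMIZED_DEST */
	};
	typedef struct ira_emit_data *ira_emit_data_t;

	/* One record per allocno, set up by ira_initiate_emit_data and
	   released by ira_finish_emit_data.  */
	extern ira_emit_data_t ira_allocno_emit_data;

	#define ALLOCNO_EMIT_DATA(a) \
	  (&ira_allocno_emit_data[ALLOCNO_NUM (a)])

	static inline rtx
	allocno_emit_reg (ira_allocno_t a)
	{
	  return ALLOCNO_EMIT_DATA (a)->reg;
	}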

	* ira-lives.c (process_single_reg_class_operands): Call
	ira_init_register_move_cost_if_necessary.  Use
	ira_register_move_cost instead of ira_get_register_move_cost.
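
	Many entries in this log repeat the pair "call
	ira_init_register_move_cost_if_necessary, use
	ira_register_move_cost": the per-mode cost tables are now filled
	lazily, once, and then indexed directly.  A sketch of the pattern,
	assuming the table element is NULL until initialized; the caller
	below is a hypothetical example, not code from the patch:

	static inline void
	ira_init_register_move_cost_if_necessary (enum machine_mode mode)
	{
	  if (ira_register_move_cost[mode] == NULL)
	    ira_init_register_move_cost (mode);
	}

	/* Hypothetical caller: initialize once, then index the table
	   directly instead of going through ira_get_register_move_cost.  */
	static int
	copy_move_cost (enum machine_mode mode, enum reg_class to,
			enum reg_class from)
	{
	  ira_init_register_move_cost_if_necessary (mode);
	  return ira_register_move_cost[mode][to][from];
	}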

	2010-08-13  Vladimir Makarov  <vmakarov@redhat.com>

	* ira-int.h (struct target_ira_int): Remove x_cost_classes.

	* ira-costs.c: Fix formatting.
	(cost_classes, cost_classes_num): Remove.
	(struct cost_classes, cost_classes_t, const_cost_classes_t): New.
	(regno_cost_classes, cost_classes_hash, cost_classes_eq): New.
	(cost_classes_del, cost_classes_htab): New.
	(cost_classes_aclass_cache, cost_classes_mode_cache): New.
	(initiate_regno_cost_classes, setup_cost_classes): New.
	(setup_regno_cost_classes_by_aclass): New.
	(setup_regno_cost_classes_by_mode, finish_regno_cost_classes):
	New.
	(record_reg_classes): Use regno_cost_classes instead of
	cost_classes.  Move checking opposite operand up.
	(record_address_regs): Use regno_cost_classes
	instead of cost_classes.
	(scan_one_insn): Ditto.  Always use a general register.
	(print_allocno_costs): Use regno_cost_classes instead of
	cost_classes.
	(print_pseudo_costs): Ditto.  Use REG_N_REFS.
	(find_costs_and_classes): Set up cost classes for each register,
	also using its mode.  Use regno_cost_classes instead of
	cost_classes.
	(setup_allocno_class_and_costs): Use regno_cost_classes instead of
	cost_classes.
	(free_ira_costs, ira_init_costs): Don't use cost_classes.
	(ira_costs, ira_set_pseudo_classes): Call
	initiate_regno_cost_classes and finish_regno_cost_classes.
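
	The new cost-class machinery replaces the single global
	cost_classes array with a per-pseudo pointer to a hash-consed set
	of candidate classes, computed either from the allocno class or
	from the mode.  A sketch of the data shapes; field layout, table
	sizes, and the hashing details are assumptions, only the names are
	from the entries above:

	struct cost_classes
	{
	  int num;                               /* number of candidate classes */
	  enum reg_class classes[N_REG_CLASSES]; /* the candidates themselves */
	};
	typedef struct cost_classes *cost_classes_t;

	/* For each pseudo REGNO, the set of classes whose costs are worth
	   computing; records are shared through cost_classes_htab.  */
	static cost_classes_t *regno_cost_classes;

	/* Caches so that setup_regno_cost_classes_by_aclass and
	   setup_regno_cost_classes_by_mode reuse one record per allocno
	   class or per machine mode.  */
	static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
	static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];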

	2010-10-04  Vladimir Makarov  <vmakarov@redhat.com>

	* target-def.h (TARGET_IRA_COVER_CLASSES): Remove.

	* target.def (ira_cover_classes): Remove.

	* doc/tm.texi: Remove TARGET_IRA_COVER_CLASSES and
	IRA_COVER_CLASSES.

	* doc/tm.texi.in: Ditto.

	* ira-conflicts.c: Remove mentioning cover classes from the file.
	Use ALLOCNO_CLASS instead of ALLOCNO_COVER_CLASS.  Use
	ALLOCNO_CLASS_COST instead of ALLOCNO_COVER_CLASS_COST.  Fix
	formatting.

	* targhooks.c (default_ira_cover_classes): Remove.

	* targhooks.h (default_ira_cover_classes): Ditto.

	* haifa-sched.c: Remove mentioning cover classes from the file.
	Use ira_reg_pressure_cover instead of ira_reg_class_cover.  Use
	ira_pressure_classes and ira_pressure_classes_num instead of
	ira_reg_class_cover_size and ira_reg_class_cover.  Use
	sched_regno_pressure_class instead of sched_regno_cover_class.
	(mark_regno_birth_or_death, setup_insn_reg_pressure_info): Use
	ira_reg_class_max_nregs instead of ira_reg_class_nregs.

	* ira-int.h: Add 2010 to Copyright.  Remove mentioning cover
	classes from the file.
	(object_hard_regs_t, object_hard_regs_node_t): New typedefs.
	(struct object_hard_regs, struct object_hard_regs_node): New.
	(struct ira_object): New members profitable_hard_regs,
	hard_regs_node, hard_regs_subnodes_start, hard_regs_subnodes_num.
	(struct ira_allocno): Rename cover_class to aclass.  Rename
	cover_class_cost and updated_cover_class_cost to class_cost and
	updated_class_cost.  Remove splay_removed_p and
	left_conflicts_size.  Add new member colorable_p.
	(ALLOCNO_SPLAY_REMOVED_P, ALLOCNO_LEFT_CONFLICTS_SIZE): Remove.
	(ALLOCNO_COLORABLE_P): New macro.
	(ALLOCNO_COVER_CLASS): Rename to ALLOCNO_CLASS.
	(ALLOCNO_COVER_CLASS_COST, ALLOCNO_UPDATED_COVER_CLASS_COST):
	Rename to ALLOCNO_CLASS_COST and ALLOCNO_UPDATED_CLASS_COST.
	(OBJECT_...): Rename parameter C to O.
	(OBJECT_PROFITABLE_HARD_REGS): New macro.
	(OBJECT_HARD_REGS_NODE, OBJECT_HARD_REGS_SUBNODES_START)
	(OBJECT_HARD_REGS_SUBNODES_NUM): New macros.
	(struct target_ira_int): New members x_ira_max_memory_move_cost,
	x_ira_max_register_move_cost, x_ira_max_may_move_in_cost,
	x_ira_max_may_move_out_cost, x_ira_reg_allocno_class_p,
	x_ira_reg_pressure_class_p, x_ira_important_class_nums,
	x_ira_reg_class_superunion.  Rename x_prohibited_class_mode_regs to
	x_ira_prohibited_class_mode_regs.  Rename x_ira_reg_class_union to
	x_ira_reg_class_subunion.
	(ira_max_memory_move_cost, ira_max_register_move_cost)
	(ira_max_may_move_in_cost, ira_max_may_move_out_cost)
	(ira_reg_allocno_class_p, ira_reg_pressure_class_p)
	(ira_important_class_nums, ira_reg_class_superunion): New macros.
	(prohibited_class_mode_regs): Rename to
	ira_prohibited_class_mode_regs.
	(ira_reg_class_union): Rename to ira_reg_class_subunion.
	(ira_debug_class_cover): Rename to ira_debug_allocno_classes.
	(ira_set_allocno_cover_class): Rename to ira_set_allocno_class.
	(ira_tune_allocno_costs_and_cover_classes): Rename to
	ira_tune_allocno_costs.
	(ira_debug_hard_regs_forest): New.
	(ira_object_conflict_iter_init, ira_object_conflict_iter_cond)
	(ira_object_conflict_iter_next): Fix comments.
	(ira_hard_reg_set_intersection_p, hard_reg_set_size): New
	functions.
	(ira_allocate_and_set_costs, ira_allocate_and_copy_costs): Rename
	cover_class to aclass.
	(ira_allocate_and_accumulate_costs): Ditto.
	(ira_allocate_and_set_or_copy_costs): Ditto.

	* opts.c (decode_options): Remove ira_cover_class check.

	* ira-color.c: Remove mentioning cover classes from the file.  Use
	ALLOCNO_CLASS, ALLOCNO_CLASS_COST, and ALLOCNO_UPDATED_CLASS_COST
	instead of ALLOCNO_COVER_CLASS, ALLOCNO_COVER_CLASS_COST, and
	ALLOCNO_UPDATED_COVER_CLASS_COST.  Fix formatting.
	(splay-tree.h): Remove include.
	(allocno_coalesced_p, processed_coalesced_allocno_bitmap): Move
	before copy_freq_compare_func.
	(allocnos_for_spilling, removed_splay_allocno_vec): Remove.
	(object_hard_regs_vec, object_hard_regs_htab, node_check_tick):
	New definitions.
	(hard_regs_roots, hard_regs_node_vec): Ditto.
	(object_hard_regs_hash, object_hard_regs_eq, find_hard_regs): Ditto.
	(insert_hard_regs, init_object_hard_regs, add_object_hard_regs): Ditto.
	(finish_object_hard_regs, object_hard_regs_compare): Ditto.
	(create_new_object_hard_regs_node): Ditto.
	(add_new_object_hard_regs_node_to_forest): Ditto.
	(add_object_hard_regs_to_forest, collect_object_hard_regs_cover):
	Ditto.
	(setup_object_hard_regs_nodes_parent, first_common_ancestor_node):
	Ditto.
	(print_hard_reg_set, print_hard_regs_subforest): Ditto.
	(print_hard_regs_forest, ira_debug_hard_regs_forest): Ditto.
	(remove_unused_object_hard_regs_nodes): Ditto.
	(enumerate_object_hard_regs_nodes): Ditto.
	(object_hard_regs_nodes_num, object_hard_regs_nodes): Ditto.
	(object_hard_regs_subnode_t): Ditto.
	(struct object_hard_regs_subnode): Ditto.
	(object_hard_regs_subnodes, object_hard_regs_subnode_index): Ditto.
	(setup_object_hard_regs_subnode_index): Ditto.
	(get_object_hard_regs_subnodes_num): Ditto.
	(form_object_hard_regs_nodes_forest): Ditto.
	(finish_object_hard_regs_nodes_tree): Ditto.
	(finish_object_hard_regs_nodes_forest): Ditto.
	(allocnos_have_intersected_live_ranges_p): Rename to
	allocnos_conflict_by_live_ranges_p.  Move before
	copy_freq_compare_func.
	(pseudos_have_intersected_live_ranges_p): Rename to
	conflict_by_live_ranges_p.  Move before copy_freq_compare_func.
	(setup_left_conflict_sizes_p, update_left_conflict_sizes_p): Ditto.
	(empty_profitable_hard_regs, setup_profitable_hard_regs): Ditto.
	(update_copy_costs): Remove assert.  Skip cost update if the hard
	reg does not belong to the class.
	(assign_hard_reg): Process only profitable hard regs.
	(uncolorable_allocnos_num): Make it scalar.
	(allocno_spill_priority): Use ALLOCNO_EXCESS_PRESSURE_POINTS_NUM
	and ira_reg_class_max_nregs instead of ALLOCNO_LEFT_CONFLICTS_SIZE
	and ira_reg_class_nregs.
	(bucket_allocno_compare_func): Check frequency first.
	(sort_bucket): Add compare function as a parameter.
	(add_allocno_to_ordered_bucket): Assume no coalesced allocnos.
	(uncolorable_allocnos_splay_tree, USE_SPLAY_P): Remove.
	(push_allocno_to_stack): Rewrite for checking new allocno
	colorability.
	(remove_allocno_from_bucket_and_push): Print cost too.  Remove
	assert.
	(push_only_colorable): Pass new parameter to sort_bucket.
	(push_allocno_to_spill): Remove.
	(allocno_spill_priority_compare): Make it inline and rewrite.
	(splay_tree_allocate, splay_tree_free): Remove.
	(allocno_spill_sort_compare): New function.
	(push_allocnos_to_stack): Sort allocnos for spilling once.  Don't
	build and use splay tree.  Choose first allocno in uncolorable
	allocno bucket to spill.  Remove setting spill cost.
	(all_conflicting_hard_regs): Remove.
	(setup_allocno_available_regs_num): Check only profitable hard
	regs.  Print info about hard regs nodes.
	(setup_allocno_left_conflicts_size): Remove.
	(put_allocno_into_bucket): Don't call
	setup_allocno_left_conflicts_size.  Use
	setup_left_conflict_sizes_p.
	(improve_allocation): New.
	(color_allocnos): Call setup_profitable_hard_regs,
	form_object_hard_regs_nodes_forest, improve_allocation,
	finish_object_hard_regs_nodes_forest.  Setup spill cost.
	(print_loop_title): Use pressure classes.
	(color_allocnos): Ditto.
	(do_coloring): Remove allocation and freeing splay_tree_node_pool
	and allocnos_for_spilling.
	(ira_sort_regnos_for_alter_reg): Don't setup members
	{first,next}_coalesced_allocno.
	(color): Remove allocating and freeing removed_splay_allocno_vec.
	(fast_allocation): Use ira_prohibited_class_mode_regs instead of
	prohibited_class_mode_regs.

	* ira-lives.c: Remove mentioning cover classes from the file.  Fix
	formatting.
	(update_allocno_pressure_excess_length): Use pressure classes.
	(inc_register_pressure, dec_register_pressure): Check for pressure
	class.
	(mark_pseudo_regno_live, mark_pseudo_regno_subword_live): Use
	pressure class.  Use ira_reg_class_max_nregs instead of
	ira_reg_class_nregs.
	(mark_pseudo_regno_dead, mark_pseudo_regno_subword_dead): Ditto.
	(mark_hard_reg_live, mark_hard_reg_dead): Use pressure class.
	(single_reg_class): Use ira_reg_class_max_nregs instead of
	ira_reg_class_nregs.
	(process_bb_node_lives): Use pressure classes.

	* ira-emit.c: Remove mentioning cover classes from the file.  Use
	ALLOCNO_CLASS instead of ALLOCNO_COVER_CLASS.  Fix formatting.
	(change_loop): Use pressure classes.
	(modify_move_list): Call ira_set_allocno_class instead of
	ira_set_allocno_cover_class.

	* ira-build.c: Remove mentioning cover classes from the file.  Use
	ALLOCNO_CLASS and ALLOCNO_CLASS_COST instead of
	ALLOCNO_COVER_CLASS and ALLOCNO_COVER_CLASS_COST.  Use
	ALLOCNO_UPDATED_CLASS_COST instead of
	ALLOCNO_UPDATED_COVER_CLASS_COST.  Fix formatting.
	(ira_create_object): Initiate OBJECT_PROFITABLE_HARD_REGS.
	(ira_create_allocno): Remove initialization of
	ALLOCNO_SPLAY_REMOVED_P, ALLOCNO_LEFT_CONFLICTS_SIZE.  Initialize
	ALLOCNO_COLORABLE_P.
	(ira_set_allocno_cover_class): Rename to ira_set_allocno_class.
	Update conflict regs for the objects.
	(create_cap_allocno): Remove assert.  Don't propagate
	ALLOCNO_AVAILABLE_REGS_NUM.
	(ira_free_allocno_costs): New function.
	(finish_allocno): Change a part of code into call of
	ira_free_allocno_costs.
	(low_pressure_loop_node_p): Use pressure classes.
	(object_range_compare_func): Don't compare classes.
	(setup_min_max_conflict_allocno_ids): Ditto.

	* loop-invariant.c: Remove mentioning cover classes from the file.
	Use ira_pressure_classes and ira_pressure_classes_num instead of
	ira_reg_class_cover_size and ira_reg_class_cover.  Fix formatting.
	(get_cover_class_and_nregs): Rename to
	get_pressure_class_and_nregs.  Use ira_reg_class_max_nregs instead
	of ira_reg_class_nregs.  Use reg_allocno_class instead of
	reg_cover_class.
	(get_inv_cost): Use ira_stack_reg_pressure_class instead of
	STACK_REG_COVER_CLASS.
	(get_regno_cover_class): Rename to get_regno_pressure_class.
	(move_loop_invariants): Initialize and finalize regstat.

	* ira.c: Remove mentioning cover classes from the file.  Add
	comments about coloring without cover classes.  Use ALLOCNO_CLASS
	instead of ALLOCNO_COVER_CLASS.  Fix formatting.
	(alloc_reg_class_subclasses, setup_reg_subclasses): Move them before
	setup_class_subset_and_memory_move_costs.
	(setup_stack_reg_pressure_class, setup_pressure_classes): New.
	(setup_cover_and_important_classes): Rename to
	setup_allocno_and_important_classes.
	(setup_class_translate_array): New.
	(setup_class_translate): Call it for allocno and pressure classes.
	(cover_class_order): Rename to allocno_class_order.
	(comp_reg_classes_func): Use ira_allocno_class_translate instead
	of ira_class_translate.
	(reorder_important_classes): Set up ira_important_class_nums.
	(setup_reg_class_relations): Set up ira_reg_class_superunion.
	(print_class_cover): Rename to print_classes.  Add parameter.
	(ira_debug_class_cover): Rename to ira_debug_allocno_classes.
	Print pressure classes too.
	(find_reg_class_closure): Rename to find_reg_classes.  Don't call
	setup_reg_subclasses.
	(ira_hard_regno_cover_class): Rename to
	ira_hard_regno_allocno_class.
	(ira_reg_class_nregs): Rename to ira_reg_class_max_nregs.
	(setup_prohibited_class_mode_regs): Use
	ira_prohibited_class_mode_regs instead of
	prohibited_class_mode_regs.
	(clarify_prohibited_class_mode_regs): New function.
	(ira_init_register_move_cost): Set up ira_max_register_move_cost,
	ira_max_may_move_in_cost, and ira_max_may_move_out_cost.
	(ira_init_once): Initialize them.
	(free_register_move_costs): Process them.
	(ira_init): Move calls of find_reg_classes and
	setup_hard_regno_aclass after setup_prohibited_class_mode_regs.
	Call clarify_prohibited_class_mode_regs.
	(ira_no_alloc_reg): Remove.
	(too_high_register_pressure_p): Use pressure classes.

	* sched-deps.c: Remove mentioning cover classes from the file.
	Use ira_reg_pressure_cover instead of ira_reg_class_cover.  Use
	ira_pressure_classes and ira_pressure_classes_num instead of
	ira_reg_class_cover_size and ira_reg_class_cover.
	(mark_insn_hard_regno_birth, mark_hard_regno_death): Use
	sched_regno_pressure_class instead of sched_regno_cover_class.
	(mark_insn_pseudo_birth, mark_pseudo_death): Ditto.  Use
	ira_reg_class_max_nregs instead of ira_reg_class_nregs.

	* ira.h: Add 2010 to Copyright.
	(ira_no_alloc_reg): Remove external.
	(struct target_ira): Rename x_ira_hard_regno_cover_class,
	x_ira_reg_class_cover_size, x_ira_reg_class_cover, and
	x_ira_class_translate to x_ira_hard_regno_allocno_class,
	x_ira_allocno_classes_num, x_ira_allocno_classes, and
	x_ira_allocno_class_translate.  Add x_ira_pressure_classes_num,
	x_ira_pressure_classes, x_ira_pressure_class_translate, and
	x_ira_stack_reg_pressure_class.  Rename x_ira_reg_class_nregs to
	x_ira_reg_class_max_nregs.  Add x_ira_reg_class_min_nregs and
	x_ira_no_alloc_regs.
	(ira_hard_regno_cover_class): Rename to
	ira_hard_regno_allocno_class.
	(ira_reg_class_cover_size, ira_reg_class_cover): Rename to
	ira_allocno_classes_num and ira_allocno_classes.
	(ira_class_translate): Rename to ira_allocno_class_translate.
	(ira_pressure_classes_num, ira_pressure_classes): New definitions.
	(ira_pressure_class_translate, ira_stack_reg_pressure_class): Ditto.
	(ira_reg_class_nregs): Rename to ira_reg_class_max_nregs.
	(ira_reg_class_min_nregs, ira_stack_reg_pressure_class): New.
	(ira_no_alloc_regs): New.
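
	The renamed and new target_ira members give consumers two parallel
	sets of classes: allocno classes for coloring and pressure classes
	for pressure-sensitive passes such as the scheduler and
	loop-invariant motion.  A usage sketch of the new pressure-class
	interface, assuming the 2011-era declarations in ira.h and rtl.h;
	the local pressure array and the function itself are illustrative
	only, not code from the patch:

	static int pressure[N_REG_CLASSES];  /* hypothetical per-class counters */

	static void
	reset_pressure_and_count (int regno)
	{
	  int i;
	  enum reg_class cl, pclass;

	  /* Walk the pressure classes instead of the old cover classes.  */
	  for (i = 0; i < ira_pressure_classes_num; i++)
	    pressure[ira_pressure_classes[i]] = 0;

	  /* Map a pseudo's allocno class to the pressure class that
	     accounts for it.  */
	  cl = reg_allocno_class (regno);
	  pclass = ira_pressure_class_translate[cl];
	  pressure[pclass]++;
	}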

	* ira-costs.c: Add 2010 to Copyright.  Remove mentioning cover
	classes from the file.  Use ALLOCNO_CLASS instead of
	ALLOCNO_COVER_CLASS.  Use ALLOCNO_CLASS_COST instead of
	ALLOCNO_COVER_CLASS_COST.
	(regno_cover_class): Rename to regno_aclass.
	(record_reg_classes): Use ira_reg_class_subunion instead of
	ira_reg_class_union.
	(record_address_regs): Check overflow.
	(scan_one_insn): Ditto.
	(print_allocno_costs): Print total mem cost for regional
	allocation.
	(print_pseudo_costs): Use REG_N_REFS.
	(find_costs_and_classes): Use classes intersected with them on the
	1st pass. Check overflow.  Use ira_reg_class_subunion instead of
	ira_reg_class_union.  Use ira_allocno_class_translate and
	regno_aclass instead of ira_class_translate and regno_cover_class.
	Modify code for finding regno_aclass.  Setup preferred classes for
	the next pass.
	(setup_allocno_cover_class_and_costs): Rename to
	setup_allocno_class_and_costs.  Use regno_aclass instead of
	regno_cover_class.  Use ira_set_allocno_class instead of
	ira_set_allocno_cover_class.
	(init_costs, finish_costs): Use regno_aclass instead of
	regno_cover_class.
	(ira_costs): Use setup_allocno_class_and_costs instead of
	setup_allocno_cover_class_and_costs.
	(ira_tune_allocno_costs_and_cover_classes): Rename to
	ira_tune_allocno_costs.  Check overflow.  Skip conflict hard regs
	by processing objects.  Use ira_reg_class_max_nregs instead of
	ira_reg_class_nregs.

	* rtl.h (reg_cover_class): Rename to reg_allocno_class.

	* sched-int.h: Remove mentioning cover classes from the file.
	(sched_regno_cover_class): Rename to sched_regno_pressure_class.

	* reginfo.c: Add 2010 to Copyright.  Remove mentioning cover
	classes from the file.
	(struct reg_pref): Rename coverclass into allocnoclass.
	(reg_cover_class): Rename to reg_allocno_class.

	* Makefile.in (ira-color.o): Remove SPLAY_TREE_H from
	dependencies.

	* config/alpha/alpha.h (IRA_COVER_CLASSES): Remove.

	* config/arm/arm.h (IRA_COVER_CLASSES): Ditto.

	* config/avr/avr.h (IRA_COVER_CLASSES): Ditto.

	* config/bfin/bfin.h (IRA_COVER_CLASSES): Ditto.

	* config/cris/cris.h (IRA_COVER_CLASSES): Ditto.

	* config/fr30/fr30.h (IRA_COVER_CLASSES): Ditto.

	* config/frv/frv.h (IRA_COVER_CLASSES): Ditto.

	* config/h8300/h8300.h (IRA_COVER_CLASSES): Ditto.

	* config/i386/i386.h (STACK_REG_COVER_CLASS): Ditto.

	* config/i386/i386.c (TARGET_IRA_COVER_CLASSES)
	(i386_ira_cover_classes): Ditto.

	* config/ia64/ia64.h (IRA_COVER_CLASSES): Ditto.

	* config/iq2000/iq2000.h (IRA_COVER_CLASSES): Ditto.

	* config/m32r/m32r.h (IRA_COVER_CLASSES): Ditto.

	* config/m68k/m68k.h (IRA_COVER_CLASSES): Ditto.

	* config/mcore/mcore.h (IRA_COVER_CLASSES): Ditto.

	* config/mep/mep.h (IRA_COVER_CLASSES): Ditto.

	* config/mips/mips.c (TARGET_IRA_COVER_CLASSES)
	(mips_ira_cover_classes): Ditto.

	* config/mn10300/mn10300.h (IRA_COVER_CLASSES): Ditto.

	* config/moxie/moxie.h (IRA_COVER_CLASSES): Ditto.

	* config/pa/pa64-regs.h (IRA_COVER_CLASSES): Ditto.

	* config/pa/pa32-regs.h (IRA_COVER_CLASSES): Ditto.

	* config/picochip/picochip.h (IRA_COVER_CLASSES): Ditto.

	* config/rs6000/rs6000.h (IRA_COVER_CLASSES_PRE_VSX)
	(IRA_COVER_CLASSES_VSX): Ditto.

	* config/rs6000/rs6000.c (TARGET_IRA_COVER_CLASSES)
	(rs6000_ira_cover_classes): Ditto.

	* config/rx/rx.h (IRA_COVER_CLASSES): Ditto.

	* config/s390/s390.h (IRA_COVER_CLASSES): Ditto.

	* config/score/score.h (IRA_COVER_CLASSES): Ditto.

	* config/sh/sh.h (IRA_COVER_CLASSES): Ditto.

	* config/sparc/sparc.h (IRA_COVER_CLASSES): Ditto.

	* config/spu/spu.h (IRA_COVER_CLASSES): Ditto.

	* config/stormy16/stormy16.h (IRA_COVER_CLASSES): Ditto.

	* config/v850/v850.h (IRA_COVER_CLASSES): Ditto.

	* config/vax/vax.h (IRA_COVER_CLASSES): Ditto.

	* config/xtensa/xtensa.h (IRA_COVER_CLASSES): Ditto.

From-SVN: r171649
View File

@ -3318,7 +3318,7 @@ ira-conflicts.o: ira-conflicts.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
ira-color.o: ira-color.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(TARGET_H) $(RTL_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) \
$(EXPR_H) $(BASIC_BLOCK_H) $(DIAGNOSTIC_CORE_H) $(TM_P_H) reload.h $(PARAMS_H) \
$(DF_H) $(SPLAY_TREE_H) $(IRA_INT_H)
$(DF_H) $(IRA_INT_H)
ira-emit.o: ira-emit.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(REGS_H) $(RTL_H) $(TM_P_H) $(TARGET_H) $(FLAGS_H) hard-reg-set.h \
$(BASIC_BLOCK_H) $(EXPR_H) $(RECOG_H) $(PARAMS_H) $(TIMEVAR_H) \

View File

@ -511,19 +511,6 @@ enum reg_class {
{0x00000000, 0x7fffffff}, /* FLOAT_REGS */ \
{0xffffffff, 0xffffffff} }
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, FLOAT_REGS, LIM_REG_CLASSES \
}
/* The same information, inverted:
Return the class number of the smallest class containing
reg number REGNO. This could be a conditional expression

View File

@ -1152,20 +1152,6 @@ enum reg_class
or could index an array. */
#define REGNO_REG_CLASS(REGNO) arm_regno_class (REGNO)
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, FPA_REGS, CIRRUS_REGS, VFP_REGS, IWMMXT_GR_REGS, IWMMXT_REGS,\
LIM_REG_CLASSES \
}
/* FPA registers can't do subreg as all values are reformatted to internal
precision. In VFPv1, VFP registers could only be accessed in the mode
they were set, so subregs would be invalid there too. However, we don't

View File

@ -296,19 +296,6 @@ enum reg_class {
#define REGNO_REG_CLASS(R) avr_regno_reg_class(R)
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, LIM_REG_CLASSES \
}
#define BASE_REG_CLASS (reload_completed ? BASE_POINTER_REGS : POINTER_REGS)
#define INDEX_REG_CLASS NO_REGS

View File

@ -664,19 +664,6 @@ enum reg_class
: (REGNO) >= REG_RETS ? PROLOGUE_REGS \
: NO_REGS)
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
MOST_REGS, AREGS, CCREGS, LIM_REG_CLASSES \
}
/* When this hook returns true for MODE, the compiler allows
registers explicitly used in the rtl to be used as spill registers
but prevents the compiler from extending the lifetime of these

View File

@ -550,8 +550,6 @@ enum reg_class
#define INDEX_REG_CLASS GENERAL_REGS
#define IRA_COVER_CLASSES { GENERAL_REGS, SPECIAL_REGS, LIM_REG_CLASSES }
#define REG_CLASS_FROM_LETTER(C) \
( \
(C) == 'a' ? ACR_REGS : \

View File

@ -290,11 +290,6 @@ enum reg_class
#define GENERAL_REGS REAL_REGS
#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
#define IRA_COVER_CLASSES \
{ \
REAL_REGS, MULTIPLY_64_REG, LIM_REG_CLASSES \
}
/* An initializer containing the names of the register classes as C string
constants. These names are used in writing some of the debugging dumps. */
#define REG_CLASS_NAMES \

View File

@ -970,21 +970,6 @@ enum reg_class
{ 0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0x1fff}, /* ALL_REGS */\
}
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GPR_REGS, FPR_REGS, ACC_REGS, ICR_REGS, FCR_REGS, ICC_REGS, FCC_REGS, \
ACCG_REGS, SPR_REGS, \
LIM_REG_CLASSES \
}
/* A C expression whose value is a register class containing hard register
REGNO. In general there is more than one such class; choose a class which
is "minimal", meaning that no smaller class also contains the register. */

View File

@ -330,19 +330,6 @@ enum reg_class {
{ "NO_REGS", "COUNTER_REGS", "SOURCE_REGS", "DESTINATION_REGS", \
"GENERAL_REGS", "MAC_REGS", "ALL_REGS", "LIM_REGS" }
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, MAC_REGS, LIM_REG_CLASSES \
}
/* Define which registers fit in which classes.
This is an initializer for a vector of HARD_REG_SET
of length N_REG_CLASSES. */

@ -28434,22 +28434,6 @@ ix86_free_from_memory (enum machine_mode mode)
}
}
/* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
same. */
static const reg_class_t *
i386_ira_cover_classes (void)
{
static const reg_class_t sse_fpmath_classes[] = {
GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
};
static const reg_class_t no_sse_fpmath_classes[] = {
GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
};
return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS.
Put float CONST_DOUBLE in the constant pool instead of fp regs.
@ -35349,9 +35333,6 @@ ix86_autovectorize_vector_sizes (void)
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required

@ -870,9 +870,6 @@ enum target_cpu_default
|| ((MODE) == DFmode && !(TARGET_SSE2 && TARGET_SSE_MATH)) \
|| (MODE) == XFmode)
/* Cover class containing the stack registers. */
#define STACK_REG_COVER_CLASS FLOAT_REGS
/* Number of actual hardware registers.
The hardware registers are assigned numbers for the compiler
from 0 to just below FIRST_PSEUDO_REGISTER.

@ -774,19 +774,6 @@ enum reg_class
0xFFFFFFFF, 0xFFFFFFFF, 0x3FFF }, \
}
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
PR_REGS, BR_REGS, AR_M_REGS, AR_I_REGS, GR_REGS, FR_REGS, LIM_REG_CLASSES \
}
/* A C expression whose value is a register class containing hard register
REGNO. In general there is more than one such class; choose a class which
is "minimal", meaning that no smaller class also contains the register. */

@ -209,11 +209,6 @@ enum reg_class
#define N_REG_CLASSES (int) LIM_REG_CLASSES
#define IRA_COVER_CLASSES \
{ \
GR_REGS, LIM_REG_CLASSES \
}
#define REG_CLASS_NAMES \
{ \
"NO_REGS", \

@ -459,11 +459,6 @@ enum reg_class
NO_REGS, CARRY_REG, ACCUM_REGS, GENERAL_REGS, ALL_REGS, LIM_REG_CLASSES
};
#define IRA_COVER_CLASSES \
{ \
ACCUM_REGS, GENERAL_REGS, LIM_REG_CLASSES \
}
#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
/* Give names of register classes as strings for dump file. */

@ -496,10 +496,6 @@ extern enum reg_class regno_reg_class[];
#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \
((((CLASS1) == FP_REGS) != ((CLASS2) == FP_REGS)) ? 4 : 2)
#define IRA_COVER_CLASSES \
{ \
ALL_REGS, LIM_REG_CLASSES \
}
/* Stack layout; function entry, exit and calling. */

@ -324,11 +324,6 @@ enum reg_class
#define N_REG_CLASSES (int) LIM_REG_CLASSES
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, C_REGS, LIM_REG_CLASSES \
}
/* Give names of register classes as strings for dump file. */
#define REG_CLASS_NAMES \

@ -407,8 +407,6 @@ enum reg_class
#define REGNO_REG_CLASS(REGNO) (enum reg_class) mep_regno_reg_class (REGNO)
#define IRA_COVER_CLASSES { GENERAL_REGS, CONTROL_REGS, CR_REGS, CCR_REGS, LIM_REG_CLASSES }
#define BASE_REG_CLASS GENERAL_REGS
#define INDEX_REG_CLASS GENERAL_REGS

@ -11017,29 +11017,6 @@ mips_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
+ memory_move_secondary_cost (mode, rclass, in));
}
/* Implement TARGET_IRA_COVER_CLASSES. */
static const reg_class_t *
mips_ira_cover_classes (void)
{
static const reg_class_t acc_classes[] = {
GR_AND_ACC_REGS, FP_REGS, COP0_REGS, COP2_REGS, COP3_REGS,
ST_REGS, LIM_REG_CLASSES
};
static const reg_class_t no_acc_classes[] = {
GR_REGS, FP_REGS, COP0_REGS, COP2_REGS, COP3_REGS,
ST_REGS, LIM_REG_CLASSES
};
/* Don't allow the register allocators to use LO and HI in MIPS16 mode,
which has no MTLO or MTHI instructions. Also, using GR_AND_ACC_REGS
as a cover class only works well when we keep per-register costs.
Using it when not optimizing can cause us to think accumulators
have the same cost as GPRs in cases where GPRs are actually much
cheaper. */
return TARGET_MIPS16 || !optimize ? no_acc_classes : acc_classes;
}
/* Return the register class required for a secondary register when
copying between one of the registers in RCLASS and value X, which
has mode MODE. X is the source of the move if IN_P, otherwise it
@ -16618,9 +16595,6 @@ mips_shift_truncation_mask (enum machine_mode mode)
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES mips_ira_cover_classes
#undef TARGET_ASM_FINAL_POSTSCAN_INSN
#define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn

@ -309,19 +309,6 @@ enum reg_class
{ 0xffffffff, 0xfffff } /* ALL_REGS */ \
}
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, FP_REGS, MDR_REGS, LIM_REG_CLASSES \
}
/* The same information, inverted:
Return the class number of the smallest class containing
reg number REGNO. This could be a conditional expression

@ -135,15 +135,6 @@ enum reg_class
};
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES { GENERAL_REGS, LIM_REG_CLASSES }
#define REG_CLASS_CONTENTS \
{ { 0x00000000 }, /* Empty */ \
{ 0x0003FFFF }, /* $fp, $sp, $r0 to $r13, ?fp */ \

@ -294,19 +294,6 @@ enum reg_class { NO_REGS, R1_REGS, GENERAL_REGS, FPUPPER_REGS, FP_REGS,
{0x00000000, 0x00000000, 0x01000000}, /* SHIFT_REGS */ \
{0xfffffffe, 0xffffffff, 0x03ffffff}} /* ALL_REGS */
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, FP_REGS, SHIFT_REGS, LIM_REG_CLASSES \
}
/* Defines invalid mode changes. */
#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \

@ -230,19 +230,6 @@ enum reg_class { NO_REGS, R1_REGS, GENERAL_REGS, FPUPPER_REGS, FP_REGS,
{0x00000000, 0x10000000}, /* SHIFT_REGS */ \
{0xfffffffe, 0x3fffffff}} /* ALL_REGS */
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, FP_REGS, SHIFT_REGS, LIM_REG_CLASSES \
}
/* Defines invalid mode changes. */
#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \

@ -292,19 +292,6 @@ enum reg_class
#define N_REG_CLASSES (int) LIM_REG_CLASSES
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GR_REGS, LIM_REG_CLASSES \
}
/* The names of the register classes */
#define REG_CLASS_NAMES \

@ -1208,8 +1208,6 @@ static reg_class_t rs6000_secondary_reload (bool, rtx, reg_class_t,
enum machine_mode,
struct secondary_reload_info *);
static const reg_class_t *rs6000_ira_cover_classes (void);
const int INSN_NOT_AVAILABLE = -1;
static enum machine_mode rs6000_eh_return_filter_mode (void);
static bool rs6000_can_eliminate (const int, const int);
@ -1636,9 +1634,6 @@ static const struct default_options rs6000_option_optimization_table[] =
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES rs6000_ira_cover_classes
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
@ -15288,26 +15283,6 @@ rs6000_secondary_reload_ppc64 (rtx reg, rtx mem, rtx scratch, bool store_p)
return;
}
/* Target hook to return the cover classes for Integrated Register Allocator.
Cover classes is a set of non-intersected register classes covering all hard
registers used for register allocation purpose. Any move between two
registers of a cover class should be cheaper than load or store of the
registers. The value is array of register classes with LIM_REG_CLASSES used
as the end marker.
We need two IRA_COVER_CLASSES, one for pre-VSX, and the other for VSX to
account for the Altivec and Floating registers being subsets of the VSX
register set under VSX, but distinct register sets on pre-VSX machines. */
static const reg_class_t *
rs6000_ira_cover_classes (void)
{
static const reg_class_t cover_pre_vsx[] = IRA_COVER_CLASSES_PRE_VSX;
static const reg_class_t cover_vsx[] = IRA_COVER_CLASSES_VSX;
return (TARGET_VSX) ? cover_vsx : cover_pre_vsx;
}
/* Allocate a 64-bit stack slot to be used for copying SDmode
values through if this function has any SDmode references. */

@ -1252,34 +1252,6 @@ enum reg_class
{ 0xffffffff, 0xffffffff, 0xffffffff, 0x0003ffff } /* ALL_REGS */ \
}
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker.
We need two IRA_COVER_CLASSES, one for pre-VSX, and the other for VSX to
account for the Altivec and Floating registers being subsets of the VSX
register set. */
#define IRA_COVER_CLASSES_PRE_VSX \
{ \
GENERAL_REGS, SPECIAL_REGS, FLOAT_REGS, ALTIVEC_REGS, /* VSX_REGS, */ \
/* VRSAVE_REGS,*/ VSCR_REGS, SPE_ACC_REGS, SPEFSCR_REGS, \
/* MQ_REGS, LINK_REGS, CTR_REGS, */ \
CR_REGS, CA_REGS, LIM_REG_CLASSES \
}
#define IRA_COVER_CLASSES_VSX \
{ \
GENERAL_REGS, SPECIAL_REGS, /* FLOAT_REGS, ALTIVEC_REGS, */ VSX_REGS, \
/* VRSAVE_REGS,*/ VSCR_REGS, SPE_ACC_REGS, SPEFSCR_REGS, \
/* MQ_REGS, LINK_REGS, CTR_REGS, */ \
CR_REGS, CA_REGS, LIM_REG_CLASSES \
}
/* The same information, inverted:
Return the class number of the smallest class containing
reg number REGNO. This could be a conditional expression

@ -187,11 +187,6 @@ enum reg_class
{ 0x0000ffff } /* All registers. */ \
}
#define IRA_COVER_CLASSES \
{ \
GR_REGS, LIM_REG_CLASSES \
}
#define SMALL_REGISTER_CLASSES 0
#define N_REG_CLASSES (int) LIM_REG_CLASSES
#define CLASS_MAX_NREGS(CLASS, MODE) ((GET_MODE_SIZE (MODE) \

@ -468,19 +468,6 @@ enum reg_class
{ 0xffffffff, 0x0000003f }, /* ALL_REGS */ \
}
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, FP_REGS, CC_REGS, ACCESS_REGS, LIM_REG_CLASSES \
}
/* In some case register allocation order is not enough for IRA to
generate a good code. The following macro (if defined) increases
cost of REGNO for a pseudo approximately by pseudo usage frequency

@ -390,18 +390,6 @@ enum reg_class
also contains the register. */
#define REGNO_REG_CLASS(REGNO) (enum reg_class) score_reg_class (REGNO)
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
G32_REGS, CE_REGS, SP_REGS, LIM_REG_CLASSES \
}
/* A macro whose definition is the name of the class to which a
valid base register must belong. A base register is one used in
an address which is the register value plus a displacement. */

@ -1133,20 +1133,6 @@ enum reg_class
extern enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER];
#define REGNO_REG_CLASS(REGNO) regno_reg_class[(REGNO)]
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, FP_REGS, PR_REGS, T_REGS, MAC_REGS, TARGET_REGS, \
FPUL_REGS, LIM_REG_CLASSES \
}
/* When this hook returns true for MODE, the compiler allows
registers explicitly used in the rtl to be used as spill registers
but prevents the compiler from extending the lifetime of these

@ -972,19 +972,6 @@ extern enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
#define REGNO_REG_CLASS(REGNO) sparc_regno_reg_class[(REGNO)]
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, EXTRA_FP_REGS, FPCC_REGS, LIM_REG_CLASSES \
}
/* Defines invalid mode changes. Borrowed from pa64-regs.h.
SImode loads to floating-point registers are not zero-extended.

@ -196,9 +196,6 @@ enum reg_class {
LIM_REG_CLASSES
};
/* SPU is simple, it really only has one class of registers. */
#define IRA_COVER_CLASSES { GENERAL_REGS, LIM_REG_CLASSES }
#define N_REG_CLASSES (int) LIM_REG_CLASSES
#define REG_CLASS_NAMES \

@ -179,11 +179,6 @@ enum reg_class
#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, LIM_REG_CLASSES \
}
#define REG_CLASS_NAMES \
{ \
"NO_REGS", \

@ -304,11 +304,6 @@ enum reg_class
#define N_REG_CLASSES (int) LIM_REG_CLASSES
#define IRA_COVER_CLASSES \
{ \
GENERAL_REGS, LIM_REG_CLASSES \
}
/* Give names of register classes as strings for dump file. */
#define REG_CLASS_NAMES \

@ -226,15 +226,6 @@ enum reg_class { NO_REGS, ALL_REGS, LIM_REG_CLASSES };
#define REG_CLASS_NAMES \
{ "NO_REGS", "ALL_REGS" }
/* The following macro defines cover classes for Integrated Register
Allocator. Cover classes is a set of non-intersected register
classes covering all hard registers used for register allocation
purpose. Any move between two registers of a cover class should be
cheaper than load or store of the registers. The macro value is
array of register classes with LIM_REG_CLASSES used as the end
marker. */
#define IRA_COVER_CLASSES { ALL_REGS, LIM_REG_CLASSES }
/* Return the maximum number of consecutive registers
needed to represent mode MODE in a register of class CLASS. */
#define CLASS_MAX_NREGS(CLASS, MODE) \

@ -432,11 +432,6 @@ enum reg_class
{ 0xffffffff, 0x0000000f } /* all registers */ \
}
#define IRA_COVER_CLASSES \
{ \
BR_REGS, FP_REGS, ACC_REG, AR_REGS, LIM_REG_CLASSES \
}
/* A C expression whose value is a register class containing hard
register REGNO. In general there is more that one such class;
choose a class which is "minimal", meaning that no smaller class

@ -2854,36 +2854,6 @@ as below:
@end smallexample
@end defmac
@deftypefn {Target Hook} {const reg_class_t *} TARGET_IRA_COVER_CLASSES (void)
Return an array of cover classes for the Integrated Register Allocator
(@acronym{IRA}). Cover classes are a set of non-intersecting register
classes covering all hard registers used for register allocation
purposes. If a move between two registers in the same cover class is
possible, it should be cheaper than a load or store of the registers.
The array is terminated by a @code{LIM_REG_CLASSES} element.
The order of cover classes in the array is important. If two classes
have the same cost of usage for a pseudo, the class occurred first in
the array is chosen for the pseudo.
This hook is called once at compiler startup, after the command-line
options have been processed. It is then re-examined by every call to
@code{target_reinit}.
The default implementation returns @code{IRA_COVER_CLASSES}, if defined,
otherwise there is no default implementation. You must define either this
macro or @code{IRA_COVER_CLASSES} in order to use the integrated register
allocator with Chaitin-Briggs coloring. If the macro is not defined,
the only available coloring algorithm is Chow's priority coloring.
This hook must not be modified from @code{NULL} to non-@code{NULL} or
vice versa by command-line option processing.
@end deftypefn
@defmac IRA_COVER_CLASSES
See the documentation for @code{TARGET_IRA_COVER_CLASSES}.
@end defmac
@node Old Constraints
@section Obsolete Macros for Defining Constraints
@cindex defining constraints, obsolete method

@ -2842,36 +2842,6 @@ as below:
@end smallexample
@end defmac
@hook TARGET_IRA_COVER_CLASSES
Return an array of cover classes for the Integrated Register Allocator
(@acronym{IRA}). Cover classes are a set of non-intersecting register
classes covering all hard registers used for register allocation
purposes. If a move between two registers in the same cover class is
possible, it should be cheaper than a load or store of the registers.
The array is terminated by a @code{LIM_REG_CLASSES} element.
The order of cover classes in the array is important. If two classes
have the same cost of usage for a pseudo, the class occurred first in
the array is chosen for the pseudo.
This hook is called once at compiler startup, after the command-line
options have been processed. It is then re-examined by every call to
@code{target_reinit}.
The default implementation returns @code{IRA_COVER_CLASSES}, if defined,
otherwise there is no default implementation. You must define either this
macro or @code{IRA_COVER_CLASSES} in order to use the integrated register
allocator with Chaitin-Briggs coloring. If the macro is not defined,
the only available coloring algorithm is Chow's priority coloring.
This hook must not be modified from @code{NULL} to non-@code{NULL} or
vice versa by command-line option processing.
@end deftypefn
@defmac IRA_COVER_CLASSES
See the documentation for @code{TARGET_IRA_COVER_CLASSES}.
@end defmac
@node Old Constraints
@section Obsolete Macros for Defining Constraints
@cindex defining constraints, obsolete method
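
For reference, the hook that the two documentation hunks above delete had the shape sketched below. This is a minimal sketch of the pattern rather than code from the patch: the WIDE_REGS class and the TARGET_HAS_WIDE_REGS flag are placeholders, and the sketch only illustrates what the removed text documents, namely that the array is terminated by LIM_REG_CLASSES and that an earlier class wins when two classes cost the same for a pseudo.

static const reg_class_t *
example_ira_cover_classes (void)
{
  /* Earlier entries are preferred on cost ties, so the order below
     expresses a preference for WIDE_REGS over FLOAT_REGS on targets
     that have the wider register file.  */
  static const reg_class_t wide_classes[] = {
    GENERAL_REGS, WIDE_REGS, FLOAT_REGS, LIM_REG_CLASSES
  };
  static const reg_class_t narrow_classes[] = {
    GENERAL_REGS, FLOAT_REGS, LIM_REG_CLASSES
  };

  return TARGET_HAS_WIDE_REGS ? wide_classes : narrow_classes;
}

#undef TARGET_IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES example_ira_cover_classes
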

@ -573,11 +573,11 @@ schedule_insns (void)
up. */
bool sched_pressure_p;
/* Map regno -> its cover class. The map defined only when
/* Map regno -> its pressure class. The map defined only when
SCHED_PRESSURE_P is true. */
enum reg_class *sched_regno_cover_class;
enum reg_class *sched_regno_pressure_class;
/* The current register pressure. Only elements corresponding cover
/* The current register pressure. Only elements corresponding pressure
classes are defined. */
static int curr_reg_pressure[N_REG_CLASSES];
@ -607,39 +607,41 @@ sched_init_region_reg_pressure_info (void)
static void
mark_regno_birth_or_death (int regno, bool birth_p)
{
enum reg_class cover_class;
enum reg_class pressure_class;
cover_class = sched_regno_cover_class[regno];
pressure_class = sched_regno_pressure_class[regno];
if (regno >= FIRST_PSEUDO_REGISTER)
{
if (cover_class != NO_REGS)
if (pressure_class != NO_REGS)
{
if (birth_p)
{
bitmap_set_bit (curr_reg_live, regno);
curr_reg_pressure[cover_class]
+= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
curr_reg_pressure[pressure_class]
+= (ira_reg_class_max_nregs
[pressure_class][PSEUDO_REGNO_MODE (regno)]);
}
else
{
bitmap_clear_bit (curr_reg_live, regno);
curr_reg_pressure[cover_class]
-= ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
curr_reg_pressure[pressure_class]
-= (ira_reg_class_max_nregs
[pressure_class][PSEUDO_REGNO_MODE (regno)]);
}
}
}
else if (cover_class != NO_REGS
else if (pressure_class != NO_REGS
&& ! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
if (birth_p)
{
bitmap_set_bit (curr_reg_live, regno);
curr_reg_pressure[cover_class]++;
curr_reg_pressure[pressure_class]++;
}
else
{
bitmap_clear_bit (curr_reg_live, regno);
curr_reg_pressure[cover_class]--;
curr_reg_pressure[pressure_class]--;
}
}
}
@ -653,8 +655,8 @@ initiate_reg_pressure_info (bitmap live)
unsigned int j;
bitmap_iterator bi;
for (i = 0; i < ira_reg_class_cover_size; i++)
curr_reg_pressure[ira_reg_class_cover[i]] = 0;
for (i = 0; i < ira_pressure_classes_num; i++)
curr_reg_pressure[ira_pressure_classes[i]] = 0;
bitmap_clear (curr_reg_live);
EXECUTE_IF_SET_IN_BITMAP (live, 0, j, bi)
if (current_nr_blocks == 1 || bitmap_bit_p (region_ref_regs, j))
@ -723,9 +725,9 @@ save_reg_pressure (void)
{
int i;
for (i = 0; i < ira_reg_class_cover_size; i++)
saved_reg_pressure[ira_reg_class_cover[i]]
= curr_reg_pressure[ira_reg_class_cover[i]];
for (i = 0; i < ira_pressure_classes_num; i++)
saved_reg_pressure[ira_pressure_classes[i]]
= curr_reg_pressure[ira_pressure_classes[i]];
bitmap_copy (saved_reg_live, curr_reg_live);
}
@ -735,9 +737,9 @@ restore_reg_pressure (void)
{
int i;
for (i = 0; i < ira_reg_class_cover_size; i++)
curr_reg_pressure[ira_reg_class_cover[i]]
= saved_reg_pressure[ira_reg_class_cover[i]];
for (i = 0; i < ira_pressure_classes_num; i++)
curr_reg_pressure[ira_pressure_classes[i]]
= saved_reg_pressure[ira_pressure_classes[i]];
bitmap_copy (curr_reg_live, saved_reg_live);
}
@ -755,7 +757,7 @@ dying_use_p (struct reg_use_data *use)
}
/* Print info about the current register pressure and its excess for
each cover class. */
each pressure class. */
static void
print_curr_reg_pressure (void)
{
@ -763,9 +765,9 @@ print_curr_reg_pressure (void)
enum reg_class cl;
fprintf (sched_dump, ";;\t");
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
cl = ira_reg_class_cover[i];
cl = ira_pressure_classes[i];
gcc_assert (curr_reg_pressure[cl] >= 0);
fprintf (sched_dump, " %s:%d(%d)", reg_class_names[cl],
curr_reg_pressure[cl],
@ -1108,23 +1110,24 @@ setup_insn_reg_pressure_info (rtx insn)
gcc_checking_assert (!DEBUG_INSN_P (insn));
excess_cost_change = 0;
for (i = 0; i < ira_reg_class_cover_size; i++)
death[ira_reg_class_cover[i]] = 0;
for (i = 0; i < ira_pressure_classes_num; i++)
death[ira_pressure_classes[i]] = 0;
for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
if (dying_use_p (use))
{
cl = sched_regno_cover_class[use->regno];
cl = sched_regno_pressure_class[use->regno];
if (use->regno < FIRST_PSEUDO_REGISTER)
death[cl]++;
else
death[cl] += ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
death[cl]
+= ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (use->regno)];
}
pressure_info = INSN_REG_PRESSURE (insn);
max_reg_pressure = INSN_MAX_REG_PRESSURE (insn);
gcc_assert (pressure_info != NULL && max_reg_pressure != NULL);
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
cl = ira_reg_class_cover[i];
cl = ira_pressure_classes[i];
gcc_assert (curr_reg_pressure[cl] >= 0);
change = (int) pressure_info[i].set_increase - death[cl];
before = MAX (0, max_reg_pressure[i] - ira_available_class_regs[cl]);
@ -1569,9 +1572,9 @@ setup_insn_max_reg_pressure (rtx after, bool update_p)
static int max_reg_pressure[N_REG_CLASSES];
save_reg_pressure ();
for (i = 0; i < ira_reg_class_cover_size; i++)
max_reg_pressure[ira_reg_class_cover[i]]
= curr_reg_pressure[ira_reg_class_cover[i]];
for (i = 0; i < ira_pressure_classes_num; i++)
max_reg_pressure[ira_pressure_classes[i]]
= curr_reg_pressure[ira_pressure_classes[i]];
for (insn = NEXT_INSN (after);
insn != NULL_RTX && ! BARRIER_P (insn)
&& BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (after);
@ -1579,24 +1582,24 @@ setup_insn_max_reg_pressure (rtx after, bool update_p)
if (NONDEBUG_INSN_P (insn))
{
eq_p = true;
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
p = max_reg_pressure[ira_reg_class_cover[i]];
p = max_reg_pressure[ira_pressure_classes[i]];
if (INSN_MAX_REG_PRESSURE (insn)[i] != p)
{
eq_p = false;
INSN_MAX_REG_PRESSURE (insn)[i]
= max_reg_pressure[ira_reg_class_cover[i]];
= max_reg_pressure[ira_pressure_classes[i]];
}
}
if (update_p && eq_p)
break;
update_register_pressure (insn);
for (i = 0; i < ira_reg_class_cover_size; i++)
if (max_reg_pressure[ira_reg_class_cover[i]]
< curr_reg_pressure[ira_reg_class_cover[i]])
max_reg_pressure[ira_reg_class_cover[i]]
= curr_reg_pressure[ira_reg_class_cover[i]];
for (i = 0; i < ira_pressure_classes_num; i++)
if (max_reg_pressure[ira_pressure_classes[i]]
< curr_reg_pressure[ira_pressure_classes[i]])
max_reg_pressure[ira_pressure_classes[i]]
= curr_reg_pressure[ira_pressure_classes[i]];
}
restore_reg_pressure ();
}
@ -1610,13 +1613,13 @@ update_reg_and_insn_max_reg_pressure (rtx insn)
int i;
int before[N_REG_CLASSES];
for (i = 0; i < ira_reg_class_cover_size; i++)
before[i] = curr_reg_pressure[ira_reg_class_cover[i]];
for (i = 0; i < ira_pressure_classes_num; i++)
before[i] = curr_reg_pressure[ira_pressure_classes[i]];
update_register_pressure (insn);
for (i = 0; i < ira_reg_class_cover_size; i++)
if (curr_reg_pressure[ira_reg_class_cover[i]] != before[i])
for (i = 0; i < ira_pressure_classes_num; i++)
if (curr_reg_pressure[ira_pressure_classes[i]] != before[i])
break;
if (i < ira_reg_class_cover_size)
if (i < ira_pressure_classes_num)
setup_insn_max_reg_pressure (insn, true);
}
@ -1662,9 +1665,9 @@ schedule_insn (rtx insn)
if (pressure_info != NULL)
{
fputc (':', sched_dump);
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
fprintf (sched_dump, "%s%+d(%d)",
reg_class_names[ira_reg_class_cover[i]],
reg_class_names[ira_pressure_classes[i]],
pressure_info[i].set_increase, pressure_info[i].change);
}
fputc ('\n', sched_dump);
@ -3509,13 +3512,13 @@ sched_init (void)
int i, max_regno = max_reg_num ();
ira_set_pseudo_classes (sched_verbose ? sched_dump : NULL);
sched_regno_cover_class
sched_regno_pressure_class
= (enum reg_class *) xmalloc (max_regno * sizeof (enum reg_class));
for (i = 0; i < max_regno; i++)
sched_regno_cover_class[i]
sched_regno_pressure_class[i]
= (i < FIRST_PSEUDO_REGISTER
? ira_class_translate[REGNO_REG_CLASS (i)]
: reg_cover_class (i));
? ira_pressure_class_translate[REGNO_REG_CLASS (i)]
: ira_pressure_class_translate[reg_allocno_class (i)]);
curr_reg_live = BITMAP_ALLOC (NULL);
saved_reg_live = BITMAP_ALLOC (NULL);
region_ref_regs = BITMAP_ALLOC (NULL);
@ -3620,7 +3623,7 @@ sched_finish (void)
haifa_finish_h_i_d ();
if (sched_pressure_p)
{
free (sched_regno_cover_class);
free (sched_regno_pressure_class);
BITMAP_FREE (region_ref_regs);
BITMAP_FREE (saved_reg_live);
BITMAP_FREE (curr_reg_live);
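
The haifa-sched.c hunks above are a mechanical conversion from the cover-class arrays to the pressure-class ones: ira_reg_class_cover and ira_reg_class_cover_size become ira_pressure_classes and ira_pressure_classes_num, sched_regno_cover_class becomes sched_regno_pressure_class, and a pseudo is now charged ira_reg_class_max_nregs registers instead of ira_reg_class_nregs. Below is a condensed sketch of the resulting idiom, assuming only the GCC-internal names visible in the diff; the two helper functions themselves are hypothetical.

static void
reset_pressure (void)
{
  int i;

  /* Iterate over the pressure classes instead of the old cover classes.  */
  for (i = 0; i < ira_pressure_classes_num; i++)
    curr_reg_pressure[ira_pressure_classes[i]] = 0;
}

static void
account_pseudo_birth (int regno)
{
  enum reg_class cl = sched_regno_pressure_class[regno];

  /* A pseudo contributes as many hard registers as its mode needs in
     its pressure class; a hard register (not shown) contributes 1.  */
  if (cl != NO_REGS)
    curr_reg_pressure[cl]
      += ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
}
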

@ -415,7 +415,8 @@ initiate_allocnos (void)
= VEC_alloc (ira_object_t, heap, max_reg_num () * 2);
ira_object_id_map = NULL;
ira_regno_allocno_map
= (ira_allocno_t *) ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
= (ira_allocno_t *) ira_allocate (max_reg_num ()
* sizeof (ira_allocno_t));
memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
@ -423,7 +424,7 @@ initiate_allocnos (void)
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
enum reg_class aclass = ALLOCNO_CLASS (a);
ira_object_t obj = (ira_object_t) pool_alloc (object_pool);
OBJECT_ALLOCNO (obj) = a;
@ -435,12 +436,13 @@ ira_create_object (ira_allocno_t a, int subword)
COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
reg_class_contents[cover_class]);
reg_class_contents[aclass]);
IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
reg_class_contents[cover_class]);
reg_class_contents[aclass]);
OBJECT_MIN (obj) = INT_MAX;
OBJECT_MAX (obj) = -1;
OBJECT_LIVE_RANGES (obj) = NULL;
OBJECT_ADD_DATA (obj) = NULL;
VEC_safe_push (ira_object_t, heap, ira_object_id_map_vec, obj);
ira_object_id_map
@ -454,7 +456,8 @@ ira_create_object (ira_allocno_t a, int subword)
LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
same regno if CAP_P is FALSE. */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p, ira_loop_tree_node_t loop_tree_node)
ira_create_allocno (int regno, bool cap_p,
ira_loop_tree_node_t loop_tree_node)
{
ira_allocno_t a;
@ -484,35 +487,24 @@ ira_create_allocno (int regno, bool cap_p, ira_loop_tree_node_t loop_tree_node)
ALLOCNO_NO_STACK_REG_P (a) = false;
ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
ALLOCNO_MEM_OPTIMIZED_DEST (a) = NULL;
ALLOCNO_MEM_OPTIMIZED_DEST_P (a) = false;
ALLOCNO_SOMEWHERE_RENAMED_P (a) = false;
ALLOCNO_CHILD_RENAMED_P (a) = false;
ALLOCNO_DONT_REASSIGN_P (a) = false;
ALLOCNO_BAD_SPILL_P (a) = false;
ALLOCNO_IN_GRAPH_P (a) = false;
ALLOCNO_ASSIGNED_P (a) = false;
ALLOCNO_MAY_BE_SPILLED_P (a) = false;
ALLOCNO_SPLAY_REMOVED_P (a) = false;
ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
ALLOCNO_COPIES (a) = NULL;
ALLOCNO_HARD_REG_COSTS (a) = NULL;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
ALLOCNO_LEFT_CONFLICTS_SIZE (a) = -1;
ALLOCNO_COVER_CLASS (a) = NO_REGS;
ALLOCNO_UPDATED_COVER_CLASS_COST (a) = 0;
ALLOCNO_COVER_CLASS_COST (a) = 0;
ALLOCNO_CLASS (a) = NO_REGS;
ALLOCNO_UPDATED_CLASS_COST (a) = 0;
ALLOCNO_CLASS_COST (a) = 0;
ALLOCNO_MEMORY_COST (a) = 0;
ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
ALLOCNO_NEXT_BUCKET_ALLOCNO (a) = NULL;
ALLOCNO_PREV_BUCKET_ALLOCNO (a) = NULL;
ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
ALLOCNO_NUM_OBJECTS (a) = 0;
ALLOCNO_ADD_DATA (a) = NULL;
VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);
@ -520,11 +512,22 @@ ira_create_allocno (int regno, bool cap_p, ira_loop_tree_node_t loop_tree_node)
return a;
}
/* Set up cover class for A and update its conflict hard registers. */
/* Set up register class for A and update its conflict hard
registers. */
void
ira_set_allocno_cover_class (ira_allocno_t a, enum reg_class cover_class)
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
ALLOCNO_COVER_CLASS (a) = cover_class;
ira_allocno_object_iterator oi;
ira_object_t obj;
ALLOCNO_CLASS (a) = aclass;
FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
{
IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
reg_class_contents[aclass]);
IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
reg_class_contents[aclass]);
}
}
/* Determine the number of objects we should associate with allocno A
@ -533,8 +536,8 @@ void
ira_create_allocno_objects (ira_allocno_t a)
{
enum machine_mode mode = ALLOCNO_MODE (a);
enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
int n = ira_reg_class_nregs[cover_class][mode];
enum reg_class aclass = ALLOCNO_CLASS (a);
int n = ira_reg_class_max_nregs[aclass][mode];
int i;
if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
@ -546,7 +549,7 @@ ira_create_allocno_objects (ira_allocno_t a)
}
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
ALLOCNO_OBJECT structures. This must be called after the cover
ALLOCNO_OBJECT structures. This must be called after the allocno
classes are known. */
static void
create_allocno_objects (void)
@ -571,6 +574,7 @@ merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
{
ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
if (!total_only)
IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
OBJECT_CONFLICT_HARD_REGS (from_obj));
@ -592,6 +596,7 @@ ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
{
ira_allocno_object_iterator i;
ira_object_t obj;
FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
{
IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
@ -849,25 +854,22 @@ create_cap_allocno (ira_allocno_t a)
{
ira_allocno_t cap;
ira_loop_tree_node_t parent;
enum reg_class cover_class;
enum reg_class aclass;
ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
&& ALLOCNO_NEXT_COALESCED_ALLOCNO (a) == a);
parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
cover_class = ALLOCNO_COVER_CLASS (a);
ira_set_allocno_cover_class (cap, cover_class);
aclass = ALLOCNO_CLASS (a);
ira_set_allocno_class (cap, aclass);
ira_create_allocno_objects (cap);
ALLOCNO_AVAILABLE_REGS_NUM (cap) = ALLOCNO_AVAILABLE_REGS_NUM (a);
ALLOCNO_CAP_MEMBER (cap) = a;
ALLOCNO_CAP (a) = cap;
ALLOCNO_COVER_CLASS_COST (cap) = ALLOCNO_COVER_CLASS_COST (a);
ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
ira_allocate_and_copy_costs
(&ALLOCNO_HARD_REG_COSTS (cap), cover_class, ALLOCNO_HARD_REG_COSTS (a));
(&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
ira_allocate_and_copy_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), cover_class,
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
@ -1063,23 +1065,24 @@ ira_finish_live_range_list (live_range_t r)
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
enum reg_class cover_class;
enum reg_class aclass;
cover_class = ALLOCNO_COVER_CLASS (a);
aclass = ALLOCNO_CLASS (a);
if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
cover_class);
aclass);
ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A. */
/* Free and nullify all cost vectors allocated earlier for allocno
A. */
static void
finish_allocno (ira_allocno_t a)
ira_free_allocno_costs (ira_allocno_t a)
{
enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
enum reg_class aclass = ALLOCNO_CLASS (a);
ira_object_t obj;
ira_allocno_object_iterator oi;
@ -1094,14 +1097,25 @@ finish_allocno (ira_allocno_t a)
ira_allocnos[ALLOCNO_NUM (a)] = NULL;
if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), cover_class);
ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class);
ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), cover_class);
ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
cover_class);
aclass);
ALLOCNO_HARD_REG_COSTS (a) = NULL;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A. */
static void
finish_allocno (ira_allocno_t a)
{
ira_free_allocno_costs (a);
pool_free (allocno_pool, a);
}
@ -1365,55 +1379,54 @@ finish_copies (void)
/* Pools for cost vectors. It is defined only for cover classes. */
/* Pools for cost vectors. It is defined only for allocno classes. */
static alloc_pool cost_vector_pool[N_REG_CLASSES];
/* The function initiates work with hard register cost vectors. It
creates allocation pool for each cover class. */
creates allocation pool for each allocno class. */
static void
initiate_cost_vectors (void)
{
int i;
enum reg_class cover_class;
enum reg_class aclass;
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_allocno_classes_num; i++)
{
cover_class = ira_reg_class_cover[i];
cost_vector_pool[cover_class]
aclass = ira_allocno_classes[i];
cost_vector_pool[aclass]
= create_alloc_pool ("cost vectors",
sizeof (int)
* ira_class_hard_regs_num[cover_class],
sizeof (int) * ira_class_hard_regs_num[aclass],
100);
}
}
/* Allocate and return a cost vector VEC for COVER_CLASS. */
/* Allocate and return a cost vector VEC for ACLASS. */
int *
ira_allocate_cost_vector (enum reg_class cover_class)
ira_allocate_cost_vector (enum reg_class aclass)
{
return (int *) pool_alloc (cost_vector_pool[cover_class]);
return (int *) pool_alloc (cost_vector_pool[aclass]);
}
/* Free a cost vector VEC for COVER_CLASS. */
/* Free a cost vector VEC for ACLASS. */
void
ira_free_cost_vector (int *vec, enum reg_class cover_class)
ira_free_cost_vector (int *vec, enum reg_class aclass)
{
ira_assert (vec != NULL);
pool_free (cost_vector_pool[cover_class], vec);
pool_free (cost_vector_pool[aclass], vec);
}
/* Finish work with hard register cost vectors. Release allocation
pool for each cover class. */
pool for each allocno class. */
static void
finish_cost_vectors (void)
{
int i;
enum reg_class cover_class;
enum reg_class aclass;
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_allocno_classes_num; i++)
{
cover_class = ira_reg_class_cover[i];
free_alloc_pool (cost_vector_pool[cover_class]);
aclass = ira_allocno_classes[i];
free_alloc_pool (cost_vector_pool[aclass]);
}
}
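
The cost-vector routines above keep their allocation-pool shape but are now keyed by allocno class and sized by ira_class_hard_regs_num for that class. A hedged usage sketch follows; the function itself is hypothetical and only shows the expected allocate, fill, and free pairing.

static void
example_use_cost_vector (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int i, n = ira_class_hard_regs_num[aclass];
  int *vec = ira_allocate_cost_vector (aclass);

  /* The vector has one slot per hard register of the allocno class.  */
  for (i = 0; i < n; i++)
    vec[i] = 0;
  /* ... accumulate per-hard-register costs here ...  */
  ira_free_cost_vector (vec, aclass);
}
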
@ -1644,7 +1657,7 @@ propagate_allocno_info (void)
int i;
ira_allocno_t a, parent_a;
ira_loop_tree_node_t parent;
enum reg_class cover_class;
enum reg_class aclass;
if (flag_ira_region != IRA_REGION_ALL
&& flag_ira_region != IRA_REGION_MIXED)
@ -1670,17 +1683,17 @@ propagate_allocno_info (void)
+= ALLOCNO_CALLS_CROSSED_NUM (a);
ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
cover_class = ALLOCNO_COVER_CLASS (a);
ira_assert (cover_class == ALLOCNO_COVER_CLASS (parent_a));
aclass = ALLOCNO_CLASS (a);
ira_assert (aclass == ALLOCNO_CLASS (parent_a));
ira_allocate_and_accumulate_costs
(&ALLOCNO_HARD_REG_COSTS (parent_a), cover_class,
(&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
ALLOCNO_HARD_REG_COSTS (a));
ira_allocate_and_accumulate_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
cover_class,
aclass,
ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
ALLOCNO_COVER_CLASS_COST (parent_a)
+= ALLOCNO_COVER_CLASS_COST (a);
ALLOCNO_CLASS_COST (parent_a)
+= ALLOCNO_CLASS_COST (a);
ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
}
}
@ -1778,16 +1791,16 @@ static bool
low_pressure_loop_node_p (ira_loop_tree_node_t node)
{
int i;
enum reg_class cover_class;
enum reg_class pclass;
if (node->bb != NULL)
return false;
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
cover_class = ira_reg_class_cover[i];
if (node->reg_pressure[cover_class]
> ira_available_class_regs[cover_class])
pclass = ira_pressure_classes[i];
if (node->reg_pressure[pclass] > ira_available_class_regs[pclass]
&& ira_available_class_regs[pclass] > 1)
return false;
}
return true;
@ -2003,7 +2016,7 @@ ira_rebuild_regno_allocno_list (int regno)
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
enum reg_class cover_class;
enum reg_class aclass;
merge_hard_reg_conflicts (from_a, a, false);
ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
@ -2014,14 +2027,14 @@ propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
if (! ALLOCNO_BAD_SPILL_P (from_a))
ALLOCNO_BAD_SPILL_P (a) = false;
cover_class = ALLOCNO_COVER_CLASS (from_a);
ira_assert (cover_class == ALLOCNO_COVER_CLASS (a));
ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), cover_class,
aclass = ALLOCNO_CLASS (from_a);
ira_assert (aclass == ALLOCNO_CLASS (a));
ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
ALLOCNO_HARD_REG_COSTS (from_a));
ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
cover_class,
aclass,
ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
ALLOCNO_COVER_CLASS_COST (a) += ALLOCNO_COVER_CLASS_COST (from_a);
ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
@ -2173,8 +2186,8 @@ remove_low_level_allocnos (void)
/* Remove loops from consideration. We remove all loops except for
root if ALL_P or loops for which a separate allocation will not
improve the result. We have to do this after allocno creation and
their costs and cover class evaluation because only after that the
register pressure can be known and is calculated. */
their costs and allocno class evaluation because only after that
the register pressure can be known and is calculated. */
static void
remove_unnecessary_regions (bool all_p)
{
@ -2223,27 +2236,27 @@ update_bad_spill_attribute (void)
ira_allocno_object_iterator aoi;
ira_object_t obj;
live_range_t r;
enum reg_class cover_class;
enum reg_class aclass;
bitmap_head dead_points[N_REG_CLASSES];
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_allocno_classes_num; i++)
{
cover_class = ira_reg_class_cover[i];
bitmap_initialize (&dead_points[cover_class], &reg_obstack);
aclass = ira_allocno_classes[i];
bitmap_initialize (&dead_points[aclass], &reg_obstack);
}
FOR_EACH_ALLOCNO (a, ai)
{
cover_class = ALLOCNO_COVER_CLASS (a);
if (cover_class == NO_REGS)
aclass = ALLOCNO_CLASS (a);
if (aclass == NO_REGS)
continue;
FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
bitmap_set_bit (&dead_points[cover_class], r->finish);
bitmap_set_bit (&dead_points[aclass], r->finish);
}
FOR_EACH_ALLOCNO (a, ai)
{
cover_class = ALLOCNO_COVER_CLASS (a);
if (cover_class == NO_REGS)
aclass = ALLOCNO_CLASS (a);
if (aclass == NO_REGS)
continue;
if (! ALLOCNO_BAD_SPILL_P (a))
continue;
@ -2252,7 +2265,7 @@ update_bad_spill_attribute (void)
for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
{
for (i = r->start + 1; i < r->finish; i++)
if (bitmap_bit_p (&dead_points[cover_class], i))
if (bitmap_bit_p (&dead_points[aclass], i))
break;
if (i < r->finish)
break;
@ -2264,10 +2277,10 @@ update_bad_spill_attribute (void)
}
}
}
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_allocno_classes_num; i++)
{
cover_class = ira_reg_class_cover[i];
bitmap_clear (&dead_points[cover_class]);
aclass = ira_allocno_classes[i];
bitmap_clear (&dead_points[aclass]);
}
}
@ -2290,6 +2303,7 @@ setup_min_max_allocno_live_range_point (void)
FOR_EACH_ALLOCNO (a, ai)
{
int n = ALLOCNO_NUM_OBJECTS (a);
for (i = 0; i < n; i++)
{
ira_object_t obj = ALLOCNO_OBJECT (a, i);
@ -2309,6 +2323,7 @@ setup_min_max_allocno_live_range_point (void)
{
int j;
int n = ALLOCNO_NUM_OBJECTS (a);
for (j = 0; j < n; j++)
{
ira_object_t obj = ALLOCNO_OBJECT (a, j);
@ -2352,10 +2367,10 @@ setup_min_max_allocno_live_range_point (void)
}
/* Sort allocnos according to their live ranges. Allocnos with
smaller cover class are put first unless we use priority coloring.
Allocnos with the same cover class are ordered according their start
(min). Allocnos with the same start are ordered according their
finish (max). */
smaller allocno class are put first unless we use priority
coloring. Allocnos with the same class are ordered according
their start (min). Allocnos with the same start are ordered
according their finish (max). */
static int
object_range_compare_func (const void *v1p, const void *v2p)
{
@ -2365,9 +2380,6 @@ object_range_compare_func (const void *v1p, const void *v2p)
ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
&& (diff = ALLOCNO_COVER_CLASS (a1) - ALLOCNO_COVER_CLASS (a2)) != 0)
return diff;
if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
return diff;
if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
@ -2397,6 +2409,7 @@ sort_conflict_id_map (void)
for (i = 0; i < num; i++)
{
ira_object_t obj = ira_object_id_map[i];
gcc_assert (obj != NULL);
OBJECT_CONFLICT_ID (obj) = i;
}
@ -2409,7 +2422,7 @@ sort_conflict_id_map (void)
static void
setup_min_max_conflict_allocno_ids (void)
{
int cover_class;
int aclass;
int i, j, min, max, start, finish, first_not_finished, filled_area_start;
int *live_range_min, *last_lived;
int word0_min, word0_max;
@ -2417,21 +2430,20 @@ setup_min_max_conflict_allocno_ids (void)
ira_allocno_iterator ai;
live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
cover_class = -1;
aclass = -1;
first_not_finished = -1;
for (i = 0; i < ira_objects_num; i++)
{
ira_object_t obj = ira_object_id_map[i];
if (obj == NULL)
continue;
a = OBJECT_ALLOCNO (obj);
if (cover_class < 0
|| (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
&& cover_class != (int) ALLOCNO_COVER_CLASS (a)))
if (aclass < 0)
{
cover_class = ALLOCNO_COVER_CLASS (a);
aclass = ALLOCNO_CLASS (a);
min = i;
first_not_finished = i;
}
@ -2456,20 +2468,19 @@ setup_min_max_conflict_allocno_ids (void)
OBJECT_MIN (obj) = min;
}
last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
cover_class = -1;
aclass = -1;
filled_area_start = -1;
for (i = ira_objects_num - 1; i >= 0; i--)
{
ira_object_t obj = ira_object_id_map[i];
if (obj == NULL)
continue;
a = OBJECT_ALLOCNO (obj);
if (cover_class < 0
|| (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
&& cover_class != (int) ALLOCNO_COVER_CLASS (a)))
if (aclass < 0)
{
cover_class = ALLOCNO_COVER_CLASS (a);
aclass = ALLOCNO_CLASS (a);
for (j = 0; j < ira_max_point; j++)
last_lived[j] = -1;
filled_area_start = ira_max_point;
@ -2507,6 +2518,7 @@ setup_min_max_conflict_allocno_ids (void)
{
int n = ALLOCNO_NUM_OBJECTS (a);
ira_object_t obj0;
if (n < 2)
continue;
obj0 = ALLOCNO_OBJECT (a, 0);
@ -2519,6 +2531,7 @@ setup_min_max_conflict_allocno_ids (void)
{
int n = ALLOCNO_NUM_OBJECTS (a);
ira_object_t obj0;
if (n < 2)
continue;
obj0 = ALLOCNO_OBJECT (a, 0);
@ -2611,7 +2624,7 @@ copy_info_to_removed_store_destinations (int regno)
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))])
if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
/* This allocno will be removed. */
continue;
@ -2621,9 +2634,10 @@ copy_info_to_removed_store_destinations (int regno)
parent != NULL;
parent = parent->parent)
if ((parent_a = parent->regno_allocno_map[regno]) == NULL
|| (parent_a == regno_top_level_allocno_map[REGNO (ALLOCNO_REG
(parent_a))]
&& ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)))
|| (parent_a
== regno_top_level_allocno_map[REGNO
(allocno_emit_reg (parent_a))]
&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
break;
if (parent == NULL || parent_a == NULL)
continue;
@ -2655,7 +2669,7 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
int hard_regs_num;
bool new_pseudos_p, merged_p, mem_dest_p;
unsigned int n;
enum reg_class cover_class;
enum reg_class aclass;
ira_allocno_t a, parent_a, first, second, node_first, node_second;
ira_copy_t cp;
ira_loop_tree_node_t node;
@ -2664,7 +2678,8 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
ira_copy_iterator ci;
regno_top_level_allocno_map
= (ira_allocno_t *) ira_allocate (max_reg_num () * sizeof (ira_allocno_t));
= (ira_allocno_t *) ira_allocate (max_reg_num ()
* sizeof (ira_allocno_t));
memset (regno_top_level_allocno_map, 0,
max_reg_num () * sizeof (ira_allocno_t));
new_pseudos_p = merged_p = false;
@ -2672,6 +2687,7 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
ira_allocno_object_iterator oi;
ira_object_t obj;
if (ALLOCNO_CAP_MEMBER (a) != NULL)
/* Caps are not in the regno allocno maps and they are never
will be transformed into allocnos existing after IR
@ -2692,28 +2708,31 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
{
ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);
ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
if (ALLOCNO_SOMEWHERE_RENAMED_P (a))
if (data->somewhere_renamed_p)
new_pseudos_p = true;
parent_a = ira_parent_allocno (a);
if (parent_a == NULL)
{
ALLOCNO_COPIES (a) = NULL;
regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))] = a;
regno_top_level_allocno_map[REGNO (data->reg)] = a;
continue;
}
ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
if (ALLOCNO_MEM_OPTIMIZED_DEST (a) != NULL)
if (data->mem_optimized_dest != NULL)
mem_dest_p = true;
if (REGNO (ALLOCNO_REG (a)) == REGNO (ALLOCNO_REG (parent_a)))
parent_data = ALLOCNO_EMIT_DATA (parent_a);
if (REGNO (data->reg) == REGNO (parent_data->reg))
{
merge_hard_reg_conflicts (a, parent_a, true);
move_allocno_live_ranges (a, parent_a);
merged_p = true;
ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)
= (ALLOCNO_MEM_OPTIMIZED_DEST_P (parent_a)
|| ALLOCNO_MEM_OPTIMIZED_DEST_P (a));
parent_data->mem_optimized_dest_p
= (parent_data->mem_optimized_dest_p
|| data->mem_optimized_dest_p);
continue;
}
new_pseudos_p = true;
@ -2729,8 +2748,8 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
&& ALLOCNO_NREFS (parent_a) >= 0
&& ALLOCNO_FREQ (parent_a) >= 0);
cover_class = ALLOCNO_COVER_CLASS (parent_a);
hard_regs_num = ira_class_hard_regs_num[cover_class];
aclass = ALLOCNO_CLASS (parent_a);
hard_regs_num = ira_class_hard_regs_num[aclass];
if (ALLOCNO_HARD_REG_COSTS (a) != NULL
&& ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
for (j = 0; j < hard_regs_num; j++)
@ -2741,15 +2760,15 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
for (j = 0; j < hard_regs_num; j++)
ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
-= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
ALLOCNO_COVER_CLASS_COST (parent_a)
-= ALLOCNO_COVER_CLASS_COST (a);
ALLOCNO_CLASS_COST (parent_a)
-= ALLOCNO_CLASS_COST (a);
ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
parent_a = ira_parent_allocno (parent_a);
if (parent_a == NULL)
break;
}
ALLOCNO_COPIES (a) = NULL;
regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))] = a;
regno_top_level_allocno_map[REGNO (data->reg)] = a;
}
if (mem_dest_p && copy_info_to_removed_store_destinations (i))
merged_p = true;
@ -2766,7 +2785,8 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
ira_allocno_object_iterator oi;
ira_object_t obj;
if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
|| ALLOCNO_CAP_MEMBER (a) != NULL)
continue;
FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
@ -2782,19 +2802,21 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
{
ira_object_t obj = r->object;
a = OBJECT_ALLOCNO (obj);
if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
|| ALLOCNO_CAP_MEMBER (a) != NULL)
continue;
cover_class = ALLOCNO_COVER_CLASS (a);
aclass = ALLOCNO_CLASS (a);
sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
{
ira_object_t live_obj = ira_object_id_map[n];
ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
enum reg_class live_cover = ALLOCNO_COVER_CLASS (live_a);
if (ira_reg_classes_intersect_p[cover_class][live_cover]
enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
if (ira_reg_classes_intersect_p[aclass][live_aclass]
/* Don't set up conflict for the allocno with itself. */
&& live_a != a)
ira_add_conflict (obj, live_obj);
@ -2818,14 +2840,18 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
fprintf
(ira_dump_file, " Remove cp%d:%c%dr%d-%c%dr%d\n",
cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
ALLOCNO_NUM (cp->first), REGNO (ALLOCNO_REG (cp->first)),
ALLOCNO_NUM (cp->first),
REGNO (allocno_emit_reg (cp->first)),
ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
ALLOCNO_NUM (cp->second), REGNO (ALLOCNO_REG (cp->second)));
ALLOCNO_NUM (cp->second),
REGNO (allocno_emit_reg (cp->second)));
cp->loop_tree_node = NULL;
continue;
}
first = regno_top_level_allocno_map[REGNO (ALLOCNO_REG (cp->first))];
second = regno_top_level_allocno_map[REGNO (ALLOCNO_REG (cp->second))];
first
= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
second
= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
node = cp->loop_tree_node;
if (node == NULL)
keep_p = true; /* It copy generated in ira-emit.c. */
@ -2835,10 +2861,10 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
which we will have different pseudos. */
node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
keep_p = ((REGNO (ALLOCNO_REG (first))
== REGNO (ALLOCNO_REG (node_first)))
&& (REGNO (ALLOCNO_REG (second))
== REGNO (ALLOCNO_REG (node_second))));
keep_p = ((REGNO (allocno_emit_reg (first))
== REGNO (allocno_emit_reg (node_first)))
&& (REGNO (allocno_emit_reg (second))
== REGNO (allocno_emit_reg (node_second))));
}
if (keep_p)
{
@ -2852,28 +2878,29 @@ ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Remove cp%d:a%dr%d-a%dr%d\n",
cp->num, ALLOCNO_NUM (cp->first),
REGNO (ALLOCNO_REG (cp->first)), ALLOCNO_NUM (cp->second),
REGNO (ALLOCNO_REG (cp->second)));
REGNO (allocno_emit_reg (cp->first)),
ALLOCNO_NUM (cp->second),
REGNO (allocno_emit_reg (cp->second)));
}
}
/* Remove unnecessary allocnos on lower levels of the loop tree. */
FOR_EACH_ALLOCNO (a, ai)
{
if (a != regno_top_level_allocno_map[REGNO (ALLOCNO_REG (a))]
if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
|| ALLOCNO_CAP_MEMBER (a) != NULL)
{
if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Remove a%dr%d\n",
ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)));
ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
finish_allocno (a);
continue;
}
ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
ALLOCNO_REGNO (a) = REGNO (ALLOCNO_REG (a));
ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
ALLOCNO_CAP (a) = NULL;
/* Restore updated costs for assignments from reload. */
ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);
ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
if (! ALLOCNO_ASSIGNED_P (a))
ira_free_allocno_updated_costs (a);
ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
@ -2942,29 +2969,28 @@ update_conflict_hard_reg_costs (void)
FOR_EACH_ALLOCNO (a, ai)
{
enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
enum reg_class aclass = ALLOCNO_CLASS (a);
enum reg_class pref = reg_preferred_class (ALLOCNO_REGNO (a));
if (reg_class_size[pref] != 1)
continue;
index = (ira_class_hard_reg_index[cover_class]
[ira_class_hard_regs[pref][0]]);
index = ira_class_hard_reg_index[aclass][ira_class_hard_regs[pref][0]];
if (index < 0)
continue;
if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
|| ALLOCNO_HARD_REG_COSTS (a) == NULL)
continue;
min = INT_MAX;
for (i = ira_class_hard_regs_num[cover_class] - 1; i >= 0; i--)
if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_COVER_CLASS_COST (a)
for (i = ira_class_hard_regs_num[aclass] - 1; i >= 0; i--)
if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
&& min > ALLOCNO_HARD_REG_COSTS (a)[i])
min = ALLOCNO_HARD_REG_COSTS (a)[i];
if (min == INT_MAX)
continue;
ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
cover_class, 0);
aclass, 0);
ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
-= min - ALLOCNO_COVER_CLASS_COST (a);
-= min - ALLOCNO_CLASS_COST (a);
}
}
@ -3000,7 +3026,7 @@ ira_build (bool loops_p)
propagate_allocno_info ();
create_caps ();
}
ira_tune_allocno_costs_and_cover_classes ();
ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
check_allocno_creation ();
#endif
@ -3042,6 +3068,7 @@ ira_build (bool loops_p)
FOR_EACH_ALLOCNO (a, ai)
{
int j, nobj = ALLOCNO_NUM_OBJECTS (a);
if (nobj > 1)
nr_big++;
for (j = 0; j < nobj; j++)

File diff suppressed because it is too large.

@ -97,7 +97,7 @@ build_conflict_bit_table (void)
{
int i;
unsigned int j;
enum reg_class cover_class;
enum reg_class aclass;
int object_set_words, allocated_words_num, conflict_bit_vec_words_num;
live_range_t r;
ira_allocno_t allocno;
@ -170,15 +170,15 @@ build_conflict_bit_table (void)
gcc_assert (id < ira_objects_num);
cover_class = ALLOCNO_COVER_CLASS (allocno);
aclass = ALLOCNO_CLASS (allocno);
sparseset_set_bit (objects_live, id);
EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
{
ira_object_t live_obj = ira_object_id_map[j];
ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
enum reg_class live_cover_class = ALLOCNO_COVER_CLASS (live_a);
enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
if (ira_reg_classes_intersect_p[cover_class][live_cover_class]
if (ira_reg_classes_intersect_p[aclass][live_aclass]
/* Don't set up conflict for the allocno with itself. */
&& live_a != allocno)
{
@ -205,6 +205,7 @@ allocnos_conflict_for_copy_p (ira_allocno_t a1, ira_allocno_t a2)
the lowest order words. */
ira_object_t obj1 = ALLOCNO_OBJECT (a1, 0);
ira_object_t obj2 = ALLOCNO_OBJECT (a2, 0);
return OBJECTS_CONFLICT_P (obj1, obj2);
}
@ -389,7 +390,7 @@ process_regs_for_copy (rtx reg1, rtx reg2, bool constraint_p,
int allocno_preferenced_hard_regno, cost, index, offset1, offset2;
bool only_regs_p;
ira_allocno_t a;
enum reg_class rclass, cover_class;
enum reg_class rclass, aclass;
enum machine_mode mode;
ira_copy_t cp;
@ -426,35 +427,37 @@ process_regs_for_copy (rtx reg1, rtx reg2, bool constraint_p,
return false;
}
if (! IN_RANGE (allocno_preferenced_hard_regno, 0, FIRST_PSEUDO_REGISTER - 1))
if (! IN_RANGE (allocno_preferenced_hard_regno,
0, FIRST_PSEUDO_REGISTER - 1))
/* Can not be tied. */
return false;
rclass = REGNO_REG_CLASS (allocno_preferenced_hard_regno);
mode = ALLOCNO_MODE (a);
cover_class = ALLOCNO_COVER_CLASS (a);
aclass = ALLOCNO_CLASS (a);
if (only_regs_p && insn != NULL_RTX
&& reg_class_size[rclass] <= (unsigned) CLASS_MAX_NREGS (rclass, mode))
/* It is already taken into account in ira-costs.c. */
return false;
index = ira_class_hard_reg_index[cover_class][allocno_preferenced_hard_regno];
index = ira_class_hard_reg_index[aclass][allocno_preferenced_hard_regno];
if (index < 0)
/* Can not be tied. It is not in the cover class. */
/* Can not be tied. It is not in the allocno class. */
return false;
ira_init_register_move_cost_if_necessary (mode);
if (HARD_REGISTER_P (reg1))
cost = ira_get_register_move_cost (mode, cover_class, rclass) * freq;
cost = ira_register_move_cost[mode][aclass][rclass] * freq;
else
cost = ira_get_register_move_cost (mode, rclass, cover_class) * freq;
cost = ira_register_move_cost[mode][rclass][aclass] * freq;
do
{
ira_allocate_and_set_costs
(&ALLOCNO_HARD_REG_COSTS (a), cover_class,
ALLOCNO_COVER_CLASS_COST (a));
(&ALLOCNO_HARD_REG_COSTS (a), aclass,
ALLOCNO_CLASS_COST (a));
ira_allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), cover_class, 0);
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass, 0);
ALLOCNO_HARD_REG_COSTS (a)[index] -= cost;
ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index] -= cost;
if (ALLOCNO_HARD_REG_COSTS (a)[index] < ALLOCNO_COVER_CLASS_COST (a))
ALLOCNO_COVER_CLASS_COST (a) = ALLOCNO_HARD_REG_COSTS (a)[index];
if (ALLOCNO_HARD_REG_COSTS (a)[index] < ALLOCNO_CLASS_COST (a))
ALLOCNO_CLASS_COST (a) = ALLOCNO_HARD_REG_COSTS (a)[index];
a = ira_parent_or_cap_allocno (a);
}
while (a != NULL);
@ -507,7 +510,8 @@ add_insn_allocno_copies (rtx insn)
? SET_SRC (set)
: SUBREG_REG (SET_SRC (set))) != NULL_RTX)
{
process_regs_for_copy (SET_DEST (set), SET_SRC (set), false, insn, freq);
process_regs_for_copy (SET_DEST (set), SET_SRC (set),
false, insn, freq);
return;
}
/* Fast check of possibility of constraint or shuffle copies. If
@ -608,6 +612,7 @@ build_object_conflicts (ira_object_t obj)
ira_allocno_t a = OBJECT_ALLOCNO (obj);
IRA_INT_TYPE *object_conflicts;
minmax_set_iterator asi;
int parent_min, parent_max;
object_conflicts = conflicts[OBJECT_CONFLICT_ID (obj)];
px = 0;
@ -616,8 +621,9 @@ build_object_conflicts (ira_object_t obj)
{
ira_object_t another_obj = ira_object_id_map[i];
ira_allocno_t another_a = OBJECT_ALLOCNO (obj);
ira_assert (ira_reg_classes_intersect_p
[ALLOCNO_COVER_CLASS (a)][ALLOCNO_COVER_CLASS (another_a)]);
[ALLOCNO_CLASS (a)][ALLOCNO_CLASS (another_a)]);
collected_conflict_objects[px++] = another_obj;
}
if (ira_conflict_vector_profitable_p (obj, px))
@ -632,6 +638,7 @@ build_object_conflicts (ira_object_t obj)
else
{
int conflict_bit_vec_words_num;
OBJECT_CONFLICT_ARRAY (obj) = object_conflicts;
if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
conflict_bit_vec_words_num = 0;
@ -646,10 +653,12 @@ build_object_conflicts (ira_object_t obj)
parent_a = ira_parent_or_cap_allocno (a);
if (parent_a == NULL)
return;
ira_assert (ALLOCNO_COVER_CLASS (a) == ALLOCNO_COVER_CLASS (parent_a));
ira_assert (ALLOCNO_CLASS (a) == ALLOCNO_CLASS (parent_a));
ira_assert (ALLOCNO_NUM_OBJECTS (a) == ALLOCNO_NUM_OBJECTS (parent_a));
parent_obj = ALLOCNO_OBJECT (parent_a, OBJECT_SUBWORD (obj));
parent_num = OBJECT_CONFLICT_ID (parent_obj);
parent_min = OBJECT_MIN (parent_obj);
parent_max = OBJECT_MAX (parent_obj);
FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts,
OBJECT_MIN (obj), OBJECT_MAX (obj), i, asi)
{
@ -658,21 +667,20 @@ build_object_conflicts (ira_object_t obj)
int another_word = OBJECT_SUBWORD (another_obj);
ira_assert (ira_reg_classes_intersect_p
[ALLOCNO_COVER_CLASS (a)][ALLOCNO_COVER_CLASS (another_a)]);
[ALLOCNO_CLASS (a)][ALLOCNO_CLASS (another_a)]);
another_parent_a = ira_parent_or_cap_allocno (another_a);
if (another_parent_a == NULL)
continue;
ira_assert (ALLOCNO_NUM (another_parent_a) >= 0);
ira_assert (ALLOCNO_COVER_CLASS (another_a)
== ALLOCNO_COVER_CLASS (another_parent_a));
ira_assert (ALLOCNO_CLASS (another_a)
== ALLOCNO_CLASS (another_parent_a));
ira_assert (ALLOCNO_NUM_OBJECTS (another_a)
== ALLOCNO_NUM_OBJECTS (another_parent_a));
SET_MINMAX_SET_BIT (conflicts[parent_num],
OBJECT_CONFLICT_ID (ALLOCNO_OBJECT (another_parent_a,
another_word)),
OBJECT_MIN (parent_obj),
OBJECT_MAX (parent_obj));
another_word)),
parent_min, parent_max);
}
}
@ -792,14 +800,14 @@ print_allocno_conflicts (FILE * file, bool reg_p, ira_allocno_t a)
COPY_HARD_REG_SET (conflicting_hard_regs, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
AND_COMPL_HARD_REG_SET (conflicting_hard_regs, ira_no_alloc_regs);
AND_HARD_REG_SET (conflicting_hard_regs,
reg_class_contents[ALLOCNO_COVER_CLASS (a)]);
reg_class_contents[ALLOCNO_CLASS (a)]);
print_hard_reg_set (file, "\n;; total conflict hard regs:",
conflicting_hard_regs);
COPY_HARD_REG_SET (conflicting_hard_regs, OBJECT_CONFLICT_HARD_REGS (obj));
AND_COMPL_HARD_REG_SET (conflicting_hard_regs, ira_no_alloc_regs);
AND_HARD_REG_SET (conflicting_hard_regs,
reg_class_contents[ALLOCNO_COVER_CLASS (a)]);
reg_class_contents[ALLOCNO_CLASS (a)]);
print_hard_reg_set (file, ";; conflict hard regs:",
conflicting_hard_regs);
putc ('\n', file);
@ -876,6 +884,7 @@ ira_build_conflicts (void)
FOR_EACH_ALLOCNO (a, ai)
{
int i, n = ALLOCNO_NUM_OBJECTS (a);
for (i = 0; i < n; i++)
{
ira_object_t obj = ALLOCNO_OBJECT (a, i);

File diff suppressed because it is too large.

@ -1,5 +1,5 @@
/* Integrated Register Allocator. Changing code and generating moves.
Copyright (C) 2006, 2007, 2008, 2009, 2010
Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Vladimir Makarov <vmakarov@redhat.com>.
@ -19,6 +19,52 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* When we have more than one region, we need to change the original RTL
code after coloring. Let us consider two allocnos representing the
same pseudo-register outside and inside a region respectively.
They can get different hard-registers. The reload pass works on a
pseudo-register basis and there is no way to tell reload that a
pseudo could be in different hard registers, and it is even more
difficult to say in which places of the code the pseudo should have
particular hard-registers. So in this case IRA has to create and
use a new pseudo-register inside the region and add code to move
allocno values on the region's borders. This is done by the code
in this file.
The code makes a top-down traversal of the regions and generates new
pseudos and the move code on the region borders. In some
complicated cases IRA can create a new pseudo used temporarily to
move allocno values when a swap of values stored in two
hard-registers is needed (e.g. two allocnos representing different
pseudos outside the region got hard registers 1 and 2 respectively,
and the corresponding allocnos inside the region got hard
registers 2 and 1). At this stage, the new pseudo is marked as
spilled.
IRA still creates the pseudo-register and the moves on the region
borders even when both corresponding allocnos were assigned to
the same hard-register. This is done because, if the reload pass for
some reason spills a pseudo-register representing the original
pseudo outside or inside the region, the effect will be smaller
because another pseudo will still be in the hard-register. In most
cases, this is better than spilling the original pseudo over its
whole live-range. If reload does not change the allocation for the
two pseudo-registers, the trivial move will be removed by
post-reload optimizations.
IRA does not generate a new pseudo and moves for the allocno values
if both allocnos representing an original pseudo inside and
outside the region are assigned to the same hard register and the
register pressure in the region for the corresponding pressure class
is less than the number of hard registers available for that
pressure class.
IRA also does some optimizations to remove redundant moves which are
transformed into stores by the reload pass on CFG edges
representing exits from the region.
IRA tries to reduce duplication of the code generated on CFG edges
which are entries to and exits from regions by moving some code to
the edge sources or destinations when possible. */
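The hard-register swap mentioned in the comment above is the classic case that
needs a third location; the following is an editor's minimal, self-contained
illustration in plain C (not GCC internals, all names hypothetical) of why
ira-emit.c introduces a temporary, initially spilled, pseudo for such cycles.

#include <stdio.h>

/* Illustrative sketch only: swapping the values held in two "hard
   registers" requires a third location, played here by TMP and in
   ira-emit.c by the temporary pseudo.  */
static void
swap_through_temp (int *reg1, int *reg2)
{
  int tmp = *reg1;   /* save the value living in the first register */
  *reg1 = *reg2;     /* first border move */
  *reg2 = tmp;       /* second border move, reloading the temporary */
}

int
main (void)
{
  int r1 = 100, r2 = 200;
  swap_through_temp (&r1, &r2);
  printf ("%d %d\n", r1, r2);   /* prints "200 100" */
  return 0;
}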
#include "config.h"
#include "system.h"
@ -44,6 +90,73 @@ along with GCC; see the file COPYING3. If not see
#include "ira-int.h"
/* Data used to emit live range split insns and to flatten the IR. */
ira_emit_data_t ira_allocno_emit_data;
/* Definitions for vectors of pointers. */
typedef void *void_p;
DEF_VEC_P (void_p);
DEF_VEC_ALLOC_P (void_p,heap);
/* Pointers to data allocated for allocnos being created during
emitting. Usually there are very few such allocnos because they
are created only for resolving loops in register shuffling. */
static VEC(void_p, heap) *new_allocno_emit_data_vec;
/* Allocate and initiate the emit data. */
void
ira_initiate_emit_data (void)
{
ira_allocno_t a;
ira_allocno_iterator ai;
ira_allocno_emit_data
= (ira_emit_data_t) ira_allocate (ira_allocnos_num
* sizeof (struct ira_emit_data));
memset (ira_allocno_emit_data, 0,
ira_allocnos_num * sizeof (struct ira_emit_data));
FOR_EACH_ALLOCNO (a, ai)
ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
new_allocno_emit_data_vec = VEC_alloc (void_p, heap, 50);
}
/* Free the emit data. */
void
ira_finish_emit_data (void)
{
void_p p;
ira_allocno_t a;
ira_allocno_iterator ai;
ira_free (ira_allocno_emit_data);
FOR_EACH_ALLOCNO (a, ai)
ALLOCNO_ADD_DATA (a) = NULL;
for (;VEC_length (void_p, new_allocno_emit_data_vec) != 0;)
{
p = VEC_pop (void_p, new_allocno_emit_data_vec);
ira_free (p);
}
VEC_free (void_p, heap, new_allocno_emit_data_vec);
}
/* Create and return a new allocno with given REGNO and
LOOP_TREE_NODE. Allocate emit data for it. */
static ira_allocno_t
create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node)
{
ira_allocno_t a;
a = ira_create_allocno (regno, false, loop_tree_node);
ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
VEC_safe_push (void_p, heap, new_allocno_emit_data_vec, ALLOCNO_ADD_DATA (a));
return a;
}
/* See comments below. */
typedef struct move *move_t;
/* The structure represents an allocno move. Both allocnos have the
@ -171,7 +284,7 @@ change_regs (rtx *loc)
return false;
if (ira_curr_regno_allocno_map[regno] == NULL)
return false;
reg = ALLOCNO_REG (ira_curr_regno_allocno_map[regno]);
reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]);
if (reg == *loc)
return false;
*loc = reg;
@ -258,9 +371,9 @@ set_allocno_reg (ira_allocno_t allocno, rtx reg)
a != NULL;
a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
ALLOCNO_REG (a) = reg;
ALLOCNO_EMIT_DATA (a)->reg = reg;
for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
ALLOCNO_REG (a) = reg;
ALLOCNO_EMIT_DATA (a)->reg = reg;
regno = ALLOCNO_REGNO (allocno);
for (a = allocno;;)
{
@ -273,9 +386,9 @@ set_allocno_reg (ira_allocno_t allocno, rtx reg)
}
if (a == NULL)
continue;
if (ALLOCNO_CHILD_RENAMED_P (a))
if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
break;
ALLOCNO_CHILD_RENAMED_P (a) = true;
ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
}
}
@ -289,7 +402,9 @@ entered_from_non_parent_p (ira_loop_tree_node_t loop_node)
edge e;
edge_iterator ei;
for (bb_node = loop_node->children; bb_node != NULL; bb_node = bb_node->next)
for (bb_node = loop_node->children;
bb_node != NULL;
bb_node = bb_node->next)
if (bb_node->bb != NULL)
{
FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
@ -344,14 +459,14 @@ store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno)
ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
&& ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
orig_regno = ALLOCNO_REGNO (src_allocno);
regno = REGNO (ALLOCNO_REG (dest_allocno));
regno = REGNO (allocno_emit_reg (dest_allocno));
for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
node != NULL;
node = node->parent)
{
a = node->regno_allocno_map[orig_regno];
ira_assert (a != NULL);
if (REGNO (ALLOCNO_REG (a)) == (unsigned) regno)
if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
/* We achieved the destination and everything is ok. */
return true;
else if (bitmap_bit_p (node->modified_regnos, orig_regno))
@ -396,8 +511,8 @@ generate_edge_moves (edge e)
{
src_allocno = src_map[regno];
dest_allocno = dest_map[regno];
if (REGNO (ALLOCNO_REG (src_allocno))
== REGNO (ALLOCNO_REG (dest_allocno)))
if (REGNO (allocno_emit_reg (src_allocno))
== REGNO (allocno_emit_reg (dest_allocno)))
continue;
/* Remove unnecessary stores at the region exit. We should do
this for readonly memory for sure and this is guaranteed by
@ -408,8 +523,8 @@ generate_edge_moves (edge e)
&& ALLOCNO_HARD_REGNO (src_allocno) >= 0
&& store_can_be_removed_p (src_allocno, dest_allocno))
{
ALLOCNO_MEM_OPTIMIZED_DEST (src_allocno) = dest_allocno;
ALLOCNO_MEM_OPTIMIZED_DEST_P (dest_allocno) = true;
ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Remove r%d:a%d->a%d(mem)\n",
regno, ALLOCNO_NUM (src_allocno),
@ -445,7 +560,7 @@ change_loop (ira_loop_tree_node_t node)
bool used_p;
ira_allocno_t allocno, parent_allocno, *map;
rtx insn, original_reg;
enum reg_class cover_class;
enum reg_class aclass, pclass;
ira_loop_tree_node_t parent;
if (node != ira_loop_tree_root)
@ -474,7 +589,8 @@ change_loop (ira_loop_tree_node_t node)
{
allocno = ira_allocnos[i];
regno = ALLOCNO_REGNO (allocno);
cover_class = ALLOCNO_COVER_CLASS (allocno);
aclass = ALLOCNO_CLASS (allocno);
pclass = ira_pressure_class_translate[aclass];
parent_allocno = map[regno];
ira_assert (regno < ira_reg_equiv_len);
/* We generate the same hard register move because the
@ -487,8 +603,8 @@ change_loop (ira_loop_tree_node_t node)
&& (ALLOCNO_HARD_REGNO (allocno)
== ALLOCNO_HARD_REGNO (parent_allocno))
&& (ALLOCNO_HARD_REGNO (allocno) < 0
|| (parent->reg_pressure[cover_class] + 1
<= ira_available_class_regs[cover_class])
|| (parent->reg_pressure[pclass] + 1
<= ira_available_class_regs[pclass])
|| TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
[ALLOCNO_MODE (allocno)],
ALLOCNO_HARD_REGNO (allocno))
@ -498,9 +614,10 @@ change_loop (ira_loop_tree_node_t node)
|| ira_reg_equiv_invariant_p[regno]
|| ira_reg_equiv_const[regno] != NULL_RTX))
continue;
original_reg = ALLOCNO_REG (allocno);
original_reg = allocno_emit_reg (allocno);
if (parent_allocno == NULL
|| REGNO (ALLOCNO_REG (parent_allocno)) == REGNO (original_reg))
|| (REGNO (allocno_emit_reg (parent_allocno))
== REGNO (original_reg)))
{
if (internal_flag_ira_verbose > 3 && ira_dump_file)
fprintf (ira_dump_file, " %i vs parent %i:",
@ -523,11 +640,11 @@ change_loop (ira_loop_tree_node_t node)
if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
continue;
used_p = !bitmap_set_bit (used_regno_bitmap, regno);
ALLOCNO_SOMEWHERE_RENAMED_P (allocno) = true;
ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
if (! used_p)
continue;
bitmap_set_bit (renamed_regno_bitmap, regno);
set_allocno_reg (allocno, create_new_reg (ALLOCNO_REG (allocno)));
set_allocno_reg (allocno, create_new_reg (allocno_emit_reg (allocno)));
}
}
@ -543,8 +660,8 @@ set_allocno_somewhere_renamed_p (void)
{
regno = ALLOCNO_REGNO (allocno);
if (bitmap_bit_p (renamed_regno_bitmap, regno)
&& REGNO (ALLOCNO_REG (allocno)) == regno)
ALLOCNO_SOMEWHERE_RENAMED_P (allocno) = true;
&& REGNO (allocno_emit_reg (allocno)) == regno)
ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
}
}
@ -724,16 +841,16 @@ modify_move_list (move_t list)
subsequent IRA internal representation
flattening. */
new_allocno
= ira_create_allocno (ALLOCNO_REGNO (set_move->to), false,
= create_new_allocno (ALLOCNO_REGNO (set_move->to),
ALLOCNO_LOOP_TREE_NODE (set_move->to));
ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
ira_set_allocno_cover_class
(new_allocno, ALLOCNO_COVER_CLASS (set_move->to));
ira_set_allocno_class (new_allocno,
ALLOCNO_CLASS (set_move->to));
ira_create_allocno_objects (new_allocno);
ALLOCNO_ASSIGNED_P (new_allocno) = true;
ALLOCNO_HARD_REGNO (new_allocno) = -1;
ALLOCNO_REG (new_allocno)
= create_new_reg (ALLOCNO_REG (set_move->to));
ALLOCNO_EMIT_DATA (new_allocno)->reg
= create_new_reg (allocno_emit_reg (set_move->to));
/* Make it possibly conflicting with all earlier
created allocnos. Cases where temporary allocnos
@ -756,7 +873,7 @@ modify_move_list (move_t list)
fprintf (ira_dump_file,
" Creating temporary allocno a%dr%d\n",
ALLOCNO_NUM (new_allocno),
REGNO (ALLOCNO_REG (new_allocno)));
REGNO (allocno_emit_reg (new_allocno)));
}
}
if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
@ -786,13 +903,14 @@ emit_move_list (move_t list, int freq)
int cost;
rtx result, insn;
enum machine_mode mode;
enum reg_class cover_class;
enum reg_class aclass;
start_sequence ();
for (; list != NULL; list = list->next)
{
start_sequence ();
emit_move_insn (ALLOCNO_REG (list->to), ALLOCNO_REG (list->from));
emit_move_insn (allocno_emit_reg (list->to),
allocno_emit_reg (list->from));
list->insn = get_insns ();
end_sequence ();
/* The reload needs to have set up insn codes. If the reload
@ -803,13 +921,13 @@ emit_move_list (move_t list, int freq)
recog_memoized (insn);
emit_insn (list->insn);
mode = ALLOCNO_MODE (list->to);
cover_class = ALLOCNO_COVER_CLASS (list->to);
aclass = ALLOCNO_CLASS (list->to);
cost = 0;
if (ALLOCNO_HARD_REGNO (list->to) < 0)
{
if (ALLOCNO_HARD_REGNO (list->from) >= 0)
{
cost = ira_memory_move_cost[mode][cover_class][0] * freq;
cost = ira_memory_move_cost[mode][aclass][0] * freq;
ira_store_cost += cost;
}
}
@ -817,14 +935,14 @@ emit_move_list (move_t list, int freq)
{
if (ALLOCNO_HARD_REGNO (list->to) >= 0)
{
cost = ira_memory_move_cost[mode][cover_class][0] * freq;
cost = ira_memory_move_cost[mode][aclass][0] * freq;
ira_load_cost += cost;
}
}
else
{
cost = (ira_get_register_move_cost (mode, cover_class, cover_class)
* freq);
ira_init_register_move_cost_if_necessary (mode);
cost = ira_register_move_cost[mode][aclass][aclass] * freq;
ira_shuffle_cost += cost;
}
ira_overall_cost += cost;
@ -902,7 +1020,7 @@ update_costs (ira_allocno_t a, bool read_p, int freq)
ALLOCNO_NREFS (a)++;
ALLOCNO_FREQ (a) += freq;
ALLOCNO_MEMORY_COST (a)
+= (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)]
+= (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
[read_p ? 1 : 0] * freq);
if (ALLOCNO_CAP (a) != NULL)
a = ALLOCNO_CAP (a);
@ -956,7 +1074,7 @@ add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
{
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Allocate conflicts for a%dr%d\n",
ALLOCNO_NUM (to), REGNO (ALLOCNO_REG (to)));
ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
ira_allocate_object_conflicts (to_obj, n);
}
}
@ -969,8 +1087,9 @@ add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
fprintf (ira_dump_file, " Adding cp%d:a%dr%d-a%dr%d\n",
cp->num, ALLOCNO_NUM (cp->first),
REGNO (ALLOCNO_REG (cp->first)), ALLOCNO_NUM (cp->second),
REGNO (ALLOCNO_REG (cp->second)));
REGNO (allocno_emit_reg (cp->first)),
ALLOCNO_NUM (cp->second),
REGNO (allocno_emit_reg (cp->second)));
nr = ALLOCNO_NUM_OBJECTS (from);
for (i = 0; i < nr; i++)
@ -984,7 +1103,7 @@ add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
fprintf (ira_dump_file,
" Adding range [%d..%d] to allocno a%dr%d\n",
start, ira_max_point, ALLOCNO_NUM (from),
REGNO (ALLOCNO_REG (from)));
REGNO (allocno_emit_reg (from)));
}
else
{
@ -993,7 +1112,7 @@ add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
fprintf (ira_dump_file,
" Adding range [%d..%d] to allocno a%dr%d\n",
r->start, ira_max_point, ALLOCNO_NUM (from),
REGNO (ALLOCNO_REG (from)));
REGNO (allocno_emit_reg (from)));
}
}
ira_max_point++;
@ -1020,7 +1139,7 @@ add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
fprintf (ira_dump_file,
" Adding range [%d..%d] to allocno a%dr%d\n",
r->start, r->finish, ALLOCNO_NUM (move->to),
REGNO (ALLOCNO_REG (move->to)));
REGNO (allocno_emit_reg (move->to)));
}
}
}
@ -1030,7 +1149,7 @@ add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
int nr, i;
a = node->regno_allocno_map[regno];
if ((to = ALLOCNO_MEM_OPTIMIZED_DEST (a)) != NULL)
if ((to = ALLOCNO_EMIT_DATA (a)->mem_optimized_dest) != NULL)
a = to;
nr = ALLOCNO_NUM_OBJECTS (a);
for (i = 0; i < nr; i++)
@ -1044,7 +1163,7 @@ add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node,
" Adding range [%d..%d] to live through %s allocno a%dr%d\n",
start, ira_max_point - 1,
to != NULL ? "upper level" : "",
ALLOCNO_NUM (a), REGNO (ALLOCNO_REG (a)));
ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
}
}
@ -1097,7 +1216,7 @@ ira_emit (bool loops_p)
ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
ALLOCNO_REG (a) = regno_reg_rtx[ALLOCNO_REGNO (a)];
ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
if (! loops_p)
return;
at_bb_start = (move_t *) ira_allocate (sizeof (move_t) * last_basic_block);


@ -43,9 +43,9 @@ along with GCC; see the file COPYING3. If not see
profile driven feedback is available and the function is never
executed, frequency is always equivalent. Otherwise rescale the
edge frequency. */
#define REG_FREQ_FROM_EDGE_FREQ(freq) \
(optimize_size || (flag_branch_probabilities && !ENTRY_BLOCK_PTR->count) \
? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
#define REG_FREQ_FROM_EDGE_FREQ(freq) \
(optimize_size || (flag_branch_probabilities && !ENTRY_BLOCK_PTR->count) \
? REG_FREQ_MAX : (freq * REG_FREQ_MAX / BB_FREQ_MAX) \
? (freq * REG_FREQ_MAX / BB_FREQ_MAX) : 1)
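A brief usage sketch (an editor's illustration, not part of the patch): the
macro is meant to be applied to an edge frequency such as the one returned by
GCC's EDGE_FREQUENCY macro before the result is used to weight allocno costs.
The helper name below is hypothetical.

/* Hypothetical helper: rescale the frequency of edge E into the
   REG_FREQ scale used for allocno cost accounting.  */
static int
sketch_edge_reg_freq (edge e)
{
  return REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e));
}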
/* All natural loops. */
@ -122,7 +122,7 @@ struct ira_loop_tree_node
bool entered_from_non_parent_p;
/* Maximal register pressure inside loop for given register class
(defined only for the cover classes). */
(defined only for the pressure classes). */
int reg_pressure[N_REG_CLASSES];
/* Numbers of allocnos referred or living in the loop node (except
@ -193,11 +193,8 @@ extern ira_loop_tree_node_t ira_loop_nodes;
/* The structure describes program points where a given allocno lives.
To save memory we store allocno conflicts only for the same cover
class allocnos which is enough to assign hard registers. To find
conflicts for other allocnos (e.g. to assign stack memory slot) we
use the live ranges. If the live ranges of two allocnos are
intersected, the allocnos are in conflict. */
If the live ranges of two allocnos are intersected, the allocnos
are in conflict. */
struct live_range
{
/* Object whose live range is described by given structure. */
@ -232,8 +229,7 @@ struct ira_object
ira_allocno_t allocno;
/* Vector of accumulated conflicting conflict records with NULL end
marker (if OBJECT_CONFLICT_VEC_P is true) or conflict bit vector
otherwise. Only ira_objects belonging to allocnos with the
same cover class are in the vector or in the bit vector. */
otherwise. */
void *conflicts_array;
/* Pointer to structures describing at what program point the
object lives. We always maintain the list in such way that *the
@ -256,7 +252,7 @@ struct ira_object
int min, max;
/* Initial and accumulated hard registers conflicting with this
object and as a consequence can not be assigned to the allocno.
All non-allocatable hard regs and hard regs of cover classes
All non-allocatable hard regs and hard regs of register classes
different from given allocno one are included in the sets. */
HARD_REG_SET conflict_hard_regs, total_conflict_hard_regs;
/* Number of accumulated conflicts in the vector of conflicting
@ -266,6 +262,9 @@ struct ira_object
ira_object structures. Otherwise, we use a bit vector indexed
by conflict ID numbers. */
unsigned int conflict_vec_p : 1;
/* Different additional data. It is used to decrease size of
allocno data footprint. */
void *add_data;
};
/* A structure representing an allocno (allocation entity). Allocno
@ -285,16 +284,40 @@ struct ira_allocno
int regno;
/* Mode of the allocno which is the mode of the corresponding
pseudo-register. */
enum machine_mode mode;
ENUM_BITFIELD (machine_mode) mode : 8;
/* Register class which should be used for allocation for given
allocno. NO_REGS means that we should use memory. */
ENUM_BITFIELD (reg_class) aclass : 16;
/* During the reload, value TRUE means that we should not reassign a
hard register to an allocno that got memory earlier. It is set up
when we removed memory-memory move insn before each iteration of
the reload. */
unsigned int dont_reassign_p : 1;
#ifdef STACK_REGS
/* Set to TRUE if allocno can't be assigned to the stack hard
register correspondingly in this region and area including the
region and all its subregions recursively. */
unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
#endif
/* TRUE value means that there is no sense to spill the allocno
during coloring because the spill will result in additional
reloads in reload pass. */
unsigned int bad_spill_p : 1;
/* TRUE if a hard register or memory has been assigned to the
allocno. */
unsigned int assigned_p : 1;
/* TRUE if conflicts for given allocno are represented by vector of
pointers to the conflicting allocnos. Otherwise, we use a bit
vector where a bit with given index represents allocno with the
same number. */
unsigned int conflict_vec_p : 1;
/* Hard register assigned to given allocno. Negative value means
that memory was allocated to the allocno. During the reload,
spilled allocno has value equal to the corresponding stack slot
number (0, ...) - 2. Value -1 is used for allocnos spilled by the
reload (at this point pseudo-register has only one allocno) which
did not get stack slot yet. */
int hard_regno;
/* Final rtx representation of the allocno. */
rtx reg;
short int hard_regno;
/* Allocnos with the same regno are linked by the following member.
Allocnos corresponding to inner loops are first in the list (it
corresponds to depth-first traverse of the loops). */
@ -312,12 +335,9 @@ struct ira_allocno
int nrefs;
/* Accumulated frequency of usage of the allocno. */
int freq;
/* Register class which should be used for allocation for given
allocno. NO_REGS means that we should use memory. */
enum reg_class cover_class;
/* Minimal accumulated and updated costs of usage register of the
cover class for the allocno. */
int cover_class_cost, updated_cover_class_cost;
allocno class. */
int class_cost, updated_class_cost;
/* Minimal accumulated, and updated costs of memory for the allocno.
At the allocation start, the original and updated costs are
equal. The updated cost may be changed after finishing
@ -342,11 +362,6 @@ struct ira_allocno
/* It is a link to allocno (cap) on lower loop level represented by
given cap. Null if given allocno is not a cap. */
ira_allocno_t cap_member;
/* Coalesced allocnos form a cyclic list. One allocno given by
FIRST_COALESCED_ALLOCNO represents all coalesced allocnos. The
list is chained by NEXT_COALESCED_ALLOCNO. */
ira_allocno_t first_coalesced_allocno;
ira_allocno_t next_coalesced_allocno;
/* The number of objects tracked in the following array. */
int num_objects;
/* An array of structures describing conflict information and live
@ -359,86 +374,34 @@ struct ira_allocno
int call_freq;
/* Accumulated number of the intersected calls. */
int calls_crossed_num;
/* TRUE if the allocno assigned to memory was a destination of
removed move (see ira-emit.c) at loop exit because the value of
the corresponding pseudo-register is not changed inside the
loop. */
unsigned int mem_optimized_dest_p : 1;
/* TRUE if the corresponding pseudo-register has disjoint live
ranges and the other allocnos of the pseudo-register except this
one changed REG. */
unsigned int somewhere_renamed_p : 1;
/* TRUE if allocno with the same REGNO in a subregion has been
renamed, in other words, got a new pseudo-register. */
unsigned int child_renamed_p : 1;
/* During the reload, value TRUE means that we should not reassign a
hard register to an allocno that got memory earlier. It is set up
when we removed memory-memory move insn before each iteration of
the reload. */
unsigned int dont_reassign_p : 1;
#ifdef STACK_REGS
/* Set to TRUE if allocno can't be assigned to the stack hard
register correspondingly in this region and area including the
region and all its subregions recursively. */
unsigned int no_stack_reg_p : 1, total_no_stack_reg_p : 1;
#endif
/* TRUE value means that there is no sense to spill the allocno
during coloring because the spill will result in additional
reloads in reload pass. */
unsigned int bad_spill_p : 1;
/* TRUE value means that the allocno was not removed yet from the
conflicting graph during colouring. */
unsigned int in_graph_p : 1;
/* TRUE if a hard register or memory has been assigned to the
allocno. */
unsigned int assigned_p : 1;
/* TRUE if it is put on the stack to make other allocnos
colorable. */
unsigned int may_be_spilled_p : 1;
/* TRUE if the allocno was removed from the splay tree used to
choose allocnos for spilling (see ira-color.c). */
unsigned int splay_removed_p : 1;
/* Non NULL if we remove restoring value from given allocno to
MEM_OPTIMIZED_DEST at loop exit (see ira-emit.c) because the
allocno value is not changed inside the loop. */
ira_allocno_t mem_optimized_dest;
/* Array of usage costs (accumulated and the one updated during
coloring) for each hard register of the allocno cover class. The
coloring) for each hard register of the allocno class. The
member value can be NULL if all costs are the same and equal to
COVER_CLASS_COST. For example, the costs of two different hard
CLASS_COST. For example, the costs of two different hard
registers can be different if one hard register is callee-saved
and another one is callee-used and the allocno lives through
calls. Another example can be case when for some insn the
corresponding pseudo-register value should be put in specific
register class (e.g. AREG for x86) which is a strict subset of
the allocno cover class (GENERAL_REGS for x86). We have updated
costs to reflect the situation when the usage cost of a hard
register is decreased because the allocno is connected to another
allocno by a copy and the other allocno has been assigned to
the hard register. */
the allocno class (GENERAL_REGS for x86). We have updated costs
to reflect the situation when the usage cost of a hard register
is decreased because the allocno is connected to another allocno
by a copy and the other allocno has been assigned to the hard
register. */
int *hard_reg_costs, *updated_hard_reg_costs;
/* Array of decreasing costs (accumulated and the one updated during
coloring) for allocnos conflicting with given allocno for hard
regno of the allocno cover class. The member value can be NULL
if all costs are the same. These costs are used to reflect
preferences of other allocnos not assigned yet during assigning
to given allocno. */
regno of the allocno class. The member value can be NULL if all
costs are the same. These costs are used to reflect preferences
of other allocnos not assigned yet during assigning to given
allocno. */
int *conflict_hard_reg_costs, *updated_conflict_hard_reg_costs;
/* Size (in hard registers) of the same cover class allocnos with
TRUE in_graph_p value and conflicting with given allocno during
each point of graph coloring. */
int left_conflicts_size;
/* Number of hard registers of the allocno cover class really
available for the allocno allocation. */
int available_regs_num;
/* Allocnos in a bucket (used in coloring) chained by the following
two members. */
ira_allocno_t next_bucket_allocno;
ira_allocno_t prev_bucket_allocno;
/* Used for temporary purposes. */
int temp;
/* Different additional data. It is used to decrease size of
allocno data footprint. */
void *add_data;
};
/* All members of the allocno structures should be accessed only
through the following macros. */
#define ALLOCNO_NUM(A) ((A)->num)
@ -463,10 +426,7 @@ struct ira_allocno
#define ALLOCNO_TOTAL_NO_STACK_REG_P(A) ((A)->total_no_stack_reg_p)
#endif
#define ALLOCNO_BAD_SPILL_P(A) ((A)->bad_spill_p)
#define ALLOCNO_IN_GRAPH_P(A) ((A)->in_graph_p)
#define ALLOCNO_ASSIGNED_P(A) ((A)->assigned_p)
#define ALLOCNO_MAY_BE_SPILLED_P(A) ((A)->may_be_spilled_p)
#define ALLOCNO_SPLAY_REMOVED_P(A) ((A)->splay_removed_p)
#define ALLOCNO_MODE(A) ((A)->mode)
#define ALLOCNO_COPIES(A) ((A)->allocno_copies)
#define ALLOCNO_HARD_REG_COSTS(A) ((A)->hard_reg_costs)
@ -475,36 +435,71 @@ struct ira_allocno
((A)->conflict_hard_reg_costs)
#define ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS(A) \
((A)->updated_conflict_hard_reg_costs)
#define ALLOCNO_LEFT_CONFLICTS_SIZE(A) ((A)->left_conflicts_size)
#define ALLOCNO_COVER_CLASS(A) ((A)->cover_class)
#define ALLOCNO_COVER_CLASS_COST(A) ((A)->cover_class_cost)
#define ALLOCNO_UPDATED_COVER_CLASS_COST(A) ((A)->updated_cover_class_cost)
#define ALLOCNO_CLASS(A) ((A)->aclass)
#define ALLOCNO_CLASS_COST(A) ((A)->class_cost)
#define ALLOCNO_UPDATED_CLASS_COST(A) ((A)->updated_class_cost)
#define ALLOCNO_MEMORY_COST(A) ((A)->memory_cost)
#define ALLOCNO_UPDATED_MEMORY_COST(A) ((A)->updated_memory_cost)
#define ALLOCNO_EXCESS_PRESSURE_POINTS_NUM(A) ((A)->excess_pressure_points_num)
#define ALLOCNO_AVAILABLE_REGS_NUM(A) ((A)->available_regs_num)
#define ALLOCNO_NEXT_BUCKET_ALLOCNO(A) ((A)->next_bucket_allocno)
#define ALLOCNO_PREV_BUCKET_ALLOCNO(A) ((A)->prev_bucket_allocno)
#define ALLOCNO_TEMP(A) ((A)->temp)
#define ALLOCNO_FIRST_COALESCED_ALLOCNO(A) ((A)->first_coalesced_allocno)
#define ALLOCNO_NEXT_COALESCED_ALLOCNO(A) ((A)->next_coalesced_allocno)
#define ALLOCNO_EXCESS_PRESSURE_POINTS_NUM(A) \
((A)->excess_pressure_points_num)
#define ALLOCNO_OBJECT(A,N) ((A)->objects[N])
#define ALLOCNO_NUM_OBJECTS(A) ((A)->num_objects)
#define ALLOCNO_ADD_DATA(A) ((A)->add_data)
#define OBJECT_ALLOCNO(C) ((C)->allocno)
#define OBJECT_SUBWORD(C) ((C)->subword)
#define OBJECT_CONFLICT_ARRAY(C) ((C)->conflicts_array)
#define OBJECT_CONFLICT_VEC(C) ((ira_object_t *)(C)->conflicts_array)
#define OBJECT_CONFLICT_BITVEC(C) ((IRA_INT_TYPE *)(C)->conflicts_array)
#define OBJECT_CONFLICT_ARRAY_SIZE(C) ((C)->conflicts_array_size)
#define OBJECT_CONFLICT_VEC_P(C) ((C)->conflict_vec_p)
#define OBJECT_NUM_CONFLICTS(C) ((C)->num_accumulated_conflicts)
#define OBJECT_CONFLICT_HARD_REGS(C) ((C)->conflict_hard_regs)
#define OBJECT_TOTAL_CONFLICT_HARD_REGS(C) ((C)->total_conflict_hard_regs)
#define OBJECT_MIN(C) ((C)->min)
#define OBJECT_MAX(C) ((C)->max)
#define OBJECT_CONFLICT_ID(C) ((C)->id)
#define OBJECT_LIVE_RANGES(A) ((A)->live_ranges)
/* Typedef for pointer to the subsequent structure. */
typedef struct ira_emit_data *ira_emit_data_t;
/* Allocno-bound data used to emit pseudo live range split insns and
to flatten the IR. */
struct ira_emit_data
{
/* TRUE if the allocno assigned to memory was a destination of
removed move (see ira-emit.c) at loop exit because the value of
the corresponding pseudo-register is not changed inside the
loop. */
unsigned int mem_optimized_dest_p : 1;
/* TRUE if the corresponding pseudo-register has disjoint live
ranges and the other allocnos of the pseudo-register except this
one changed REG. */
unsigned int somewhere_renamed_p : 1;
/* TRUE if allocno with the same REGNO in a subregion has been
renamed, in other words, got a new pseudo-register. */
unsigned int child_renamed_p : 1;
/* Final rtx representation of the allocno. */
rtx reg;
/* Non NULL if we remove restoring value from given allocno to
MEM_OPTIMIZED_DEST at loop exit (see ira-emit.c) because the
allocno value is not changed inside the loop. */
ira_allocno_t mem_optimized_dest;
};
#define ALLOCNO_EMIT_DATA(a) ((ira_emit_data_t) ALLOCNO_ADD_DATA (a))
/* Data used to emit live range split insns and to flatten the IR. */
extern ira_emit_data_t ira_allocno_emit_data;
/* Abbreviation for frequent emit data access. */
static inline rtx
allocno_emit_reg (ira_allocno_t a)
{
return ALLOCNO_EMIT_DATA (a)->reg;
}
#define OBJECT_ALLOCNO(O) ((O)->allocno)
#define OBJECT_SUBWORD(O) ((O)->subword)
#define OBJECT_CONFLICT_ARRAY(O) ((O)->conflicts_array)
#define OBJECT_CONFLICT_VEC(O) ((ira_object_t *)(O)->conflicts_array)
#define OBJECT_CONFLICT_BITVEC(O) ((IRA_INT_TYPE *)(O)->conflicts_array)
#define OBJECT_CONFLICT_ARRAY_SIZE(O) ((O)->conflicts_array_size)
#define OBJECT_CONFLICT_VEC_P(O) ((O)->conflict_vec_p)
#define OBJECT_NUM_CONFLICTS(O) ((O)->num_accumulated_conflicts)
#define OBJECT_CONFLICT_HARD_REGS(O) ((O)->conflict_hard_regs)
#define OBJECT_TOTAL_CONFLICT_HARD_REGS(O) ((O)->total_conflict_hard_regs)
#define OBJECT_MIN(O) ((O)->min)
#define OBJECT_MAX(O) ((O)->max)
#define OBJECT_CONFLICT_ID(O) ((O)->id)
#define OBJECT_LIVE_RANGES(O) ((O)->live_ranges)
#define OBJECT_ADD_DATA(O) ((O)->add_data)
/* Map regno -> allocnos with given regno (see comments for
allocno member `next_regno_allocno'). */
@ -590,6 +585,7 @@ extern int ira_overall_cost;
extern int ira_reg_cost, ira_mem_cost;
extern int ira_load_cost, ira_store_cost, ira_shuffle_cost;
extern int ira_move_loops_num, ira_additional_jumps_num;
/* This page contains a bitset implementation called 'min/max sets' used to
record conflicts in IRA.
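As a rough illustration of the min/max set idea referenced in the comment
above, here is an editor's sketch (assumed names, not the IRA implementation):
membership is stored only for ids in [min, max], so a bit lookup first
subtracts MIN.  The local typedef and macro stand in for IRA's IRA_INT_TYPE
and IRA_INT_BITS.

typedef unsigned int sketch_int_type;
#define SKETCH_INT_BITS (8 * sizeof (sketch_int_type))

/* Return nonzero if ID is a member of the min/max set VEC covering the
   id range [MIN, MAX]; ids outside the range are never members.  */
static inline int
sketch_minmax_bit_p (const sketch_int_type *vec, int id, int min, int max)
{
  unsigned int off;

  if (id < min || id > max)
    return 0;
  off = (unsigned int) (id - min);
  return (vec[off / SKETCH_INT_BITS] >> (off % SKETCH_INT_BITS)) & 1;
}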
@ -757,11 +753,6 @@ struct target_ira_int {
struct costs *x_op_costs[MAX_RECOG_OPERANDS];
struct costs *x_this_op_costs[MAX_RECOG_OPERANDS];
/* Classes used for cost calculation. They may be different on
different iterations of the cost calculations or in different
optimization modes. */
enum reg_class *x_cost_classes;
/* Hard registers that can not be used for the register allocator for
all functions of the current compilation unit. */
HARD_REG_SET x_no_unit_alloc_regs;
@ -776,6 +767,12 @@ struct target_ira_int {
ira_get_may_move_cost instead. */
move_table *x_ira_register_move_cost[MAX_MACHINE_MODE];
/* Array analogs of the macros MEMORY_MOVE_COST and
REGISTER_MOVE_COST but they contain maximal cost not minimal as
the previous two ones do. */
short int x_ira_max_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
move_table *x_ira_max_register_move_cost[MAX_MACHINE_MODE];
/* Similar to may_move_in_cost but it is calculated in IRA instead of
regclass. Another difference is that we take only available hard
registers into account to figure out whether one register class is a subset of
@ -790,6 +787,18 @@ struct target_ira_int {
ira_get_may_move_cost instead. */
move_table *x_ira_may_move_out_cost[MAX_MACHINE_MODE];
/* Similar to ira_may_move_in_cost and ira_may_move_out_cost but they
return maximal cost. */
move_table *x_ira_max_may_move_in_cost[MAX_MACHINE_MODE];
move_table *x_ira_max_may_move_out_cost[MAX_MACHINE_MODE];
/* Map class->true if class is a possible allocno class, false
otherwise. */
bool x_ira_reg_allocno_class_p[N_REG_CLASSES];
/* Map class->true if class is a pressure class, false otherwise. */
bool x_ira_reg_pressure_class_p[N_REG_CLASSES];
/* Register class subset relation: TRUE if the first class is a subset
of the second one considering only hard registers available for the
allocation. */
@ -809,16 +818,20 @@ struct target_ira_int {
/* Array whose values are hard regset of hard registers available for
the allocation of given register class whose HARD_REGNO_MODE_OK
values for given mode are zero. */
HARD_REG_SET x_prohibited_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
HARD_REG_SET x_ira_prohibited_class_mode_regs[N_REG_CLASSES][NUM_MACHINE_MODES];
/* The value is number of elements in the subsequent array. */
int x_ira_important_classes_num;
/* The array containing non-empty classes (including non-empty cover
classes) which are subclasses of cover classes. Such classes are
/* The array containing all non-empty classes. Such classes are
important for calculation of the hard register usage costs. */
enum reg_class x_ira_important_classes[N_REG_CLASSES];
/* The array containing indexes of important classes in the previous
array. The array elements are defined only for important
classes. */
int x_ira_important_class_nums[N_REG_CLASSES];
/* The biggest important class inside of intersection of the two
classes (that is calculated taking only hard registers available
for allocation into account). If both classes contain no hard
@ -837,14 +850,15 @@ struct target_ira_int {
allocation into account. */
enum reg_class x_ira_reg_class_super_classes[N_REG_CLASSES][N_REG_CLASSES];
/* The biggest important class inside of union of the two classes
(that is calculated taking only hard registers available for
allocation into account). If both classes contain no hard
registers available for allocation, the value is calculated
taking all hard-registers including fixed ones into account. In
other words, the value is the corresponding reg_class_subunion
value. */
enum reg_class x_ira_reg_class_union[N_REG_CLASSES][N_REG_CLASSES];
/* The biggest (smallest) important class inside of (covering) union
of the two classes (that is calculated taking only hard registers
available for allocation into account). If both classes
contain no hard registers available for allocation, the value is
calculated taking all hard-registers including fixed ones
into account. In other words, the value is the corresponding
reg_class_subunion (reg_class_superunion) value. */
enum reg_class x_ira_reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES];
enum reg_class x_ira_reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES];
/* For each reg class, table listing all the classes contained in it
(excluding the class itself. Non-allocatable registers are
@ -871,43 +885,58 @@ extern struct target_ira_int *this_target_ira_int;
(this_target_ira_int->x_ira_reg_mode_hard_regset)
#define ira_register_move_cost \
(this_target_ira_int->x_ira_register_move_cost)
#define ira_max_memory_move_cost \
(this_target_ira_int->x_ira_max_memory_move_cost)
#define ira_max_register_move_cost \
(this_target_ira_int->x_ira_max_register_move_cost)
#define ira_may_move_in_cost \
(this_target_ira_int->x_ira_may_move_in_cost)
#define ira_may_move_out_cost \
(this_target_ira_int->x_ira_may_move_out_cost)
#define ira_max_may_move_in_cost \
(this_target_ira_int->x_ira_max_may_move_in_cost)
#define ira_max_may_move_out_cost \
(this_target_ira_int->x_ira_max_may_move_out_cost)
#define ira_reg_allocno_class_p \
(this_target_ira_int->x_ira_reg_allocno_class_p)
#define ira_reg_pressure_class_p \
(this_target_ira_int->x_ira_reg_pressure_class_p)
#define ira_class_subset_p \
(this_target_ira_int->x_ira_class_subset_p)
#define ira_non_ordered_class_hard_regs \
(this_target_ira_int->x_ira_non_ordered_class_hard_regs)
#define ira_class_hard_reg_index \
(this_target_ira_int->x_ira_class_hard_reg_index)
#define prohibited_class_mode_regs \
(this_target_ira_int->x_prohibited_class_mode_regs)
#define ira_prohibited_class_mode_regs \
(this_target_ira_int->x_ira_prohibited_class_mode_regs)
#define ira_important_classes_num \
(this_target_ira_int->x_ira_important_classes_num)
#define ira_important_classes \
(this_target_ira_int->x_ira_important_classes)
#define ira_important_class_nums \
(this_target_ira_int->x_ira_important_class_nums)
#define ira_reg_class_intersect \
(this_target_ira_int->x_ira_reg_class_intersect)
#define ira_reg_classes_intersect_p \
(this_target_ira_int->x_ira_reg_classes_intersect_p)
#define ira_reg_class_super_classes \
(this_target_ira_int->x_ira_reg_class_super_classes)
#define ira_reg_class_union \
(this_target_ira_int->x_ira_reg_class_union)
#define ira_reg_class_subunion \
(this_target_ira_int->x_ira_reg_class_subunion)
#define ira_reg_class_superunion \
(this_target_ira_int->x_ira_reg_class_superunion)
#define ira_prohibited_mode_move_regs \
(this_target_ira_int->x_ira_prohibited_mode_move_regs)
/* ira.c: */
extern void *ira_allocate (size_t);
extern void *ira_reallocate (void *, size_t);
extern void ira_free (void *addr);
extern bitmap ira_allocate_bitmap (void);
extern void ira_free_bitmap (bitmap);
extern void ira_print_disposition (FILE *);
extern void ira_debug_disposition (void);
extern void ira_debug_class_cover (void);
extern void ira_debug_allocno_classes (void);
extern void ira_init_register_move_cost (enum machine_mode);
/* The length of the two following arrays. */
@ -938,7 +967,7 @@ extern ira_allocno_t ira_parent_allocno (ira_allocno_t);
extern ira_allocno_t ira_parent_or_cap_allocno (ira_allocno_t);
extern ira_allocno_t ira_create_allocno (int, bool, ira_loop_tree_node_t);
extern void ira_create_allocno_objects (ira_allocno_t);
extern void ira_set_allocno_cover_class (ira_allocno_t, enum reg_class);
extern void ira_set_allocno_class (ira_allocno_t, enum reg_class);
extern bool ira_conflict_vector_profitable_p (ira_object_t, int);
extern void ira_allocate_conflict_vec (ira_object_t, int);
extern void ira_allocate_object_conflicts (ira_object_t, int);
@ -972,7 +1001,7 @@ extern void ira_init_costs_once (void);
extern void ira_init_costs (void);
extern void ira_finish_costs_once (void);
extern void ira_costs (void);
extern void ira_tune_allocno_costs_and_cover_classes (void);
extern void ira_tune_allocno_costs (void);
/* ira-lives.c */
@ -990,6 +1019,7 @@ extern void ira_debug_conflicts (bool);
extern void ira_build_conflicts (void);
/* ira-color.c */
extern void ira_debug_hard_regs_forest (void);
extern int ira_loop_edge_freq (ira_loop_tree_node_t, int, bool);
extern void ira_reassign_conflict_allocnos (int);
extern void ira_initiate_assign (void);
@ -997,34 +1027,18 @@ extern void ira_finish_assign (void);
extern void ira_color (void);
/* ira-emit.c */
extern void ira_initiate_emit_data (void);
extern void ira_finish_emit_data (void);
extern void ira_emit (bool);
/* Return cost of moving value of MODE from register of class FROM to
register of class TO. */
static inline int
ira_get_register_move_cost (enum machine_mode mode,
enum reg_class from, enum reg_class to)
/* Initialize register costs for MODE if necessary. */
static inline void
ira_init_register_move_cost_if_necessary (enum machine_mode mode)
{
if (ira_register_move_cost[mode] == NULL)
ira_init_register_move_cost (mode);
return ira_register_move_cost[mode][from][to];
}
/* Return cost of moving value of MODE from register of class FROM to
register of class TO. Return zero if IN_P is true and FROM is
subset of TO or if IN_P is false and FROM is superset of TO. */
static inline int
ira_get_may_move_cost (enum machine_mode mode,
enum reg_class from, enum reg_class to,
bool in_p)
{
if (ira_register_move_cost[mode] == NULL)
ira_init_register_move_cost (mode);
return (in_p
? ira_may_move_in_cost[mode][from][to]
: ira_may_move_out_cost[mode][from][to]);
}
@ -1237,14 +1251,17 @@ ira_object_conflict_iter_cond (ira_object_conflict_iterator *i,
if (i->conflict_vec_p)
{
obj = ((ira_object_t *) i->vec)[i->word_num];
obj = ((ira_object_t *) i->vec)[i->word_num++];
if (obj == NULL)
return false;
}
else
{
unsigned IRA_INT_TYPE word = i->word;
unsigned int bit_num = i->bit_num;
/* Skip words that are zeros. */
for (; i->word == 0; i->word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
for (; word == 0; word = ((IRA_INT_TYPE *) i->vec)[i->word_num])
{
i->word_num++;
@ -1252,43 +1269,59 @@ ira_object_conflict_iter_cond (ira_object_conflict_iterator *i,
if (i->word_num * sizeof (IRA_INT_TYPE) >= i->size)
return false;
i->bit_num = i->word_num * IRA_INT_BITS;
bit_num = i->word_num * IRA_INT_BITS;
}
/* Skip bits that are zero. */
for (; (i->word & 1) == 0; i->word >>= 1)
i->bit_num++;
for (; (word & 1) == 0; word >>= 1)
bit_num++;
obj = ira_object_id_map[i->bit_num + i->base_conflict_id];
obj = ira_object_id_map[bit_num + i->base_conflict_id];
i->bit_num = bit_num + 1;
i->word = word >> 1;
}
*pobj = obj;
return true;
}
/* Advance to the next conflicting allocno. */
static inline void
ira_object_conflict_iter_next (ira_object_conflict_iterator *i)
{
if (i->conflict_vec_p)
i->word_num++;
else
{
i->word >>= 1;
i->bit_num++;
}
}
/* Loop over all objects conflicting with OBJ. In each iteration,
CONF is set to the next conflicting object. ITER is an instance
of ira_object_conflict_iterator used to iterate the conflicts. */
#define FOR_EACH_OBJECT_CONFLICT(OBJ, CONF, ITER) \
for (ira_object_conflict_iter_init (&(ITER), (OBJ)); \
ira_object_conflict_iter_cond (&(ITER), &(CONF)); \
ira_object_conflict_iter_next (&(ITER)))
ira_object_conflict_iter_cond (&(ITER), &(CONF));)
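A hypothetical usage sketch of the iteration macro above (editor's
illustration; walk_conflicts and the process_conflict callback are assumed
names, not functions from this patch):

/* Visit every object conflicting with OBJ, handing each one to the
   caller-supplied PROCESS_CONFLICT callback.  */
static void
walk_conflicts (ira_object_t obj, void (*process_conflict) (ira_object_t))
{
  ira_object_t conf;
  ira_object_conflict_iterator oci;

  FOR_EACH_OBJECT_CONFLICT (obj, conf, oci)
    process_conflict (conf);
}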
/* The function returns TRUE if at least one hard register from ones
starting with HARD_REGNO and containing a value of MODE is in set
HARD_REGSET. */
static inline bool
ira_hard_reg_set_intersection_p (int hard_regno, enum machine_mode mode,
HARD_REG_SET hard_regset)
{
int i;
gcc_assert (hard_regno >= 0);
for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
if (TEST_HARD_REG_BIT (hard_regset, hard_regno + i))
return true;
return false;
}
/* Return number of hard registers in hard register SET. */
static inline int
hard_reg_set_size (HARD_REG_SET set)
{
int i, size;
for (size = i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (set, i))
size++;
return size;
}
/* The function returns TRUE if hard registers starting with
HARD_REGNO and containing value of MODE are not in set
HARD_REGSET. */
@ -1311,61 +1344,60 @@ ira_hard_reg_not_in_set_p (int hard_regno, enum machine_mode mode,
initialization of the cost vectors. We do this only when it is
really necessary. */
/* Allocate cost vector *VEC for hard registers of COVER_CLASS and
/* Allocate cost vector *VEC for hard registers of ACLASS and
initialize the elements by VAL if it is necessary */
static inline void
ira_allocate_and_set_costs (int **vec, enum reg_class cover_class, int val)
ira_allocate_and_set_costs (int **vec, enum reg_class aclass, int val)
{
int i, *reg_costs;
int len;
if (*vec != NULL)
return;
*vec = reg_costs = ira_allocate_cost_vector (cover_class);
len = ira_class_hard_regs_num[cover_class];
*vec = reg_costs = ira_allocate_cost_vector (aclass);
len = ira_class_hard_regs_num[aclass];
for (i = 0; i < len; i++)
reg_costs[i] = val;
}
/* Allocate cost vector *VEC for hard registers of COVER_CLASS and
copy values of vector SRC into the vector if it is necessary */
/* Allocate cost vector *VEC for hard registers of ACLASS and copy
values of vector SRC into the vector if it is necessary */
static inline void
ira_allocate_and_copy_costs (int **vec, enum reg_class cover_class, int *src)
ira_allocate_and_copy_costs (int **vec, enum reg_class aclass, int *src)
{
int len;
if (*vec != NULL || src == NULL)
return;
*vec = ira_allocate_cost_vector (cover_class);
len = ira_class_hard_regs_num[cover_class];
*vec = ira_allocate_cost_vector (aclass);
len = ira_class_hard_regs_num[aclass];
memcpy (*vec, src, sizeof (int) * len);
}
/* Allocate cost vector *VEC for hard registers of COVER_CLASS and
add values of vector SRC into the vector if it is necessary */
/* Allocate cost vector *VEC for hard registers of ACLASS and add
values of vector SRC into the vector if it is necessary */
static inline void
ira_allocate_and_accumulate_costs (int **vec, enum reg_class cover_class,
int *src)
ira_allocate_and_accumulate_costs (int **vec, enum reg_class aclass, int *src)
{
int i, len;
if (src == NULL)
return;
len = ira_class_hard_regs_num[cover_class];
len = ira_class_hard_regs_num[aclass];
if (*vec == NULL)
{
*vec = ira_allocate_cost_vector (cover_class);
*vec = ira_allocate_cost_vector (aclass);
memset (*vec, 0, sizeof (int) * len);
}
for (i = 0; i < len; i++)
(*vec)[i] += src[i];
}
/* Allocate cost vector *VEC for hard registers of COVER_CLASS and
copy values of vector SRC into the vector or initialize it by VAL
(if SRC is null). */
/* Allocate cost vector *VEC for hard registers of ACLASS and copy
values of vector SRC into the vector or initialize it by VAL (if
SRC is null). */
static inline void
ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class cover_class,
ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class aclass,
int val, int *src)
{
int i, *reg_costs;
@ -1373,8 +1405,8 @@ ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class cover_class,
if (*vec != NULL)
return;
*vec = reg_costs = ira_allocate_cost_vector (cover_class);
len = ira_class_hard_regs_num[cover_class];
*vec = reg_costs = ira_allocate_cost_vector (aclass);
len = ira_class_hard_regs_num[aclass];
if (src != NULL)
memcpy (reg_costs, src, sizeof (int) * len);
else


@ -64,8 +64,8 @@ static int curr_point;
register pressure excess. Excess pressure for a register class at
some point means that there are more allocnos of given register
class living at the point than number of hard-registers of the
class available for the allocation. It is defined only for cover
classes. */
class available for the allocation. It is defined only for
pressure classes. */
static int high_pressure_start_point[N_REG_CLASSES];
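The following editor's sketch condenses the excess-pressure bookkeeping
described above.  It mirrors the logic of inc_register_pressure and
dec_register_pressure shown further down, so the array names are those used in
this file, but the function itself is hypothetical.

/* Remember the program point where pressure for PCLASS first exceeds
   the number of allocatable hard registers, and forget it once the
   pressure drops back to an allocatable level.  */
static void
sketch_note_pressure (enum reg_class pclass)
{
  if (curr_reg_pressure[pclass] > ira_available_class_regs[pclass])
    {
      if (high_pressure_start_point[pclass] < 0)
	high_pressure_start_point[pclass] = curr_point;
    }
  else
    high_pressure_start_point[pclass] = -1;
}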
/* Objects live at current point in the scan. */
@ -97,6 +97,7 @@ make_hard_regno_born (int regno)
EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
{
ira_object_t obj = ira_object_id_map[i];
SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);
}
@ -134,14 +135,17 @@ update_allocno_pressure_excess_length (ira_object_t obj)
{
ira_allocno_t a = OBJECT_ALLOCNO (obj);
int start, i;
enum reg_class cover_class, cl;
enum reg_class aclass, pclass, cl;
live_range_t p;
cover_class = ALLOCNO_COVER_CLASS (a);
aclass = ALLOCNO_CLASS (a);
pclass = ira_pressure_class_translate[aclass];
for (i = 0;
(cl = ira_reg_class_super_classes[cover_class][i]) != LIM_REG_CLASSES;
(cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
i++)
{
if (! ira_reg_pressure_class_p[cl])
continue;
if (high_pressure_start_point[cl] < 0)
continue;
p = OBJECT_LIVE_RANGES (obj);
@ -166,24 +170,26 @@ make_object_dead (ira_object_t obj)
update_allocno_pressure_excess_length (obj);
}
/* The current register pressures for each cover class for the current
/* The current register pressures for each pressure class for the current
basic block. */
static int curr_reg_pressure[N_REG_CLASSES];
/* Record that register pressure for COVER_CLASS increased by N
registers. Update the current register pressure, maximal register
pressure for the current BB and the start point of the register
pressure excess. */
/* Record that register pressure for PCLASS increased by N registers.
Update the current register pressure, maximal register pressure for
the current BB and the start point of the register pressure
excess. */
static void
inc_register_pressure (enum reg_class cover_class, int n)
inc_register_pressure (enum reg_class pclass, int n)
{
int i;
enum reg_class cl;
for (i = 0;
(cl = ira_reg_class_super_classes[cover_class][i]) != LIM_REG_CLASSES;
(cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
i++)
{
if (! ira_reg_pressure_class_p[cl])
continue;
curr_reg_pressure[cl] += n;
if (high_pressure_start_point[cl] < 0
&& (curr_reg_pressure[cl] > ira_available_class_regs[cl]))
@ -193,13 +199,13 @@ inc_register_pressure (enum reg_class cover_class, int n)
}
}
/* Record that register pressure for COVER_CLASS has decreased by
NREGS registers; update current register pressure, start point of
the register pressure excess, and register pressure excess length
for living allocnos. */
/* Record that register pressure for PCLASS has decreased by NREGS
registers; update current register pressure, start point of the
register pressure excess, and register pressure excess length for
living allocnos. */
static void
dec_register_pressure (enum reg_class cover_class, int nregs)
dec_register_pressure (enum reg_class pclass, int nregs)
{
int i;
unsigned int j;
@ -207,9 +213,11 @@ dec_register_pressure (enum reg_class cover_class, int nregs)
bool set_p = false;
for (i = 0;
(cl = ira_reg_class_super_classes[cover_class][i]) != LIM_REG_CLASSES;
(cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
i++)
{
if (! ira_reg_pressure_class_p[cl])
continue;
curr_reg_pressure[cl] -= nregs;
ira_assert (curr_reg_pressure[cl] >= 0);
if (high_pressure_start_point[cl] >= 0
@ -221,12 +229,15 @@ dec_register_pressure (enum reg_class cover_class, int nregs)
EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
update_allocno_pressure_excess_length (ira_object_id_map[j]);
for (i = 0;
(cl = ira_reg_class_super_classes[cover_class][i])
!= LIM_REG_CLASSES;
(cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
i++)
if (high_pressure_start_point[cl] >= 0
&& curr_reg_pressure[cl] <= ira_available_class_regs[cl])
high_pressure_start_point[cl] = -1;
{
if (! ira_reg_pressure_class_p[cl])
continue;
if (high_pressure_start_point[cl] >= 0
&& curr_reg_pressure[cl] <= ira_available_class_regs[cl])
high_pressure_start_point[cl] = -1;
}
}
}
@ -236,8 +247,8 @@ static void
mark_pseudo_regno_live (int regno)
{
ira_allocno_t a = ira_curr_regno_allocno_map[regno];
enum reg_class pclass;
int i, n, nregs;
enum reg_class cl;
if (a == NULL)
return;
@ -246,8 +257,8 @@ mark_pseudo_regno_live (int regno)
allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
n = ALLOCNO_NUM_OBJECTS (a);
cl = ALLOCNO_COVER_CLASS (a);
nregs = ira_reg_class_nregs[cl][ALLOCNO_MODE (a)];
pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
if (n > 1)
{
/* We track every subobject separately. */
@ -258,10 +269,11 @@ mark_pseudo_regno_live (int regno)
for (i = 0; i < n; i++)
{
ira_object_t obj = ALLOCNO_OBJECT (a, i);
if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
continue;
inc_register_pressure (cl, nregs);
inc_register_pressure (pclass, nregs);
make_object_born (obj);
}
}
@ -274,7 +286,7 @@ mark_pseudo_regno_subword_live (int regno, int subword)
{
ira_allocno_t a = ira_curr_regno_allocno_map[regno];
int n, nregs;
enum reg_class cl;
enum reg_class pclass;
ira_object_t obj;
if (a == NULL)
@ -290,15 +302,15 @@ mark_pseudo_regno_subword_live (int regno, int subword)
return;
}
cl = ALLOCNO_COVER_CLASS (a);
nregs = ira_reg_class_nregs[cl][ALLOCNO_MODE (a)];
pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
nregs = ira_reg_class_max_nregs[pclass][ALLOCNO_MODE (a)];
gcc_assert (nregs == n);
obj = ALLOCNO_OBJECT (a, subword);
if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
return;
inc_register_pressure (cl, nregs);
inc_register_pressure (pclass, nregs);
make_object_born (obj);
}
@ -313,14 +325,16 @@ mark_hard_reg_live (rtx reg)
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
enum reg_class aclass, pclass;
while (regno < last)
{
if (! TEST_HARD_REG_BIT (hard_regs_live, regno)
&& ! TEST_HARD_REG_BIT (eliminable_regset, regno))
{
enum reg_class cover_class = ira_hard_regno_cover_class[regno];
inc_register_pressure (cover_class, 1);
aclass = ira_hard_regno_allocno_class[regno];
pclass = ira_pressure_class_translate[aclass];
inc_register_pressure (pclass, 1);
make_hard_regno_born (regno);
}
regno++;
@ -375,8 +389,8 @@ mark_pseudo_regno_dead (int regno)
allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
n = ALLOCNO_NUM_OBJECTS (a);
cl = ALLOCNO_COVER_CLASS (a);
nregs = ira_reg_class_nregs[cl][ALLOCNO_MODE (a)];
cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
if (n > 1)
{
/* We track every subobject separately. */
@ -415,8 +429,8 @@ mark_pseudo_regno_subword_dead (int regno, int subword)
/* The allocno as a whole doesn't die in this case. */
return;
cl = ALLOCNO_COVER_CLASS (a);
nregs = ira_reg_class_nregs[cl][ALLOCNO_MODE (a)];
cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
nregs = ira_reg_class_max_nregs[cl][ALLOCNO_MODE (a)];
gcc_assert (nregs == n);
obj = ALLOCNO_OBJECT (a, subword);
@ -437,13 +451,15 @@ mark_hard_reg_dead (rtx reg)
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
enum reg_class aclass, pclass;
while (regno < last)
{
if (TEST_HARD_REG_BIT (hard_regs_live, regno))
{
enum reg_class cover_class = ira_hard_regno_cover_class[regno];
dec_register_pressure (cover_class, 1);
aclass = ira_hard_regno_allocno_class[regno];
pclass = ira_pressure_class_translate[aclass];
dec_register_pressure (pclass, 1);
make_hard_regno_dead (regno);
}
regno++;
@ -512,7 +528,7 @@ make_pseudo_conflict (rtx reg, enum reg_class cl, rtx dreg, rtx orig_dreg,
return advance_p;
a = ira_curr_regno_allocno_map[REGNO (reg)];
if (! reg_classes_intersect_p (cl, ALLOCNO_COVER_CLASS (a)))
if (! reg_classes_intersect_p (cl, ALLOCNO_CLASS (a)))
return advance_p;
if (advance_p)
@ -585,7 +601,7 @@ check_and_make_def_conflict (int alt, int def, enum reg_class def_cl)
return;
a = ira_curr_regno_allocno_map[REGNO (dreg)];
acl = ALLOCNO_COVER_CLASS (a);
acl = ALLOCNO_CLASS (a);
if (! reg_classes_intersect_p (acl, def_cl))
return;
@ -815,7 +831,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
: REG_CLASS_FROM_CONSTRAINT (c, constraints));
if ((cl != NO_REGS && next_cl != cl)
|| (ira_available_class_regs[next_cl]
> ira_reg_class_nregs[next_cl][GET_MODE (op)]))
> ira_reg_class_max_nregs[next_cl][GET_MODE (op)]))
return NO_REGS;
cl = next_cl;
break;
@ -828,7 +844,7 @@ single_reg_class (const char *constraints, rtx op, rtx equiv_const)
if ((cl != NO_REGS && next_cl != cl)
|| next_cl == NO_REGS
|| (ira_available_class_regs[next_cl]
> ira_reg_class_nregs[next_cl][GET_MODE (op)]))
> ira_reg_class_max_nregs[next_cl][GET_MODE (op)]))
return NO_REGS;
cl = next_cl;
break;
@ -903,7 +919,7 @@ ira_implicitly_set_insn_hard_regs (HARD_REG_SET *set)
regs in this class are fixed. */
&& ira_available_class_regs[cl] != 0
&& (ira_available_class_regs[cl]
<= ira_reg_class_nregs[cl][mode]))
<= ira_reg_class_max_nregs[cl][mode]))
IOR_HARD_REG_SET (*set, reg_class_contents[cl]);
break;
}
@ -944,11 +960,11 @@ process_single_reg_class_operands (bool in_p, int freq)
if (REG_P (operand)
&& (regno = REGNO (operand)) >= FIRST_PSEUDO_REGISTER)
{
enum reg_class cover_class;
enum reg_class aclass;
operand_a = ira_curr_regno_allocno_map[regno];
cover_class = ALLOCNO_COVER_CLASS (operand_a);
if (ira_class_subset_p[cl][cover_class]
aclass = ALLOCNO_CLASS (operand_a);
if (ira_class_subset_p[cl][aclass]
&& ira_class_hard_regs_num[cl] != 0)
{
/* View the desired allocation of OPERAND as:
@ -968,21 +984,19 @@ process_single_reg_class_operands (bool in_p, int freq)
offset = subreg_lowpart_offset (ymode, xmode);
yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
if (yregno >= 0
&& ira_class_hard_reg_index[cover_class][yregno] >= 0)
&& ira_class_hard_reg_index[aclass][yregno] >= 0)
{
int cost;
ira_allocate_and_set_costs
(&ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a),
cover_class, 0);
cost
= (freq
* (in_p
? ira_get_register_move_cost (xmode, cover_class, cl)
: ira_get_register_move_cost (xmode, cl,
cover_class)));
aclass, 0);
ira_init_register_move_cost_if_necessary (xmode);
cost = freq * (in_p
? ira_register_move_cost[xmode][aclass][cl]
: ira_register_move_cost[xmode][cl][aclass]);
ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a)
[ira_class_hard_reg_index[cover_class][yregno]] -= cost;
[ira_class_hard_reg_index[aclass][yregno]] -= cost;
}
}
}
@ -1040,10 +1054,10 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
bb = loop_tree_node->bb;
if (bb != NULL)
{
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
curr_reg_pressure[ira_reg_class_cover[i]] = 0;
high_pressure_start_point[ira_reg_class_cover[i]] = -1;
curr_reg_pressure[ira_pressure_classes[i]] = 0;
high_pressure_start_point[ira_pressure_classes[i]] = -1;
}
curr_bb_node = loop_tree_node;
reg_live_out = DF_LR_OUT (bb);
@ -1054,14 +1068,17 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (hard_regs_live, i))
{
enum reg_class cover_class, cl;
enum reg_class aclass, pclass, cl;
cover_class = ira_class_translate[REGNO_REG_CLASS (i)];
aclass = ira_allocno_class_translate[REGNO_REG_CLASS (i)];
pclass = ira_pressure_class_translate[aclass];
for (j = 0;
(cl = ira_reg_class_super_classes[cover_class][j])
(cl = ira_reg_class_super_classes[pclass][j])
!= LIM_REG_CLASSES;
j++)
{
if (! ira_reg_pressure_class_p[cl])
continue;
curr_reg_pressure[cl]++;
if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
@ -1261,6 +1278,7 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
EXECUTE_IF_SET_IN_SPARSESET (objects_live, px)
{
ira_allocno_t a = OBJECT_ALLOCNO (ira_object_id_map[px]);
ALLOCNO_NO_STACK_REG_P (a) = true;
ALLOCNO_TOTAL_NO_STACK_REG_P (a) = true;
}
@ -1284,15 +1302,15 @@ process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
}
/* Propagate register pressure to upper loop tree nodes: */
if (loop_tree_node != ira_loop_tree_root)
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
enum reg_class cover_class;
enum reg_class pclass;
cover_class = ira_reg_class_cover[i];
if (loop_tree_node->reg_pressure[cover_class]
> loop_tree_node->parent->reg_pressure[cover_class])
loop_tree_node->parent->reg_pressure[cover_class]
= loop_tree_node->reg_pressure[cover_class];
pclass = ira_pressure_classes[i];
if (loop_tree_node->reg_pressure[pclass]
> loop_tree_node->parent->reg_pressure[pclass])
loop_tree_node->parent->reg_pressure[pclass]
= loop_tree_node->reg_pressure[pclass];
}
}
@ -1424,6 +1442,7 @@ print_allocno_live_ranges (FILE *f, ira_allocno_t a)
{
int n = ALLOCNO_NUM_OBJECTS (a);
int i;
for (i = 0; i < n; i++)
{
fprintf (f, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
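
The hunks above move the live-range pass from cover classes to pressure classes. As a rough, standalone sketch of the inc_register_pressure/dec_register_pressure bookkeeping (the class set and register counts are invented for illustration; this is not GCC code):

#include <stdio.h>

/* Toy pressure classes standing in for ira_pressure_classes.  */
enum pclass { GPR, FPR, N_PCLASSES };

static int curr_reg_pressure[N_PCLASSES];
static int high_pressure_start_point[N_PCLASSES];
/* Stand-in for ira_available_class_regs.  */
static const int available_regs[N_PCLASSES] = { 4, 2 };

/* Analogue of inc_register_pressure: bump the pressure of PCLASS by N
   and remember the point where it first exceeds the hard registers
   available for allocation.  */
static void
inc_pressure (enum pclass pclass, int n, int point)
{
  curr_reg_pressure[pclass] += n;
  if (high_pressure_start_point[pclass] < 0
      && curr_reg_pressure[pclass] > available_regs[pclass])
    high_pressure_start_point[pclass] = point;
}

/* Analogue of dec_register_pressure: drop the pressure and clear the
   excess start point once pressure is back within the limit.  */
static void
dec_pressure (enum pclass pclass, int n)
{
  curr_reg_pressure[pclass] -= n;
  if (high_pressure_start_point[pclass] >= 0
      && curr_reg_pressure[pclass] <= available_regs[pclass])
    high_pressure_start_point[pclass] = -1;
}

int
main (void)
{
  int i, point;

  for (i = 0; i < N_PCLASSES; i++)
    high_pressure_start_point[i] = -1;

  for (point = 0; point < 6; point++)
    inc_pressure (GPR, 1, point);   /* pressure exceeds 4 at point 4 */
  printf ("GPR excess started at point %d\n",
          high_pressure_start_point[GPR]);

  dec_pressure (GPR, 3);            /* back to 3 <= 4: excess ends */
  printf ("GPR excess start point now %d\n",
          high_pressure_start_point[GPR]);
  return 0;
}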

gcc/ira.c: 1117 changed lines; diff suppressed because it is too large.

@ -20,10 +20,6 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Function specific hard registers can not be used for the register
allocation. */
extern HARD_REG_SET ira_no_alloc_regs;
/* True if we have allocno conflicts. It is false for non-optimized
mode or when the conflict table is too big. */
extern bool ira_conflicts_p;
@ -33,31 +29,47 @@ struct target_ira {
allocation for given classes. */
int x_ira_available_class_regs[N_REG_CLASSES];
/* Map: hard register number -> cover class it belongs to. If the
/* Map: hard register number -> allocno class it belongs to. If the
corresponding class is NO_REGS, the hard register is not available
for allocation. */
enum reg_class x_ira_hard_regno_cover_class[FIRST_PSEUDO_REGISTER];
enum reg_class x_ira_hard_regno_allocno_class[FIRST_PSEUDO_REGISTER];
/* Number of cover classes. Cover classes is non-intersected register
classes containing all hard-registers available for the
allocation. */
int x_ira_reg_class_cover_size;
/* Number of allocno classes. Allocno classes are register classes
which can be used for allocations of allocnos. */
int x_ira_allocno_classes_num;
/* The array containing cover classes (see also comments for macro
IRA_COVER_CLASSES;. Only first IRA_REG_CLASS_COVER_SIZE elements are
used for this. */
enum reg_class x_ira_reg_class_cover[N_REG_CLASSES];
/* The array containing allocno classes. Only first
IRA_ALLOCNO_CLASSES_NUM elements are used for this. */
enum reg_class x_ira_allocno_classes[N_REG_CLASSES];
/* Map of all register classes to corresponding cover class containing
the given class. If given class is not a subset of a cover class,
we translate it into the cheapest cover class. */
enum reg_class x_ira_class_translate[N_REG_CLASSES];
/* Map of all register classes to corresponding allocno classes
containing the given class. If given class is not a subset of an
allocno class, we translate it into the cheapest allocno class. */
enum reg_class x_ira_allocno_class_translate[N_REG_CLASSES];
/* Map: register class x machine mode -> number of hard registers of
given class needed to store value of given mode. If the number for
some hard-registers of the register class is different, the size
will be negative. */
int x_ira_reg_class_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
/* Number of pressure classes. Pressure classes are register
classes for which we calculate register pressure. */
int x_ira_pressure_classes_num;
/* The array containing pressure classes. Only first
IRA_PRESSURE_CLASSES_NUM elements are used for this. */
enum reg_class x_ira_pressure_classes[N_REG_CLASSES];
/* Map of all register classes to corresponding pressure classes
containing the given class. If given class is not a subset of a
pressure class, we translate it into the cheapest pressure
class. */
enum reg_class x_ira_pressure_class_translate[N_REG_CLASSES];
/* Biggest pressure register class containing stack registers.
NO_REGS if there are no stack registers. */
enum reg_class x_ira_stack_reg_pressure_class;
/* Maps: register class x machine mode -> maximal/minimal number of
hard registers of given class needed to store value of given
mode. */
int x_ira_reg_class_max_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
int x_ira_reg_class_min_nregs[N_REG_CLASSES][MAX_MACHINE_MODE];
/* Array analogous to target hook TARGET_MEMORY_MOVE_COST. */
short x_ira_memory_move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][2];
@ -70,6 +82,10 @@ struct target_ira {
/* The number of elements of the above array for given register
class. */
int x_ira_class_hard_regs_num[N_REG_CLASSES];
/* Function specific hard registers can not be used for the register
allocation. */
HARD_REG_SET x_ira_no_alloc_regs;
};
extern struct target_ira default_target_ira;
@ -81,22 +97,34 @@ extern struct target_ira *this_target_ira;
#define ira_available_class_regs \
(this_target_ira->x_ira_available_class_regs)
#define ira_hard_regno_cover_class \
(this_target_ira->x_ira_hard_regno_cover_class)
#define ira_reg_class_cover_size \
(this_target_ira->x_ira_reg_class_cover_size)
#define ira_reg_class_cover \
(this_target_ira->x_ira_reg_class_cover)
#define ira_class_translate \
(this_target_ira->x_ira_class_translate)
#define ira_reg_class_nregs \
(this_target_ira->x_ira_reg_class_nregs)
#define ira_hard_regno_allocno_class \
(this_target_ira->x_ira_hard_regno_allocno_class)
#define ira_allocno_classes_num \
(this_target_ira->x_ira_allocno_classes_num)
#define ira_allocno_classes \
(this_target_ira->x_ira_allocno_classes)
#define ira_allocno_class_translate \
(this_target_ira->x_ira_allocno_class_translate)
#define ira_pressure_classes_num \
(this_target_ira->x_ira_pressure_classes_num)
#define ira_pressure_classes \
(this_target_ira->x_ira_pressure_classes)
#define ira_pressure_class_translate \
(this_target_ira->x_ira_pressure_class_translate)
#define ira_stack_reg_pressure_class \
(this_target_ira->x_ira_stack_reg_pressure_class)
#define ira_reg_class_max_nregs \
(this_target_ira->x_ira_reg_class_max_nregs)
#define ira_reg_class_min_nregs \
(this_target_ira->x_ira_reg_class_min_nregs)
#define ira_memory_move_cost \
(this_target_ira->x_ira_memory_move_cost)
#define ira_class_hard_regs \
(this_target_ira->x_ira_class_hard_regs)
#define ira_class_hard_regs_num \
(this_target_ira->x_ira_class_hard_regs_num)
#define ira_no_alloc_regs \
(this_target_ira->x_ira_no_alloc_regs)
extern void ira_init_once (void);
extern void ira_init (void);
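
The new target_ira fields amount to two translation layers over the target's register classes: every class maps to an allocno class, and every allocno class maps to a pressure class. A toy sketch of that layering (the class names and translate tables below are invented, not a real target's):

#include <stdio.h>

/* A toy class lattice standing in for the target's reg_class enum.  */
enum toy_class { NO_REGS, LO_GPR, GPR, FPR, ALL_REGS, N_CLASSES };

static const char *const names[N_CLASSES]
  = { "NO_REGS", "LO_GPR", "GPR", "FPR", "ALL_REGS" };

/* Analogue of ira_allocno_class_translate: every class maps to the
   allocno class used for allocation (LO_GPR folds into GPR here).  */
static const enum toy_class allocno_class_translate[N_CLASSES]
  = { NO_REGS, GPR, GPR, FPR, GPR };

/* Analogue of ira_pressure_class_translate: allocno classes map to the
   classes for which pressure is tracked (identical here, but a target
   may have fewer pressure classes than allocno classes).  */
static const enum toy_class pressure_class_translate[N_CLASSES]
  = { NO_REGS, GPR, GPR, FPR, GPR };

int
main (void)
{
  int cl;

  for (cl = 0; cl < N_CLASSES; cl++)
    {
      enum toy_class aclass = allocno_class_translate[cl];
      enum toy_class pclass = pressure_class_translate[aclass];
      printf ("%-8s -> allocno %-8s -> pressure %s\n",
              names[cl], names[aclass], names[pclass]);
    }
  return 0;
}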


@ -64,7 +64,7 @@ struct loop_data
struct loop *outermost_exit; /* The outermost exit of the loop. */
bool has_call; /* True if the loop contains a call. */
/* Maximal register pressure inside loop for given register class
(defined only for the cover classes). */
(defined only for the pressure classes). */
int max_reg_pressure[N_REG_CLASSES];
/* Loop regs referenced and live pseudo-registers. */
bitmap_head regs_ref;
@ -1012,13 +1012,13 @@ free_use_list (struct use *use)
}
}
/* Return cover class and number of hard registers (through *NREGS)
/* Return pressure class and number of hard registers (through *NREGS)
for destination of INSN. */
static enum reg_class
get_cover_class_and_nregs (rtx insn, int *nregs)
get_pressure_class_and_nregs (rtx insn, int *nregs)
{
rtx reg;
enum reg_class cover_class;
enum reg_class pressure_class;
rtx set = single_set (insn);
/* Considered invariant insns have only one set. */
@ -1029,19 +1029,23 @@ get_cover_class_and_nregs (rtx insn, int *nregs)
if (MEM_P (reg))
{
*nregs = 0;
cover_class = NO_REGS;
pressure_class = NO_REGS;
}
else
{
if (! REG_P (reg))
reg = NULL_RTX;
if (reg == NULL_RTX)
cover_class = GENERAL_REGS;
pressure_class = GENERAL_REGS;
else
cover_class = reg_cover_class (REGNO (reg));
*nregs = ira_reg_class_nregs[cover_class][GET_MODE (SET_SRC (set))];
{
pressure_class = reg_allocno_class (REGNO (reg));
pressure_class = ira_pressure_class_translate[pressure_class];
}
*nregs
= ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
}
return cover_class;
return pressure_class;
}
/* Calculates cost and number of registers needed for moving invariant INV
@ -1064,8 +1068,8 @@ get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
regs_needed[0] = 0;
else
{
for (i = 0; i < ira_reg_class_cover_size; i++)
regs_needed[ira_reg_class_cover[i]] = 0;
for (i = 0; i < ira_pressure_classes_num; i++)
regs_needed[ira_pressure_classes[i]] = 0;
}
if (inv->move
@ -1078,10 +1082,10 @@ get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
else
{
int nregs;
enum reg_class cover_class;
enum reg_class pressure_class;
cover_class = get_cover_class_and_nregs (inv->insn, &nregs);
regs_needed[cover_class] += nregs;
pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
regs_needed[pressure_class] += nregs;
}
if (!inv->cheap_address
@ -1112,7 +1116,7 @@ get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
&& constant_pool_constant_p (SET_SRC (set)))
{
if (flag_ira_loop_pressure)
regs_needed[STACK_REG_COVER_CLASS] += 2;
regs_needed[ira_stack_reg_pressure_class] += 2;
else
regs_needed[0] += 2;
}
@ -1131,10 +1135,10 @@ get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
check_p = aregs_needed[0] != 0;
else
{
for (i = 0; i < ira_reg_class_cover_size; i++)
if (aregs_needed[ira_reg_class_cover[i]] != 0)
for (i = 0; i < ira_pressure_classes_num; i++)
if (aregs_needed[ira_pressure_classes[i]] != 0)
break;
check_p = i < ira_reg_class_cover_size;
check_p = i < ira_pressure_classes_num;
}
if (check_p
/* We need to check always_executed, since if the original value of
@ -1151,10 +1155,10 @@ get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
else
{
int nregs;
enum reg_class cover_class;
enum reg_class pressure_class;
cover_class = get_cover_class_and_nregs (inv->insn, &nregs);
aregs_needed[cover_class] -= nregs;
pressure_class = get_pressure_class_and_nregs (inv->insn, &nregs);
aregs_needed[pressure_class] -= nregs;
}
}
@ -1162,9 +1166,9 @@ get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
regs_needed[0] += aregs_needed[0];
else
{
for (i = 0; i < ira_reg_class_cover_size; i++)
regs_needed[ira_reg_class_cover[i]]
+= aregs_needed[ira_reg_class_cover[i]];
for (i = 0; i < ira_pressure_classes_num; i++)
regs_needed[ira_pressure_classes[i]]
+= aregs_needed[ira_pressure_classes[i]];
}
(*comp_cost) += acomp_cost;
}
@ -1197,19 +1201,19 @@ gain_for_invariant (struct invariant *inv, unsigned *regs_needed,
else
{
int i;
enum reg_class cover_class;
enum reg_class pressure_class;
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
cover_class = ira_reg_class_cover[i];
if ((int) new_regs[cover_class]
+ (int) regs_needed[cover_class]
+ LOOP_DATA (curr_loop)->max_reg_pressure[cover_class]
pressure_class = ira_pressure_classes[i];
if ((int) new_regs[pressure_class]
+ (int) regs_needed[pressure_class]
+ LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
+ IRA_LOOP_RESERVED_REGS
> ira_available_class_regs[cover_class])
> ira_available_class_regs[pressure_class])
break;
}
if (i < ira_reg_class_cover_size)
if (i < ira_pressure_classes_num)
/* There will be register pressure excess and we want not to
make this loop invariant motion. All loop invariants with
non-positive gains will be rejected in function
@ -1273,9 +1277,9 @@ best_gain_for_invariant (struct invariant **best, unsigned *regs_needed,
regs_needed[0] = aregs_needed[0];
else
{
for (i = 0; i < ira_reg_class_cover_size; i++)
regs_needed[ira_reg_class_cover[i]]
= aregs_needed[ira_reg_class_cover[i]];
for (i = 0; i < ira_pressure_classes_num; i++)
regs_needed[ira_pressure_classes[i]]
= aregs_needed[ira_pressure_classes[i]];
}
}
}
@ -1352,8 +1356,8 @@ find_invariants_to_move (bool speed, bool call_p)
new_regs[0] = regs_needed[0] = 0;
else
{
for (i = 0; (int) i < ira_reg_class_cover_size; i++)
new_regs[ira_reg_class_cover[i]] = 0;
for (i = 0; (int) i < ira_pressure_classes_num; i++)
new_regs[ira_pressure_classes[i]] = 0;
}
while ((gain = best_gain_for_invariant (&inv, regs_needed,
new_regs, regs_used,
@ -1364,9 +1368,9 @@ find_invariants_to_move (bool speed, bool call_p)
new_regs[0] += regs_needed[0];
else
{
for (i = 0; (int) i < ira_reg_class_cover_size; i++)
new_regs[ira_reg_class_cover[i]]
+= regs_needed[ira_reg_class_cover[i]];
for (i = 0; (int) i < ira_pressure_classes_num; i++)
new_regs[ira_pressure_classes[i]]
+= regs_needed[ira_pressure_classes[i]];
}
}
}
@ -1519,7 +1523,7 @@ move_invariants (struct loop *loop)
setup_reg_classes (REGNO (inv->reg),
reg_preferred_class (inv->orig_regno),
reg_alternate_class (inv->orig_regno),
reg_cover_class (inv->orig_regno));
reg_allocno_class (inv->orig_regno));
else
setup_reg_classes (REGNO (inv->reg),
GENERAL_REGS, NO_REGS, GENERAL_REGS);
@ -1604,7 +1608,7 @@ free_loop_data (struct loop *loop)
/* Registers currently living. */
static bitmap_head curr_regs_live;
/* Current reg pressure for each cover class. */
/* Current reg pressure for each pressure class. */
static int curr_reg_pressure[N_REG_CLASSES];
/* Record all regs that are set in any one insn. Communication from
@ -1615,23 +1619,26 @@ static rtx regs_set[(FIRST_PSEUDO_REGISTER > MAX_RECOG_OPERANDS
/* Number of regs stored in the previous array. */
static int n_regs_set;
/* Return cover class and number of needed hard registers (through
/* Return pressure class and number of needed hard registers (through
*NREGS) of register REGNO. */
static enum reg_class
get_regno_cover_class (int regno, int *nregs)
get_regno_pressure_class (int regno, int *nregs)
{
if (regno >= FIRST_PSEUDO_REGISTER)
{
enum reg_class cover_class = reg_cover_class (regno);
enum reg_class pressure_class;
*nregs = ira_reg_class_nregs[cover_class][PSEUDO_REGNO_MODE (regno)];
return cover_class;
pressure_class = reg_allocno_class (regno);
pressure_class = ira_pressure_class_translate[pressure_class];
*nregs
= ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
return pressure_class;
}
else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
&& ! TEST_HARD_REG_BIT (eliminable_regset, regno))
{
*nregs = 1;
return ira_class_translate[REGNO_REG_CLASS (regno)];
return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
}
else
{
@ -1646,18 +1653,18 @@ static void
change_pressure (int regno, bool incr_p)
{
int nregs;
enum reg_class cover_class;
enum reg_class pressure_class;
cover_class = get_regno_cover_class (regno, &nregs);
pressure_class = get_regno_pressure_class (regno, &nregs);
if (! incr_p)
curr_reg_pressure[cover_class] -= nregs;
curr_reg_pressure[pressure_class] -= nregs;
else
{
curr_reg_pressure[cover_class] += nregs;
if (LOOP_DATA (curr_loop)->max_reg_pressure[cover_class]
< curr_reg_pressure[cover_class])
LOOP_DATA (curr_loop)->max_reg_pressure[cover_class]
= curr_reg_pressure[cover_class];
curr_reg_pressure[pressure_class] += nregs;
if (LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
< curr_reg_pressure[pressure_class])
LOOP_DATA (curr_loop)->max_reg_pressure[pressure_class]
= curr_reg_pressure[pressure_class];
}
}
@ -1813,8 +1820,8 @@ calculate_loop_reg_pressure (void)
bitmap_ior_into (&LOOP_DATA (loop)->regs_live, DF_LR_IN (bb));
bitmap_copy (&curr_regs_live, DF_LR_IN (bb));
for (i = 0; i < ira_reg_class_cover_size; i++)
curr_reg_pressure[ira_reg_class_cover[i]] = 0;
for (i = 0; i < ira_pressure_classes_num; i++)
curr_reg_pressure[ira_pressure_classes[i]] = 0;
EXECUTE_IF_SET_IN_BITMAP (&curr_regs_live, 0, j, bi)
change_pressure (j, true);
@ -1864,11 +1871,11 @@ calculate_loop_reg_pressure (void)
EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
if (! bitmap_bit_p (&LOOP_DATA (loop)->regs_ref, j))
{
enum reg_class cover_class;
enum reg_class pressure_class;
int nregs;
cover_class = get_regno_cover_class (j, &nregs);
LOOP_DATA (loop)->max_reg_pressure[cover_class] -= nregs;
pressure_class = get_regno_pressure_class (j, &nregs);
LOOP_DATA (loop)->max_reg_pressure[pressure_class] -= nregs;
}
}
if (dump_file == NULL)
@ -1886,15 +1893,15 @@ calculate_loop_reg_pressure (void)
EXECUTE_IF_SET_IN_BITMAP (&LOOP_DATA (loop)->regs_live, 0, j, bi)
fprintf (dump_file, " %d", j);
fprintf (dump_file, "\n Pressure:");
for (i = 0; (int) i < ira_reg_class_cover_size; i++)
for (i = 0; (int) i < ira_pressure_classes_num; i++)
{
enum reg_class cover_class;
enum reg_class pressure_class;
cover_class = ira_reg_class_cover[i];
if (LOOP_DATA (loop)->max_reg_pressure[cover_class] == 0)
pressure_class = ira_pressure_classes[i];
if (LOOP_DATA (loop)->max_reg_pressure[pressure_class] == 0)
continue;
fprintf (dump_file, " %s=%d", reg_class_names[cover_class],
LOOP_DATA (loop)->max_reg_pressure[cover_class]);
fprintf (dump_file, " %s=%d", reg_class_names[pressure_class],
LOOP_DATA (loop)->max_reg_pressure[pressure_class]);
}
fprintf (dump_file, "\n");
}
@ -1913,8 +1920,10 @@ move_loop_invariants (void)
if (flag_ira_loop_pressure)
{
df_analyze ();
regstat_init_n_sets_and_refs ();
ira_set_pseudo_classes (dump_file);
calculate_loop_reg_pressure ();
regstat_free_n_sets_and_refs ();
}
df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
/* Process the loops, innermost first. */
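
get_pressure_class_and_nregs now feeds per-pressure-class register counts into regs_needed, and get_regno_pressure_class does the analogous mapping for individual regnos. A simplified, standalone sketch of that accumulation (the toy pseudo descriptions stand in for reg_allocno_class and ira_reg_class_max_nregs; this is not the actual loop-invariant.c code):

#include <stdio.h>

enum pclass { NO_PCLASS = -1, GPR, FPR, N_PCLASSES };

/* Toy pseudo-register descriptions: the pressure class IRA would pick
   and how many hard registers the pseudo's mode needs in that class.  */
struct toy_pseudo
{
  enum pclass pressure_class;
  int nregs;
};

static const struct toy_pseudo pseudos[] = {
  { GPR, 1 }, { GPR, 2 }, { FPR, 1 }, { GPR, 1 },
};

int
main (void)
{
  /* Analogue of regs_needed[] in get_inv_cost, indexed by pressure
     class instead of by cover class.  */
  int regs_needed[N_PCLASSES] = { 0, 0 };
  unsigned int i;

  for (i = 0; i < sizeof pseudos / sizeof pseudos[0]; i++)
    if (pseudos[i].pressure_class != NO_PCLASS)
      regs_needed[pseudos[i].pressure_class] += pseudos[i].nregs;

  printf ("GPR needed: %d, FPR needed: %d\n",
          regs_needed[GPR], regs_needed[FPR]);
  return 0;
}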


@ -289,11 +289,6 @@ init_options_struct (struct gcc_options *opts, struct gcc_options *opts_set)
opts_set->x_param_values = XCNEWVEC (int, num_params);
init_param_values (opts->x_param_values);
/* Use priority coloring if cover classes is not defined for the
target. */
if (targetm.ira_cover_classes == NULL)
opts->x_flag_ira_algorithm = IRA_ALGORITHM_PRIORITY;
/* Initialize whether `char' is signed. */
opts->x_flag_signed_char = DEFAULT_SIGNED_CHAR;
/* Set this to a special "uninitialized" value. The actual default
@ -758,14 +753,6 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
if (!opts->x_flag_sel_sched_pipelining)
opts->x_flag_sel_sched_pipelining_outer_loops = 0;
if (!targetm.ira_cover_classes
&& opts->x_flag_ira_algorithm == IRA_ALGORITHM_CB)
{
inform (loc,
"-fira-algorithm=CB does not work on this architecture");
opts->x_flag_ira_algorithm = IRA_ALGORITHM_PRIORITY;
}
if (opts->x_flag_conserve_stack)
{
maybe_set_param_value (PARAM_LARGE_STACK_FRAME, 100,


@ -890,9 +890,9 @@ struct reg_pref
union of most major pair of classes, that generality is not required. */
char altclass;
/* coverclass is a register class that IRA uses for allocating
/* allocnoclass is a register class that IRA uses for allocating
the pseudo. */
char coverclass;
char allocnoclass;
};
/* Record preferences of each pseudo. This is available after RA is
@ -925,12 +925,12 @@ reg_alternate_class (int regno)
/* Return the reg_class which is used by IRA for its allocation. */
enum reg_class
reg_cover_class (int regno)
reg_allocno_class (int regno)
{
if (reg_pref == 0)
return NO_REGS;
return (enum reg_class) reg_pref[regno].coverclass;
return (enum reg_class) reg_pref[regno].allocnoclass;
}
@ -1027,18 +1027,18 @@ struct rtl_opt_pass pass_reginfo_init =
/* Set up preferred, alternate, and cover classes for REGNO as
PREFCLASS, ALTCLASS, and COVERCLASS. */
PREFCLASS, ALTCLASS, and ALLOCNOCLASS. */
void
setup_reg_classes (int regno,
enum reg_class prefclass, enum reg_class altclass,
enum reg_class coverclass)
enum reg_class allocnoclass)
{
if (reg_pref == NULL)
return;
gcc_assert (reg_info_size == max_reg_num ());
reg_pref[regno].prefclass = prefclass;
reg_pref[regno].altclass = altclass;
reg_pref[regno].coverclass = coverclass;
reg_pref[regno].allocnoclass = allocnoclass;
}
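
reg_pref now records the allocno class chosen by IRA instead of a cover class. A tiny standalone sketch of the accessor pair (toy_reg_pref, set_classes and get_allocno_class are invented analogues of reg_pref, setup_reg_classes and reg_allocno_class):

#include <stdio.h>

enum toy_class { NO_REGS, GPR, FPR, ALL_REGS, N_CLASSES };

/* Simplified mirror of struct reg_pref after the rename: the field that
   used to be "coverclass" now records the allocno class IRA picked.  */
struct toy_reg_pref
{
  char prefclass;
  char altclass;
  char allocnoclass;
};

static struct toy_reg_pref reg_pref[4];

/* Analogue of setup_reg_classes.  */
static void
set_classes (int regno, enum toy_class pref, enum toy_class alt,
             enum toy_class aclass)
{
  reg_pref[regno].prefclass = pref;
  reg_pref[regno].altclass = alt;
  reg_pref[regno].allocnoclass = aclass;
}

/* Analogue of reg_allocno_class.  */
static enum toy_class
get_allocno_class (int regno)
{
  return (enum toy_class) reg_pref[regno].allocnoclass;
}

int
main (void)
{
  set_classes (3, GPR, ALL_REGS, GPR);
  printf ("allocno class of r3: %d\n", (int) get_allocno_class (3));
  return 0;
}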


@ -1236,12 +1236,12 @@ regmove_optimize (void)
df_note_add_problem ();
df_analyze ();
if (flag_ira_loop_pressure)
ira_set_pseudo_classes (dump_file);
regstat_init_n_sets_and_refs ();
regstat_compute_ri ();
if (flag_ira_loop_pressure)
ira_set_pseudo_classes (dump_file);
regno_src_regno = XNEWVEC (int, nregs);
for (i = nregs; --i >= 0; )
regno_src_regno[i] = -1;


@ -1992,7 +1992,7 @@ extern const char *decode_asm_operands (rtx, rtx *, rtx **, const char **,
extern enum reg_class reg_preferred_class (int);
extern enum reg_class reg_alternate_class (int);
extern enum reg_class reg_cover_class (int);
extern enum reg_class reg_allocno_class (int);
extern void setup_reg_classes (int, enum reg_class, enum reg_class,
enum reg_class);


@ -1821,10 +1821,10 @@ mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
enum reg_class cl;
gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
cl = sched_regno_cover_class[regno];
cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
if (clobber_p)
{
new_incr = reg_pressure_info[cl].clobber_increase + incr;
@ -1861,7 +1861,7 @@ mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
cl = sched_regno_cover_class[regno];
cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
if (clobber_p)
@ -1922,10 +1922,10 @@ mark_pseudo_death (int regno)
enum reg_class cl;
gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
cl = sched_regno_cover_class[regno];
cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
{
incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
reg_pressure_info[cl].change -= incr;
}
}
@ -1943,7 +1943,7 @@ mark_hard_regno_death (int regno, int nregs)
gcc_assert (regno < FIRST_PSEUDO_REGISTER);
if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
{
cl = sched_regno_cover_class[regno];
cl = sched_regno_pressure_class[regno];
if (cl != NO_REGS)
reg_pressure_info[cl].change -= 1;
}
@ -2004,9 +2004,9 @@ setup_insn_reg_pressure_info (rtx insn)
if (! INSN_P (insn))
return;
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
cl = ira_reg_class_cover[i];
cl = ira_pressure_classes[i];
reg_pressure_info[cl].clobber_increase = 0;
reg_pressure_info[cl].set_increase = 0;
reg_pressure_info[cl].unused_set_increase = 0;
@ -2027,14 +2027,14 @@ setup_insn_reg_pressure_info (rtx insn)
if (REG_NOTE_KIND (link) == REG_DEAD)
mark_reg_death (XEXP (link, 0));
len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
pressure_info
= INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
* sizeof (int), 1);
for (i = 0; i < ira_reg_class_cover_size; i++)
for (i = 0; i < ira_pressure_classes_num; i++)
{
cl = ira_reg_class_cover[i];
cl = ira_pressure_classes[i];
pressure_info[i].clobber_increase
= reg_pressure_info[cl].clobber_increase;
pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
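
With the rename, the scheduler sizes its per-insn pressure records by ira_pressure_classes_num instead of the cover-class count. A minimal sketch of that bookkeeping with an invented two-class set (not the real haifa-sched.c data structures):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy pressure classes, standing in for ira_pressure_classes.  */
enum pclass { GPR, FPR, N_PCLASSES };

/* Simplified version of the scheduler's struct reg_pressure_data:
   pressure change per pressure class for one insn.  */
struct pressure_data
{
  int clobber_increase;
  int set_increase;
  int change;
};

int
main (void)
{
  /* Like setup_insn_reg_pressure_info: one element per pressure class
     (the real code sizes the array by ira_pressure_classes_num).  */
  struct pressure_data *info
    = (struct pressure_data *) malloc (sizeof (struct pressure_data)
                                       * N_PCLASSES);
  memset (info, 0, sizeof (struct pressure_data) * N_PCLASSES);

  info[GPR].set_increase += 2;   /* insn sets a 2-register GPR pseudo */
  info[GPR].change += 2;
  info[FPR].change -= 1;         /* and kills one FPR register */

  printf ("GPR change %d, FPR change %d\n",
          info[GPR].change, info[FPR].change);
  free (info);
  return 0;
}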


@ -653,9 +653,9 @@ extern struct haifa_sched_info *current_sched_info;
up. */
extern bool sched_pressure_p;
/* Map regno -> its cover class. The map defined only when
/* Map regno -> its pressure class. The map defined only when
SCHED_PRESSURE_P is true. */
extern enum reg_class *sched_regno_cover_class;
extern enum reg_class *sched_regno_pressure_class;
/* Indexed by INSN_UID, the collection of all data associated with
a single instruction. */
@ -707,7 +707,7 @@ struct _haifa_deps_insn_data
#define INCREASE_BITS 8
/* The structure describes how the corresponding insn increases the
register pressure for each cover class. */
register pressure for each pressure class. */
struct reg_pressure_data
{
/* Pressure increase for given class because of clobber. */
@ -736,7 +736,7 @@ struct reg_use_data
};
/* The following structure describes used sets of registers by insns.
Registers are pseudos whose cover class is not NO_REGS or hard
Registers are pseudos whose pressure class is not NO_REGS or hard
registers available for allocations. */
struct reg_set_data
{
@ -804,7 +804,7 @@ struct _haifa_insn_data
struct reg_pressure_data *reg_pressure;
/* The following array contains maximal reg pressure between last
scheduled insn and given insn. There is an element for each
cover class of pseudos referenced in insns. This info updated
pressure class of pseudos referenced in insns. This info updated
after scheduling each insn for each insn between the two
mentioned insns. */
int *max_reg_pressure;


@ -829,7 +829,7 @@ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN;
HANDLE_SYSV_PRAGMA HANDLE_PRAGMA_WEAK CONDITIONAL_REGISTER_USAGE \
FUNCTION_ARG_BOUNDARY MUST_USE_SJLJ_EXCEPTIONS US_SOFTWARE_GOFAST \
USING_SVR4_H SVR4_ASM_SPEC FUNCTION_ARG FUNCTION_ARG_ADVANCE \
FUNCTION_INCOMING_ARG
FUNCTION_INCOMING_ARG IRA_COVER_CLASSES
/* Hooks that are no longer used. */
#pragma GCC poison LANG_HOOKS_FUNCTION_MARK LANG_HOOKS_FUNCTION_FREE \
@ -840,7 +840,8 @@ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN;
LANG_HOOKS_POPLEVEL LANG_HOOKS_TRUTHVALUE_CONVERSION \
TARGET_PROMOTE_FUNCTION_ARGS TARGET_PROMOTE_FUNCTION_RETURN \
LANG_HOOKS_MISSING_ARGUMENT LANG_HOOKS_HASH_TYPES \
TARGET_HANDLE_OFAST TARGET_OPTION_OPTIMIZATION
TARGET_HANDLE_OFAST TARGET_OPTION_OPTIMIZATION \
TARGET_IRA_COVER_CLASSES
/* Hooks into libgcc2. */
#pragma GCC poison LIBGCC2_DOUBLE_TYPE_SIZE LIBGCC2_WORDS_BIG_ENDIAN \


@ -104,10 +104,6 @@
TARGET_ASM_UNALIGNED_DI_OP, \
TARGET_ASM_UNALIGNED_TI_OP}
#ifndef IRA_COVER_CLASSES
#define TARGET_IRA_COVER_CLASSES 0
#endif
#if !defined (TARGET_FUNCTION_INCOMING_ARG)
#define TARGET_FUNCTION_INCOMING_ARG TARGET_FUNCTION_ARG
#endif


@ -2224,13 +2224,6 @@ DEFHOOK
tree, (tree type, tree expr),
hook_tree_tree_tree_null)
/* Return the array of IRA cover classes for the current target. */
DEFHOOK
(ira_cover_classes,
"",
const reg_class_t *, (void),
default_ira_cover_classes)
/* Return the class for a secondary reload, and fill in extra information. */
DEFHOOK
(secondary_reload,


@ -841,15 +841,6 @@ default_branch_target_register_class (void)
return NO_REGS;
}
#ifdef IRA_COVER_CLASSES
const reg_class_t *
default_ira_cover_classes (void)
{
static reg_class_t classes[] = IRA_COVER_CLASSES;
return classes;
}
#endif
reg_class_t
default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
reg_class_t reload_class_i ATTRIBUTE_UNUSED,


@ -124,9 +124,6 @@ extern rtx default_static_chain (const_tree, bool);
extern void default_trampoline_init (rtx, tree, rtx);
extern int default_return_pops_args (tree, tree, int);
extern reg_class_t default_branch_target_register_class (void);
#ifdef IRA_COVER_CLASSES
extern const reg_class_t *default_ira_cover_classes (void);
#endif
extern reg_class_t default_secondary_reload (bool, rtx, reg_class_t,
enum machine_mode,
secondary_reload_info *);