1997-08-12 06:07:19 +02:00
|
|
|
|
/* Instruction scheduling pass.
|
1999-01-12 00:15:28 +01:00
|
|
|
|
Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
|
1997-08-12 06:07:19 +02:00
|
|
|
|
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
|
|
|
|
|
and currently maintained by, Jim Wilson (wilson@cygnus.com)
|
|
|
|
|
|
|
|
|
|
This file is part of GNU CC.
|
|
|
|
|
|
|
|
|
|
GNU CC is free software; you can redistribute it and/or modify it
|
|
|
|
|
under the terms of the GNU General Public License as published by
|
|
|
|
|
the Free Software Foundation; either version 2, or (at your option)
|
|
|
|
|
any later version.
|
|
|
|
|
|
|
|
|
|
GNU CC is distributed in the hope that it will be useful, but
|
|
|
|
|
WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
General Public License for more details.
|
|
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
|
along with GNU CC; see the file COPYING. If not, write to the Free
|
|
|
|
|
the Free Software Foundation, 59 Temple Place - Suite 330,
|
|
|
|
|
Boston, MA 02111-1307, USA. */
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Instruction scheduling pass.
|
|
|
|
|
|
|
|
|
|
This pass implements list scheduling within basic blocks. It is
|
|
|
|
|
run twice: (1) after flow analysis, but before register allocation,
|
|
|
|
|
and (2) after register allocation.
|
|
|
|
|
|
|
|
|
|
The first run performs interblock scheduling, moving insns between
|
|
|
|
|
different blocks in the same "region", and the second runs only
|
|
|
|
|
basic block scheduling.
|
|
|
|
|
|
|
|
|
|
Interblock motions performed are useful motions and speculative
|
|
|
|
|
motions, including speculative loads. Motions requiring code
|
|
|
|
|
duplication are not supported. The identification of motion type
|
|
|
|
|
and the check for validity of speculative motions requires
|
|
|
|
|
construction and analysis of the function's control flow graph.
|
|
|
|
|
The scheduler works as follows:
|
|
|
|
|
|
|
|
|
|
We compute insn priorities based on data dependencies. Flow
|
|
|
|
|
analysis only creates a fraction of the data-dependencies we must
|
|
|
|
|
observe: namely, only those dependencies which the combiner can be
|
|
|
|
|
expected to use. For this pass, we must therefore create the
|
|
|
|
|
remaining dependencies we need to observe: register dependencies,
|
|
|
|
|
memory dependencies, dependencies to keep function calls in order,
|
|
|
|
|
and the dependence between a conditional branch and the setting of
|
|
|
|
|
condition codes are all dealt with here.
|
|
|
|
|
|
|
|
|
|
The scheduler first traverses the data flow graph, starting with
|
|
|
|
|
the last instruction, and proceeding to the first, assigning values
|
|
|
|
|
to insn_priority as it goes. This sorts the instructions
|
|
|
|
|
topologically by data dependence.
|
|
|
|
|
|
|
|
|
|
Once priorities have been established, we order the insns using
|
|
|
|
|
list scheduling. This works as follows: starting with a list of
|
|
|
|
|
all the ready insns, and sorted according to priority number, we
|
|
|
|
|
schedule the insn from the end of the list by placing its
|
|
|
|
|
predecessors in the list according to their priority order. We
|
|
|
|
|
consider this insn scheduled by setting the pointer to the "end" of
|
|
|
|
|
the list to point to the previous insn. When an insn has no
|
|
|
|
|
predecessors, we either queue it until sufficient time has elapsed
|
|
|
|
|
or add it to the ready list. As the instructions are scheduled or
|
|
|
|
|
when stalls are introduced, the queue advances and dumps insns into
|
|
|
|
|
the ready list. When all insns down to the lowest priority have
|
|
|
|
|
been scheduled, the critical path of the basic block has been made
|
|
|
|
|
as short as possible. The remaining insns are then scheduled in
|
|
|
|
|
remaining slots.
|
|
|
|
|
|
|
|
|
|
Function unit conflicts are resolved during forward list scheduling
|
|
|
|
|
by tracking the time when each insn is committed to the schedule
|
|
|
|
|
and from that, the time the function units it uses must be free.
|
|
|
|
|
As insns on the ready list are considered for scheduling, those
|
|
|
|
|
that would result in a blockage of the already committed insns are
|
|
|
|
|
queued until no blockage will result.
|
|
|
|
|
|
|
|
|
|
The following list shows the order in which we want to break ties
|
|
|
|
|
among insns in the ready list:
|
|
|
|
|
|
|
|
|
|
1. choose insn with the longest path to end of bb, ties
|
|
|
|
|
broken by
|
|
|
|
|
2. choose insn with least contribution to register pressure,
|
|
|
|
|
ties broken by
|
|
|
|
|
3. prefer in-block upon interblock motion, ties broken by
|
|
|
|
|
4. prefer useful upon speculative motion, ties broken by
|
|
|
|
|
5. choose insn with largest control flow probability, ties
|
|
|
|
|
broken by
|
|
|
|
|
6. choose insn with the least dependences upon the previously
|
|
|
|
|
scheduled insn, or finally
|
1998-06-05 13:32:28 +02:00
|
|
|
|
7 choose the insn which has the most insns dependent on it.
|
|
|
|
|
8. choose insn with lowest UID.
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
Memory references complicate matters. Only if we can be certain
|
|
|
|
|
that memory references are not part of the data dependency graph
|
|
|
|
|
(via true, anti, or output dependence), can we move operations past
|
|
|
|
|
memory references. To first approximation, reads can be done
|
|
|
|
|
independently, while writes introduce dependencies. Better
|
|
|
|
|
approximations will yield fewer dependencies.
|
|
|
|
|
|
|
|
|
|
Before reload, an extended analysis of interblock data dependences
|
|
|
|
|
is required for interblock scheduling. This is performed in
|
|
|
|
|
compute_block_backward_dependences ().
|
|
|
|
|
|
|
|
|
|
Dependencies set up by memory references are treated in exactly the
|
|
|
|
|
same way as other dependencies, by using LOG_LINKS backward
|
|
|
|
|
dependences. LOG_LINKS are translated into INSN_DEPEND forward
|
|
|
|
|
dependences for the purpose of forward list scheduling.
|
|
|
|
|
|
|
|
|
|
Having optimized the critical path, we may have also unduly
|
|
|
|
|
extended the lifetimes of some registers. If an operation requires
|
|
|
|
|
that constants be loaded into registers, it is certainly desirable
|
|
|
|
|
to load those constants as early as necessary, but no earlier.
|
|
|
|
|
I.e., it will not do to load up a bunch of registers at the
|
|
|
|
|
beginning of a basic block only to use them at the end, if they
|
|
|
|
|
could be loaded later, since this may result in excessive register
|
|
|
|
|
utilization.
|
|
|
|
|
|
|
|
|
|
Note that since branches are never in basic blocks, but only end
|
|
|
|
|
basic blocks, this pass will not move branches. But that is ok,
|
|
|
|
|
since we can use GNU's delayed branch scheduling pass to take care
|
|
|
|
|
of this case.
|
|
|
|
|
|
|
|
|
|
Also note that no further optimizations based on algebraic
|
|
|
|
|
identities are performed, so this pass would be a good one to
|
|
|
|
|
perform instruction splitting, such as breaking up a multiply
|
|
|
|
|
instruction into shifts and adds where that is profitable.
|
|
|
|
|
|
|
|
|
|
Given the memory aliasing analysis that this pass should perform,
|
|
|
|
|
it should be possible to remove redundant stores to memory, and to
|
|
|
|
|
load values from registers instead of hitting memory.
|
|
|
|
|
|
|
|
|
|
Before reload, speculative insns are moved only if a 'proof' exists
|
|
|
|
|
that no exception will be caused by this, and if no live registers
|
|
|
|
|
exist that inhibit the motion (live registers constraints are not
|
|
|
|
|
represented by data dependence edges).
|
|
|
|
|
|
|
|
|
|
This pass must update information that subsequent passes expect to
|
|
|
|
|
be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
reg_n_calls_crossed, and reg_live_length. Also, BLOCK_HEAD,
|
|
|
|
|
BLOCK_END.
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
The information in the line number notes is carefully retained by
|
|
|
|
|
this pass. Notes that refer to the starting and ending of
|
|
|
|
|
exception regions are also carefully retained by this pass. All
|
|
|
|
|
other NOTE insns are grouped in their same relative order at the
|
|
|
|
|
beginning of basic blocks and regions that have been scheduled.
|
|
|
|
|
|
|
|
|
|
The main entry point for this pass is schedule_insns(), called for
|
|
|
|
|
each function. The work of the scheduler is organized in three
|
|
|
|
|
levels: (1) function level: insns are subject to splitting,
|
|
|
|
|
control-flow-graph is constructed, regions are computed (after
|
|
|
|
|
reload, each region is of one block), (2) region level: control
|
|
|
|
|
flow graph attributes required for interblock scheduling are
|
|
|
|
|
computed (dominators, reachability, etc.), data dependences and
|
|
|
|
|
priorities are computed, and (3) block level: insns in the block
|
|
|
|
|
are actually scheduled. */
|
|
|
|
|
|
|
|
|
|
#include "config.h"
|
1998-02-17 22:35:43 +01:00
|
|
|
|
#include "system.h"
|
c-aux-info.c, [...]: Include toplev.h for real declaration of trim_filename.
1999-04-17 20:11 -0400 Zack Weinberg <zack@rabi.columbia.edu>
* c-aux-info.c, emit-rtl.c, explow.c, expmed.c, gcse.c,
haifa-sched.c, optabs.c, reorg.c, resource.c, sched.c: Include
toplev.h for real declaration of trim_filename.
* Makefile.in: Update dependencies.
From-SVN: r26523
1999-04-17 19:14:58 +02:00
|
|
|
|
#include "toplev.h"
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#include "rtl.h"
|
1999-09-20 12:00:03 +02:00
|
|
|
|
#include "tm_p.h"
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#include "basic-block.h"
|
|
|
|
|
#include "regs.h"
|
1999-08-09 16:00:21 +02:00
|
|
|
|
#include "function.h"
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#include "hard-reg-set.h"
|
|
|
|
|
#include "flags.h"
|
|
|
|
|
#include "insn-config.h"
|
|
|
|
|
#include "insn-attr.h"
|
|
|
|
|
#include "except.h"
|
Warning Fixes:
* Makefile.in (print-rtl.o): Depend on bitmap.h.
(dbxout.o): Depend on toplev.h.
($(SCHED_PREFIX)sched.o): Likewise.
($(out_object_file)): Likewise for system.h and toplev.h.
(cppmain.o): Depend on gansidecl.h.
(cpplib.o): Likewise.
(cpperror.o): Likewise.
(cppexp.o): Likewise.
(cpphash.o): Likewise.
(cppalloc.o): Likewise.
(fix-header.o): Depend on cpplib.h and cpphash.h.
(scan-decls.o): Depend on gansidecl.h.
* basic-block.h (free_regset_vector): Add prototype.
* cccp.c (check_precompiled): Mark parameter `fname' with
ATTRIBUTE_UNUSED.
(do_assert): Likewise for `op' and `keyword'.
(do_unassert): Likewise.
(do_line): Likewise for `keyword'.
(do_error): Likewise for `op' and `keyword'.
(do_warning): Likewise.
(do_ident): Likewise for `keyword'.
(do_pragma): Likewise for `limit', `op' and `keyword'.
(do_sccs): Likewise.
(do_if): Likewise for `keyword'.
(do_elif): Likewise.
(do_else): Likewise.
(do_endif): Likewise.
* collect2.c (getenv): Remove redundant prototype.
(collect_exit, collect_execute, dump_file): Likewise.
(dump_list): Wrap prototype and definition in COLLECT_EXPORT_LIST.
(dump_prefix_list): Hide prototype and definition.
* sparc.c: Include toplev.h.
(intreg_operand): Mark parameter `mode' with ATTRIBUTE_UNUSED.
(symbolic_memory_operand): Likewise.
(sp64_medium_pic_operand): Likewise.
(data_segment_operand): Likewise.
(text_segment_operand): Likewise.
(splittable_symbolic_memory_operand): Likewise.
(splittable_immediate_memory_operand): Likewise.
(eq_or_neq): Likewise.
(normal_comp_operator): Likewise.
(noov_compare_op): Likewise.
(v9_regcmp_op): Likewise.
(v8plus_regcmp_op): Likewise.
(extend_op): Likewise.
(cc_arithop): Likewise.
(cc_arithopn): Likewise.
(small_int): Likewise.
(uns_small_int): Likewise.
(clobbered_register): Likewise.
(legitimize_pic_address): Likewise.
(delay_operand): Likewise.
(sparc_builtin_saveregs): Remove unused variable `stdarg'.
* sparc.h (order_regs_for_local_alloc, eligible_for_return_delay,
sparc_issue_rate, v8plus_regcmp_p): Add prototypes.
* sparc.md (cmpdi_v8plus): Add abort for default case in switch.
* cppalloc.c: Include gansidecl.h.
* cpperror.c: Include stdarg.h/varargs.h and gansidecl.h.
(cpp_file_line_for_message): Mark parameter `pfile' with
ATTRIBUTE_UNUSED.
(v_cpp_message): New function.
(cpp_message): Use it. Also convert to variable arguments.
(cpp_fatal): Likewise.
(cpp_pfatal_with_name): Constify parameter `name'.
* cppexp.c: Move gansidecl.h before cpplib.h.
* cpphash.c: Likewise.
* cpphash.h (hashf, delete_macro): Add prototypes.
* cpplib.c: Include stdarg.h/varargs.h and move gansidecl.h before
cpplib.h. Don't include errno.h.
(update_path): Add arguments to prototype.
(cpp_fatal, cpp_file_line_for_message, cpp_message, delete_macro,
cpp_print_containing_files): Remove redundant prototypes.
(cpp_hash_cleanup, add_import, append_include_chain,
make_assertion, path_include, initialize_builtins,
initialize_char_syntax, finclude, validate_else, comp_def_part,
lookup_import, redundant_include_p, is_system_include,
read_name_map, read_filename_string, open_include_file,
check_macro_name, compare_defs, compare_token_lists,
eval_if_expression, change_newlines): Add prototype arguments.
(hashf): Remove redundant prototype.
(read_token_list, free_token_list, safe_read, xcalloc, savestring,
conditional_skip, skip_if_group): Add prototype arguments.
(fdopen): Remove redundant prototype.
(do_define, do_line, do_include, do_undef, do_error, do_pragma,
do_ident, do_if, do_xifdef, do_else, do_elif, do_endif, do_sccs,
do_once, do_assert, do_unassert, do_warning): Add prototype arguments.
(struct directive): Add prototype arguments to function pointer
member `func'.
(handle_directive): Add missing arguments to call to `do_line'.
(do_include): Mark parameters `unused1' and `unused2' with
ATTRIBUTE_UNUSED.
(do_line): Likewise for `keyword' and new parameters `unused1' and
`unused2'.
(do_error): Likewise for `keyword'.
(do_warning): Likewise. Also add missing argument `pfile' in call
to cpp_pedwarn.
(do_once): Mark parameter `keyword', `unused1' and `unused2' with
ATTRIBUTE_UNUSED.
(do_ident): Likewise for `keyword', `buf' and `limit'.
(do_pragma): Likewise. Also add missing arguments in call to do_once.
(do_sccs): Mark parameter `keyword', `buf' and `limit' with
ATTRIBUTE_UNUSED.
(do_if): Likewise for `keyword'.
(do_elif): Likewise.
(eval_if_expression): Likewise for `buf' and `length'.
(do_xifdef): Likewise for `unused1' and `unused2'.
(do_else): Likewise for `keyword', `buf' and `limit'.
(do_endif): Likewise.
(parse_name): Add missing argument `pfile' in call to cpp_pedwarn.
(cpp_handle_options): Remove superfluous NULL argument in call to
cpp_fatal.
(cpp_handle_options): Likewise.
(do_assert): Mark parameter `keyword', `buf' and `limit' with
ATTRIBUTE_UNUSED.
(do_unassert): Likewise.
(cpp_print_file_and_line): Add missing argument `pfile' in call to
cpp_file_line_for_message.
(v_cpp_error): New function.
(cpp_error): Use it. Also accept variable arguments.
(v_cpp_warning): New function.
(cpp_warning): Use it. Also accept variable arguments.
(cpp_pedwarn): Accept variable arguments.
(v_cpp_error_with_line): New function
(cpp_error_with_line): Use it. Accept variable arguments.
(v_cpp_warning_with_line): New function.
(cpp_warning_with_line): Use it. Accept variable arguments. Hide
definition.
(cpp_pedwarn_with_line): Accept variable arguments.
(cpp_pedwarn_with_file_and_line): Likewise.
(cpp_error_from_errno): Constify parameter `name'. Add missing
argument `pfile' in call to cpp_file_line_for_message.
(cpp_perror_with_name): Constify parameter `name'.
* cpplib.h: Define PARAMS() in terms of PROTO().
(fatal): Remove redundant prototype.
(cpp_error, cpp_warning, cpp_pedwarn, cpp_error_with_line,
cpp_pedwarn_with_line, cpp_pedwarn_with_file_and_line,
cpp_error_from_errno, cpp_perror_with_name, cpp_pfatal_with_name,
cpp_fatal, cpp_message, cpp_pfatal_with_name,
cpp_file_line_for_message, cpp_print_containing_files): Add
arguments to prototypes.
(scan_decls, cpp_finish): Add prototypes.
* cppmain.c: Include gansidecl.h.
(main): Remove unused variable `i'.
* dbxout.c: Include toplev.h.
* demangle.h (do_tlink, collect_execute, collect_exit,
collect_wait, dump_file, file_exists): Add prototype.
* dwarf2out.c (dwarf_type_encoding_name, decl_start_label): Hide
prototype and definition.
(gen_unspecified_parameters_die): Don't assign results of call to
function new_die() to unused variable `parm_die'.
(dwarf2out_line): Mark parameter `filename' with ATTRIBUTE_UNUSED.
(dwarf2out_define): Likewise for `lineno' and `buffer'.
* dwarfout.c (output_unsigned_leb128, output_signed_leb128): Hide
prototype and definition.
(output_die): Add prototype arguments to function pointer arg.
(output_unspecified_parameters_die): Mark parameter `arg' with
ATTRIBUTE_UNUSED.
* except.c (output_exception_table_entry): Remove unused variable
`eh_entry'.
* except.h (expand_fixup_region_start, expand_fixup_region_end):
Add prototypes.
* expr.c (do_jump_by_parts_equality_rtx): Remove prototype.
* expr.h (do_jump_by_parts_equality_rtx): Add prototype.
* fix-header.c: Include stdarg.h/varargs.h, move gansidecl.h
before cpplib.h, include cpphash.h, remove redundant prototype of
cpp_fatal, don't define `const', add a prototype for `fatal'.
(cpp_file_line_for_message): Add missing arguments `pfile'.
(v_cpp_message): New function.
(cpp_message): Use it.
(v_fatal): New function.
(fatal, cpp_fatal): Use it.
(cpp_pfatal_with_name): Constify parameter `name'.
* flow.c (free_regset_vector): Remove redundant prototype.
* function.c (round_down): Wrap prototype and definition with
macro ARGS_GROW_DOWNWARD.
(record_insns): Wrap prototype and definition with
defined (HAVE_prologue) || defined (HAVE_epilogue).
* gansidecl.h (ATTRIBUTE_PRINTF_4, ATTRIBUTE_PRINTF_5): New macros.
* gen-protos.c: Include gansidecl.h.
(hashf): Don't make it static, constify parameter `name'.
* genattrtab.c (check_attr_test): Change XEXP() to XSTR() to match
specifier %s in calls to function `fatal'.
* haifa-sched.c: Include toplev.h.
(find_rgns): Remove unused variable `j'.
* integrate.c (note_modified_parmregs): Mark parameter `x' with
ATTRIBUTE_UNUSED.
(mark_stores): Likewise.
* jump.c (mark_modified_reg): Likewise.
* output.h (insn_current_reference_address): Add prototype.
(eh_frame_section): Likewise.
* print-rtl.c: Include bitmap.h.
* reload1.c (reload): Wrap variables `note' and `next' in macro
PRESERVE_DEATH_INFO_REGNO_P.
(forget_old_reloads_1): Mark parameter `ignored' with
ATTRIBUTE_UNUSED.
(choose_reload_regs): Remove unused variable `in'.
(reload_cse_invalidate_mem): Mark parameter `ignore' with
ATTRIBUTE_UNUSED.
(reload_cse_check_clobber): Likewise.
* rtl.h (expand_null_return, reg_classes_intersect_p): Add prototype.
(mark_elimination): Fix typo in prototype.
* scan-decls.c: Include gansidecl.h.
* tree.h (using_eh_for_cleanups, supports_one_only): Add prototype.
From-SVN: r19867
1998-05-19 10:42:48 +02:00
|
|
|
|
#include "toplev.h"
|
1998-10-17 22:26:29 +02:00
|
|
|
|
#include "recog.h"
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
extern char *reg_known_equiv_p;
|
|
|
|
|
extern rtx *reg_known_value;
|
|
|
|
|
|
|
|
|
|
#ifdef INSN_SCHEDULING
|
|
|
|
|
|
|
|
|
|
/* target_units bitmask has 1 for each unit in the cpu. It should be
|
|
|
|
|
possible to compute this variable from the machine description.
|
1999-09-06 23:55:23 +02:00
|
|
|
|
But currently it is computed by examining the insn list. Since
|
1997-08-12 06:07:19 +02:00
|
|
|
|
this is only needed for visualization, it seems an acceptable
|
|
|
|
|
solution. (For understanding the mapping of bits to units, see
|
1999-09-06 23:55:23 +02:00
|
|
|
|
definition of function_units[] in "insn-attrtab.c".) */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static int target_units = 0;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* issue_rate is the number of insns that can be scheduled in the same
|
|
|
|
|
machine cycle. It can be defined in the config/mach/mach.h file,
|
|
|
|
|
otherwise we set it to 1. */
|
|
|
|
|
|
|
|
|
|
static int issue_rate;
|
|
|
|
|
|
1997-08-19 23:22:04 +02:00
|
|
|
|
#ifndef ISSUE_RATE
|
|
|
|
|
#define ISSUE_RATE 1
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#endif
|
|
|
|
|
|
1998-05-06 18:32:40 +02:00
|
|
|
|
/* sched-verbose controls the amount of debugging output the
|
1997-08-12 06:07:19 +02:00
|
|
|
|
scheduler prints. It is controlled by -fsched-verbose-N:
|
|
|
|
|
N>0 and no -DSR : the output is directed to stderr.
|
|
|
|
|
N>=10 will direct the printouts to stderr (regardless of -dSR).
|
|
|
|
|
N=1: same as -dSR.
|
|
|
|
|
N=2: bb's probabilities, detailed ready list info, unit/insn info.
|
|
|
|
|
N=3: rtl at abort point, control-flow, regions info.
|
1998-05-06 18:32:40 +02:00
|
|
|
|
N=5: dependences info. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
#define MAX_RGN_BLOCKS 10
|
|
|
|
|
#define MAX_RGN_INSNS 100
|
|
|
|
|
|
|
|
|
|
static int sched_verbose_param = 0;
|
|
|
|
|
static int sched_verbose = 0;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* nr_inter/spec counts interblock/speculative motion for the function. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int nr_inter, nr_spec;
|
|
|
|
|
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Debugging file. All printouts are sent to dump, which is always set,
|
1997-08-12 06:07:19 +02:00
|
|
|
|
either to stderr, or to the dump listing file (-dRS). */
|
|
|
|
|
static FILE *dump = 0;
|
|
|
|
|
|
|
|
|
|
/* fix_sched_param() is called from toplev.c upon detection
|
|
|
|
|
of the -fsched-***-N options. */
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
fix_sched_param (param, val)
|
rtl.c (rtx_name): Constify a char*.
* rtl.c (rtx_name): Constify a char*.
* rtl.h (rtx_name, fix_sched_param): Likewise.
* gmicro/gmicro.c (rtx_name): Remove redundant declaration.
(mypr): Use accessor macro, not `rtx_name'.
* genemit.c (print_code): Constify a char*.
* genopinit.c (gen_insn): Use accessor macro, not `rtx_name'.
* genpeep.c (print_code): Constify a char*.
* genrecog.c (print_code): Likewise.
* graph.c (start_fct, start_bb, node_data, draw_edge, end_fct,
end_bb): Add static prototype.
(draw_edge): Constify a char*.
(end_bb): Remove unused parameter.
* haifa-sched.c (fix_sched_param, safe_concat, print_exp
print_block_visualization): Constify a char*.
From-SVN: r28782
1999-08-21 00:32:54 +02:00
|
|
|
|
const char *param, *val;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 18:32:40 +02:00
|
|
|
|
if (!strcmp (param, "verbose"))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
sched_verbose_param = atoi (val);
|
|
|
|
|
else
|
|
|
|
|
warning ("fix_sched_param: unknown param: %s", param);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Element N is the next insn that sets (hard or pseudo) register
|
|
|
|
|
N within the current basic block; or zero, if there is no
|
|
|
|
|
such insn. Needed for new registers which may be introduced
|
|
|
|
|
by splitting insns. */
|
|
|
|
|
static rtx *reg_last_uses;
|
|
|
|
|
static rtx *reg_last_sets;
|
1999-03-07 12:22:10 +01:00
|
|
|
|
static rtx *reg_last_clobbers;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static regset reg_pending_sets;
|
1999-03-07 12:22:10 +01:00
|
|
|
|
static regset reg_pending_clobbers;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int reg_pending_sets_all;
|
|
|
|
|
|
|
|
|
|
/* Vector indexed by INSN_UID giving the original ordering of the insns. */
|
|
|
|
|
static int *insn_luid;
|
|
|
|
|
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])
|
|
|
|
|
|
1999-10-17 08:28:22 +02:00
|
|
|
|
/* To speed up the test for duplicate dependency links we keep a record
|
1999-10-17 23:27:56 +02:00
|
|
|
|
of true dependencies created by add_dependence when the average number
|
|
|
|
|
of instructions in a basic block is very large.
|
1999-10-17 08:28:22 +02:00
|
|
|
|
|
1999-10-17 23:27:56 +02:00
|
|
|
|
Studies have shown that there is typically around 5 instructions between
|
|
|
|
|
branches for typical C code. So we can make a guess that the average
|
|
|
|
|
basic block is approximately 5 instructions long; we will choose 100X
|
|
|
|
|
the average size as a very large basic block.
|
|
|
|
|
|
1999-10-17 08:28:22 +02:00
|
|
|
|
Each insn has an associated bitmap for its dependencies. Each bitmap
|
|
|
|
|
has enough entries to represent a dependency on any other insn in the
|
|
|
|
|
insn chain. */
|
|
|
|
|
static sbitmap *true_dependency_cache;
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
/* Vector indexed by INSN_UID giving each instruction a priority. */
|
|
|
|
|
static int *insn_priority;
|
|
|
|
|
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])
|
|
|
|
|
|
|
|
|
|
static short *insn_costs;
|
|
|
|
|
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]
|
|
|
|
|
|
|
|
|
|
/* Vector indexed by INSN_UID giving an encoding of the function units
|
|
|
|
|
used. */
|
|
|
|
|
static short *insn_units;
|
|
|
|
|
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Vector indexed by INSN_UID giving each instruction a
|
|
|
|
|
register-weight. This weight is an estimation of the insn
|
|
|
|
|
contribution to registers pressure. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int *insn_reg_weight;
|
|
|
|
|
#define INSN_REG_WEIGHT(INSN) (insn_reg_weight[INSN_UID (INSN)])
|
|
|
|
|
|
|
|
|
|
/* Vector indexed by INSN_UID giving list of insns which
|
|
|
|
|
depend upon INSN. Unlike LOG_LINKS, it represents forward dependences. */
|
|
|
|
|
static rtx *insn_depend;
|
|
|
|
|
#define INSN_DEPEND(INSN) insn_depend[INSN_UID (INSN)]
|
|
|
|
|
|
|
|
|
|
/* Vector indexed by INSN_UID. Initialized to the number of incoming
|
|
|
|
|
edges in forward dependence graph (= number of LOG_LINKS). As
|
|
|
|
|
scheduling procedes, dependence counts are decreased. An
|
|
|
|
|
instruction moves to the ready list when its counter is zero. */
|
|
|
|
|
static int *insn_dep_count;
|
|
|
|
|
#define INSN_DEP_COUNT(INSN) (insn_dep_count[INSN_UID (INSN)])
|
|
|
|
|
|
|
|
|
|
/* Vector indexed by INSN_UID giving an encoding of the blockage range
|
|
|
|
|
function. The unit and the range are encoded. */
|
|
|
|
|
static unsigned int *insn_blockage;
|
|
|
|
|
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
|
|
|
|
|
#define UNIT_BITS 5
|
|
|
|
|
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
|
|
|
|
|
#define ENCODE_BLOCKAGE(U, R) \
|
1999-03-07 12:50:32 +01:00
|
|
|
|
(((U) << BLOCKAGE_BITS \
|
1997-08-12 06:07:19 +02:00
|
|
|
|
| MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS \
|
1999-03-07 12:50:32 +01:00
|
|
|
|
| MAX_BLOCKAGE_COST (R))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
|
|
|
|
|
#define BLOCKAGE_RANGE(B) \
|
|
|
|
|
(((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
|
1998-02-17 22:35:43 +01:00
|
|
|
|
| ((B) & BLOCKAGE_MASK))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Encodings of the `<name>_unit_blockage_range' function. */
|
|
|
|
|
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
|
|
|
|
|
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
|
|
|
|
|
|
|
|
|
|
#define DONE_PRIORITY -1
|
|
|
|
|
#define MAX_PRIORITY 0x7fffffff
|
|
|
|
|
#define TAIL_PRIORITY 0x7ffffffe
|
|
|
|
|
#define LAUNCH_PRIORITY 0x7f000001
|
|
|
|
|
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
|
|
|
|
|
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Vector indexed by INSN_UID giving number of insns referring to this
|
|
|
|
|
insn. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int *insn_ref_count;
|
|
|
|
|
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])
|
|
|
|
|
|
|
|
|
|
/* Vector indexed by INSN_UID giving line-number note in effect for each
|
|
|
|
|
insn. For line-number notes, this indicates whether the note may be
|
|
|
|
|
reused. */
|
|
|
|
|
static rtx *line_note;
|
|
|
|
|
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])
|
|
|
|
|
|
|
|
|
|
/* Vector indexed by basic block number giving the starting line-number
|
|
|
|
|
for each basic block. */
|
|
|
|
|
static rtx *line_note_head;
|
|
|
|
|
|
|
|
|
|
/* List of important notes we must keep around. This is a pointer to the
|
|
|
|
|
last element in the list. */
|
|
|
|
|
static rtx note_list;
|
|
|
|
|
|
|
|
|
|
/* Queues, etc. */
|
|
|
|
|
|
|
|
|
|
/* An instruction is ready to be scheduled when all insns preceding it
|
|
|
|
|
have already been scheduled. It is important to ensure that all
|
|
|
|
|
insns which use its result will not be executed until its result
|
|
|
|
|
has been computed. An insn is maintained in one of four structures:
|
|
|
|
|
|
|
|
|
|
(P) the "Pending" set of insns which cannot be scheduled until
|
|
|
|
|
their dependencies have been satisfied.
|
|
|
|
|
(Q) the "Queued" set of insns that can be scheduled when sufficient
|
|
|
|
|
time has passed.
|
|
|
|
|
(R) the "Ready" list of unscheduled, uncommitted insns.
|
|
|
|
|
(S) the "Scheduled" list of insns.
|
|
|
|
|
|
|
|
|
|
Initially, all insns are either "Pending" or "Ready" depending on
|
|
|
|
|
whether their dependencies are satisfied.
|
|
|
|
|
|
|
|
|
|
Insns move from the "Ready" list to the "Scheduled" list as they
|
|
|
|
|
are committed to the schedule. As this occurs, the insns in the
|
|
|
|
|
"Pending" list have their dependencies satisfied and move to either
|
|
|
|
|
the "Ready" list or the "Queued" set depending on whether
|
|
|
|
|
sufficient time has passed to make them ready. As time passes,
|
|
|
|
|
insns move from the "Queued" set to the "Ready" list. Insns may
|
|
|
|
|
move from the "Ready" list to the "Queued" set if they are blocked
|
|
|
|
|
due to a function unit conflict.
|
|
|
|
|
|
|
|
|
|
The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
|
|
|
|
|
insns, i.e., those that are ready, queued, and pending.
|
|
|
|
|
The "Queued" set (Q) is implemented by the variable `insn_queue'.
|
|
|
|
|
The "Ready" list (R) is implemented by the variables `ready' and
|
|
|
|
|
`n_ready'.
|
|
|
|
|
The "Scheduled" list (S) is the new insn chain built by this pass.
|
|
|
|
|
|
|
|
|
|
The transition (R->S) is implemented in the scheduling loop in
|
|
|
|
|
`schedule_block' when the best insn to schedule is chosen.
|
|
|
|
|
The transition (R->Q) is implemented in `queue_insn' when an
|
1998-05-06 23:09:07 +02:00
|
|
|
|
insn is found to have a function unit conflict with the already
|
1997-08-12 06:07:19 +02:00
|
|
|
|
committed insns.
|
|
|
|
|
The transitions (P->R and P->Q) are implemented in `schedule_insn' as
|
|
|
|
|
insns move from the ready list to the scheduled list.
|
|
|
|
|
The transition (Q->R) is implemented in 'queue_to_insn' as time
|
|
|
|
|
passes or stalls are introduced. */
|
|
|
|
|
|
|
|
|
|
/* Implement a circular buffer to delay instructions until sufficient
|
|
|
|
|
time has passed. INSN_QUEUE_SIZE is a power of two larger than
|
|
|
|
|
MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c. This is the
|
|
|
|
|
longest time an isnsn may be queued. */
|
|
|
|
|
static rtx insn_queue[INSN_QUEUE_SIZE];
|
|
|
|
|
static int q_ptr = 0;
|
|
|
|
|
static int q_size = 0;
|
|
|
|
|
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
|
|
|
|
|
#define NEXT_Q_AFTER(X, C) (((X)+C) & (INSN_QUEUE_SIZE-1))
|
|
|
|
|
|
|
|
|
|
/* Vector indexed by INSN_UID giving the minimum clock tick at which
|
|
|
|
|
the insn becomes ready. This is used to note timing constraints for
|
|
|
|
|
insns in the pending list. */
|
|
|
|
|
static int *insn_tick;
|
|
|
|
|
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])
|
|
|
|
|
|
|
|
|
|
/* Forward declarations. */
|
|
|
|
|
static void add_dependence PROTO ((rtx, rtx, enum reg_note));
|
1999-10-19 00:20:27 +02:00
|
|
|
|
#ifdef HAVE_cc0
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static void remove_dependence PROTO ((rtx, rtx));
|
1999-10-19 00:20:27 +02:00
|
|
|
|
#endif
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static rtx find_insn_list PROTO ((rtx, rtx));
|
|
|
|
|
static int insn_unit PROTO ((rtx));
|
|
|
|
|
static unsigned int blockage_range PROTO ((int, rtx));
|
|
|
|
|
static void clear_units PROTO ((void));
|
|
|
|
|
static int actual_hazard_this_instance PROTO ((int, int, rtx, int, int));
|
|
|
|
|
static void schedule_unit PROTO ((int, rtx, int));
|
|
|
|
|
static int actual_hazard PROTO ((int, rtx, int, int));
|
|
|
|
|
static int potential_hazard PROTO ((int, rtx, int));
|
|
|
|
|
static int insn_cost PROTO ((rtx, rtx, rtx));
|
|
|
|
|
static int priority PROTO ((rtx));
|
|
|
|
|
static void free_pending_lists PROTO ((void));
|
|
|
|
|
static void add_insn_mem_dependence PROTO ((rtx *, rtx *, rtx, rtx));
|
|
|
|
|
static void flush_pending_lists PROTO ((rtx, int));
|
|
|
|
|
static void sched_analyze_1 PROTO ((rtx, rtx));
|
|
|
|
|
static void sched_analyze_2 PROTO ((rtx, rtx));
|
|
|
|
|
static void sched_analyze_insn PROTO ((rtx, rtx, rtx));
|
|
|
|
|
static void sched_analyze PROTO ((rtx, rtx));
|
gansidecl.h (__attribute__, [...]): Delete.
* gansidecl.h (__attribute__, ATTRIBUTE_UNUSED_LABEL,
ATTRIBUTE_UNUSED, ATTRIBUTE_NORETURN, ATTRIBUTE_PRINTF,
ATTRIBUTE_PRINTF_1, ATTRIBUTE_PRINTF_2, ATTRIBUTE_PRINTF_3,
ATTRIBUTE_PRINTF_4, ATTRIBUTE_PRINTF_5, GENERIC_PTR): Delete.
* c-decl.c (field_decl_cmp): Use PTR instead of GENERIC_PTR.
* cccp.c (pcfinclude): Likewise.
* global.c (allocno_compare): Likewise.
* haifa-sched.c (rank_for_schedule): Likewise.
* local-alloc.c (qty_sugg_compare_1, qty_compare_1): Likewise.
* reload1.c (hard_reg_use_compare, reload_reg_class_lower): Likewise.
* stupid.c (stupid_reg_compare): Likewise.
* tree.c (_obstack_allocated_p): Likewise.
* varray.h (varray_data_tag, VARRAY_GENERIC_PTR_INIT): Likewise.
From-SVN: r29208
1999-09-08 17:44:18 +02:00
|
|
|
|
static int rank_for_schedule PROTO ((const PTR, const PTR));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static void swap_sort PROTO ((rtx *, int));
|
|
|
|
|
static void queue_insn PROTO ((rtx, int));
|
|
|
|
|
static int schedule_insn PROTO ((rtx, rtx *, int, int));
|
1999-10-11 01:45:27 +02:00
|
|
|
|
static void find_insn_reg_weight PROTO ((int));
|
1998-02-17 22:35:43 +01:00
|
|
|
|
static int schedule_block PROTO ((int, int));
|
rtl.c (rtx_name): Constify a char*.
* rtl.c (rtx_name): Constify a char*.
* rtl.h (rtx_name, fix_sched_param): Likewise.
* gmicro/gmicro.c (rtx_name): Remove redundant declaration.
(mypr): Use accessor macro, not `rtx_name'.
* genemit.c (print_code): Constify a char*.
* genopinit.c (gen_insn): Use accessor macro, not `rtx_name'.
* genpeep.c (print_code): Constify a char*.
* genrecog.c (print_code): Likewise.
* graph.c (start_fct, start_bb, node_data, draw_edge, end_fct,
end_bb): Add static prototype.
(draw_edge): Constify a char*.
(end_bb): Remove unused parameter.
* haifa-sched.c (fix_sched_param, safe_concat, print_exp
print_block_visualization): Constify a char*.
From-SVN: r28782
1999-08-21 00:32:54 +02:00
|
|
|
|
static char *safe_concat PROTO ((char *, char *, const char *));
|
1998-06-21 20:03:21 +02:00
|
|
|
|
static int insn_issue_delay PROTO ((rtx));
|
|
|
|
|
static void adjust_priority PROTO ((rtx));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Some insns (e.g. call) are not allowed to move across blocks. */
|
|
|
|
|
static char *cant_move;
|
|
|
|
|
#define CANT_MOVE(insn) (cant_move[INSN_UID (insn)])
|
|
|
|
|
|
|
|
|
|
/* Control flow graph edges are kept in circular lists.  */
typedef struct
{
  int from_block;		/* Source basic block of the edge.  */
  int to_block;			/* Destination basic block of the edge.  */
  int next_in;			/* Next edge on TO_BLOCK's circular list of
				   incoming edges.  */
  int next_out;			/* Next edge on FROM_BLOCK's circular list of
				   outgoing edges.  */
}
haifa_edge;

/* Table of all edges, indexed by edge number.  */
static haifa_edge *edge_table;

/* Accessors for an edge's fields, given its index in edge_table.  */
#define NEXT_IN(edge) (edge_table[edge].next_in)
#define NEXT_OUT(edge) (edge_table[edge].next_out)
#define FROM_BLOCK(edge) (edge_table[edge].from_block)
#define TO_BLOCK(edge) (edge_table[edge].to_block)
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Number of edges in the control flow graph. (In fact, larger than
|
|
|
|
|
that by 1, since edge 0 is unused.) */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int nr_edges;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Circular list of incoming/outgoing edges of a block. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int *in_edges;
|
|
|
|
|
static int *out_edges;
|
|
|
|
|
|
|
|
|
|
#define IN_EDGES(block) (in_edges[block])
|
|
|
|
|
#define OUT_EDGES(block) (out_edges[block])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
static int is_cfg_nonregular PROTO ((void));
|
1998-04-17 00:00:09 +02:00
|
|
|
|
static int build_control_flow PROTO ((int_list_ptr *, int_list_ptr *,
|
|
|
|
|
int *, int *));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static void new_edge PROTO ((int, int));
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* A region is the main entity for interblock scheduling: insns
   are allowed to move between blocks in the same region, along
   control flow graph edges, in the 'up' direction.  */
typedef struct
{
  int rgn_nr_blocks;		/* Number of blocks in region.  */
  int rgn_blocks;		/* cblocks in the region (actually index in
				   rgn_bb_table).  */
}
region;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Number of regions in the procedure. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int nr_regions;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Table of region descriptions. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static region *rgn_table;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Array of lists of regions' blocks. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int *rgn_bb_table;
|
|
|
|
|
|
|
|
|
|
/* Topological order of blocks in the region (if b2 is reachable from
|
1999-09-06 23:55:23 +02:00
|
|
|
|
b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
|
|
|
|
|
always referred to by either block or b, while its topological
|
|
|
|
|
   order name (in the region) is referred to by bb.  */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int *block_to_bb;
|
|
|
|
|
|
|
|
|
|
/* The number of the region containing a block. */
|
|
|
|
|
static int *containing_rgn;
|
|
|
|
|
|
|
|
|
|
#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
|
|
|
|
|
#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
|
|
|
|
|
#define BLOCK_TO_BB(block) (block_to_bb[block])
|
|
|
|
|
#define CONTAINING_RGN(block) (containing_rgn[block])
|
|
|
|
|
|
|
|
|
|
void debug_regions PROTO ((void));
|
|
|
|
|
static void find_single_block_region PROTO ((void));
|
1998-04-17 00:00:09 +02:00
|
|
|
|
static void find_rgns PROTO ((int_list_ptr *, int_list_ptr *,
|
|
|
|
|
int *, int *, sbitmap *));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int too_large PROTO ((int, int *, int *));
|
|
|
|
|
|
|
|
|
|
extern void debug_live PROTO ((int, int));
|
|
|
|
|
|
|
|
|
|
/* Blocks of the current region being scheduled. */
|
|
|
|
|
static int current_nr_blocks;
|
|
|
|
|
static int current_blocks;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* The mapping from bb to block. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Bit vectors and bitset operations are needed for computations on
   the control flow graph.  */

/* A bitset is an array of HOST_WIDE_INT words; callers track the size
   separately (e.g. bbset_size, edgeset_size).  */
typedef unsigned HOST_WIDE_INT *bitset;

/* A bit list: the expansion of a bitset into an explicit list of the
   indices of its set bits.  */
typedef struct
{
  int *first_member;		/* Pointer to the list start in bitlst_table.  */
  int nr_members;		/* The number of members of the bit list.  */
}
bitlst;

/* Shared pool backing every bitlst's member array, with its current
   fill point and allocated size.  */
static int bitlst_table_last;
static int bitlst_table_size;
static int *bitlst_table;

static char bitset_member PROTO ((bitset, int, int));
static void extract_bitlst PROTO ((bitset, int, bitlst *));
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
typedef bitlst bblst;
typedef struct
{
  char is_valid;		/* Nonzero if insns may be moved from this
				   source block to the target.  */
  char is_speculative;		/* Nonzero if such a motion is speculative.  */
  int src_prob;			/* Probability of the source relative to the
				   target (see SRC_PROB/GET_SRC_PROB).  */
  bblst split_bbs;		/* Blocks whose liveness must be checked
				   before a speculative motion.  */
  bblst update_bbs;		/* Blocks whose liveness must be updated
				   after a speculative motion.  */
}
candidate;

/* One candidate entry per source bb of the current target.  */
static candidate *candidate_table;
|
|
|
|
|
|
|
|
|
|
/* A speculative motion requires checking live information on the path
|
|
|
|
|
from 'source' to 'target'. The split blocks are those to be checked.
|
|
|
|
|
After a speculative motion, live information should be modified in
|
|
|
|
|
the 'update' blocks.
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
Lists of split and update blocks for each candidate of the current
|
|
|
|
|
target are in array bblst_table. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static int *bblst_table, bblst_size, bblst_last;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
#define IS_VALID(src) ( candidate_table[src].is_valid )
|
|
|
|
|
#define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
|
|
|
|
|
#define SRC_PROB(src) ( candidate_table[src].src_prob )
|
|
|
|
|
|
|
|
|
|
/* The bb being currently scheduled. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static int target_bb;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* List of edges. */
|
|
|
|
|
typedef bitlst edgelst;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Target info functions. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static void split_edges PROTO ((int, int, edgelst *));
|
|
|
|
|
static void compute_trg_info PROTO ((int));
|
|
|
|
|
void debug_candidate PROTO ((int));
|
|
|
|
|
void debug_candidates PROTO ((int));
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Bit-set of bbs, where bit 'i' stands for bb 'i'. */
|
|
|
|
|
typedef bitset bbset;
|
|
|
|
|
|
|
|
|
|
/* Number of words of the bbset. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static int bbset_size;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Dominators array: dom[i] contains the bbset of dominators of
|
|
|
|
|
bb i in the region. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static bbset *dom;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* bb 0 is the only region entry. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#define IS_RGN_ENTRY(bb) (!bb)
|
|
|
|
|
|
|
|
|
|
/* Is bb_src dominated by bb_trg. */
|
|
|
|
|
#define IS_DOMINATED(bb_src, bb_trg) \
|
|
|
|
|
( bitset_member (dom[bb_src], bb_trg, bbset_size) )
|
|
|
|
|
|
|
|
|
|
/* Probability: Prob[i] is a float in [0, 1] which is the probability
|
|
|
|
|
of bb i relative to the region entry. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static float *prob;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* The probability of bb_src, relative to bb_trg. Note, that while the
|
1997-08-12 06:07:19 +02:00
|
|
|
|
'prob[bb]' is a float in [0, 1], this macro returns an integer
|
|
|
|
|
in [0, 100]. */
|
|
|
|
|
#define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
|
|
|
|
|
prob[bb_trg])))
|
|
|
|
|
|
|
|
|
|
/* Bit-set of edges, where bit i stands for edge i. */
|
|
|
|
|
typedef bitset edgeset;
|
|
|
|
|
|
|
|
|
|
/* Number of edges in the region. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static int rgn_nr_edges;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Array of size rgn_nr_edges. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static int *rgn_edges;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Number of words in an edgeset. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static int edgeset_size;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Mapping from each edge in the graph to its number in the rgn. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static int *edge_to_bit;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#define EDGE_TO_BIT(edge) (edge_to_bit[edge])
|
|
|
|
|
|
|
|
|
|
/* The split edges of a source bb is different for each target
|
|
|
|
|
bb. In order to compute this efficiently, the 'potential-split edges'
|
|
|
|
|
are computed for each bb prior to scheduling a region. This is actually
|
|
|
|
|
the split edges of each bb relative to the region entry.
|
|
|
|
|
|
|
|
|
|
pot_split[bb] is the set of potential split edges of bb. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static edgeset *pot_split;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* For every bb, a set of its ancestor edges. */
|
1997-08-19 18:15:54 +02:00
|
|
|
|
static edgeset *ancestor_edges;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static void compute_dom_prob_ps PROTO ((int));
|
|
|
|
|
|
|
|
|
|
#define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
|
1999-10-19 00:20:27 +02:00
|
|
|
|
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
|
|
|
|
|
#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
|
|
|
|
|
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Parameters affecting the decision of rank_for_schedule(). */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#define MIN_DIFF_PRIORITY 2
|
|
|
|
|
#define MIN_PROBABILITY 40
|
|
|
|
|
#define MIN_PROB_DIFF 10
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Speculative scheduling functions. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int check_live_1 PROTO ((int, rtx));
|
|
|
|
|
static void update_live_1 PROTO ((int, rtx));
|
1998-02-17 22:35:43 +01:00
|
|
|
|
static int check_live PROTO ((rtx, int));
|
|
|
|
|
static void update_live PROTO ((rtx, int));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static void set_spec_fed PROTO ((rtx));
|
|
|
|
|
static int is_pfree PROTO ((rtx, int, int));
|
|
|
|
|
static int find_conditional_protection PROTO ((rtx, int));
|
|
|
|
|
static int is_conditionally_protected PROTO ((rtx, int, int));
|
|
|
|
|
static int may_trap_exp PROTO ((rtx, int));
|
1997-12-09 09:20:07 +01:00
|
|
|
|
static int haifa_classify_insn PROTO ((rtx));
|
1998-04-17 01:56:12 +02:00
|
|
|
|
static int is_prisky PROTO ((rtx, int, int));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static int is_exception_free PROTO ((rtx, int, int));
|
|
|
|
|
|
|
|
|
|
static char find_insn_mem_list PROTO ((rtx, rtx, rtx, rtx));
|
|
|
|
|
static void compute_block_forward_dependences PROTO ((int));
|
|
|
|
|
static void init_rgn_data_dependences PROTO ((int));
|
|
|
|
|
static void add_branch_dependences PROTO ((rtx, rtx));
|
|
|
|
|
static void compute_block_backward_dependences PROTO ((int));
|
|
|
|
|
void debug_dependencies PROTO ((void));
|
|
|
|
|
|
|
|
|
|
/* Notes handling mechanism:
|
|
|
|
|
=========================
|
|
|
|
|
Generally, NOTES are saved before scheduling and restored after scheduling.
|
|
|
|
|
The scheduler distinguishes between three types of notes:
|
|
|
|
|
|
|
|
|
|
(1) LINE_NUMBER notes, generated and used for debugging. Here,
|
|
|
|
|
before scheduling a region, a pointer to the LINE_NUMBER note is
|
|
|
|
|
added to the insn following it (in save_line_notes()), and the note
|
|
|
|
|
is removed (in rm_line_notes() and unlink_line_notes()). After
|
|
|
|
|
scheduling the region, this pointer is used for regeneration of
|
|
|
|
|
the LINE_NUMBER note (in restore_line_notes()).
|
|
|
|
|
|
|
|
|
|
(2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
|
|
|
|
|
Before scheduling a region, a pointer to the note is added to the insn
|
|
|
|
|
that follows or precedes it. (This happens as part of the data dependence
|
|
|
|
|
computation). After scheduling an insn, the pointer contained in it is
|
|
|
|
|
used for regenerating the corresponding note (in reemit_notes).
|
|
|
|
|
|
|
|
|
|
(3) All other notes (e.g. INSN_DELETED): Before scheduling a block,
|
|
|
|
|
these notes are put in a list (in rm_other_notes() and
|
|
|
|
|
unlink_other_notes ()). After scheduling the block, these notes are
|
|
|
|
|
inserted at the beginning of the block (in schedule_block()). */
|
|
|
|
|
|
|
|
|
|
static rtx unlink_other_notes PROTO ((rtx, rtx));
|
|
|
|
|
static rtx unlink_line_notes PROTO ((rtx, rtx));
|
|
|
|
|
static void rm_line_notes PROTO ((int));
|
|
|
|
|
static void save_line_notes PROTO ((int));
|
|
|
|
|
static void restore_line_notes PROTO ((int));
|
|
|
|
|
static void rm_redundant_line_notes PROTO ((void));
|
|
|
|
|
static void rm_other_notes PROTO ((rtx, rtx));
|
|
|
|
|
static rtx reemit_notes PROTO ((rtx, rtx));
|
|
|
|
|
|
|
|
|
|
static void get_block_head_tail PROTO ((int, rtx *, rtx *));
|
|
|
|
|
|
1998-04-19 04:45:34 +02:00
|
|
|
|
static int queue_to_ready PROTO ((rtx [], int));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-06-21 19:59:03 +02:00
|
|
|
|
static void debug_ready_list PROTO ((rtx[], int));
|
1998-06-21 20:03:21 +02:00
|
|
|
|
static void init_target_units PROTO ((void));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static void insn_print_units PROTO ((rtx));
|
1998-06-21 20:03:21 +02:00
|
|
|
|
static int get_visual_tbl_length PROTO ((void));
|
|
|
|
|
static void init_block_visualization PROTO ((void));
|
rtl.c (rtx_name): Constify a char*.
* rtl.c (rtx_name): Constify a char*.
* rtl.h (rtx_name, fix_sched_param): Likewise.
* gmicro/gmicro.c (rtx_name): Remove redundant declaration.
(mypr): Use accessor macro, not `rtx_name'.
* genemit.c (print_code): Constify a char*.
* genopinit.c (gen_insn): Use accessor macro, not `rtx_name'.
* genpeep.c (print_code): Constify a char*.
* genrecog.c (print_code): Likewise.
* graph.c (start_fct, start_bb, node_data, draw_edge, end_fct,
end_bb): Add static prototype.
(draw_edge): Constify a char*.
(end_bb): Remove unused parameter.
* haifa-sched.c (fix_sched_param, safe_concat, print_exp
print_block_visualization): Constify a char*.
From-SVN: r28782
1999-08-21 00:32:54 +02:00
|
|
|
|
static void print_block_visualization PROTO ((int, const char *));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
static void visualize_scheduled_insns PROTO ((int, int));
|
|
|
|
|
static void visualize_no_unit PROTO ((rtx));
|
|
|
|
|
static void visualize_stall_cycles PROTO ((int, int));
|
|
|
|
|
static void print_exp PROTO ((char *, rtx, int));
|
|
|
|
|
static void print_value PROTO ((char *, rtx, int));
|
|
|
|
|
static void print_pattern PROTO ((char *, rtx, int));
|
|
|
|
|
static void print_insn PROTO ((char *, rtx, int));
|
|
|
|
|
void debug_reg_vector PROTO ((regset));
|
|
|
|
|
|
|
|
|
|
static rtx move_insn1 PROTO ((rtx, rtx));
|
|
|
|
|
static rtx move_insn PROTO ((rtx, rtx));
|
|
|
|
|
static rtx group_leader PROTO ((rtx));
|
|
|
|
|
static int set_priorities PROTO ((int));
|
|
|
|
|
static void init_rtx_vector PROTO ((rtx **, rtx *, int, int));
|
|
|
|
|
static void schedule_region PROTO ((int));
|
|
|
|
|
|
|
|
|
|
#endif /* INSN_SCHEDULING */
|
|
|
|
|
|
|
|
|
|
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
|
|
|
|
|
|
|
|
|
|
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  Duplicate dependences are
   merged, keeping the most restrictive (numerically smallest) kind.  */

static void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      /* Walk to the last insn of the scheduling group.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
	     && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
	next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
	return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

#ifdef INSN_SCHEDULING
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.   Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;

  /* If we already have a true dependency for ELEM, then we do not
     need to do anything.  Avoiding the list walk below can cut
     compile times dramatically for some code.  */
  if (true_dependency_cache
      && TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
    return;
#endif

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
	/* If this is a more restrictive type of dependence than the existing
	   one, then change the existing dependence to this type.  */
	if ((int) dep_type < (int) REG_NOTE_KIND (link))
	  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
	/* If we are adding a true dependency to INSN's LOG_LINKs, then
	   note that in the bitmap cache of true dependency information.  */
	if ((int)dep_type == 0 && true_dependency_cache)
	  SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
#endif

	return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  /* New dependence: prepend it to INSN's LOG_LINKS.  */
  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
}
|
|
|
|
|
|
1999-10-19 00:20:27 +02:00
|
|
|
|
#ifdef HAVE_cc0
/* Unlink every INSN_LIST node naming ELEM from the LOG_LINKS of INSN
   and return those nodes to the free list.  Abort if no such node
   exists.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx *linkp;
  rtx link;
  int n_removed = 0;

  linkp = &LOG_LINKS (insn);
  while ((link = *linkp) != 0)
    {
      if (XEXP (link, 0) == elem)
	{
	  /* Splice this node out of the list before freeing it, so the
	     successor is captured first.  */
	  *linkp = XEXP (link, 1);

#ifdef INSN_SCHEDULING
	  /* If we are removing a true dependency from the LOG_LINKS list,
	     make sure to remove it from the cache too.  */
	  if (REG_NOTE_KIND (link) == 0 && true_dependency_cache)
	    RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
		       INSN_LUID (elem));
#endif

	  free_INSN_LIST_node (link);
	  n_removed++;
	}
      else
	linkp = &XEXP (link, 1);
    }

  if (n_removed == 0)
    abort ();
}
#endif /* HAVE_cc0 */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
#ifndef INSN_SCHEDULING
/* When the target provides no scheduling description, supply a dummy
   entry point so callers need not be conditionalized.  DUMP_FILE is
   ignored.  */
void
schedule_insns (dump_file)
     FILE *dump_file;
{
}
|
|
|
|
|
#else
|
|
|
|
|
#ifndef __GNUC__
|
|
|
|
|
#define __inline
|
|
|
|
|
#endif
|
|
|
|
|
|
1998-05-12 14:20:18 +02:00
|
|
|
|
#ifndef HAIFA_INLINE
|
|
|
|
|
#define HAIFA_INLINE __inline
|
|
|
|
|
#endif
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
/* Computation of memory dependencies. */
|
|
|
|
|
|
|
|
|
|
/* The *_insns and *_mems are paired lists. Each pending memory operation
|
|
|
|
|
will have a pointer to the MEM rtx on one list and a pointer to the
|
|
|
|
|
containing insn on the other list in the same place in the list. */
|
|
|
|
|
|
|
|
|
|
/* We can't use add_dependence like the old code did, because a single insn
|
|
|
|
|
may have multiple memory accesses, and hence needs to be on the list
|
|
|
|
|
once for each memory access. Add_dependence won't let you add an insn
|
|
|
|
|
to a list more than once. */
|
|
|
|
|
|
|
|
|
|
/* An INSN_LIST containing all insns with pending read operations. */
|
|
|
|
|
static rtx pending_read_insns;
|
|
|
|
|
|
|
|
|
|
/* An EXPR_LIST containing all MEM rtx's which are pending reads. */
|
|
|
|
|
static rtx pending_read_mems;
|
|
|
|
|
|
|
|
|
|
/* An INSN_LIST containing all insns with pending write operations. */
|
|
|
|
|
static rtx pending_write_insns;
|
|
|
|
|
|
|
|
|
|
/* An EXPR_LIST containing all MEM rtx's which are pending writes. */
|
|
|
|
|
static rtx pending_write_mems;
|
|
|
|
|
|
|
|
|
|
/* Indicates the combined length of the two pending lists. We must prevent
|
|
|
|
|
these lists from ever growing too large since the number of dependencies
|
|
|
|
|
produced is at least O(N*N), and execution time is at least O(4*N*N), as
|
|
|
|
|
a function of the length of these pending lists. */
|
|
|
|
|
|
|
|
|
|
static int pending_lists_length;
|
|
|
|
|
|
|
|
|
|
/* The last insn upon which all memory references must depend.
|
|
|
|
|
This is an insn which flushed the pending lists, creating a dependency
|
|
|
|
|
between it and all previously pending memory references. This creates
|
|
|
|
|
a barrier (or a checkpoint) which no memory reference is allowed to cross.
|
|
|
|
|
|
|
|
|
|
This includes all non constant CALL_INSNs. When we do interprocedural
|
|
|
|
|
alias analysis, this restriction can be relaxed.
|
|
|
|
|
This may also be an INSN that writes memory if the pending lists grow
|
|
|
|
|
too large. */
|
|
|
|
|
|
|
|
|
|
static rtx last_pending_memory_flush;
|
|
|
|
|
|
|
|
|
|
/* The last function call we have seen. All hard regs, and, of course,
|
|
|
|
|
the last function call, must depend on this. */
|
|
|
|
|
|
|
|
|
|
static rtx last_function_call;
|
|
|
|
|
|
|
|
|
|
/* The LOG_LINKS field of this is a list of insns which use a pseudo register
|
|
|
|
|
that does not already cross a call. We create dependencies between each
|
|
|
|
|
of those insn and the next call insn, to ensure that they won't cross a call
|
|
|
|
|
after scheduling is done. */
|
|
|
|
|
|
|
|
|
|
static rtx sched_before_next_call;
|
|
|
|
|
|
|
|
|
|
/* Pointer to the last instruction scheduled. Used by rank_for_schedule,
|
|
|
|
|
so that insns independent of the last scheduled insn will be preferred
|
|
|
|
|
over dependent instructions. */
|
|
|
|
|
|
|
|
|
|
static rtx last_scheduled_insn;
|
|
|
|
|
|
|
|
|
|
/* Data structures for the computation of data dependences in a regions. We
|
|
|
|
|
keep one copy of each of the declared above variables for each bb in the
|
|
|
|
|
region. Before analyzing the data dependences for a bb, its variables
|
|
|
|
|
are initialized as a function of the variables of its predecessors. When
|
|
|
|
|
the analysis for a bb completes, we save the contents of each variable X
|
|
|
|
|
to a corresponding bb_X[bb] variable. For example, pending_read_insns is
|
|
|
|
|
copied to bb_pending_read_insns[bb]. Another change is that few
|
|
|
|
|
variables are now a list of insns rather than a single insn:
|
|
|
|
|
last_pending_memory_flash, last_function_call, reg_last_sets. The
|
|
|
|
|
manipulation of these variables was changed appropriately. */
|
|
|
|
|
|
|
|
|
|
static rtx **bb_reg_last_uses;
|
|
|
|
|
static rtx **bb_reg_last_sets;
|
1999-03-07 12:22:10 +01:00
|
|
|
|
static rtx **bb_reg_last_clobbers;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static rtx *bb_pending_read_insns;
|
|
|
|
|
static rtx *bb_pending_read_mems;
|
|
|
|
|
static rtx *bb_pending_write_insns;
|
|
|
|
|
static rtx *bb_pending_write_mems;
|
|
|
|
|
static int *bb_pending_lists_length;
|
|
|
|
|
|
|
|
|
|
static rtx *bb_last_pending_memory_flush;
|
|
|
|
|
static rtx *bb_last_function_call;
|
|
|
|
|
static rtx *bb_sched_before_next_call;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Functions for construction of the control flow graph. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Return 1 if control flow graph should not be constructed, 0 otherwise.
|
1998-03-08 03:15:26 +01:00
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
We decide not to build the control flow graph if there is possibly more
|
1998-03-08 03:15:26 +01:00
|
|
|
|
than one entry to the function, if computed branches exist, of if we
|
|
|
|
|
have nonlocal gotos. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
static int
|
1997-08-12 06:07:19 +02:00
|
|
|
|
is_cfg_nonregular ()
|
|
|
|
|
{
|
|
|
|
|
int b;
|
|
|
|
|
rtx insn;
|
|
|
|
|
RTX_CODE code;
|
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
/* If we have a label that could be the target of a nonlocal goto, then
|
|
|
|
|
the cfg is not well structured. */
|
1999-02-26 00:45:42 +01:00
|
|
|
|
if (nonlocal_goto_handler_labels)
|
1998-03-08 03:15:26 +01:00
|
|
|
|
return 1;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
/* If we have any forced labels, then the cfg is not well structured. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (forced_labels)
|
1998-03-08 03:15:26 +01:00
|
|
|
|
return 1;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-03-28 01:12:41 +01:00
|
|
|
|
/* If this function has a computed jump, then we consider the cfg
|
|
|
|
|
not well structured. */
|
|
|
|
|
if (current_function_has_computed_jump)
|
|
|
|
|
return 1;
|
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
/* If we have exception handlers, then we consider the cfg not well
|
|
|
|
|
structured. ?!? We should be able to handle this now that flow.c
|
|
|
|
|
computes an accurate cfg for EH. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (exception_handler_labels)
|
1998-03-08 03:15:26 +01:00
|
|
|
|
return 1;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
/* If we have non-jumping insns which refer to labels, then we consider
|
|
|
|
|
the cfg not well structured. */
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Check for labels referred to other thn by jumps. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (b = 0; b < n_basic_blocks; b++)
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
|
|
|
|
code = GET_CODE (insn);
|
|
|
|
|
if (GET_RTX_CLASS (code) == 'i')
|
|
|
|
|
{
|
|
|
|
|
rtx note;
|
|
|
|
|
|
|
|
|
|
for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
|
|
|
|
|
if (REG_NOTE_KIND (note) == REG_LABEL)
|
1998-03-08 03:15:26 +01:00
|
|
|
|
return 1;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
if (insn == BLOCK_END (b))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
/* All the tests passed. Consider the cfg well structured. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
1998-03-05 23:31:51 +01:00
|
|
|
|
/* Build the control flow graph and set nr_edges.
|
|
|
|
|
|
|
|
|
|
Instead of trying to build a cfg ourselves, we rely on flow to
|
1998-03-08 03:15:26 +01:00
|
|
|
|
do it for us. Stamp out useless code (and bug) duplication.
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
Return nonzero if an irregularity in the cfg is found which would
|
|
|
|
|
prevent cross block scheduling. */
|
|
|
|
|
|
|
|
|
|
static int
|
1998-04-17 00:00:09 +02:00
|
|
|
|
build_control_flow (s_preds, s_succs, num_preds, num_succs)
|
|
|
|
|
int_list_ptr *s_preds;
|
|
|
|
|
int_list_ptr *s_succs;
|
|
|
|
|
int *num_preds;
|
|
|
|
|
int *num_succs;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-03-18 08:18:06 +01:00
|
|
|
|
int i;
|
1998-03-05 23:31:51 +01:00
|
|
|
|
int_list_ptr succ;
|
1998-03-08 03:15:26 +01:00
|
|
|
|
int unreachable;
|
1998-03-05 23:31:51 +01:00
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
/* Count the number of edges in the cfg. */
|
|
|
|
|
nr_edges = 0;
|
|
|
|
|
unreachable = 0;
|
|
|
|
|
for (i = 0; i < n_basic_blocks; i++)
|
|
|
|
|
{
|
|
|
|
|
nr_edges += num_succs[i];
|
1998-05-06 02:12:15 +02:00
|
|
|
|
|
|
|
|
|
/* Unreachable loops with more than one basic block are detected
|
|
|
|
|
during the DFS traversal in find_rgns.
|
|
|
|
|
|
|
|
|
|
Unreachable loops with a single block are detected here. This
|
|
|
|
|
test is redundant with the one in find_rgns, but it's much
|
|
|
|
|
cheaper to go ahead and catch the trivial case here. */
|
1998-04-06 19:50:13 +02:00
|
|
|
|
if (num_preds[i] == 0
|
|
|
|
|
|| (num_preds[i] == 1 && INT_LIST_VAL (s_preds[i]) == i))
|
1998-03-08 03:15:26 +01:00
|
|
|
|
unreachable = 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Account for entry/exit edges. */
|
|
|
|
|
nr_edges += 2;
|
|
|
|
|
|
1999-09-04 01:22:50 +02:00
|
|
|
|
in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
|
|
|
|
|
out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
|
|
|
|
|
edge_table = (haifa_edge *) xcalloc (nr_edges, sizeof (haifa_edge));
|
1998-03-08 03:15:26 +01:00
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
nr_edges = 0;
|
|
|
|
|
for (i = 0; i < n_basic_blocks; i++)
|
1998-03-05 23:31:51 +01:00
|
|
|
|
for (succ = s_succs[i]; succ; succ = succ->next)
|
|
|
|
|
{
|
|
|
|
|
if (INT_LIST_VAL (succ) != EXIT_BLOCK)
|
|
|
|
|
new_edge (i, INT_LIST_VAL (succ));
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Increment by 1, since edge 0 is unused. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
nr_edges++;
|
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
return unreachable;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
1998-03-05 23:31:51 +01:00
|
|
|
|
/* Record an edge in the control flow graph from SOURCE to TARGET.
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-03-05 23:31:51 +01:00
|
|
|
|
In theory, this is redundant with the s_succs computed above, but
|
|
|
|
|
we have not converted all of haifa to use information from the
|
|
|
|
|
integer lists. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
new_edge (source, target)
|
|
|
|
|
int source, target;
|
|
|
|
|
{
|
|
|
|
|
int e, next_edge;
|
|
|
|
|
int curr_edge, fst_edge;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Check for duplicates. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
fst_edge = curr_edge = OUT_EDGES (source);
|
|
|
|
|
while (curr_edge)
|
|
|
|
|
{
|
|
|
|
|
if (FROM_BLOCK (curr_edge) == source
|
|
|
|
|
&& TO_BLOCK (curr_edge) == target)
|
|
|
|
|
{
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
curr_edge = NEXT_OUT (curr_edge);
|
|
|
|
|
|
|
|
|
|
if (fst_edge == curr_edge)
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
e = ++nr_edges;
|
|
|
|
|
|
|
|
|
|
FROM_BLOCK (e) = source;
|
|
|
|
|
TO_BLOCK (e) = target;
|
|
|
|
|
|
|
|
|
|
if (OUT_EDGES (source))
|
|
|
|
|
{
|
|
|
|
|
next_edge = NEXT_OUT (OUT_EDGES (source));
|
|
|
|
|
NEXT_OUT (OUT_EDGES (source)) = e;
|
|
|
|
|
NEXT_OUT (e) = next_edge;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
OUT_EDGES (source) = e;
|
|
|
|
|
NEXT_OUT (e) = e;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (IN_EDGES (target))
|
|
|
|
|
{
|
|
|
|
|
next_edge = NEXT_IN (IN_EDGES (target));
|
|
|
|
|
NEXT_IN (IN_EDGES (target)) = e;
|
|
|
|
|
NEXT_IN (e) = next_edge;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
IN_EDGES (target) = e;
|
|
|
|
|
NEXT_IN (e) = e;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* BITSET macros for operations on the control flow graph.  */

/* Compute bitwise union of two bitsets.  */
#define BITSET_UNION(set1, set2, len)				\
do { register bitset tp = set1, sp = set2;			\
     register int i;						\
     for (i = 0; i < len; i++)					\
       *(tp++) |= *(sp++); } while (0)

/* Compute bitwise intersection of two bitsets.  */
#define BITSET_INTER(set1, set2, len)				\
do { register bitset tp = set1, sp = set2;			\
     register int i;						\
     for (i = 0; i < len; i++)					\
       *(tp++) &= *(sp++); } while (0)

/* Compute bitwise difference of two bitsets.  */
#define BITSET_DIFFER(set1, set2, len)				\
do { register bitset tp = set1, sp = set2;			\
     register int i;						\
     for (i = 0; i < len; i++)					\
       *(tp++) &= ~*(sp++); } while (0)

/* Inverts every bit of bitset 'set'.  */
#define BITSET_INVERT(set, len)					\
do { register bitset tmpset = set;				\
     register int i;						\
     for (i = 0; i < len; i++, tmpset++)			\
       *tmpset = ~*tmpset; } while (0)

/* Turn on the index'th bit in bitset set.  Wrapped in do-while (0) so
   a use followed by a semicolon is a single statement (safe inside
   if/else).  The shifted constant is widened to the bitset word type
   so bit positions above 31 work when HOST_WIDE_INT is wider than
   int (shifting a plain int by >= its width is undefined).  */
#define BITSET_ADD(set, index, len)				\
do {								\
  if (index >= HOST_BITS_PER_WIDE_INT * len)			\
    abort ();							\
  else								\
    set[index / HOST_BITS_PER_WIDE_INT] |=			\
      (unsigned HOST_WIDE_INT) 1 << (index % HOST_BITS_PER_WIDE_INT); \
} while (0)

/* Turn off the index'th bit in set.  Same statement-safety and shift
   width considerations as BITSET_ADD.  */
#define BITSET_REMOVE(set, index, len)				\
do {								\
  if (index >= HOST_BITS_PER_WIDE_INT * len)			\
    abort ();							\
  else								\
    set[index / HOST_BITS_PER_WIDE_INT] &=			\
      ~((unsigned HOST_WIDE_INT) 1 << (index % HOST_BITS_PER_WIDE_INT)); \
} while (0)
|
|
|
|
|
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Check if the index'th bit in bitset set is on. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static char
|
|
|
|
|
bitset_member (set, index, len)
|
|
|
|
|
bitset set;
|
|
|
|
|
int index, len;
|
|
|
|
|
{
|
|
|
|
|
if (index >= HOST_BITS_PER_WIDE_INT * len)
|
|
|
|
|
abort ();
|
|
|
|
|
return (set[index / HOST_BITS_PER_WIDE_INT] &
|
|
|
|
|
1 << (index % HOST_BITS_PER_WIDE_INT)) ? 1 : 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Translate a bit-set SET to a list BL of the bit-set members. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
extract_bitlst (set, len, bl)
|
|
|
|
|
bitset set;
|
|
|
|
|
int len;
|
|
|
|
|
bitlst *bl;
|
|
|
|
|
{
|
|
|
|
|
int i, j, offset;
|
|
|
|
|
unsigned HOST_WIDE_INT word;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* bblst table space is reused in each call to extract_bitlst. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
bitlst_table_last = 0;
|
|
|
|
|
|
|
|
|
|
bl->first_member = &bitlst_table[bitlst_table_last];
|
|
|
|
|
bl->nr_members = 0;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < len; i++)
|
|
|
|
|
{
|
|
|
|
|
word = set[i];
|
|
|
|
|
offset = i * HOST_BITS_PER_WIDE_INT;
|
|
|
|
|
for (j = 0; word; j++)
|
|
|
|
|
{
|
|
|
|
|
if (word & 1)
|
|
|
|
|
{
|
|
|
|
|
bitlst_table[bitlst_table_last++] = offset;
|
|
|
|
|
(bl->nr_members)++;
|
|
|
|
|
}
|
|
|
|
|
word >>= 1;
|
|
|
|
|
++offset;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Functions for the construction of regions. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Print the regions, for debugging purposes. Callable from debugger. */
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
debug_regions ()
|
|
|
|
|
{
|
|
|
|
|
int rgn, bb;
|
|
|
|
|
|
|
|
|
|
fprintf (dump, "\n;; ------------ REGIONS ----------\n\n");
|
|
|
|
|
for (rgn = 0; rgn < nr_regions; rgn++)
|
|
|
|
|
{
|
|
|
|
|
fprintf (dump, ";;\trgn %d nr_blocks %d:\n", rgn,
|
|
|
|
|
rgn_table[rgn].rgn_nr_blocks);
|
|
|
|
|
fprintf (dump, ";;\tbb/block: ");
|
|
|
|
|
|
|
|
|
|
for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
|
|
|
|
|
{
|
|
|
|
|
current_blocks = RGN_BLOCKS (rgn);
|
|
|
|
|
|
|
|
|
|
if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
|
|
|
|
|
abort ();
|
|
|
|
|
|
|
|
|
|
fprintf (dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fprintf (dump, "\n\n");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Build a single block region for each basic block in the function.
|
|
|
|
|
This allows for using the same code for interblock and basic block
|
|
|
|
|
scheduling. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
find_single_block_region ()
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < n_basic_blocks; i++)
|
|
|
|
|
{
|
|
|
|
|
rgn_bb_table[i] = i;
|
|
|
|
|
RGN_NR_BLOCKS (i) = 1;
|
|
|
|
|
RGN_BLOCKS (i) = i;
|
|
|
|
|
CONTAINING_RGN (i) = i;
|
|
|
|
|
BLOCK_TO_BB (i) = 0;
|
|
|
|
|
}
|
|
|
|
|
nr_regions = n_basic_blocks;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Update number of blocks and the estimate for number of insns
|
|
|
|
|
in the region. Return 1 if the region is "too large" for interblock
|
|
|
|
|
scheduling (compile time considerations), otherwise return 0. */
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
too_large (block, num_bbs, num_insns)
|
|
|
|
|
int block, *num_bbs, *num_insns;
|
|
|
|
|
{
|
|
|
|
|
(*num_bbs)++;
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
(*num_insns) += (INSN_LUID (BLOCK_END (block)) -
|
|
|
|
|
INSN_LUID (BLOCK_HEAD (block)));
|
1998-05-06 18:32:40 +02:00
|
|
|
|
if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
return 1;
|
|
|
|
|
else
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the most inner
   loop containing blk.  Wrapped in do-while (0) so that a use followed by
   a semicolon expands to exactly one statement and is therefore safe as
   the body of an if/else.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr)			\
do {							\
  if (max_hdr[blk] == -1)				\
    max_hdr[blk] = hdr;					\
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])		\
    RESET_BIT (inner, hdr);				\
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])		\
    {							\
      RESET_BIT (inner, max_hdr[blk]);			\
      max_hdr[blk] = hdr;				\
    }							\
} while (0)
|
|
|
|
|
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* Find regions for interblock scheduling.
|
|
|
|
|
|
|
|
|
|
A region for scheduling can be:
|
|
|
|
|
|
|
|
|
|
* A loop-free procedure, or
|
|
|
|
|
|
|
|
|
|
* A reducible inner loop, or
|
|
|
|
|
|
|
|
|
|
* A basic block not contained in any other region.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
?!? In theory we could build other regions based on extended basic
|
|
|
|
|
blocks or reverse extended basic blocks. Is it worth the trouble?
|
|
|
|
|
|
|
|
|
|
Loop blocks that form a region are put into the region's block list
|
|
|
|
|
in topological order.
|
|
|
|
|
|
|
|
|
|
This procedure stores its results into the following global (ick) variables
|
|
|
|
|
|
|
|
|
|
* rgn_nr
|
|
|
|
|
* rgn_table
|
|
|
|
|
* rgn_bb_table
|
|
|
|
|
* block_to_bb
|
|
|
|
|
* containing region
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
We use dominator relationships to avoid making regions out of non-reducible
|
|
|
|
|
loops.
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
This procedure needs to be converted to work on pred/succ lists instead
|
|
|
|
|
of edge tables. That would simplify it somewhat. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static void
|
1998-04-17 00:00:09 +02:00
|
|
|
|
find_rgns (s_preds, s_succs, num_preds, num_succs, dom)
|
|
|
|
|
int_list_ptr *s_preds;
|
|
|
|
|
int_list_ptr *s_succs;
|
|
|
|
|
int *num_preds;
|
|
|
|
|
int *num_succs;
|
|
|
|
|
sbitmap *dom;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
|
|
|
|
int *max_hdr, *dfs_nr, *stack, *queue, *degree;
|
1998-04-17 00:00:09 +02:00
|
|
|
|
char no_loops = 1;
|
Warning Fixes:
* Makefile.in (print-rtl.o): Depend on bitmap.h.
(dbxout.o): Depend on toplev.h.
($(SCHED_PREFIX)sched.o): Likewise.
($(out_object_file)): Likewise for system.h and toplev.h.
(cppmain.o): Depend on gansidecl.h.
(cpplib.o): Likewise.
(cpperror.o): Likewise.
(cppexp.o): Likewise.
(cpphash.o): Likewise.
(cppalloc.o): Likewise.
(fix-header.o): Depend on cpplib.h and cpphash.h.
(scan-decls.o): Depend on gansidecl.h.
* basic-block.h (free_regset_vector): Add prototype.
* cccp.c (check_precompiled): Mark parameter `fname' with
ATTRIBUTE_UNUSED.
(do_assert): Likewise for `op' and `keyword'.
(do_unassert): Likewise.
(do_line): Likewise for `keyword'.
(do_error): Likewise for `op' and `keyword'.
(do_warning): Likewise.
(do_ident): Likewise for `keyword'.
(do_pragma): Likewise for `limit', `op' and `keyword'.
(do_sccs): Likewise.
(do_if): Likewise for `keyword'.
(do_elif): Likewise.
(do_else): Likewise.
(do_endif): Likewise.
* collect2.c (getenv): Remove redundant prototype.
(collect_exit, collect_execute, dump_file): Likewise.
(dump_list): Wrap prototype and definition in COLLECT_EXPORT_LIST.
(dump_prefix_list): Hide prototype and definition.
* sparc.c: Include toplev.h.
(intreg_operand): Mark parameter `mode' with ATTRIBUTE_UNUSED.
(symbolic_memory_operand): Likewise.
(sp64_medium_pic_operand): Likewise.
(data_segment_operand): Likewise.
(text_segment_operand): Likewise.
(splittable_symbolic_memory_operand): Likewise.
(splittable_immediate_memory_operand): Likewise.
(eq_or_neq): Likewise.
(normal_comp_operator): Likewise.
(noov_compare_op): Likewise.
(v9_regcmp_op): Likewise.
(v8plus_regcmp_op): Likewise.
(extend_op): Likewise.
(cc_arithop): Likewise.
(cc_arithopn): Likewise.
(small_int): Likewise.
(uns_small_int): Likewise.
(clobbered_register): Likewise.
(legitimize_pic_address): Likewise.
(delay_operand): Likewise.
(sparc_builtin_saveregs): Remove unused variable `stdarg'.
* sparc.h (order_regs_for_local_alloc, eligible_for_return_delay,
sparc_issue_rate, v8plus_regcmp_p): Add prototypes.
* sparc.md (cmpdi_v8plus): Add abort for default case in switch.
* cppalloc.c: Include gansidecl.h.
* cpperror.c: Include stdarg.h/varargs.h and gansidecl.h.
(cpp_file_line_for_message): Mark parameter `pfile' with
ATTRIBUTE_UNUSED.
(v_cpp_message): New function.
(cpp_message): Use it. Also convert to variable arguments.
(cpp_fatal): Likewise.
(cpp_pfatal_with_name): Constify parameter `name'.
* cppexp.c: Move gansidecl.h before cpplib.h.
* cpphash.c: Likewise.
* cpphash.h (hashf, delete_macro): Add prototypes.
* cpplib.c: Include stdarg.h/varargs.h and move gansidecl.h before
cpplib.h. Don't include errno.h.
(update_path): Add arguments to prototype.
(cpp_fatal, cpp_file_line_for_message, cpp_message, delete_macro,
cpp_print_containing_files): Remove redundant prototypes.
(cpp_hash_cleanup, add_import, append_include_chain,
make_assertion, path_include, initialize_builtins,
initialize_char_syntax, finclude, validate_else, comp_def_part,
lookup_import, redundant_include_p, is_system_include,
read_name_map, read_filename_string, open_include_file,
check_macro_name, compare_defs, compare_token_lists,
eval_if_expression, change_newlines): Add prototype arguments.
(hashf): Remove redundant prototype.
(read_token_list, free_token_list, safe_read, xcalloc, savestring,
conditional_skip, skip_if_group): Add prototype arguments.
(fdopen): Remove redundant prototype.
(do_define, do_line, do_include, do_undef, do_error, do_pragma,
do_ident, do_if, do_xifdef, do_else, do_elif, do_endif, do_sccs,
do_once, do_assert, do_unassert, do_warning): Add prototype arguments.
(struct directive): Add prototype arguments to function pointer
member `func'.
(handle_directive): Add missing arguments to call to `do_line'.
(do_include): Mark parameters `unused1' and `unused2' with
ATTRIBUTE_UNUSED.
(do_line): Likewise for `keyword' and new parameters `unused1' and
`unused2'.
(do_error): Likewise for `keyword'.
(do_warning): Likewise. Also add missing argument `pfile' in call
to cpp_pedwarn.
(do_once): Mark parameter `keyword', `unused1' and `unused2' with
ATTRIBUTE_UNUSED.
(do_ident): Likewise for `keyword', `buf' and `limit'.
(do_pragma): Likewise. Also add missing arguments in call to do_once.
(do_sccs): Mark parameter `keyword', `buf' and `limit' with
ATTRIBUTE_UNUSED.
(do_if): Likewise for `keyword'.
(do_elif): Likewise.
(eval_if_expression): Likewise for `buf' and `length'.
(do_xifdef): Likewise for `unused1' and `unused2'.
(do_else): Likewise for `keyword', `buf' and `limit'.
(do_endif): Likewise.
(parse_name): Add missing argument `pfile' in call to cpp_pedwarn.
(cpp_handle_options): Remove superfluous NULL argument in call to
cpp_fatal.
(cpp_handle_options): Likewise.
(do_assert): Mark parameter `keyword', `buf' and `limit' with
ATTRIBUTE_UNUSED.
(do_unassert): Likewise.
(cpp_print_file_and_line): Add missing argument `pfile' in call to
cpp_file_line_for_message.
(v_cpp_error): New function.
(cpp_error): Use it. Also accept variable arguments.
(v_cpp_warning): New function.
(cpp_warning): Use it. Also accept variable arguments.
(cpp_pedwarn): Accept variable arguments.
(v_cpp_error_with_line): New function
(cpp_error_with_line): Use it. Accept variable arguments.
(v_cpp_warning_with_line): New function.
(cpp_warning_with_line): Use it. Accept variable arguments. Hide
definition.
(cpp_pedwarn_with_line): Accept variable arguments.
(cpp_pedwarn_with_file_and_line): Likewise.
(cpp_error_from_errno): Constify parameter `name'. Add missing
argument `pfile' in call to cpp_file_line_for_message.
(cpp_perror_with_name): Constify parameter `name'.
* cpplib.h: Define PARAMS() in terms of PROTO().
(fatal): Remove redundant prototype.
(cpp_error, cpp_warning, cpp_pedwarn, cpp_error_with_line,
cpp_pedwarn_with_line, cpp_pedwarn_with_file_and_line,
cpp_error_from_errno, cpp_perror_with_name, cpp_pfatal_with_name,
cpp_fatal, cpp_message, cpp_pfatal_with_name,
cpp_file_line_for_message, cpp_print_containing_files): Add
arguments to prototypes.
(scan_decls, cpp_finish): Add prototypes.
* cppmain.c: Include gansidecl.h.
(main): Remove unused variable `i'.
* dbxout.c: Include toplev.h.
* demangle.h (do_tlink, collect_execute, collect_exit,
collect_wait, dump_file, file_exists): Add prototype.
* dwarf2out.c (dwarf_type_encoding_name, decl_start_label): Hide
prototype and definition.
(gen_unspecified_parameters_die): Don't assign results of call to
function new_die() to unused variable `parm_die'.
(dwarf2out_line): Mark parameter `filename' with ATTRIBUTE_UNUSED.
(dwarf2out_define): Likewise for `lineno' and `buffer'.
* dwarfout.c (output_unsigned_leb128, output_signed_leb128): Hide
prototype and definition.
(output_die): Add prototype arguments to function pointer arg.
(output_unspecified_parameters_die): Mark parameter `arg' with
ATTRIBUTE_UNUSED.
* except.c (output_exception_table_entry): Remove unused variable
`eh_entry'.
* except.h (expand_fixup_region_start, expand_fixup_region_end):
Add prototypes.
* expr.c (do_jump_by_parts_equality_rtx): Remove prototype.
* expr.h (do_jump_by_parts_equality_rtx): Add prototype.
* fix-header.c: Include stdarg.h/varargs.h, move gansidecl.h
before cpplib.h, include cpphash.h, remove redundant prototype of
cpp_fatal, don't define `const', add a prototype for `fatal'.
(cpp_file_line_for_message): Add missing arguments `pfile'.
(v_cpp_message): New function.
(cpp_message): Use it.
(v_fatal): New function.
(fatal, cpp_fatal): Use it.
(cpp_pfatal_with_name): Constify parameter `name'.
* flow.c (free_regset_vector): Remove redundant prototype.
* function.c (round_down): Wrap prototype and definition with
macro ARGS_GROW_DOWNWARD.
(record_insns): Wrap prototype and definition with
defined (HAVE_prologue) || defined (HAVE_epilogue).
* gansidecl.h (ATTRIBUTE_PRINTF_4, ATTRIBUTE_PRINTF_5): New macros.
* gen-protos.c: Include gansidecl.h.
(hashf): Don't make it static, constify parameter `name'.
* genattrtab.c (check_attr_test): Change XEXP() to XSTR() to match
specifier %s in calls to function `fatal'.
* haifa-sched.c: Include toplev.h.
(find_rgns): Remove unused variable `j'.
* integrate.c (note_modified_parmregs): Mark parameter `x' with
ATTRIBUTE_UNUSED.
(mark_stores): Likewise.
* jump.c (mark_modified_reg): Likewise.
* output.h (insn_current_reference_address): Add prototype.
(eh_frame_section): Likewise.
* print-rtl.c: Include bitmap.h.
* reload1.c (reload): Wrap variables `note' and `next' in macro
PRESERVE_DEATH_INFO_REGNO_P.
(forget_old_reloads_1): Mark parameter `ignored' with
ATTRIBUTE_UNUSED.
(choose_reload_regs): Remove unused variable `in'.
(reload_cse_invalidate_mem): Mark parameter `ignore' with
ATTRIBUTE_UNUSED.
(reload_cse_check_clobber): Likewise.
* rtl.h (expand_null_return, reg_classes_intersect_p): Add prototype.
(mark_elimination): Fix typo in prototype.
* scan-decls.c: Include gansidecl.h.
* tree.h (using_eh_for_cleanups, supports_one_only): Add prototype.
From-SVN: r19867
1998-05-19 10:42:48 +02:00
|
|
|
|
int node, child, loop_head, i, head, tail;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
int count = 0, sp, idx = 0, current_edge = out_edges[0];
|
1998-05-06 02:12:15 +02:00
|
|
|
|
int num_bbs, num_insns, unreachable;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
int too_large_failure;
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* Note if an edge has been passed. */
|
|
|
|
|
sbitmap passed;
|
|
|
|
|
|
|
|
|
|
/* Note if a block is a natural loop header. */
|
|
|
|
|
sbitmap header;
|
|
|
|
|
|
|
|
|
|
/* Note if a block is an natural inner loop header. */
|
|
|
|
|
sbitmap inner;
|
|
|
|
|
|
|
|
|
|
/* Note if a block is in the block queue. */
|
|
|
|
|
sbitmap in_queue;
|
|
|
|
|
|
1998-05-06 18:32:40 +02:00
|
|
|
|
/* Note if a block is in the block queue. */
|
|
|
|
|
sbitmap in_stack;
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
|
|
|
|
|
and a mapping from block to its loop header (if the block is contained
|
|
|
|
|
in a loop, else -1).
|
|
|
|
|
|
|
|
|
|
Store results in HEADER, INNER, and MAX_HDR respectively, these will
|
|
|
|
|
be used as inputs to the second traversal.
|
|
|
|
|
|
|
|
|
|
STACK, SP and DFS_NR are only used during the first traversal. */
|
|
|
|
|
|
|
|
|
|
/* Allocate and initialize variables for the first traversal. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
max_hdr = (int *) alloca (n_basic_blocks * sizeof (int));
|
|
|
|
|
dfs_nr = (int *) alloca (n_basic_blocks * sizeof (int));
|
1997-09-08 18:06:18 +02:00
|
|
|
|
bzero ((char *) dfs_nr, n_basic_blocks * sizeof (int));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
stack = (int *) alloca (nr_edges * sizeof (int));
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
inner = sbitmap_alloc (n_basic_blocks);
|
|
|
|
|
sbitmap_ones (inner);
|
|
|
|
|
|
|
|
|
|
header = sbitmap_alloc (n_basic_blocks);
|
|
|
|
|
sbitmap_zero (header);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
passed = sbitmap_alloc (nr_edges);
|
|
|
|
|
sbitmap_zero (passed);
|
|
|
|
|
|
|
|
|
|
in_queue = sbitmap_alloc (n_basic_blocks);
|
|
|
|
|
sbitmap_zero (in_queue);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 18:32:40 +02:00
|
|
|
|
in_stack = sbitmap_alloc (n_basic_blocks);
|
|
|
|
|
sbitmap_zero (in_stack);
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (i = 0; i < n_basic_blocks; i++)
|
1998-04-17 00:00:09 +02:00
|
|
|
|
max_hdr[i] = -1;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* DFS traversal to find inner loops in the cfg. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
sp = -1;
|
|
|
|
|
while (1)
|
|
|
|
|
{
|
1998-04-17 00:00:09 +02:00
|
|
|
|
if (current_edge == 0 || TEST_BIT (passed, current_edge))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* We have reached a leaf node or a node that was already
|
1998-05-06 18:32:40 +02:00
|
|
|
|
processed. Pop edges off the stack until we find
|
1998-04-17 00:00:09 +02:00
|
|
|
|
an edge that has not yet been processed. */
|
|
|
|
|
while (sp >= 0
|
|
|
|
|
&& (current_edge == 0 || TEST_BIT (passed, current_edge)))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* Pop entry off the stack. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
current_edge = stack[sp--];
|
|
|
|
|
node = FROM_BLOCK (current_edge);
|
|
|
|
|
child = TO_BLOCK (current_edge);
|
1998-05-06 18:32:40 +02:00
|
|
|
|
RESET_BIT (in_stack, child);
|
|
|
|
|
if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
|
|
|
|
|
current_edge = NEXT_OUT (current_edge);
|
|
|
|
|
}
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* See if have finished the DFS tree traversal. */
|
|
|
|
|
if (sp < 0 && TEST_BIT (passed, current_edge))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
break;
|
1998-04-17 00:00:09 +02:00
|
|
|
|
|
|
|
|
|
/* Nope, continue the traversal with the popped node. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* Process a node. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
node = FROM_BLOCK (current_edge);
|
|
|
|
|
child = TO_BLOCK (current_edge);
|
1998-05-06 18:32:40 +02:00
|
|
|
|
SET_BIT (in_stack, node);
|
1998-04-17 00:00:09 +02:00
|
|
|
|
dfs_nr[node] = ++count;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 18:32:40 +02:00
|
|
|
|
/* If the successor is in the stack, then we've found a loop.
|
|
|
|
|
Mark the loop, if it is not a natural loop, then it will
|
|
|
|
|
be rejected during the second traversal. */
|
|
|
|
|
if (TEST_BIT (in_stack, child))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
|
|
|
|
no_loops = 0;
|
1998-04-17 00:00:09 +02:00
|
|
|
|
SET_BIT (header, child);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
UPDATE_LOOP_RELATIONS (node, child);
|
1998-04-17 00:00:09 +02:00
|
|
|
|
SET_BIT (passed, current_edge);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
current_edge = NEXT_OUT (current_edge);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* If the child was already visited, then there is no need to visit
|
|
|
|
|
it again. Just update the loop relationships and restart
|
|
|
|
|
with a new edge. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (dfs_nr[child])
|
|
|
|
|
{
|
1998-05-06 18:32:40 +02:00
|
|
|
|
if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
|
1998-04-17 00:00:09 +02:00
|
|
|
|
SET_BIT (passed, current_edge);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
current_edge = NEXT_OUT (current_edge);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* Push an entry on the stack and continue DFS traversal. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
stack[++sp] = current_edge;
|
1998-04-17 00:00:09 +02:00
|
|
|
|
SET_BIT (passed, current_edge);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
current_edge = OUT_EDGES (child);
|
1999-08-25 07:24:04 +02:00
|
|
|
|
|
|
|
|
|
/* This is temporary until haifa is converted to use rth's new
|
|
|
|
|
cfg routines which have true entry/exit blocks and the
|
|
|
|
|
appropriate edges from/to those blocks.
|
|
|
|
|
|
|
|
|
|
Generally we update dfs_nr for a node when we process its
|
|
|
|
|
out edge. However, if the node has no out edge then we will
|
|
|
|
|
not set dfs_nr for that node. This can confuse the scheduler
|
|
|
|
|
into thinking that we have unreachable blocks, which in turn
|
|
|
|
|
disables cross block scheduling.
|
|
|
|
|
|
|
|
|
|
So, if we have a node with no out edges, go ahead and mark it
|
|
|
|
|
as reachable now. */
|
|
|
|
|
if (current_edge == 0)
|
|
|
|
|
dfs_nr[child] = ++count;
|
1998-04-17 00:00:09 +02:00
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
/* Another check for unreachable blocks. The earlier test in
|
|
|
|
|
is_cfg_nonregular only finds unreachable blocks that do not
|
|
|
|
|
form a loop.
|
1998-04-17 00:00:09 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
The DFS traversal will mark every block that is reachable from
|
|
|
|
|
the entry node by placing a nonzero value in dfs_nr. Thus if
|
|
|
|
|
dfs_nr is zero for any block, then it must be unreachable. */
|
|
|
|
|
unreachable = 0;
|
|
|
|
|
for (i = 0; i < n_basic_blocks; i++)
|
|
|
|
|
if (dfs_nr[i] == 0)
|
|
|
|
|
{
|
|
|
|
|
unreachable = 1;
|
|
|
|
|
break;
|
|
|
|
|
}
|
1998-04-17 00:00:09 +02:00
|
|
|
|
|
|
|
|
|
/* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
|
|
|
|
|
to hold degree counts. */
|
|
|
|
|
degree = dfs_nr;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Compute the in-degree of every block in the graph. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (i = 0; i < n_basic_blocks; i++)
|
1998-04-17 00:00:09 +02:00
|
|
|
|
degree[i] = num_preds[i];
|
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
/* Do not perform region scheduling if there are any unreachable
|
|
|
|
|
blocks. */
|
|
|
|
|
if (!unreachable)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
if (no_loops)
|
|
|
|
|
SET_BIT (header, 0);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
/* Second travsersal:find reducible inner loops and topologically sort
|
|
|
|
|
block of each region. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
queue = (int *) alloca (n_basic_blocks * sizeof (int));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 18:32:40 +02:00
|
|
|
|
/* Find blocks which are inner loop headers. We still have non-reducible
|
|
|
|
|
loops to consider at this point. */
|
1998-05-06 02:12:15 +02:00
|
|
|
|
for (i = 0; i < n_basic_blocks; i++)
|
|
|
|
|
{
|
|
|
|
|
if (TEST_BIT (header, i) && TEST_BIT (inner, i))
|
|
|
|
|
{
|
|
|
|
|
int_list_ptr ps;
|
1998-05-06 18:32:40 +02:00
|
|
|
|
int j;
|
|
|
|
|
|
|
|
|
|
/* Now check that the loop is reducible. We do this separate
|
|
|
|
|
from finding inner loops so that we do not find a reducible
|
1999-09-06 23:55:23 +02:00
|
|
|
|
loop which contains an inner non-reducible loop.
|
1998-05-06 18:32:40 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
A simple way to find reducible/natural loops is to verify
|
1998-05-06 18:32:40 +02:00
|
|
|
|
that each block in the loop is dominated by the loop
|
|
|
|
|
header.
|
|
|
|
|
|
|
|
|
|
If there exists a block that is not dominated by the loop
|
|
|
|
|
header, then the block is reachable from outside the loop
|
|
|
|
|
and thus the loop is not a natural loop. */
|
|
|
|
|
for (j = 0; j < n_basic_blocks; j++)
|
|
|
|
|
{
|
|
|
|
|
/* First identify blocks in the loop, except for the loop
|
|
|
|
|
entry block. */
|
|
|
|
|
if (i == max_hdr[j] && i != j)
|
|
|
|
|
{
|
|
|
|
|
/* Now verify that the block is dominated by the loop
|
|
|
|
|
header. */
|
|
|
|
|
if (!TEST_BIT (dom[j], i))
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* If we exited the loop early, then I is the header of
|
|
|
|
|
a non-reducible loop and we should quit processing it
|
|
|
|
|
now. */
|
1998-05-06 18:32:40 +02:00
|
|
|
|
if (j != n_basic_blocks)
|
|
|
|
|
continue;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 18:32:40 +02:00
|
|
|
|
/* I is a header of an inner loop, or block 0 in a subroutine
|
|
|
|
|
with no loops at all. */
|
1998-05-06 02:12:15 +02:00
|
|
|
|
head = tail = -1;
|
|
|
|
|
too_large_failure = 0;
|
|
|
|
|
loop_head = max_hdr[i];
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
/* Decrease degree of all I's successors for topological
|
1998-05-07 00:51:38 +02:00
|
|
|
|
ordering. */
|
1998-05-06 02:12:15 +02:00
|
|
|
|
for (ps = s_succs[i]; ps; ps = ps->next)
|
|
|
|
|
if (INT_LIST_VAL (ps) != EXIT_BLOCK
|
|
|
|
|
&& INT_LIST_VAL (ps) != ENTRY_BLOCK)
|
1998-05-06 18:32:40 +02:00
|
|
|
|
--degree[INT_LIST_VAL(ps)];
|
1998-04-17 00:00:09 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
/* Estimate # insns, and count # blocks in the region. */
|
|
|
|
|
num_bbs = 1;
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
num_insns = (INSN_LUID (BLOCK_END (i))
|
|
|
|
|
- INSN_LUID (BLOCK_HEAD (i)));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Find all loop latches (blocks with back edges to the loop
|
1998-05-06 02:12:15 +02:00
|
|
|
|
header) or all the leaf blocks in the cfg has no loops.
|
|
|
|
|
|
|
|
|
|
Place those blocks into the queue. */
|
|
|
|
|
if (no_loops)
|
|
|
|
|
{
|
|
|
|
|
for (j = 0; j < n_basic_blocks; j++)
|
|
|
|
|
/* Leaf nodes have only a single successor which must
|
|
|
|
|
be EXIT_BLOCK. */
|
|
|
|
|
if (num_succs[j] == 1
|
|
|
|
|
&& INT_LIST_VAL (s_succs[j]) == EXIT_BLOCK)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
queue[++tail] = j;
|
|
|
|
|
SET_BIT (in_queue, j);
|
|
|
|
|
|
|
|
|
|
if (too_large (j, &num_bbs, &num_insns))
|
|
|
|
|
{
|
|
|
|
|
too_large_failure = 1;
|
|
|
|
|
break;
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
1998-05-06 02:12:15 +02:00
|
|
|
|
}
|
|
|
|
|
else
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
int_list_ptr ps;
|
1998-04-17 00:00:09 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
for (ps = s_preds[i]; ps; ps = ps->next)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
node = INT_LIST_VAL (ps);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
if (node == ENTRY_BLOCK || node == EXIT_BLOCK)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (max_hdr[node] == loop_head && node != i)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
/* This is a loop latch. */
|
|
|
|
|
queue[++tail] = node;
|
|
|
|
|
SET_BIT (in_queue, node);
|
|
|
|
|
|
|
|
|
|
if (too_large (node, &num_bbs, &num_insns))
|
|
|
|
|
{
|
|
|
|
|
too_large_failure = 1;
|
|
|
|
|
break;
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
1998-05-06 02:12:15 +02:00
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
/* Now add all the blocks in the loop to the queue.
|
1998-04-17 00:00:09 +02:00
|
|
|
|
|
|
|
|
|
We know the loop is a natural loop; however the algorithm
|
|
|
|
|
above will not always mark certain blocks as being in the
|
|
|
|
|
loop. Consider:
|
|
|
|
|
node children
|
|
|
|
|
a b,c
|
|
|
|
|
b c
|
|
|
|
|
c a,d
|
|
|
|
|
d b
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
The algorithm in the DFS traversal may not mark B & D as part
|
|
|
|
|
of the loop (ie they will not have max_hdr set to A).
|
|
|
|
|
|
|
|
|
|
We know they can not be loop latches (else they would have
|
|
|
|
|
had max_hdr set since they'd have a backedge to a dominator
|
|
|
|
|
block). So we don't need them on the initial queue.
|
|
|
|
|
|
|
|
|
|
We know they are part of the loop because they are dominated
|
|
|
|
|
by the loop header and can be reached by a backwards walk of
|
|
|
|
|
the edges starting with nodes on the initial queue.
|
|
|
|
|
|
|
|
|
|
It is safe and desirable to include those nodes in the
|
|
|
|
|
loop/scheduling region. To do so we would need to decrease
|
|
|
|
|
the degree of a node if it is the target of a backedge
|
|
|
|
|
within the loop itself as the node is placed in the queue.
|
|
|
|
|
|
|
|
|
|
We do not do this because I'm not sure that the actual
|
|
|
|
|
scheduling code will properly handle this case. ?!? */
|
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
while (head < tail && !too_large_failure)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
int_list_ptr ps;
|
|
|
|
|
child = queue[++head];
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
for (ps = s_preds[child]; ps; ps = ps->next)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
node = INT_LIST_VAL (ps);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
/* See discussion above about nodes not marked as in
|
|
|
|
|
this loop during the initial DFS traversal. */
|
|
|
|
|
if (node == ENTRY_BLOCK || node == EXIT_BLOCK
|
|
|
|
|
|| max_hdr[node] != loop_head)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
tail = -1;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
break;
|
|
|
|
|
}
|
1998-05-06 02:12:15 +02:00
|
|
|
|
else if (!TEST_BIT (in_queue, node) && node != i)
|
|
|
|
|
{
|
|
|
|
|
queue[++tail] = node;
|
|
|
|
|
SET_BIT (in_queue, node);
|
|
|
|
|
|
|
|
|
|
if (too_large (node, &num_bbs, &num_insns))
|
|
|
|
|
{
|
|
|
|
|
too_large_failure = 1;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
1998-05-06 02:12:15 +02:00
|
|
|
|
if (tail >= 0 && !too_large_failure)
|
|
|
|
|
{
|
|
|
|
|
/* Place the loop header into list of region blocks. */
|
|
|
|
|
degree[i] = -1;
|
|
|
|
|
rgn_bb_table[idx] = i;
|
|
|
|
|
RGN_NR_BLOCKS (nr_regions) = num_bbs;
|
|
|
|
|
RGN_BLOCKS (nr_regions) = idx++;
|
|
|
|
|
CONTAINING_RGN (i) = nr_regions;
|
|
|
|
|
BLOCK_TO_BB (i) = count = 0;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Remove blocks from queue[] when their in degree
|
|
|
|
|
becomes zero. Repeat until no blocks are left on the
|
|
|
|
|
list. This produces a topological list of blocks in
|
|
|
|
|
the region. */
|
1998-05-06 02:12:15 +02:00
|
|
|
|
while (tail >= 0)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1998-05-06 02:12:15 +02:00
|
|
|
|
int_list_ptr ps;
|
|
|
|
|
|
|
|
|
|
if (head < 0)
|
|
|
|
|
head = tail;
|
|
|
|
|
child = queue[head];
|
|
|
|
|
if (degree[child] == 0)
|
|
|
|
|
{
|
|
|
|
|
degree[child] = -1;
|
|
|
|
|
rgn_bb_table[idx++] = child;
|
|
|
|
|
BLOCK_TO_BB (child) = ++count;
|
|
|
|
|
CONTAINING_RGN (child) = nr_regions;
|
|
|
|
|
queue[head] = queue[tail--];
|
|
|
|
|
|
|
|
|
|
for (ps = s_succs[child]; ps; ps = ps->next)
|
|
|
|
|
if (INT_LIST_VAL (ps) != ENTRY_BLOCK
|
|
|
|
|
&& INT_LIST_VAL (ps) != EXIT_BLOCK)
|
|
|
|
|
--degree[INT_LIST_VAL (ps)];
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
--head;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
1998-05-06 02:12:15 +02:00
|
|
|
|
++nr_regions;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* Any block that did not end up in a region is placed into a region
|
|
|
|
|
by itself. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (i = 0; i < n_basic_blocks; i++)
|
|
|
|
|
if (degree[i] >= 0)
|
|
|
|
|
{
|
|
|
|
|
rgn_bb_table[idx] = i;
|
|
|
|
|
RGN_NR_BLOCKS (nr_regions) = 1;
|
|
|
|
|
RGN_BLOCKS (nr_regions) = idx++;
|
|
|
|
|
CONTAINING_RGN (i) = nr_regions++;
|
|
|
|
|
BLOCK_TO_BB (i) = 0;
|
|
|
|
|
}
|
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
free (passed);
|
|
|
|
|
free (header);
|
|
|
|
|
free (inner);
|
|
|
|
|
free (in_queue);
|
1998-05-06 18:32:40 +02:00
|
|
|
|
free (in_stack);
|
1998-04-17 00:00:09 +02:00
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Functions for regions scheduling information. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Compute dominators, probability, and potential-split-edges of bb.
|
|
|
|
|
Assume that these values were already computed for bb's predecessors. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
compute_dom_prob_ps (bb)
|
|
|
|
|
int bb;
|
|
|
|
|
{
|
|
|
|
|
int nxt_in_edge, fst_in_edge, pred;
|
|
|
|
|
int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
|
|
|
|
|
|
|
|
|
|
prob[bb] = 0.0;
|
|
|
|
|
if (IS_RGN_ENTRY (bb))
|
|
|
|
|
{
|
|
|
|
|
BITSET_ADD (dom[bb], 0, bbset_size);
|
|
|
|
|
prob[bb] = 1.0;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Intialize dom[bb] to '111..1'. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
BITSET_INVERT (dom[bb], bbset_size);
|
|
|
|
|
|
|
|
|
|
do
|
|
|
|
|
{
|
|
|
|
|
pred = FROM_BLOCK (nxt_in_edge);
|
|
|
|
|
BITSET_INTER (dom[bb], dom[BLOCK_TO_BB (pred)], bbset_size);
|
|
|
|
|
|
|
|
|
|
BITSET_UNION (ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)],
|
|
|
|
|
edgeset_size);
|
|
|
|
|
|
|
|
|
|
BITSET_ADD (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge), edgeset_size);
|
|
|
|
|
|
|
|
|
|
nr_out_edges = 1;
|
|
|
|
|
nr_rgn_out_edges = 0;
|
|
|
|
|
fst_out_edge = OUT_EDGES (pred);
|
|
|
|
|
nxt_out_edge = NEXT_OUT (fst_out_edge);
|
|
|
|
|
BITSET_UNION (pot_split[bb], pot_split[BLOCK_TO_BB (pred)],
|
|
|
|
|
edgeset_size);
|
|
|
|
|
|
|
|
|
|
BITSET_ADD (pot_split[bb], EDGE_TO_BIT (fst_out_edge), edgeset_size);
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* The successor doesn't belong in the region? */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
|
|
|
|
|
CONTAINING_RGN (BB_TO_BLOCK (bb)))
|
|
|
|
|
++nr_rgn_out_edges;
|
|
|
|
|
|
|
|
|
|
while (fst_out_edge != nxt_out_edge)
|
|
|
|
|
{
|
|
|
|
|
++nr_out_edges;
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* The successor doesn't belong in the region? */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
|
|
|
|
|
CONTAINING_RGN (BB_TO_BLOCK (bb)))
|
|
|
|
|
++nr_rgn_out_edges;
|
|
|
|
|
BITSET_ADD (pot_split[bb], EDGE_TO_BIT (nxt_out_edge), edgeset_size);
|
|
|
|
|
nxt_out_edge = NEXT_OUT (nxt_out_edge);
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Now nr_rgn_out_edges is the number of region-exit edges from
|
|
|
|
|
pred, and nr_out_edges will be the number of pred out edges
|
|
|
|
|
not leaving the region. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
nr_out_edges -= nr_rgn_out_edges;
|
|
|
|
|
if (nr_rgn_out_edges > 0)
|
|
|
|
|
prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
|
|
|
|
|
else
|
|
|
|
|
prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;
|
|
|
|
|
nxt_in_edge = NEXT_IN (nxt_in_edge);
|
|
|
|
|
}
|
|
|
|
|
while (fst_in_edge != nxt_in_edge);
|
|
|
|
|
|
|
|
|
|
BITSET_ADD (dom[bb], bb, bbset_size);
|
|
|
|
|
BITSET_DIFFER (pot_split[bb], ancestor_edges[bb], edgeset_size);
|
|
|
|
|
|
|
|
|
|
if (sched_verbose >= 2)
|
|
|
|
|
fprintf (dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb), (int) (100.0 * prob[bb]));
|
|
|
|
|
} /* compute_dom_prob_ps */
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Functions for target info. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Compute in BL the list of split-edges of bb_src relatively to bb_trg.
|
|
|
|
|
Note that bb_trg dominates bb_src. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
split_edges (bb_src, bb_trg, bl)
|
|
|
|
|
int bb_src;
|
|
|
|
|
int bb_trg;
|
|
|
|
|
edgelst *bl;
|
|
|
|
|
{
|
|
|
|
|
int es = edgeset_size;
|
|
|
|
|
edgeset src = (edgeset) alloca (es * sizeof (HOST_WIDE_INT));
|
|
|
|
|
|
|
|
|
|
while (es--)
|
|
|
|
|
src[es] = (pot_split[bb_src])[es];
|
|
|
|
|
BITSET_DIFFER (src, pot_split[bb_trg], edgeset_size);
|
|
|
|
|
extract_bitlst (src, edgeset_size, bl);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Find the valid candidate-source-blocks for the target block TRG, compute
|
|
|
|
|
their probability, and check if they are speculative or not.
|
|
|
|
|
For speculative sources, compute their update-blocks and split-blocks. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
compute_trg_info (trg)
|
|
|
|
|
int trg;
|
|
|
|
|
{
|
|
|
|
|
register candidate *sp;
|
|
|
|
|
edgelst el;
|
|
|
|
|
int check_block, update_idx;
|
|
|
|
|
int i, j, k, fst_edge, nxt_edge;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Define some of the fields for the target bb as well. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
sp = candidate_table + trg;
|
|
|
|
|
sp->is_valid = 1;
|
|
|
|
|
sp->is_speculative = 0;
|
|
|
|
|
sp->src_prob = 100;
|
|
|
|
|
|
|
|
|
|
for (i = trg + 1; i < current_nr_blocks; i++)
|
|
|
|
|
{
|
|
|
|
|
sp = candidate_table + i;
|
|
|
|
|
|
|
|
|
|
sp->is_valid = IS_DOMINATED (i, trg);
|
|
|
|
|
if (sp->is_valid)
|
|
|
|
|
{
|
|
|
|
|
sp->src_prob = GET_SRC_PROB (i, trg);
|
|
|
|
|
sp->is_valid = (sp->src_prob >= MIN_PROBABILITY);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (sp->is_valid)
|
|
|
|
|
{
|
|
|
|
|
split_edges (i, trg, &el);
|
|
|
|
|
sp->is_speculative = (el.nr_members) ? 1 : 0;
|
|
|
|
|
if (sp->is_speculative && !flag_schedule_speculative)
|
|
|
|
|
sp->is_valid = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (sp->is_valid)
|
|
|
|
|
{
|
|
|
|
|
sp->split_bbs.first_member = &bblst_table[bblst_last];
|
|
|
|
|
sp->split_bbs.nr_members = el.nr_members;
|
|
|
|
|
for (j = 0; j < el.nr_members; bblst_last++, j++)
|
|
|
|
|
bblst_table[bblst_last] =
|
|
|
|
|
TO_BLOCK (rgn_edges[el.first_member[j]]);
|
|
|
|
|
sp->update_bbs.first_member = &bblst_table[bblst_last];
|
|
|
|
|
update_idx = 0;
|
|
|
|
|
for (j = 0; j < el.nr_members; j++)
|
|
|
|
|
{
|
|
|
|
|
check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]);
|
|
|
|
|
fst_edge = nxt_edge = OUT_EDGES (check_block);
|
|
|
|
|
do
|
|
|
|
|
{
|
|
|
|
|
for (k = 0; k < el.nr_members; k++)
|
|
|
|
|
if (EDGE_TO_BIT (nxt_edge) == el.first_member[k])
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
if (k >= el.nr_members)
|
|
|
|
|
{
|
|
|
|
|
bblst_table[bblst_last++] = TO_BLOCK (nxt_edge);
|
|
|
|
|
update_idx++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nxt_edge = NEXT_OUT (nxt_edge);
|
|
|
|
|
}
|
|
|
|
|
while (fst_edge != nxt_edge);
|
|
|
|
|
}
|
|
|
|
|
sp->update_bbs.nr_members = update_idx;
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
|
|
|
|
|
|
|
|
|
|
sp->is_speculative = 0;
|
|
|
|
|
sp->src_prob = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} /* compute_trg_info */
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Print candidates info, for debugging purposes. Callable from debugger. */
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
debug_candidate (i)
|
|
|
|
|
int i;
|
|
|
|
|
{
|
|
|
|
|
if (!candidate_table[i].is_valid)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
if (candidate_table[i].is_speculative)
|
|
|
|
|
{
|
|
|
|
|
int j;
|
|
|
|
|
fprintf (dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
|
|
|
|
|
|
|
|
|
|
fprintf (dump, "split path: ");
|
|
|
|
|
for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
|
|
|
|
|
{
|
|
|
|
|
int b = candidate_table[i].split_bbs.first_member[j];
|
|
|
|
|
|
|
|
|
|
fprintf (dump, " %d ", b);
|
|
|
|
|
}
|
|
|
|
|
fprintf (dump, "\n");
|
|
|
|
|
|
|
|
|
|
fprintf (dump, "update path: ");
|
|
|
|
|
for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
|
|
|
|
|
{
|
|
|
|
|
int b = candidate_table[i].update_bbs.first_member[j];
|
|
|
|
|
|
|
|
|
|
fprintf (dump, " %d ", b);
|
|
|
|
|
}
|
|
|
|
|
fprintf (dump, "\n");
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
fprintf (dump, " src %d equivalent\n", BB_TO_BLOCK (i));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Print candidates info, for debugging purposes. Callable from debugger. */
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
debug_candidates (trg)
|
|
|
|
|
int trg;
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
fprintf (dump, "----------- candidate table: target: b=%d bb=%d ---\n",
|
|
|
|
|
BB_TO_BLOCK (trg), trg);
|
|
|
|
|
for (i = trg + 1; i < current_nr_blocks; i++)
|
|
|
|
|
debug_candidate (i);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Functions for speculative scheduling. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Return 0 if x is a set of a register alive in the beginning of one
|
|
|
|
|
of the split-blocks of src, otherwise return 1. */
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
check_live_1 (src, x)
|
|
|
|
|
int src;
|
|
|
|
|
rtx x;
|
|
|
|
|
{
|
1998-02-17 22:35:43 +01:00
|
|
|
|
register int i;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
register int regno;
|
|
|
|
|
register rtx reg = SET_DEST (x);
|
|
|
|
|
|
|
|
|
|
if (reg == 0)
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
|
|
|
|
|
|| GET_CODE (reg) == SIGN_EXTRACT
|
|
|
|
|
|| GET_CODE (reg) == STRICT_LOW_PART)
|
|
|
|
|
reg = XEXP (reg, 0);
|
|
|
|
|
|
1998-09-12 05:45:22 +02:00
|
|
|
|
if (GET_CODE (reg) == PARALLEL
|
|
|
|
|
&& GET_MODE (reg) == BLKmode)
|
|
|
|
|
{
|
|
|
|
|
register int i;
|
|
|
|
|
for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
|
|
|
|
|
if (check_live_1 (src, XVECEXP (reg, 0, i)))
|
|
|
|
|
return 1;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (GET_CODE (reg) != REG)
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
regno = REGNO (reg);
|
|
|
|
|
|
|
|
|
|
if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
|
|
|
|
|
{
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Global registers are assumed live. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
if (regno < FIRST_PSEUDO_REGISTER)
|
|
|
|
|
{
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Check for hard registers. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
|
|
|
|
|
while (--j >= 0)
|
|
|
|
|
{
|
|
|
|
|
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
|
|
|
|
|
{
|
|
|
|
|
int b = candidate_table[src].split_bbs.first_member[i];
|
|
|
|
|
|
1999-02-26 00:45:42 +01:00
|
|
|
|
if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start,
|
|
|
|
|
regno + j))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Check for psuedo registers. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
|
|
|
|
|
{
|
|
|
|
|
int b = candidate_table[src].split_bbs.first_member[i];
|
|
|
|
|
|
1999-02-26 00:45:42 +01:00
|
|
|
|
if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* If x is a set of a register R, mark that R is alive in the beginning
|
|
|
|
|
of every update-block of src. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
update_live_1 (src, x)
|
|
|
|
|
int src;
|
|
|
|
|
rtx x;
|
|
|
|
|
{
|
1998-02-17 22:35:43 +01:00
|
|
|
|
register int i;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
register int regno;
|
|
|
|
|
register rtx reg = SET_DEST (x);
|
|
|
|
|
|
|
|
|
|
if (reg == 0)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
|
|
|
|
|
|| GET_CODE (reg) == SIGN_EXTRACT
|
|
|
|
|
|| GET_CODE (reg) == STRICT_LOW_PART)
|
|
|
|
|
reg = XEXP (reg, 0);
|
|
|
|
|
|
1998-09-12 05:45:22 +02:00
|
|
|
|
if (GET_CODE (reg) == PARALLEL
|
|
|
|
|
&& GET_MODE (reg) == BLKmode)
|
|
|
|
|
{
|
|
|
|
|
register int i;
|
|
|
|
|
for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
|
|
|
|
|
update_live_1 (src, XVECEXP (reg, 0, i));
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (GET_CODE (reg) != REG)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
/* Global registers are always live, so the code below does not apply
|
|
|
|
|
to them. */
|
|
|
|
|
|
|
|
|
|
regno = REGNO (reg);
|
|
|
|
|
|
|
|
|
|
if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
|
|
|
|
|
{
|
|
|
|
|
if (regno < FIRST_PSEUDO_REGISTER)
|
|
|
|
|
{
|
|
|
|
|
int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
|
|
|
|
|
while (--j >= 0)
|
|
|
|
|
{
|
|
|
|
|
for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
|
|
|
|
|
{
|
|
|
|
|
int b = candidate_table[src].update_bbs.first_member[i];
|
|
|
|
|
|
1999-02-26 00:45:42 +01:00
|
|
|
|
SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start,
|
|
|
|
|
regno + j);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
|
|
|
|
|
{
|
|
|
|
|
int b = candidate_table[src].update_bbs.first_member[i];
|
|
|
|
|
|
1999-02-26 00:45:42 +01:00
|
|
|
|
SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Return 1 if insn can be speculatively moved from block src to trg,
|
|
|
|
|
otherwise return 0. Called before first insertion of insn to
|
|
|
|
|
ready-list or before the scheduling. */
|
|
|
|
|
|
|
|
|
|
static int
|
1998-02-17 22:35:43 +01:00
|
|
|
|
check_live (insn, src)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
rtx insn;
|
|
|
|
|
int src;
|
|
|
|
|
{
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Find the registers set by instruction. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (GET_CODE (PATTERN (insn)) == SET
|
|
|
|
|
|| GET_CODE (PATTERN (insn)) == CLOBBER)
|
|
|
|
|
return check_live_1 (src, PATTERN (insn));
|
|
|
|
|
else if (GET_CODE (PATTERN (insn)) == PARALLEL)
|
|
|
|
|
{
|
|
|
|
|
int j;
|
|
|
|
|
for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
|
|
|
|
|
if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
|
|
|
|
|
|| GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
|
|
|
|
|
&& !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Update the live registers info after insn was moved speculatively from
|
|
|
|
|
block src to trg. */
|
|
|
|
|
|
|
|
|
|
static void
|
1998-02-17 22:35:43 +01:00
|
|
|
|
update_live (insn, src)
|
1997-08-12 06:07:19 +02:00
|
|
|
|
rtx insn;
|
1998-02-17 22:35:43 +01:00
|
|
|
|
int src;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Find the registers set by instruction. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (GET_CODE (PATTERN (insn)) == SET
|
|
|
|
|
|| GET_CODE (PATTERN (insn)) == CLOBBER)
|
|
|
|
|
update_live_1 (src, PATTERN (insn));
|
|
|
|
|
else if (GET_CODE (PATTERN (insn)) == PARALLEL)
|
|
|
|
|
{
|
|
|
|
|
int j;
|
|
|
|
|
for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
|
|
|
|
|
if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
|
|
|
|
|
|| GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
|
|
|
|
|
update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Exception Free Loads:
|
|
|
|
|
|
|
|
|
|
We define five classes of speculative loads: IFREE, IRISKY,
|
|
|
|
|
PFREE, PRISKY, and MFREE.
|
|
|
|
|
|
|
|
|
|
IFREE loads are loads that are proved to be exception-free, just
|
|
|
|
|
by examining the load insn. Examples for such loads are loads
|
|
|
|
|
from TOC and loads of global data.
|
|
|
|
|
|
|
|
|
|
IRISKY loads are loads that are proved to be exception-risky,
|
|
|
|
|
just by examining the load insn. Examples for such loads are
|
|
|
|
|
volatile loads and loads from shared memory.
|
|
|
|
|
|
|
|
|
|
PFREE loads are loads for which we can prove, by examining other
|
|
|
|
|
insns, that they are exception-free. Currently, this class consists
|
|
|
|
|
of loads for which we are able to find a "similar load", either in
|
|
|
|
|
the target block, or, if only one split-block exists, in that split
|
|
|
|
|
block. Load2 is similar to load1 if both have same single base
|
|
|
|
|
register. We identify only part of the similar loads, by finding
|
|
|
|
|
an insn upon which both load1 and load2 have a DEF-USE dependence.
|
|
|
|
|
|
|
|
|
|
PRISKY loads are loads for which we can prove, by examining other
|
|
|
|
|
insns, that they are exception-risky. Currently we have two proofs for
|
|
|
|
|
such loads. The first proof detects loads that are probably guarded by a
|
|
|
|
|
test on the memory address. This proof is based on the
|
|
|
|
|
backward and forward data dependence information for the region.
|
|
|
|
|
Let load-insn be the examined load.
|
|
|
|
|
Load-insn is PRISKY iff ALL the following hold:
|
|
|
|
|
|
|
|
|
|
- insn1 is not in the same block as load-insn
|
|
|
|
|
- there is a DEF-USE dependence chain (insn1, ..., load-insn)
|
1999-09-06 23:55:23 +02:00
|
|
|
|
- test-insn is either a compare or a branch, not in the same block
|
|
|
|
|
as load-insn
|
1997-08-12 06:07:19 +02:00
|
|
|
|
- load-insn is reachable from test-insn
|
|
|
|
|
- there is a DEF-USE dependence chain (insn1, ..., test-insn)
|
|
|
|
|
|
|
|
|
|
This proof might fail when the compare and the load are fed
|
|
|
|
|
by an insn not in the region. To solve this, we will add to this
|
|
|
|
|
group all loads that have no input DEF-USE dependence.
|
|
|
|
|
|
|
|
|
|
The second proof detects loads that are directly or indirectly
|
|
|
|
|
fed by a speculative load. This proof is affected by the
|
|
|
|
|
scheduling process. We will use the flag fed_by_spec_load.
|
|
|
|
|
Initially, all insns have this flag reset. After a speculative
|
|
|
|
|
motion of an insn, if insn is either a load, or marked as
|
|
|
|
|
fed_by_spec_load, we will also mark as fed_by_spec_load every
|
|
|
|
|
insn1 for which a DEF-USE dependence (insn, insn1) exists. A
|
|
|
|
|
load which is fed_by_spec_load is also PRISKY.
|
|
|
|
|
|
|
|
|
|
MFREE (maybe-free) loads are all the remaining loads. They may be
|
|
|
|
|
exception-free, but we cannot prove it.
|
|
|
|
|
|
|
|
|
|
Now, all loads in IFREE and PFREE classes are considered
|
|
|
|
|
exception-free, while all loads in IRISKY and PRISKY classes are
|
|
|
|
|
considered exception-risky. As for loads in the MFREE class,
|
|
|
|
|
these are considered either exception-free or exception-risky,
|
|
|
|
|
depending on whether we are pessimistic or optimistic. We have
|
|
|
|
|
to take the pessimistic approach to assure the safety of
|
|
|
|
|
speculative scheduling, but we can take the optimistic approach
|
|
|
|
|
by invoking the -fsched_spec_load_dangerous option. */
|
|
|
|
|
|
|
|
|
|
enum INSN_TRAP_CLASS
|
|
|
|
|
{
|
|
|
|
|
TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
|
|
|
|
|
PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
#define WORST_CLASS(class1, class2) \
|
|
|
|
|
((class1 > class2) ? class1 : class2)
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Indexed by INSN_UID, and set if there's DEF-USE dependence between
|
|
|
|
|
some speculatively moved load insn and this one. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
char *fed_by_spec_load;
|
|
|
|
|
char *is_load_insn;
|
|
|
|
|
|
|
|
|
|
/* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
|
|
|
|
|
#define IS_REACHABLE(bb_from, bb_to) \
|
|
|
|
|
(bb_from == bb_to \
|
|
|
|
|
|| IS_RGN_ENTRY (bb_from) \
|
|
|
|
|
|| (bitset_member (ancestor_edges[bb_to], \
|
|
|
|
|
EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))), \
|
|
|
|
|
edgeset_size)))
|
|
|
|
|
#define FED_BY_SPEC_LOAD(insn) (fed_by_spec_load[INSN_UID (insn)])
|
|
|
|
|
#define IS_LOAD_INSN(insn) (is_load_insn[INSN_UID (insn)])
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Non-zero iff the address is comprised from at most 1 register. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#define CONST_BASED_ADDRESS_P(x) \
|
|
|
|
|
(GET_CODE (x) == REG \
|
|
|
|
|
|| ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
|
|
|
|
|
|| (GET_CODE (x) == LO_SUM)) \
|
|
|
|
|
&& (GET_CODE (XEXP (x, 0)) == CONST_INT \
|
|
|
|
|
|| GET_CODE (XEXP (x, 1)) == CONST_INT)))
|
|
|
|
|
|
|
|
|
|
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
set_spec_fed (load_insn)
|
|
|
|
|
rtx load_insn;
|
|
|
|
|
{
|
|
|
|
|
rtx link;
|
|
|
|
|
|
|
|
|
|
for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
|
|
|
|
|
if (GET_MODE (link) == VOIDmode)
|
|
|
|
|
FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
|
|
|
|
|
} /* set_spec_fed */
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* On the path from the insn to load_insn_bb, find a conditional
|
|
|
|
|
branch depending on insn, that guards the speculative load. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
find_conditional_protection (insn, load_insn_bb)
|
|
|
|
|
rtx insn;
|
|
|
|
|
int load_insn_bb;
|
|
|
|
|
{
|
|
|
|
|
rtx link;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Iterate through DEF-USE forward dependences. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
|
|
|
|
|
{
|
|
|
|
|
rtx next = XEXP (link, 0);
|
1999-10-19 00:20:27 +02:00
|
|
|
|
if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
|
1997-08-12 06:07:19 +02:00
|
|
|
|
CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
|
|
|
|
|
&& IS_REACHABLE (INSN_BB (next), load_insn_bb)
|
|
|
|
|
&& load_insn_bb != INSN_BB (next)
|
|
|
|
|
&& GET_MODE (link) == VOIDmode
|
|
|
|
|
&& (GET_CODE (next) == JUMP_INSN
|
|
|
|
|
|| find_conditional_protection (next, load_insn_bb)))
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
return 0;
|
|
|
|
|
} /* find_conditional_protection */
|
|
|
|
|
|
|
|
|
|
/* Returns 1 if the same insn1 that participates in the computation
   of load_insn's address is feeding a conditional branch that is
   guarding on load_insn. This is true if we find the two DEF-USE
   chains:
   insn1 -> ... -> conditional-branch
   insn1 -> ... -> load_insn,
   and if a flow path exists:
   insn1 -> ... -> conditional-branch -> ... -> load_insn,
   and if insn1 is on the path
   region-entry -> ... -> bb_trg -> ... load_insn.

   Locate insn1 by climbing on LOG_LINKS from load_insn.
   Locate the branch by following INSN_DEPEND from insn1.  */

static int
is_conditionally_protected (load_insn, bb_src, bb_trg)
     rtx load_insn;
     int bb_src, bb_trg;
{
  rtx link;

  for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1))
    {
      rtx insn1 = XEXP (link, 0);

      /* Must be a DEF-USE dependence upon non-branch.  */
      if (GET_MODE (link) != VOIDmode
	  || GET_CODE (insn1) == JUMP_INSN)
	continue;

      /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn.  */
      if (INSN_BB (insn1) == bb_src
	  || (CONTAINING_RGN (BLOCK_NUM (insn1))
	      != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
	  || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
	      && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
	continue;

      /* Now search for the conditional-branch.  */
      if (find_conditional_protection (insn1, bb_src))
	return 1;

      /* Recursive step: search another insn1, "above" current insn1.
	 NOTE(review): this returns from inside the loop, so only the
	 first qualifying LOG_LINKS entry is ever explored — presumably
	 intentional, as the ancestor chain is followed instead.  */
      return is_conditionally_protected (insn1, bb_src, bb_trg);
    }

  /* The chain does not exist.  */
  return 0;
}				/* is_conditionally_protected */
|
|
|
|
|
|
|
|
|
|
/* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
   load_insn can move speculatively from bb_src to bb_trg. All the
   following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load1 have a def-use dependence upon
   the same insn 'insn1'.
   (3) either load2 is in bb_trg, or:
   - there's only one split-block, and
   - load1 is on the escape path, and

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow. */

static int
is_pfree (load_insn, bb_src, bb_trg)
     rtx load_insn;
     int bb_src, bb_trg;
{
  rtx back_link;
  register candidate *candp = candidate_table + bb_src;

  if (candp->split_bbs.nr_members != 1)
    /* Must have exactly one escape block.  */
    return 0;

  /* Walk backward dependences of LOAD_INSN looking for the INSN1 of
     condition (2), then walk INSN1's forward dependences for INSN2.  */
  for (back_link = LOG_LINKS (load_insn);
       back_link; back_link = XEXP (back_link, 1))
    {
      rtx insn1 = XEXP (back_link, 0);

      if (GET_MODE (back_link) == VOIDmode)
	{
	  /* Found a DEF-USE dependence (insn1, load_insn).  */
	  rtx fore_link;

	  for (fore_link = INSN_DEPEND (insn1);
	       fore_link; fore_link = XEXP (fore_link, 1))
	    {
	      rtx insn2 = XEXP (fore_link, 0);
	      if (GET_MODE (fore_link) == VOIDmode)
		{
		  /* Found a DEF-USE dependence (insn1, insn2).  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    /* insn2 not guaranteed to be a 1 base reg load.  */
		    continue;

		  if (INSN_BB (insn2) == bb_trg)
		    /* insn2 is the similar load, in the target block.  */
		    return 1;

		  if (*(candp->split_bbs.first_member) == BLOCK_NUM (insn2))
		    /* insn2 is a similar load, in a split-block.  */
		    return 1;
		}
	    }
	}
    }

  /* Couldn't find a similar load.  */
  return 0;
}				/* is_pfree */
|
|
|
|
|
|
|
|
|
|
/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  IS_STORE is nonzero when
   X is a destination (store side) rather than a source.  */

static int
may_trap_exp (x, is_store)
     rtx x;
     int is_store;
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      /* Any store to memory is considered risky; stores to registers
	 are trap-free.  */
      if (code == MEM)
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn's sub-expressions, combining the
	 classes with WORST_CLASS and stopping early once the result
	 cannot get any worse.  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}				/* may_trap_exp */
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its patterns, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
   being either PFREE or PRISKY.  */

static int
haifa_classify_insn (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      /* Combine the class of each element; stop once the worst
	 possible class has been reached.  */
      for (i = len - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (pat, 0, i));
	  switch (code)
	    {
	    case CLOBBER:
	      /* Test if it is a 'store'.  */
	      tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
	      break;
	    case SET:
	      /* Test if it is a store.  */
	      tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
	      if (tmp_class == TRAP_RISKY)
		break;
	      /* Test if it is a load.  */
	      tmp_class =
		WORST_CLASS (tmp_class,
			     may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)), 0));
	      break;
	    case TRAP_IF:
	      tmp_class = TRAP_RISKY;
	      break;
	    default:;
	    }
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      /* A single-pattern insn: classify the one SET/CLOBBER/TRAP_IF.  */
      code = GET_CODE (pat);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (pat, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (pat), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (pat), 0));
	  break;
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}				/* haifa_classify_insn */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
   a load moved speculatively, or if load_insn is protected by
   a compare on load_insn's address).  */

static int
is_prisky (load_insn, bb_src, bb_trg)
     rtx load_insn;
     int bb_src, bb_trg;
{
  if (FED_BY_SPEC_LOAD (load_insn))
    return 1;

  if (LOG_LINKS (load_insn) == NULL)
    /* Dependence may 'hide' out of the region.  */
    return 1;

  if (is_conditionally_protected (load_insn, bb_src, bb_trg))
    return 1;

  return 0;
}				/* is_prisky */
|
|
|
|
|
|
|
|
|
|
/* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
   Return 1 if insn is exception-free (and the motion is valid)
   and 0 otherwise.  */

static int
is_exception_free (insn, bb_src, bb_trg)
     rtx insn;
     int bb_src, bb_trg;
{
  int insn_class = haifa_classify_insn (insn);

  /* Handle non-load insns.  */
  switch (insn_class)
    {
    case TRAP_FREE:
      return 1;
    case TRAP_RISKY:
      return 0;
    default:;
    }

  /* Handle loads.  */
  if (!flag_schedule_speculative_load)
    return 0;
  IS_LOAD_INSN (insn) = 1;
  switch (insn_class)
    {
    case IFREE:
      return (1);
    case IRISKY:
      return 0;
    case PFREE_CANDIDATE:
      if (is_pfree (insn, bb_src, bb_trg))
	return 1;
      /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate.  */
    case PRISKY_CANDIDATE:
      if (!flag_schedule_speculative_load_dangerous
	  || is_prisky (insn, bb_src, bb_trg))
	return 0;
      break;
    default:;
    }

  /* Only reached for a PRISKY_CANDIDATE that survived the check above;
     allowed only under -fsched-spec-load-dangerous.  */
  return flag_schedule_speculative_load_dangerous;
}				/* is_exception_free */
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Process an insn's memory dependencies. There are four kinds of
|
|
|
|
|
dependencies:
|
|
|
|
|
|
|
|
|
|
(0) read dependence: read follows read
|
|
|
|
|
(1) true dependence: read follows write
|
|
|
|
|
(2) anti dependence: write follows read
|
|
|
|
|
(3) output dependence: write follows write
|
|
|
|
|
|
|
|
|
|
We are careful to build only dependencies which actually exist, and
|
|
|
|
|
use transitivity to avoid building too many links. */
|
|
|
|
|
|
|
|
|
|
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

HAIFA_INLINE static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  /* Linear scan of the INSN_LIST chain.  */
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}
|
|
|
|
|
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
   otherwise.  LIST and LIST1 are parallel chains of equal length;
   position i of LIST is matched against position i of LIST1.  */

HAIFA_INLINE static char
find_insn_mem_list (insn, x, list, list1)
     rtx insn, x;
     rtx list, list1;
{
  while (list)
    {
      if (XEXP (list, 0) == insn
	  && XEXP (list1, 0) == x)
	return 1;
      list = XEXP (list, 1);
      list1 = XEXP (list1, 1);
    }
  return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Compute the function units used by INSN. This caches the value
   returned by function_units_used. A function unit is encoded as the
   unit number if the value is non-negative and the complement of a
   mask if the value is negative. A function unit index is the
   non-negative encoding.  */

HAIFA_INLINE static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
	 We can't pass these directly to function_units_used because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
	unit = -1;
      else
	{
	  unit = function_units_used (insn);
	  /* Increment non-negative values so we can cache zero.  */
	  if (unit >= 0)
	    unit++;
	}
      /* We only cache 16 bits of the result, so if the value is out of
	 range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
	  || unit >= 0
	  || (unit & ~((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
	INSN_UNIT (insn) = unit;
    }
  /* Undo the "increment so zero caches" shift for the caller.  */
  return (unit > 0 ? unit - 1 : unit);
}
|
|
|
|
|
|
|
|
|
|
/* Compute the blockage range for executing INSN on UNIT. This caches
   the value returned by the blockage_range_function for the unit.
   These values are encoded in an int where the upper half gives the
   minimum value and the lower half gives the maximum value.  */

HAIFA_INLINE static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  /* Cache miss: the cached entry belongs to a different unit
     (unit + 1 is stored so that 0 means "empty").  */
  if ((int) UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
	 the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
	INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}
|
|
|
|
|
|
|
|
|
|
/* A vector indexed by function unit instance giving the last insn to use
   the unit. The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];
|
|
|
|
|
|
|
|
|
|
/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
  bzero ((char *) unit_tick, sizeof (unit_tick));
  bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Return the issue-delay of an insn.  */

HAIFA_INLINE static int
insn_issue_delay (insn)
     rtx insn;
{
  int i, delay = 0;
  int unit = insn_unit (insn);

  /* Efficiency note: in fact, we are working 'hard' to compute a
     value that was available in md file, and is not available in
     function_units[] structure. It would be nice to have this
     value there, too.  */
  if (unit >= 0)
    {
      /* Single unit: ask its blockage function for the self-delay.  */
      if (function_units[unit].blockage_range_function &&
	  function_units[unit].blockage_function)
	delay = function_units[unit].blockage_function (insn, insn);
    }
  else
    /* Negative encoding: ~unit is a mask of units; take the worst delay.  */
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0 && function_units[i].blockage_range_function
	  && function_units[i].blockage_function)
	delay = MAX (delay, function_units[i].blockage_function (insn, insn));

  return delay;
}
|
|
|
|
|
|
|
|
|
|
/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

HAIFA_INLINE static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int tick = unit_tick[instance];	/* Issue time of the last issued insn.  */

  if (tick - clock > cost)
    {
      /* The scheduler is operating forward, so unit's last insn is the
	 executing insn and INSN is the candidate insn. We want a
	 more exact measure of the blockage if we execute INSN at CLOCK
	 given when we committed the execution of the unit's last insn.

	 The blockage value is given by either the unit's max blockage
	 constant, blockage range function, or blockage function. Use
	 the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
	{
	  if (function_units[unit].blockage_function)
	    tick += (function_units[unit].blockage_function
		     (unit_last_insn[instance], insn)
		     - function_units[unit].max_blockage);
	  else
	    tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
		     - function_units[unit].max_blockage);
	}
      if (tick - clock > cost)
	cost = tick - clock;
    }
  return cost;
}
|
|
|
|
|
|
|
|
|
|
/* Record INSN as having begun execution on the units encoded by UNIT at
   time CLOCK.  */

HAIFA_INLINE static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      int instance = unit;
#if MAX_MULTIPLICITY > 1
      /* Find the first free instance of the function unit and use that
	 one. We assume that one is free.  */
      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
	{
	  if (!actual_hazard_this_instance (unit, instance, insn, clock, 0))
	    break;
	  instance += FUNCTION_UNITS_SIZE;
	}
#endif
      unit_last_insn[instance] = insn;
      unit_tick[instance] = (clock + function_units[unit].max_blockage);
    }
  else
    /* Negative encoding: ~unit is a mask; schedule on every set unit.  */
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	schedule_unit (i, insn, clock);
}
|
|
|
|
|
|
|
|
|
|
/* Return the actual hazard cost of executing INSN on the units encoded by
   UNIT at time CLOCK if the previous actual hazard cost was COST.  */

HAIFA_INLINE static int
actual_hazard (unit, insn, clock, cost)
     int unit, clock, cost;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      /* Find the instance of the function unit with the minimum hazard.  */
      int instance = unit;
      int best_cost = actual_hazard_this_instance (unit, instance, insn,
						   clock, cost);
#if MAX_MULTIPLICITY > 1
      int this_cost;

      if (best_cost > cost)
	{
	  /* Try the other instances; stop early once one is no worse
	     than the incoming COST.  */
	  for (i = function_units[unit].multiplicity - 1; i > 0; i--)
	    {
	      instance += FUNCTION_UNITS_SIZE;
	      this_cost = actual_hazard_this_instance (unit, instance, insn,
						       clock, cost);
	      if (this_cost < best_cost)
		{
		  best_cost = this_cost;
		  if (this_cost <= cost)
		    break;
		}
	    }
	}
#endif
      cost = MAX (cost, best_cost);
    }
  else
    /* Negative encoding: ~unit is a mask; combine hazards of all units.  */
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	cost = actual_hazard (i, insn, clock, cost);

  return cost;
}
|
|
|
|
|
|
|
|
|
|
/* Return the potential hazard cost of executing an instruction on the
   units encoded by UNIT if the previous potential hazard cost was COST.
   An insn with a large blockage time is chosen in preference to one
   with a smaller time; an insn that uses a unit that is more likely
   to be used is chosen in preference to one with a unit that is less
   used. We are trying to minimize a subsequent actual hazard.  */

HAIFA_INLINE static int
potential_hazard (unit, insn, cost)
     int unit, cost;
     rtx insn;
{
  int i, ncost;
  unsigned int minb, maxb;

  if (unit >= 0)
    {
      minb = maxb = function_units[unit].max_blockage;
      if (maxb > 1)
	{
	  if (function_units[unit].blockage_range_function)
	    {
	      maxb = minb = blockage_range (unit, insn);
	      maxb = MAX_BLOCKAGE_COST (maxb);
	      minb = MIN_BLOCKAGE_COST (minb);
	    }

	  if (maxb > 1)
	    {
	      /* Make the number of instructions left dominate. Make the
		 minimum delay dominate the maximum delay. If all these
		 are the same, use the unit number to add an arbitrary
		 ordering. Other terms can be added.  */
	      ncost = minb * 0x40 + maxb;
	      ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
	      if (ncost > cost)
		cost = ncost;
	    }
	}
    }
  else
    /* Negative encoding: ~unit is a mask; combine over all set units.  */
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	cost = potential_hazard (i, insn, cost);

  return cost;
}
|
|
|
|
|
|
|
|
|
|
/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  With LINK == 0 and USED == 0, returns the raw
   latency of INSN without considering how its result is consumed.  */

HAIFA_INLINE static int
insn_cost (insn, link, used)
     rtx insn, link, used;
{
  register int cost = INSN_COST (insn);

  if (cost == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
	 We can't pass these directly to result_ready_cost because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
	{
	  INSN_COST (insn) = 1;
	  return 1;
	}
      else
	{
	  cost = result_ready_cost (insn);

	  /* Clamp so that 0 remains the "not yet computed" marker.  */
	  if (cost < 1)
	    cost = 1;

	  INSN_COST (insn) = cost;
	}
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (link == 0 && used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed. This
     allows the computation of a function's result and parameter values to
     overlap the return and call.  */
  recog_memoized (used);
  if (INSN_CODE (used) < 0)
    LINK_COST_FREE (link) = 1;

  /* If some dependencies vary the cost, compute the adjustment. Most
     commonly, the adjustment is complete: either the cost is ignored
     (in the case of an output- or anti-dependence), or the cost is
     unchanged. These values are cached in the link as LINK_COST_FREE
     and LINK_COST_ZERO.  */

  if (LINK_COST_FREE (link))
    cost = 0;
#ifdef ADJUST_COST
  else if (!LINK_COST_ZERO (link))
    {
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost < 1)
	{
	  LINK_COST_FREE (link) = 1;
	  ncost = 0;
	}
      if (cost == ncost)
	LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif
  return cost;
}
|
|
|
|
|
|
|
|
|
|
/* Compute the priority number for INSN.  The priority is the length of
   the longest dependence chain (in cycles) from INSN to the end of its
   block; it is computed recursively and cached in INSN_PRIORITY.  */

static int
priority (insn)
     rtx insn;
{
  int this_priority;
  rtx link;

  if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
    return 0;

  /* INSN_PRIORITY == 0 means "not computed yet".  */
  if ((this_priority = INSN_PRIORITY (insn)) == 0)
    {
      if (INSN_DEPEND (insn) == 0)
	this_priority = insn_cost (insn, 0, 0);
      else
	for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
	  {
	    rtx next;
	    int next_priority;

	    if (RTX_INTEGRATED_P (link))
	      continue;

	    next = XEXP (link, 0);

	    /* Critical path is meaningful in block boundaries only.  */
	    if (BLOCK_NUM (next) != BLOCK_NUM (insn))
	      continue;

	    next_priority = insn_cost (insn, link, next) + priority (next);
	    if (next_priority > this_priority)
	      this_priority = next_priority;
	  }
      INSN_PRIORITY (insn) = this_priority;
    }
  return this_priority;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
|
|
|
|
|
them to the unused_*_list variables, so that they can be reused. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
free_pending_lists ()
|
|
|
|
|
{
|
|
|
|
|
if (current_nr_blocks <= 1)
|
|
|
|
|
{
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list (&pending_read_insns);
|
|
|
|
|
free_INSN_LIST_list (&pending_write_insns);
|
|
|
|
|
free_EXPR_LIST_list (&pending_read_mems);
|
|
|
|
|
free_EXPR_LIST_list (&pending_write_mems);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Interblock scheduling. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
int bb;
|
|
|
|
|
|
|
|
|
|
for (bb = 0; bb < current_nr_blocks; bb++)
|
|
|
|
|
{
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list (&bb_pending_read_insns[bb]);
|
|
|
|
|
free_INSN_LIST_list (&bb_pending_write_insns[bb]);
|
|
|
|
|
free_EXPR_LIST_list (&bb_pending_read_mems[bb]);
|
|
|
|
|
free_EXPR_LIST_list (&bb_pending_write_mems[bb]);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
|
|
|
|
|
The MEM is a memory reference contained within INSN, which we are saving
|
|
|
|
|
so that we can do memory aliasing on it. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
|
|
|
|
|
rtx *insn_list, *mem_list, insn, mem;
|
|
|
|
|
{
|
|
|
|
|
register rtx link;
|
|
|
|
|
|
1998-03-05 03:15:23 +01:00
|
|
|
|
link = alloc_INSN_LIST (insn, *insn_list);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
*insn_list = link;
|
|
|
|
|
|
1998-03-05 03:15:23 +01:00
|
|
|
|
link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
*mem_list = link;
|
|
|
|
|
|
|
|
|
|
pending_lists_length++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Make a dependency between every memory reference on the pending lists
|
|
|
|
|
and INSN, thus flushing the pending lists. If ONLY_WRITE, don't flush
|
|
|
|
|
the read list. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
flush_pending_lists (insn, only_write)
|
|
|
|
|
rtx insn;
|
|
|
|
|
int only_write;
|
|
|
|
|
{
|
|
|
|
|
rtx u;
|
|
|
|
|
rtx link;
|
|
|
|
|
|
|
|
|
|
while (pending_read_insns && ! only_write)
|
|
|
|
|
{
|
|
|
|
|
add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
|
|
|
|
|
|
|
|
|
|
link = pending_read_insns;
|
|
|
|
|
pending_read_insns = XEXP (pending_read_insns, 1);
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_node (link);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
link = pending_read_mems;
|
|
|
|
|
pending_read_mems = XEXP (pending_read_mems, 1);
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_EXPR_LIST_node (link);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
while (pending_write_insns)
|
|
|
|
|
{
|
|
|
|
|
add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
|
|
|
|
|
|
|
|
|
|
link = pending_write_insns;
|
|
|
|
|
pending_write_insns = XEXP (pending_write_insns, 1);
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_node (link);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
link = pending_write_mems;
|
|
|
|
|
pending_write_mems = XEXP (pending_write_mems, 1);
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_EXPR_LIST_node (link);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
pending_lists_length = 0;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* last_pending_memory_flush is now a list of insns. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
|
|
|
|
|
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list (&last_pending_memory_flush);
|
1998-03-05 03:15:23 +01:00
|
|
|
|
last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
|
1999-09-08 09:34:47 +02:00
|
|
|
|
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  /* A BLKmode PARALLEL destination is a group of pieces; analyze each
     piece as a separate write, then the source, and we are done.  */
  if (GET_CODE (dest) == PARALLEL
      && GET_MODE (dest) == BLKmode)
    {
      register int i;
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	sched_analyze_1 (XVECEXP (dest, 0, i), insn);
      if (GET_CODE (x) == SET)
	sched_analyze_2 (SET_SRC (x), insn);
      return;
    }

  /* Strip wrappers around the real destination.  */
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (XEXP (dest, 1), insn);
	  sched_analyze_2 (XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  while (--i >= 0)
	    {
	      rtx u;

	      /* Anti-dependence on every prior use of this hard reg.  */
	      for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	      /* Output dependence on every prior set.  */
	      for (u = reg_last_sets[regno + i]; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

	      /* Clobbers need not be ordered with respect to one
		 another, but sets must be ordered with respect to a
		 pending clobber.  */
	      if (code == SET)
		{
		  free_INSN_LIST_list (&reg_last_uses[regno + i]);
		  for (u = reg_last_clobbers[regno + i]; u; u = XEXP (u, 1))
		    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
		  SET_REGNO_REG_SET (reg_pending_sets, regno + i);
		}
	      else
		SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);

	      /* Function calls clobber all call_used regs.  */
	      if (global_regs[regno + i]
		  || (code == SET && call_used_regs[regno + i]))
		for (u = last_function_call; u; u = XEXP (u, 1))
		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	    }
	}
      else
	{
	  /* A pseudo register: same dependence bookkeeping, but only
	     for the single regno.  */
	  rtx u;

	  for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	  for (u = reg_last_sets[regno]; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

	  if (code == SET)
	    {
	      free_INSN_LIST_list (&reg_last_uses[regno]);
	      for (u = reg_last_clobbers[regno]; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
	      SET_REGNO_REG_SET (reg_pending_sets, regno);
	    }
	  else
	    SET_REGNO_REG_SET (reg_pending_clobbers, regno);

	  /* Pseudos that are REG_EQUIV to something may be replaced
	     by that during reloading.  We need only add dependencies for
	     the address in the REG_EQUIV note.  */
	  if (!reload_completed
	      && reg_known_equiv_p[regno]
	      && GET_CODE (reg_known_value[regno]) == MEM)
	    sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */
	  if (REG_N_CALLS_CROSSED (regno) == 0)
	    for (u = last_function_call; u; u = XEXP (u, 1))
	      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  The number 32 was chosen because it
	     seems like a reasonable number.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the number 32.  */
	  flush_pending_lists (insn, 0);
	}
      else
	{
	  rtx u;
	  rtx pending, pending_mem;

	  /* Anti-dependence on each pending read that may alias DEST.
	     The insn and mem lists are walked in lock-step.  */
	  pending = pending_read_insns;
	  pending_mem = pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), dest))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  /* Output dependence on each pending write that may alias DEST.  */
	  pending = pending_write_insns;
	  pending_mem = pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), dest))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	  /* Record this write so later reads/writes can depend on it.  */
	  add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
				   insn, dest);
	}
      /* The address of the MEM destination is itself read.  */
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}
|
|
|
|
|
|
|
|
|
|
/* Analyze the uses of memory and registers in rtx X in INSN. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
sched_analyze_2 (x, insn)
|
|
|
|
|
rtx x;
|
|
|
|
|
rtx insn;
|
|
|
|
|
{
|
|
|
|
|
register int i;
|
|
|
|
|
register int j;
|
|
|
|
|
register enum rtx_code code;
|
rtl.h (rtx_format): Constify a char*.
* rtl.h (rtx_format): Constify a char*.
* rtl.c (rtx_format): Likewise.
(copy_rtx, copy_most_rtx, read_rtx): Likewise.
(init_rtl): Use accessor macro, not `rtx_format'.
* alias.c (rtx_equal_for_memref_p, find_symbolic_term): Constify a
char*.
* caller-save.c (mark_referenced_regs): Likewise.
* combine.c (subst, make_compound_operation, known_cond,
gen_rtx_combine, update_table_tick, get_last_value_validate,
use_crosses_set_p, mark_used_regs_combine, move_deaths): Likewise.
* cse.c (rtx_cost, mention_regs, canon_hash, exp_equiv_p,
refers_to_p, canon_reg, fold_rtx, cse_process_notes,
count_reg_usage): Likewise.
* emit-rtl.c (gen_rtx, copy_rtx_if_shared, reset_used_flags):
Likewise.
* final.c (leaf_renumber_regs_insn): Likewise.
* flow.c (mark_used_regs, find_use_as_address, dump_flow_info,
dump_edge_info, count_reg_references): Likewise.
* function.c (fixup_var_refs_1, walk_fixup_memory_subreg,
fixup_stack_1, purge_addressof_1, instantiate_virtual_regs_1):
Likewise.
* gcse.c (oprs_unchanged_p, hash_expr_1, expr_equiv_p,
oprs_not_set_p, expr_killed_p, compute_transp, find_used_regs,
add_label_notes): Likewise.
* genattrtab.c (attr_rtx, attr_copy_rtx, encode_units_mask,
clear_struct_flag, count_sub_rtxs, count_alternatives,
compares_alternatives_p, contained_in_p, walk_attr_value,
write_expr_attr_cache): Likewise.
* genconfig.c (walk_insn_part): Likewise.
* genemit.c (max_operand_1, gen_exp): Likewise.
* genextract.c (walk_rtx): Likewise.
* genflags.c (num_operands): Likewise.
* genoutput.c (scan_operands): Likewise.
* genpeep.c (match_rtx): Likewise.
* genrecog.c (add_to_sequence): Likewise.
* haifa-sched.c (may_trap_exp, sched_analyze_2, attach_deaths):
Likewise.
* integrate.c (save_constants, copy_for_inline,
copy_rtx_and_substitute, subst_constants, restore_constants):
Likewise.
* jump.c (mark_jump_label, invert_exp, redirect_exp,
rtx_renumbered_equal_p, rtx_equal_for_thread_p): Likewise.
* local-alloc.c (contains_replace_regs, memref_referenced_p):
Likewise.
* loop.c (record_excess_regs, rtx_equal_for_loop_p,
add_label_notes, replace_call_address, count_nonfixed_reads,
invariant_p, find_single_use_in_loop, find_mem_givs,
find_life_end, maybe_eliminate_biv_1, update_reg_last_use):
Likewise.
* print-rtl.c (reg_names, print_rtx): Likewise.
* recog.c (validate_replace_rtx_1, find_single_use_1): Likewise.
* reg-stack.c (stack_regs_mentioned_p, record_label_references,
record_reg_life_pat, swap_rtx_condition, goto_block_pat,
print_blocks): Likewise.
* regclass.c (fix_register, record_address_regs,
reg_scan_mark_refs): Likewise.
* regmove.c (stable_but_for_p): Likewise.
* reload.c (loc_mentioned_in_p, operands_match_p,
find_reloads_toplevsubst_reg_equivs, find_reloads_address_1,
copy_replacements, refers_to_regno_for_reload_p,
refers_to_mem_for_reload_p, find_inc_amount, regno_clobbered_p,
reload_when_needed_name, reg_class_names, debug_reload_to_stream):
Likewise.
* reload1.c (eliminate_regs, scan_paradoxical_subregs,
delete_address_reloads_1, count_occurrences,
reload_cse_mem_conflict_p, reload_combine_note_use,
add_auto_inc_notes): Likewise.
* resource.c (mark_referenced_resources, mark_set_resources):
Likewise.
* rtlanal.c (rtx_unstable_p, rtx_varies_p, rtx_addr_varies_p,
reg_mentioned_p, regs_set_between_p, modified_between_p,
modified_in_p, refers_to_regno_p, reg_overlap_mentioned_p,
rtx_equal_p, volatile_insn_p, volatile_refs_p, side_effects_p,
may_trap_p, inequality_comparisons_p, replace_rtx, replace_regs,
jmp_uses_reg_or_mem, for_each_rtx, regno_use_in): Likewise.
* sched.c (sched_analyze_2, attach_deaths): Likewise.
* stupid.c (stupid_mark_refs): Likewise.
* unroll.c (remap_split_bivs): Likewise.
* varasm.c (mark_constants): Likewise.
* a29k/a29k.c (uses_local_reg_p): Likewise.
* alpha/alpha.c (summarize_insn): Likewise.
* arm/arm.c (symbol_mentioned_p, label_mentioned_p,
eliminate_lr2ip): Likewise.
* arm/thumb.c (symbol_mentioned_p, label_mentioned_p): Likewise.
* i386/i386.c (symbolic_reference_mentioned_p, copy_all_rtx,
reg_mentioned_in_mem): Likewise.
* ns32k/ns32k.c (global_symbolic_reference_mentioned_p,
symbolic_reference_mentioned_p): Likewise.
* romp/romp.c (unsigned_comparisons_p, hash_rtx): Likewise.
* sh/sh.c (regs_used, mark_use): Likewise.
* vax/vax.c (vax_rtx_cost): Likewise.
From-SVN: r28784
1999-08-21 01:05:25 +02:00
|
|
|
|
register const char *fmt;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
if (x == 0)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
code = GET_CODE (x);
|
|
|
|
|
|
|
|
|
|
switch (code)
|
|
|
|
|
{
|
|
|
|
|
case CONST_INT:
|
|
|
|
|
case CONST_DOUBLE:
|
|
|
|
|
case SYMBOL_REF:
|
|
|
|
|
case CONST:
|
|
|
|
|
case LABEL_REF:
|
|
|
|
|
/* Ignore constants. Note that we must handle CONST_DOUBLE here
|
|
|
|
|
because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
|
|
|
|
|
this does not mean that this insn is using cc0. */
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
#ifdef HAVE_cc0
|
|
|
|
|
case CC0:
|
|
|
|
|
{
|
|
|
|
|
rtx link, prev;
|
|
|
|
|
|
|
|
|
|
/* User of CC0 depends on immediately preceding insn. */
|
|
|
|
|
SCHED_GROUP_P (insn) = 1;
|
|
|
|
|
|
|
|
|
|
/* There may be a note before this insn now, but all notes will
|
|
|
|
|
be removed before we actually try to schedule the insns, so
|
|
|
|
|
it won't cause a problem later. We must avoid it here though. */
|
|
|
|
|
prev = prev_nonnote_insn (insn);
|
|
|
|
|
|
|
|
|
|
/* Make a copy of all dependencies on the immediately previous insn,
|
|
|
|
|
and add to this insn. This is so that all the dependencies will
|
|
|
|
|
apply to the group. Remove an explicit dependence on this insn
|
|
|
|
|
as SCHED_GROUP_P now represents it. */
|
|
|
|
|
|
|
|
|
|
if (find_insn_list (prev, LOG_LINKS (insn)))
|
|
|
|
|
remove_dependence (insn, prev);
|
|
|
|
|
|
|
|
|
|
for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
|
|
|
|
|
add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
|
|
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
case REG:
|
|
|
|
|
{
|
|
|
|
|
rtx u;
|
|
|
|
|
int regno = REGNO (x);
|
|
|
|
|
if (regno < FIRST_PSEUDO_REGISTER)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
i = HARD_REGNO_NREGS (regno, GET_MODE (x));
|
|
|
|
|
while (--i >= 0)
|
|
|
|
|
{
|
|
|
|
|
reg_last_uses[regno + i]
|
1998-03-05 03:15:23 +01:00
|
|
|
|
= alloc_INSN_LIST (insn, reg_last_uses[regno + i]);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
for (u = reg_last_sets[regno + i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
|
|
|
|
|
1999-03-07 12:22:10 +01:00
|
|
|
|
/* ??? This should never happen. */
|
|
|
|
|
for (u = reg_last_clobbers[regno + i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if ((call_used_regs[regno + i] || global_regs[regno + i]))
|
|
|
|
|
/* Function calls clobber all call_used regs. */
|
|
|
|
|
for (u = last_function_call; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
1999-09-06 23:55:23 +02:00
|
|
|
|
reg_last_uses[regno] = alloc_INSN_LIST (insn,
|
|
|
|
|
reg_last_uses[regno]);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
for (u = reg_last_sets[regno]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
|
|
|
|
|
1999-03-07 12:22:10 +01:00
|
|
|
|
/* ??? This should never happen. */
|
|
|
|
|
for (u = reg_last_clobbers[regno]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
/* Pseudos that are REG_EQUIV to something may be replaced
|
|
|
|
|
by that during reloading. We need only add dependencies for
|
|
|
|
|
the address in the REG_EQUIV note. */
|
|
|
|
|
if (!reload_completed
|
|
|
|
|
&& reg_known_equiv_p[regno]
|
|
|
|
|
&& GET_CODE (reg_known_value[regno]) == MEM)
|
|
|
|
|
sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
|
|
|
|
|
|
|
|
|
|
/* If the register does not already cross any calls, then add this
|
|
|
|
|
insn to the sched_before_next_call list so that it will still
|
|
|
|
|
not cross calls after scheduling. */
|
|
|
|
|
if (REG_N_CALLS_CROSSED (regno) == 0)
|
|
|
|
|
add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case MEM:
|
|
|
|
|
{
|
|
|
|
|
/* Reading memory. */
|
|
|
|
|
rtx u;
|
|
|
|
|
rtx pending, pending_mem;
|
|
|
|
|
|
|
|
|
|
pending = pending_read_insns;
|
|
|
|
|
pending_mem = pending_read_mems;
|
|
|
|
|
while (pending)
|
|
|
|
|
{
|
1999-08-29 01:20:34 +02:00
|
|
|
|
if (read_dependence (XEXP (pending_mem, 0), x))
|
|
|
|
|
add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
pending = XEXP (pending, 1);
|
|
|
|
|
pending_mem = XEXP (pending_mem, 1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pending = pending_write_insns;
|
|
|
|
|
pending_mem = pending_write_mems;
|
|
|
|
|
while (pending)
|
|
|
|
|
{
|
1999-08-29 01:20:34 +02:00
|
|
|
|
if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
|
|
|
|
|
x, rtx_varies_p))
|
|
|
|
|
add_dependence (insn, XEXP (pending, 0), 0);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
pending = XEXP (pending, 1);
|
|
|
|
|
pending_mem = XEXP (pending_mem, 1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
|
|
|
|
|
|
|
|
|
|
/* Always add these dependencies to pending_reads, since
|
|
|
|
|
this insn may be followed by a write. */
|
|
|
|
|
add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
|
|
|
|
|
insn, x);
|
|
|
|
|
|
|
|
|
|
/* Take advantage of tail recursion here. */
|
|
|
|
|
sched_analyze_2 (XEXP (x, 0), insn);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
1998-06-17 18:14:09 +02:00
|
|
|
|
/* Force pending stores to memory in case a trap handler needs them. */
|
|
|
|
|
case TRAP_IF:
|
|
|
|
|
flush_pending_lists (insn, 1);
|
|
|
|
|
break;
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
case ASM_OPERANDS:
|
|
|
|
|
case ASM_INPUT:
|
|
|
|
|
case UNSPEC_VOLATILE:
|
|
|
|
|
{
|
|
|
|
|
rtx u;
|
|
|
|
|
|
|
|
|
|
/* Traditional and volatile asm instructions must be considered to use
|
|
|
|
|
and clobber all hard registers, all pseudo-registers and all of
|
|
|
|
|
memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
|
|
|
|
|
|
|
|
|
|
Consider for instance a volatile asm that changes the fpu rounding
|
|
|
|
|
mode. An insn should not be moved across this even if it only uses
|
|
|
|
|
pseudo-regs because it might give an incorrectly rounded result. */
|
|
|
|
|
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
|
|
|
|
|
{
|
|
|
|
|
int max_reg = max_reg_num ();
|
|
|
|
|
for (i = 0; i < max_reg; i++)
|
|
|
|
|
{
|
|
|
|
|
for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list (®_last_uses[i]);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
1999-03-07 12:22:10 +01:00
|
|
|
|
|
|
|
|
|
for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
reg_pending_sets_all = 1;
|
|
|
|
|
|
|
|
|
|
flush_pending_lists (insn, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* For all ASM_OPERANDS, we must traverse the vector of input operands.
|
|
|
|
|
We can not just fall through here since then we would be confused
|
|
|
|
|
by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
|
|
|
|
|
traditional asms unlike their normal usage. */
|
|
|
|
|
|
|
|
|
|
if (code == ASM_OPERANDS)
|
|
|
|
|
{
|
|
|
|
|
for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
|
|
|
|
|
sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case PRE_DEC:
|
|
|
|
|
case POST_DEC:
|
|
|
|
|
case PRE_INC:
|
|
|
|
|
case POST_INC:
|
|
|
|
|
/* These both read and modify the result. We must handle them as writes
|
|
|
|
|
to get proper dependencies for following instructions. We must handle
|
|
|
|
|
them as reads to get proper dependencies from this to previous
|
|
|
|
|
instructions. Thus we need to pass them to both sched_analyze_1
|
|
|
|
|
and sched_analyze_2. We must call sched_analyze_2 first in order
|
|
|
|
|
to get the proper antecedent for the read. */
|
|
|
|
|
sched_analyze_2 (XEXP (x, 0), insn);
|
|
|
|
|
sched_analyze_1 (x, insn);
|
|
|
|
|
return;
|
1998-02-17 22:35:43 +01:00
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
break;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Other cases: walk the insn. */
|
|
|
|
|
fmt = GET_RTX_FORMAT (code);
|
|
|
|
|
for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
|
|
|
|
|
{
|
|
|
|
|
if (fmt[i] == 'e')
|
|
|
|
|
sched_analyze_2 (XEXP (x, i), insn);
|
|
|
|
|
else if (fmt[i] == 'E')
|
|
|
|
|
for (j = 0; j < XVECLEN (x, i); j++)
|
|
|
|
|
sched_analyze_2 (XVECEXP (x, i, j), insn);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Analyze an INSN with pattern X to find all dependencies. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
sched_analyze_insn (x, insn, loop_notes)
|
|
|
|
|
rtx x, insn;
|
|
|
|
|
rtx loop_notes;
|
|
|
|
|
{
|
|
|
|
|
register RTX_CODE code = GET_CODE (x);
|
|
|
|
|
rtx link;
|
|
|
|
|
int maxreg = max_reg_num ();
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
if (code == SET || code == CLOBBER)
|
|
|
|
|
sched_analyze_1 (x, insn);
|
|
|
|
|
else if (code == PARALLEL)
|
|
|
|
|
{
|
|
|
|
|
register int i;
|
|
|
|
|
for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
|
|
|
|
|
{
|
|
|
|
|
code = GET_CODE (XVECEXP (x, 0, i));
|
|
|
|
|
if (code == SET || code == CLOBBER)
|
|
|
|
|
sched_analyze_1 (XVECEXP (x, 0, i), insn);
|
|
|
|
|
else
|
|
|
|
|
sched_analyze_2 (XVECEXP (x, 0, i), insn);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
sched_analyze_2 (x, insn);
|
|
|
|
|
|
|
|
|
|
/* Mark registers CLOBBERED or used by called function. */
|
|
|
|
|
if (GET_CODE (insn) == CALL_INSN)
|
|
|
|
|
for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
|
|
|
|
|
{
|
|
|
|
|
if (GET_CODE (XEXP (link, 0)) == CLOBBER)
|
|
|
|
|
sched_analyze_1 (XEXP (link, 0), insn);
|
|
|
|
|
else
|
|
|
|
|
sched_analyze_2 (XEXP (link, 0), insn);
|
|
|
|
|
}
|
|
|
|
|
|
1998-08-26 17:30:58 +02:00
|
|
|
|
/* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
|
|
|
|
|
block, then we must be sure that no instructions are scheduled across it.
|
1997-08-12 06:07:19 +02:00
|
|
|
|
Otherwise, the reg_n_refs info (which depends on loop_depth) would
|
|
|
|
|
become incorrect. */
|
|
|
|
|
|
|
|
|
|
if (loop_notes)
|
|
|
|
|
{
|
|
|
|
|
int max_reg = max_reg_num ();
|
1998-08-26 17:30:58 +02:00
|
|
|
|
int schedule_barrier_found = 0;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
rtx link;
|
|
|
|
|
|
1998-08-26 17:30:58 +02:00
|
|
|
|
/* Update loop_notes with any notes from this insn. Also determine
|
|
|
|
|
if any of the notes on the list correspond to instruction scheduling
|
|
|
|
|
barriers (loop, eh & setjmp notes, but not range notes. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
link = loop_notes;
|
|
|
|
|
while (XEXP (link, 1))
|
1998-08-26 17:30:58 +02:00
|
|
|
|
{
|
1998-08-27 16:15:32 +02:00
|
|
|
|
if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
|
|
|
|
|
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
|
|
|
|
|
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
|
|
|
|
|
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
|
|
|
|
|
|| INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
|
1998-08-26 17:30:58 +02:00
|
|
|
|
schedule_barrier_found = 1;
|
|
|
|
|
|
|
|
|
|
link = XEXP (link, 1);
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
XEXP (link, 1) = REG_NOTES (insn);
|
|
|
|
|
REG_NOTES (insn) = loop_notes;
|
1998-08-26 17:30:58 +02:00
|
|
|
|
|
|
|
|
|
/* Add dependencies if a scheduling barrier was found. */
|
|
|
|
|
if (schedule_barrier_found)
|
|
|
|
|
{
|
|
|
|
|
for (i = 0; i < max_reg; i++)
|
|
|
|
|
{
|
|
|
|
|
rtx u;
|
|
|
|
|
for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list (®_last_uses[i]);
|
1998-08-26 17:30:58 +02:00
|
|
|
|
|
|
|
|
|
for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
1999-03-07 12:22:10 +01:00
|
|
|
|
|
|
|
|
|
for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
1998-08-26 17:30:58 +02:00
|
|
|
|
}
|
|
|
|
|
reg_pending_sets_all = 1;
|
|
|
|
|
|
|
|
|
|
flush_pending_lists (insn, 0);
|
|
|
|
|
}
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Accumulate clobbers until the next set so that it will be output dependent
|
1999-03-07 12:22:10 +01:00
|
|
|
|
on all of them. At the next set we can clear the clobber list, since
|
1999-09-06 23:55:23 +02:00
|
|
|
|
subsequent sets will be output dependent on it. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
|
|
|
|
|
{
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list (®_last_sets[i]);
|
|
|
|
|
free_INSN_LIST_list (®_last_clobbers[i]);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
reg_last_sets[i]
|
1998-03-05 03:15:23 +01:00
|
|
|
|
= alloc_INSN_LIST (insn, NULL_RTX);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
});
|
1999-03-07 12:22:10 +01:00
|
|
|
|
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
|
|
|
|
|
{
|
|
|
|
|
reg_last_clobbers[i]
|
1999-09-06 23:55:23 +02:00
|
|
|
|
= alloc_INSN_LIST (insn,
|
|
|
|
|
reg_last_clobbers[i]);
|
1999-03-07 12:22:10 +01:00
|
|
|
|
});
|
1997-08-12 06:07:19 +02:00
|
|
|
|
CLEAR_REG_SET (reg_pending_sets);
|
1999-03-07 12:22:10 +01:00
|
|
|
|
CLEAR_REG_SET (reg_pending_clobbers);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
if (reg_pending_sets_all)
|
|
|
|
|
{
|
|
|
|
|
for (i = 0; i < maxreg; i++)
|
1998-03-05 03:15:23 +01:00
|
|
|
|
{
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list (®_last_sets[i]);
|
1999-10-19 00:20:27 +02:00
|
|
|
|
free_INSN_LIST_list (®_last_clobbers[i]);
|
1998-03-05 03:15:23 +01:00
|
|
|
|
reg_last_sets[i] = alloc_INSN_LIST (insn, NULL_RTX);
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
reg_pending_sets_all = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Handle function calls and function returns created by the epilogue
|
|
|
|
|
threading code. */
|
|
|
|
|
if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
|
|
|
|
|
{
|
|
|
|
|
rtx dep_insn;
|
|
|
|
|
rtx prev_dep_insn;
|
|
|
|
|
|
|
|
|
|
/* When scheduling instructions, we make sure calls don't lose their
|
|
|
|
|
accompanying USE insns by depending them one on another in order.
|
|
|
|
|
|
|
|
|
|
Also, we must do the same thing for returns created by the epilogue
|
|
|
|
|
threading code. Note this code works only in this special case,
|
|
|
|
|
because other passes make no guarantee that they will never emit
|
|
|
|
|
an instruction between a USE and a RETURN. There is such a guarantee
|
|
|
|
|
for USE instructions immediately before a call. */
|
|
|
|
|
|
|
|
|
|
prev_dep_insn = insn;
|
|
|
|
|
dep_insn = PREV_INSN (insn);
|
|
|
|
|
while (GET_CODE (dep_insn) == INSN
|
|
|
|
|
&& GET_CODE (PATTERN (dep_insn)) == USE
|
|
|
|
|
&& GET_CODE (XEXP (PATTERN (dep_insn), 0)) == REG)
|
|
|
|
|
{
|
|
|
|
|
SCHED_GROUP_P (prev_dep_insn) = 1;
|
|
|
|
|
|
|
|
|
|
/* Make a copy of all dependencies on dep_insn, and add to insn.
|
|
|
|
|
This is so that all of the dependencies will apply to the
|
|
|
|
|
group. */
|
|
|
|
|
|
|
|
|
|
for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
|
|
|
|
|
add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
|
|
|
|
|
|
|
|
|
|
prev_dep_insn = dep_insn;
|
|
|
|
|
dep_insn = PREV_INSN (dep_insn);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
|
|
|
|
|
for every dependency. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
sched_analyze (head, tail)
|
|
|
|
|
rtx head, tail;
|
|
|
|
|
{
|
|
|
|
|
register rtx insn;
|
|
|
|
|
register rtx u;
|
|
|
|
|
rtx loop_notes = 0;
|
|
|
|
|
|
|
|
|
|
for (insn = head;; insn = NEXT_INSN (insn))
|
|
|
|
|
{
|
|
|
|
|
if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
|
|
|
|
|
{
|
1999-08-29 01:20:34 +02:00
|
|
|
|
/* Clear out the stale LOG_LINKS from flow. */
|
|
|
|
|
free_INSN_LIST_list (&LOG_LINKS (insn));
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Make each JUMP_INSN a scheduling barrier for memory
|
|
|
|
|
references. */
|
1998-08-18 23:58:15 +02:00
|
|
|
|
if (GET_CODE (insn) == JUMP_INSN)
|
|
|
|
|
last_pending_memory_flush
|
|
|
|
|
= alloc_INSN_LIST (insn, last_pending_memory_flush);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
sched_analyze_insn (PATTERN (insn), insn, loop_notes);
|
|
|
|
|
loop_notes = 0;
|
|
|
|
|
}
|
|
|
|
|
else if (GET_CODE (insn) == CALL_INSN)
|
|
|
|
|
{
|
|
|
|
|
rtx x;
|
|
|
|
|
register int i;
|
|
|
|
|
|
|
|
|
|
CANT_MOVE (insn) = 1;
|
|
|
|
|
|
1999-08-29 01:20:34 +02:00
|
|
|
|
/* Clear out the stale LOG_LINKS from flow. */
|
|
|
|
|
free_INSN_LIST_list (&LOG_LINKS (insn));
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
/* Any instruction using a hard register which may get clobbered
|
|
|
|
|
by a call needs to be marked as dependent on this call.
|
|
|
|
|
This prevents a use of a hard return reg from being moved
|
|
|
|
|
past a void call (i.e. it does not explicitly set the hard
|
|
|
|
|
return reg). */
|
|
|
|
|
|
|
|
|
|
/* If this call is followed by a NOTE_INSN_SETJMP, then assume that
|
|
|
|
|
all registers, not just hard registers, may be clobbered by this
|
|
|
|
|
call. */
|
|
|
|
|
|
|
|
|
|
/* Insn, being a CALL_INSN, magically depends on
|
|
|
|
|
`last_function_call' already. */
|
|
|
|
|
|
|
|
|
|
if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
|
|
|
|
|
&& NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
|
|
|
|
|
{
|
|
|
|
|
int max_reg = max_reg_num ();
|
|
|
|
|
for (i = 0; i < max_reg; i++)
|
|
|
|
|
{
|
|
|
|
|
for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list (®_last_uses[i]);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
1999-03-07 12:22:10 +01:00
|
|
|
|
|
|
|
|
|
for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), 0);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
reg_pending_sets_all = 1;
|
|
|
|
|
|
1999-10-11 01:45:27 +02:00
|
|
|
|
/* Add a pair of REG_SAVE_NOTEs which we will later
|
1997-08-12 06:07:19 +02:00
|
|
|
|
convert back into a NOTE_INSN_SETJMP note. See
|
|
|
|
|
reemit_notes for why we use a pair of NOTEs. */
|
1999-10-11 01:45:27 +02:00
|
|
|
|
REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
|
1998-03-05 03:15:23 +01:00
|
|
|
|
GEN_INT (0),
|
|
|
|
|
REG_NOTES (insn));
|
1999-10-11 01:45:27 +02:00
|
|
|
|
REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
|
1998-03-05 03:15:23 +01:00
|
|
|
|
GEN_INT (NOTE_INSN_SETJMP),
|
|
|
|
|
REG_NOTES (insn));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
|
|
|
|
|
if (call_used_regs[i] || global_regs[i])
|
|
|
|
|
{
|
|
|
|
|
for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
|
|
|
|
|
|
|
|
|
|
for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
|
|
|
|
|
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
|
|
|
|
|
|
1999-06-20 00:54:14 +02:00
|
|
|
|
SET_REGNO_REG_SET (reg_pending_clobbers, i);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* For each insn which shouldn't cross a call, add a dependence
|
|
|
|
|
between that insn and this call insn. */
|
|
|
|
|
x = LOG_LINKS (sched_before_next_call);
|
|
|
|
|
while (x)
|
|
|
|
|
{
|
|
|
|
|
add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
|
|
|
|
|
x = XEXP (x, 1);
|
|
|
|
|
}
|
1999-09-18 14:06:24 +02:00
|
|
|
|
free_INSN_LIST_list (&LOG_LINKS (sched_before_next_call));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
sched_analyze_insn (PATTERN (insn), insn, loop_notes);
|
|
|
|
|
loop_notes = 0;
|
|
|
|
|
|
|
|
|
|
/* In the absence of interprocedural alias analysis, we must flush
|
|
|
|
|
all pending reads and writes, and start new dependencies starting
|
|
|
|
|
from here. But only flush writes for constant calls (which may
|
|
|
|
|
be passed a pointer to something we haven't written yet). */
|
|
|
|
|
flush_pending_lists (insn, CONST_CALL_P (insn));
|
|
|
|
|
|
|
|
|
|
/* Depend this function call (actually, the user of this
|
|
|
|
|
function call) on all hard register clobberage. */
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* last_function_call is now a list of insns. */
|
lists.c (unused_insn_list, [...]): New file for maintaining various types of lists.
Wed Aug 25 13:41:47 EDT 1999 Andrew MacLeod <amacleod@cygnus.com>
* lists.c (unused_insn_list, unused_expr_list): New file for
maintaining various types of lists. New statics for maintaining a
cache of available INSN_LIST and EXPR_LIST nodes.
(free_list): Static function for freeing a list of INSN/EXPR nodes.
(alloc_INSN_LIST): Function to get a free INSN_LIST node.
(alloc_EXPR_LIST): Function to get a free EXPR_LIST node.
(init_EXPR_INSN_LIST_cache): Initialize the cache lists.
(free_EXPR_LIST_list): Free an entire list of EXPR_LIST nodes.
(free_INSN_LIST_list): Free an entire list of INSN_LIST nodes.
(free_EXPR_LIST_node): Free an individual EXPR_LIST node.
(free_INSN_LIST_node): Free an individual INSN_LIST node.
* haifa-sched.c (unused_insn_list, unused_expr_list): Moved to flow.c
(free_list, alloc_INSN_LIST, alloc_EXPR_LIST): Moved to flow.c
(remove_dependence, free_pending_lists): Use new global routines.
(flush_pending_lists, sched_analyze_insn): Use new global routines.
(sched_analyze, compute_block_backward_dependences): Use new routines.
(sched_analyze_1, sched_analyze_2): Use new routines.
(schedule_insns): Use new global routines.
* rtl.h (init_EXPR_INSN_LIST_cache, free_EXPR_LIST_list): Add function
prototypes.
(free_INSN_LIST_list, free_EXPR_LIST_node): Add prototypes.
(free_INSN_LIST_node, alloc_INSN_LIST, alloc_EXPR_LIST): Add function
prototypes.
* toplev.c (rest_of_compilation): Initialize node cache.
* Makefile.in (OBJS): Add lists.o to list of object files.
(lists.o): Add dependancies.
From-SVN: r28864
1999-08-25 19:50:53 +02:00
|
|
|
|
free_INSN_LIST_list(&last_function_call);
|
1998-03-05 03:15:23 +01:00
|
|
|
|
last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* See comments on reemit_notes as to why we do this.
|
|
|
|
|
??? Actually, the reemit_notes just say what is done, not why. */
|
1998-08-31 11:55:31 +02:00
|
|
|
|
|
|
|
|
|
else if (GET_CODE (insn) == NOTE
|
|
|
|
|
&& (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_START
|
|
|
|
|
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
|
|
|
|
|
{
|
1999-10-11 01:45:27 +02:00
|
|
|
|
loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
|
1998-08-31 11:55:31 +02:00
|
|
|
|
loop_notes);
|
1999-10-11 01:45:27 +02:00
|
|
|
|
loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
|
1998-08-31 11:55:31 +02:00
|
|
|
|
GEN_INT (NOTE_LINE_NUMBER (insn)),
|
|
|
|
|
loop_notes);
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
else if (GET_CODE (insn) == NOTE
|
|
|
|
|
&& (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
|
|
|
|
|
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
|
|
|
|
|
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
|
|
|
|
|
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
|
|
|
|
|
|| (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
|
|
|
|
|
&& GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
|
|
|
|
|
{
|
1999-09-22 00:28:47 +02:00
|
|
|
|
rtx rtx_region;
|
1999-09-16 23:00:21 +02:00
|
|
|
|
|
1999-09-16 01:05:05 +02:00
|
|
|
|
if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
|
|
|
|
|
|| NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
|
1999-09-22 00:28:47 +02:00
|
|
|
|
rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
|
1999-09-16 23:00:21 +02:00
|
|
|
|
else
|
1999-09-22 00:28:47 +02:00
|
|
|
|
rtx_region = GEN_INT (0);
|
1999-09-16 01:05:05 +02:00
|
|
|
|
|
1999-10-11 01:45:27 +02:00
|
|
|
|
loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
|
1999-09-22 00:28:47 +02:00
|
|
|
|
rtx_region,
|
1999-09-16 23:00:21 +02:00
|
|
|
|
loop_notes);
|
1999-10-11 01:45:27 +02:00
|
|
|
|
loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
|
1998-03-05 03:15:23 +01:00
|
|
|
|
GEN_INT (NOTE_LINE_NUMBER (insn)),
|
|
|
|
|
loop_notes);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (insn == tail)
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
abort ();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Macros and functions for keeping the priority queue sorted, and
|
|
|
|
|
dealing with queueing and dequeueing of instructions. */
|
|
|
|
|
|
|
|
|
|
/* Sort READY (a vector of N_READY insns) so the best candidate ends up
   where the scheduler expects it.  Two elements need only a single
   compare-and-swap; larger vectors fall back to qsort with
   rank_for_schedule as the comparator.  Multi-statement macro wrapped
   in do/while (0) so it behaves as one statement.  */
#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
         qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }  \
while (0)
|
|
|
|
|
|
|
|
|
|
/* Returns a positive value if x is preferred; returns a negative value if
|
|
|
|
|
y is preferred. Should never return 0, since that will make the sort
|
|
|
|
|
unstable. */
|
|
|
|
|
|
|
|
|
|
static int
rank_for_schedule (x, y)
     const PTR x;
     const PTR y;
{
  /* NOTE(review): X and Y are deliberately dereferenced in swapped order
     (tmp from Y, tmp2 from X), which reverses the qsort order -- presumably
     so the preferred insn sorts toward the end of the ready vector; confirm
     against the consumer in schedule_block.  */
  rtx tmp = *(rtx *)y;
  rtx tmp2 = *(rtx *)x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, spec_val, prob_val, weight_val;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
  if (priority_val)
    return priority_val;

  /* Prefer an insn with smaller contribution to register pressure.
     Only meaningful before reload; afterwards register lifetimes are
     fixed.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return (weight_val);

  /* Some comparisons make sense in interblock scheduling only.  */
  if (INSN_BB (tmp) != INSN_BB (tmp2))
    {
      /* Prefer an inblock motion over an interblock motion.  */
      if ((INSN_BB (tmp2) == target_bb) && (INSN_BB (tmp) != target_bb))
	return 1;
      if ((INSN_BB (tmp) == target_bb) && (INSN_BB (tmp2) != target_bb))
	return -1;

      /* Prefer a useful motion over a speculative one.  */
      if ((spec_val = IS_SPECULATIVE_INSN (tmp) - IS_SPECULATIVE_INSN (tmp2)))
	return (spec_val);

      /* Prefer a more probable (speculative) insn.  */
      prob_val = INSN_PROBABILITY (tmp2) - INSN_PROBABILITY (tmp);
      if (prob_val)
	return (prob_val);
    }

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (last_scheduled_insn)
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last schedule insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp_class = 1;
      else
	tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
	tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
|
|
|
|
|
|
|
|
|
|
/* Resort the array A in which only element at index N may be out of order. */
|
|
|
|
|
|
1998-05-12 14:20:18 +02:00
|
|
|
|
HAIFA_INLINE static void
|
1997-08-12 06:07:19 +02:00
|
|
|
|
swap_sort (a, n)
|
|
|
|
|
rtx *a;
|
|
|
|
|
int n;
|
|
|
|
|
{
|
|
|
|
|
rtx insn = a[n - 1];
|
|
|
|
|
int i = n - 2;
|
|
|
|
|
|
|
|
|
|
while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
|
|
|
|
|
{
|
|
|
|
|
a[i + 1] = a[i];
|
|
|
|
|
i -= 1;
|
|
|
|
|
}
|
|
|
|
|
a[i + 1] = insn;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Highest priority among the competing insns; set in schedule_insn
   each time an insn is issued.  */
static int max_priority;
|
|
|
|
|
|
|
|
|
|
/* Add INSN to the insn queue so that it can be executed at least
|
|
|
|
|
N_CYCLES after the currently executing insn. Preserve insns
|
|
|
|
|
chain for debugging purposes. */
|
|
|
|
|
|
1998-05-12 14:20:18 +02:00
|
|
|
|
HAIFA_INLINE static void
|
1997-08-12 06:07:19 +02:00
|
|
|
|
queue_insn (insn, n_cycles)
|
|
|
|
|
rtx insn;
|
|
|
|
|
int n_cycles;
|
|
|
|
|
{
|
|
|
|
|
int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
|
1998-03-05 03:15:23 +01:00
|
|
|
|
rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
insn_queue[next_q] = link;
|
|
|
|
|
q_size += 1;
|
|
|
|
|
|
|
|
|
|
if (sched_verbose >= 2)
|
|
|
|
|
{
|
|
|
|
|
fprintf (dump, ";;\t\tReady-->Q: insn %d: ", INSN_UID (insn));
|
|
|
|
|
|
|
|
|
|
if (INSN_BB (insn) != target_bb)
|
1999-10-19 00:20:27 +02:00
|
|
|
|
fprintf (dump, "(b%d) ", BLOCK_NUM (insn));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
fprintf (dump, "queued for %d cycles.\n", n_cycles);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* PREV is an insn that is ready to execute. Adjust its priority if that
|
1999-10-11 01:45:27 +02:00
|
|
|
|
will help shorten or lengthen register lifetimes as appropriate. Also
|
|
|
|
|
provide a hook for the target to tweek itself. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-05-12 14:20:18 +02:00
|
|
|
|
HAIFA_INLINE static void
adjust_priority (prev)
     rtx prev ATTRIBUTE_UNUSED;
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  /* Give the target a chance to tweak the insn's priority via its
     ADJUST_PRIORITY macro, when one is defined.  */
#ifdef ADJUST_PRIORITY
  ADJUST_PRIORITY (prev);
#endif
}
|
|
|
|
|
|
1998-08-26 20:47:42 +02:00
|
|
|
|
/* Clock at which the previous instruction was issued.  Used by
   schedule_insn to decide whether an insn starts a new issue group
   (TImode annotation).  */
static int last_clock_var;
|
|
|
|
|
|
1997-08-12 06:07:19 +02:00
|
|
|
|
/* INSN is the "currently executing insn". Launch each insn which was
|
|
|
|
|
waiting on INSN. READY is a vector of insns which are ready to fire.
|
|
|
|
|
N_READY is the number of elements in READY. CLOCK is the current
|
|
|
|
|
cycle. */
|
|
|
|
|
|
|
|
|
|
static int
schedule_insn (insn, ready, n_ready, clock)
     rtx insn;
     rtx *ready;
     int n_ready;
     int clock;
{
  rtx link;
  int unit;

  unit = insn_unit (insn);

  if (sched_verbose >= 2)
    {
      fprintf (dump, ";;\t\t--> scheduling insn <<<%d>>> on unit ",
	       INSN_UID (insn));
      insn_print_units (insn);
      fprintf (dump, "\n");
    }

  if (sched_verbose && unit == -1)
    visualize_no_unit (insn);

  if (MAX_BLOCKAGE > 1 || issue_rate > 1 || sched_verbose)
    schedule_unit (unit, insn, clock);

  /* Nothing waits on this insn; the ready list is unchanged.  */
  if (INSN_DEPEND (insn) == 0)
    return n_ready;

  /* This is used by the function adjust_priority above.  */
  if (n_ready > 0)
    max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
  else
    max_priority = INSN_PRIORITY (insn);

  /* Resolve each forward dependence of INSN; an insn whose dependence
     count drops to zero becomes a candidate for the ready list or the
     stall queue.  */
  for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);
      int cost = insn_cost (insn, link, next);

      INSN_TICK (next) = MAX (INSN_TICK (next), clock + cost);

      if ((INSN_DEP_COUNT (next) -= 1) == 0)
	{
	  int effective_cost = INSN_TICK (next) - clock;

	  /* For speculative insns, before inserting to ready/queue,
	     check live, exception-free, and issue-delay.  */
	  if (INSN_BB (next) != target_bb
	      && (!IS_VALID (INSN_BB (next))
		  || CANT_MOVE (next)
		  || (IS_SPECULATIVE_INSN (next)
		      && (insn_issue_delay (next) > 3
			  || !check_live (next, INSN_BB (next))
			  || !is_exception_free (next, INSN_BB (next), target_bb)))))
	    continue;

	  if (sched_verbose >= 2)
	    {
	      fprintf (dump, ";;\t\tdependences resolved: insn %d ",
		       INSN_UID (next));

	      if (current_nr_blocks > 1 && INSN_BB (next) != target_bb)
		fprintf (dump, "/b%d ", BLOCK_NUM (next));

	      if (effective_cost < 1)
		fprintf (dump, "into ready\n");
	      else
		fprintf (dump, "into queue with cost=%d\n", effective_cost);
	    }

	  /* Adjust the priority of NEXT and either put it on the ready
	     list or queue it.  NOTE(review): the caller must guarantee
	     READY has room for every newly-readied insn.  */
	  adjust_priority (next);
	  if (effective_cost < 1)
	    ready[n_ready++] = next;
	  else
	    queue_insn (next, effective_cost);
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (reload_completed && issue_rate > 1)
    {
      PUT_MODE (insn, clock > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock;
    }

  /* Return the (possibly grown) number of ready insns.  */
  return n_ready;
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Functions for handling of notes. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Delete notes beginning with INSN and put them in the chain
|
|
|
|
|
of notes ended by NOTE_LIST.
|
|
|
|
|
Returns the insn following the notes. */
|
|
|
|
|
|
|
|
|
|
static rtx
unlink_other_notes (insn, tail)
     rtx insn, tail;
{
  rtx prev = PREV_INSN (insn);

  /* Walk the run of consecutive notes starting at INSN, splicing each
     one out of the insn chain.  */
  while (insn != tail && GET_CODE (insn) == NOTE)
    {
      rtx next = NEXT_INSN (insn);
      /* Delete the note from its current position.  */
      if (prev)
	NEXT_INSN (prev) = next;
      if (next)
	PREV_INSN (next) = prev;

      /* See sched_analyze to see how these are handled.  Notes of
	 these kinds are kept attached to insns rather than saved on
	 NOTE_LIST.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_SETJMP
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_START
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_RANGE_END
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
	{
	  /* Insert the note at the end of the notes list.  */
	  PREV_INSN (insn) = note_list;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	}

      insn = next;
    }
  /* Return the first non-note insn (or TAIL) following the run.  */
  return insn;
}
|
|
|
|
|
|
|
|
|
|
/* Delete line notes beginning with INSN. Record line-number notes so
|
|
|
|
|
they can be reused. Returns the insn following the notes. */
|
|
|
|
|
|
|
|
|
|
static rtx
unlink_line_notes (insn, tail)
     rtx insn, tail;
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && GET_CODE (insn) == NOTE)
    {
      rtx next = NEXT_INSN (insn);

      /* Only line-number notes (positive NOTE_LINE_NUMBER) are removed,
	 and only when debug info is being generated.  */
      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
	{
	  /* Delete the note from its current position.  */
	  if (prev)
	    NEXT_INSN (prev) = next;
	  if (next)
	    PREV_INSN (next) = prev;

	  /* Record line-number notes so they can be reused.  */
	  LINE_NOTE (insn) = insn;
	}
      else
	/* Non-line notes stay in place; advance PREV past them so later
	   unlinks splice correctly.  */
	prev = insn;

      insn = next;
    }
  /* Return the insn following the scanned run of notes.  */
  return insn;
}
|
|
|
|
|
|
|
|
|
|
/* Return the head and tail pointers of BB. */
|
|
|
|
|
|
1998-05-12 14:20:18 +02:00
|
|
|
|
HAIFA_INLINE static void
|
1997-08-12 06:07:19 +02:00
|
|
|
|
get_block_head_tail (bb, headp, tailp)
|
|
|
|
|
int bb;
|
|
|
|
|
rtx *headp;
|
|
|
|
|
rtx *tailp;
|
|
|
|
|
{
|
|
|
|
|
|
1997-10-01 07:08:31 +02:00
|
|
|
|
rtx head;
|
|
|
|
|
rtx tail;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
int b;
|
|
|
|
|
|
|
|
|
|
b = BB_TO_BLOCK (bb);
|
|
|
|
|
|
|
|
|
|
/* HEAD and TAIL delimit the basic block being scheduled. */
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
head = BLOCK_HEAD (b);
|
|
|
|
|
tail = BLOCK_END (b);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
/* Don't include any notes or labels at the beginning of the
|
|
|
|
|
basic block, or notes at the ends of basic blocks. */
|
|
|
|
|
while (head != tail)
|
|
|
|
|
{
|
|
|
|
|
if (GET_CODE (head) == NOTE)
|
|
|
|
|
head = NEXT_INSN (head);
|
|
|
|
|
else if (GET_CODE (tail) == NOTE)
|
|
|
|
|
tail = PREV_INSN (tail);
|
|
|
|
|
else if (GET_CODE (head) == CODE_LABEL)
|
|
|
|
|
head = NEXT_INSN (head);
|
|
|
|
|
else
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*headp = head;
|
|
|
|
|
*tailp = tail;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Delete line notes from bb. Save them so they can be later restored
|
|
|
|
|
(in restore_line_notes ()). */
|
|
|
|
|
|
|
|
|
|
static void
rm_line_notes (bb)
     int bb;
{
  rtx next_tail;
  rtx tail;
  rtx head;
  rtx insn;

  get_block_head_tail (bb, &head, &tail);

  /* An "empty" block: head == tail and it is not a real insn.  */
  if (head == tail
      && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (GET_CODE (insn) == NOTE)
	{
	  prev = insn;
	  /* unlink_line_notes advances past the whole run of notes;
	     the loop increment then steps from the returned insn.  */
	  insn = unlink_line_notes (insn, next_tail);

	  /* Sanity checks: a note must not be the block head or tail,
	     and the scan must not run off the end of the block.  */
	  if (prev == tail)
	    abort ();
	  if (prev == head)
	    abort ();
	  if (insn == next_tail)
	    abort ();
	}
    }
}
|
|
|
|
|
|
|
|
|
|
/* Save line number notes for each insn in bb. */
|
|
|
|
|
|
|
|
|
|
static void
save_line_notes (bb)
     int bb;
{
  rtx head, tail;
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[BB_TO_BLOCK (bb)];
  rtx insn;

  get_block_head_tail (bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  /* Record, for every insn, the line note in effect at that insn, so
     restore_line_notes can re-emit notes after scheduling.  Note the
     scan starts at the raw BLOCK_HEAD, not the trimmed HEAD.  */
  for (insn = BLOCK_HEAD (BB_TO_BLOCK (bb));
       insn != next_tail;
       insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* After bb was scheduled, insert line notes into the insns list. */
|
|
|
|
|
|
|
|
|
|
static void
restore_line_notes (bb)
     int bb;
{
  rtx line, note, prev, new;
  int added_notes = 0;		/* Count of freshly emitted notes, for dump.  */
  int b;
  rtx head, next_tail, insn;

  b = BB_TO_BLOCK (bb);

  head = BLOCK_HEAD (b);
  next_tail = NEXT_INSN (BLOCK_END (b));

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    /* This used to emit line number notes before every non-deleted note.
       However, this confuses a debugger, because line notes not separated
       by real instructions all end up at the same address.  I can find no
       use for line number notes before other notes, so none are emitted.  */
    else if (GET_CODE (insn) != NOTE
	     && (note = LINE_NOTE (insn)) != 0
	     && note != line
	     && (line == 0
		 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
		 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
      {
	line = note;
	prev = PREV_INSN (insn);
	if (LINE_NOTE (note))
	  {
	    /* Re-use the original line-number note.  */
	    LINE_NOTE (note) = 0;
	    PREV_INSN (note) = prev;
	    NEXT_INSN (prev) = note;
	    PREV_INSN (insn) = note;
	    NEXT_INSN (note) = insn;
	  }
	else
	  {
	    /* The saved note is still linked elsewhere; emit a copy.  */
	    added_notes++;
	    new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
	    NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
	    RTX_INTEGRATED_P (new) = RTX_INTEGRATED_P (note);
	  }
      }
  if (sched_verbose && added_notes)
    fprintf (dump, ";; added %d line-number notes\n", added_notes);
}
|
|
|
|
|
|
|
|
|
|
/* After scheduling the function, delete redundant line notes from the
|
|
|
|
|
insns list. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
rm_redundant_line_notes ()
|
|
|
|
|
{
|
|
|
|
|
rtx line = 0;
|
|
|
|
|
rtx insn = get_insns ();
|
|
|
|
|
int active_insn = 0;
|
|
|
|
|
int notes = 0;
|
|
|
|
|
|
|
|
|
|
/* Walk the insns deleting redundant line-number notes. Many of these
|
|
|
|
|
are already present. The remainder tend to occur at basic
|
|
|
|
|
block boundaries. */
|
|
|
|
|
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
|
|
|
|
|
if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
|
|
|
|
|
{
|
|
|
|
|
/* If there are no active insns following, INSN is redundant. */
|
|
|
|
|
if (active_insn == 0)
|
|
|
|
|
{
|
|
|
|
|
notes++;
|
|
|
|
|
NOTE_SOURCE_FILE (insn) = 0;
|
|
|
|
|
NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
|
|
|
|
|
}
|
|
|
|
|
/* If the line number is unchanged, LINE is redundant. */
|
|
|
|
|
else if (line
|
|
|
|
|
&& NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
|
|
|
|
|
&& NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
|
|
|
|
|
{
|
|
|
|
|
notes++;
|
|
|
|
|
NOTE_SOURCE_FILE (line) = 0;
|
|
|
|
|
NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
|
|
|
|
|
line = insn;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
line = insn;
|
|
|
|
|
active_insn = 0;
|
|
|
|
|
}
|
|
|
|
|
else if (!((GET_CODE (insn) == NOTE
|
|
|
|
|
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
|
|
|
|
|
|| (GET_CODE (insn) == INSN
|
|
|
|
|
&& (GET_CODE (PATTERN (insn)) == USE
|
|
|
|
|
|| GET_CODE (PATTERN (insn)) == CLOBBER))))
|
|
|
|
|
active_insn++;
|
|
|
|
|
|
|
|
|
|
if (sched_verbose && notes)
|
|
|
|
|
fprintf (dump, ";; deleted %d line-number notes\n", notes);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Delete notes between head and tail and put them in the chain
|
|
|
|
|
of notes ended by NOTE_LIST. */
|
|
|
|
|
|
|
|
|
|
static void
rm_other_notes (head, tail)
     rtx head;
     rtx tail;
{
  rtx next_tail;
  rtx insn;

  /* An "empty" region: head == tail and it is not a real insn.  */
  if (head == tail
      && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (GET_CODE (insn) == NOTE)
	{
	  prev = insn;

	  /* unlink_other_notes returns the first non-note insn past the
	     run; the loop increment then steps from there.  */
	  insn = unlink_other_notes (insn, next_tail);

	  /* Sanity checks mirroring rm_line_notes: no note may be the
	     region head or tail, nor may the scan run off the end.  */
	  if (prev == tail)
	    abort ();
	  if (prev == head)
	    abort ();
	  if (insn == next_tail)
	    abort ();
	}
    }
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Functions for computation of registers live/usage info. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-10-11 01:45:27 +02:00
|
|
|
|
/* Calculate INSN_REG_WEIGHT for all insns of a block. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static void
find_insn_reg_weight (bb)
     int bb;
{
  rtx insn, next_tail, head, tail;

  get_block_head_tail (bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      /* Net register pressure contribution of INSN:
	 +1 per register set/clobbered, -1 per register dying here.  */
      int reg_weight = 0;
      rtx x;

      /* Handle register life information.  */
      if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
	continue;

      /* Increment weight for each register born here.  */
      x = PATTERN (insn);
      if ((GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
	  && register_operand (SET_DEST (x), VOIDmode))
	reg_weight++;
      else if (GET_CODE (x) == PARALLEL)
	{
	  int j;
	  for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	    {
	      /* X is clobbered inside the loop, so re-fetch each element
		 from PATTERN (insn) rather than from X.  */
	      x = XVECEXP (PATTERN (insn), 0, j);
	      if ((GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
		  && register_operand (SET_DEST (x), VOIDmode))
		reg_weight++;
	    }
	}

      /* Decrement weight for each register that dies here.  */
      for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
	{
	  if (REG_NOTE_KIND (x) == REG_DEAD
	      || REG_NOTE_KIND (x) == REG_UNUSED)
	    reg_weight--;
	}

      INSN_REG_WEIGHT (insn) = reg_weight;
    }
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Scheduling clock, modified in schedule_block () and queue_to_ready ().
   Counts cycles within the block currently being scheduled.  */
static int clock_var;
|
|
|
|
|
|
|
|
|
|
/* Move insns that became ready to fire from queue to ready list. */
|
|
|
|
|
|
|
|
|
|
static int
queue_to_ready (ready, n_ready)
     rtx ready[];
     int n_ready;
{
  rtx insn;
  rtx link;

  /* Advance to the queue bucket for the new cycle.  */
  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {

      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (dump, ";;\t\tQ-->Ready: insn %d: ", INSN_UID (insn));

      if (sched_verbose >= 2 && INSN_BB (insn) != target_bb)
	fprintf (dump, "(b%d) ", BLOCK_NUM (insn));

      ready[n_ready++] = insn;
      if (sched_verbose >= 2)
	fprintf (dump, "moving to ready without stalls\n");
    }
  insn_queue[q_ptr] = 0;

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (n_ready == 0)
    {
      register int stalls;

      for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (dump, ";;\t\tQ-->Ready: insn %d: ", INSN_UID (insn));

		  if (sched_verbose >= 2 && INSN_BB (insn) != target_bb)
		    fprintf (dump, "(b%d) ", BLOCK_NUM (insn));

		  ready[n_ready++] = insn;
		  if (sched_verbose >= 2)
		    fprintf (dump, "moving to ready with %d stalls\n", stalls);
		}
	      insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;

	      if (n_ready)
		break;
	    }
	}

      if (sched_verbose && stalls)
	visualize_stall_cycles (BB_TO_BLOCK (target_bb), stalls);

      /* Account for the cycles we stalled.  */
      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
  /* Return the updated count of ready insns.  */
  return n_ready;
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Print the ready list for debugging purposes. Callable from debugger. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-06-21 19:59:03 +02:00
|
|
|
|
static void
|
1997-08-12 06:07:19 +02:00
|
|
|
|
debug_ready_list (ready, n_ready)
|
|
|
|
|
rtx ready[];
|
|
|
|
|
int n_ready;
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < n_ready; i++)
|
|
|
|
|
{
|
|
|
|
|
fprintf (dump, " %d", INSN_UID (ready[i]));
|
|
|
|
|
if (current_nr_blocks > 1 && INSN_BB (ready[i]) != target_bb)
|
1999-10-19 00:20:27 +02:00
|
|
|
|
fprintf (dump, "/b%d", BLOCK_NUM (ready[i]));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
fprintf (dump, "\n");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Print names of units on which insn can/should execute, for debugging. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
insn_print_units (insn)
|
|
|
|
|
rtx insn;
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
int unit = insn_unit (insn);
|
|
|
|
|
|
|
|
|
|
if (unit == -1)
|
|
|
|
|
fprintf (dump, "none");
|
|
|
|
|
else if (unit >= 0)
|
|
|
|
|
fprintf (dump, "%s", function_units[unit].name);
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
fprintf (dump, "[");
|
|
|
|
|
for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
|
|
|
|
|
if (unit & 1)
|
|
|
|
|
{
|
|
|
|
|
fprintf (dump, "%s", function_units[i].name);
|
|
|
|
|
if (unit != 1)
|
|
|
|
|
fprintf (dump, " ");
|
|
|
|
|
}
|
|
|
|
|
fprintf (dump, "]");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* MAX_VISUAL_LINES is the maximum number of lines in visualization table
   of a basic block.  If more lines are needed, the table is split in two.
   n_visual_lines is the number of lines printed so far for a block.
   visual_tbl contains the block visualization info.
   vis_no_unit holds insns in a cycle that are not mapped to any unit.  */
#define MAX_VISUAL_LINES 100
#define INSN_LEN 30
int n_visual_lines;
char *visual_tbl;
int n_vis_no_unit;
rtx vis_no_unit[10];
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Finds units that are in use in this fuction. Required only
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for visualization. */
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
init_target_units ()
|
|
|
|
|
{
|
|
|
|
|
rtx insn;
|
|
|
|
|
int unit;
|
|
|
|
|
|
|
|
|
|
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
|
|
|
|
|
{
|
|
|
|
|
if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
unit = insn_unit (insn);
|
|
|
|
|
|
|
|
|
|
if (unit < 0)
|
|
|
|
|
target_units |= ~unit;
|
|
|
|
|
else
|
|
|
|
|
target_units |= (1 << unit);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Return the length of the visualization table. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
get_visual_tbl_length ()
|
|
|
|
|
{
|
|
|
|
|
int unit, i;
|
|
|
|
|
int n, n1;
|
|
|
|
|
char *s;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Compute length of one field in line. */
|
1999-08-25 06:58:36 +02:00
|
|
|
|
s = (char *) alloca (INSN_LEN + 6);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
sprintf (s, " %33s", "uname");
|
|
|
|
|
n1 = strlen (s);
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Compute length of one line. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
n = strlen (";; ");
|
|
|
|
|
n += n1;
|
|
|
|
|
for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
|
|
|
|
|
if (function_units[unit].bitmask & target_units)
|
|
|
|
|
for (i = 0; i < function_units[unit].multiplicity; i++)
|
|
|
|
|
n += n1;
|
|
|
|
|
n += n1;
|
|
|
|
|
n += strlen ("\n") + 2;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Compute length of visualization string. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
return (MAX_VISUAL_LINES * n);
|
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Init block visualization debugging info. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
init_block_visualization ()
|
|
|
|
|
{
|
|
|
|
|
strcpy (visual_tbl, "");
|
|
|
|
|
n_visual_lines = 0;
|
|
|
|
|
n_vis_no_unit = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#define BUF_LEN 256

/* Append STR to the string ending at CUR inside BUF, never writing past
   BUF + BUF_LEN - 2 and always NUL-terminating.  Returns the new end
   position (the NUL), so calls can be chained.  */

static char *
safe_concat (buf, cur, str)
     char *buf;
     char *cur;
     const char *str;
{
  char *limit = buf + BUF_LEN - 2;	/* Leave room for null.  */

  /* Already past the limit: just terminate at the limit.  */
  if (cur > limit)
    {
      *limit = '\0';
      return limit;
    }

  /* Copy until the source ends or the buffer limit is reached.  */
  for (; cur < limit && *str != '\0'; str++)
    *cur++ = *str;

  *cur = '\0';
  return cur;
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* This recognizes rtx's classified as expressions.  These always
   represent some action on values or results of other expressions,
   which may be stored in objects representing values.  Formats the
   expression X into BUF (of size BUF_LEN); VERBOSE selects long or
   abbreviated operator names.  */

static void
print_exp (buf, x, verbose)
     char *buf;
     rtx x;
     int verbose;
{
  char tmp[BUF_LEN];
  const char *st[4];		/* Infix/prefix text printed before op[i].  */
  char *cur = buf;
  const char *fun = (char *)0;	/* Non-null => print as fun(op0,op1,...).  */
  const char *sep;
  rtx op[4];			/* Up to four operands to recurse into.  */
  int i;

  for (i = 0; i < 4; i++)
    {
      st[i] = (char *)0;
      op[i] = NULL_RTX;
    }

  /* Each case only fills in FUN / ST[] / OP[]; the actual printing is
     done by the common code after the switch.  */
  switch (GET_CODE (x))
    {
    case PLUS:
      op[0] = XEXP (x, 0);
      /* Print "a + (-c)" as "a-c" for readability.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) < 0)
	{
	  st[1] = "-";
	  op[1] = GEN_INT (-INTVAL (XEXP (x, 1)));
	}
      else
	{
	  st[1] = "+";
	  op[1] = XEXP (x, 1);
	}
      break;
    case LO_SUM:
      op[0] = XEXP (x, 0);
      st[1] = "+low(";
      op[1] = XEXP (x, 1);
      st[2] = ")";
      break;
    case MINUS:
      op[0] = XEXP (x, 0);
      st[1] = "-";
      op[1] = XEXP (x, 1);
      break;
    case COMPARE:
      fun = "cmp";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case NEG:
      st[0] = "-";
      op[0] = XEXP (x, 0);
      break;
    case MULT:
      op[0] = XEXP (x, 0);
      st[1] = "*";
      op[1] = XEXP (x, 1);
      break;
    case DIV:
      op[0] = XEXP (x, 0);
      st[1] = "/";
      op[1] = XEXP (x, 1);
      break;
    case UDIV:
      fun = "udiv";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case MOD:
      op[0] = XEXP (x, 0);
      st[1] = "%";
      op[1] = XEXP (x, 1);
      break;
    case UMOD:
      fun = "umod";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case SMIN:
      fun = "smin";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case SMAX:
      fun = "smax";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case UMIN:
      fun = "umin";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case UMAX:
      fun = "umax";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case NOT:
      st[0] = "!";
      op[0] = XEXP (x, 0);
      break;
    case AND:
      op[0] = XEXP (x, 0);
      st[1] = "&";
      op[1] = XEXP (x, 1);
      break;
    case IOR:
      op[0] = XEXP (x, 0);
      st[1] = "|";
      op[1] = XEXP (x, 1);
      break;
    case XOR:
      op[0] = XEXP (x, 0);
      st[1] = "^";
      op[1] = XEXP (x, 1);
      break;
    case ASHIFT:
      op[0] = XEXP (x, 0);
      st[1] = "<<";
      op[1] = XEXP (x, 1);
      break;
    case LSHIFTRT:
      op[0] = XEXP (x, 0);
      /* " 0>>" marks a logical (zero-filling) right shift.  */
      st[1] = " 0>>";
      op[1] = XEXP (x, 1);
      break;
    case ASHIFTRT:
      op[0] = XEXP (x, 0);
      st[1] = ">>";
      op[1] = XEXP (x, 1);
      break;
    case ROTATE:
      op[0] = XEXP (x, 0);
      st[1] = "<-<";
      op[1] = XEXP (x, 1);
      break;
    case ROTATERT:
      op[0] = XEXP (x, 0);
      st[1] = ">->";
      op[1] = XEXP (x, 1);
      break;
    case ABS:
      fun = "abs";
      op[0] = XEXP (x, 0);
      break;
    case SQRT:
      fun = "sqrt";
      op[0] = XEXP (x, 0);
      break;
    case FFS:
      fun = "ffs";
      op[0] = XEXP (x, 0);
      break;
    case EQ:
      op[0] = XEXP (x, 0);
      st[1] = "==";
      op[1] = XEXP (x, 1);
      break;
    case NE:
      op[0] = XEXP (x, 0);
      st[1] = "!=";
      op[1] = XEXP (x, 1);
      break;
    case GT:
      op[0] = XEXP (x, 0);
      st[1] = ">";
      op[1] = XEXP (x, 1);
      break;
    case GTU:
      fun = "gtu";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case LT:
      op[0] = XEXP (x, 0);
      st[1] = "<";
      op[1] = XEXP (x, 1);
      break;
    case LTU:
      fun = "ltu";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case GE:
      op[0] = XEXP (x, 0);
      st[1] = ">=";
      op[1] = XEXP (x, 1);
      break;
    case GEU:
      fun = "geu";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case LE:
      op[0] = XEXP (x, 0);
      st[1] = "<=";
      op[1] = XEXP (x, 1);
      break;
    case LEU:
      fun = "leu";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      break;
    case SIGN_EXTRACT:
      fun = (verbose) ? "sign_extract" : "sxt";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      op[2] = XEXP (x, 2);
      break;
    case ZERO_EXTRACT:
      fun = (verbose) ? "zero_extract" : "zxt";
      op[0] = XEXP (x, 0);
      op[1] = XEXP (x, 1);
      op[2] = XEXP (x, 2);
      break;
    case SIGN_EXTEND:
      fun = (verbose) ? "sign_extend" : "sxn";
      op[0] = XEXP (x, 0);
      break;
    case ZERO_EXTEND:
      fun = (verbose) ? "zero_extend" : "zxn";
      op[0] = XEXP (x, 0);
      break;
    case FLOAT_EXTEND:
      fun = (verbose) ? "float_extend" : "fxn";
      op[0] = XEXP (x, 0);
      break;
    case TRUNCATE:
      fun = (verbose) ? "trunc" : "trn";
      op[0] = XEXP (x, 0);
      break;
    case FLOAT_TRUNCATE:
      fun = (verbose) ? "float_trunc" : "ftr";
      op[0] = XEXP (x, 0);
      break;
    case FLOAT:
      fun = (verbose) ? "float" : "flt";
      op[0] = XEXP (x, 0);
      break;
    case UNSIGNED_FLOAT:
      fun = (verbose) ? "uns_float" : "ufl";
      op[0] = XEXP (x, 0);
      break;
    case FIX:
      fun = "fix";
      op[0] = XEXP (x, 0);
      break;
    case UNSIGNED_FIX:
      fun = (verbose) ? "uns_fix" : "ufx";
      op[0] = XEXP (x, 0);
      break;
    case PRE_DEC:
      st[0] = "--";
      op[0] = XEXP (x, 0);
      break;
    case PRE_INC:
      st[0] = "++";
      op[0] = XEXP (x, 0);
      break;
    case POST_DEC:
      op[0] = XEXP (x, 0);
      st[1] = "--";
      break;
    case POST_INC:
      op[0] = XEXP (x, 0);
      st[1] = "++";
      break;
    case CALL:
      st[0] = "call ";
      op[0] = XEXP (x, 0);
      if (verbose)
	{
	  st[1] = " argc:";
	  op[1] = XEXP (x, 1);
	}
      break;
    case IF_THEN_ELSE:
      /* Printed as "{(cond)?then:else}".  */
      st[0] = "{(";
      op[0] = XEXP (x, 0);
      st[1] = ")?";
      op[1] = XEXP (x, 1);
      st[2] = ":";
      op[2] = XEXP (x, 2);
      st[3] = "}";
      break;
    case TRAP_IF:
      fun = "trap_if";
      op[0] = TRAP_CONDITION (x);
      break;
    case UNSPEC:
    case UNSPEC_VOLATILE:
      {
	/* Handled directly (no OP[]/ST[]) because the operand list is a
	   vector of arbitrary length.  */
	cur = safe_concat (buf, cur, "unspec");
	if (GET_CODE (x) == UNSPEC_VOLATILE)
	  cur = safe_concat (buf, cur, "/v");
	cur = safe_concat (buf, cur, "[");
	sep = "";
	for (i = 0; i < XVECLEN (x, 0); i++)
	  {
	    print_pattern (tmp, XVECEXP (x, 0, i), verbose);
	    cur = safe_concat (buf, cur, sep);
	    cur = safe_concat (buf, cur, tmp);
	    sep = ",";
	  }
	cur = safe_concat (buf, cur, "] ");
	sprintf (tmp, "%d", XINT (x, 1));
	cur = safe_concat (buf, cur, tmp);
      }
      break;
    default:
      /* If (verbose) debug_rtx (x); */
      st[0] = GET_RTX_NAME (GET_CODE (x));
      break;
    }

  /* Print this as a function?  */
  if (fun)
    {
      cur = safe_concat (buf, cur, fun);
      cur = safe_concat (buf, cur, "(");
    }

  for (i = 0; i < 4; i++)
    {
      if (st[i])
	cur = safe_concat (buf, cur, st[i]);

      if (op[i])
	{
	  /* Comma-separate arguments in function notation.  */
	  if (fun && i != 0)
	    cur = safe_concat (buf, cur, ",");

	  print_value (tmp, op[i], verbose);
	  cur = safe_concat (buf, cur, tmp);
	}
    }

  if (fun)
    cur = safe_concat (buf, cur, ")");
} /* print_exp */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Prints rtx's classified as values: constants, registers, labels,
   symbols and memory accesses.  Formats X into BUF (size BUF_LEN);
   anything not recognized here is delegated to print_exp.  */

static void
print_value (buf, x, verbose)
     char *buf;
     rtx x;
     int verbose;
{
  char t[BUF_LEN];
  char *cur = buf;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      sprintf (t, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST_DOUBLE:
      /* Print the raw word pair; not decoded as a float value.  */
      sprintf (t, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST_STRING:
      cur = safe_concat (buf, cur, "\"");
      cur = safe_concat (buf, cur, XSTR (x, 0));
      cur = safe_concat (buf, cur, "\"");
      break;
    case SYMBOL_REF:
      cur = safe_concat (buf, cur, "`");
      cur = safe_concat (buf, cur, XSTR (x, 0));
      cur = safe_concat (buf, cur, "'");
      break;
    case LABEL_REF:
      sprintf (t, "L%d", INSN_UID (XEXP (x, 0)));
      cur = safe_concat (buf, cur, t);
      break;
    case CONST:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "const(");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, ")");
      break;
    case HIGH:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "high(");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, ")");
      break;
    case REG:
      if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	{
	  int c = reg_names[ REGNO (x) ][0];
	  /* Prefix purely numeric hard-register names with '%'.  */
	  if (c >= '0' && c <= '9')
	    cur = safe_concat (buf, cur, "%");

	  cur = safe_concat (buf, cur, reg_names[ REGNO (x) ]);
	}
      else
	{
	  /* Pseudo registers are printed as "r<regno>".  */
	  sprintf (t, "r%d", REGNO (x));
	  cur = safe_concat (buf, cur, t);
	}
      break;
    case SUBREG:
      print_value (t, SUBREG_REG (x), verbose);
      cur = safe_concat (buf, cur, t);
      sprintf (t, "#%d", SUBREG_WORD (x));
      cur = safe_concat (buf, cur, t);
      break;
    case SCRATCH:
      cur = safe_concat (buf, cur, "scratch");
      break;
    case CC0:
      cur = safe_concat (buf, cur, "cc0");
      break;
    case PC:
      cur = safe_concat (buf, cur, "pc");
      break;
    case MEM:
      print_value (t, XEXP (x, 0), verbose);
      cur = safe_concat (buf, cur, "[");
      cur = safe_concat (buf, cur, t);
      cur = safe_concat (buf, cur, "]");
      break;
    default:
      /* Not a value: treat as an expression.  */
      print_exp (t, x, verbose);
      cur = safe_concat (buf, cur, t);
      break;
    }
} /* print_value */
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* The next step in insn detalization: its pattern recognition.
   Formats insn pattern X into BUF (size BUF_LEN).  */

static void
print_pattern (buf, x, verbose)
     char *buf;
     rtx x;
     int verbose;
{
  char t1[BUF_LEN], t2[BUF_LEN], t3[BUF_LEN];

  switch (GET_CODE (x))
    {
    case SET:
      print_value (t1, SET_DEST (x), verbose);
      print_value (t2, SET_SRC (x), verbose);
      sprintf (buf, "%s=%s", t1, t2);
      break;
    case RETURN:
      sprintf (buf, "return");
      break;
    case CALL:
      print_exp (buf, x, verbose);
      break;
    case CLOBBER:
      print_value (t1, XEXP (x, 0), verbose);
      sprintf (buf, "clobber %s", t1);
      break;
    case USE:
      print_value (t1, XEXP (x, 0), verbose);
      sprintf (buf, "use %s", t1);
      break;
    case PARALLEL:
      {
	int i;

	/* Sub-patterns printed ';'-separated inside braces.  */
	sprintf (t1, "{");
	for (i = 0; i < XVECLEN (x, 0); i++)
	  {
	    print_pattern (t2, XVECEXP (x, 0, i), verbose);
	    sprintf (t3, "%s%s;", t1, t2);
	    strcpy (t1, t3);
	  }
	sprintf (buf, "%s}", t1);
      }
      break;
    case SEQUENCE:
      {
	int i;

	/* Whole insns printed ';'-separated inside "%{ ... %}".  */
	sprintf (t1, "%%{");
	for (i = 0; i < XVECLEN (x, 0); i++)
	  {
	    print_insn (t2, XVECEXP (x, 0, i), verbose);
	    sprintf (t3, "%s%s;", t1, t2);
	    strcpy (t1, t3);
	  }
	sprintf (buf, "%s%%}", t1);
      }
      break;
    case ASM_INPUT:
      sprintf (buf, "asm {%s}", XSTR (x, 0));
      break;
    case ADDR_VEC:
      /* Jump-table contents are not printed.  */
      break;
    case ADDR_DIFF_VEC:
      print_value (buf, XEXP (x, 0), verbose);
      break;
    case TRAP_IF:
      print_value (t1, TRAP_CONDITION (x), verbose);
      sprintf (buf, "trap_if %s", t1);
      break;
    case UNSPEC:
      {
	int i;

	sprintf (t1, "unspec{");
	for (i = 0; i < XVECLEN (x, 0); i++)
	  {
	    print_pattern (t2, XVECEXP (x, 0, i), verbose);
	    sprintf (t3, "%s%s;", t1, t2);
	    strcpy (t1, t3);
	  }
	sprintf (buf, "%s}", t1);
      }
      break;
    case UNSPEC_VOLATILE:
      {
	int i;

	sprintf (t1, "unspec/v{");
	for (i = 0; i < XVECLEN (x, 0); i++)
	  {
	    print_pattern (t2, XVECEXP (x, 0, i), verbose);
	    sprintf (t3, "%s%s;", t1, t2);
	    strcpy (t1, t3);
	  }
	sprintf (buf, "%s}", t1);
      }
      break;
    default:
      /* Anything else is printed as a plain value.  */
      print_value (buf, x, verbose);
    }
} /* print_pattern */
|
|
|
|
|
|
|
|
|
|
/* This is the main function in rtl visualization mechanism.  It
   accepts an rtx and tries to recognize it as an insn, then prints it
   properly in human readable form, resembling assembler mnemonics.
   For every insn it prints its UID and the BB the insn belongs to.
   (Probably the last "option" should be extended somehow, since it
   depends now on sched.c inner variables ...)  */

static void
print_insn (buf, x, verbose)
     char *buf;
     rtx x;
     int verbose;
{
  char t[BUF_LEN];
  rtx insn = x;

  switch (GET_CODE (x))
    {
    case INSN:
      print_pattern (t, PATTERN (x), verbose);
      if (verbose)
	sprintf (buf, "b%d: i% 4d: %s", INSN_BB (x),
		 INSN_UID (x), t);
      else
	sprintf (buf, "%-4d %s", INSN_UID (x), t);
      break;
    case JUMP_INSN:
      print_pattern (t, PATTERN (x), verbose);
      if (verbose)
	sprintf (buf, "b%d: i% 4d: jump %s", INSN_BB (x),
		 INSN_UID (x), t);
      else
	sprintf (buf, "%-4d %s", INSN_UID (x), t);
      break;
    case CALL_INSN:
      x = PATTERN (insn);
      /* A call wrapped in a PARALLEL: print its first element.  */
      if (GET_CODE (x) == PARALLEL)
	{
	  x = XVECEXP (x, 0, 0);
	  print_pattern (t, x, verbose);
	}
      else
	strcpy (t, "call <...>");
      if (verbose)
	sprintf (buf, "b%d: i% 4d: %s", INSN_BB (insn),
		 INSN_UID (insn), t);
      else
	sprintf (buf, "%-4d %s", INSN_UID (insn), t);
      break;
    case CODE_LABEL:
      sprintf (buf, "L%d:", INSN_UID (x));
      break;
    case BARRIER:
      sprintf (buf, "i% 4d: barrier", INSN_UID (x));
      break;
    case NOTE:
      /* Positive line numbers are source-line notes; the rest are
	 named internal notes.  */
      if (NOTE_LINE_NUMBER (x) > 0)
	sprintf (buf, "%4d note \"%s\" %d", INSN_UID (x),
		 NOTE_SOURCE_FILE (x), NOTE_LINE_NUMBER (x));
      else
	sprintf (buf, "%4d %s", INSN_UID (x),
		 GET_NOTE_INSN_NAME (NOTE_LINE_NUMBER (x)));
      break;
    default:
      if (verbose)
	{
	  sprintf (buf, "Not an INSN at all\n");
	  debug_rtx (x);
	}
      else
	sprintf (buf, "i%-4d <What?>", INSN_UID (x));
    }
} /* print_insn */
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Print visualization debugging info: the accumulated table for block B,
   with S appended to the header (e.g. "(incomplete)").  */

static void
print_block_visualization (b, s)
     int b;
     const char *s;
{
  int unit, i;

  /* Print header.  */
  fprintf (dump, "\n;; ==================== scheduling visualization for block %d %s \n", b, s);

  /* Print names of units: one column per active function-unit
     instance, plus the trailing no-unit column.  */
  fprintf (dump, ";; %-8s", "clock");
  for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
    if (function_units[unit].bitmask & target_units)
      for (i = 0; i < function_units[unit].multiplicity; i++)
	fprintf (dump, " %-33s", function_units[unit].name);
  fprintf (dump, " %-8s\n", "no-unit");

  /* Underline the column headers.  */
  fprintf (dump, ";; %-8s", "=====");
  for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
    if (function_units[unit].bitmask & target_units)
      for (i = 0; i < function_units[unit].multiplicity; i++)
	fprintf (dump, " %-33s", "==============================");
  fprintf (dump, " %-8s\n", "=======");

  /* Print insns in each cycle.  */
  fprintf (dump, "%s\n", visual_tbl);
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Print insns in the 'no_unit' column of visualization. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
visualize_no_unit (insn)
|
|
|
|
|
rtx insn;
|
|
|
|
|
{
|
|
|
|
|
vis_no_unit[n_vis_no_unit] = insn;
|
|
|
|
|
n_vis_no_unit++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Print insns scheduled in clock, for visualization: append one row
   for cycle CLOCK of block B to visual_tbl.  */

static void
visualize_scheduled_insns (b, clock)
     int b, clock;
{
  int i, unit;

  /* If no more room, split table into two.  */
  if (n_visual_lines >= MAX_VISUAL_LINES)
    {
      print_block_visualization (b, "(incomplete)");
      init_block_visualization ();
    }

  n_visual_lines++;

  sprintf (visual_tbl + strlen (visual_tbl), ";; %-8d", clock);
  for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++)
    if (function_units[unit].bitmask & target_units)
      for (i = 0; i < function_units[unit].multiplicity; i++)
	{
	  int instance = unit + i * FUNCTION_UNITS_SIZE;
	  rtx insn = unit_last_insn[instance];

	  /* Print insns that still keep the unit busy.  */
	  if (insn &&
	      actual_hazard_this_instance (unit, instance, insn, clock, 0))
	    {
	      char str[BUF_LEN];
	      print_insn (str, insn, 0);
	      /* Truncate to the column width.  */
	      str[INSN_LEN] = '\0';
	      sprintf (visual_tbl + strlen (visual_tbl), " %-33s", str);
	    }
	  else
	    sprintf (visual_tbl + strlen (visual_tbl), " %-33s", "------------------------------");
	}

  /* Print insns that are not assigned to any unit.  */
  for (i = 0; i < n_vis_no_unit; i++)
    sprintf (visual_tbl + strlen (visual_tbl), " %-8d",
	     INSN_UID (vis_no_unit[i]));
  n_vis_no_unit = 0;

  sprintf (visual_tbl + strlen (visual_tbl), "\n");
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Print stalled cycles. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
visualize_stall_cycles (b, stalls)
|
|
|
|
|
int b, stalls;
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* If no more room, split table into two. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (n_visual_lines >= MAX_VISUAL_LINES)
|
|
|
|
|
{
|
|
|
|
|
print_block_visualization (b, "(incomplete)");
|
|
|
|
|
init_block_visualization ();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
n_visual_lines++;
|
|
|
|
|
|
|
|
|
|
sprintf (visual_tbl + strlen (visual_tbl), ";; ");
|
|
|
|
|
for (i = 0; i < stalls; i++)
|
|
|
|
|
sprintf (visual_tbl + strlen (visual_tbl), ".");
|
|
|
|
|
sprintf (visual_tbl + strlen (visual_tbl), "\n");
|
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* move_insn1: Remove INSN from insn chain, and link it after LAST insn. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static rtx
|
|
|
|
|
move_insn1 (insn, last)
|
|
|
|
|
rtx insn, last;
|
|
|
|
|
{
|
|
|
|
|
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
|
|
|
|
|
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
|
|
|
|
|
|
|
|
|
|
NEXT_INSN (insn) = NEXT_INSN (last);
|
|
|
|
|
PREV_INSN (NEXT_INSN (last)) = insn;
|
|
|
|
|
|
|
|
|
|
NEXT_INSN (last) = insn;
|
|
|
|
|
PREV_INSN (insn) = last;
|
|
|
|
|
|
|
|
|
|
return insn;
|
|
|
|
|
}
|
|
|
|
|
|
1999-10-11 01:45:27 +02:00
|
|
|
|
/* Search INSN for REG_SAVE_NOTE note pairs for NOTE_INSN_SETJMP,
   NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE note following first one is contains the
   saved value for NOTE_BLOCK_NUMBER which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  LAST is the last instruction
   output by the instruction scheduler.  Return the new value of LAST.  */

static rtx
reemit_notes (insn, last)
     rtx insn;
     rtx last;
{
  rtx note, retval;

  retval = last;
  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	{
	  /* The notes come in pairs: the first REG_SAVE_NOTE holds the
	     note type, the one immediately following holds the saved
	     auxiliary data for that note (if any).  */
	  int note_type = INTVAL (XEXP (note, 0));
	  if (note_type == NOTE_INSN_SETJMP)
	    {
	      /* SETJMP is re-emitted after INSN itself (not before
		 LAST), and its result becomes the function's return
		 value.  */
	      retval = emit_note_after (NOTE_INSN_SETJMP, insn);
	      CONST_CALL_P (retval) = CONST_CALL_P (note);
	      remove_note (insn, note);
	      /* Step to the second note of the pair; remove_note only
		 unlinks the rtx from INSN's note list, so NOTE itself
		 is still valid to walk from.  */
	      note = XEXP (note, 1);
	    }
	  else if (note_type == NOTE_INSN_RANGE_START
		   || note_type == NOTE_INSN_RANGE_END)
	    {
	      last = emit_note_before (note_type, last);
	      remove_note (insn, note);
	      note = XEXP (note, 1);
	      /* The second note of the pair carries the range info.  */
	      NOTE_RANGE_INFO (last) = XEXP (note, 0);
	    }
	  else
	    {
	      last = emit_note_before (note_type, last);
	      remove_note (insn, note);
	      note = XEXP (note, 1);
	      if (note_type == NOTE_INSN_EH_REGION_BEG
		  || note_type == NOTE_INSN_EH_REGION_END)
		/* The second note of the pair carries the saved EH
		   region number.  */
		NOTE_EH_HANDLER (last) = INTVAL (XEXP (note, 0));
	    }
	  /* Remove the second note of the pair as well; the loop's
	     update expression then advances past it.  */
	  remove_note (insn, note);
	}
    }
  return retval;
}
|
|
|
|
|
|
|
|
|
|
/* Move INSN, and all insns which should be issued before it,
|
1997-09-02 06:12:45 +02:00
|
|
|
|
due to SCHED_GROUP_P flag. Reemit notes if needed.
|
|
|
|
|
|
|
|
|
|
Return the last insn emitted by the scheduler, which is the
|
|
|
|
|
return value from the first call to reemit_notes. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static rtx
|
|
|
|
|
move_insn (insn, last)
|
|
|
|
|
rtx insn, last;
|
|
|
|
|
{
|
1997-09-02 06:12:45 +02:00
|
|
|
|
rtx retval = NULL;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1997-09-02 06:12:45 +02:00
|
|
|
|
/* If INSN has SCHED_GROUP_P set, then issue it and any other
|
|
|
|
|
insns with SCHED_GROUP_P set first. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
while (SCHED_GROUP_P (insn))
|
|
|
|
|
{
|
|
|
|
|
rtx prev = PREV_INSN (insn);
|
1997-09-02 06:12:45 +02:00
|
|
|
|
|
|
|
|
|
/* Move a SCHED_GROUP_P insn. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
move_insn1 (insn, last);
|
1997-09-02 06:12:45 +02:00
|
|
|
|
/* If this is the first call to reemit_notes, then record
|
|
|
|
|
its return value. */
|
|
|
|
|
if (retval == NULL_RTX)
|
|
|
|
|
retval = reemit_notes (insn, insn);
|
|
|
|
|
else
|
|
|
|
|
reemit_notes (insn, insn);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
insn = prev;
|
|
|
|
|
}
|
|
|
|
|
|
1997-09-02 06:12:45 +02:00
|
|
|
|
/* Now move the first non SCHED_GROUP_P insn. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
move_insn1 (insn, last);
|
1997-09-02 06:12:45 +02:00
|
|
|
|
|
|
|
|
|
/* If this is the first call to reemit_notes, then record
|
|
|
|
|
its return value. */
|
|
|
|
|
if (retval == NULL_RTX)
|
|
|
|
|
retval = reemit_notes (insn, insn);
|
|
|
|
|
else
|
|
|
|
|
reemit_notes (insn, insn);
|
|
|
|
|
|
|
|
|
|
return retval;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Return an insn which represents a SCHED_GROUP, which is
|
|
|
|
|
the last insn in the group. */
|
|
|
|
|
|
|
|
|
|
static rtx
|
|
|
|
|
group_leader (insn)
|
|
|
|
|
rtx insn;
|
|
|
|
|
{
|
|
|
|
|
rtx prev;
|
|
|
|
|
|
|
|
|
|
do
|
|
|
|
|
{
|
|
|
|
|
prev = insn;
|
|
|
|
|
insn = next_nonnote_insn (insn);
|
|
|
|
|
}
|
|
|
|
|
while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));
|
|
|
|
|
|
|
|
|
|
return prev;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Use forward list scheduling to rearrange insns of block BB in region RGN,
   possibly bringing insns from subsequent blocks in the same region.
   Return number of insns scheduled.  */

static int
schedule_block (bb, rgn_n_insns)
     int bb;
     int rgn_n_insns;
{
  /* Local variables.  */
  rtx insn, last;
  rtx *ready;
  int n_ready = 0;
  int can_issue_more;

  /* Flow block of this bb.  */
  int b = BB_TO_BLOCK (bb);

  /* target_n_insns == number of insns in b before scheduling starts.
     sched_target_n_insns == how many of b's insns were scheduled.
     sched_n_insns == how many insns were scheduled in b.  */
  int target_n_insns = 0;
  int sched_target_n_insns = 0;
  int sched_n_insns = 0;

  /* NEW_NEEDS records whether BLOCK_HEAD/BLOCK_END of B must be
     re-pointed after scheduling (they must be iff the old head/end
     were the boundary insns we scheduled).  */
#define NEED_NOTHING	0
#define NEED_HEAD	1
#define NEED_TAIL	2
  int new_needs;

  /* Head/tail info for this block.  */
  rtx prev_head;
  rtx next_tail;
  rtx head;
  rtx tail;
  int bb_src;

  /* We used to have code to avoid getting parameters moved from hard
     argument registers into pseudos.

     However, it was removed when it proved to be of marginal benefit
     and caused problems because schedule_block and compute_forward_dependences
     had different notions of what the "head" insn was.  */
  get_block_head_tail (bb, &head, &tail);

  /* Interblock scheduling could have moved the original head insn from this
     block into a proceeding block.  This may also cause schedule_block and
     compute_forward_dependences to have different notions of what the
     "head" insn was.

     If the interblock movement happened to make this block start with
     some notes (LOOP, EH or SETJMP) before the first real insn, then
     HEAD will have various special notes attached to it which must be
     removed so that we don't end up with extra copies of the notes.  */
  if (GET_RTX_CLASS (GET_CODE (head)) == 'i')
    {
      rtx note;

      for (note = REG_NOTES (head); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	  remove_note (head, note);
    }

  next_tail = NEXT_INSN (tail);
  prev_head = PREV_INSN (head);

  /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
     to schedule this block.  */
  if (head == tail
      && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
    return (sched_n_insns);

  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (dump, ";;   ======================================================\n");
      fprintf (dump,
	       ";;   -- basic block %d from %d to %d -- %s reload\n",
	       b, INSN_UID (BLOCK_HEAD (b)), INSN_UID (BLOCK_END (b)),
	       (reload_completed ? "after" : "before"));
      fprintf (dump, ";;   ======================================================\n");
      fprintf (dump, "\n");

      visual_tbl = (char *) alloca (get_visual_tbl_length ());
      init_block_visualization ();
    }

  /* Remove remaining note insns from the block, save them in
     note_list.  These notes are restored at the end of
     schedule_block ().  */
  note_list = 0;
  rm_other_notes (head, tail);

  target_bb = bb;

  /* Prepare current target block info.  */
  if (current_nr_blocks > 1)
    {
      candidate_table = (candidate *) alloca (current_nr_blocks
					      * sizeof (candidate));

      bblst_last = 0;
      /* ??? It is not clear why bblst_size is computed this way.  The original
	 number was clearly too small as it resulted in compiler failures.
	 Multiplying by the original number by 2 (to account for update_bbs
	 members) seems to be a reasonable solution.  */
      /* ??? Or perhaps there is a bug somewhere else in this file?  */
      bblst_size = (current_nr_blocks - bb) * rgn_nr_edges * 2;
      bblst_table = (int *) alloca (bblst_size * sizeof (int));

      bitlst_table_last = 0;
      bitlst_table_size = rgn_nr_edges;
      bitlst_table = (int *) alloca (rgn_nr_edges * sizeof (int));

      compute_trg_info (bb);
    }

  clear_units ();

  /* Allocate the ready list.  */
  ready = (rtx *) alloca ((rgn_n_insns + 1) * sizeof (rtx));

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_dependencies ();

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  n_ready = 0;
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx next;

      if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
	continue;
      next = NEXT_INSN (insn);

      /* An insn is ready when nothing depends on it; group members are
	 represented by their leader, so skip insns whose successor is
	 still part of the same group.  */
      if (INSN_DEP_COUNT (insn) == 0
	  && (SCHED_GROUP_P (next) == 0 || GET_RTX_CLASS (GET_CODE (next)) != 'i'))
	ready[n_ready++] = insn;
      if (!(SCHED_GROUP_P (insn)))
	target_n_insns++;
    }

  /* Add to ready list all 'ready' insns in valid source blocks.
     For speculative insns, check-live, exception-free, and
     issue-delay.  */
  for (bb_src = bb + 1; bb_src < current_nr_blocks; bb_src++)
    if (IS_VALID (bb_src))
      {
	rtx src_head;
	rtx src_next_tail;
	/* Note: these intentionally shadow the outer head/tail, which
	   continue to describe the target block.  */
	rtx tail, head;

	get_block_head_tail (bb_src, &head, &tail);
	src_next_tail = NEXT_INSN (tail);
	src_head = head;

	if (head == tail
	    && (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
	  continue;

	for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
	  {
	    if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
	      continue;

	    if (!CANT_MOVE (insn)
		&& (!IS_SPECULATIVE_INSN (insn)
		    || (insn_issue_delay (insn) <= 3
			&& check_live (insn, bb_src)
			&& is_exception_free (insn, bb_src, target_bb))))
	      {
		rtx next;

		/* Note that we havn't squirrled away the notes for
		   blocks other than the current.  So if this is a
		   speculative insn, NEXT might otherwise be a note.  */
		next = next_nonnote_insn (insn);
		if (INSN_DEP_COUNT (insn) == 0
		    && (SCHED_GROUP_P (next) == 0
			|| GET_RTX_CLASS (GET_CODE (next)) != 'i'))
		  ready[n_ready++] = insn;
	      }
	  }
      }

#ifdef MD_SCHED_INIT
  MD_SCHED_INIT (dump, sched_verbose);
#endif

  /* No insns scheduled in this block yet.  */
  last_scheduled_insn = 0;

  /* Q_SIZE is the total number of insns in the queue.  */
  q_ptr = 0;
  q_size = 0;
  last_clock_var = 0;
  bzero ((char *) insn_queue, sizeof (insn_queue));

  /* Start just before the beginning of time.  */
  clock_var = -1;

  /* We start inserting insns after PREV_HEAD.  */
  last = prev_head;

  /* Initialize INSN_QUEUE, LIST and NEW_NEEDS.  */
  new_needs = (NEXT_INSN (prev_head) == BLOCK_HEAD (b)
	       ? NEED_HEAD : NEED_NOTHING);
  if (PREV_INSN (next_tail) == BLOCK_END (b))
    new_needs |= NEED_TAIL;

  /* Loop until all the insns in BB are scheduled.  */
  while (sched_target_n_insns < target_n_insns)
    {
      clock_var++;

      /* Add to the ready list all pending insns that can be issued now.
	 If there are no ready insns, increment clock until one
	 is ready and add all pending insns at that point to the ready
	 list.  */
      n_ready = queue_to_ready (ready, n_ready);

      if (n_ready == 0)
	abort ();

      if (sched_verbose >= 2)
	{
	  fprintf (dump, ";;\t\tReady list after queue_to_ready:  ");
	  debug_ready_list (ready, n_ready);
	}

      /* Sort the ready list based on priority.  */
      SCHED_SORT (ready, n_ready);

      /* Allow the target to reorder the list, typically for
	 better instruction bundling.  */
#ifdef MD_SCHED_REORDER
      MD_SCHED_REORDER (dump, sched_verbose, ready, n_ready, clock_var,
			can_issue_more);
#else
      can_issue_more = issue_rate;
#endif

      if (sched_verbose)
	{
	  fprintf (dump, "\n;;\tReady list (t =%3d):  ", clock_var);
	  debug_ready_list (ready, n_ready);
	}

      /* Issue insns from ready list.  */
      while (n_ready != 0 && can_issue_more)
	{
	  /* Select and remove the insn from the ready list.  */
	  rtx insn = ready[--n_ready];
	  int cost = actual_hazard (insn_unit (insn), insn, clock_var, 0);

	  /* A positive cost means the unit is busy; requeue the insn
	     for COST cycles from now instead of issuing it.  */
	  if (cost >= 1)
	    {
	      queue_insn (insn, cost);
	      continue;
	    }

	  /* An interblock motion?  */
	  if (INSN_BB (insn) != target_bb)
	    {
	      rtx temp;
	      basic_block b1;

	      if (IS_SPECULATIVE_INSN (insn))
		{
		  /* Liveness may have changed since the candidate was
		     computed; re-check before committing the motion.  */
		  if (!check_live (insn, INSN_BB (insn)))
		    continue;
		  update_live (insn, INSN_BB (insn));

		  /* For speculative load, mark insns fed by it.  */
		  if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
		    set_spec_fed (insn);

		  nr_spec++;
		}
	      nr_inter++;

	      /* Find the beginning of the scheduling group; update the
		 containing block number for the insns.  */
	      temp = insn;
	      set_block_num (temp, target_bb);
	      while (SCHED_GROUP_P (insn))
		{
		  temp = PREV_INSN (temp);
		  set_block_num (temp, target_bb);
		}

	      /* Update source block boundaries.  */
	      b1 = BLOCK_FOR_INSN (temp);
	      if (temp == b1->head && insn == b1->end)
		{
		  /* We moved all the insns in the basic block.
		     Emit a note after the last insn and update the
		     begin/end boundaries to point to the note.  */
		  rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
		  b1->head = note;
		  b1->end = note;
		}
	      else if (insn == b1->end)
		{
		  /* We took insns from the end of the basic block,
		     so update the end of block boundary so that it
		     points to the first insn we did not move.  */
		  b1->end = PREV_INSN (temp);
		}
	      else if (temp == b1->head)
		{
		  /* We took insns from the start of the basic block,
		     so update the start of block boundary so that
		     it points to the first insn we did not move.  */
		  b1->head = NEXT_INSN (insn);
		}
	    }
	  else
	    {
	      /* In block motion.  */
	      sched_target_n_insns++;
	    }

	  last_scheduled_insn = insn;
	  last = move_insn (insn, last);
	  sched_n_insns++;

#ifdef MD_SCHED_VARIABLE_ISSUE
	  MD_SCHED_VARIABLE_ISSUE (dump, sched_verbose, insn,
				   can_issue_more);
#else
	  can_issue_more--;
#endif

	  n_ready = schedule_insn (insn, ready, n_ready, clock_var);

	  /* Close this block after scheduling its jump.  */
	  if (GET_CODE (last_scheduled_insn) == JUMP_INSN)
	    break;
	}

      /* Debug info.  */
      if (sched_verbose)
	visualize_scheduled_insns (b, clock_var);
    }

  /* Debug info.  */
  if (sched_verbose)
    {
      fprintf (dump, ";;\tReady list (final):  ");
      debug_ready_list (ready, n_ready);
      print_block_visualization (b, "");
    }

  /* Sanity check -- queue must be empty now.  Meaningless if region has
     multiple bbs.  */
  if (current_nr_blocks > 1)
    if (!flag_schedule_interblock && q_size != 0)
      abort ();

  /* Update head/tail boundaries.  */
  head = NEXT_INSN (prev_head);
  tail = last;

  /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
     previously found among the insns.  Insert them at the beginning
     of the insns.  */
  if (note_list != 0)
    {
      rtx note_head = note_list;

      /* Walk to the front of the saved-note chain.  */
      while (PREV_INSN (note_head))
	{
	  note_head = PREV_INSN (note_head);
	}

      PREV_INSN (note_head) = PREV_INSN (head);
      NEXT_INSN (PREV_INSN (head)) = note_head;
      PREV_INSN (head) = note_list;
      NEXT_INSN (note_list) = head;
      head = note_head;
    }

  /* Update target block boundaries.  */
  if (new_needs & NEED_HEAD)
    BLOCK_HEAD (b) = head;

  if (new_needs & NEED_TAIL)
    BLOCK_END (b) = tail;

  /* Debugging.  */
  if (sched_verbose)
    {
      fprintf (dump, ";;   total time = %d\n;;   new basic block head = %d\n",
	       clock_var, INSN_UID (BLOCK_HEAD (b)));
      fprintf (dump, ";;   new basic block end = %d\n\n",
	       INSN_UID (BLOCK_END (b)));
    }

  return (sched_n_insns);
}				/* schedule_block () */
|
|
|
|
|
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Print the bit-set of registers, S, callable from debugger. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
extern void
|
|
|
|
|
debug_reg_vector (s)
|
|
|
|
|
regset s;
|
|
|
|
|
{
|
|
|
|
|
int regno;
|
|
|
|
|
|
|
|
|
|
EXECUTE_IF_SET_IN_REG_SET (s, 0, regno,
|
|
|
|
|
{
|
|
|
|
|
fprintf (dump, " %d", regno);
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
fprintf (dump, "\n");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Use the backward dependences from LOG_LINKS to build
   forward dependences in INSN_DEPEND.  */

static void
compute_block_forward_dependences (bb)
     int bb;
{
  rtx insn, link;
  rtx tail, head;
  rtx next_tail;
  enum reg_note dep_type;

  get_block_head_tail (bb, &head, &tail);
  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
	continue;

      /* Dependences of a scheduling group are carried by its leader,
	 so jump there before reading LOG_LINKS.  */
      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	{
	  rtx x = group_leader (XEXP (link, 0));
	  rtx new_link;

	  /* Only process links whose target is itself a group leader;
	     links to interior group members are skipped here.  */
	  if (x != XEXP (link, 0))
	    continue;

#ifdef ENABLE_CHECKING
	  /* If add_dependence is working properly there should never
	     be notes, deleted insns or duplicates in the backward
	     links.  Thus we need not check for them here.

	     However, if we have enabled checking we might as well go
	     ahead and verify that add_dependence worked properly.  */
	  if (GET_CODE (x) == NOTE
	      || INSN_DELETED_P (x)
	      || find_insn_list (insn, INSN_DEPEND (x)))
	    abort ();
#endif

	  /* Push the forward link onto X's INSN_DEPEND list.  */
	  new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

	  /* Preserve the kind (true/anti/output) of the dependence.  */
	  dep_type = REG_NOTE_KIND (link);
	  PUT_REG_NOTE_KIND (new_link, dep_type);

	  INSN_DEPEND (x) = new_link;
	  INSN_DEP_COUNT (insn) += 1;
	}
    }
}
|
|
|
|
|
|
|
|
|
|
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

__inline static void
init_rgn_data_dependences (n_bbs)
     int n_bbs;
{
  int bb;

  /* Variables for which one copy exists for each block.  */
  bzero ((char *) bb_pending_read_insns, n_bbs * sizeof (rtx));
  bzero ((char *) bb_pending_read_mems, n_bbs * sizeof (rtx));
  bzero ((char *) bb_pending_write_insns, n_bbs * sizeof (rtx));
  bzero ((char *) bb_pending_write_mems, n_bbs * sizeof (rtx));
  /* NOTE(review): sizeof (rtx) is used for bb_pending_lists_length as
     well; if that array holds ints rather than rtx pointers this
     clears more (or different) bytes than one element per block --
     confirm against the array declarations.  */
  bzero ((char *) bb_pending_lists_length, n_bbs * sizeof (rtx));
  bzero ((char *) bb_last_pending_memory_flush, n_bbs * sizeof (rtx));
  bzero ((char *) bb_last_function_call, n_bbs * sizeof (rtx));
  bzero ((char *) bb_sched_before_next_call, n_bbs * sizeof (rtx));

  /* Create an insn here so that we can hang dependencies off of it later.  */
  for (bb = 0; bb < n_bbs; bb++)
    {
      bb_sched_before_next_call[bb] =
	gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
		      NULL_RTX, 0, NULL_RTX, NULL_RTX);
      LOG_LINKS (bb_sched_before_next_call[bb]) = 0;
    }
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Add dependences so that branches are scheduled to run last in their
   block.  HEAD and TAIL delimit the insns of one basic block; both must
   be non-null.  Works by walking backward from TAIL, pinning the trailing
   run of branches/calls/uses/cc0-setters in place.  */

static void
add_branch_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, last;

  /* For all branches, calls, uses, and cc0 setters, force them to remain
     in order at the end of the block by adding dependencies and giving
     the last a high priority.  There may be notes present, and prev_head
     may also be a note.

     Branches must obviously remain at the end.  Calls should remain at the
     end since moving them results in worse register allocation.  Uses remain
     at the end to ensure proper register allocation.  cc0 setters remain
     at the end because they can't be moved away from their cc0 user.  */
  insn = tail;
  last = 0;
  while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
	 || (GET_CODE (insn) == INSN
	     && (GET_CODE (PATTERN (insn)) == USE
#ifdef HAVE_cc0
		 || sets_cc0_p (PATTERN (insn))
#endif
	     ))
	 || GET_CODE (insn) == NOTE)
    {
      if (GET_CODE (insn) != NOTE)
	{
	  /* Chain each pinned insn to the one scheduled after it, so the
	     whole trailing run keeps its relative order.  */
	  if (last != 0
	      && !find_insn_list (insn, LOG_LINKS (last)))
	    {
	      add_dependence (last, insn, REG_DEP_ANTI);
	      INSN_REF_COUNT (insn)++;
	    }

	  CANT_MOVE (insn) = 1;

	  last = insn;
	  /* Skip over insns that are part of a group.
	     Make each insn explicitly depend on the previous insn.
	     This ensures that only the group header will ever enter
	     the ready queue (and, when scheduled, will automatically
	     schedule the SCHED_GROUP_P block).  */
	  while (SCHED_GROUP_P (insn))
	    {
	      rtx temp = prev_nonnote_insn (insn);
	      add_dependence (insn, temp, REG_DEP_ANTI);
	      insn = temp;
	    }
	}

      /* Don't overrun the bounds of the basic block.  */
      if (insn == head)
	break;

      insn = PREV_INSN (insn);
    }

  /* Make sure these insns are scheduled last in their block.  */
  insn = last;
  if (insn != 0)
    while (insn != head)
      {
	insn = prev_nonnote_insn (insn);

	/* An insn that already has a dependence recorded on it is fine;
	   only insns with no successors need an explicit anti-dependence
	   on the pinned tail.  */
	if (INSN_REF_COUNT (insn) != 0)
	  continue;

	add_dependence (last, insn, REG_DEP_ANTI);
	INSN_REF_COUNT (insn) = 1;

	/* Skip over insns that are part of a group.  */
	while (SCHED_GROUP_P (insn))
	  insn = prev_nonnote_insn (insn);
      }
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Compute backward dependences inside bb.  In a multiple blocks region:
   (1) a bb is analyzed after its predecessors, and (2) the lists in
   effect at the end of bb (after analyzing for bb) are inherited by
   bb's successors.

   Specifically for reg-reg data dependences, the block insns are
   scanned by sched_analyze () top-to-bottom.  Two lists are
   maintained by sched_analyze (): reg_last_sets[] for register DEFs,
   and reg_last_uses[] for register USEs.

   When analysis is completed for bb, we update for its successors:
   ;  - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
   ;  - USES[succ] = Union (USES [succ], DEFS [bb])

   The mechanism for computing mem-mem data dependence is very
   similar, and the result is interblock dependences in the region.  */

static void
compute_block_backward_dependences (bb)
     int bb;
{
  int b;
  rtx x;
  rtx head, tail;
  int max_reg = max_reg_num ();

  b = BB_TO_BLOCK (bb);

  if (current_nr_blocks == 1)
    {
      /* Single-block region: use local (alloca'd) dependence state
	 rather than the per-bb arrays, which don't exist here.  */
      reg_last_uses = (rtx *) alloca (max_reg * sizeof (rtx));
      reg_last_sets = (rtx *) alloca (max_reg * sizeof (rtx));
      reg_last_clobbers = (rtx *) alloca (max_reg * sizeof (rtx));

      bzero ((char *) reg_last_uses, max_reg * sizeof (rtx));
      bzero ((char *) reg_last_sets, max_reg * sizeof (rtx));
      bzero ((char *) reg_last_clobbers, max_reg * sizeof (rtx));

      pending_read_insns = 0;
      pending_read_mems = 0;
      pending_write_insns = 0;
      pending_write_mems = 0;
      pending_lists_length = 0;
      last_function_call = 0;
      last_pending_memory_flush = 0;
      /* Create an insn here so that we can hang dependencies off of it
	 later.  */
      sched_before_next_call
	= gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
			NULL_RTX, 0, NULL_RTX, NULL_RTX);
      LOG_LINKS (sched_before_next_call) = 0;
    }
  else
    {
      /* Multi-block region: point the working lists at this bb's slots,
	 which already hold state inherited from predecessors.  */
      reg_last_uses = bb_reg_last_uses[bb];
      reg_last_sets = bb_reg_last_sets[bb];
      reg_last_clobbers = bb_reg_last_clobbers[bb];

      pending_read_insns = bb_pending_read_insns[bb];
      pending_read_mems = bb_pending_read_mems[bb];
      pending_write_insns = bb_pending_write_insns[bb];
      pending_write_mems = bb_pending_write_mems[bb];
      pending_lists_length = bb_pending_lists_length[bb];
      last_function_call = bb_last_function_call[bb];
      last_pending_memory_flush = bb_last_pending_memory_flush[bb];

      sched_before_next_call = bb_sched_before_next_call[bb];
    }

  /* Do the analysis for this block.  */
  get_block_head_tail (bb, &head, &tail);
  sched_analyze (head, tail);
  add_branch_dependences (head, tail);

  if (current_nr_blocks > 1)
    {
      int e, first_edge;
      int b_succ, bb_succ;
      int reg;
      rtx link_insn, link_mem;
      rtx u;

      /* These lists should point to the right place, for correct
	 freeing later.  */
      bb_pending_read_insns[bb] = pending_read_insns;
      bb_pending_read_mems[bb] = pending_read_mems;
      bb_pending_write_insns[bb] = pending_write_insns;
      bb_pending_write_mems[bb] = pending_write_mems;

      /* bb's structures are inherited by its successors.  */
      first_edge = e = OUT_EDGES (b);
      if (e > 0)
	do
	  {
	    b_succ = TO_BLOCK (e);
	    bb_succ = BLOCK_TO_BB (b_succ);

	    /* Only bbs "below" bb, in the same region, are interesting.  */
	    if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
		|| bb_succ <= bb)
	      {
		e = NEXT_OUT (e);
		continue;
	      }

	    for (reg = 0; reg < max_reg; reg++)
	      {
		/* reg-last-uses lists are inherited by bb_succ.  */
		for (u = reg_last_uses[reg]; u; u = XEXP (u, 1))
		  {
		    if (find_insn_list (XEXP (u, 0),
					(bb_reg_last_uses[bb_succ])[reg]))
		      continue;

		    (bb_reg_last_uses[bb_succ])[reg]
		      = alloc_INSN_LIST (XEXP (u, 0),
					 (bb_reg_last_uses[bb_succ])[reg]);
		  }

		/* reg-last-defs lists are inherited by bb_succ.  */
		for (u = reg_last_sets[reg]; u; u = XEXP (u, 1))
		  {
		    if (find_insn_list (XEXP (u, 0),
					(bb_reg_last_sets[bb_succ])[reg]))
		      continue;

		    (bb_reg_last_sets[bb_succ])[reg]
		      = alloc_INSN_LIST (XEXP (u, 0),
					 (bb_reg_last_sets[bb_succ])[reg]);
		  }

		/* reg-last-clobbers lists are inherited the same way.  */
		for (u = reg_last_clobbers[reg]; u; u = XEXP (u, 1))
		  {
		    if (find_insn_list (XEXP (u, 0),
					(bb_reg_last_clobbers[bb_succ])[reg]))
		      continue;

		    (bb_reg_last_clobbers[bb_succ])[reg]
		      = alloc_INSN_LIST (XEXP (u, 0),
					 (bb_reg_last_clobbers[bb_succ])[reg]);
		  }
	      }

	    /* Mem read/write lists are inherited by bb_succ.  */
	    link_insn = pending_read_insns;
	    link_mem = pending_read_mems;
	    while (link_insn)
	      {
		if (!(find_insn_mem_list (XEXP (link_insn, 0),
					  XEXP (link_mem, 0),
					  bb_pending_read_insns[bb_succ],
					  bb_pending_read_mems[bb_succ])))
		  add_insn_mem_dependence (&bb_pending_read_insns[bb_succ],
					   &bb_pending_read_mems[bb_succ],
					   XEXP (link_insn, 0), XEXP (link_mem, 0));
		link_insn = XEXP (link_insn, 1);
		link_mem = XEXP (link_mem, 1);
	      }

	    link_insn = pending_write_insns;
	    link_mem = pending_write_mems;
	    while (link_insn)
	      {
		if (!(find_insn_mem_list (XEXP (link_insn, 0),
					  XEXP (link_mem, 0),
					  bb_pending_write_insns[bb_succ],
					  bb_pending_write_mems[bb_succ])))
		  add_insn_mem_dependence (&bb_pending_write_insns[bb_succ],
					   &bb_pending_write_mems[bb_succ],
					   XEXP (link_insn, 0), XEXP (link_mem, 0));

		link_insn = XEXP (link_insn, 1);
		link_mem = XEXP (link_mem, 1);
	      }

	    /* last_function_call is inherited by bb_succ.  */
	    for (u = last_function_call; u; u = XEXP (u, 1))
	      {
		if (find_insn_list (XEXP (u, 0),
				    bb_last_function_call[bb_succ]))
		  continue;

		bb_last_function_call[bb_succ]
		  = alloc_INSN_LIST (XEXP (u, 0),
				     bb_last_function_call[bb_succ]);
	      }

	    /* last_pending_memory_flush is inherited by bb_succ.  */
	    for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
	      {
		if (find_insn_list (XEXP (u, 0),
				    bb_last_pending_memory_flush[bb_succ]))
		  continue;

		bb_last_pending_memory_flush[bb_succ]
		  = alloc_INSN_LIST (XEXP (u, 0),
				     bb_last_pending_memory_flush[bb_succ]);
	      }

	    /* sched_before_next_call is inherited by bb_succ.  */
	    x = LOG_LINKS (sched_before_next_call);
	    for (; x; x = XEXP (x, 1))
	      add_dependence (bb_sched_before_next_call[bb_succ],
			      XEXP (x, 0), REG_DEP_ANTI);

	    e = NEXT_OUT (e);
	  }
	while (e != first_edge);
    }

  /* Free up the INSN_LISTs.

     Note this loop is executed max_reg * nr_regions times.  Its first
     implementation accounted for over 90% of the calls to free_INSN_LIST_list.
     The list was empty for the vast majority of those calls.  On the PA, not
     calling free_INSN_LIST_list in those cases improves -O2 compile times by
     3-5% on average.  */
  for (b = 0; b < max_reg; ++b)
    {
      if (reg_last_clobbers[b])
	free_INSN_LIST_list (&reg_last_clobbers[b]);
      if (reg_last_sets[b])
	free_INSN_LIST_list (&reg_last_sets[b]);
      if (reg_last_uses[b])
	free_INSN_LIST_list (&reg_last_uses[b]);
    }

  /* Assert that we won't need bb_reg_last_* for this block anymore.  */
  if (current_nr_blocks > 1)
    {
      bb_reg_last_uses[bb] = (rtx *) NULL_RTX;
      bb_reg_last_sets[bb] = (rtx *) NULL_RTX;
      bb_reg_last_clobbers[bb] = (rtx *) NULL_RTX;
    }
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Print dependences for debugging, callable from debugger. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
debug_dependencies ()
|
|
|
|
|
{
|
|
|
|
|
int bb;
|
|
|
|
|
|
|
|
|
|
fprintf (dump, ";; --------------- forward dependences: ------------ \n");
|
|
|
|
|
for (bb = 0; bb < current_nr_blocks; bb++)
|
|
|
|
|
{
|
|
|
|
|
if (1)
|
|
|
|
|
{
|
|
|
|
|
rtx head, tail;
|
|
|
|
|
rtx next_tail;
|
|
|
|
|
rtx insn;
|
|
|
|
|
|
|
|
|
|
get_block_head_tail (bb, &head, &tail);
|
|
|
|
|
next_tail = NEXT_INSN (tail);
|
|
|
|
|
fprintf (dump, "\n;; --- Region Dependences --- b %d bb %d \n",
|
|
|
|
|
BB_TO_BLOCK (bb), bb);
|
|
|
|
|
|
|
|
|
|
fprintf (dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
|
|
|
|
|
"insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
|
|
|
|
|
fprintf (dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
|
|
|
|
|
"----", "----", "--", "---", "----", "----", "--------", "-----");
|
|
|
|
|
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
|
|
|
|
|
{
|
|
|
|
|
rtx link;
|
|
|
|
|
int unit, range;
|
|
|
|
|
|
|
|
|
|
if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
|
|
|
|
|
{
|
|
|
|
|
int n;
|
|
|
|
|
fprintf (dump, ";; %6d ", INSN_UID (insn));
|
|
|
|
|
if (GET_CODE (insn) == NOTE)
|
1997-08-15 19:48:56 +02:00
|
|
|
|
{
|
|
|
|
|
n = NOTE_LINE_NUMBER (insn);
|
|
|
|
|
if (n < 0)
|
|
|
|
|
fprintf (dump, "%s\n", GET_NOTE_INSN_NAME (n));
|
|
|
|
|
else
|
|
|
|
|
fprintf (dump, "line %d, file %s\n", n,
|
|
|
|
|
NOTE_SOURCE_FILE (insn));
|
|
|
|
|
}
|
|
|
|
|
else
|
1997-08-19 20:02:21 +02:00
|
|
|
|
fprintf (dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unit = insn_unit (insn);
|
|
|
|
|
range = (unit < 0
|
|
|
|
|
|| function_units[unit].blockage_range_function == 0) ? 0 :
|
|
|
|
|
function_units[unit].blockage_range_function (insn);
|
|
|
|
|
fprintf (dump,
|
|
|
|
|
";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
|
|
|
|
|
(SCHED_GROUP_P (insn) ? "+" : " "),
|
|
|
|
|
INSN_UID (insn),
|
|
|
|
|
INSN_CODE (insn),
|
|
|
|
|
INSN_BB (insn),
|
|
|
|
|
INSN_DEP_COUNT (insn),
|
|
|
|
|
INSN_PRIORITY (insn),
|
|
|
|
|
insn_cost (insn, 0, 0),
|
|
|
|
|
(int) MIN_BLOCKAGE_COST (range),
|
|
|
|
|
(int) MAX_BLOCKAGE_COST (range));
|
|
|
|
|
insn_print_units (insn);
|
|
|
|
|
fprintf (dump, "\t: ");
|
|
|
|
|
for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
|
|
|
|
|
fprintf (dump, "%d ", INSN_UID (XEXP (link, 0)));
|
|
|
|
|
fprintf (dump, "\n");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
fprintf (dump, "\n");
|
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Set_priorities: compute priority of each insn in the block. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
set_priorities (bb)
|
|
|
|
|
int bb;
|
|
|
|
|
{
|
|
|
|
|
rtx insn;
|
|
|
|
|
int n_insn;
|
|
|
|
|
|
|
|
|
|
rtx tail;
|
|
|
|
|
rtx prev_head;
|
|
|
|
|
rtx head;
|
|
|
|
|
|
|
|
|
|
get_block_head_tail (bb, &head, &tail);
|
|
|
|
|
prev_head = PREV_INSN (head);
|
|
|
|
|
|
|
|
|
|
if (head == tail
|
|
|
|
|
&& (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
n_insn = 0;
|
|
|
|
|
for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
|
|
|
|
|
{
|
|
|
|
|
|
|
|
|
|
if (GET_CODE (insn) == NOTE)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (!(SCHED_GROUP_P (insn)))
|
|
|
|
|
n_insn++;
|
|
|
|
|
(void) priority (insn);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return n_insn;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Make each element of VECTOR point at an rtx-vector,
|
|
|
|
|
taking the space for all those rtx-vectors from SPACE.
|
|
|
|
|
SPACE is of type (rtx *), but it is really as long as NELTS rtx-vectors.
|
|
|
|
|
BYTES_PER_ELT is the number of bytes in one rtx-vector.
|
1999-09-06 23:55:23 +02:00
|
|
|
|
(this is the same as init_regset_vector () in flow.c) */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
init_rtx_vector (vector, space, nelts, bytes_per_elt)
|
|
|
|
|
rtx **vector;
|
|
|
|
|
rtx *space;
|
|
|
|
|
int nelts;
|
|
|
|
|
int bytes_per_elt;
|
|
|
|
|
{
|
|
|
|
|
register int i;
|
|
|
|
|
register rtx *p = space;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < nelts; i++)
|
|
|
|
|
{
|
|
|
|
|
vector[i] = p;
|
|
|
|
|
p += bytes_per_elt / sizeof (*p);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Schedule a region.  A region is either an inner loop, a loop-free
   subroutine, or a single basic block.  Each bb in the region is
   scheduled after its flow predecessors.

   NOTE: all per-region scratch storage below is alloca'd, so it lives
   only for the duration of this call.  */

static void
schedule_region (rgn)
     int rgn;
{
  int bb;
  int rgn_n_insns = 0;
  int sched_rgn_n_insns = 0;
  int initial_deaths;
  sbitmap blocks;

  /* Set variables for the current region.  */
  current_nr_blocks = RGN_NR_BLOCKS (rgn);
  current_blocks = RGN_BLOCKS (rgn);

  reg_pending_sets = ALLOCA_REG_SET ();
  reg_pending_clobbers = ALLOCA_REG_SET ();
  reg_pending_sets_all = 0;

  /* Create a bitmap of the blocks in this region.  */
  blocks = sbitmap_alloc (n_basic_blocks);
  sbitmap_zero (blocks);

  for (bb = current_nr_blocks - 1; bb >= 0; --bb)
    SET_BIT (blocks, BB_TO_BLOCK (bb));

  /* Initializations for region data dependence analysis.  */
  if (current_nr_blocks > 1)
    {
      rtx *space;
      int maxreg = max_reg_num ();

      /* Each bb_reg_last_* table is one flat alloca block carved into
	 per-bb rows by init_rtx_vector.  */
      bb_reg_last_uses = (rtx **) alloca (current_nr_blocks * sizeof (rtx *));
      space = (rtx *) alloca (current_nr_blocks * maxreg * sizeof (rtx));
      bzero ((char *) space, current_nr_blocks * maxreg * sizeof (rtx));
      init_rtx_vector (bb_reg_last_uses, space, current_nr_blocks,
		       maxreg * sizeof (rtx *));

      bb_reg_last_sets = (rtx **) alloca (current_nr_blocks * sizeof (rtx *));
      space = (rtx *) alloca (current_nr_blocks * maxreg * sizeof (rtx));
      bzero ((char *) space, current_nr_blocks * maxreg * sizeof (rtx));
      init_rtx_vector (bb_reg_last_sets, space, current_nr_blocks,
		       maxreg * sizeof (rtx *));

      bb_reg_last_clobbers =
	(rtx **) alloca (current_nr_blocks * sizeof (rtx *));
      space = (rtx *) alloca (current_nr_blocks * maxreg * sizeof (rtx));
      bzero ((char *) space, current_nr_blocks * maxreg * sizeof (rtx));
      init_rtx_vector (bb_reg_last_clobbers, space, current_nr_blocks,
		       maxreg * sizeof (rtx *));

      bb_pending_read_insns = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
      bb_pending_read_mems = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
      bb_pending_write_insns =
	(rtx *) alloca (current_nr_blocks * sizeof (rtx));
      bb_pending_write_mems = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
      bb_pending_lists_length =
	(int *) alloca (current_nr_blocks * sizeof (int));
      bb_last_pending_memory_flush =
	(rtx *) alloca (current_nr_blocks * sizeof (rtx));
      bb_last_function_call = (rtx *) alloca (current_nr_blocks * sizeof (rtx));
      bb_sched_before_next_call =
	(rtx *) alloca (current_nr_blocks * sizeof (rtx));

      init_rgn_data_dependences (current_nr_blocks);
    }

  /* Compute LOG_LINKS.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    compute_block_backward_dependences (bb);

  /* Compute INSN_DEPEND.  */
  for (bb = current_nr_blocks - 1; bb >= 0; bb--)
    compute_block_forward_dependences (bb);

  /* Compute INSN_REG_WEIGHT.  */
  for (bb = current_nr_blocks - 1; bb >= 0; bb--)
    find_insn_reg_weight (bb);

  /* Remove death notes.  */
  initial_deaths = count_or_remove_death_notes (blocks, 1);

  /* Delete line notes and set priorities.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      if (write_symbols != NO_DEBUG)
	{
	  save_line_notes (bb);
	  rm_line_notes (bb);
	}

      rgn_n_insns += set_priorities (bb);
    }

  /* Compute interblock info: probabilities, split-edges, dominators, etc.  */
  if (current_nr_blocks > 1)
    {
      int i;

      prob = (float *) alloca ((current_nr_blocks) * sizeof (float));

      bbset_size = current_nr_blocks / HOST_BITS_PER_WIDE_INT + 1;
      dom = (bbset *) alloca (current_nr_blocks * sizeof (bbset));
      for (i = 0; i < current_nr_blocks; i++)
	{
	  dom[i] = (bbset) alloca (bbset_size * sizeof (HOST_WIDE_INT));
	  bzero ((char *) dom[i], bbset_size * sizeof (HOST_WIDE_INT));
	}

      /* Edge to bit.  */
      rgn_nr_edges = 0;
      edge_to_bit = (int *) alloca (nr_edges * sizeof (int));
      for (i = 1; i < nr_edges; i++)
	if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
	  EDGE_TO_BIT (i) = rgn_nr_edges++;
      rgn_edges = (int *) alloca (rgn_nr_edges * sizeof (int));

      rgn_nr_edges = 0;
      for (i = 1; i < nr_edges; i++)
	if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
	  rgn_edges[rgn_nr_edges++] = i;

      /* Split edges.  */
      edgeset_size = rgn_nr_edges / HOST_BITS_PER_WIDE_INT + 1;
      pot_split = (edgeset *) alloca (current_nr_blocks * sizeof (edgeset));
      ancestor_edges = (edgeset *) alloca (current_nr_blocks
					   * sizeof (edgeset));
      for (i = 0; i < current_nr_blocks; i++)
	{
	  pot_split[i] =
	    (edgeset) alloca (edgeset_size * sizeof (HOST_WIDE_INT));
	  bzero ((char *) pot_split[i],
		 edgeset_size * sizeof (HOST_WIDE_INT));
	  ancestor_edges[i] =
	    (edgeset) alloca (edgeset_size * sizeof (HOST_WIDE_INT));
	  bzero ((char *) ancestor_edges[i],
		 edgeset_size * sizeof (HOST_WIDE_INT));
	}

      /* Compute probabilities, dominators, split_edges.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
	compute_dom_prob_ps (bb);
    }

  /* Now we can schedule all blocks.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      sched_rgn_n_insns += schedule_block (bb, rgn_n_insns);

#ifdef USE_C_ALLOCA
      alloca (0);
#endif
    }

  /* Sanity check: verify that all region insns were scheduled.  */
  if (sched_rgn_n_insns != rgn_n_insns)
    abort ();

  /* Update register life and usage information.  Scheduling a multi-block
     region requires a global update.  */
  if (current_nr_blocks > 1)
    update_life_info (blocks, UPDATE_LIFE_GLOBAL);
  else
    {
      update_life_info (blocks, UPDATE_LIFE_LOCAL);

      /* In the single block case, the count of registers that died should
	 not have changed during the schedule.  */
      if (count_or_remove_death_notes (blocks, 0) != initial_deaths)
	abort ();
    }

  /* Restore line notes.  */
  if (write_symbols != NO_DEBUG)
    {
      for (bb = 0; bb < current_nr_blocks; bb++)
	restore_line_notes (bb);
    }

  /* Done with this region.  */
  free_pending_lists ();

  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  sbitmap_free (blocks);
}
|
|
|
|
|
|
|
|
|
|
/* The one entry point in this file. DUMP_FILE is the dump file for
|
|
|
|
|
this pass. */
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
schedule_insns (dump_file)
|
|
|
|
|
FILE *dump_file;
|
|
|
|
|
{
|
|
|
|
|
|
|
|
|
|
int max_uid;
|
|
|
|
|
int b;
|
|
|
|
|
rtx insn;
|
|
|
|
|
int rgn;
|
|
|
|
|
|
|
|
|
|
int luid;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Disable speculative loads in their presence if cc0 defined. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
#ifdef HAVE_cc0
|
|
|
|
|
flag_schedule_speculative_load = 0;
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
/* Taking care of this degenerate case makes the rest of
|
|
|
|
|
this code simpler. */
|
|
|
|
|
if (n_basic_blocks == 0)
|
|
|
|
|
return;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Set dump and sched_verbose for the desired debugging output. If no
|
1997-08-12 06:07:19 +02:00
|
|
|
|
dump-file was specified, but -fsched-verbose-N (any N), print to stderr.
|
|
|
|
|
For -fsched-verbose-N, N>=10, print everything to stderr. */
|
|
|
|
|
sched_verbose = sched_verbose_param;
|
|
|
|
|
if (sched_verbose_param == 0 && dump_file)
|
|
|
|
|
sched_verbose = 1;
|
|
|
|
|
dump = ((sched_verbose_param >= 10 || !dump_file) ? stderr : dump_file);
|
|
|
|
|
|
|
|
|
|
nr_inter = 0;
|
|
|
|
|
nr_spec = 0;
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Initialize issue_rate. */
|
1997-08-19 23:22:04 +02:00
|
|
|
|
issue_rate = ISSUE_RATE;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
Makefile.in (flow.o): Depend on TREE_H.
* Makefile.in (flow.o): Depend on TREE_H.
* basic-block.h (REG_SET_EQUAL_P): New.
(XOR_REG_SET): New.
(n_edges): Declare.
(free_regset_vector): Remove declaration.
(flow_delete_insn_chain): Declare.
(enum update_life_extent): New.
(update_life_info, count_or_remove_death_notes): Declare.
* combine.c (distribute_notes) [REG_DEAD]: Stop search at bb->head.
Verify register live at bb->global_live_at_start before adding USE.
* flow.c (HAVE_epilogue, HAVE_prologue): Provide default.
(CLEAN_ALLOCA): New.
(n_edges): New.
(PROP_*): New flags.
(find_basic_blocks_1): Use alloc_EXPR_LIST.
(clear_edges): Zero n_edges.
(make_edge): Increment n_edges.
(split_edge): Don't allocate bb->local_set. Increment n_edges.
(flow_delete_insn_chain): Export.
(delete_block): Decrement n_edges.
(merge_blocks_nomove): Likewise.
(life_analysis): Give life_analysis_1 PROP flags.
(verify_wide_reg_1, verify_wide_reg): New.
(verify_local_live_at_start): New.
(update_life_info): Rewrite to call into propogate_block.
(mark_reg): New.
(mark_regs_live_at_end): After reload, if epilogue as rtl,
always mark stack pointer. Conditionally mark PIC register.
After reload, mark call-saved registers, return regsiters.
(life_analysis_1): Accept PROP flags not remove_dead_code.
Call mark_regs_live_at_end before zeroing regs_ever_live.
Use calculate_global_regs_live. Copy global_live_at_end before
calling final propagate_block. Zero reg_next_use on exit.
(calculate_global_regs_live): New.
(allocate_bb_life_data): Don't allocate bb->local_set.
(init_regset_vector, free_regset_vector): Remove.
(propagate_block): Accept FLAGS not FINAL or REMOVE_DEAD_CODE.
Test flags before every operation. Warn if prologue/epilogue insn
would have been deleted.
(mark_set_regs, mark_set_1): Accept and use FLAGS.
Use alloc_EXPR_LIST.
(mark_used_regs): Accept and use FLAGS, not FINAL.
Remove special handling for RETURN.
(try_pre_increment): Use alloc_EXPR_LIST.
(dump_flow_info): Dump n_edges.
(unlink_insn_chain, split_hard_reg_notes): Remove.
(maybe_add_dead_note, maybe_add_dead_note_use): Remove.
(find_insn_with_note, new_insn_dead_notes): Remove.
(update_n_sets, sets_reg_or_subreg_1, sets_reg_or_subreg): Remove.
(maybe_remove_dead_notes, prepend_reg_notes): Remove.
(replace_insns): Remove.
(count_or_remove_death_notes): New.
(verify_flow_info): Abort on error after all checks.
(remove_edge): Decrement n_edges.
(remove_fake_edges): Tweek format.
* haifa-sched.c (schedule_insns): Use split_all_insns.
* output.h (update_life_info): Remove declaration.
* recog.c (split_all_insns): From the corpse of split_block_insns,
do the whole function block by block. Use update_life_info.
(recog_last_allowed_insn): New.
(recog_next_insn): Mind it.
(peephole2_optimize): Set it. Walk backwards through blocks.
Use update_life_info.
* rtl.h (update_flow_info, replace_insns): Remove declarations.
(split_all_insns): Declare.
* toplev.c (rest_of_compilation): Thread prologue before flow2.
Use split_all_insns.
* i386.md (or -1 peep2s): Disable.
From-SVN: r29877
1999-10-09 21:47:18 +02:00
|
|
|
|
split_all_insns (1);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-10-19 00:20:27 +02:00
|
|
|
|
/* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
|
|
|
|
|
pseudos which do not cross calls. */
|
|
|
|
|
max_uid = get_max_uid () + 1;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
c-aux-info.c (concat): Don't define.
* c-aux-info.c (concat): Don't define.
* cccp.c (my_strerror): Likewise. All callers changed to use
xstrerror instead.
(do_include): Call xstrdup, not xmalloc/strcpy.
(grow_outbuf): Don't check if xrealloc returns NULL, it can't.
(xmalloc, xrealloc, xcalloc, xstrdup): Don't define.
* collect2.c (my_strsignal): Likewise. All callers changed to use
strsignal instead.
(locatelib): Call xstrdup, not xmalloc/strcpy.
* 1750a.h (ASM_OUTPUT_INTERNAL_LABEL): Call xmalloc, not malloc.
* dsp16xx.c (override_options): Call xstrdup, not xmalloc/strcpy.
* i370.h (ASM_DECLARE_FUNCTION_NAME): Call xmalloc, not malloc.
* mips.c (build_mips16_call_stub): Call xstrdup, not xmalloc/strcpy.
* cppinit.c (cpp_options_init): Call xcalloc, not xmalloc/bzero.
* dwarfout.c (dwarfout_init): Call concat, not xmalloc/strcpy/...
* except.c (new_eh_region_entry): Call xmalloc/xrealloc, not
malloc/realloc.
(find_all_handler_type_matches): Likewise. Don't check return
value.
(get_new_handler, init_insn_eh_region, process_nestinfo): Call
xmalloc, not malloc.
(init_eh_nesting_info): Likewise. Call xcalloc, not xmalloc/bzero.
* gcc.c (xstrerror, xmalloc, xrealloc): Don't define.
(init_spec): Call xcalloc, not xmalloc/bzero.
(set_spec): Call xstrdup, not save_string.
(record_temp_file): Call xstrdup, not xmalloc/strcpy.
(find_a_file): Call xstrdup, not xmalloc/strcpy.
(process_command): Call xstrdup, not save_string.
(main): Call xcalloc, not xmalloc/bzero.
* gcov.c (xmalloc): Don't define.
(create_program_flow_graph): Call xcalloc, not xmalloc/bzero.
(scan_for_source_files): Call xstrdup, not xmalloc/strcpy.
(output_data): Call xcalloc, not xmalloc/bzero.
* haifa-sched.c (schedule_insns): Call xcalloc, not xmalloc/bzero.
* mips-tdump.c (xmalloc): Don't define.
(print_symbol): Call xmalloc, not malloc.
(read_tfile): Call xcalloc, not calloc.
* mips-tfile.c (xfree, my_strsignal, xmalloc, xcalloc, xrealloc):
Don't define. All callers of xfree/my_strsignal changed to use
free/strsignal instead.
(allocate_cluster): Call xcalloc, not calloc.
* objc/objc-act.c (lang_init): Call concat, not xmalloc/strcpy/...
Fix memory leak, free allocated memory.
* prefix.c (translate_name): Call xstrdup, not save_string.
(update_path): Likewise.
* profile.c (branch_prob): Call xstrdup, not xmalloc/strcpy.
* protoize.c (xstrerror, xmalloc, xrealloc, xfree, savestring2):
Don't define. Callers of xfree/savestring2 changed to use
free/concat instead.
* reload1.c (reload): Call xcalloc, not xmalloc/bzero.
(init_elim_table): Likewise.
* resource.c (init_resource_info): Likewise.
* stupid.c (stupid_life_analysis): Likewise.
* toplev.c (xmalloc, xcalloc, xrealloc, xstrdup): Don't define.
(open_dump_file): Call concat, not xmalloc/strcpy/...
(clean_dump_file): Likewise.
(compile_file): Call xstrdup, not xmalloc/strcpy.
From-SVN: r29148
1999-09-07 04:36:41 +02:00
|
|
|
|
cant_move = xcalloc (max_uid, sizeof (char));
|
|
|
|
|
fed_by_spec_load = xcalloc (max_uid, sizeof (char));
|
|
|
|
|
is_load_insn = xcalloc (max_uid, sizeof (char));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-06-18 22:17:26 +02:00
|
|
|
|
insn_luid = (int *) xmalloc (max_uid * sizeof (int));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-10-17 08:28:22 +02:00
|
|
|
|
insn_luid[0] = 0;
|
|
|
|
|
luid = 1;
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (b = 0; b < n_basic_blocks; b++)
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
|
|
|
|
INSN_LUID (insn) = luid++;
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
if (insn == BLOCK_END (b))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
break;
|
|
|
|
|
}
|
1999-10-17 08:28:22 +02:00
|
|
|
|
|
|
|
|
|
/* ?!? We could save some memory by computing a per-region luid mapping
|
|
|
|
|
which could reduce both the number of vectors in the cache and the size
|
1999-10-17 23:27:56 +02:00
|
|
|
|
of each vector. Instead we just avoid the cache entirely unless the
|
|
|
|
|
average number of instructions in a basic block is very high. See
|
|
|
|
|
the comment before the declaration of true_dependency_cache for
|
|
|
|
|
what we consider "very high". */
|
|
|
|
|
if (luid / n_basic_blocks > 100 * 5)
|
|
|
|
|
{
|
|
|
|
|
true_dependency_cache = sbitmap_vector_alloc (luid, luid);
|
|
|
|
|
sbitmap_vector_zero (true_dependency_cache, luid);
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
nr_regions = 0;
|
|
|
|
|
rgn_table = (region *) alloca ((n_basic_blocks) * sizeof (region));
|
|
|
|
|
rgn_bb_table = (int *) alloca ((n_basic_blocks) * sizeof (int));
|
|
|
|
|
block_to_bb = (int *) alloca ((n_basic_blocks) * sizeof (int));
|
|
|
|
|
containing_rgn = (int *) alloca ((n_basic_blocks) * sizeof (int));
|
|
|
|
|
|
1999-10-19 00:20:27 +02:00
|
|
|
|
compute_bb_for_insn (max_uid);
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Compute regions for scheduling. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (reload_completed
|
|
|
|
|
|| n_basic_blocks == 1
|
|
|
|
|
|| !flag_schedule_interblock)
|
|
|
|
|
{
|
|
|
|
|
find_single_block_region ();
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Verify that a 'good' control flow graph can be built. */
|
1998-03-08 03:15:26 +01:00
|
|
|
|
if (is_cfg_nonregular ())
|
1997-08-12 06:07:19 +02:00
|
|
|
|
{
|
|
|
|
|
find_single_block_region ();
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
1998-04-17 00:00:09 +02:00
|
|
|
|
int_list_ptr *s_preds, *s_succs;
|
|
|
|
|
int *num_preds, *num_succs;
|
|
|
|
|
sbitmap *dom, *pdom;
|
|
|
|
|
|
|
|
|
|
s_preds = (int_list_ptr *) alloca (n_basic_blocks
|
|
|
|
|
* sizeof (int_list_ptr));
|
|
|
|
|
s_succs = (int_list_ptr *) alloca (n_basic_blocks
|
|
|
|
|
* sizeof (int_list_ptr));
|
|
|
|
|
num_preds = (int *) alloca (n_basic_blocks * sizeof (int));
|
|
|
|
|
num_succs = (int *) alloca (n_basic_blocks * sizeof (int));
|
|
|
|
|
dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
|
|
|
|
|
pdom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
|
|
|
|
|
|
|
|
|
|
/* The scheduler runs after flow; therefore, we can't blindly call
|
|
|
|
|
back into find_basic_blocks since doing so could invalidate the
|
1999-02-26 00:45:42 +01:00
|
|
|
|
info in global_live_at_start.
|
1998-04-17 00:00:09 +02:00
|
|
|
|
|
|
|
|
|
Consider a block consisting entirely of dead stores; after life
|
|
|
|
|
analysis it would be a block of NOTE_INSN_DELETED notes. If
|
|
|
|
|
we call find_basic_blocks again, then the block would be removed
|
|
|
|
|
entirely and invalidate our the register live information.
|
|
|
|
|
|
|
|
|
|
We could (should?) recompute register live information. Doing
|
|
|
|
|
so may even be beneficial. */
|
|
|
|
|
|
1998-05-13 18:52:15 +02:00
|
|
|
|
compute_preds_succs (s_preds, s_succs, num_preds, num_succs);
|
1998-04-17 00:00:09 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Compute the dominators and post dominators. We don't
|
|
|
|
|
currently use post dominators, but we should for
|
|
|
|
|
speculative motion analysis. */
|
1998-04-17 00:00:09 +02:00
|
|
|
|
compute_dominators (dom, pdom, s_preds, s_succs);
|
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
/* build_control_flow will return nonzero if it detects unreachable
|
|
|
|
|
blocks or any other irregularity with the cfg which prevents
|
|
|
|
|
cross block scheduling. */
|
1998-04-17 00:00:09 +02:00
|
|
|
|
if (build_control_flow (s_preds, s_succs, num_preds, num_succs) != 0)
|
1998-03-08 03:15:26 +01:00
|
|
|
|
find_single_block_region ();
|
|
|
|
|
else
|
1998-04-17 00:00:09 +02:00
|
|
|
|
find_rgns (s_preds, s_succs, num_preds, num_succs, dom);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
if (sched_verbose >= 3)
|
1998-04-17 00:00:09 +02:00
|
|
|
|
debug_regions ();
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1998-04-17 00:00:09 +02:00
|
|
|
|
/* For now. This will move as more and more of haifa is converted
|
1999-09-06 23:55:23 +02:00
|
|
|
|
to using the cfg code in flow.c. */
|
1998-04-17 00:00:09 +02:00
|
|
|
|
free_bb_mem ();
|
|
|
|
|
free (dom);
|
|
|
|
|
free (pdom);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Allocate data for this pass. See comments, above,
|
1998-06-18 22:17:26 +02:00
|
|
|
|
for what these vectors do.
|
|
|
|
|
|
|
|
|
|
We use xmalloc instead of alloca, because max_uid can be very large
|
|
|
|
|
when there is a lot of function inlining. If we used alloca, we could
|
|
|
|
|
exceed stack limits on some hosts for some inputs. */
|
c-aux-info.c (concat): Don't define.
* c-aux-info.c (concat): Don't define.
* cccp.c (my_strerror): Likewise. All callers changed to use
xstrerror instead.
(do_include): Call xstrdup, not xmalloc/strcpy.
(grow_outbuf): Don't check if xrealloc returns NULL, it can't.
(xmalloc, xrealloc, xcalloc, xstrdup): Don't define.
* collect2.c (my_strsignal): Likewise. All callers changed to use
strsignal instead.
(locatelib): Call xstrdup, not xmalloc/strcpy.
* 1750a.h (ASM_OUTPUT_INTERNAL_LABEL): Call xmalloc, not malloc.
* dsp16xx.c (override_options): Call xstrdup, not xmalloc/strcpy.
* i370.h (ASM_DECLARE_FUNCTION_NAME): Call xmalloc, not malloc.
* mips.c (build_mips16_call_stub): Call xstrdup, not xmalloc/strcpy.
* cppinit.c (cpp_options_init): Call xcalloc, not xmalloc/bzero.
* dwarfout.c (dwarfout_init): Call concat, not xmalloc/strcpy/...
* except.c (new_eh_region_entry): Call xmalloc/xrealloc, not
malloc/realloc.
(find_all_handler_type_matches): Likewise. Don't check return
value.
(get_new_handler, init_insn_eh_region, process_nestinfo): Call
xmalloc, not malloc.
(init_eh_nesting_info): Likewise. Call xcalloc, not xmalloc/bzero.
* gcc.c (xstrerror, xmalloc, xrealloc): Don't define.
(init_spec): Call xcalloc, not xmalloc/bzero.
(set_spec): Call xstrdup, not save_string.
(record_temp_file): Call xstrdup, not xmalloc/strcpy.
(find_a_file): Call xstrdup, not xmalloc/strcpy.
(process_command): Call xstrdup, not save_string.
(main): Call xcalloc, not xmalloc/bzero.
* gcov.c (xmalloc): Don't define.
(create_program_flow_graph): Call xcalloc, not xmalloc/bzero.
(scan_for_source_files): Call xstrdup, not xmalloc/strcpy.
(output_data): Call xcalloc, not xmalloc/bzero.
* haifa-sched.c (schedule_insns): Call xcalloc, not xmalloc/bzero.
* mips-tdump.c (xmalloc): Don't define.
(print_symbol): Call xmalloc, not malloc.
(read_tfile): Call xcalloc, not calloc.
* mips-tfile.c (xfree, my_strsignal, xmalloc, xcalloc, xrealloc):
Don't define. All callers of xfree/my_strsignal changed to use
free/strsignal instead.
(allocate_cluster): Call xcalloc, not calloc.
* objc/objc-act.c (lang_init): Call concat, not xmalloc/strcpy/...
Fix memory leak, free allocated memory.
* prefix.c (translate_name): Call xstrdup, not save_string.
(update_path): Likewise.
* profile.c (branch_prob): Call xstrdup, not xmalloc/strcpy.
* protoize.c (xstrerror, xmalloc, xrealloc, xfree, savestring2):
Don't define. Callers of xfree/savestring2 changed to use
free/concat instead.
* reload1.c (reload): Call xcalloc, not xmalloc/bzero.
(init_elim_table): Likewise.
* resource.c (init_resource_info): Likewise.
* stupid.c (stupid_life_analysis): Likewise.
* toplev.c (xmalloc, xcalloc, xrealloc, xstrdup): Don't define.
(open_dump_file): Call concat, not xmalloc/strcpy/...
(clean_dump_file): Likewise.
(compile_file): Call xstrdup, not xmalloc/strcpy.
From-SVN: r29148
1999-09-07 04:36:41 +02:00
|
|
|
|
insn_priority = (int *) xcalloc (max_uid, sizeof (int));
|
|
|
|
|
insn_reg_weight = (int *) xcalloc (max_uid, sizeof (int));
|
|
|
|
|
insn_tick = (int *) xcalloc (max_uid, sizeof (int));
|
|
|
|
|
insn_costs = (short *) xcalloc (max_uid, sizeof (short));
|
|
|
|
|
insn_units = (short *) xcalloc (max_uid, sizeof (short));
|
|
|
|
|
insn_blockage = (unsigned int *) xcalloc (max_uid, sizeof (unsigned int));
|
|
|
|
|
insn_ref_count = (int *) xcalloc (max_uid, sizeof (int));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Allocate for forward dependencies. */
|
c-aux-info.c (concat): Don't define.
* c-aux-info.c (concat): Don't define.
* cccp.c (my_strerror): Likewise. All callers changed to use
xstrerror instead.
(do_include): Call xstrdup, not xmalloc/strcpy.
(grow_outbuf): Don't check if xrealloc returns NULL, it can't.
(xmalloc, xrealloc, xcalloc, xstrdup): Don't define.
* collect2.c (my_strsignal): Likewise. All callers changed to use
strsignal instead.
(locatelib): Call xstrdup, not xmalloc/strcpy.
* 1750a.h (ASM_OUTPUT_INTERNAL_LABEL): Call xmalloc, not malloc.
* dsp16xx.c (override_options): Call xstrdup, not xmalloc/strcpy.
* i370.h (ASM_DECLARE_FUNCTION_NAME): Call xmalloc, not malloc.
* mips.c (build_mips16_call_stub): Call xstrdup, not xmalloc/strcpy.
* cppinit.c (cpp_options_init): Call xcalloc, not xmalloc/bzero.
* dwarfout.c (dwarfout_init): Call concat, not xmalloc/strcpy/...
* except.c (new_eh_region_entry): Call xmalloc/xrealloc, not
malloc/realloc.
(find_all_handler_type_matches): Likewise. Don't check return
value.
(get_new_handler, init_insn_eh_region, process_nestinfo): Call
xmalloc, not malloc.
(init_eh_nesting_info): Likewise. Call xcalloc, not xmalloc/bzero.
* gcc.c (xstrerror, xmalloc, xrealloc): Don't define.
(init_spec): Call xcalloc, not xmalloc/bzero.
(set_spec): Call xstrdup, not save_string.
(record_temp_file): Call xstrdup, not xmalloc/strcpy.
(find_a_file): Call xstrdup, not xmalloc/strcpy.
(process_command): Call xstrdup, not save_string.
(main): Call xcalloc, not xmalloc/bzero.
* gcov.c (xmalloc): Don't define.
(create_program_flow_graph): Call xcalloc, not xmalloc/bzero.
(scan_for_source_files): Call xstrdup, not xmalloc/strcpy.
(output_data): Call xcalloc, not xmalloc/bzero.
* haifa-sched.c (schedule_insns): Call xcalloc, not xmalloc/bzero.
* mips-tdump.c (xmalloc): Don't define.
(print_symbol): Call xmalloc, not malloc.
(read_tfile): Call xcalloc, not calloc.
* mips-tfile.c (xfree, my_strsignal, xmalloc, xcalloc, xrealloc):
Don't define. All callers of xfree/my_strsignal changed to use
free/strsignal instead.
(allocate_cluster): Call xcalloc, not calloc.
* objc/objc-act.c (lang_init): Call concat, not xmalloc/strcpy/...
Fix memory leak, free allocated memory.
* prefix.c (translate_name): Call xstrdup, not save_string.
(update_path): Likewise.
* profile.c (branch_prob): Call xstrdup, not xmalloc/strcpy.
* protoize.c (xstrerror, xmalloc, xrealloc, xfree, savestring2):
Don't define. Callers of xfree/savestring2 changed to use
free/concat instead.
* reload1.c (reload): Call xcalloc, not xmalloc/bzero.
(init_elim_table): Likewise.
* resource.c (init_resource_info): Likewise.
* stupid.c (stupid_life_analysis): Likewise.
* toplev.c (xmalloc, xcalloc, xrealloc, xstrdup): Don't define.
(open_dump_file): Call concat, not xmalloc/strcpy/...
(clean_dump_file): Likewise.
(compile_file): Call xstrdup, not xmalloc/strcpy.
From-SVN: r29148
1999-09-07 04:36:41 +02:00
|
|
|
|
insn_dep_count = (int *) xcalloc (max_uid, sizeof (int));
|
|
|
|
|
insn_depend = (rtx *) xcalloc (max_uid, sizeof (rtx));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
|
|
|
|
init_alias_analysis ();
|
|
|
|
|
|
|
|
|
|
if (write_symbols != NO_DEBUG)
|
|
|
|
|
{
|
|
|
|
|
rtx line;
|
|
|
|
|
|
c-aux-info.c (concat): Don't define.
* c-aux-info.c (concat): Don't define.
* cccp.c (my_strerror): Likewise. All callers changed to use
xstrerror instead.
(do_include): Call xstrdup, not xmalloc/strcpy.
(grow_outbuf): Don't check if xrealloc returns NULL, it can't.
(xmalloc, xrealloc, xcalloc, xstrdup): Don't define.
* collect2.c (my_strsignal): Likewise. All callers changed to use
strsignal instead.
(locatelib): Call xstrdup, not xmalloc/strcpy.
* 1750a.h (ASM_OUTPUT_INTERNAL_LABEL): Call xmalloc, not malloc.
* dsp16xx.c (override_options): Call xstrdup, not xmalloc/strcpy.
* i370.h (ASM_DECLARE_FUNCTION_NAME): Call xmalloc, not malloc.
* mips.c (build_mips16_call_stub): Call xstrdup, not xmalloc/strcpy.
* cppinit.c (cpp_options_init): Call xcalloc, not xmalloc/bzero.
* dwarfout.c (dwarfout_init): Call concat, not xmalloc/strcpy/...
* except.c (new_eh_region_entry): Call xmalloc/xrealloc, not
malloc/realloc.
(find_all_handler_type_matches): Likewise. Don't check return
value.
(get_new_handler, init_insn_eh_region, process_nestinfo): Call
xmalloc, not malloc.
(init_eh_nesting_info): Likewise. Call xcalloc, not xmalloc/bzero.
* gcc.c (xstrerror, xmalloc, xrealloc): Don't define.
(init_spec): Call xcalloc, not xmalloc/bzero.
(set_spec): Call xstrdup, not save_string.
(record_temp_file): Call xstrdup, not xmalloc/strcpy.
(find_a_file): Call xstrdup, not xmalloc/strcpy.
(process_command): Call xstrdup, not save_string.
(main): Call xcalloc, not xmalloc/bzero.
* gcov.c (xmalloc): Don't define.
(create_program_flow_graph): Call xcalloc, not xmalloc/bzero.
(scan_for_source_files): Call xstrdup, not xmalloc/strcpy.
(output_data): Call xcalloc, not xmalloc/bzero.
* haifa-sched.c (schedule_insns): Call xcalloc, not xmalloc/bzero.
* mips-tdump.c (xmalloc): Don't define.
(print_symbol): Call xmalloc, not malloc.
(read_tfile): Call xcalloc, not calloc.
* mips-tfile.c (xfree, my_strsignal, xmalloc, xcalloc, xrealloc):
Don't define. All callers of xfree/my_strsignal changed to use
free/strsignal instead.
(allocate_cluster): Call xcalloc, not calloc.
* objc/objc-act.c (lang_init): Call concat, not xmalloc/strcpy/...
Fix memory leak, free allocated memory.
* prefix.c (translate_name): Call xstrdup, not save_string.
(update_path): Likewise.
* profile.c (branch_prob): Call xstrdup, not xmalloc/strcpy.
* protoize.c (xstrerror, xmalloc, xrealloc, xfree, savestring2):
Don't define. Callers of xfree/savestring2 changed to use
free/concat instead.
* reload1.c (reload): Call xcalloc, not xmalloc/bzero.
(init_elim_table): Likewise.
* resource.c (init_resource_info): Likewise.
* stupid.c (stupid_life_analysis): Likewise.
* toplev.c (xmalloc, xcalloc, xrealloc, xstrdup): Don't define.
(open_dump_file): Call concat, not xmalloc/strcpy/...
(clean_dump_file): Likewise.
(compile_file): Call xstrdup, not xmalloc/strcpy.
From-SVN: r29148
1999-09-07 04:36:41 +02:00
|
|
|
|
line_note = (rtx *) xcalloc (max_uid, sizeof (rtx));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
|
|
|
|
|
bzero ((char *) line_note_head, n_basic_blocks * sizeof (rtx));
|
|
|
|
|
|
|
|
|
|
/* Save-line-note-head:
|
|
|
|
|
Determine the line-number at the start of each basic block.
|
|
|
|
|
This must be computed and saved now, because after a basic block's
|
|
|
|
|
predecessor has been scheduled, it is impossible to accurately
|
|
|
|
|
determine the correct line number for the first insn of the block. */
|
|
|
|
|
|
|
|
|
|
for (b = 0; b < n_basic_blocks; b++)
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
for (line = BLOCK_HEAD (b); line; line = PREV_INSN (line))
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
|
|
|
|
|
{
|
|
|
|
|
line_note_head[b] = line;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Find units used in this fuction, for visualization. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (sched_verbose)
|
|
|
|
|
init_target_units ();
|
|
|
|
|
|
|
|
|
|
/* ??? Add a NOTE after the last insn of the last basic block. It is not
|
|
|
|
|
known why this is done. */
|
|
|
|
|
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
insn = BLOCK_END (n_basic_blocks - 1);
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (NEXT_INSN (insn) == 0
|
|
|
|
|
|| (GET_CODE (insn) != NOTE
|
|
|
|
|
&& GET_CODE (insn) != CODE_LABEL
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
/* Don't emit a NOTE if it would end up between an unconditional
|
|
|
|
|
jump and a BARRIER. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
&& !(GET_CODE (insn) == JUMP_INSN
|
|
|
|
|
&& GET_CODE (NEXT_INSN (insn)) == BARRIER)))
|
basic-block.h (basic_block_head): Rename to x_basic_block_head.
* basic-block.h (basic_block_head): Rename to x_basic_block_head.
(basic_block_end): Rename to x_basic_block_end.
(BLOCK_HEAD, BLOCK_END): Update.
* caller-save.c: Change basic_block_head/end references to
BLOCK_HEAD/END.
* combine.c, flow.c, function.c, gcse.c, global.c: Likewise.
* graph.c, haifa-sched.c, local-alloc.c, regclass.c: Likewise.
* regmove.c, reload1.c, reorg.c, sched.c: Likewise.
From-SVN: r24622
1999-01-11 23:37:20 +01:00
|
|
|
|
emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks - 1));
|
1997-08-12 06:07:19 +02:00
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Schedule every region in the subroutine. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
for (rgn = 0; rgn < nr_regions; rgn++)
|
|
|
|
|
{
|
|
|
|
|
schedule_region (rgn);
|
|
|
|
|
|
|
|
|
|
#ifdef USE_C_ALLOCA
|
|
|
|
|
alloca (0);
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Reposition the prologue and epilogue notes in case we moved the
|
|
|
|
|
prologue/epilogue insns. */
|
|
|
|
|
if (reload_completed)
|
|
|
|
|
reposition_prologue_and_epilogue_notes (get_insns ());
|
|
|
|
|
|
1999-09-06 23:55:23 +02:00
|
|
|
|
/* Delete redundant line notes. */
|
1997-08-12 06:07:19 +02:00
|
|
|
|
if (write_symbols != NO_DEBUG)
|
|
|
|
|
rm_redundant_line_notes ();
|
|
|
|
|
|
|
|
|
|
if (sched_verbose)
|
|
|
|
|
{
|
|
|
|
|
if (reload_completed == 0 && flag_schedule_interblock)
|
|
|
|
|
{
|
|
|
|
|
fprintf (dump, "\n;; Procedure interblock/speculative motions == %d/%d \n",
|
|
|
|
|
nr_inter, nr_spec);
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
if (nr_inter > 0)
|
|
|
|
|
abort ();
|
|
|
|
|
}
|
|
|
|
|
fprintf (dump, "\n\n");
|
|
|
|
|
}
|
1997-08-16 07:49:38 +02:00
|
|
|
|
|
1999-10-17 23:27:56 +02:00
|
|
|
|
if (true_dependency_cache)
|
|
|
|
|
{
|
|
|
|
|
free (true_dependency_cache);
|
1999-10-17 23:46:36 +02:00
|
|
|
|
true_dependency_cache = NULL;
|
1999-10-17 23:27:56 +02:00
|
|
|
|
}
|
1998-06-18 22:17:26 +02:00
|
|
|
|
free (cant_move);
|
|
|
|
|
free (fed_by_spec_load);
|
|
|
|
|
free (is_load_insn);
|
|
|
|
|
free (insn_luid);
|
|
|
|
|
|
|
|
|
|
free (insn_priority);
|
|
|
|
|
free (insn_reg_weight);
|
|
|
|
|
free (insn_tick);
|
|
|
|
|
free (insn_costs);
|
|
|
|
|
free (insn_units);
|
|
|
|
|
free (insn_blockage);
|
|
|
|
|
free (insn_ref_count);
|
|
|
|
|
|
|
|
|
|
free (insn_dep_count);
|
|
|
|
|
free (insn_depend);
|
|
|
|
|
|
|
|
|
|
if (write_symbols != NO_DEBUG)
|
|
|
|
|
free (line_note);
|
|
|
|
|
|
1998-03-08 03:15:26 +01:00
|
|
|
|
if (edge_table)
|
|
|
|
|
{
|
|
|
|
|
free (edge_table);
|
|
|
|
|
edge_table = NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (in_edges)
|
|
|
|
|
{
|
|
|
|
|
free (in_edges);
|
|
|
|
|
in_edges = NULL;
|
|
|
|
|
}
|
|
|
|
|
if (out_edges)
|
|
|
|
|
{
|
|
|
|
|
free (out_edges);
|
|
|
|
|
out_edges = NULL;
|
|
|
|
|
}
|
1997-08-12 06:07:19 +02:00
|
|
|
|
}
|
|
|
|
|
#endif /* INSN_SCHEDULING */
|