sbitmap.c: Convert prototypes to ISO C90.

* sbitmap.c: Convert prototypes to ISO C90.
	* sbitmap.h: Likewise.
	* scan-decls.c: Likewise.
	* scan.c: Likewise.
	* sched-deps.c: Likewise.
	* sched-ebb.c: Likewise.
	* sched-int.h: Likewise.
	* sched-rgn.c: Likewise.
	* sched-vis.c: Likewise.
	* sibcall.c: Likewise.
	* simplify-rtx.c: Likewise.
	* sreal.c: Likewise.
	* sreal.h: Likewise.
	* ssa-ccp.c: Likewise.
	* ssa-dce.c: Likewise.
	* ssa.c: Likewise.
	* ssa.h: Likewise.
	* stack.h: Likewise.
	* stmt.c: Likewise.
	* stor-layout.c: Likewise.
	* stringpool.c: Likewise.
	* target.h: Likewise.
	* timevar.c: Likewise.
	* timevar.h: Likewise.
	* tlink.c: Likewise.
	* tracer.c: Likewise.
	* tree-inline.c: Likewise.
	* tree-inline.h: Likewise.
	* tree.c: Likewise.
	* tree.h: Likewise.

From-SVN: r69002
This commit is contained in:
Andreas Jaeger 2003-07-06 14:35:56 +02:00 committed by Andreas Jaeger
parent 6623b2f26d
commit 46c5ad278b
31 changed files with 1651 additions and 2599 deletions

View File

@ -67,6 +67,36 @@
* rtl.c: Likewise.
* rtl.h: Likewise.
* rtlanal.c: Likewise.
* sbitmap.c: Likewise.
* sbitmap.h: Likewise.
* scan-decls.c: Likewise.
* scan.c: Likewise.
* sched-deps.c: Likewise.
* sched-ebb.c: Likewise.
* sched-int.h: Likewise.
* sched-rgn.c: Likewise.
* sched-vis.c: Likewise.
* sibcall.c: Likewise.
* simplify-rtx.c: Likewise.
* sreal.c: Likewise.
* sreal.h: Likewise.
* ssa-ccp.c: Likewise.
* ssa-dce.c: Likewise.
* ssa.c: Likewise.
* ssa.h: Likewise.
* stack.h: Likewise.
* stmt.c: Likewise.
* stor-layout.c: Likewise.
* stringpool.c: Likewise.
* target.h: Likewise.
* timevar.c: Likewise.
* timevar.h: Likewise.
* tlink.c: Likewise.
* tracer.c: Likewise.
* tree-inline.c: Likewise.
* tree-inline.h: Likewise.
* tree.c: Likewise.
* tree.h: Likewise.
2003-07-05 Kazu Hirata <kazu@cs.umass.edu>

View File

@ -32,8 +32,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
/* Allocate a simple bitmap of N_ELMS bits. */
sbitmap
sbitmap_alloc (n_elms)
unsigned int n_elms;
sbitmap_alloc (unsigned int n_elms)
{
unsigned int bytes, size, amt;
sbitmap bmap;
@ -54,10 +53,7 @@ sbitmap_alloc (n_elms)
is zero, and set them to one otherwise. */
sbitmap
sbitmap_resize (bmap, n_elms, def)
sbitmap bmap;
unsigned int n_elms;
int def;
sbitmap_resize (sbitmap bmap, unsigned int n_elms, int def)
{
unsigned int bytes, size, amt;
unsigned int last_bit;
@ -110,8 +106,7 @@ sbitmap_resize (bmap, n_elms, def)
/* Allocate a vector of N_VECS bitmaps of N_ELMS bits. */
sbitmap *
sbitmap_vector_alloc (n_vecs, n_elms)
unsigned int n_vecs, n_elms;
sbitmap_vector_alloc (unsigned int n_vecs, unsigned int n_elms)
{
unsigned int i, bytes, offset, elm_bytes, size, amt, vector_bytes;
sbitmap *bitmap_vector;
@ -153,16 +148,14 @@ sbitmap_vector_alloc (n_vecs, n_elms)
/* Copy sbitmap SRC to DST. */
void
sbitmap_copy (dst, src)
sbitmap dst, src;
sbitmap_copy (sbitmap dst, sbitmap src)
{
memcpy (dst->elms, src->elms, sizeof (SBITMAP_ELT_TYPE) * dst->size);
}
/* Determine if a == b. */
int
sbitmap_equal (a, b)
sbitmap a, b;
sbitmap_equal (sbitmap a, sbitmap b)
{
return !memcmp (a->elms, b->elms, sizeof (SBITMAP_ELT_TYPE) * a->size);
}
@ -170,8 +163,7 @@ sbitmap_equal (a, b)
/* Zero all elements in a bitmap. */
void
sbitmap_zero (bmap)
sbitmap bmap;
sbitmap_zero (sbitmap bmap)
{
memset (bmap->elms, 0, bmap->bytes);
}
@ -179,8 +171,7 @@ sbitmap_zero (bmap)
/* Set all elements in a bitmap to ones. */
void
sbitmap_ones (bmap)
sbitmap bmap;
sbitmap_ones (sbitmap bmap)
{
unsigned int last_bit;
@ -195,9 +186,7 @@ sbitmap_ones (bmap)
/* Zero a vector of N_VECS bitmaps. */
void
sbitmap_vector_zero (bmap, n_vecs)
sbitmap *bmap;
unsigned int n_vecs;
sbitmap_vector_zero (sbitmap *bmap, unsigned int n_vecs)
{
unsigned int i;
@ -208,9 +197,7 @@ sbitmap_vector_zero (bmap, n_vecs)
/* Set a vector of N_VECS bitmaps to ones. */
void
sbitmap_vector_ones (bmap, n_vecs)
sbitmap *bmap;
unsigned int n_vecs;
sbitmap_vector_ones (sbitmap *bmap, unsigned int n_vecs)
{
unsigned int i;
@ -223,8 +210,7 @@ sbitmap_vector_ones (bmap, n_vecs)
Returns true if any change is made. */
bool
sbitmap_union_of_diff_cg (dst, a, b, c)
sbitmap dst, a, b, c;
sbitmap_union_of_diff_cg (sbitmap dst, sbitmap a, sbitmap b, sbitmap c)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -244,8 +230,7 @@ sbitmap_union_of_diff_cg (dst, a, b, c)
}
void
sbitmap_union_of_diff (dst, a, b, c)
sbitmap dst, a, b, c;
sbitmap_union_of_diff (sbitmap dst, sbitmap a, sbitmap b, sbitmap c)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -260,8 +245,7 @@ sbitmap_union_of_diff (dst, a, b, c)
/* Set bitmap DST to the bitwise negation of the bitmap SRC. */
void
sbitmap_not (dst, src)
sbitmap dst, src;
sbitmap_not (sbitmap dst, sbitmap src)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -275,15 +259,14 @@ sbitmap_not (dst, src)
in A and the bits in B. i.e. dst = a & (~b). */
void
sbitmap_difference (dst, a, b)
sbitmap dst, a, b;
sbitmap_difference (sbitmap dst, sbitmap a, sbitmap b)
{
unsigned int i, dst_size = dst->size;
unsigned int min_size = dst->size;
sbitmap_ptr dstp = dst->elms;
sbitmap_ptr ap = a->elms;
sbitmap_ptr bp = b->elms;
/* A should be at least as large as DEST, to have a defined source. */
if (a->size < dst_size)
abort ();
@ -304,8 +287,7 @@ sbitmap_difference (dst, a, b)
Return nonzero if any change is made. */
bool
sbitmap_a_and_b_cg (dst, a, b)
sbitmap dst, a, b;
sbitmap_a_and_b_cg (sbitmap dst, sbitmap a, sbitmap b)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -324,8 +306,7 @@ sbitmap_a_and_b_cg (dst, a, b)
}
void
sbitmap_a_and_b (dst, a, b)
sbitmap dst, a, b;
sbitmap_a_and_b (sbitmap dst, sbitmap a, sbitmap b)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -340,8 +321,7 @@ sbitmap_a_and_b (dst, a, b)
Return nonzero if any change is made. */
bool
sbitmap_a_xor_b_cg (dst, a, b)
sbitmap dst, a, b;
sbitmap_a_xor_b_cg (sbitmap dst, sbitmap a, sbitmap b)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -360,8 +340,7 @@ sbitmap_a_xor_b_cg (dst, a, b)
}
void
sbitmap_a_xor_b (dst, a, b)
sbitmap dst, a, b;
sbitmap_a_xor_b (sbitmap dst, sbitmap a, sbitmap b)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -376,8 +355,7 @@ sbitmap_a_xor_b (dst, a, b)
Return nonzero if any change is made. */
bool
sbitmap_a_or_b_cg (dst, a, b)
sbitmap dst, a, b;
sbitmap_a_or_b_cg (sbitmap dst, sbitmap a, sbitmap b)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -396,8 +374,7 @@ sbitmap_a_or_b_cg (dst, a, b)
}
void
sbitmap_a_or_b (dst, a, b)
sbitmap dst, a, b;
sbitmap_a_or_b (sbitmap dst, sbitmap a, sbitmap b)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -411,8 +388,7 @@ sbitmap_a_or_b (dst, a, b)
/* Return nonzero if A is a subset of B. */
bool
sbitmap_a_subset_b_p (a, b)
sbitmap a, b;
sbitmap_a_subset_b_p (sbitmap a, sbitmap b)
{
unsigned int i, n = a->size;
sbitmap_ptr ap, bp;
@ -428,8 +404,7 @@ sbitmap_a_subset_b_p (a, b)
Return nonzero if any change is made. */
bool
sbitmap_a_or_b_and_c_cg (dst, a, b, c)
sbitmap dst, a, b, c;
sbitmap_a_or_b_and_c_cg (sbitmap dst, sbitmap a, sbitmap b, sbitmap c)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -449,8 +424,7 @@ sbitmap_a_or_b_and_c_cg (dst, a, b, c)
}
void
sbitmap_a_or_b_and_c (dst, a, b, c)
sbitmap dst, a, b, c;
sbitmap_a_or_b_and_c (sbitmap dst, sbitmap a, sbitmap b, sbitmap c)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -466,8 +440,7 @@ sbitmap_a_or_b_and_c (dst, a, b, c)
Return nonzero if any change is made. */
bool
sbitmap_a_and_b_or_c_cg (dst, a, b, c)
sbitmap dst, a, b, c;
sbitmap_a_and_b_or_c_cg (sbitmap dst, sbitmap a, sbitmap b, sbitmap c)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -487,8 +460,7 @@ sbitmap_a_and_b_or_c_cg (dst, a, b, c)
}
void
sbitmap_a_and_b_or_c (dst, a, b, c)
sbitmap dst, a, b, c;
sbitmap_a_and_b_or_c (sbitmap dst, sbitmap a, sbitmap b, sbitmap c)
{
unsigned int i, n = dst->size;
sbitmap_ptr dstp = dst->elms;
@ -505,10 +477,7 @@ sbitmap_a_and_b_or_c (dst, a, b, c)
block number BB, using the new flow graph structures. */
void
sbitmap_intersection_of_succs (dst, src, bb)
sbitmap dst;
sbitmap *src;
int bb;
sbitmap_intersection_of_succs (sbitmap dst, sbitmap *src, int bb)
{
basic_block b = BASIC_BLOCK (bb);
unsigned int set_size = dst->size;
@ -545,10 +514,7 @@ sbitmap_intersection_of_succs (dst, src, bb)
block number BB, using the new flow graph structures. */
void
sbitmap_intersection_of_preds (dst, src, bb)
sbitmap dst;
sbitmap *src;
int bb;
sbitmap_intersection_of_preds (sbitmap dst, sbitmap *src, int bb)
{
basic_block b = BASIC_BLOCK (bb);
unsigned int set_size = dst->size;
@ -585,10 +551,7 @@ sbitmap_intersection_of_preds (dst, src, bb)
block number BB, using the new flow graph structures. */
void
sbitmap_union_of_succs (dst, src, bb)
sbitmap dst;
sbitmap *src;
int bb;
sbitmap_union_of_succs (sbitmap dst, sbitmap *src, int bb)
{
basic_block b = BASIC_BLOCK (bb);
unsigned int set_size = dst->size;
@ -625,10 +588,7 @@ sbitmap_union_of_succs (dst, src, bb)
block number BB, using the new flow graph structures. */
void
sbitmap_union_of_preds (dst, src, bb)
sbitmap dst;
sbitmap *src;
int bb;
sbitmap_union_of_preds (sbitmap dst, sbitmap *src, int bb)
{
basic_block b = BASIC_BLOCK (bb);
unsigned int set_size = dst->size;
@ -665,8 +625,7 @@ sbitmap_union_of_preds (dst, src, bb)
/* Return number of first bit set in the bitmap, -1 if none. */
int
sbitmap_first_set_bit (bmap)
sbitmap bmap;
sbitmap_first_set_bit (sbitmap bmap)
{
unsigned int n;
@ -677,8 +636,7 @@ sbitmap_first_set_bit (bmap)
/* Return number of last bit set in the bitmap, -1 if none. */
int
sbitmap_last_set_bit (bmap)
sbitmap bmap;
sbitmap_last_set_bit (sbitmap bmap)
{
int i;
SBITMAP_ELT_TYPE *ptr = bmap->elms;
@ -708,9 +666,7 @@ sbitmap_last_set_bit (bmap)
}
void
dump_sbitmap (file, bmap)
FILE *file;
sbitmap bmap;
dump_sbitmap (FILE *file, sbitmap bmap)
{
unsigned int i, n, j;
unsigned int set_size = bmap->size;
@ -731,9 +687,7 @@ dump_sbitmap (file, bmap)
}
void
dump_sbitmap_file (file, bmap)
FILE *file;
sbitmap bmap;
dump_sbitmap_file (FILE *file, sbitmap bmap)
{
unsigned int i, pos;
@ -756,18 +710,14 @@ dump_sbitmap_file (file, bmap)
}
void
debug_sbitmap (bmap)
sbitmap bmap;
debug_sbitmap (sbitmap bmap)
{
dump_sbitmap_file (stderr, bmap);
}
void
dump_sbitmap_vector (file, title, subtitle, bmaps, n_maps)
FILE *file;
const char *title, *subtitle;
sbitmap *bmaps;
int n_maps;
dump_sbitmap_vector (FILE *file, const char *title, const char *subtitle,
sbitmap *bmaps, int n_maps)
{
int bb;

View File

@ -19,7 +19,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA. */
#ifndef GCC_SBITMAP_H
#define GCC_SBITMAP_H
#define GCC_SBITMAP_H
/* It's not clear yet whether using bitmap.[ch] will be a win.
It should be straightforward to convert so for now we keep things simple
@ -118,63 +118,56 @@ do { \
struct int_list;
extern void dump_sbitmap PARAMS ((FILE *, sbitmap));
extern void dump_sbitmap_file PARAMS ((FILE *, sbitmap));
extern void dump_sbitmap_vector PARAMS ((FILE *, const char *,
const char *, sbitmap *,
int));
extern sbitmap sbitmap_alloc PARAMS ((unsigned int));
extern sbitmap *sbitmap_vector_alloc PARAMS ((unsigned int, unsigned int));
extern sbitmap sbitmap_resize PARAMS ((sbitmap, unsigned int, int));
extern void sbitmap_copy PARAMS ((sbitmap, sbitmap));
extern int sbitmap_equal PARAMS ((sbitmap, sbitmap));
extern void sbitmap_zero PARAMS ((sbitmap));
extern void sbitmap_ones PARAMS ((sbitmap));
extern void sbitmap_vector_zero PARAMS ((sbitmap *, unsigned int));
extern void sbitmap_vector_ones PARAMS ((sbitmap *, unsigned int));
extern void dump_sbitmap (FILE *, sbitmap);
extern void dump_sbitmap_file (FILE *, sbitmap);
extern void dump_sbitmap_vector (FILE *, const char *, const char *, sbitmap *,
int);
extern sbitmap sbitmap_alloc (unsigned int);
extern sbitmap *sbitmap_vector_alloc (unsigned int, unsigned int);
extern sbitmap sbitmap_resize (sbitmap, unsigned int, int);
extern void sbitmap_copy (sbitmap, sbitmap);
extern int sbitmap_equal (sbitmap, sbitmap);
extern void sbitmap_zero (sbitmap);
extern void sbitmap_ones (sbitmap);
extern void sbitmap_vector_zero (sbitmap *, unsigned int);
extern void sbitmap_vector_ones (sbitmap *, unsigned int);
extern void sbitmap_union_of_diff PARAMS ((sbitmap, sbitmap, sbitmap,
sbitmap));
extern bool sbitmap_union_of_diff_cg PARAMS ((sbitmap, sbitmap, sbitmap,
sbitmap));
extern void sbitmap_difference PARAMS ((sbitmap, sbitmap, sbitmap));
extern void sbitmap_not PARAMS ((sbitmap, sbitmap));
extern void sbitmap_a_or_b_and_c PARAMS ((sbitmap, sbitmap, sbitmap,
sbitmap));
extern bool sbitmap_a_or_b_and_c_cg PARAMS ((sbitmap, sbitmap, sbitmap,
sbitmap));
extern void sbitmap_a_and_b_or_c PARAMS ((sbitmap, sbitmap, sbitmap,
sbitmap));
extern bool sbitmap_a_and_b_or_c_cg PARAMS ((sbitmap, sbitmap, sbitmap,
sbitmap));
extern void sbitmap_a_and_b PARAMS ((sbitmap, sbitmap, sbitmap));
extern bool sbitmap_a_and_b_cg PARAMS ((sbitmap, sbitmap, sbitmap));
extern void sbitmap_a_or_b PARAMS ((sbitmap, sbitmap, sbitmap));
extern bool sbitmap_a_or_b_cg PARAMS ((sbitmap, sbitmap, sbitmap));
extern void sbitmap_a_xor_b PARAMS ((sbitmap, sbitmap, sbitmap));
extern bool sbitmap_a_xor_b_cg PARAMS ((sbitmap, sbitmap, sbitmap));
extern bool sbitmap_a_subset_b_p PARAMS ((sbitmap, sbitmap));
extern void sbitmap_union_of_diff (sbitmap, sbitmap, sbitmap, sbitmap);
extern bool sbitmap_union_of_diff_cg (sbitmap, sbitmap, sbitmap, sbitmap);
extern void sbitmap_difference (sbitmap, sbitmap, sbitmap);
extern void sbitmap_not (sbitmap, sbitmap);
extern void sbitmap_a_or_b_and_c (sbitmap, sbitmap, sbitmap, sbitmap);
extern bool sbitmap_a_or_b_and_c_cg (sbitmap, sbitmap, sbitmap, sbitmap);
extern void sbitmap_a_and_b_or_c (sbitmap, sbitmap, sbitmap, sbitmap);
extern bool sbitmap_a_and_b_or_c_cg (sbitmap, sbitmap, sbitmap, sbitmap);
extern void sbitmap_a_and_b (sbitmap, sbitmap, sbitmap);
extern bool sbitmap_a_and_b_cg (sbitmap, sbitmap, sbitmap);
extern void sbitmap_a_or_b (sbitmap, sbitmap, sbitmap);
extern bool sbitmap_a_or_b_cg (sbitmap, sbitmap, sbitmap);
extern void sbitmap_a_xor_b (sbitmap, sbitmap, sbitmap);
extern bool sbitmap_a_xor_b_cg (sbitmap, sbitmap, sbitmap);
extern bool sbitmap_a_subset_b_p (sbitmap, sbitmap);
extern int sbitmap_first_set_bit PARAMS ((sbitmap));
extern int sbitmap_last_set_bit PARAMS ((sbitmap));
extern int sbitmap_first_set_bit (sbitmap);
extern int sbitmap_last_set_bit (sbitmap);
extern void sbitmap_intersect_of_predsucc PARAMS ((sbitmap, sbitmap *,
int, struct int_list **));
extern void sbitmap_intersect_of_predsucc (sbitmap, sbitmap *, int,
struct int_list **);
#define sbitmap_intersect_of_predecessors sbitmap_intersect_of_predsucc
#define sbitmap_intersect_of_successors sbitmap_intersect_of_predsucc
extern void sbitmap_union_of_predsucc PARAMS ((sbitmap, sbitmap *, int,
struct int_list **));
extern void sbitmap_union_of_predsucc (sbitmap, sbitmap *, int,
struct int_list **);
#define sbitmap_union_of_predecessors sbitmap_union_of_predsucc
#define sbitmap_union_of_successors sbitmap_union_of_predsucc
/* Intersection and Union of preds/succs using the new flow graph
/* Intersection and Union of preds/succs using the new flow graph
structure instead of the pred/succ arrays. */
extern void sbitmap_intersection_of_succs PARAMS ((sbitmap, sbitmap *, int));
extern void sbitmap_intersection_of_preds PARAMS ((sbitmap, sbitmap *, int));
extern void sbitmap_union_of_succs PARAMS ((sbitmap, sbitmap *, int));
extern void sbitmap_union_of_preds PARAMS ((sbitmap, sbitmap *, int));
extern void sbitmap_intersection_of_succs (sbitmap, sbitmap *, int);
extern void sbitmap_intersection_of_preds (sbitmap, sbitmap *, int);
extern void sbitmap_union_of_succs (sbitmap, sbitmap *, int);
extern void sbitmap_union_of_preds (sbitmap, sbitmap *, int);
extern void debug_sbitmap PARAMS ((sbitmap));
extern void debug_sbitmap (sbitmap);
#endif /* ! GCC_SBITMAP_H */

View File

@ -1,6 +1,6 @@
/* scan-decls.c - Extracts declarations from cpp output.
Copyright (C) 1993, 1995, 1997, 1998,
1999, 2000 Free Software Foundation, Inc.
1999, 2000, 2003 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@ -25,8 +25,8 @@ Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include "cpplib.h"
#include "scan.h"
static void skip_to_closing_brace PARAMS ((cpp_reader *));
static const cpp_token *get_a_token PARAMS ((cpp_reader *));
static void skip_to_closing_brace (cpp_reader *);
static const cpp_token *get_a_token (cpp_reader *);
int brace_nesting = 0;
@ -43,8 +43,7 @@ int current_extern_C = 0;
/* Get a token but skip padding. */
static const cpp_token *
get_a_token (pfile)
cpp_reader *pfile;
get_a_token (cpp_reader *pfile)
{
for (;;)
{
@ -55,8 +54,7 @@ get_a_token (pfile)
}
static void
skip_to_closing_brace (pfile)
cpp_reader *pfile;
skip_to_closing_brace (cpp_reader *pfile)
{
int nesting = 1;
for (;;)
@ -95,10 +93,8 @@ Here dname is the actual name being declared.
*/
int
scan_decls (pfile, argc, argv)
cpp_reader *pfile;
int argc ATTRIBUTE_UNUSED;
char **argv ATTRIBUTE_UNUSED;
scan_decls (cpp_reader *pfile, int argc ATTRIBUTE_UNUSED,
char **argv ATTRIBUTE_UNUSED)
{
int saw_extern, saw_inline;
cpp_token prev_id;

View File

@ -1,5 +1,5 @@
/* Utility functions for scan-decls and fix-header programs.
Copyright (C) 1993, 1994, 1998, 2002 Free Software Foundation, Inc.
Copyright (C) 1993, 1994, 1998, 2002, 2003 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@ -26,9 +26,7 @@ int source_lineno = 1;
sstring source_filename;
void
make_sstring_space (str, count)
sstring *str;
int count;
make_sstring_space (sstring *str, int count)
{
int cur_pos = str->ptr - str->base;
int cur_size = str->limit - str->base;
@ -43,9 +41,7 @@ make_sstring_space (str, count)
}
void
sstring_append (dst, src)
sstring *dst;
sstring *src;
sstring_append (sstring *dst, sstring *src)
{
char *d, *s;
int count = SSTRING_LENGTH (src);
@ -59,10 +55,7 @@ sstring_append (dst, src)
}
int
scan_ident (fp, s, c)
FILE *fp;
sstring *s;
int c;
scan_ident (FILE *fp, sstring *s, int c)
{
s->ptr = s->base;
if (ISIDST (c))
@ -81,10 +74,7 @@ scan_ident (fp, s, c)
}
int
scan_string (fp, s, init)
FILE *fp;
sstring *s;
int init;
scan_string (FILE *fp, sstring *s, int init)
{
int c;
@ -116,9 +106,7 @@ scan_string (fp, s, init)
/* Skip horizontal white spaces (spaces, tabs, and C-style comments). */
int
skip_spaces (fp, c)
FILE *fp;
int c;
skip_spaces (FILE *fp, int c)
{
for (;;)
{
@ -154,10 +142,7 @@ skip_spaces (fp, c)
}
int
read_upto (fp, str, delim)
FILE *fp;
sstring *str;
int delim;
read_upto (FILE *fp, sstring *str, int delim)
{
int ch;
@ -174,9 +159,7 @@ read_upto (fp, str, delim)
}
int
get_token (fp, s)
FILE *fp;
sstring *s;
get_token (FILE *fp, sstring *s)
{
int c;
@ -244,9 +227,7 @@ get_token (fp, s)
}
unsigned int
hashstr (str, len)
const char *str;
unsigned int len;
hashstr (const char *str, unsigned int len)
{
unsigned int n = len;
unsigned int r = 0;

View File

@ -1,7 +1,7 @@
/* Instruction scheduling pass. This file computes dependencies between
instructions.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002 Free Software Foundation, Inc.
1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@ -92,24 +92,23 @@ static sbitmap *output_dependency_cache;
static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void set_sched_group_p PARAMS ((rtx));
static int deps_may_trap_p (rtx);
static void add_dependence_list (rtx, rtx, enum reg_note);
static void add_dependence_list_and_free (rtx, rtx *, enum reg_note);
static void set_sched_group_p (rtx);
static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx, rtx);
static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
static rtx get_condition (rtx);
static int conditions_mutex_p (rtx, rtx);
/* Return nonzero if a load of the memory reference MEM can cause a trap. */
static int
deps_may_trap_p (mem)
rtx mem;
deps_may_trap_p (rtx mem)
{
rtx addr = XEXP (mem, 0);
@ -124,9 +123,7 @@ deps_may_trap_p (mem)
if LIST does not contain INSN. */
rtx
find_insn_list (insn, list)
rtx insn;
rtx list;
find_insn_list (rtx insn, rtx list)
{
while (list)
{
@ -140,8 +137,7 @@ find_insn_list (insn, list)
/* Find the condition under which INSN is executed. */
static rtx
get_condition (insn)
rtx insn;
get_condition (rtx insn)
{
rtx pat = PATTERN (insn);
rtx cond;
@ -172,8 +168,7 @@ get_condition (insn)
/* Return nonzero if conditions COND1 and COND2 can never be both true. */
static int
conditions_mutex_p (cond1, cond2)
rtx cond1, cond2;
conditions_mutex_p (rtx cond1, rtx cond2)
{
if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
&& GET_RTX_CLASS (GET_CODE (cond2)) == '<'
@ -190,10 +185,7 @@ conditions_mutex_p (cond1, cond2)
nonzero if a new entry has been added to insn's LOG_LINK. */
int
add_dependence (insn, elem, dep_type)
rtx insn;
rtx elem;
enum reg_note dep_type;
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
rtx link;
int present_p;
@ -289,7 +281,7 @@ add_dependence (insn, elem, dep_type)
abort ();
}
#endif
/* If this is a more restrictive type of dependence than the existing
one, then change the existing dependence to this type. */
if ((int) dep_type < (int) REG_NOTE_KIND (link))
@ -340,9 +332,7 @@ add_dependence (insn, elem, dep_type)
/* A convenience wrapper to operate on an entire list. */
static void
add_dependence_list (insn, list, dep_type)
rtx insn, list;
enum reg_note dep_type;
add_dependence_list (rtx insn, rtx list, enum reg_note dep_type)
{
for (; list; list = XEXP (list, 1))
add_dependence (insn, XEXP (list, 0), dep_type);
@ -351,10 +341,7 @@ add_dependence_list (insn, list, dep_type)
/* Similar, but free *LISTP at the same time. */
static void
add_dependence_list_and_free (insn, listp, dep_type)
rtx insn;
rtx *listp;
enum reg_note dep_type;
add_dependence_list_and_free (rtx insn, rtx *listp, enum reg_note dep_type)
{
rtx list, next;
for (list = *listp, *listp = NULL; list ; list = next)
@ -369,8 +356,7 @@ add_dependence_list_and_free (insn, listp, dep_type)
goes along with that. */
static void
set_sched_group_p (insn)
rtx insn;
set_sched_group_p (rtx insn)
{
rtx prev;
@ -396,9 +382,8 @@ set_sched_group_p (insn)
so that we can do memory aliasing on it. */
void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
struct deps *deps;
rtx *insn_list, *mem_list, insn, mem;
add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
rtx insn, rtx mem)
{
rtx link;
@ -421,10 +406,8 @@ add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
dependencies for a read operation, similarly with FOR_WRITE. */
static void
flush_pending_lists (deps, insn, for_read, for_write)
struct deps *deps;
rtx insn;
int for_read, for_write;
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
int for_write)
{
if (for_write)
{
@ -449,10 +432,7 @@ flush_pending_lists (deps, insn, for_read, for_write)
destination of X, and reads of everything mentioned. */
static void
sched_analyze_1 (deps, x, insn)
struct deps *deps;
rtx x;
rtx insn;
sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
int regno;
rtx dest = XEXP (x, 0);
@ -485,13 +465,13 @@ sched_analyze_1 (deps, x, insn)
|| GET_CODE (dest) == SIGN_EXTRACT
|| read_modify_subreg_p (dest))
{
/* These both read and modify the result. We must handle
/* These both read and modify the result. We must handle
them as writes to get proper dependencies for following
instructions. We must handle them as reads to get proper
dependencies from this to previous instructions.
Thus we need to call sched_analyze_2. */
sched_analyze_2 (deps, XEXP (dest, 0), insn);
sched_analyze_2 (deps, XEXP (dest, 0), insn);
}
if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
{
@ -616,10 +596,7 @@ sched_analyze_1 (deps, x, insn)
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
sched_analyze_2 (deps, x, insn)
struct deps *deps;
rtx x;
rtx insn;
sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
{
int i;
int j;
@ -816,10 +793,7 @@ sched_analyze_2 (deps, x, insn)
/* Analyze an INSN with pattern X to find all dependencies. */
static void
sched_analyze_insn (deps, x, insn, loop_notes)
struct deps *deps;
rtx x, insn;
rtx loop_notes;
sched_analyze_insn (struct deps *deps, rtx x, rtx insn, rtx loop_notes)
{
RTX_CODE code = GET_CODE (x);
rtx link;
@ -1161,9 +1135,7 @@ sched_analyze_insn (deps, x, insn, loop_notes)
for every dependency. */
void
sched_analyze (deps, head, tail)
struct deps *deps;
rtx head, tail;
sched_analyze (struct deps *deps, rtx head, rtx tail)
{
rtx insn;
rtx loop_notes = 0;
@ -1296,7 +1268,7 @@ sched_analyze (deps, head, tail)
/* Now that we have completed handling INSN, check and see if it is
a CLOBBER beginning a libcall block. If it is, record the
end of the libcall sequence.
end of the libcall sequence.
We want to schedule libcall blocks as a unit before reload. While
this restricts scheduling, it preserves the meaning of a libcall
@ -1307,7 +1279,7 @@ sched_analyze (deps, head, tail)
a libcall block. */
if (!reload_completed
/* Note we may have nested libcall sequences. We only care about
the outermost libcall sequence. */
the outermost libcall sequence. */
&& deps->libcall_block_tail_insn == 0
/* The sequence must start with a clobber of a register. */
&& GET_CODE (insn) == INSN
@ -1348,10 +1320,7 @@ sched_analyze (deps, head, tail)
given DEP_TYPE. The forward dependence should be not exist before. */
void
add_forward_dependence (from, to, dep_type)
rtx from;
rtx to;
enum reg_note dep_type;
add_forward_dependence (rtx from, rtx to, enum reg_note dep_type)
{
rtx new_link;
@ -1359,7 +1328,7 @@ add_forward_dependence (from, to, dep_type)
/* If add_dependence is working properly there should never
be notes, deleted insns or duplicates in the backward
links. Thus we need not check for them here.
However, if we have enabled checking we might as well go
ahead and verify that add_dependence worked properly. */
if (GET_CODE (from) == NOTE
@ -1374,11 +1343,11 @@ add_forward_dependence (from, to, dep_type)
SET_BIT (forward_dependency_cache[INSN_LUID (from)],
INSN_LUID (to));
#endif
new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
PUT_REG_NOTE_KIND (new_link, dep_type);
INSN_DEPEND (from) = new_link;
INSN_DEP_COUNT (to) += 1;
}
@ -1388,8 +1357,7 @@ add_forward_dependence (from, to, dep_type)
INSN_DEPEND. */
void
compute_forward_dependences (head, tail)
rtx head, tail;
compute_forward_dependences (rtx head, rtx tail)
{
rtx insn, link;
rtx next_tail;
@ -1409,8 +1377,7 @@ compute_forward_dependences (head, tail)
n_bbs is the number of region blocks. */
void
init_deps (deps)
struct deps *deps;
init_deps (struct deps *deps)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
@ -1435,8 +1402,7 @@ init_deps (deps)
/* Free insn lists found in DEPS. */
void
free_deps (deps)
struct deps *deps;
free_deps (struct deps *deps)
{
int i;
@ -1469,8 +1435,7 @@ free_deps (deps)
it is used in the estimate of profitability. */
void
init_dependency_caches (luid)
int luid;
init_dependency_caches (int luid)
{
/* ?!? We could save some memory by computing a per-region luid mapping
which could reduce both the number of vectors in the cache and the size
@ -1496,7 +1461,7 @@ init_dependency_caches (luid)
/* Free the caches allocated in init_dependency_caches. */
void
free_dependency_caches ()
free_dependency_caches (void)
{
if (true_dependency_cache)
{
@ -1517,7 +1482,7 @@ free_dependency_caches ()
code. */
void
init_deps_global ()
init_deps_global (void)
{
reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
@ -1528,7 +1493,7 @@ init_deps_global ()
/* Free everything used by the dependency analysis code. */
void
finish_deps_global ()
finish_deps_global (void)
{
FREE_REG_SET (reg_pending_sets);
FREE_REG_SET (reg_pending_clobbers);

View File

@ -1,6 +1,6 @@
/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002 Free Software Foundation, Inc.
1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@ -48,25 +48,25 @@ static int target_n_insns;
static int sched_n_insns;
/* Implementations of the sched_info functions for region scheduling. */
static void init_ready_list PARAMS ((struct ready_list *));
static int can_schedule_ready_p PARAMS ((rtx));
static int new_ready PARAMS ((rtx));
static int schedule_more_p PARAMS ((void));
static const char *ebb_print_insn PARAMS ((rtx, int));
static int rank PARAMS ((rtx, rtx));
static int contributes_to_priority PARAMS ((rtx, rtx));
static void compute_jump_reg_dependencies PARAMS ((rtx, regset));
static basic_block earliest_block_with_similiar_load PARAMS ((basic_block,
rtx));
static void add_deps_for_risky_insns PARAMS ((rtx, rtx));
static basic_block schedule_ebb PARAMS ((rtx, rtx));
static basic_block fix_basic_block_boundaries PARAMS ((basic_block, basic_block, rtx, rtx));
static void add_missing_bbs PARAMS ((rtx, basic_block, basic_block));
static void init_ready_list (struct ready_list *);
static int can_schedule_ready_p (rtx);
static int new_ready (rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (rtx, int);
static int rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);
static basic_block fix_basic_block_boundaries (basic_block, basic_block, rtx,
rtx);
static void add_missing_bbs (rtx, basic_block, basic_block);
/* Return nonzero if there are more insns that should be scheduled. */
static int
schedule_more_p ()
schedule_more_p (void)
{
return sched_n_insns < target_n_insns;
}
@ -75,8 +75,7 @@ schedule_more_p ()
once before scheduling a set of insns. */
static void
init_ready_list (ready)
struct ready_list *ready;
init_ready_list (struct ready_list *ready)
{
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
@ -105,8 +104,7 @@ init_ready_list (ready)
insn can be scheduled, nonzero if we should silently discard it. */
static int
can_schedule_ready_p (insn)
rtx insn ATTRIBUTE_UNUSED;
can_schedule_ready_p (rtx insn ATTRIBUTE_UNUSED)
{
sched_n_insns++;
return 1;
@ -116,8 +114,7 @@ can_schedule_ready_p (insn)
if it should be moved to the ready list or the queue, or zero if we
should silently discard it. */
static int
new_ready (next)
rtx next ATTRIBUTE_UNUSED;
new_ready (rtx next ATTRIBUTE_UNUSED)
{
return 1;
}
@ -128,9 +125,7 @@ new_ready (next)
to be formatted so that multiple output lines will line up nicely. */
static const char *
ebb_print_insn (insn, aligned)
rtx insn;
int aligned ATTRIBUTE_UNUSED;
ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
{
static char tmp[80];
@ -143,8 +138,7 @@ ebb_print_insn (insn, aligned)
is to be preferred. Zero if they are equally good. */
static int
rank (insn1, insn2)
rtx insn1, insn2;
rank (rtx insn1, rtx insn2)
{
basic_block bb1 = BLOCK_FOR_INSN (insn1);
basic_block bb2 = BLOCK_FOR_INSN (insn2);
@ -163,8 +157,8 @@ rank (insn1, insn2)
calculations. */
static int
contributes_to_priority (next, insn)
rtx next ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
rtx insn ATTRIBUTE_UNUSED)
{
return 1;
}
@ -173,9 +167,7 @@ contributes_to_priority (next, insn)
to be set by this jump in SET. */
static void
compute_jump_reg_dependencies (insn, set)
rtx insn;
regset set;
compute_jump_reg_dependencies (rtx insn, regset set)
{
basic_block b = BLOCK_FOR_INSN (insn);
edge e;
@ -210,9 +202,7 @@ static struct sched_info ebb_sched_info =
Place blocks from FIRST to LAST before BEFORE. */
static void
add_missing_bbs (before, first, last)
rtx before;
basic_block first, last;
add_missing_bbs (rtx before, basic_block first, basic_block last)
{
for (; last != first->prev_bb; last = last->prev_bb)
{
@ -229,9 +219,8 @@ add_missing_bbs (before, first, last)
structures between BB and LAST. */
static basic_block
fix_basic_block_boundaries (bb, last, head, tail)
basic_block bb, last;
rtx head, tail;
fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head,
rtx tail)
{
rtx insn = head;
rtx last_inside = bb->head;
@ -288,7 +277,7 @@ fix_basic_block_boundaries (bb, last, head, tail)
In this case we can create new basic block. It is
always exactly one basic block last in the sequence. Handle
it by splitting the edge and repositioning the block.
This is somewhat hackish, but at least avoid cut&paste
This is somewhat hackish, but at least avoid cut&paste
A safer solution can be to bring the code into sequence,
do the split and re-emit it back in case this will ever
@ -359,9 +348,7 @@ fix_basic_block_boundaries (bb, last, head, tail)
blocks in EBB. The list is formed in `add_deps_for_risky_insns'. */
static basic_block
earliest_block_with_similiar_load (last_block, load_insn)
basic_block last_block;
rtx load_insn;
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
rtx back_link;
basic_block bb, earliest_block = NULL;
@ -394,7 +381,7 @@ earliest_block_with_similiar_load (last_block, load_insn)
if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
/* insn2 not guaranteed to be a 1 base reg load. */
continue;
for (bb = last_block; bb; bb = bb->aux)
if (insn2_block == bb)
break;
@ -414,15 +401,14 @@ earliest_block_with_similiar_load (last_block, load_insn)
insns in given ebb. */
static void
add_deps_for_risky_insns (head, tail)
rtx head, tail;
add_deps_for_risky_insns (rtx head, rtx tail)
{
rtx insn, prev;
int class;
rtx last_jump = NULL_RTX;
rtx next_tail = NEXT_INSN (tail);
basic_block last_block = NULL, bb;
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
if (GET_CODE (insn) == JUMP_INSN)
{
@ -461,7 +447,7 @@ add_deps_for_risky_insns (head, tail)
if (add_dependence (insn, prev, REG_DEP_ANTI))
add_forward_dependence (prev, insn, REG_DEP_ANTI);
break;
default:
break;
}
@ -479,8 +465,7 @@ add_deps_for_risky_insns (head, tail)
and TAIL. */
static basic_block
schedule_ebb (head, tail)
rtx head, tail;
schedule_ebb (rtx head, rtx tail)
{
int n_insns;
basic_block b;
@ -564,8 +549,7 @@ schedule_ebb (head, tail)
this pass. */
void
schedule_ebbs (dump_file)
FILE *dump_file;
schedule_ebbs (FILE *dump_file)
{
basic_block bb;

View File

@ -1,7 +1,7 @@
/* Instruction scheduling pass. This file contains definitions used
internally in the scheduler.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001 Free Software Foundation, Inc.
1999, 2000, 2001, 2003 Free Software Foundation, Inc.
This file is part of GCC.
@ -123,7 +123,7 @@ struct sched_info
{
/* Add all insns that are initially ready to the ready list. Called once
before scheduling a set of insns. */
void (*init_ready_list) PARAMS ((struct ready_list *));
void (*init_ready_list) (struct ready_list *);
/* Called after taking an insn from the ready list. Returns nonzero if
this insn can be scheduled, nonzero if we should silently discard it. */
int (*can_schedule_ready_p) PARAMS ((rtx));
@ -337,56 +337,55 @@ enum INSN_TRAP_CLASS
#endif
/* Functions in sched-vis.c. */
extern void init_target_units PARAMS ((void));
extern void insn_print_units PARAMS ((rtx));
extern void init_block_visualization PARAMS ((void));
extern void print_block_visualization PARAMS ((const char *));
extern void visualize_scheduled_insns PARAMS ((int));
extern void visualize_no_unit PARAMS ((rtx));
extern void visualize_stall_cycles PARAMS ((int));
extern void visualize_alloc PARAMS ((void));
extern void visualize_free PARAMS ((void));
extern void init_target_units (void);
extern void insn_print_units (rtx);
extern void init_block_visualization (void);
extern void print_block_visualization (const char *);
extern void visualize_scheduled_insns (int);
extern void visualize_no_unit (rtx);
extern void visualize_stall_cycles (int);
extern void visualize_alloc (void);
extern void visualize_free (void);
/* Functions in sched-deps.c. */
extern int add_dependence PARAMS ((rtx, rtx, enum reg_note));
extern void add_insn_mem_dependence PARAMS ((struct deps *, rtx *, rtx *, rtx,
rtx));
extern void sched_analyze PARAMS ((struct deps *, rtx, rtx));
extern void init_deps PARAMS ((struct deps *));
extern void free_deps PARAMS ((struct deps *));
extern void init_deps_global PARAMS ((void));
extern void finish_deps_global PARAMS ((void));
extern void add_forward_dependence PARAMS ((rtx, rtx, enum reg_note));
extern void compute_forward_dependences PARAMS ((rtx, rtx));
extern rtx find_insn_list PARAMS ((rtx, rtx));
extern void init_dependency_caches PARAMS ((int));
extern void free_dependency_caches PARAMS ((void));
extern int add_dependence (rtx, rtx, enum reg_note);
extern void add_insn_mem_dependence (struct deps *, rtx *, rtx *, rtx, rtx);
extern void sched_analyze (struct deps *, rtx, rtx);
extern void init_deps (struct deps *);
extern void free_deps (struct deps *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
extern void add_forward_dependence (rtx, rtx, enum reg_note);
extern void compute_forward_dependences (rtx, rtx);
extern rtx find_insn_list (rtx, rtx);
extern void init_dependency_caches (int);
extern void free_dependency_caches (void);
/* Functions in haifa-sched.c. */
extern int haifa_classify_insn PARAMS ((rtx));
extern void get_block_head_tail PARAMS ((int, rtx *, rtx *));
extern int no_real_insns_p PARAMS ((rtx, rtx));
extern int haifa_classify_insn (rtx);
extern void get_block_head_tail (int, rtx *, rtx *);
extern int no_real_insns_p (rtx, rtx);
extern void rm_line_notes PARAMS ((rtx, rtx));
extern void save_line_notes PARAMS ((int, rtx, rtx));
extern void restore_line_notes PARAMS ((rtx, rtx));
extern void rm_redundant_line_notes PARAMS ((void));
extern void rm_other_notes PARAMS ((rtx, rtx));
extern void rm_line_notes (rtx, rtx);
extern void save_line_notes (int, rtx, rtx);
extern void restore_line_notes (rtx, rtx);
extern void rm_redundant_line_notes (void);
extern void rm_other_notes (rtx, rtx);
extern int insn_issue_delay PARAMS ((rtx));
extern int set_priorities PARAMS ((rtx, rtx));
extern int insn_issue_delay (rtx);
extern int set_priorities (rtx, rtx);
extern rtx sched_emit_insn PARAMS ((rtx));
extern void schedule_block PARAMS ((int, int));
extern void sched_init PARAMS ((FILE *));
extern void sched_finish PARAMS ((void));
extern rtx sched_emit_insn (rtx);
extern void schedule_block (int, int);
extern void sched_init (FILE *);
extern void sched_finish (void);
extern void ready_add PARAMS ((struct ready_list *, rtx));
extern void ready_add (struct ready_list *, rtx);
/* The following are exported for the benefit of debugging functions. It
would be nicer to keep them private to haifa-sched.c. */
extern int insn_unit PARAMS ((rtx));
extern int insn_cost PARAMS ((rtx, rtx, rtx));
extern rtx get_unit_last_insn PARAMS ((int));
extern int actual_hazard_this_instance PARAMS ((int, int, rtx, int, int));
extern void print_insn PARAMS ((char *, rtx, int));
extern int insn_unit (rtx);
extern int insn_cost (rtx, rtx, rtx);
extern rtx get_unit_last_insn (int);
extern int actual_hazard_this_instance (int, int, rtx, int, int);
extern void print_insn (char *, rtx, int);

View File

@ -1,6 +1,6 @@
/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002 Free Software Foundation, Inc.
1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@ -116,9 +116,9 @@ static int *out_edges;
#define IN_EDGES(block) (in_edges[block])
#define OUT_EDGES(block) (out_edges[block])
static int is_cfg_nonregular PARAMS ((void));
static int build_control_flow PARAMS ((struct edge_list *));
static void new_edge PARAMS ((int, int));
static int is_cfg_nonregular (void);
static int build_control_flow (struct edge_list *);
static void new_edge (int, int);
/* A region is the main entity for interblock scheduling: insns
are allowed to move between blocks in the same region, along
@ -153,12 +153,12 @@ static int *containing_rgn;
#define BLOCK_TO_BB(block) (block_to_bb[block])
#define CONTAINING_RGN(block) (containing_rgn[block])
void debug_regions PARAMS ((void));
static void find_single_block_region PARAMS ((void));
static void find_rgns PARAMS ((struct edge_list *, dominance_info));
static int too_large PARAMS ((int, int *, int *));
void debug_regions (void);
static void find_single_block_region (void);
static void find_rgns (struct edge_list *, dominance_info);
static int too_large (int, int *, int *);
extern void debug_live PARAMS ((int, int));
extern void debug_live (int, int);
/* Blocks of the current region being scheduled. */
static int current_nr_blocks;
@ -177,7 +177,7 @@ bitlst;
static int bitlst_table_last;
static int *bitlst_table;
static void extract_bitlst PARAMS ((sbitmap, bitlst *));
static void extract_bitlst (sbitmap, bitlst *);
/* Target info declarations.
@ -218,10 +218,10 @@ static int target_bb;
typedef bitlst edgelst;
/* Target info functions. */
static void split_edges PARAMS ((int, int, edgelst *));
static void compute_trg_info PARAMS ((int));
void debug_candidate PARAMS ((int));
void debug_candidates PARAMS ((int));
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);
/* Dominators array: dom[i] contains the sbitmap of dominators of
bb i in the region. */
@ -269,7 +269,7 @@ static edgeset *pot_split;
/* For every bb, a set of its ancestor edges. */
static edgeset *ancestor_edges;
static void compute_dom_prob_ps PARAMS ((int));
static void compute_dom_prob_ps (int);
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
@ -280,29 +280,29 @@ static void compute_dom_prob_ps PARAMS ((int));
#define MIN_PROBABILITY 40
/* Speculative scheduling functions. */
static int check_live_1 PARAMS ((int, rtx));
static void update_live_1 PARAMS ((int, rtx));
static int check_live PARAMS ((rtx, int));
static void update_live PARAMS ((rtx, int));
static void set_spec_fed PARAMS ((rtx));
static int is_pfree PARAMS ((rtx, int, int));
static int find_conditional_protection PARAMS ((rtx, int));
static int is_conditionally_protected PARAMS ((rtx, int, int));
static int is_prisky PARAMS ((rtx, int, int));
static int is_exception_free PARAMS ((rtx, int, int));
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int check_live (rtx, int);
static void update_live (rtx, int);
static void set_spec_fed (rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx, int, int);
static bool sets_likely_spilled PARAMS ((rtx));
static void sets_likely_spilled_1 PARAMS ((rtx, rtx, void *));
static void add_branch_dependences PARAMS ((rtx, rtx));
static void compute_block_backward_dependences PARAMS ((int));
void debug_dependencies PARAMS ((void));
static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, rtx, void *);
static void add_branch_dependences (rtx, rtx);
static void compute_block_backward_dependences (int);
void debug_dependencies (void);
static void init_regions PARAMS ((void));
static void schedule_region PARAMS ((int));
static rtx concat_INSN_LIST PARAMS ((rtx, rtx));
static void concat_insn_mem_list PARAMS ((rtx, rtx, rtx *, rtx *));
static void propagate_deps PARAMS ((int, struct deps *));
static void free_pending_lists PARAMS ((void));
static void init_regions (void);
static void schedule_region (int);
static rtx concat_INSN_LIST (rtx, rtx);
static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
static void propagate_deps (int, struct deps *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
@ -313,7 +313,7 @@ static void free_pending_lists PARAMS ((void));
have nonlocal gotos. */
static int
is_cfg_nonregular ()
is_cfg_nonregular (void)
{
basic_block b;
rtx insn;
@ -374,8 +374,7 @@ is_cfg_nonregular ()
prevent cross block scheduling. */
static int
build_control_flow (edge_list)
struct edge_list *edge_list;
build_control_flow (struct edge_list *edge_list)
{
int i, unreachable, num_edges;
basic_block b;
@ -426,8 +425,7 @@ build_control_flow (edge_list)
integer lists. */
static void
new_edge (source, target)
int source, target;
new_edge (int source, int target)
{
int e, next_edge;
int curr_edge, fst_edge;
@ -481,9 +479,7 @@ new_edge (source, target)
/* Translate a bit-set SET to a list BL of the bit-set members. */
static void
extract_bitlst (set, bl)
sbitmap set;
bitlst *bl;
extract_bitlst (sbitmap set, bitlst *bl)
{
int i;
@ -507,7 +503,7 @@ extract_bitlst (set, bl)
/* Print the regions, for debugging purposes. Callable from debugger. */
void
debug_regions ()
debug_regions (void)
{
int rgn, bb;
@ -537,7 +533,7 @@ debug_regions ()
scheduling. */
static void
find_single_block_region ()
find_single_block_region (void)
{
basic_block bb;
@ -559,8 +555,7 @@ find_single_block_region ()
scheduling (compile time considerations), otherwise return 0. */
static int
too_large (block, num_bbs, num_insns)
int block, *num_bbs, *num_insns;
too_large (int block, int *num_bbs, int *num_insns)
{
(*num_bbs)++;
(*num_insns) += (INSN_LUID (BLOCK_END (block)) -
@ -618,9 +613,7 @@ too_large (block, num_bbs, num_insns)
of edge tables. That would simplify it somewhat. */
static void
find_rgns (edge_list, dom)
struct edge_list *edge_list;
dominance_info dom;
find_rgns (struct edge_list *edge_list, dominance_info dom)
{
int *max_hdr, *dfs_nr, *stack, *degree;
char no_loops = 1;
@ -1044,8 +1037,7 @@ find_rgns (edge_list, dom)
Assume that these values were already computed for bb's predecessors. */
static void
compute_dom_prob_ps (bb)
int bb;
compute_dom_prob_ps (int bb)
{
int nxt_in_edge, fst_in_edge, pred;
int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
@ -1123,10 +1115,7 @@ compute_dom_prob_ps (bb)
Note that bb_trg dominates bb_src. */
static void
split_edges (bb_src, bb_trg, bl)
int bb_src;
int bb_trg;
edgelst *bl;
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
sbitmap src = (edgeset) sbitmap_alloc (pot_split[bb_src]->n_bits);
sbitmap_copy (src, pot_split[bb_src]);
@ -1141,8 +1130,7 @@ split_edges (bb_src, bb_trg, bl)
For speculative sources, compute their update-blocks and split-blocks. */
static void
compute_trg_info (trg)
int trg;
compute_trg_info (int trg)
{
candidate *sp;
edgelst el;
@ -1240,8 +1228,7 @@ compute_trg_info (trg)
/* Print candidates info, for debugging purposes. Callable from debugger. */
void
debug_candidate (i)
int i;
debug_candidate (int i)
{
if (!candidate_table[i].is_valid)
return;
@ -1278,8 +1265,7 @@ debug_candidate (i)
/* Print candidates info, for debugging purposes. Callable from debugger. */
void
debug_candidates (trg)
int trg;
debug_candidates (int trg)
{
int i;
@ -1295,9 +1281,7 @@ debug_candidates (trg)
of the split-blocks of src, otherwise return 1. */
static int
check_live_1 (src, x)
int src;
rtx x;
check_live_1 (int src, rtx x)
{
int i;
int regno;
@ -1375,9 +1359,7 @@ check_live_1 (src, x)
of every update-block of src. */
static void
update_live_1 (src, x)
int src;
rtx x;
update_live_1 (int src, rtx x)
{
int i;
int regno;
@ -1443,9 +1425,7 @@ update_live_1 (src, x)
ready-list or before the scheduling. */
static int
check_live (insn, src)
rtx insn;
int src;
check_live (rtx insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
@ -1470,9 +1450,7 @@ check_live (insn, src)
block src to trg. */
static void
update_live (insn, src)
rtx insn;
int src;
update_live (rtx insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
@ -1498,8 +1476,7 @@ update_live (insn, src)
/* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
static void
set_spec_fed (load_insn)
rtx load_insn;
set_spec_fed (rtx load_insn)
{
rtx link;
@ -1512,9 +1489,7 @@ set_spec_fed (load_insn)
branch depending on insn, that guards the speculative load. */
static int
find_conditional_protection (insn, load_insn_bb)
rtx insn;
int load_insn_bb;
find_conditional_protection (rtx insn, int load_insn_bb)
{
rtx link;
@ -1549,9 +1524,7 @@ find_conditional_protection (insn, load_insn_bb)
Locate the branch by following INSN_DEPEND from insn1. */
static int
is_conditionally_protected (load_insn, bb_src, bb_trg)
rtx load_insn;
int bb_src, bb_trg;
is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
{
rtx link;
@ -1601,9 +1574,7 @@ is_conditionally_protected (load_insn, bb_src, bb_trg)
load2 anyhow. */
static int
is_pfree (load_insn, bb_src, bb_trg)
rtx load_insn;
int bb_src, bb_trg;
is_pfree (rtx load_insn, int bb_src, int bb_trg)
{
rtx back_link;
candidate *candp = candidate_table + bb_src;
@ -1654,9 +1625,7 @@ is_pfree (load_insn, bb_src, bb_trg)
a compare on load_insn's address). */
static int
is_prisky (load_insn, bb_src, bb_trg)
rtx load_insn;
int bb_src, bb_trg;
is_prisky (rtx load_insn, int bb_src, int bb_trg)
{
if (FED_BY_SPEC_LOAD (load_insn))
return 1;
@ -1676,9 +1645,7 @@ is_prisky (load_insn, bb_src, bb_trg)
and 0 otherwise. */
static int
is_exception_free (insn, bb_src, bb_trg)
rtx insn;
int bb_src, bb_trg;
is_exception_free (rtx insn, int bb_src, int bb_trg)
{
int insn_class = haifa_classify_insn (insn);
@ -1727,19 +1694,19 @@ static int sched_n_insns;
static int last_was_jump;
/* Implementations of the sched_info functions for region scheduling. */
static void init_ready_list PARAMS ((struct ready_list *));
static int can_schedule_ready_p PARAMS ((rtx));
static int new_ready PARAMS ((rtx));
static int schedule_more_p PARAMS ((void));
static const char *rgn_print_insn PARAMS ((rtx, int));
static int rgn_rank PARAMS ((rtx, rtx));
static int contributes_to_priority PARAMS ((rtx, rtx));
static void compute_jump_reg_dependencies PARAMS ((rtx, regset));
static void init_ready_list (struct ready_list *);
static int can_schedule_ready_p (rtx);
static int new_ready (rtx);
static int schedule_more_p (void);
static const char *rgn_print_insn (rtx, int);
static int rgn_rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset);
/* Return nonzero if there are more insns that should be scheduled. */
static int
schedule_more_p ()
schedule_more_p (void)
{
return ! last_was_jump && sched_target_n_insns < target_n_insns;
}
@ -1748,8 +1715,7 @@ schedule_more_p ()
once before scheduling a set of insns. */
static void
init_ready_list (ready)
struct ready_list *ready;
init_ready_list (struct ready_list *ready)
{
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
@ -1835,8 +1801,7 @@ init_ready_list (ready)
insn can be scheduled, nonzero if we should silently discard it. */
static int
can_schedule_ready_p (insn)
rtx insn;
can_schedule_ready_p (rtx insn)
{
if (GET_CODE (insn) == JUMP_INSN)
last_was_jump = 1;
@ -1900,8 +1865,7 @@ can_schedule_ready_p (insn)
if it should be moved to the ready list or the queue, or zero if we
should silently discard it. */
static int
new_ready (next)
rtx next;
new_ready (rtx next)
{
/* For speculative insns, before inserting to ready/queue,
check live, exception-free, and issue-delay. */
@ -1930,9 +1894,7 @@ new_ready (next)
to be formatted so that multiple output lines will line up nicely. */
static const char *
rgn_print_insn (insn, aligned)
rtx insn;
int aligned;
rgn_print_insn (rtx insn, int aligned)
{
static char tmp[80];
@ -1953,8 +1915,7 @@ rgn_print_insn (insn, aligned)
is to be preferred. Zero if they are equally good. */
static int
rgn_rank (insn1, insn2)
rtx insn1, insn2;
rgn_rank (rtx insn1, rtx insn2)
{
/* Some comparison make sense in interblock scheduling only. */
if (INSN_BB (insn1) != INSN_BB (insn2))
@ -1985,8 +1946,7 @@ rgn_rank (insn1, insn2)
calculations. */
static int
contributes_to_priority (next, insn)
rtx next, insn;
contributes_to_priority (rtx next, rtx insn)
{
return BLOCK_NUM (next) == BLOCK_NUM (insn);
}
@ -1995,9 +1955,8 @@ contributes_to_priority (next, insn)
to be set by this jump in SET. */
static void
compute_jump_reg_dependencies (insn, set)
rtx insn ATTRIBUTE_UNUSED;
regset set ATTRIBUTE_UNUSED;
compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
regset set ATTRIBUTE_UNUSED)
{
/* Nothing to do here, since we postprocess jumps in
add_branch_dependences. */
@ -2025,8 +1984,7 @@ static struct sched_info region_sched_info =
/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
static bool
sets_likely_spilled (pat)
rtx pat;
sets_likely_spilled (rtx pat)
{
bool ret = false;
note_stores (pat, sets_likely_spilled_1, &ret);
@ -2034,9 +1992,7 @@ sets_likely_spilled (pat)
}
static void
sets_likely_spilled_1 (x, pat, data)
rtx x, pat;
void *data;
sets_likely_spilled_1 (rtx x, rtx pat, void *data)
{
bool *ret = (bool *) data;
@ -2051,8 +2007,7 @@ sets_likely_spilled_1 (x, pat, data)
block. */
static void
add_branch_dependences (head, tail)
rtx head, tail;
add_branch_dependences (rtx head, rtx tail)
{
rtx insn, last;
@ -2133,8 +2088,7 @@ static struct deps *bb_deps;
/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
static rtx
concat_INSN_LIST (copy, old)
rtx copy, old;
concat_INSN_LIST (rtx copy, rtx old)
{
rtx new = old;
for (; copy ; copy = XEXP (copy, 1))
@ -2143,9 +2097,8 @@ concat_INSN_LIST (copy, old)
}
static void
concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p)
rtx copy_insns, copy_mems;
rtx *old_insns_p, *old_mems_p;
concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
rtx *old_mems_p)
{
rtx new_insns = *old_insns_p;
rtx new_mems = *old_mems_p;
@ -2165,9 +2118,7 @@ concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p)
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
propagate_deps (bb, pred_deps)
int bb;
struct deps *pred_deps;
propagate_deps (int bb, struct deps *pred_deps)
{
int b = BB_TO_BLOCK (bb);
int e, first_edge;
@ -2268,8 +2219,7 @@ propagate_deps (bb, pred_deps)
similar, and the result is interblock dependences in the region. */
static void
compute_block_backward_dependences (bb)
int bb;
compute_block_backward_dependences (int bb)
{
rtx head, tail;
struct deps tmp_deps;
@ -2292,7 +2242,7 @@ compute_block_backward_dependences (bb)
them to the unused_*_list variables, so that they can be reused. */
static void
free_pending_lists ()
free_pending_lists (void)
{
int bb;
@ -2308,7 +2258,7 @@ free_pending_lists ()
/* Print dependences for debugging, callable from debugger. */
void
debug_dependencies ()
debug_dependencies (void)
{
int bb;
@ -2421,8 +2371,7 @@ debug_dependencies ()
scheduled after its flow predecessors. */
static void
schedule_region (rgn)
int rgn;
schedule_region (int rgn)
{
int bb;
int rgn_n_insns = 0;
@ -2520,7 +2469,7 @@ schedule_region (rgn)
/* rm_other_notes only removes notes which are _inside_ the
block---that is, it won't remove notes before the first real insn
or after the last real insn of the block. So if the first insn
or after the last real insn of the block. So if the first insn
has a REG_SAVE_NOTE which would otherwise be emitted before the
insn, it is redundant with the note before the start of the
block, and so we have to take it out. */
@ -2605,7 +2554,7 @@ static int *deaths_in_region;
/* Initialize data structures for region scheduling. */
static void
init_regions ()
init_regions (void)
{
sbitmap blocks;
int rgn;
@ -2690,8 +2639,7 @@ init_regions ()
this pass. */
void
schedule_insns (dump_file)
FILE *dump_file;
schedule_insns (FILE *dump_file)
{
sbitmap large_region_blocks, blocks;
int rgn;

View File

@ -1,6 +1,6 @@
/* Instruction scheduling pass.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2002 Free Software Foundation, Inc.
1999, 2000, 2002, 2003 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@ -47,17 +47,16 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
static int target_units = 0;
static char *safe_concat PARAMS ((char *, char *, const char *));
static int get_visual_tbl_length PARAMS ((void));
static void print_exp PARAMS ((char *, rtx, int));
static void print_value PARAMS ((char *, rtx, int));
static void print_pattern PARAMS ((char *, rtx, int));
static char *safe_concat (char *, char *, const char *);
static int get_visual_tbl_length (void);
static void print_exp (char *, rtx, int);
static void print_value (char *, rtx, int);
static void print_pattern (char *, rtx, int);
/* Print names of units on which insn can/should execute, for debugging. */
void
insn_print_units (insn)
rtx insn;
insn_print_units (rtx insn)
{
int i;
int unit = insn_unit (insn);
@ -98,7 +97,7 @@ rtx vis_no_unit[MAX_VISUAL_NO_UNIT];
for visualization. */
void
init_target_units ()
init_target_units (void)
{
rtx insn;
int unit;
@ -120,7 +119,7 @@ init_target_units ()
/* Return the length of the visualization table. */
static int
get_visual_tbl_length ()
get_visual_tbl_length (void)
{
int unit, i;
int n, n1;
@ -158,7 +157,7 @@ get_visual_tbl_length ()
/* Init block visualization debugging info. */
void
init_block_visualization ()
init_block_visualization (void)
{
strcpy (visual_tbl, "");
n_visual_lines = 0;
@ -168,10 +167,7 @@ init_block_visualization ()
#define BUF_LEN 2048
static char *
safe_concat (buf, cur, str)
char *buf;
char *cur;
const char *str;
safe_concat (char *buf, char *cur, const char *str)
{
char *end = buf + BUF_LEN - 2; /* Leave room for null. */
int c;
@ -194,10 +190,7 @@ safe_concat (buf, cur, str)
may be stored in objects representing values. */
static void
print_exp (buf, x, verbose)
char *buf;
rtx x;
int verbose;
print_exp (char *buf, rtx x, int verbose)
{
char tmp[BUF_LEN];
const char *st[4];
@ -548,10 +541,7 @@ print_exp (buf, x, verbose)
registers, labels, symbols and memory accesses. */
static void
print_value (buf, x, verbose)
char *buf;
rtx x;
int verbose;
print_value (char *buf, rtx x, int verbose)
{
char t[BUF_LEN];
char *cur = buf;
@ -641,10 +631,7 @@ print_value (buf, x, verbose)
/* The next step in insn detalization, its pattern recognition. */
static void
print_pattern (buf, x, verbose)
char *buf;
rtx x;
int verbose;
print_pattern (char *buf, rtx x, int verbose)
{
char t1[BUF_LEN], t2[BUF_LEN], t3[BUF_LEN];
@ -755,10 +742,7 @@ print_pattern (buf, x, verbose)
depends now on sched.c inner variables ...) */
void
print_insn (buf, x, verbose)
char *buf;
rtx x;
int verbose;
print_insn (char *buf, rtx x, int verbose)
{
char t[BUF_LEN];
rtx insn = x;
@ -824,8 +808,7 @@ print_insn (buf, x, verbose)
description should never use the following function. */
void
print_block_visualization (s)
const char *s;
print_block_visualization (const char *s)
{
int unit, i;
@ -854,8 +837,7 @@ print_block_visualization (s)
/* Print insns in the 'no_unit' column of visualization. */
void
visualize_no_unit (insn)
rtx insn;
visualize_no_unit (rtx insn)
{
if (n_vis_no_unit < MAX_VISUAL_NO_UNIT)
{
@ -867,8 +849,7 @@ visualize_no_unit (insn)
/* Print insns scheduled in clock, for visualization. */
void
visualize_scheduled_insns (clock)
int clock;
visualize_scheduled_insns (int clock)
{
int i, unit;
@ -914,8 +895,7 @@ visualize_scheduled_insns (clock)
/* Print stalled cycles. */
void
visualize_stall_cycles (stalls)
int stalls;
visualize_stall_cycles (int stalls)
{
static const char *const prefix = ";; ";
const char *suffix = "\n";
@ -950,7 +930,7 @@ visualize_stall_cycles (stalls)
/* Allocate data used for visualization during scheduling. */
void
visualize_alloc ()
visualize_alloc (void)
{
visual_tbl = xmalloc (get_visual_tbl_length ());
}
@ -958,7 +938,7 @@ visualize_alloc ()
/* Free data used for visualization. */
void
visualize_free ()
visualize_free (void)
{
free (visual_tbl);
}

View File

@ -1,5 +1,6 @@
/* Generic sibling call optimization support
Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
Copyright (C) 1999, 2000, 2001, 2002, 2003
Free Software Foundation, Inc.
This file is part of GCC.
@ -40,18 +41,18 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
return in the sibcall sequence. */
static rtx return_value_pseudo;
static int identify_call_return_value PARAMS ((rtx, rtx *, rtx *));
static rtx skip_copy_to_return_value PARAMS ((rtx));
static rtx skip_use_of_return_value PARAMS ((rtx, enum rtx_code));
static rtx skip_stack_adjustment PARAMS ((rtx));
static rtx skip_pic_restore PARAMS ((rtx));
static rtx skip_jump_insn PARAMS ((rtx));
static int call_ends_block_p PARAMS ((rtx, rtx));
static int uses_addressof PARAMS ((rtx));
static int sequence_uses_addressof PARAMS ((rtx));
static void purge_reg_equiv_notes PARAMS ((void));
static void purge_mem_unchanging_flag PARAMS ((rtx));
static rtx skip_unreturned_value PARAMS ((rtx));
static int identify_call_return_value (rtx, rtx *, rtx *);
static rtx skip_copy_to_return_value (rtx);
static rtx skip_use_of_return_value (rtx, enum rtx_code);
static rtx skip_stack_adjustment (rtx);
static rtx skip_pic_restore (rtx);
static rtx skip_jump_insn (rtx);
static int call_ends_block_p (rtx, rtx);
static int uses_addressof (rtx);
static int sequence_uses_addressof (rtx);
static void purge_reg_equiv_notes (void);
static void purge_mem_unchanging_flag (rtx);
static rtx skip_unreturned_value (rtx);
/* Examine a CALL_PLACEHOLDER pattern and determine where the call's
return value is located. P_HARD_RETURN receives the hard register
@ -59,9 +60,7 @@ static rtx skip_unreturned_value PARAMS ((rtx));
that the sequence used. Return nonzero if the values were located. */
static int
identify_call_return_value (cp, p_hard_return, p_soft_return)
rtx cp;
rtx *p_hard_return, *p_soft_return;
identify_call_return_value (rtx cp, rtx *p_hard_return, rtx *p_soft_return)
{
rtx insn, set, hard, soft;
@ -142,8 +141,7 @@ identify_call_return_value (cp, p_hard_return, p_soft_return)
copy. Otherwise return ORIG_INSN. */
static rtx
skip_copy_to_return_value (orig_insn)
rtx orig_insn;
skip_copy_to_return_value (rtx orig_insn)
{
rtx insn, set = NULL_RTX;
rtx hardret, softret;
@ -219,9 +217,7 @@ skip_copy_to_return_value (orig_insn)
value, return insn. Otherwise return ORIG_INSN. */
static rtx
skip_use_of_return_value (orig_insn, code)
rtx orig_insn;
enum rtx_code code;
skip_use_of_return_value (rtx orig_insn, enum rtx_code code)
{
rtx insn;
@ -240,8 +236,7 @@ skip_use_of_return_value (orig_insn, code)
/* In case function does not return value, we get clobber of pseudo followed
by set to hard return value. */
static rtx
skip_unreturned_value (orig_insn)
rtx orig_insn;
skip_unreturned_value (rtx orig_insn)
{
rtx insn = next_nonnote_insn (orig_insn);
@ -271,8 +266,7 @@ skip_unreturned_value (orig_insn)
Otherwise return ORIG_INSN. */
static rtx
skip_stack_adjustment (orig_insn)
rtx orig_insn;
skip_stack_adjustment (rtx orig_insn)
{
rtx insn, set = NULL_RTX;
@ -296,8 +290,7 @@ skip_stack_adjustment (orig_insn)
return it. Otherwise return ORIG_INSN. */
static rtx
skip_pic_restore (orig_insn)
rtx orig_insn;
skip_pic_restore (rtx orig_insn)
{
rtx insn, set = NULL_RTX;
@ -316,8 +309,7 @@ skip_pic_restore (orig_insn)
Otherwise return ORIG_INSN. */
static rtx
skip_jump_insn (orig_insn)
rtx orig_insn;
skip_jump_insn (rtx orig_insn)
{
rtx insn;
@ -335,9 +327,7 @@ skip_jump_insn (orig_insn)
goes all the way to END, the end of a basic block. Return 1 if so. */
static int
call_ends_block_p (insn, end)
rtx insn;
rtx end;
call_ends_block_p (rtx insn, rtx end)
{
rtx new_insn;
/* END might be a note, so get the last nonnote insn of the block. */
@ -394,8 +384,7 @@ call_ends_block_p (insn, end)
is found outside of some MEM expression, else return zero. */
static int
uses_addressof (x)
rtx x;
uses_addressof (rtx x)
{
RTX_CODE code;
int i, j;
@ -441,8 +430,7 @@ uses_addressof (x)
of insns. */
static int
sequence_uses_addressof (seq)
rtx seq;
sequence_uses_addressof (rtx seq)
{
rtx insn;
@ -474,7 +462,7 @@ sequence_uses_addressof (seq)
/* Remove all REG_EQUIV notes found in the insn chain. */
static void
purge_reg_equiv_notes ()
purge_reg_equiv_notes (void)
{
rtx insn;
@ -498,8 +486,7 @@ purge_reg_equiv_notes ()
/* Clear RTX_UNCHANGING_P flag of incoming argument MEMs. */
static void
purge_mem_unchanging_flag (x)
rtx x;
purge_mem_unchanging_flag (rtx x)
{
RTX_CODE code;
int i, j;
@ -538,9 +525,7 @@ purge_mem_unchanging_flag (x)
the CALL_PLACEHOLDER insn; USE tells which child to use. */
void
replace_call_placeholder (insn, use)
rtx insn;
sibcall_use_t use;
replace_call_placeholder (rtx insn, sibcall_use_t use)
{
if (use == sibcall_use_tail_recursion)
emit_insn_before (XEXP (PATTERN (insn), 2), insn);
@ -571,7 +556,7 @@ replace_call_placeholder (insn, use)
Replace the CALL_PLACEHOLDER with an appropriate insn chain. */
void
optimize_sibling_and_tail_recursive_calls ()
optimize_sibling_and_tail_recursive_calls (void)
{
rtx insn, insns;
basic_block alternate_exit = EXIT_BLOCK_PTR;

View File

@ -49,19 +49,15 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#define HWI_SIGN_EXTEND(low) \
((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
enum machine_mode, rtx,
rtx, int));
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
rtx, int);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
maximally negative number can overflow). */
static rtx
neg_const_int (mode, i)
enum machine_mode mode;
rtx i;
neg_const_int (enum machine_mode mode, rtx i)
{
return gen_int_mode (- INTVAL (i), mode);
}
@ -71,10 +67,8 @@ neg_const_int (mode, i)
seeing if the expression folds. */
rtx
simplify_gen_binary (code, mode, op0, op1)
enum rtx_code code;
enum machine_mode mode;
rtx op0, op1;
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
rtx op1)
{
rtx tem;
@ -104,8 +98,7 @@ simplify_gen_binary (code, mode, op0, op1)
/* If X is a MEM referencing the constant pool, return the real value.
Otherwise return X. */
rtx
avoid_constant_pool_reference (x)
rtx x;
avoid_constant_pool_reference (rtx x)
{
rtx c, tmp, addr;
enum machine_mode cmode;
@ -163,11 +156,8 @@ avoid_constant_pool_reference (x)
the specified operation. */
rtx
simplify_gen_unary (code, mode, op, op_mode)
enum rtx_code code;
enum machine_mode mode;
rtx op;
enum machine_mode op_mode;
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
enum machine_mode op_mode)
{
rtx tem;
@ -181,10 +171,8 @@ simplify_gen_unary (code, mode, op, op_mode)
/* Likewise for ternary operations. */
rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
enum rtx_code code;
enum machine_mode mode, op0_mode;
rtx op0, op1, op2;
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
rtx tem;
@ -201,11 +189,8 @@ simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
*/
rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
enum rtx_code code;
enum machine_mode mode;
enum machine_mode cmp_mode;
rtx op0, op1;
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
enum machine_mode cmp_mode, rtx op0, rtx op1)
{
rtx tem;
@ -248,10 +233,7 @@ simplify_gen_relational (code, mode, cmp_mode, op0, op1)
resulting RTX. Return a new RTX which is as simplified as possible. */
rtx
simplify_replace_rtx (x, old, new)
rtx x;
rtx old;
rtx new;
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
@ -321,7 +303,7 @@ simplify_replace_rtx (x, old, new)
rtx exp;
exp = simplify_gen_subreg (GET_MODE (x),
simplify_replace_rtx (SUBREG_REG (x),
old, new),
old, new),
GET_MODE (SUBREG_REG (x)),
SUBREG_BYTE (x));
if (exp)
@ -363,11 +345,8 @@ simplify_replace_rtx (x, old, new)
MODE with input operand OP whose mode was originally OP_MODE.
Return zero if no simplification can be made. */
rtx
simplify_unary_operation (code, mode, op, op_mode)
enum rtx_code code;
enum machine_mode mode;
rtx op;
enum machine_mode op_mode;
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
rtx op, enum machine_mode op_mode)
{
unsigned int width = GET_MODE_BITSIZE (mode);
rtx trueop = avoid_constant_pool_reference (op);
@ -872,10 +851,8 @@ simplify_unary_operation (code, mode, op, op_mode)
Don't use this for relational operations such as EQ or LT.
Use simplify_relational_operation instead. */
rtx
simplify_binary_operation (code, mode, op0, op1)
enum rtx_code code;
enum machine_mode mode;
rtx op0, op1;
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
rtx op0, rtx op1)
{
HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
HOST_WIDE_INT val;
@ -1898,9 +1875,7 @@ struct simplify_plus_minus_op_data
};
static int
simplify_plus_minus_op_data_cmp (p1, p2)
const void *p1;
const void *p2;
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
const struct simplify_plus_minus_op_data *d1 = p1;
const struct simplify_plus_minus_op_data *d2 = p2;
@ -1910,11 +1885,8 @@ simplify_plus_minus_op_data_cmp (p1, p2)
}
static rtx
simplify_plus_minus (code, mode, op0, op1, force)
enum rtx_code code;
enum machine_mode mode;
rtx op0, op1;
int force;
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
rtx op1, int force)
{
struct simplify_plus_minus_op_data ops[8];
rtx result, tem;
@ -2160,10 +2132,8 @@ simplify_plus_minus (code, mode, op0, op1, force)
it returns either const_true_rtx or const0_rtx. */
rtx
simplify_relational_operation (code, mode, op0, op1)
enum rtx_code code;
enum machine_mode mode;
rtx op0, op1;
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
rtx op0, rtx op1)
{
int equal, op0lt, op0ltu, op1lt, op1ltu;
rtx tem;
@ -2397,7 +2367,7 @@ simplify_relational_operation (code, mode, op0, op1)
return const_true_rtx;
}
break;
default:
break;
}
@ -2449,10 +2419,9 @@ simplify_relational_operation (code, mode, op0, op1)
a constant. Return 0 if no simplifications is possible. */
rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
enum rtx_code code;
enum machine_mode mode, op0_mode;
rtx op0, op1, op2;
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
enum machine_mode op0_mode, rtx op0, rtx op1,
rtx op2)
{
unsigned int width = GET_MODE_BITSIZE (mode);
@ -2603,10 +2572,8 @@ simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
Return 0 if no simplifications is possible. */
rtx
simplify_subreg (outermode, op, innermode, byte)
rtx op;
unsigned int byte;
enum machine_mode outermode, innermode;
simplify_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
/* Little bit of sanity checking. */
if (innermode == VOIDmode || outermode == VOIDmode
@ -2989,10 +2956,8 @@ simplify_subreg (outermode, op, innermode, byte)
/* Make a SUBREG operation or equivalent if it folds. */
rtx
simplify_gen_subreg (outermode, op, innermode, byte)
rtx op;
unsigned int byte;
enum machine_mode outermode, innermode;
simplify_gen_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
rtx new;
/* Little bit of sanity checking. */
@ -3061,8 +3026,7 @@ simplify_gen_subreg (outermode, op, innermode, byte)
simplification and 1 for tree simplification. */
rtx
simplify_rtx (x)
rtx x;
simplify_rtx (rtx x)
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);

View File

@ -24,7 +24,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
Value of sreal is
x = sig * 2 ^ exp
where
where
sig = significant
(for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
exp = exponent
@ -35,13 +35,13 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
Only a half of significant bits is used (in normalized sreals) so that we do
not have problems with overflow, for example when c->sig = a->sig * b->sig.
So the precision for 64-bit and 32-bit machines is 32-bit.
Invariant: The numbers are normalized before and after each call of sreal_*.
Normalized sreals:
All numbers (except zero) meet following conditions:
SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
-SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP
-SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP
If the number would be too large, it is set to upper bounds of these
conditions.
@ -56,16 +56,14 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "tm.h"
#include "sreal.h"
static inline void copy PARAMS ((sreal *, sreal *));
static inline void shift_right PARAMS ((sreal *, int));
static void normalize PARAMS ((sreal *));
static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);
/* Print the content of struct sreal. */
void
dump_sreal (file, x)
FILE *file;
sreal *x;
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
@ -79,9 +77,7 @@ dump_sreal (file, x)
/* Copy the sreal number. */
static inline void
copy (r, a)
sreal *r;
sreal *a;
copy (sreal *r, sreal *a)
{
#if SREAL_PART_BITS < 32
r->sig_lo = a->sig_lo;
@ -96,9 +92,7 @@ copy (r, a)
When the most significant bit shifted out is 1, add 1 to X (rounding). */
static inline void
shift_right (x, s)
sreal *x;
int s;
shift_right (sreal *x, int s)
{
#ifdef ENABLE_CHECKING
if (s <= 0 || s > SREAL_BITS)
@ -143,13 +137,12 @@ shift_right (x, s)
/* Normalize *X. */
static void
normalize (x)
sreal *x;
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
int shift;
HOST_WIDE_INT mask;
if (x->sig_lo == 0 && x->sig_hi == 0)
{
x->exp = -SREAL_MAX_EXP;
@ -280,10 +273,7 @@ normalize (x)
/* Set *R to SIG * 2 ^ EXP. Return R. */
sreal *
sreal_init (r, sig, exp)
sreal *r;
unsigned HOST_WIDE_INT sig;
signed int exp;
sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
{
#if SREAL_PART_BITS < 32
r->sig_lo = 0;
@ -300,8 +290,7 @@ sreal_init (r, sig, exp)
/* Return integer value of *R. */
HOST_WIDE_INT
sreal_to_int (r)
sreal *r;
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
if (r->exp <= -SREAL_BITS)
@ -325,9 +314,7 @@ sreal_to_int (r)
/* Compare *A and *B. Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B. */
int
sreal_compare (a, b)
sreal *a;
sreal *b;
sreal_compare (sreal *a, sreal *b)
{
if (a->exp > b->exp)
return 1;
@ -354,10 +341,7 @@ sreal_compare (a, b)
/* *R = *A + *B. Return R. */
sreal *
sreal_add (r, a, b)
sreal *r;
sreal *a;
sreal *b;
sreal_add (sreal *r, sreal *a, sreal *b)
{
int dexp;
sreal tmp;
@ -411,10 +395,7 @@ sreal_add (r, a, b)
/* *R = *A - *B. Return R. */
sreal *
sreal_sub (r, a, b)
sreal *r;
sreal *a;
sreal *b;
sreal_sub (sreal *r, sreal *a, sreal *b)
{
int dexp;
sreal tmp;
@ -467,10 +448,7 @@ sreal_sub (r, a, b)
/* *R = *A * *B. Return R. */
sreal *
sreal_mul (r, a, b)
sreal *r;
sreal *a;
sreal *b;
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
@ -526,10 +504,7 @@ sreal_mul (r, a, b)
/* *R = *A / *B. Return R. */
sreal *
sreal_div (r, a, b)
sreal *r;
sreal *a;
sreal *b;
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
unsigned HOST_WIDE_INT tmp, tmp1, tmp2;

View File

@ -1,5 +1,5 @@
/* Definitions for simple data type for positive real numbers.
Copyright (C) 2002 Free Software Foundation, Inc.
Copyright (C) 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC.
@ -53,15 +53,13 @@ typedef struct sreal
signed int exp; /* Exponent. */
} sreal;
extern void dump_sreal PARAMS ((FILE *, sreal *));
extern sreal *sreal_init PARAMS ((sreal *,
unsigned HOST_WIDE_INT,
signed int));
extern HOST_WIDE_INT sreal_to_int PARAMS ((sreal *));
extern int sreal_compare PARAMS ((sreal *, sreal *));
extern sreal *sreal_add PARAMS ((sreal *, sreal *, sreal *));
extern sreal *sreal_sub PARAMS ((sreal *, sreal *, sreal *));
extern sreal *sreal_mul PARAMS ((sreal *, sreal *, sreal *));
extern sreal *sreal_div PARAMS ((sreal *, sreal *, sreal *));
extern void dump_sreal (FILE *, sreal *);
extern sreal *sreal_init (sreal *, unsigned HOST_WIDE_INT, signed int);
extern HOST_WIDE_INT sreal_to_int (sreal *);
extern int sreal_compare (sreal *, sreal *);
extern sreal *sreal_add (sreal *, sreal *, sreal *);
extern sreal *sreal_sub (sreal *, sreal *, sreal *);
extern sreal *sreal_mul (sreal *, sreal *, sreal *);
extern sreal *sreal_div (sreal *, sreal *, sreal *);
#endif

View File

@ -1,5 +1,5 @@
/* Conditional constant propagation pass for the GNU compiler.
Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Original framework by Daniel Berlin <dan@cgsoftware.com>
Fleshed out and major cleanups by Jeff Law <law@redhat.com>
@ -124,24 +124,22 @@ static sbitmap ssa_edges;
#define SSA_NAME(x) REGNO (SET_DEST (x))
#define EIE(x,y) EDGE_INDEX (edges, x, y)
static void visit_phi_node PARAMS ((rtx, basic_block));
static void visit_expression PARAMS ((rtx, basic_block));
static void defs_to_undefined PARAMS ((rtx));
static void defs_to_varying PARAMS ((rtx));
static void examine_flow_edges PARAMS ((void));
static int mark_references PARAMS ((rtx *, void *));
static void follow_def_use_chains PARAMS ((void));
static void optimize_unexecutable_edges PARAMS ((struct edge_list *, sbitmap));
static void ssa_ccp_substitute_constants PARAMS ((void));
static void ssa_ccp_df_delete_unreachable_insns PARAMS ((void));
static void ssa_fast_dce PARAMS ((struct df *));
static void visit_phi_node (rtx, basic_block);
static void visit_expression (rtx, basic_block);
static void defs_to_undefined (rtx);
static void defs_to_varying (rtx);
static void examine_flow_edges (void);
static int mark_references (rtx *, void *);
static void follow_def_use_chains (void);
static void optimize_unexecutable_edges (struct edge_list *, sbitmap);
static void ssa_ccp_substitute_constants (void);
static void ssa_ccp_df_delete_unreachable_insns (void);
static void ssa_fast_dce (struct df *);
/* Loop through the PHI_NODE's parameters for BLOCK and compare their
lattice values to determine PHI_NODE's lattice value. */
static void
visit_phi_node (phi_node, block)
rtx phi_node;
basic_block block;
visit_phi_node (rtx phi_node, basic_block block)
{
unsigned int i;
rtx phi_node_expr = NULL;
@ -210,8 +208,7 @@ visit_phi_node (phi_node, block)
/* Sets all defs in an insn to UNDEFINED. */
static void
defs_to_undefined (insn)
rtx insn;
defs_to_undefined (rtx insn)
{
struct df_link *currdef;
for (currdef = DF_INSN_DEFS (df_analyzer, insn); currdef;
@ -225,8 +222,7 @@ defs_to_undefined (insn)
/* Sets all defs in an insn to VARYING. */
static void
defs_to_varying (insn)
rtx insn;
defs_to_varying (rtx insn)
{
struct df_link *currdef;
for (currdef = DF_INSN_DEFS (df_analyzer, insn); currdef;
@ -241,9 +237,7 @@ defs_to_varying (insn)
/* Go through the expression, call the appropriate evaluation routines
to attempt cprop */
static void
visit_expression (insn, block)
rtx insn;
basic_block block;
visit_expression (rtx insn, basic_block block)
{
rtx src, dest, set;
@ -625,7 +619,7 @@ visit_expression (insn, block)
/* Iterate over the FLOW_EDGES work list. Simulate the target block
for each edge. */
static void
examine_flow_edges ()
examine_flow_edges (void)
{
while (flow_edges != NULL)
{
@ -693,7 +687,7 @@ examine_flow_edges ()
simulate the uses of the definition. */
static void
follow_def_use_chains ()
follow_def_use_chains (void)
{
/* Iterate over all the entries on the SSA_EDGES worklist. */
while (sbitmap_first_set_bit (ssa_edges) >= 0)
@ -736,9 +730,8 @@ follow_def_use_chains ()
the edge from the CFG. Note we do not delete unreachable blocks
yet as the DF analyzer can not deal with that yet. */
static void
optimize_unexecutable_edges (edges, executable_edges)
struct edge_list *edges;
sbitmap executable_edges;
optimize_unexecutable_edges (struct edge_list *edges,
sbitmap executable_edges)
{
int i;
basic_block bb;
@ -849,7 +842,7 @@ optimize_unexecutable_edges (edges, executable_edges)
replace uses with the known constant value. */
static void
ssa_ccp_substitute_constants ()
ssa_ccp_substitute_constants (void)
{
unsigned int i;
@ -928,7 +921,7 @@ ssa_ccp_substitute_constants ()
updates for the DF analyzer. */
static void
ssa_ccp_df_delete_unreachable_insns ()
ssa_ccp_df_delete_unreachable_insns (void)
{
basic_block b;
@ -975,7 +968,7 @@ ssa_ccp_df_delete_unreachable_insns ()
operate on so that it can be called for sub-graphs. */
void
ssa_const_prop ()
ssa_const_prop (void)
{
unsigned int i;
edge curredge;
@ -1088,9 +1081,7 @@ ssa_const_prop ()
}
static int
mark_references (current_rtx, data)
rtx *current_rtx;
void *data;
mark_references (rtx *current_rtx, void *data)
{
rtx x = *current_rtx;
sbitmap worklist = (sbitmap) data;
@ -1141,8 +1132,7 @@ mark_references (current_rtx, data)
}
static void
ssa_fast_dce (df)
struct df *df;
ssa_fast_dce (struct df *df)
{
sbitmap worklist = sbitmap_alloc (VARRAY_SIZE (ssa_definition));
sbitmap_ones (worklist);

View File

@ -1,5 +1,5 @@
/* Dead-code elimination pass for the GNU compiler.
Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Written by Jeffrey D. Oldham <oldham@codesourcery.com>.
This file is part of GCC.
@ -93,30 +93,23 @@ typedef struct {
/* Local function prototypes. */
static control_dependent_block_to_edge_map control_dependent_block_to_edge_map_create
PARAMS((size_t num_basic_blocks));
(size_t num_basic_blocks);
static void set_control_dependent_block_to_edge_map_bit
PARAMS ((control_dependent_block_to_edge_map c, basic_block bb,
int edge_index));
(control_dependent_block_to_edge_map c, basic_block bb, int edge_index);
static void control_dependent_block_to_edge_map_free
PARAMS ((control_dependent_block_to_edge_map c));
(control_dependent_block_to_edge_map c);
static void find_all_control_dependences
PARAMS ((struct edge_list *el, dominance_info pdom,
control_dependent_block_to_edge_map cdbte));
(struct edge_list *el, dominance_info pdom,
control_dependent_block_to_edge_map cdbte);
static void find_control_dependence
PARAMS ((struct edge_list *el, int edge_index, dominance_info pdom,
control_dependent_block_to_edge_map cdbte));
static basic_block find_pdom
PARAMS ((dominance_info pdom, basic_block block));
static int inherently_necessary_register_1
PARAMS ((rtx *current_rtx, void *data));
static int inherently_necessary_register
PARAMS ((rtx current_rtx));
static int find_inherently_necessary
PARAMS ((rtx current_rtx));
static int propagate_necessity_through_operand
PARAMS ((rtx *current_rtx, void *data));
static void note_inherently_necessary_set
PARAMS ((rtx, rtx, void *));
(struct edge_list *el, int edge_index, dominance_info pdom,
control_dependent_block_to_edge_map cdbte);
static basic_block find_pdom (dominance_info pdom, basic_block block);
static int inherently_necessary_register_1 (rtx *current_rtx, void *data);
static int inherently_necessary_register (rtx current_rtx);
static int find_inherently_necessary (rtx current_rtx);
static int propagate_necessity_through_operand (rtx *current_rtx, void *data);
static void note_inherently_necessary_set (rtx, rtx, void *);
/* Unnecessary insns are indicated using insns' in_struct bit. */
@ -126,8 +119,7 @@ static void note_inherently_necessary_set
#define RESURRECT_INSN(INSN) INSN_DEAD_CODE_P(INSN) = 0
/* Return nonzero if INSN is unnecessary. */
#define UNNECESSARY_P(INSN) INSN_DEAD_CODE_P(INSN)
static void mark_all_insn_unnecessary
PARAMS ((void));
static void mark_all_insn_unnecessary (void);
/* Execute CODE with free variable INSN for all unnecessary insns in
an unspecified order, producing no output. */
#define EXECUTE_IF_UNNECESSARY(INSN, CODE) \
@ -142,11 +134,9 @@ static void mark_all_insn_unnecessary
}
/* Find the label beginning block BB. */
static rtx find_block_label
PARAMS ((basic_block bb));
static rtx find_block_label (basic_block bb);
/* Remove INSN, updating its basic block structure. */
static void delete_insn_bb
PARAMS ((rtx insn));
static void delete_insn_bb (rtx insn);
/* Recording which blocks are control dependent on which edges. We
expect each block to be control dependent on very few edges so we
@ -161,8 +151,7 @@ static void delete_insn_bb
control_dependent_block_to_edge_map_free (). */
static control_dependent_block_to_edge_map
control_dependent_block_to_edge_map_create (num_basic_blocks)
size_t num_basic_blocks;
control_dependent_block_to_edge_map_create (size_t num_basic_blocks)
{
int i;
control_dependent_block_to_edge_map c
@ -180,10 +169,8 @@ control_dependent_block_to_edge_map_create (num_basic_blocks)
control-dependent. */
static void
set_control_dependent_block_to_edge_map_bit (c, bb, edge_index)
control_dependent_block_to_edge_map c;
basic_block bb;
int edge_index;
set_control_dependent_block_to_edge_map_bit (control_dependent_block_to_edge_map c,
basic_block bb, int edge_index)
{
if (bb->index - (INVALID_BLOCK+1) >= c->length)
abort ();
@ -205,8 +192,7 @@ set_control_dependent_block_to_edge_map_bit (c, bb, edge_index)
/* Destroy a control_dependent_block_to_edge_map C. */
static void
control_dependent_block_to_edge_map_free (c)
control_dependent_block_to_edge_map c;
control_dependent_block_to_edge_map_free (control_dependent_block_to_edge_map c)
{
int i;
for (i = 0; i < c->length; ++i)
@ -220,10 +206,8 @@ control_dependent_block_to_edge_map_free (c)
which should be empty. */
static void
find_all_control_dependences (el, pdom, cdbte)
struct edge_list *el;
dominance_info pdom;
control_dependent_block_to_edge_map cdbte;
find_all_control_dependences (struct edge_list *el, dominance_info pdom,
control_dependent_block_to_edge_map cdbte)
{
int i;
@ -238,11 +222,9 @@ find_all_control_dependences (el, pdom, cdbte)
with zeros in each (block b', edge) position. */
static void
find_control_dependence (el, edge_index, pdom, cdbte)
struct edge_list *el;
int edge_index;
dominance_info pdom;
control_dependent_block_to_edge_map cdbte;
find_control_dependence (struct edge_list *el, int edge_index,
dominance_info pdom,
control_dependent_block_to_edge_map cdbte)
{
basic_block current_block;
basic_block ending_block;
@ -269,9 +251,7 @@ find_control_dependence (el, edge_index, pdom, cdbte)
negative numbers. */
static basic_block
find_pdom (pdom, block)
dominance_info pdom;
basic_block block;
find_pdom (dominance_info pdom, basic_block block)
{
if (!block)
abort ();
@ -300,9 +280,8 @@ find_pdom (pdom, block)
particular PC values. */
static int
inherently_necessary_register_1 (current_rtx, data)
rtx *current_rtx;
void *data ATTRIBUTE_UNUSED;
inherently_necessary_register_1 (rtx *current_rtx,
void *data ATTRIBUTE_UNUSED)
{
rtx x = *current_rtx;
@ -332,8 +311,7 @@ inherently_necessary_register_1 (current_rtx, data)
/* Return nonzero if the insn CURRENT_RTX is inherently necessary. */
static int
inherently_necessary_register (current_rtx)
rtx current_rtx;
inherently_necessary_register (rtx current_rtx)
{
return for_each_rtx (&current_rtx,
&inherently_necessary_register_1, NULL);
@ -345,10 +323,7 @@ inherently_necessary_register (current_rtx)
nonzero value in inherently_necessary_p if such a store is found. */
static void
note_inherently_necessary_set (dest, set, data)
rtx set ATTRIBUTE_UNUSED;
rtx dest;
void *data;
note_inherently_necessary_set (rtx dest, rtx set ATTRIBUTE_UNUSED, void *data)
{
int *inherently_necessary_set_p = (int *) data;
@ -370,8 +345,7 @@ note_inherently_necessary_set (dest, set, data)
Return nonzero iff inherently necessary. */
static int
find_inherently_necessary (x)
rtx x;
find_inherently_necessary (rtx x)
{
if (x == NULL_RTX)
return 0;
@ -416,9 +390,7 @@ find_inherently_necessary (x)
instructions. */
static int
propagate_necessity_through_operand (current_rtx, data)
rtx *current_rtx;
void *data;
propagate_necessity_through_operand (rtx *current_rtx, void *data)
{
rtx x = *current_rtx;
varray_type *unprocessed_instructions = (varray_type *) data;
@ -447,21 +419,20 @@ propagate_necessity_through_operand (current_rtx, data)
/* Indicate all insns initially assumed to be unnecessary. */
static void
mark_all_insn_unnecessary ()
mark_all_insn_unnecessary (void)
{
rtx insn;
for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn)) {
if (INSN_P (insn))
KILL_INSN (insn);
}
}
/* Find the label beginning block BB, adding one if necessary. */
static rtx
find_block_label (bb)
basic_block bb;
find_block_label (basic_block bb)
{
rtx insn = bb->head;
if (LABEL_P (insn))
@ -478,8 +449,7 @@ find_block_label (bb)
/* Remove INSN, updating its basic block structure. */
static void
delete_insn_bb (insn)
rtx insn;
delete_insn_bb (rtx insn)
{
if (!insn)
abort ();
@ -498,7 +468,7 @@ delete_insn_bb (insn)
/* Perform the dead-code elimination. */
void
ssa_eliminate_dead_code ()
ssa_eliminate_dead_code (void)
{
rtx insn;
basic_block bb;
@ -734,7 +704,7 @@ ssa_eliminate_dead_code ()
if (INSN_P (insn))
RESURRECT_INSN (insn);
}
if (VARRAY_ACTIVE_SIZE (unprocessed_instructions) != 0)
abort ();
control_dependent_block_to_edge_map_free (cdbte);

356
gcc/ssa.c
View File

@ -127,36 +127,24 @@ struct ssa_rename_from_hash_table_data {
partition reg_partition;
};
static rtx gen_sequence
PARAMS ((void));
static void ssa_rename_from_initialize
PARAMS ((void));
static rtx ssa_rename_from_lookup
PARAMS ((int reg));
static unsigned int original_register
PARAMS ((unsigned int regno));
static void ssa_rename_from_insert
PARAMS ((unsigned int reg, rtx r));
static void ssa_rename_from_free
PARAMS ((void));
typedef int (*srf_trav) PARAMS ((int regno, rtx r, sbitmap canonical_elements, partition reg_partition));
static void ssa_rename_from_traverse
PARAMS ((htab_trav callback_function, sbitmap canonical_elements, partition reg_partition));
/*static Avoid warning message. */ void ssa_rename_from_print
PARAMS ((void));
static int ssa_rename_from_print_1
PARAMS ((void **slot, void *data));
static hashval_t ssa_rename_from_hash_function
PARAMS ((const void * srfp));
static int ssa_rename_from_equal
PARAMS ((const void *srfp1, const void *srfp2));
static void ssa_rename_from_delete
PARAMS ((void *srfp));
static rtx gen_sequence (void);
static void ssa_rename_from_initialize (void);
static rtx ssa_rename_from_lookup (int reg);
static unsigned int original_register (unsigned int regno);
static void ssa_rename_from_insert (unsigned int reg, rtx r);
static void ssa_rename_from_free (void);
typedef int (*srf_trav) (int regno, rtx r, sbitmap canonical_elements,
partition reg_partition);
static void ssa_rename_from_traverse (htab_trav callback_function,
sbitmap canonical_elements, partition reg_partition);
/*static Avoid warning message. */ void ssa_rename_from_print (void);
static int ssa_rename_from_print_1 (void **slot, void *data);
static hashval_t ssa_rename_from_hash_function (const void * srfp);
static int ssa_rename_from_equal (const void *srfp1, const void *srfp2);
static void ssa_rename_from_delete (void *srfp);
static rtx ssa_rename_to_lookup
PARAMS ((rtx reg));
static void ssa_rename_to_insert
PARAMS ((rtx reg, rtx r));
static rtx ssa_rename_to_lookup (rtx reg);
static void ssa_rename_to_insert (rtx reg, rtx r);
/* The number of registers that were live on entry to the SSA routines. */
static unsigned int ssa_max_reg_num;
@ -165,87 +153,66 @@ static unsigned int ssa_max_reg_num;
struct rename_context;
static inline rtx * phi_alternative
PARAMS ((rtx, int));
static void compute_dominance_frontiers_1
PARAMS ((sbitmap *frontiers, dominance_info idom, int bb, sbitmap done));
static void find_evaluations_1
PARAMS ((rtx dest, rtx set, void *data));
static void find_evaluations
PARAMS ((sbitmap *evals, int nregs));
static void compute_iterated_dominance_frontiers
PARAMS ((sbitmap *idfs, sbitmap *frontiers, sbitmap *evals, int nregs));
static void insert_phi_node
PARAMS ((int regno, int b));
static void insert_phi_nodes
PARAMS ((sbitmap *idfs, sbitmap *evals, int nregs));
static void create_delayed_rename
PARAMS ((struct rename_context *, rtx *));
static void apply_delayed_renames
PARAMS ((struct rename_context *));
static int rename_insn_1
PARAMS ((rtx *ptr, void *data));
static void rename_block
PARAMS ((int b, dominance_info dom));
static void rename_registers
PARAMS ((int nregs, dominance_info idom));
static inline rtx * phi_alternative (rtx, int);
static void compute_dominance_frontiers_1 (sbitmap *frontiers,
dominance_info idom, int bb,
sbitmap done);
static void find_evaluations_1 (rtx dest, rtx set, void *data);
static void find_evaluations (sbitmap *evals, int nregs);
static void compute_iterated_dominance_frontiers (sbitmap *idfs,
sbitmap *frontiers,
sbitmap *evals, int nregs);
static void insert_phi_node (int regno, int b);
static void insert_phi_nodes (sbitmap *idfs, sbitmap *evals, int nregs);
static void create_delayed_rename (struct rename_context *, rtx *);
static void apply_delayed_renames (struct rename_context *);
static int rename_insn_1 (rtx *ptr, void *data);
static void rename_block (int b, dominance_info dom);
static void rename_registers (int nregs, dominance_info idom);
static inline int ephi_add_node
PARAMS ((rtx reg, rtx *nodes, int *n_nodes));
static int * ephi_forward
PARAMS ((int t, sbitmap visited, sbitmap *succ, int *tstack));
static void ephi_backward
PARAMS ((int t, sbitmap visited, sbitmap *pred, rtx *nodes));
static void ephi_create
PARAMS ((int t, sbitmap visited, sbitmap *pred, sbitmap *succ, rtx *nodes));
static void eliminate_phi
PARAMS ((edge e, partition reg_partition));
static int make_regs_equivalent_over_bad_edges
PARAMS ((int bb, partition reg_partition));
static inline int ephi_add_node (rtx reg, rtx *nodes, int *n_nodes);
static int * ephi_forward (int t, sbitmap visited, sbitmap *succ, int *tstack);
static void ephi_backward (int t, sbitmap visited, sbitmap *pred, rtx *nodes);
static void ephi_create (int t, sbitmap visited, sbitmap *pred,
sbitmap *succ, rtx *nodes);
static void eliminate_phi (edge e, partition reg_partition);
static int make_regs_equivalent_over_bad_edges (int bb,
partition reg_partition);
/* These are used only in the conservative register partitioning
algorithms. */
static int make_equivalent_phi_alternatives_equivalent
PARAMS ((int bb, partition reg_partition));
static partition compute_conservative_reg_partition
PARAMS ((void));
static int record_canonical_element_1
PARAMS ((void **srfp, void *data));
static int check_hard_regs_in_partition
PARAMS ((partition reg_partition));
(int bb, partition reg_partition);
static partition compute_conservative_reg_partition (void);
static int record_canonical_element_1 (void **srfp, void *data);
static int check_hard_regs_in_partition (partition reg_partition);
/* These are used in the register coalescing algorithm. */
static int coalesce_if_unconflicting
PARAMS ((partition p, conflict_graph conflicts, int reg1, int reg2));
static int coalesce_regs_in_copies
PARAMS ((basic_block bb, partition p, conflict_graph conflicts));
static int coalesce_reg_in_phi
PARAMS ((rtx, int dest_regno, int src_regno, void *data));
static int coalesce_regs_in_successor_phi_nodes
PARAMS ((basic_block bb, partition p, conflict_graph conflicts));
static partition compute_coalesced_reg_partition
PARAMS ((void));
static int mark_reg_in_phi
PARAMS ((rtx *ptr, void *data));
static void mark_phi_and_copy_regs
PARAMS ((regset phi_set));
static int coalesce_if_unconflicting (partition p, conflict_graph conflicts,
int reg1, int reg2);
static int coalesce_regs_in_copies (basic_block bb, partition p,
conflict_graph conflicts);
static int coalesce_reg_in_phi (rtx, int dest_regno, int src_regno,
void *data);
static int coalesce_regs_in_successor_phi_nodes (basic_block bb,
partition p,
conflict_graph conflicts);
static partition compute_coalesced_reg_partition (void);
static int mark_reg_in_phi (rtx *ptr, void *data);
static void mark_phi_and_copy_regs (regset phi_set);
static int rename_equivalent_regs_in_insn
PARAMS ((rtx *ptr, void *data));
static void rename_equivalent_regs
PARAMS ((partition reg_partition));
static int rename_equivalent_regs_in_insn (rtx *ptr, void *data);
static void rename_equivalent_regs (partition reg_partition);
/* Deal with hard registers. */
static int conflicting_hard_regs_p
PARAMS ((int reg1, int reg2));
static int conflicting_hard_regs_p (int reg1, int reg2);
/* ssa_rename_to maps registers and machine modes to SSA pseudo registers. */
/* Find the register associated with REG in the indicated mode. */
static rtx
ssa_rename_to_lookup (reg)
rtx reg;
ssa_rename_to_lookup (rtx reg)
{
if (!HARD_REGISTER_P (reg))
return ssa_rename_to_pseudo[REGNO (reg) - FIRST_PSEUDO_REGISTER];
@ -256,9 +223,7 @@ ssa_rename_to_lookup (reg)
/* Store a new value mapping REG to R in ssa_rename_to. */
static void
ssa_rename_to_insert(reg, r)
rtx reg;
rtx r;
ssa_rename_to_insert (rtx reg, rtx r)
{
if (!HARD_REGISTER_P (reg))
ssa_rename_to_pseudo[REGNO (reg) - FIRST_PSEUDO_REGISTER] = r;
@ -269,7 +234,7 @@ ssa_rename_to_insert(reg, r)
/* Prepare ssa_rename_from for use. */
static void
ssa_rename_from_initialize ()
ssa_rename_from_initialize (void)
{
/* We use an arbitrary initial hash table size of 64. */
ssa_rename_from_ht = htab_create (64,
@ -282,8 +247,7 @@ ssa_rename_from_initialize ()
found. */
static rtx
ssa_rename_from_lookup (reg)
int reg;
ssa_rename_from_lookup (int reg)
{
ssa_rename_from_pair srfp;
ssa_rename_from_pair *answer;
@ -299,8 +263,7 @@ ssa_rename_from_lookup (reg)
Otherwise, return this register number REGNO. */
static unsigned int
original_register (regno)
unsigned int regno;
original_register (unsigned int regno)
{
rtx original_rtx = ssa_rename_from_lookup (regno);
return original_rtx != NULL_RTX ? REGNO (original_rtx) : regno;
@ -309,9 +272,7 @@ original_register (regno)
/* Add mapping from R to REG to ssa_rename_from even if already present. */
static void
ssa_rename_from_insert (reg, r)
unsigned int reg;
rtx r;
ssa_rename_from_insert (unsigned int reg, rtx r)
{
void **slot;
ssa_rename_from_pair *srfp = xmalloc (sizeof (ssa_rename_from_pair));
@ -329,11 +290,8 @@ ssa_rename_from_insert (reg, r)
current use of this function. */
static void
ssa_rename_from_traverse (callback_function,
canonical_elements, reg_partition)
htab_trav callback_function;
sbitmap canonical_elements;
partition reg_partition;
ssa_rename_from_traverse (htab_trav callback_function,
sbitmap canonical_elements, partition reg_partition)
{
struct ssa_rename_from_hash_table_data srfhd;
srfhd.canonical_elements = canonical_elements;
@ -344,7 +302,7 @@ ssa_rename_from_traverse (callback_function,
/* Destroy ssa_rename_from. */
static void
ssa_rename_from_free ()
ssa_rename_from_free (void)
{
htab_delete (ssa_rename_from_ht);
}
@ -353,7 +311,7 @@ ssa_rename_from_free ()
/* static Avoid erroneous error message. */
void
ssa_rename_from_print ()
ssa_rename_from_print (void)
{
printf ("ssa_rename_from's hash table contents:\n");
htab_traverse (ssa_rename_from_ht, &ssa_rename_from_print_1, NULL);
@ -363,9 +321,7 @@ ssa_rename_from_print ()
attribute DATA. Used as a callback function with htab_traverse (). */
static int
ssa_rename_from_print_1 (slot, data)
void **slot;
void *data ATTRIBUTE_UNUSED;
ssa_rename_from_print_1 (void **slot, void *data ATTRIBUTE_UNUSED)
{
ssa_rename_from_pair * p = *slot;
printf ("ssa_rename_from maps pseudo %i to original %i.\n",
@ -376,8 +332,7 @@ ssa_rename_from_print_1 (slot, data)
/* Given a hash entry SRFP, yield a hash value. */
static hashval_t
ssa_rename_from_hash_function (srfp)
const void *srfp;
ssa_rename_from_hash_function (const void *srfp)
{
return ((const ssa_rename_from_pair *) srfp)->reg;
}
@ -385,9 +340,7 @@ ssa_rename_from_hash_function (srfp)
/* Test whether two hash table entries SRFP1 and SRFP2 are equal. */
static int
ssa_rename_from_equal (srfp1, srfp2)
const void *srfp1;
const void *srfp2;
ssa_rename_from_equal (const void *srfp1, const void *srfp2)
{
return ssa_rename_from_hash_function (srfp1) ==
ssa_rename_from_hash_function (srfp2);
@ -396,8 +349,7 @@ ssa_rename_from_equal (srfp1, srfp2)
/* Delete the hash table entry SRFP. */
static void
ssa_rename_from_delete (srfp)
void *srfp;
ssa_rename_from_delete (void *srfp)
{
free (srfp);
}
@ -406,9 +358,7 @@ ssa_rename_from_delete (srfp)
for predecessor block C. */
static inline rtx *
phi_alternative (set, c)
rtx set;
int c;
phi_alternative (rtx set, int c)
{
rtvec phi_vec = XVEC (SET_SRC (set), 0);
int v;
@ -425,9 +375,7 @@ phi_alternative (set, c)
found for C. */
int
remove_phi_alternative (set, block)
rtx set;
basic_block block;
remove_phi_alternative (rtx set, basic_block block)
{
rtvec phi_vec = XVEC (SET_SRC (set), 0);
int num_elem = GET_NUM_ELEM (phi_vec);
@ -458,10 +406,8 @@ static sbitmap *fe_evals;
static int fe_current_bb;
static void
find_evaluations_1 (dest, set, data)
rtx dest;
rtx set ATTRIBUTE_UNUSED;
void *data ATTRIBUTE_UNUSED;
find_evaluations_1 (rtx dest, rtx set ATTRIBUTE_UNUSED,
void *data ATTRIBUTE_UNUSED)
{
if (GET_CODE (dest) == REG
&& CONVERT_REGISTER_TO_SSA_P (REGNO (dest)))
@ -469,9 +415,7 @@ find_evaluations_1 (dest, set, data)
}
static void
find_evaluations (evals, nregs)
sbitmap *evals;
int nregs;
find_evaluations (sbitmap *evals, int nregs)
{
basic_block bb;
@ -515,11 +459,8 @@ find_evaluations (evals, nregs)
*/
static void
compute_dominance_frontiers_1 (frontiers, idom, bb, done)
sbitmap *frontiers;
dominance_info idom;
int bb;
sbitmap done;
compute_dominance_frontiers_1 (sbitmap *frontiers, dominance_info idom,
int bb, sbitmap done)
{
basic_block b = BASIC_BLOCK (bb);
edge e;
@ -559,9 +500,7 @@ compute_dominance_frontiers_1 (frontiers, idom, bb, done)
}
void
compute_dominance_frontiers (frontiers, idom)
sbitmap *frontiers;
dominance_info idom;
compute_dominance_frontiers (sbitmap *frontiers, dominance_info idom)
{
sbitmap done = sbitmap_alloc (last_basic_block);
sbitmap_zero (done);
@ -580,11 +519,8 @@ compute_dominance_frontiers (frontiers, idom)
*/
static void
compute_iterated_dominance_frontiers (idfs, frontiers, evals, nregs)
sbitmap *idfs;
sbitmap *frontiers;
sbitmap *evals;
int nregs;
compute_iterated_dominance_frontiers (sbitmap *idfs, sbitmap *frontiers,
sbitmap *evals, int nregs)
{
sbitmap worklist;
int reg, passes = 0;
@ -638,8 +574,7 @@ compute_iterated_dominance_frontiers (idfs, frontiers, evals, nregs)
/* Insert the phi nodes. */
static void
insert_phi_node (regno, bb)
int regno, bb;
insert_phi_node (int regno, int bb)
{
basic_block b = BASIC_BLOCK (bb);
edge e;
@ -683,10 +618,7 @@ insert_phi_node (regno, bb)
}
static void
insert_phi_nodes (idfs, evals, nregs)
sbitmap *idfs;
sbitmap *evals ATTRIBUTE_UNUSED;
int nregs;
insert_phi_nodes (sbitmap *idfs, sbitmap *evals ATTRIBUTE_UNUSED, int nregs)
{
int reg;
@ -738,9 +670,7 @@ struct rename_context
/* Queue the rename of *REG_LOC. */
static void
create_delayed_rename (c, reg_loc)
struct rename_context *c;
rtx *reg_loc;
create_delayed_rename (struct rename_context *c, rtx *reg_loc)
{
struct rename_set_data *r;
r = (struct rename_set_data *) xmalloc (sizeof(*r));
@ -770,8 +700,7 @@ create_delayed_rename (c, reg_loc)
applying all the renames on NEW_RENAMES. */
static void
apply_delayed_renames (c)
struct rename_context *c;
apply_delayed_renames (struct rename_context *c)
{
struct rename_set_data *r;
struct rename_set_data *last_r = NULL;
@ -820,9 +749,7 @@ apply_delayed_renames (c)
Mark pseudos that are set for later update. Transform uses of pseudos. */
static int
rename_insn_1 (ptr, data)
rtx *ptr;
void *data;
rename_insn_1 (rtx *ptr, void *data)
{
rtx x = *ptr;
struct rename_context *context = data;
@ -976,7 +903,7 @@ rename_insn_1 (ptr, data)
}
static rtx
gen_sequence ()
gen_sequence (void)
{
rtx first_insn = get_insns ();
rtx result;
@ -998,9 +925,7 @@ gen_sequence ()
}
static void
rename_block (bb, idom)
int bb;
dominance_info idom;
rename_block (int bb, dominance_info idom)
{
basic_block b = BASIC_BLOCK (bb);
edge e;
@ -1134,9 +1059,7 @@ rename_block (bb, idom)
}
static void
rename_registers (nregs, idom)
int nregs;
dominance_info idom;
rename_registers (int nregs, dominance_info idom)
{
VARRAY_RTX_INIT (ssa_definition, nregs * 3, "ssa_definition");
ssa_rename_from_initialize ();
@ -1157,7 +1080,7 @@ rename_registers (nregs, idom)
/* The main entry point for moving to SSA. */
void
convert_to_ssa ()
convert_to_ssa (void)
{
/* Element I is the set of blocks that set register I. */
sbitmap *evals;
@ -1247,9 +1170,7 @@ convert_to_ssa ()
index of this register in the node set. */
static inline int
ephi_add_node (reg, nodes, n_nodes)
rtx reg, *nodes;
int *n_nodes;
ephi_add_node (rtx reg, rtx *nodes, int *n_nodes)
{
int i;
for (i = *n_nodes - 1; i >= 0; --i)
@ -1266,11 +1187,7 @@ ephi_add_node (reg, nodes, n_nodes)
no other dependencies. */
static int *
ephi_forward (t, visited, succ, tstack)
int t;
sbitmap visited;
sbitmap *succ;
int *tstack;
ephi_forward (int t, sbitmap visited, sbitmap *succ, int *tstack)
{
int s;
@ -1290,10 +1207,7 @@ ephi_forward (t, visited, succ, tstack)
a cycle in the graph, copying the data forward as we go. */
static void
ephi_backward (t, visited, pred, nodes)
int t;
sbitmap visited, *pred;
rtx *nodes;
ephi_backward (int t, sbitmap visited, sbitmap *pred, rtx *nodes)
{
int p;
@ -1313,10 +1227,7 @@ ephi_backward (t, visited, pred, nodes)
and any cycle of which it is a member. */
static void
ephi_create (t, visited, pred, succ, nodes)
int t;
sbitmap visited, *pred, *succ;
rtx *nodes;
ephi_create (int t, sbitmap visited, sbitmap *pred, sbitmap *succ, rtx *nodes)
{
rtx reg_u = NULL_RTX;
int unvisited_predecessors = 0;
@ -1372,9 +1283,7 @@ ephi_create (t, visited, pred, succ, nodes)
/* Convert the edge to normal form. */
static void
eliminate_phi (e, reg_partition)
edge e;
partition reg_partition;
eliminate_phi (edge e, partition reg_partition)
{
int n_nodes;
sbitmap *pred, *succ;
@ -1501,9 +1410,7 @@ out:
regs were not already in the same class. */
static int
make_regs_equivalent_over_bad_edges (bb, reg_partition)
int bb;
partition reg_partition;
make_regs_equivalent_over_bad_edges (int bb, partition reg_partition)
{
int changed = 0;
basic_block b = BASIC_BLOCK (bb);
@ -1574,9 +1481,7 @@ make_regs_equivalent_over_bad_edges (bb, reg_partition)
Return nonzero if any new register classes were unioned. */
static int
make_equivalent_phi_alternatives_equivalent (bb, reg_partition)
int bb;
partition reg_partition;
make_equivalent_phi_alternatives_equivalent (int bb, partition reg_partition)
{
int changed = 0;
basic_block b = BASIC_BLOCK (bb);
@ -1659,7 +1564,7 @@ make_equivalent_phi_alternatives_equivalent (bb, reg_partition)
See Morgan 7.3.1. */
static partition
compute_conservative_reg_partition ()
compute_conservative_reg_partition (void)
{
basic_block bb;
int changed = 0;
@ -1721,11 +1626,8 @@ compute_conservative_reg_partition ()
See Morgan figure 11.15. */
static int
coalesce_if_unconflicting (p, conflicts, reg1, reg2)
partition p;
conflict_graph conflicts;
int reg1;
int reg2;
coalesce_if_unconflicting (partition p, conflict_graph conflicts,
int reg1, int reg2)
{
int reg;
@ -1769,10 +1671,7 @@ coalesce_if_unconflicting (p, conflicts, reg1, reg2)
See Morgan figure 11.14. */
static int
coalesce_regs_in_copies (bb, p, conflicts)
basic_block bb;
partition p;
conflict_graph conflicts;
coalesce_regs_in_copies (basic_block bb, partition p, conflict_graph conflicts)
{
int changed = 0;
rtx insn;
@ -1831,11 +1730,8 @@ struct phi_coalesce_context
phi_coalesce_context struct. */
static int
coalesce_reg_in_phi (insn, dest_regno, src_regno, data)
rtx insn ATTRIBUTE_UNUSED;
int dest_regno;
int src_regno;
void *data;
coalesce_reg_in_phi (rtx insn ATTRIBUTE_UNUSED, int dest_regno,
int src_regno, void *data)
{
struct phi_coalesce_context *context =
(struct phi_coalesce_context *) data;
@ -1857,10 +1753,8 @@ coalesce_reg_in_phi (insn, dest_regno, src_regno, data)
See Morgan figure 11.14. */
static int
coalesce_regs_in_successor_phi_nodes (bb, p, conflicts)
basic_block bb;
partition p;
conflict_graph conflicts;
coalesce_regs_in_successor_phi_nodes (basic_block bb, partition p,
conflict_graph conflicts)
{
struct phi_coalesce_context context;
context.p = p;
@ -1878,7 +1772,7 @@ coalesce_regs_in_successor_phi_nodes (bb, p, conflicts)
The caller is responsible for deallocating the returned partition. */
static partition
compute_coalesced_reg_partition ()
compute_coalesced_reg_partition (void)
{
basic_block bb;
int changed = 0;
@ -1936,9 +1830,7 @@ compute_coalesced_reg_partition ()
set all regs. Called from for_each_rtx. */
static int
mark_reg_in_phi (ptr, data)
rtx *ptr;
void *data;
mark_reg_in_phi (rtx *ptr, void *data)
{
rtx expr = *ptr;
regset set = (regset) data;
@ -1962,8 +1854,7 @@ mark_reg_in_phi (ptr, data)
ssa_definition. */
static void
mark_phi_and_copy_regs (phi_set)
regset phi_set;
mark_phi_and_copy_regs (regset phi_set)
{
unsigned int reg;
@ -2007,9 +1898,7 @@ mark_phi_and_copy_regs (phi_set)
partition which specifies equivalences. */
static int
rename_equivalent_regs_in_insn (ptr, data)
rtx *ptr;
void* data;
rename_equivalent_regs_in_insn (rtx *ptr, void* data)
{
rtx x = *ptr;
partition reg_partition = (partition) data;
@ -2058,9 +1947,7 @@ rename_equivalent_regs_in_insn (ptr, data)
as a callback function for traversing ssa_rename_from. */
static int
record_canonical_element_1 (srfp, data)
void **srfp;
void *data;
record_canonical_element_1 (void **srfp, void *data)
{
unsigned int reg = ((ssa_rename_from_pair *) *srfp)->reg;
sbitmap canonical_elements =
@ -2078,8 +1965,7 @@ record_canonical_element_1 (srfp, data)
nonzero if this is the case, i.e., the partition is acceptable. */
static int
check_hard_regs_in_partition (reg_partition)
partition reg_partition;
check_hard_regs_in_partition (partition reg_partition)
{
/* CANONICAL_ELEMENTS has a nonzero bit if a class with the given register
number and machine mode has already been seen. This is a
@ -2122,8 +2008,7 @@ check_hard_regs_in_partition (reg_partition)
any SEQUENCE insns. */
static void
rename_equivalent_regs (reg_partition)
partition reg_partition;
rename_equivalent_regs (partition reg_partition)
{
basic_block b;
@ -2169,7 +2054,7 @@ rename_equivalent_regs (reg_partition)
/* The main entry point for moving from SSA. */
void
convert_from_ssa ()
convert_from_ssa (void)
{
basic_block b, bb;
partition reg_partition;
@ -2257,10 +2142,7 @@ convert_from_ssa ()
value. Otherwise, returns zero. */
int
for_each_successor_phi (bb, fn, data)
basic_block bb;
successor_phi_fn fn;
void *data;
for_each_successor_phi (basic_block bb, successor_phi_fn fn, void *data)
{
edge e;
@ -2317,9 +2199,7 @@ for_each_successor_phi (bb, fn, data)
different hard registers. */
static int
conflicting_hard_regs_p (reg1, reg2)
int reg1;
int reg2;
conflicting_hard_regs_p (int reg1, int reg2)
{
int orig_reg1 = original_register (reg1);
int orig_reg2 = original_register (reg2);

View File

@ -1,5 +1,5 @@
/* Static Single Assignment (SSA) definitions for GCC
Copyright (C) 2000, 2001 Free Software Foundation, Inc.
Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc.
Written by Jeffrey D. Oldham <oldham@codesourcery.com>.
This file is part of GCC.
@ -21,23 +21,21 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
/* Main SSA routines. */
extern void convert_to_ssa PARAMS ((void));
extern void convert_from_ssa PARAMS ((void));
typedef int (*successor_phi_fn) PARAMS ((rtx, int, int, void *));
extern int for_each_successor_phi PARAMS ((basic_block bb,
successor_phi_fn,
void *));
void compute_dominance_frontiers PARAMS ((sbitmap *frontiers,
dominance_info idom));
extern int remove_phi_alternative PARAMS ((rtx, basic_block));
extern void convert_to_ssa (void);
extern void convert_from_ssa (void);
typedef int (*successor_phi_fn) (rtx, int, int, void *);
extern int for_each_successor_phi (basic_block bb, successor_phi_fn,
void *);
void compute_dominance_frontiers (sbitmap *frontiers, dominance_info idom);
extern int remove_phi_alternative (rtx, basic_block);
/* Optimizations. */
/* In ssa-dce.c */
extern void ssa_eliminate_dead_code PARAMS ((void));
extern void ssa_eliminate_dead_code (void);
/* In ssa-ccp.c */
extern void ssa_const_prop PARAMS ((void));
extern void ssa_const_prop (void);
/* SSA definitions and uses. */

View File

@ -1,5 +1,5 @@
/* stack.h - structed access to object stacks
Copyright (C) 1988, 2000 Free Software Foundation, Inc.
Copyright (C) 1988, 2000, 2003 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com).
This program is free software; you can redistribute it and/or modify it
@ -21,7 +21,7 @@ Boston, MA 02111-1307, USA. */
on top of obstacks for GNU C++. */
/* Stack of data placed on obstacks. */
struct stack_level
{
/* Pointer back to previous such level. */
@ -38,5 +38,5 @@ struct stack_level
int limit;
};
struct stack_level *push_stack_level PARAMS ((struct obstack *, char *, int));
struct stack_level *pop_stack_level PARAMS ((struct stack_level *));
struct stack_level *push_stack_level (struct obstack *, char *, int);
struct stack_level *pop_stack_level (struct stack_level *);

File diff suppressed because it is too large Load Diff

View File

@ -59,18 +59,16 @@ unsigned int set_alignment = 0;
called only by a front end. */
static int reference_types_internal = 0;
static void finalize_record_size PARAMS ((record_layout_info));
static void finalize_type_size PARAMS ((tree));
static void place_union_field PARAMS ((record_layout_info, tree));
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span PARAMS ((HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT, HOST_WIDE_INT,
tree));
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
#endif
static unsigned int update_alignment_for_field
PARAMS ((record_layout_info, tree,
unsigned int));
extern void debug_rli PARAMS ((record_layout_info));
static unsigned int update_alignment_for_field (record_layout_info, tree,
unsigned int);
extern void debug_rli (record_layout_info);
/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
@ -85,7 +83,7 @@ int immediate_size_expand;
by front end. */
void
internal_reference_types ()
internal_reference_types (void)
{
reference_types_internal = 1;
}
@ -93,7 +91,7 @@ internal_reference_types ()
/* Get a list of all the objects put on the pending sizes list. */
tree
get_pending_sizes ()
get_pending_sizes (void)
{
tree chain = pending_sizes;
tree t;
@ -109,8 +107,7 @@ get_pending_sizes ()
/* Return nonzero if EXPR is present on the pending sizes list. */
int
is_pending_size (expr)
tree expr;
is_pending_size (tree expr)
{
tree t;
@ -123,8 +120,7 @@ is_pending_size (expr)
/* Add EXPR to the pending sizes list. */
void
put_pending_size (expr)
tree expr;
put_pending_size (tree expr)
{
/* Strip any simple arithmetic from EXPR to see if it has an underlying
SAVE_EXPR. */
@ -138,8 +134,7 @@ put_pending_size (expr)
empty. */
void
put_pending_sizes (chain)
tree chain;
put_pending_sizes (tree chain)
{
if (pending_sizes)
abort ();
@ -151,8 +146,7 @@ put_pending_sizes (chain)
to serve as the actual size-expression for a type or decl. */
tree
variable_size (size)
tree size;
variable_size (tree size)
{
tree save;
@ -215,10 +209,7 @@ variable_size (size)
be used. */
enum machine_mode
mode_for_size (size, class, limit)
unsigned int size;
enum mode_class class;
int limit;
mode_for_size (unsigned int size, enum mode_class class, int limit)
{
enum machine_mode mode;
@ -237,10 +228,7 @@ mode_for_size (size, class, limit)
/* Similar, except passed a tree node. */
enum machine_mode
mode_for_size_tree (size, class, limit)
tree size;
enum mode_class class;
int limit;
mode_for_size_tree (tree size, enum mode_class class, int limit)
{
if (TREE_CODE (size) != INTEGER_CST
|| TREE_OVERFLOW (size)
@ -257,9 +245,7 @@ mode_for_size_tree (size, class, limit)
contains at least the requested number of bits. */
enum machine_mode
smallest_mode_for_size (size, class)
unsigned int size;
enum mode_class class;
smallest_mode_for_size (unsigned int size, enum mode_class class)
{
enum machine_mode mode;
@ -276,8 +262,7 @@ smallest_mode_for_size (size, class)
/* Find an integer mode of the exact same size, or BLKmode on failure. */
enum machine_mode
int_mode_for_mode (mode)
enum machine_mode mode;
int_mode_for_mode (enum machine_mode mode)
{
switch (GET_MODE_CLASS (mode))
{
@ -311,8 +296,7 @@ int_mode_for_mode (mode)
BIGGEST_ALIGNMENT. */
unsigned int
get_mode_alignment (mode)
enum machine_mode mode;
get_mode_alignment (enum machine_mode mode)
{
unsigned int alignment;
@ -334,9 +318,7 @@ get_mode_alignment (mode)
This can only be applied to objects of a sizetype. */
tree
round_up (value, divisor)
tree value;
int divisor;
round_up (tree value, int divisor)
{
tree arg = size_int_type (divisor, TREE_TYPE (value));
@ -346,9 +328,7 @@ round_up (value, divisor)
/* Likewise, but round down. */
tree
round_down (value, divisor)
tree value;
int divisor;
round_down (tree value, int divisor)
{
tree arg = size_int_type (divisor, TREE_TYPE (value));
@ -382,9 +362,7 @@ do_type_align (tree type, tree decl)
the record will be aligned to suit. */
void
layout_decl (decl, known_align)
tree decl;
unsigned int known_align;
layout_decl (tree decl, unsigned int known_align)
{
tree type = TREE_TYPE (decl);
enum tree_code code = TREE_CODE (decl);
@ -557,11 +535,10 @@ layout_decl (decl, known_align)
/* Hook for a front-end function that can modify the record layout as needed
immediately before it is finalized. */
void (*lang_adjust_rli) PARAMS ((record_layout_info)) = 0;
void (*lang_adjust_rli) (record_layout_info) = 0;
void
set_lang_adjust_rli (f)
void (*f) PARAMS ((record_layout_info));
set_lang_adjust_rli (void (*f) (record_layout_info))
{
lang_adjust_rli = f;
}
@ -574,8 +551,7 @@ set_lang_adjust_rli (f)
out the record. */
record_layout_info
start_record_layout (t)
tree t;
start_record_layout (tree t)
{
record_layout_info rli
= (record_layout_info) xmalloc (sizeof (struct record_layout_info_s));
@ -608,8 +584,7 @@ start_record_layout (t)
the offset/bitpos forms and byte and bit offsets. */
tree
bit_from_pos (offset, bitpos)
tree offset, bitpos;
bit_from_pos (tree offset, tree bitpos)
{
return size_binop (PLUS_EXPR, bitpos,
size_binop (MULT_EXPR, convert (bitsizetype, offset),
@ -617,8 +592,7 @@ bit_from_pos (offset, bitpos)
}
tree
byte_from_pos (offset, bitpos)
tree offset, bitpos;
byte_from_pos (tree offset, tree bitpos)
{
return size_binop (PLUS_EXPR, offset,
convert (sizetype,
@ -627,10 +601,8 @@ byte_from_pos (offset, bitpos)
}
void
pos_from_bit (poffset, pbitpos, off_align, pos)
tree *poffset, *pbitpos;
unsigned int off_align;
tree pos;
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
tree pos)
{
*poffset = size_binop (MULT_EXPR,
convert (sizetype,
@ -644,9 +616,7 @@ pos_from_bit (poffset, pbitpos, off_align, pos)
normalize the offsets so they are within the alignment. */
void
normalize_offset (poffset, pbitpos, off_align)
tree *poffset, *pbitpos;
unsigned int off_align;
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
/* If the bit position is now larger than it should be, adjust it
downwards. */
@ -668,8 +638,7 @@ normalize_offset (poffset, pbitpos, off_align)
/* Print debugging information about the information in RLI. */
void
debug_rli (rli)
record_layout_info rli;
debug_rli (record_layout_info rli)
{
print_node_brief (stderr, "type", rli->t, 0);
print_node_brief (stderr, "\noffset", rli->offset, 0);
@ -692,8 +661,7 @@ debug_rli (rli)
BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */
void
normalize_rli (rli)
record_layout_info rli;
normalize_rli (record_layout_info rli)
{
normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}
@ -701,8 +669,7 @@ normalize_rli (rli)
/* Returns the size in bytes allocated so far. */
tree
rli_size_unit_so_far (rli)
record_layout_info rli;
rli_size_unit_so_far (record_layout_info rli)
{
return byte_from_pos (rli->offset, rli->bitpos);
}
@ -710,8 +677,7 @@ rli_size_unit_so_far (rli)
/* Returns the size in bits allocated so far. */
tree
rli_size_so_far (rli)
record_layout_info rli;
rli_size_so_far (record_layout_info rli)
{
return bit_from_pos (rli->offset, rli->bitpos);
}
@ -722,10 +688,8 @@ rli_size_so_far (rli)
the FIELD. */
static unsigned int
update_alignment_for_field (rli, field, known_align)
record_layout_info rli;
tree field;
unsigned int known_align;
update_alignment_for_field (record_layout_info rli, tree field,
unsigned int known_align)
{
/* The alignment required for FIELD. */
unsigned int desired_align;
@ -756,10 +720,10 @@ update_alignment_for_field (rli, field, known_align)
applies if there was an immediately prior, nonzero-size
bitfield. (That's the way it is, experimentally.) */
if (! integer_zerop (DECL_SIZE (field))
? ! DECL_PACKED (field)
: (rli->prev_field
&& DECL_BIT_FIELD_TYPE (rli->prev_field)
&& ! integer_zerop (DECL_SIZE (rli->prev_field))))
? ! DECL_PACKED (field)
: (rli->prev_field
&& DECL_BIT_FIELD_TYPE (rli->prev_field)
&& ! integer_zerop (DECL_SIZE (rli->prev_field))))
{
unsigned int type_align = TYPE_ALIGN (type);
type_align = MAX (type_align, desired_align);
@ -816,9 +780,7 @@ update_alignment_for_field (rli, field, known_align)
/* Called from place_field to handle unions. */
static void
place_union_field (rli, field)
record_layout_info rli;
tree field;
place_union_field (record_layout_info rli, tree field)
{
update_alignment_for_field (rli, field, /*known_align=*/0);
@ -841,9 +803,8 @@ place_union_field (rli, field)
at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more
units of alignment than the underlying TYPE. */
static int
excess_unit_span (byte_offset, bit_offset, size, align, type)
HOST_WIDE_INT byte_offset, bit_offset, size, align;
tree type;
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
/* Note that the calculation of OFFSET might overflow; we calculate it so
that we still get the right result as long as ALIGN is a power of two. */
@ -862,9 +823,7 @@ excess_unit_span (byte_offset, bit_offset, size, align, type)
callers that desire that behavior must manually perform that step.) */
void
place_field (rli, field)
record_layout_info rli;
tree field;
place_field (record_layout_info rli, tree field)
{
/* The alignment required for FIELD. */
unsigned int desired_align;
@ -915,7 +874,7 @@ place_field (rli, field)
& - tree_low_cst (rli->offset, 1)));
else
known_align = rli->offset_align;
desired_align = update_alignment_for_field (rli, field, known_align);
if (warn_packed && DECL_PACKED (field))
@ -1056,7 +1015,7 @@ place_field (rli, field)
if ((* targetm.ms_bitfield_layout_p) (rli->t)
&& ((DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field))
|| (rli->prev_field && ! DECL_PACKED (rli->prev_field))))
|| (rli->prev_field && ! DECL_PACKED (rli->prev_field))))
{
/* At this point, either the prior or current are bitfields,
(possibly both), and we're dealing with MS packing. */
@ -1251,8 +1210,7 @@ place_field (rli, field)
indicated by RLI. */
static void
finalize_record_size (rli)
record_layout_info rli;
finalize_record_size (record_layout_info rli)
{
tree unpadded_size, unpadded_size_unit;
@ -1333,8 +1291,7 @@ finalize_record_size (rli)
/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */
void
compute_record_mode (type)
tree type;
compute_record_mode (tree type)
{
tree field;
enum machine_mode mode = VOIDmode;
@ -1419,8 +1376,7 @@ compute_record_mode (type)
out. */
static void
finalize_type_size (type)
tree type;
finalize_type_size (tree type)
{
/* Normally, use the alignment corresponding to the mode chosen.
However, where strict alignment is not required, avoid
@ -1501,9 +1457,7 @@ finalize_type_size (type)
G++ 3.2 ABI. */
void
finish_record_layout (rli, free_p)
record_layout_info rli;
int free_p;
finish_record_layout (record_layout_info rli, int free_p)
{
/* Compute the final size. */
finalize_record_size (rli);
@ -1535,11 +1489,8 @@ finish_record_layout (rli, free_p)
ALIGN_TYPE. */
void
finish_builtin_struct (type, name, fields, align_type)
tree type;
const char *name;
tree fields;
tree align_type;
finish_builtin_struct (tree type, const char *name, tree fields,
tree align_type)
{
tree tail, next;
@ -1578,8 +1529,7 @@ finish_builtin_struct (type, name, fields, align_type)
If the type is incomplete, its TYPE_SIZE remains zero. */
void
layout_type (type)
tree type;
layout_type (tree type)
{
if (type == 0)
abort ();
@ -1885,8 +1835,7 @@ layout_type (type)
/* Create and return a type for signed integers of PRECISION bits. */
tree
make_signed_type (precision)
int precision;
make_signed_type (int precision)
{
tree type = make_node (INTEGER_TYPE);
@ -1899,8 +1848,7 @@ make_signed_type (precision)
/* Create and return a type for unsigned integers of PRECISION bits. */
tree
make_unsigned_type (precision)
int precision;
make_unsigned_type (int precision)
{
tree type = make_node (INTEGER_TYPE);
@ -1914,7 +1862,7 @@ make_unsigned_type (precision)
value to enable integer types to be created. */
void
initialize_sizetypes ()
initialize_sizetypes (void)
{
tree t = make_node (INTEGER_TYPE);
@ -1947,8 +1895,7 @@ initialize_sizetypes ()
Also update the type of any standard type's sizes made so far. */
void
set_sizetype (type)
tree type;
set_sizetype (tree type)
{
int oprecision = TYPE_PRECISION (type);
/* The *bitsizetype types use a precision that avoids overflows when
@ -2027,8 +1974,7 @@ set_sizetype (type)
E.g. for Pascal, when the -fsigned-char option is given. */
void
fixup_signed_type (type)
tree type;
fixup_signed_type (tree type)
{
int precision = TYPE_PRECISION (type);
@ -2065,8 +2011,7 @@ fixup_signed_type (type)
and for enumeral types. */
void
fixup_unsigned_type (type)
tree type;
fixup_unsigned_type (tree type)
{
int precision = TYPE_PRECISION (type);
@ -2108,11 +2053,8 @@ fixup_unsigned_type (type)
all the conditions. */
enum machine_mode
get_best_mode (bitsize, bitpos, align, largest_mode, volatilep)
int bitsize, bitpos;
unsigned int align;
enum machine_mode largest_mode;
int volatilep;
get_best_mode (int bitsize, int bitpos, unsigned int align,
enum machine_mode largest_mode, int volatilep)
{
enum machine_mode mode;
unsigned int unit = 0;

View File

@ -1,5 +1,5 @@
/* String pool for GCC.
Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC.
@ -49,13 +49,13 @@ const char digit_vector[] = {
struct ht *ident_hash;
static struct obstack string_stack;
static hashnode alloc_node PARAMS ((hash_table *));
static int mark_ident PARAMS ((struct cpp_reader *, hashnode, const void *));
static int ht_copy_and_clear PARAMS ((struct cpp_reader *, hashnode, const void *));
static hashnode alloc_node (hash_table *);
static int mark_ident (struct cpp_reader *, hashnode, const void *);
static int ht_copy_and_clear (struct cpp_reader *, hashnode, const void *);
/* Initialize the string pool. */
void
init_stringpool ()
init_stringpool (void)
{
/* Create with 16K (2^14) entries. */
ident_hash = ht_create (14);
@ -65,8 +65,7 @@ init_stringpool ()
/* Allocate a hash node. */
static hashnode
alloc_node (table)
hash_table *table ATTRIBUTE_UNUSED;
alloc_node (hash_table *table ATTRIBUTE_UNUSED)
{
return GCC_IDENT_TO_HT_IDENT (make_node (IDENTIFIER_NODE));
}
@ -78,9 +77,7 @@ alloc_node (table)
returned this time too. */
const char *
ggc_alloc_string (contents, length)
const char *contents;
int length;
ggc_alloc_string (const char *contents, int length)
{
if (length == -1)
length = strlen (contents);
@ -99,8 +96,7 @@ ggc_alloc_string (contents, length)
the same node is returned this time. */
tree
get_identifier (text)
const char *text;
get_identifier (const char *text)
{
hashnode ht_node = ht_lookup (ident_hash,
(const unsigned char *) text,
@ -114,9 +110,7 @@ get_identifier (text)
known. */
tree
get_identifier_with_length (text, length)
const char *text;
unsigned int length;
get_identifier_with_length (const char *text, unsigned int length)
{
hashnode ht_node = ht_lookup (ident_hash,
(const unsigned char *) text,
@ -131,8 +125,7 @@ get_identifier_with_length (text, length)
NULL_TREE. */
tree
maybe_get_identifier (text)
const char *text;
maybe_get_identifier (const char *text)
{
hashnode ht_node;
@ -147,7 +140,7 @@ maybe_get_identifier (text)
/* Report some basic statistics about the string pool. */
void
stringpool_statistics ()
stringpool_statistics (void)
{
ht_dump_statistics (ident_hash);
}
@ -155,10 +148,8 @@ stringpool_statistics ()
/* Mark an identifier for GC. */
static int
mark_ident (pfile, h, v)
struct cpp_reader *pfile ATTRIBUTE_UNUSED;
hashnode h;
const void *v ATTRIBUTE_UNUSED;
mark_ident (struct cpp_reader *pfile ATTRIBUTE_UNUSED, hashnode h,
const void *v ATTRIBUTE_UNUSED)
{
gt_ggc_m_9tree_node (HT_IDENT_TO_GCC_IDENT (h));
return 1;
@ -169,7 +160,7 @@ mark_ident (pfile, h, v)
treatment for strings. */
void
ggc_mark_stringpool ()
ggc_mark_stringpool (void)
{
ht_forall (ident_hash, mark_ident, NULL);
}
@ -179,8 +170,7 @@ ggc_mark_stringpool ()
to strings. */
void
gt_ggc_m_S (x)
void *x ATTRIBUTE_UNUSED;
gt_ggc_m_S (void *x ATTRIBUTE_UNUSED)
{
}
@ -188,19 +178,16 @@ gt_ggc_m_S (x)
strings don't contain pointers). */
void
gt_pch_p_S (obj, x, op, cookie)
void *obj ATTRIBUTE_UNUSED;
void *x ATTRIBUTE_UNUSED;
gt_pointer_operator op ATTRIBUTE_UNUSED;
void *cookie ATTRIBUTE_UNUSED;
gt_pch_p_S (void *obj ATTRIBUTE_UNUSED, void *x ATTRIBUTE_UNUSED,
gt_pointer_operator op ATTRIBUTE_UNUSED,
void *cookie ATTRIBUTE_UNUSED)
{
}
/* PCH pointer-walking routine for strings. */
void
gt_pch_n_S (x)
const void *x;
gt_pch_n_S (const void *x)
{
gt_pch_note_object ((void *)x, (void *)x, &gt_pch_p_S);
}
@ -216,11 +203,8 @@ struct string_pool_data GTY(())
static GTY(()) struct string_pool_data * spd;
static int
ht_copy_and_clear (r, hp, ht2_p)
cpp_reader *r ATTRIBUTE_UNUSED;
hashnode hp;
const void *ht2_p;
static int
ht_copy_and_clear (cpp_reader *r ATTRIBUTE_UNUSED, hashnode hp, const void *ht2_p)
{
cpp_hashnode *h = CPP_HASHNODE (hp);
struct ht *ht2 = (struct ht *) ht2_p;
@ -244,10 +228,10 @@ ht_copy_and_clear (r, hp, ht2_p)
static struct ht *saved_ident_hash;
void
gt_pch_save_stringpool ()
gt_pch_save_stringpool (void)
{
unsigned int i;
spd = ggc_alloc (sizeof (*spd));
spd->nslots = ident_hash->nslots;
spd->nelements = ident_hash->nelements;
@ -264,7 +248,7 @@ gt_pch_save_stringpool ()
}
void
gt_pch_fixup_stringpool ()
gt_pch_fixup_stringpool (void)
{
ht_forall (saved_ident_hash, ht_copy_and_clear, ident_hash);
ht_destroy (saved_ident_hash);
@ -272,10 +256,10 @@ gt_pch_fixup_stringpool ()
}
void
gt_pch_restore_stringpool ()
gt_pch_restore_stringpool (void)
{
unsigned int i;
ident_hash->nslots = spd->nslots;
ident_hash->nelements = spd->nelements;
ident_hash->entries = xrealloc (ident_hash->entries,

View File

@ -67,70 +67,69 @@ struct gcc_target
ALIGNED_P indicates whether it is aligned. Return true if
successful. Only handles cases for which BYTE_OP, ALIGNED_OP
and UNALIGNED_OP are NULL. */
bool (* integer) PARAMS ((rtx x, unsigned int size, int aligned_p));
bool (* integer) (rtx x, unsigned int size, int aligned_p);
/* Output code that will globalize a label. */
void (* globalize_label) PARAMS ((FILE *, const char *));
void (* globalize_label) (FILE *, const char *);
/* Output an internal label. */
void (* internal_label) PARAMS ((FILE *, const char *, unsigned long));
void (* internal_label) (FILE *, const char *, unsigned long);
/* Emit an assembler directive to set visibility for the symbol
associated with the tree decl. */
void (* visibility) PARAMS ((tree, int));
void (* visibility) (tree, int);
/* Output the assembler code for entry to a function. */
void (* function_prologue) PARAMS ((FILE *, HOST_WIDE_INT));
void (* function_prologue) (FILE *, HOST_WIDE_INT);
/* Output the assembler code for end of prologue. */
void (* function_end_prologue) PARAMS ((FILE *));
void (* function_end_prologue) (FILE *);
/* Output the assembler code for start of epilogue. */
void (* function_begin_epilogue) PARAMS ((FILE *));
void (* function_begin_epilogue) (FILE *);
/* Output the assembler code for function exit. */
void (* function_epilogue) PARAMS ((FILE *, HOST_WIDE_INT));
void (* function_epilogue) (FILE *, HOST_WIDE_INT);
/* Switch to an arbitrary section NAME with attributes as
specified by FLAGS. */
void (* named_section) PARAMS ((const char *, unsigned int));
void (* named_section) (const char *, unsigned int);
/* Switch to the section that holds the exception table. */
void (* exception_section) PARAMS ((void));
void (* exception_section) (void);
/* Switch to the section that holds the exception frames. */
void (* eh_frame_section) PARAMS ((void));
void (* eh_frame_section) (void);
/* Select and switch to a section for EXP. It may be a DECL or a
constant. RELOC is nonzero if runtime relocations must be applied;
bit 1 will be set if the runtime relocations require non-local
name resolution. ALIGN is the required alignment of the data. */
void (* select_section) PARAMS ((tree, int, unsigned HOST_WIDE_INT));
void (* select_section) (tree, int, unsigned HOST_WIDE_INT);
/* Select and switch to a section for X with MODE. ALIGN is
the desired alignment of the data. */
void (* select_rtx_section) PARAMS ((enum machine_mode, rtx,
unsigned HOST_WIDE_INT));
void (* select_rtx_section) (enum machine_mode, rtx,
unsigned HOST_WIDE_INT);
/* Select a unique section name for DECL. RELOC is the same as
for SELECT_SECTION. */
void (* unique_section) PARAMS ((tree, int));
void (* unique_section) (tree, int);
/* Output a constructor for a symbol with a given priority. */
void (* constructor) PARAMS ((rtx, int));
void (* constructor) (rtx, int);
/* Output a destructor for a symbol with a given priority. */
void (* destructor) PARAMS ((rtx, int));
void (* destructor) (rtx, int);
/* Output the assembler code for a thunk function. THUNK_DECL is the
declaration for the thunk function itself, FUNCTION is the decl for
the target function. DELTA is an immediate constant offset to be
added to THIS. If VCALL_OFFSET is nonzero, the word at
*(*this + vcall_offset) should be added to THIS. */
void (* output_mi_thunk) PARAMS ((FILE *file, tree thunk_decl,
HOST_WIDE_INT delta,
HOST_WIDE_INT vcall_offset,
tree function_decl));
void (* output_mi_thunk) (FILE *file, tree thunk_decl,
HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
tree function_decl);
/* Determine whether output_mi_thunk would succeed. */
/* ??? Ideally, this hook would not exist, and success or failure
@ -138,18 +137,17 @@ struct gcc_target
too much undo-able setup involved in invoking output_mi_thunk.
Could be fixed by making output_mi_thunk emit rtl instead of
text to the output file. */
bool (* can_output_mi_thunk) PARAMS ((tree thunk_decl,
HOST_WIDE_INT delta,
HOST_WIDE_INT vcall_offset,
tree function_decl));
bool (* can_output_mi_thunk) (tree thunk_decl, HOST_WIDE_INT delta,
HOST_WIDE_INT vcall_offset,
tree function_decl);
/* Output any boilerplate text needed at the beginning of a
translation unit. */
void (*file_start) PARAMS ((void));
void (*file_start) (void);
/* Output any boilerplate text needed at the end of a
translation unit. */
void (*file_end) PARAMS ((void));
void (*file_end) (void);
} asm_out;
/* Functions relating to instruction scheduling. */
@ -158,41 +156,41 @@ struct gcc_target
/* Given the current cost, COST, of an insn, INSN, calculate and
return a new cost based on its relationship to DEP_INSN through
the dependence LINK. The default is to make no adjustment. */
int (* adjust_cost) PARAMS ((rtx insn, rtx link, rtx def_insn, int cost));
int (* adjust_cost) (rtx insn, rtx link, rtx def_insn, int cost);
/* Adjust the priority of an insn as you see fit. Returns the new
priority. */
int (* adjust_priority) PARAMS ((rtx, int));
int (* adjust_priority) (rtx, int);
/* Function which returns the maximum number of insns that can be
scheduled in the same machine cycle. This must be constant
over an entire compilation. The default is 1. */
int (* issue_rate) PARAMS ((void));
int (* issue_rate) (void);
/* Calculate how much this insn affects how many more insns we
can emit this cycle. Default is they all cost the same. */
int (* variable_issue) PARAMS ((FILE *, int, rtx, int));
int (* variable_issue) (FILE *, int, rtx, int);
/* Initialize machine-dependent scheduling code. */
void (* md_init) PARAMS ((FILE *, int, int));
void (* md_init) (FILE *, int, int);
/* Finalize machine-dependent scheduling code. */
void (* md_finish) PARAMS ((FILE *, int));
void (* md_finish) (FILE *, int);
/* Reorder insns in a machine-dependent fashion, in two different
places. Default does nothing. */
int (* reorder) PARAMS ((FILE *, int, rtx *, int *, int));
int (* reorder2) PARAMS ((FILE *, int, rtx *, int *, int));
int (* reorder) (FILE *, int, rtx *, int *, int);
int (* reorder2) (FILE *, int, rtx *, int *, int);
/* The following member value is a pointer to a function called
after evaluating forward dependencies of insns in a chain given
by two parameter values (head and tail, respectively).
void (* dependencies_evaluation_hook) PARAMS ((rtx, rtx));
void (* dependencies_evaluation_hook) (rtx, rtx);
/* The following member value is a pointer to a function returning
nonzero if we should use DFA based scheduling. The default is
to use the old pipeline scheduler. */
int (* use_dfa_pipeline_interface) PARAMS ((void));
int (* use_dfa_pipeline_interface) (void);
/* The values of all the following members are used only for the
DFA based scheduler: */
/* The values of the following four members are pointers to
@ -206,10 +204,10 @@ struct gcc_target
the members result in not changing the automaton state when
the new simulated processor cycle correspondingly starts and
finishes. */
void (* init_dfa_pre_cycle_insn) PARAMS ((void));
rtx (* dfa_pre_cycle_insn) PARAMS ((void));
void (* init_dfa_post_cycle_insn) PARAMS ((void));
rtx (* dfa_post_cycle_insn) PARAMS ((void));
void (* init_dfa_pre_cycle_insn) (void);
rtx (* dfa_pre_cycle_insn) (void);
void (* init_dfa_post_cycle_insn) (void);
rtx (* dfa_post_cycle_insn) (void);
/* The following member value is a pointer to a function returning value
which defines how many insns in queue `ready' will we try for
multi-pass scheduling. if the member value is nonzero and the
@ -217,13 +215,13 @@ struct gcc_target
multi-pass scheduling for the first cycle. In other words, we will
try to choose a ready insn which permits starting the maximum number
of insns on the same cycle. */
int (* first_cycle_multipass_dfa_lookahead) PARAMS ((void));
int (* first_cycle_multipass_dfa_lookahead) (void);
/* The following member value is pointer to a function controlling
what insns from the ready insn queue will be considered for the
multipass insn scheduling. If the hook returns zero for insn
passed as the parameter, the insn will be not chosen to be
issued. */
int (* first_cycle_multipass_dfa_lookahead_guard) PARAMS ((rtx));
int (* first_cycle_multipass_dfa_lookahead_guard) (rtx);
/* The following member value is pointer to a function called by
the insn scheduler before issuing insn passed as the third
parameter on given cycle. If the hook returns nonzero, the
@ -236,7 +234,7 @@ struct gcc_target
parameter values are correspondingly processor cycle on which
the previous insn has been issued and the current processor
cycle. */
int (* dfa_new_cycle) PARAMS ((FILE *, int, rtx, int, int, int *));
int (* dfa_new_cycle) (FILE *, int, rtx, int, int, int *);
/* The values of the following members are pointers to functions
used to improve the first cycle multipass scheduling by
inserting nop insns. dfa_scheduler_bubble gives a function
@ -247,15 +245,15 @@ struct gcc_target
init_dfa_scheduler_bubbles is used. The default values of the
members result in not inserting nop insns during the multipass
scheduling. */
void (* init_dfa_bubbles) PARAMS ((void));
rtx (* dfa_bubble) PARAMS ((int));
void (* init_dfa_bubbles) (void);
rtx (* dfa_bubble) (int);
} sched;
/* Given two decls, merge their attributes and return the result. */
tree (* merge_decl_attributes) PARAMS ((tree, tree));
tree (* merge_decl_attributes) (tree, tree);
/* Given two types, merge their attributes and return the result. */
tree (* merge_type_attributes) PARAMS ((tree, tree));
tree (* merge_type_attributes) (tree, tree);
/* Table of machine attributes and functions to handle them.
Ignored if NULL. */
@ -264,92 +262,92 @@ struct gcc_target
/* Return zero if the attributes on TYPE1 and TYPE2 are incompatible,
one if they are compatible and two if they are nearly compatible
(which causes a warning to be generated). */
int (* comp_type_attributes) PARAMS ((tree type1, tree type2));
int (* comp_type_attributes) (tree type1, tree type2);
/* Assign default attributes to the newly defined TYPE. */
void (* set_default_type_attributes) PARAMS ((tree type));
void (* set_default_type_attributes) (tree type);
/* Insert attributes on the newly created DECL. */
void (* insert_attributes) PARAMS ((tree decl, tree *attributes));
void (* insert_attributes) (tree decl, tree *attributes);
/* Return true if FNDECL (which has at least one machine attribute)
can be inlined despite its machine attributes, false otherwise. */
bool (* function_attribute_inlinable_p) PARAMS ((tree fndecl));
bool (* function_attribute_inlinable_p) (tree fndecl);
/* Return true if bitfields in RECORD_TYPE should follow the
Microsoft Visual C++ bitfield layout rules. */
bool (* ms_bitfield_layout_p) PARAMS ((tree record_type));
bool (* ms_bitfield_layout_p) (tree record_type);
/* Set up target-specific built-in functions. */
void (* init_builtins) PARAMS ((void));
void (* init_builtins) (void);
/* Expand a target-specific builtin. */
rtx (* expand_builtin) PARAMS ((tree exp, rtx target, rtx subtarget,
enum machine_mode mode, int ignore));
rtx (* expand_builtin) (tree exp, rtx target, rtx subtarget,
enum machine_mode mode, int ignore);
/* Given a decl, a section name, and whether the decl initializer
has relocs, choose attributes for the section. */
/* ??? Should be merged with SELECT_SECTION and UNIQUE_SECTION. */
unsigned int (* section_type_flags) PARAMS ((tree, const char *, int));
unsigned int (* section_type_flags) (tree, const char *, int);
/* True if new jumps cannot be created, to replace existing ones or
not, at the current point in the compilation. */
bool (* cannot_modify_jumps_p) PARAMS ((void));
bool (* cannot_modify_jumps_p) (void);
/* Return a register class for which branch target register
optimizations should be applied. */
int (* branch_target_register_class) PARAMS ((void));
int (* branch_target_register_class) (void);
/* Return true if branch target register optimizations should include
callee-saved registers that are not already live during the current
function. AFTER_PE_GEN is true if prologues and epilogues have
already been generated. */
bool (* branch_target_register_callee_saved) PARAMS ((bool after_pe_gen));
bool (* branch_target_register_callee_saved) (bool after_pe_gen);
/* True if the constant X cannot be placed in the constant pool. */
bool (* cannot_force_const_mem) PARAMS ((rtx));
bool (* cannot_force_const_mem) (rtx);
/* True if the insn X cannot be duplicated. */
bool (* cannot_copy_insn_p) PARAMS ((rtx));
bool (* cannot_copy_insn_p) (rtx);
/* Given an address RTX, undo the effects of LEGITIMIZE_ADDRESS. */
rtx (* delegitimize_address) PARAMS ((rtx));
rtx (* delegitimize_address) (rtx);
/* True if it is OK to do sibling call optimization for the specified
call expression EXP. DECL will be the called function, or NULL if
this is an indirect call. */
bool (*function_ok_for_sibcall) PARAMS ((tree decl, tree exp));
bool (*function_ok_for_sibcall) (tree decl, tree exp);
/* True if EXP should be placed in a "small data" section. */
bool (* in_small_data_p) PARAMS ((tree));
bool (* in_small_data_p) (tree);
/* True if EXP names an object for which name resolution must resolve
to the current module. */
bool (* binds_local_p) PARAMS ((tree));
bool (* binds_local_p) (tree);
/* Do something target-specific to record properties of the DECL into
the associated SYMBOL_REF. */
void (* encode_section_info) PARAMS ((tree, rtx, int));
void (* encode_section_info) (tree, rtx, int);
/* Undo the effects of encode_section_info on the symbol string. */
const char * (* strip_name_encoding) PARAMS ((const char *));
const char * (* strip_name_encoding) (const char *);
/* True if MODE is valid for a pointer in __attribute__((mode("MODE"))). */
bool (* valid_pointer_mode) PARAMS ((enum machine_mode mode));
bool (* valid_pointer_mode) (enum machine_mode mode);
/* True if a vector is opaque. */
bool (* vector_opaque_p) PARAMS ((tree));
bool (* vector_opaque_p) (tree);
/* Compute a (partial) cost for rtx X. Return true if the complete
cost has been computed, and false if subexpressions should be
scanned. In either case, *TOTAL contains the cost result. */
/* Note that CODE and OUTER_CODE ought to be RTX_CODE, but that's
not necessarily defined at this point. */
bool (* rtx_costs) PARAMS ((rtx x, int code, int outer_code, int *total));
bool (* rtx_costs) (rtx x, int code, int outer_code, int *total);
/* Compute the cost of X, used as an address. Never called with
invalid addresses. */
int (* address_cost) PARAMS ((rtx x));
int (* address_cost) (rtx x);
/* Given a register, this hook should return a parallel of registers
to represent where to find the register pieces. Define this hook
@ -357,11 +355,11 @@ struct gcc_target
non-contiguous locations, or if the register should be
represented in more than one register in Dwarf. Otherwise, this
hook should return NULL_RTX. */
rtx (* dwarf_register_span) PARAMS ((rtx));
rtx (* dwarf_register_span) (rtx);
/* Do machine-dependent code transformations. Called just before
delayed-branch scheduling. */
void (* machine_dependent_reorg) PARAMS ((void));
void (* machine_dependent_reorg) (void);
/* Leave the boolean fields at the end. */

View File

@ -71,7 +71,7 @@ struct tms
information). */
#ifdef HAVE_TIMES
# if defined HAVE_DECL_TIMES && !HAVE_DECL_TIMES
extern clock_t times PARAMS ((struct tms *));
extern clock_t times (struct tms *);
# endif
# define USE_TIMES
# define HAVE_USER_TIME
@ -168,19 +168,17 @@ static struct timevar_stack_def *unused_stack_instances;
element. */
static struct timevar_time_def start_time;
static void get_time
PARAMS ((struct timevar_time_def *));
static void timevar_accumulate
PARAMS ((struct timevar_time_def *, struct timevar_time_def *,
struct timevar_time_def *));
static void get_time (struct timevar_time_def *);
static void timevar_accumulate (struct timevar_time_def *,
struct timevar_time_def *,
struct timevar_time_def *);
/* Fill the current times into TIME. The definition of this function
also defines any or all of the HAVE_USER_TIME, HAVE_SYS_TIME, and
HAVE_WALL_TIME macros. */
static void
get_time (now)
struct timevar_time_def *now;
get_time (struct timevar_time_def *now)
{
now->user = 0;
now->sys = 0;
@ -211,10 +209,9 @@ get_time (now)
/* Add the difference between STOP_TIME and START_TIME to TIMER. */
static void
timevar_accumulate (timer, start_time, stop_time)
struct timevar_time_def *timer;
struct timevar_time_def *start_time;
struct timevar_time_def *stop_time;
timevar_accumulate (struct timevar_time_def *timer,
struct timevar_time_def *start_time,
struct timevar_time_def *stop_time)
{
timer->user += stop_time->user - start_time->user;
timer->sys += stop_time->sys - start_time->sys;
@ -224,7 +221,7 @@ timevar_accumulate (timer, start_time, stop_time)
/* Initialize timing variables. */
void
timevar_init ()
timevar_init (void)
{
timevar_enable = true;
@ -253,8 +250,7 @@ timevar_init ()
TIMEVAR cannot be running as a standalone timer. */
void
timevar_push (timevar)
timevar_id_t timevar;
timevar_push (timevar_id_t timevar)
{
struct timevar_def *tv = &timevars[timevar];
struct timevar_stack_def *context;
@ -306,8 +302,7 @@ timevar_push (timevar)
timing variable. */
void
timevar_pop (timevar)
timevar_id_t timevar;
timevar_pop (timevar_id_t timevar)
{
struct timevar_time_def now;
struct timevar_stack_def *popped = stack;
@ -346,8 +341,7 @@ timevar_pop (timevar)
attributed to TIMEVAR. */
void
timevar_start (timevar)
timevar_id_t timevar;
timevar_start (timevar_id_t timevar)
{
struct timevar_def *tv = &timevars[timevar];
@ -370,8 +364,7 @@ timevar_start (timevar)
is attributed to it. */
void
timevar_stop (timevar)
timevar_id_t timevar;
timevar_stop (timevar_id_t timevar)
{
struct timevar_def *tv = &timevars[timevar];
struct timevar_time_def now;
@ -391,9 +384,7 @@ timevar_stop (timevar)
update-to-date information even if TIMEVAR is currently running. */
void
timevar_get (timevar, elapsed)
timevar_id_t timevar;
struct timevar_time_def *elapsed;
timevar_get (timevar_id_t timevar, struct timevar_time_def *elapsed)
{
struct timevar_def *tv = &timevars[timevar];
struct timevar_time_def now;
@ -419,8 +410,7 @@ timevar_get (timevar, elapsed)
for normalizing the others, and is displayed last. */
void
timevar_print (fp)
FILE *fp;
timevar_print (FILE *fp)
{
/* Only print stuff if we have some sort of time information. */
#if defined (HAVE_USER_TIME) || defined (HAVE_SYS_TIME) || defined (HAVE_WALL_TIME)
@ -517,9 +507,7 @@ timevar_print (fp)
TOTAL (given in microseconds). */
void
print_time (str, total)
const char *str;
long total;
print_time (const char *str, long total)
{
long all_time = get_run_time ();
fprintf (stderr,

View File

@ -79,15 +79,15 @@ timevar_id_t;
/* Execute the sequence: timevar_pop (TV), return (E); */
#define POP_TIMEVAR_AND_RETURN(TV, E) return (timevar_pop (TV), (E))
extern void timevar_init PARAMS ((void));
extern void timevar_push PARAMS ((timevar_id_t));
extern void timevar_pop PARAMS ((timevar_id_t));
extern void timevar_start PARAMS ((timevar_id_t));
extern void timevar_stop PARAMS ((timevar_id_t));
extern void timevar_get PARAMS ((timevar_id_t, struct timevar_time_def *));
extern void timevar_print PARAMS ((FILE *));
extern void timevar_init (void);
extern void timevar_push (timevar_id_t);
extern void timevar_pop (timevar_id_t);
extern void timevar_start (timevar_id_t);
extern void timevar_stop (timevar_id_t);
extern void timevar_get (timevar_id_t, struct timevar_time_def *);
extern void timevar_print (FILE *);
/* Provided for backward compatibility. */
extern void print_time PARAMS ((const char *, long));
extern void print_time (const char *, long);
#endif /* ! GCC_TIMEVAR_H */

View File

@ -1,7 +1,8 @@
/* Scan linker error messages for missing template instantiations and provide
them.
Copyright (C) 1995, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
Copyright (C) 1995, 1998, 1999, 2000, 2001, 2003
Free Software Foundation, Inc.
Contributed by Jason Merrill (jason@cygnus.com).
This file is part of GCC.
@ -67,13 +68,11 @@ typedef struct demangled_hash_entry
/* Hash and comparison functions for these hash tables. */
static int hash_string_eq PARAMS ((const void *, const void *));
static hashval_t hash_string_hash PARAMS ((const void *));
static int hash_string_eq (const void *, const void *);
static hashval_t hash_string_hash (const void *);
static int
hash_string_eq (s1_p, s2_p)
const void *s1_p;
const void *s2_p;
hash_string_eq (const void *s1_p, const void *s2_p)
{
const char *const *s1 = (const char *const *) s1_p;
const char *s2 = (const char *) s2_p;
@ -81,8 +80,7 @@ hash_string_eq (s1_p, s2_p)
}
static hashval_t
hash_string_hash (s_p)
const void *s_p;
hash_string_hash (const void *s_p)
{
const char *const *s = (const char *const *) s_p;
return (*htab_hash_string) (*s);
@ -90,35 +88,31 @@ hash_string_hash (s_p)
static htab_t symbol_table;
static struct symbol_hash_entry * symbol_hash_lookup PARAMS ((const char *,
int));
static struct file_hash_entry * file_hash_lookup PARAMS ((const char *));
static struct demangled_hash_entry *
demangled_hash_lookup PARAMS ((const char *, int));
static void symbol_push PARAMS ((symbol *));
static symbol * symbol_pop PARAMS ((void));
static void file_push PARAMS ((file *));
static file * file_pop PARAMS ((void));
static void tlink_init PARAMS ((void));
static int tlink_execute PARAMS ((const char *, char **, const char *));
static char * frob_extension PARAMS ((const char *, const char *));
static char * obstack_fgets PARAMS ((FILE *, struct obstack *));
static char * tfgets PARAMS ((FILE *));
static char * pfgets PARAMS ((FILE *));
static void freadsym PARAMS ((FILE *, file *, int));
static void read_repo_file PARAMS ((file *));
static void maybe_tweak PARAMS ((char *, file *));
static int recompile_files PARAMS ((void));
static int read_repo_files PARAMS ((char **));
static void demangle_new_symbols PARAMS ((void));
static int scan_linker_output PARAMS ((const char *));
static struct symbol_hash_entry * symbol_hash_lookup (const char *, int);
static struct file_hash_entry * file_hash_lookup (const char *);
static struct demangled_hash_entry *demangled_hash_lookup (const char *, int);
static void symbol_push (symbol *);
static symbol * symbol_pop (void);
static void file_push (file *);
static file * file_pop (void);
static void tlink_init (void);
static int tlink_execute (const char *, char **, const char *);
static char * frob_extension (const char *, const char *);
static char * obstack_fgets (FILE *, struct obstack *);
static char * tfgets (FILE *);
static char * pfgets (FILE *);
static void freadsym (FILE *, file *, int);
static void read_repo_file (file *);
static void maybe_tweak (char *, file *);
static int recompile_files (void);
static int read_repo_files (char **);
static void demangle_new_symbols (void);
static int scan_linker_output (const char *);
/* Look up an entry in the symbol hash table. */
static struct symbol_hash_entry *
symbol_hash_lookup (string, create)
const char *string;
int create;
symbol_hash_lookup (const char *string, int create)
{
void **e;
e = htab_find_slot_with_hash (symbol_table, string,
@ -140,8 +134,7 @@ static htab_t file_table;
/* Look up an entry in the file hash table. */
static struct file_hash_entry *
file_hash_lookup (string)
const char *string;
file_hash_lookup (const char *string)
{
void **e;
e = htab_find_slot_with_hash (file_table, string,
@ -161,9 +154,7 @@ static htab_t demangled_table;
/* Look up an entry in the demangled name hash table. */
static struct demangled_hash_entry *
demangled_hash_lookup (string, create)
const char *string;
int create;
demangled_hash_lookup (const char *string, int create)
{
void **e;
e = htab_find_slot_with_hash (demangled_table, string,
@ -199,8 +190,7 @@ struct obstack file_stack_obstack;
struct file_stack_entry *file_stack;
static void
symbol_push (p)
symbol *p;
symbol_push (symbol *p)
{
struct symbol_stack_entry *ep = (struct symbol_stack_entry *) obstack_alloc
(&symbol_stack_obstack, sizeof (struct symbol_stack_entry));
@ -210,7 +200,7 @@ symbol_push (p)
}
static symbol *
symbol_pop ()
symbol_pop (void)
{
struct symbol_stack_entry *ep = symbol_stack;
symbol *p;
@ -223,8 +213,7 @@ symbol_pop ()
}
static void
file_push (p)
file *p;
file_push (file *p)
{
struct file_stack_entry *ep;
@ -240,7 +229,7 @@ file_push (p)
}
static file *
file_pop ()
file_pop (void)
{
struct file_stack_entry *ep = file_stack;
file *p;
@ -258,7 +247,7 @@ file_pop ()
/* Initialize the tlink machinery. Called from do_tlink. */
static void
tlink_init ()
tlink_init (void)
{
const char *p;
@ -268,7 +257,7 @@ tlink_init ()
NULL);
demangled_table = htab_create (500, hash_string_hash, hash_string_eq,
NULL);
obstack_begin (&symbol_stack_obstack, 0);
obstack_begin (&file_stack_obstack, 0);
@ -286,19 +275,14 @@ tlink_init ()
}
static int
tlink_execute (prog, argv, redir)
const char *prog;
char **argv;
const char *redir;
tlink_execute (const char *prog, char **argv, const char *redir)
{
collect_execute (prog, argv, redir);
return collect_wait (prog);
}
static char *
frob_extension (s, ext)
const char *s;
const char *ext;
frob_extension (const char *s, const char *ext)
{
const char *p = strrchr (s, '/');
if (! p)
@ -312,9 +296,7 @@ frob_extension (s, ext)
}
static char *
obstack_fgets (stream, ob)
FILE *stream;
struct obstack *ob;
obstack_fgets (FILE *stream, struct obstack *ob)
{
int c;
while ((c = getc (stream)) != EOF && c != '\n')
@ -326,15 +308,13 @@ obstack_fgets (stream, ob)
}
static char *
tfgets (stream)
FILE *stream;
tfgets (FILE *stream)
{
return obstack_fgets (stream, &temporary_obstack);
}
static char *
pfgets (stream)
FILE *stream;
pfgets (FILE *stream)
{
return xstrdup (tfgets (stream));
}
@ -348,10 +328,7 @@ pfgets (stream)
XXX "provided" is unimplemented, both here and in the compiler. */
static void
freadsym (stream, f, chosen)
FILE *stream;
file *f;
int chosen;
freadsym (FILE *stream, file *f, int chosen)
{
symbol *sym;
@ -391,8 +368,7 @@ freadsym (stream, f, chosen)
/* Read in the repo file denoted by F, and record all its information. */
static void
read_repo_file (f)
file *f;
read_repo_file (file *f)
{
char c;
FILE *stream = fopen (f->key, "r");
@ -438,9 +414,7 @@ read_repo_file (f)
this one wants to emit it as well. */
static void
maybe_tweak (line, f)
char *line;
file *f;
maybe_tweak (char *line, file *f)
{
symbol *sym = symbol_hash_lookup (line + 2, false);
@ -463,7 +437,7 @@ maybe_tweak (line, f)
XXX Should this use collect_execute instead of system? */
static int
recompile_files ()
recompile_files (void)
{
file *f;
@ -519,8 +493,7 @@ recompile_files ()
.rpo files associated with them, and read in the information. */
static int
read_repo_files (object_lst)
char **object_lst;
read_repo_files (char **object_lst)
{
char **object = object_lst;
@ -552,7 +525,7 @@ read_repo_files (object_lst)
/* Add the demangled forms of any new symbols to the hash table. */
static void
demangle_new_symbols ()
demangle_new_symbols (void)
{
symbol *sym;
@ -573,8 +546,7 @@ demangle_new_symbols ()
adjust the settings for each symbol encountered. */
static int
scan_linker_output (fname)
const char *fname;
scan_linker_output (const char *fname)
{
FILE *stream = fopen (fname, "r");
char *line;
@ -702,8 +674,7 @@ scan_linker_output (fname)
to provide missing definitions. Currently ignored. */
void
do_tlink (ld_argv, object_lst)
char **ld_argv, **object_lst ATTRIBUTE_UNUSED;
do_tlink (char **ld_argv, char **object_lst ATTRIBUTE_UNUSED)
{
int exit = tlink_execute ("ld", ld_argv, ldout);

View File

@ -48,14 +48,14 @@
#include "params.h"
#include "coverage.h"
static int count_insns PARAMS ((basic_block));
static bool ignore_bb_p PARAMS ((basic_block));
static bool better_p PARAMS ((edge, edge));
static edge find_best_successor PARAMS ((basic_block));
static edge find_best_predecessor PARAMS ((basic_block));
static int find_trace PARAMS ((basic_block, basic_block *));
static void tail_duplicate PARAMS ((void));
static void layout_superblocks PARAMS ((void));
static int count_insns (basic_block);
static bool ignore_bb_p (basic_block);
static bool better_p (edge, edge);
static edge find_best_successor (basic_block);
static edge find_best_predecessor (basic_block);
static int find_trace (basic_block, basic_block *);
static void tail_duplicate (void);
static void layout_superblocks (void);
/* Minimal outgoing edge probability considered for superblock formation. */
static int probability_cutoff;
@ -68,8 +68,7 @@ static int branch_ratio_cutoff;
/* Return true if we should ignore the basic block for purposes of tracing. */
static bool
ignore_bb_p (bb)
basic_block bb;
ignore_bb_p (basic_block bb)
{
if (bb->index < 0)
return true;
@ -81,8 +80,7 @@ ignore_bb_p (bb)
/* Return number of instructions in the block. */
static int
count_insns (bb)
basic_block bb;
count_insns (basic_block bb)
{
rtx insn;
int n = 0;
@ -95,8 +93,7 @@ count_insns (bb)
/* Return true if E1 is more frequent than E2. */
static bool
better_p (e1, e2)
edge e1, e2;
better_p (edge e1, edge e2)
{
if (e1->count != e2->count)
return e1->count > e2->count;
@ -114,8 +111,7 @@ better_p (e1, e2)
/* Return most frequent successor of basic block BB. */
static edge
find_best_successor (bb)
basic_block bb;
find_best_successor (basic_block bb)
{
edge e;
edge best = NULL;
@ -133,8 +129,7 @@ find_best_successor (bb)
/* Return most frequent predecessor of basic block BB. */
static edge
find_best_predecessor (bb)
basic_block bb;
find_best_predecessor (basic_block bb)
{
edge e;
edge best = NULL;
@ -154,9 +149,7 @@ find_best_predecessor (bb)
Return number of basic blocks recorded. */
static int
find_trace (bb, trace)
basic_block bb;
basic_block *trace;
find_trace (basic_block bb, basic_block *trace)
{
int i = 0;
edge e;
@ -198,7 +191,7 @@ find_trace (bb, trace)
if profitable. */
static void
tail_duplicate ()
tail_duplicate (void)
{
fibnode_t *blocks = xcalloc (last_basic_block, sizeof (fibnode_t));
basic_block *trace = xmalloc (sizeof (basic_block) * n_basic_blocks);
@ -322,7 +315,7 @@ tail_duplicate ()
change though. */
static void
layout_superblocks ()
layout_superblocks (void)
{
basic_block end = ENTRY_BLOCK_PTR->succ->dest;
basic_block bb = ENTRY_BLOCK_PTR->succ->dest->next_bb;
@ -361,7 +354,7 @@ layout_superblocks ()
/* Main entry point to this file. */
void
tracer ()
tracer (void)
{
if (n_basic_blocks <= 1)
return;

View File

@ -110,26 +110,26 @@ typedef struct inline_data
/* Prototypes. */
static tree declare_return_variable PARAMS ((inline_data *, tree, tree *));
static tree copy_body_r PARAMS ((tree *, int *, void *));
static tree copy_body PARAMS ((inline_data *));
static tree expand_call_inline PARAMS ((tree *, int *, void *));
static void expand_calls_inline PARAMS ((tree *, inline_data *));
static int inlinable_function_p PARAMS ((tree, inline_data *, int));
static tree remap_decl PARAMS ((tree, inline_data *));
static tree declare_return_variable (inline_data *, tree, tree *);
static tree copy_body_r (tree *, int *, void *);
static tree copy_body (inline_data *);
static tree expand_call_inline (tree *, int *, void *);
static void expand_calls_inline (tree *, inline_data *);
static int inlinable_function_p (tree, inline_data *, int);
static tree remap_decl (tree, inline_data *);
#ifndef INLINER_FOR_JAVA
static tree initialize_inlined_parameters PARAMS ((inline_data *, tree, tree));
static void remap_block PARAMS ((tree, tree, inline_data *));
static void copy_scope_stmt PARAMS ((tree *, int *, inline_data *));
static tree initialize_inlined_parameters (inline_data *, tree, tree);
static void remap_block (tree, tree, inline_data *);
static void copy_scope_stmt (tree *, int *, inline_data *);
#else /* INLINER_FOR_JAVA */
static tree initialize_inlined_parameters PARAMS ((inline_data *, tree, tree, tree));
static void remap_block PARAMS ((tree *, tree, inline_data *));
static tree add_stmt_to_compound PARAMS ((tree, tree, tree));
static tree initialize_inlined_parameters (inline_data *, tree, tree, tree);
static void remap_block (tree *, tree, inline_data *);
static tree add_stmt_to_compound (tree, tree, tree);
#endif /* INLINER_FOR_JAVA */
static tree find_alloca_call_1 PARAMS ((tree *, int *, void *));
static tree find_alloca_call PARAMS ((tree));
static tree find_builtin_longjmp_call_1 PARAMS ((tree *, int *, void *));
static tree find_builtin_longjmp_call PARAMS ((tree));
static tree find_alloca_call_1 (tree *, int *, void *);
static tree find_alloca_call (tree);
static tree find_builtin_longjmp_call_1 (tree *, int *, void *);
static tree find_builtin_longjmp_call (tree);
/* The approximate number of instructions per statement. This number
need not be particularly accurate; it is used only to make
@ -139,9 +139,7 @@ static tree find_builtin_longjmp_call PARAMS ((tree));
/* Remap DECL during the copying of the BLOCK tree for the function. */
static tree
remap_decl (decl, id)
tree decl;
inline_data *id;
remap_decl (tree decl, inline_data *id)
{
splay_tree_node n;
tree fn;
@ -221,14 +219,10 @@ remap_decl (decl, id)
static void
#ifndef INLINER_FOR_JAVA
remap_block (scope_stmt, decls, id)
tree scope_stmt;
remap_block (tree scope_stmt, tree decls, inline_data *id)
#else /* INLINER_FOR_JAVA */
remap_block (block, decls, id)
tree *block;
remap_block (tree *block, tree decls, inline_data *id)
#endif /* INLINER_FOR_JAVA */
tree decls;
inline_data *id;
{
#ifndef INLINER_FOR_JAVA
/* We cannot do this in the cleanup for a TARGET_EXPR since we do
@ -389,10 +383,7 @@ remap_block (block, decls, id)
/* Copy the SCOPE_STMT pointed to by TP. */
static void
copy_scope_stmt (tp, walk_subtrees, id)
tree *tp;
int *walk_subtrees;
inline_data *id;
copy_scope_stmt (tree *tp, int *walk_subtrees, inline_data *id)
{
tree block;
@ -414,10 +405,7 @@ copy_scope_stmt (tp, walk_subtrees, id)
/* Called from copy_body via walk_tree. DATA is really an
`inline_data *'. */
static tree
copy_body_r (tp, walk_subtrees, data)
tree *tp;
int *walk_subtrees;
void *data;
copy_body_r (tree *tp, int *walk_subtrees, void *data)
{
inline_data* id;
tree fn;
@ -610,8 +598,7 @@ copy_body_r (tp, walk_subtrees, data)
another function. */
static tree
copy_body (id)
inline_data *id;
copy_body (inline_data *id)
{
tree body;
@ -626,15 +613,9 @@ copy_body (id)
static tree
#ifndef INLINER_FOR_JAVA
initialize_inlined_parameters (id, args, fn)
initialize_inlined_parameters (inline_data *id, tree args, tree fn)
#else /* INLINER_FOR_JAVA */
initialize_inlined_parameters (id, args, fn, block)
#endif /* INLINER_FOR_JAVA */
inline_data *id;
tree args;
tree fn;
#ifdef INLINER_FOR_JAVA
tree block;
initialize_inlined_parameters (inline_data *id, tree args, tree fn, tree block)
#endif /* INLINER_FOR_JAVA */
{
tree init_stmts;
@ -823,16 +804,12 @@ initialize_inlined_parameters (id, args, fn, block)
#ifndef INLINER_FOR_JAVA
static tree
declare_return_variable (id, return_slot_addr, use_stmt)
struct inline_data *id;
tree return_slot_addr;
tree *use_stmt;
declare_return_variable (struct inline_data *id, tree return_slot_addr,
tree *use_stmt)
#else /* INLINER_FOR_JAVA */
static tree
declare_return_variable (id, return_slot_addr, var)
struct inline_data *id;
tree return_slot_addr;
tree *var;
declare_return_variable (struct inline_data *id, tree return_slot_addr,
tree *var)
#endif /* INLINER_FOR_JAVA */
{
tree fn = VARRAY_TOP_TREE (id->fns);
@ -901,19 +878,15 @@ declare_return_variable (id, return_slot_addr, var)
/* Returns nonzero if a function can be inlined as a tree. */
int
tree_inlinable_function_p (fn, nolimit)
tree fn;
int nolimit;
tree_inlinable_function_p (tree fn, int nolimit)
{
return inlinable_function_p (fn, NULL, nolimit);
}
/* If *TP is possibly call to alloca, return nonzero. */
static tree
find_alloca_call_1 (tp, walk_subtrees, data)
tree *tp;
int *walk_subtrees ATTRIBUTE_UNUSED;
void *data ATTRIBUTE_UNUSED;
find_alloca_call_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
void *data ATTRIBUTE_UNUSED)
{
if (alloca_call_p (*tp))
return *tp;
@ -922,8 +895,7 @@ find_alloca_call_1 (tp, walk_subtrees, data)
/* Return subexpression representing possible alloca call, if any. */
static tree
find_alloca_call (exp)
tree exp;
find_alloca_call (tree exp)
{
location_t saved_loc = input_location;
tree ret = walk_tree (&exp, find_alloca_call_1, NULL, NULL);
@ -932,10 +904,8 @@ find_alloca_call (exp)
}
static tree
find_builtin_longjmp_call_1 (tp, walk_subtrees, data)
tree *tp;
int *walk_subtrees ATTRIBUTE_UNUSED;
void *data ATTRIBUTE_UNUSED;
find_builtin_longjmp_call_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
void *data ATTRIBUTE_UNUSED)
{
tree exp = *tp, decl;
@ -951,8 +921,7 @@ find_builtin_longjmp_call_1 (tp, walk_subtrees, data)
}
static tree
find_builtin_longjmp_call (exp)
tree exp;
find_builtin_longjmp_call (tree exp)
{
location_t saved_loc = input_location;
tree ret = walk_tree (&exp, find_builtin_longjmp_call_1, NULL, NULL);
@ -965,10 +934,7 @@ find_builtin_longjmp_call (exp)
can be inlined at all. */
static int
inlinable_function_p (fn, id, nolimit)
tree fn;
inline_data *id;
int nolimit;
inlinable_function_p (tree fn, inline_data *id, int nolimit)
{
int inlinable;
int currfn_insns;
@ -983,13 +949,13 @@ inlinable_function_p (fn, id, nolimit)
inlined. (It is important that this hook be called early because
in C++ it may result in template instantiation.) */
inlinable = !(*lang_hooks.tree_inlining.cannot_inline_tree_fn) (&fn);
/* We may be here either because fn is declared inline or because
we use -finline-functions. For the second case, we are more
restrictive. */
if (DID_INLINE_FUNC (fn))
max_inline_insns_single = MAX_INLINE_INSNS_AUTO;
/* The number of instructions (estimated) of current function. */
currfn_insns = DECL_NUM_STMTS (fn) * INSNS_PER_STMT;
@ -1098,10 +1064,7 @@ inlinable_function_p (fn, id, nolimit)
/* If *TP is a CALL_EXPR, replace it with its inline expansion. */
static tree
expand_call_inline (tp, walk_subtrees, data)
tree *tp;
int *walk_subtrees;
void *data;
expand_call_inline (tree *tp, int *walk_subtrees, void *data)
{
inline_data *id;
tree t;
@ -1159,7 +1122,7 @@ expand_call_inline (tp, walk_subtrees, data)
*walk_subtrees = 0;
/* Update the source position. */
push_srcloc (EXPR_WFL_FILENAME (t), EXPR_WFL_LINENO (t));
walk_tree (&EXPR_WFL_NODE (t), expand_call_inline, data,
walk_tree (&EXPR_WFL_NODE (t), expand_call_inline, data,
id->tree_pruner);
/* Restore the original source position. */
pop_srcloc ();
@ -1461,9 +1424,7 @@ expand_call_inline (tp, walk_subtrees, data)
expansions as appropriate. */
static void
expand_calls_inline (tp, id)
tree *tp;
inline_data *id;
expand_calls_inline (tree *tp, inline_data *id)
{
/* Search through *TP, replacing all calls to inline functions by
appropriate equivalents. Use walk_tree in no-duplicates mode
@ -1477,8 +1438,7 @@ expand_calls_inline (tp, id)
/* Expand calls to inline functions in the body of FN. */
void
optimize_inline_calls (fn)
tree fn;
optimize_inline_calls (tree fn)
{
inline_data id;
tree prev_fn;
@ -1532,9 +1492,7 @@ optimize_inline_calls (fn)
declarations according to the ARG_MAP splay_tree. */
void
clone_body (clone, fn, arg_map)
tree clone, fn;
void *arg_map;
clone_body (tree clone, tree fn, void *arg_map)
{
inline_data id;
@ -1564,11 +1522,7 @@ clone_body (clone, fn, arg_map)
once. */
tree
walk_tree (tp, func, data, htab_)
tree *tp;
walk_tree_fn func;
void *data;
void *htab_;
walk_tree (tree *tp, walk_tree_fn func, void *data, void *htab_)
{
htab_t htab = (htab_t) htab_;
enum tree_code code;
@ -1814,10 +1768,7 @@ walk_tree (tp, func, data, htab_)
once. */
tree
walk_tree_without_duplicates (tp, func, data)
tree *tp;
walk_tree_fn func;
void *data;
walk_tree_without_duplicates (tree *tp, walk_tree_fn func, void *data)
{
tree result;
htab_t htab;
@ -1831,10 +1782,7 @@ walk_tree_without_duplicates (tp, func, data)
/* Passed to walk_tree. Copies the node pointed to, if appropriate. */
tree
copy_tree_r (tp, walk_subtrees, data)
tree *tp;
int *walk_subtrees;
void *data ATTRIBUTE_UNUSED;
copy_tree_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
enum tree_code code = TREE_CODE (*tp);
@ -1884,11 +1832,7 @@ copy_tree_r (tp, walk_subtrees, data)
ST. FN is the function into which the copy will be placed. */
void
remap_save_expr (tp, st_, fn, walk_subtrees)
tree *tp;
void *st_;
tree fn;
int *walk_subtrees;
remap_save_expr (tree *tp, void *st_, tree fn, int *walk_subtrees)
{
splay_tree st = (splay_tree) st_;
splay_tree_node n;
@ -1928,8 +1872,7 @@ remap_save_expr (tp, st_, fn, walk_subtrees)
COMPOUND_EXPR and add STMT to it. */
static tree
add_stmt_to_compound (existing, type, stmt)
tree existing, type, stmt;
add_stmt_to_compound (tree existing, tree type, tree stmt)
{
if (!stmt)
return existing;

View File

@ -1,5 +1,5 @@
/* Tree inlining hooks and declarations.
Copyright 2001 Free Software Foundation, Inc.
Copyright 2001, 2003 Free Software Foundation, Inc.
Contributed by Alexandre Oliva <aoliva@redhat.com>
This file is part of GCC.
@ -24,13 +24,13 @@ Boston, MA 02111-1307, USA. */
/* Function prototypes. */
void optimize_inline_calls PARAMS ((tree));
int tree_inlinable_function_p PARAMS ((tree, int));
tree walk_tree PARAMS ((tree*, walk_tree_fn, void*, void*));
tree walk_tree_without_duplicates PARAMS ((tree*, walk_tree_fn, void*));
tree copy_tree_r PARAMS ((tree*, int*, void*));
void clone_body PARAMS ((tree, tree, void*));
void remap_save_expr PARAMS ((tree*, void*, tree, int*));
void optimize_inline_calls (tree);
int tree_inlinable_function_p (tree, int);
tree walk_tree (tree*, walk_tree_fn, void*, void*);
tree walk_tree_without_duplicates (tree*, walk_tree_fn, void*);
tree copy_tree_r (tree*, int*, void*);
void clone_body (tree, tree, void*);
void remap_save_expr (tree*, void*, tree, int*);
/* 0 if we should not perform inlining.
1 if we should expand functions calls inline at the tree level.

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff