basic-block.h: Fix comment typos.

	* basic-block.h: Fix comment typos.
	* bb-reorder.c: Likewise.
	* c-format.c: Likewise.
	* cfgcleanup.c: Likewise.
	* cfghooks.h: Likewise.
	* cfgloop.c: Likewise.
	* cfgloopmanip.c: Likewise.
	* cfgrtl.c: Likewise.
	* cgraph.h: Likewise.
	* cgraphunit.c: Likewise.
	* combine.c: Likewise.
	* convert.c: Likewise.
	* dbxout.c: Likewise.
	* df.c: Likewise.
	* df.h: Likewise.
	* diagnostic.c: Likewise.
	* dwarf2out.c: Likewise.
	* et-forest.h: Likewise.
	* flow.c: Likewise.
	* fold-const.c: Likewise.
	* function.h: Likewise.
	* gcov-io.h: Likewise.
	* gcov.c: Likewise.
	* gcse.c: Likewise.
	* genautomata.c: Likewise.
	* ggc-common.c: Likewise.
	* ggc-page.c: Likewise.
	* loop-unroll.c: Likewise.
	* loop-unswitch.c: Likewise.
	* loop.c: Likewise.
	* mips-tfile.c: Likewise.
	* optabs.c: Likewise.
	* ra-build.c: Likewise.
	* ra-colorize.c: Likewise.
	* ra-rewrite.c: Likewise.
	* ra.h: Likewise.
	* regmove.c: Likewise.
	* reload.c: Likewise.
	* rtlanal.c: Likewise.
	* sched-ebb.c: Likewise.
	* sched-int.h: Likewise.
	* sched-vis.c: Likewise.
	* sreal.c: Likewise.
	* ssa-ccp.c: Likewise.
	* ssa.c: Likewise.
	* toplev.c: Likewise.
	* tree-inline.c: Likewise.
	* value-prof.c: Likewise.
	* value-prof.h: Likewise.

From-SVN: r68770

Commit: e0bb17a83f (parent: 0d0a1710d9)
Author: Kazu Hirata <kazu@cs.umass.edu>
Date: 2003-07-01 12:18:01 +00:00
50 changed files with 135 additions and 83 deletions

ChangeLog

@@ -1,3 +1,55 @@
+2003-07-01  Kazu Hirata  <kazu@cs.umass.edu>
+
+	* basic-block.h: Fix comment typos.
+	* bb-reorder.c: Likewise.
+	* c-format.c: Likewise.
+	* cfgcleanup.c: Likewise.
+	* cfghooks.h: Likewise.
+	* cfgloop.c: Likewise.
+	* cfgloopmanip.c: Likewise.
+	* cfgrtl.c: Likewise.
+	* cgraph.h: Likewise.
+	* cgraphunit.c: Likewise.
+	* combine.c: Likewise.
+	* convert.c: Likewise.
+	* dbxout.c: Likewise.
+	* df.c: Likewise.
+	* df.h: Likewise.
+	* diagnostic.c: Likewise.
+	* dwarf2out.c: Likewise.
+	* et-forest.h: Likewise.
+	* flow.c: Likewise.
+	* fold-const.c: Likewise.
+	* function.h: Likewise.
+	* gcov-io.h: Likewise.
+	* gcov.c: Likewise.
+	* gcse.c: Likewise.
+	* genautomata.c: Likewise.
+	* ggc-common.c: Likewise.
+	* ggc-page.c: Likewise.
+	* loop-unroll.c: Likewise.
+	* loop-unswitch.c: Likewise.
+	* loop.c: Likewise.
+	* mips-tfile.c: Likewise.
+	* optabs.c: Likewise.
+	* ra-build.c: Likewise.
+	* ra-colorize.c: Likewise.
+	* ra-rewrite.c: Likewise.
+	* ra.h: Likewise.
+	* regmove.c: Likewise.
+	* reload.c: Likewise.
+	* rtlanal.c: Likewise.
+	* sched-ebb.c: Likewise.
+	* sched-int.h: Likewise.
+	* sched-vis.c: Likewise.
+	* sreal.c: Likewise.
+	* ssa-ccp.c: Likewise.
+	* ssa.c: Likewise.
+	* toplev.c: Likewise.
+	* tree-inline.c: Likewise.
+	* value-prof.c: Likewise.
+	* value-prof.h: Likewise.
+
 2003-07-01  Nathan Sidwell  <nathan@codesourcery.com>
 
 	* rtl.h (emit_line_note_after): Remove.

basic-block.h

@@ -487,7 +487,7 @@ enum update_life_extent
 | PROP_ALLOW_CFG_CHANGES \
 | PROP_SCAN_DEAD_STORES)
-#define CLEANUP_EXPENSIVE 1 /* Do relativly expensive optimizations
+#define CLEANUP_EXPENSIVE 1 /* Do relatively expensive optimizations
 except for edge forwarding */
 #define CLEANUP_CROSSJUMP 2 /* Do crossjumping. */
 #define CLEANUP_POST_REGSTACK 4 /* We run after reg-stack and need

bb-reorder.c

@@ -1069,8 +1069,8 @@ reorder_basic_blocks (void)
 set_edge_can_fallthru_flag ();
 mark_dfs_back_edges ();
-/* We are estimating the lenght of uncond jump insn only once since the code
-   for getting the insn lenght always returns the minimal length now. */
+/* We are estimating the length of uncond jump insn only once since the code
+   for getting the insn length always returns the minimal length now. */
 if (uncond_jump_length == 0)
 uncond_jump_length = get_uncond_jump_length ();

c-format.c

@@ -2715,7 +2715,7 @@ handle_format_attribute (tree *node, tree name ATTRIBUTE_UNUSED, tree args,
 if (info.format_type == asm_fprintf_format_type)
 init_dynamic_asm_fprintf_info();
 /* If this is one of the diagnostic attributes, then we have to
-   intialize `location_t' and `tree' at runtime. */
+   initialize `location_t' and `tree' at runtime. */
 else if (info.format_type == gcc_diag_format_type
 || info.format_type == gcc_cdiag_format_type
 || info.format_type == gcc_cxxdiag_format_type)

cfgcleanup.c

@@ -765,7 +765,7 @@ merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
 Return NULL iff the attempt failed, otherwise return basic block
 where cleanup_cfg should continue. Because the merging commonly
 moves basic block away or introduces another optimization
-possiblity, return basic block just before B so cleanup_cfg don't
+possibility, return basic block just before B so cleanup_cfg don't
 need to iterate.
 It may be good idea to return basic block before C in the case
@@ -1353,7 +1353,7 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
 return false;
 }
-/* We don't need to match the rest of edges as above checks should be enought
+/* We don't need to match the rest of edges as above checks should be enough
 to ensure that they are equivalent. */
 return true;
 }

cfghooks.h

@@ -32,7 +32,7 @@ struct cfg_hooks
 /* Basic CFG manipulation. */
 /* Redirect edge E to the given basic block B and update underlying program
-   representation. Returns false when edge is not easilly redirectable for
+   representation. Returns false when edge is not easily redirectable for
 whatever reason. */
 bool (*redirect_edge_and_branch) (edge e, basic_block b);

cfgloop.c

@@ -1104,8 +1104,8 @@ cancel_loop_tree (struct loops *loops, struct loop *loop)
 cancel_loop (loops, loop);
 }
-/* Checks that LOOPS are allright:
-   -- sizes of loops are allright
+/* Checks that LOOPS are all right:
+   -- sizes of loops are all right
 -- results of get_loop_body really belong to the loop
 -- loop header have just single entry edge and single latch edge
 -- loop latches have only single successor that is header of their loop

cfgloopmanip.c

@@ -471,7 +471,7 @@ add_loop (struct loops *loops, struct loop *loop)
 free (bbs);
 }
-/* Multiply all frequencies of basic blocks in array BBS of lenght NBBS
+/* Multiply all frequencies of basic blocks in array BBS of length NBBS
 by NUM/DEN. */
 static void
 scale_bbs_frequencies (basic_block *bbs, int nbbs, int num, int den)
@@ -604,7 +604,7 @@ unloop (struct loops *loops, struct loop *loop)
 edge *edges;
 unsigned n_edges;
-/* This is relatively straigtforward. The dominators are unchanged, as
+/* This is relatively straightforward. The dominators are unchanged, as
 loop header dominates loop latch, so the only thing we have to care of
 is the placement of loops and basic blocks inside the loop tree. We
 move them all to the loop->outer, and then let fix_bb_placements do
@@ -831,7 +831,7 @@ loop_delete_branch_edge (edge e, int really_delete)
 Additionally, we perform following manipulation with edges:
 We have two special edges given. LATCH_EDGE is the latch edge of the
 duplicated loop and leads into its header (one of blocks in BBS);
-it does not have neccessarily lead from one of the blocks, because
+it does not have necessarily lead from one of the blocks, because
 we may be copying the loop body several times in unrolling.
 Edge ENTRY leads also leads to header, and it is either latch or entry
 edge. Copy of LATCH_EDGE is redirected to header and is stored in

cfgrtl.c

@@ -905,7 +905,7 @@ force_nonfallthru_and_redirect (edge e, basic_block target)
 if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR
 && any_condjump_p (e->src->end)
 /* When called from cfglayout, fallthru edges do not
-   neccessarily go to the next block. */
+   necessarily go to the next block. */
 && e->src->next_bb == e->dest
 && JUMP_LABEL (e->src->end) == e->dest->head)
 {
@@ -1331,9 +1331,9 @@ mark_killed_regs (rtx reg, rtx set ATTRIBUTE_UNUSED, void *data)
 /* Similar to insert_insn_on_edge, tries to put INSN to edge E. Additionally
 it checks whether this will not clobber the registers that are live on the
-edge (i.e. it requieres liveness information to be up-to-date) and if there
+edge (i.e. it requires liveness information to be up-to-date) and if there
 are some, then it tries to save and restore them. Returns true if
-succesful. */
+successful. */
 bool
 safe_insert_insn_on_edge (rtx insn, edge e)
 {

cgraph.h

@@ -27,10 +27,10 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 struct cgraph_local_info GTY(())
 {
-/* Set when function function is visiable in current compilation unit only
+/* Set when function function is visible in current compilation unit only
 and it's address is never taken. */
 bool local;
-/* Set when function is small enought to be inlinable many times. */
+/* Set when function is small enough to be inlinable many times. */
 bool inline_many;
 /* Set when function can be inlined once (false only for functions calling
 alloca, using varargs and so on). */
@@ -58,7 +58,7 @@ struct cgraph_rtl_info GTY(())
 /* The cgraph data strutcture.
-   Each function decl has assigned cgraph_node listing calees and callers. */
+   Each function decl has assigned cgraph_node listing callees and callers. */
 struct cgraph_node GTY(())
 {
@@ -79,7 +79,7 @@ struct cgraph_node GTY(())
 or it's address is taken. */
 bool needed;
 /* Set when function is reachable by call from other function
-   that is eighter reachable or needed. */
+   that is either reachable or needed. */
 bool reachable;
 /* Set when the frontend has been asked to lower representation of this
 function into trees. Callees lists are not available when lowered

cgraphunit.c

@@ -44,7 +44,7 @@ static void cgraph_mark_functions_to_inline_once PARAMS ((void));
 static void cgraph_optimize_function PARAMS ((struct cgraph_node *));
 /* Analyze function once it is parsed. Set up the local information
-   available - create cgraph edges for function calles via BODY. */
+   available - create cgraph edges for function calls via BODY. */
 void
 cgraph_finalize_function (decl, body)
@@ -57,7 +57,7 @@ cgraph_finalize_function (decl, body)
 if (/* Externally visible functions must be output. The exception are
 COMDAT functions that must be output only when they are needed.
-Similarly are handled defered functions and
+Similarly are handled deferred functions and
 external functions (GCC extension "extern inline") */
 (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_EXTERNAL (decl))
 /* ??? Constructors and destructors not called otherwise can be inlined
@@ -294,7 +294,7 @@ cgraph_expand_function (node)
 Attempt to topologically sort the nodes so function is output when
 all called functions are already assembled to allow data to be
-propagated accross the callgraph. Use a stack to get smaller distance
+propagated across the callgraph. Use a stack to get smaller distance
 between a function and it's callees (later we may choose to use a more
 sophisticated algorithm for function reordering; we will likely want
 to use subsections to make the output functions appear in top-down

combine.c

@@ -4103,7 +4103,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int last,
 /* (float_truncate:SF (float_truncate:DF foo:XF))
 = (float_truncate:SF foo:XF).
-This may elliminate double rounding, so it is unsafe.
+This may eliminate double rounding, so it is unsafe.
 (float_truncate:SF (float_extend:XF foo:DF))
 = (float_truncate:SF foo:DF).

convert.c

@@ -152,7 +152,7 @@ convert_to_real (tree type, tree expr)
 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
 newtype = TREE_TYPE (arg0);
-/* Be curefull about integer to fp conversions.
+/* Be careful about integer to fp conversions.
 These may overflow still. */
 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
 && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)

dbxout.c

@@ -2014,7 +2014,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED)
 || DECL_IGNORED_P (decl))
 DBXOUT_DECR_NESTING_AND_RETURN (0);
-/* If we are to generate only the symbols actualy used then such
+/* If we are to generate only the symbols actually used then such
 symbol nodees are flagged with TREE_USED. Ignore any that
 aren't flaged as TREE_USED. */

df.c

@@ -860,7 +860,7 @@ df_def_record_1 (struct df *df, rtx x, basic_block bb, rtx insn)
 rtx dst;
 enum df_ref_flags flags = 0;
-/* We may recursivly call ourselves on EXPR_LIST when dealing with PARALLEL
+/* We may recursively call ourselves on EXPR_LIST when dealing with PARALLEL
 construct. */
 if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER)
 loc = &XEXP (x, 0);

df.h

@@ -92,7 +92,7 @@ struct insn_info
 {
 struct df_link *defs; /* Head of insn-def chain. */
 struct df_link *uses; /* Head of insn-use chain. */
-/* ???? The following luid field should be considerd private so that
+/* ???? The following luid field should be considered private so that
 we can change it on the fly to accommodate new insns? */
 int luid; /* Logical UID. */
 };

diagnostic.c

@@ -46,7 +46,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 /* Format an integer given by va_arg (ARG, type-specifier T) where
 type-specifier is a precision modifier as indicated by PREC. F is
-   a string used to construct the appropciate format-specifier. */
+   a string used to construct the appropriate format-specifier. */
 #define output_integer_with_precision(BUFFER, ARG, PREC, T, F) \
 do \
 switch (PREC) \
@@ -332,7 +332,7 @@ output_append_r (output_buffer *buffer, const char *start, int length)
 output_text_length (buffer) += length;
 }
-/* Append a string deliminated by START and END to BUFFER. No wrapping is
+/* Append a string delimited by START and END to BUFFER. No wrapping is
 done. However, if beginning a new line then emit BUFFER->state.prefix
 and skip any leading whitespace if appropriate. The caller must ensure
 that it is safe to do so. */

dwarf2out.c

@@ -3523,7 +3523,7 @@ static int current_function_has_inlines;
 static int comp_unit_has_inlines;
 #endif
-/* Number of file tables emited in maybe_emit_file(). */
+/* Number of file tables emitted in maybe_emit_file(). */
 static GTY(()) int emitcount = 0;
 /* Number of internal labels generated by gen_internal_sym(). */

et-forest.h

@@ -41,7 +41,7 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
 the sequence is 1 2 4 2 5 3 1 3 1 4 1.
-The sequence is stored in a sligtly modified splay tree.
+The sequence is stored in a slightly modified splay tree.
 In order to support various types of node values, a hashtable
 is used to convert node values to the internal representation. */

flow.c

@@ -585,7 +585,7 @@ verify_local_live_at_start (regset new_live_at_start, basic_block bb)
 /* Updates life information starting with the basic blocks set in BLOCKS.
 If BLOCKS is null, consider it to be the universal set.
-If EXTENT is UPDATE_LIFE_LOCAL, such as after splitting or peepholeing,
+If EXTENT is UPDATE_LIFE_LOCAL, such as after splitting or peepholing,
 we are only expecting local modifications to basic blocks. If we find
 extra registers live at the beginning of a block, then we either killed
 useful data, or we have a broken split that wants data not provided.

fold-const.c

@@ -5779,7 +5779,7 @@ fold (expr)
 /* Preserve the MINUS_EXPR if the negative part of the literal is
 greater than the positive part. Otherwise, the multiplicative
 folding code (i.e extract_muldiv) may be fooled in case
-unsigned constants are substracted, like in the following
+unsigned constants are subtracted, like in the following
 example: ((X*2 + 4) - 8U)/2. */
 if (minus_lit0 && lit0)
 {

function.h

@@ -485,7 +485,7 @@ struct function GTY(())
 /* Nonzero if the current function needs an lsda for exception handling. */
 unsigned int uses_eh_lsda : 1;
-/* Nonzero if code to initialize arg_pointer_save_area has been emited. */
+/* Nonzero if code to initialize arg_pointer_save_area has been emitted. */
 unsigned int arg_pointer_save_area_init : 1;
 /* How commonly executed the function is. Initialized during branch

gcov-io.h

@@ -88,7 +88,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 most significant is allocated first. Unused levels are zero.
 Active levels are odd-valued, so that the LSB of the level is one.
 A sub-level incorporates the values of its superlevels. This
-formatting allows you to determine the tag heirarchy, without
+formatting allows you to determine the tag hierarchy, without
 understanding the tags themselves, and is similar to the standard
 section numbering used in technical documents. Level values
 [1..3f] are used for common tags, values [41..9f] for the graph
@@ -514,7 +514,7 @@ gcov_is_error (void)
 }
 #if IN_LIBGCOV
-/* Move to beginning of file and intialize for writing. */
+/* Move to beginning of file and initialize for writing. */
 static inline void
 gcov_rewrite (void)

gcov.c

@@ -1147,7 +1147,7 @@ solve_flow_graph (fn)
 {
 arc->is_unconditional = 1;
 /* If this block is instrumenting a call, it might be
-   an artifical block. It is not artificial if it has
+   an artificial block. It is not artificial if it has
 a non-fallthrough exit, or the destination of this
 arc has more than one entry. Mark the destination
 block as a return site, if none of those conditions

gcse.c

@@ -4842,7 +4842,7 @@ reg_killed_on_edge (reg, e)
 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
 Returns nonzero if a change was made.
-During the jump bypassing pass, we may place copies of SETCC instuctions
+During the jump bypassing pass, we may place copies of SETCC instructions
 on CFG edges. The following routine must be careful to pay attention to
 these inserted insns when performing its transformations. */
@@ -4885,8 +4885,8 @@ bypass_block (bb, setcc, jump)
 continue;
 /* The irreducible loops created by redirecting of edges entering the
-   loop from outside would decrease effectivity of some of the following
-   optimalizations, so prevent this. */
+   loop from outside would decrease effectiveness of some of the following
+   optimizations, so prevent this. */
 if (may_be_loop_header
 && !(e->flags & EDGE_DFS_BACK))
 continue;
@@ -5839,7 +5839,7 @@ add_label_notes (x, insn)
 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
 {
 /* This code used to ignore labels that referred to dispatch tables to
-   avoid flow generating (slighly) worse code.
+   avoid flow generating (slightly) worse code.
 We no longer ignore such label references (see LABEL_REF handling in
 mark_jump_label for additional information). */
@@ -6448,7 +6448,7 @@ hoist_code ()
 to avoid any possible code expansion due to register
 allocation issues; however experiments have shown that
 the vast majority of hoistable expressions are only movable
-from two successors, so raising this threshhold is likely
+from two successors, so raising this threshold is likely
 to nullify any benefit we get from code hoisting. */
 if (hoistable > 1)
 {
@@ -7163,7 +7163,7 @@ extract_mentioned_regs_helper (x, accum)
 The things are complicated a bit by fact that there already may be stores
 to the same MEM from other blocks; also caller must take care of the
-neccessary cleanup of the temporary markers after end of the basic block.
+necessary cleanup of the temporary markers after end of the basic block.
 */
 static void
@@ -7222,7 +7222,7 @@ find_moveable_store (insn, regs_set_before, regs_set_after)
 ANTIC_STORE_LIST (ptr));
 }
-/* It is not neccessary to check whether store is available if we did
+/* It is not necessary to check whether store is available if we did
 it successfully before; if we failed before, do not bother to check
 until we reach the insn that caused us to fail. */
 check_available = 0;
@@ -7608,7 +7608,7 @@ build_store_vectors ()
 if (store_killed_after (ptr->pattern, ptr->pattern_regs, bb->head,
 bb, regs_set_in_block, NULL))
 {
-/* It should not be neccessary to consider the expression
+/* It should not be necessary to consider the expression
 killed if it is both anticipatable and available. */
 if (!TEST_BIT (st_antloc[bb->index], ptr->index)
 || !TEST_BIT (ae_gen[bb->index], ptr->index))

genautomata.c

@@ -1616,7 +1616,7 @@ n_sep_els (char *s, int sep, int par_flag)
 elements in the string and number of elements through els_num.
 Take parentheses into account if PAREN_P has nonzero value. The
 function also inserts the end marker NULL at the end of vector.
-Return 0 for the null string, -1 if parantheses are not balanced. */
+Return 0 for the null string, -1 if parentheses are not balanced. */
 static char **
 get_str_vect (char *str, int *els_num, int sep, int paren_p)
 {

ggc-common.c

@@ -484,7 +484,7 @@ gt_pch_save (FILE *f)
 ggc_pch_prepare_write (state.d, state.f);
-/* Pad the PCH file so that the mmaped area starts on a page boundary. */
+/* Pad the PCH file so that the mmapped area starts on a page boundary. */
 {
 long o;
 o = ftell (state.f) + sizeof (mmi);

ggc-page.c

@@ -853,8 +853,8 @@ adjust_depth (void)
 {
 top = G.by_depth[G.by_depth_in_use-1];
-/* Peel back indicies in depth that index into by_depth, so that
-   as new elements are added to by_depth, we note the indicies
+/* Peel back indices in depth that index into by_depth, so that
+   as new elements are added to by_depth, we note the indices
 of those elements, if they are for new context depths. */
 while (G.depth_in_use > (size_t)top->context_depth+1)
 --G.depth_in_use;

loop-unroll.c

@@ -32,7 +32,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "expr.h"
 /* This pass performs loop unrolling and peeling. We only perform these
-   optimalizations on innermost loops (with single exception) because
+   optimizations on innermost loops (with single exception) because
 the impact on performance is greatest here, and we want to avoid
 unnecessary code size growth. The gain is caused by greater sequentiality
 of code, better code to optimize for futher passes and in some cases
@@ -511,7 +511,7 @@ decide_unroll_constant_iterations (loops, loop, flags)
 /* Success; now compute number of iterations to unroll. We alter
 nunroll so that as few as possible copies of loop body are
-neccesary, while still not decreasing the number of unrollings
+necessary, while still not decreasing the number of unrollings
 too much (at most by 1). */
 best_copies = 2 * nunroll + 10;
@@ -1153,7 +1153,7 @@ decide_unroll_stupid (loops, loop, flags)
 }
 /* Success. Now force nunroll to be power of 2, as it seems that this
-   improves results (partially because of better aligments, partially
+   improves results (partially because of better alignments, partially
 because of some dark magic). */
 for (i = 1; 2 * i <= nunroll; i *= 2);

loop-unswitch.c

@@ -32,7 +32,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #include "expr.h"
 /* This pass moves constant conditions out of loops, duplicating the loop
-   in progres, i.e. this code:
+   in progress, i.e. this code:
 while (loop_cond)
 {

loop.c

@@ -71,7 +71,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
 #endif
-/* Give up the prefetch optimizations once we exceed a given threshhold.
+/* Give up the prefetch optimizations once we exceed a given threshold.
 It is unlikely that we would be able to optimize something in a loop
 with so many detected prefetches. */
 #define MAX_PREFETCHES 100
@@ -1774,7 +1774,7 @@ add_label_notes (x, insns)
 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
 {
 /* This code used to ignore labels that referred to dispatch tables to
-   avoid flow generating (slighly) worse code.
+   avoid flow generating (slightly) worse code.
 We no longer ignore such label references (see LABEL_REF handling in
 mark_jump_label for additional information). */
@@ -10650,7 +10650,7 @@ loop_insn_sink (loop, pattern)
 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
 }
-/* bl->final_value can be eighter general_operand or PLUS of general_operand
+/* bl->final_value can be either general_operand or PLUS of general_operand
 and constant. Emit sequence of instructions to load it into REG. */
 static rtx
 gen_load_of_final_value (reg, final_value)

mips-tfile.c

@@ -163,7 +163,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 Each file table has offsets for where the line numbers, local
 strings, local symbols, and procedure table starts from within the
-global tables, and the indexs are reset to 0 for each of those
+global tables, and the indices are reset to 0 for each of those
 tables for the file.
 The procedure table contains the binary equivalents of the .ent

optabs.c

@@ -5123,7 +5123,7 @@ expand_fix (to, from, unsignedp)
 In the other path we know the value is positive in the range 2^63..2^64-1
 inclusive. (as for other imput overflow happens and result is undefined)
-So we know that the most important bit set in mantisa corresponds to
+So we know that the most important bit set in mantissa corresponds to
 2^63. The subtraction of 2^63 should not generate any rounding as it
 simply clears out that bit. The rest is trivial. */

ra-build.c

@@ -622,7 +622,7 @@ struct curr_use {
 4 if both are SUBREG's of different size, but have bytes in common.
 -1 is a special case, for when DEF and USE refer to the same regno, but
 have for other reasons no bits in common (can only happen with
-subregs refering to different words, or to words which already were
+subregs referring to different words, or to words which already were
 defined for this USE).
 Furthermore it modifies use->undefined to clear the bits which get defined
 by DEF (only for cases with partial overlap).

ra-colorize.c

@@ -1748,7 +1748,7 @@ try_recolor_web (web)
 }
 /* Mark colors for which some wide webs are involved. For
 those the independent sets are not simply one-node graphs, so
-they can't be recolored independ from their neighborhood. This
+they can't be recolored independent from their neighborhood. This
 means, that our cost calculation can be incorrect (assuming it
 can avoid spilling a web because it thinks some colors are available,
 although it's neighbors which itself need recoloring might take

ra-rewrite.c

@@ -1545,7 +1545,7 @@ detect_web_parts_to_rebuild ()
 /* We also recheck unconditionally all uses of any hardregs. This means
 we _can_ delete all these uses from the live_at_end[] bitmaps.
-And because we sometimes delete insn refering to hardregs (when
+And because we sometimes delete insn referring to hardregs (when
 they became useless because they setup a rematerializable pseudo, which
 then was rematerialized), some of those uses will go away with the next
 df_analyse(). This means we even _must_ delete those uses from

ra.h

@@ -466,7 +466,7 @@ extern struct dlist *web_lists[(int) LAST_NODE_TYPE];
 /* The largest DF_REF_ID of defs resp. uses, as it was in the
 last pass. In the first pass this is zero. Used to distinguish new
-from old refrences. */
+from old references. */
 extern unsigned int last_def_id;
 extern unsigned int last_use_id;
@@ -563,7 +563,7 @@ extern int flag_ra_break_aliases;
 extern int flag_ra_merge_spill_costs;
 /* Nonzero if we want to spill at every use, instead of at deaths,
-   or intereference region borders. */
+   or interference region borders. */
 extern int flag_ra_spill_every_use;
 /* Nonzero to output all notes in the debug dumps. */

regmove.c

@@ -2323,14 +2323,14 @@ record_stack_memrefs (xp, data)
 return 1;
 case REG:
 /* ??? We want be able to handle non-memory stack pointer
-   references later. For now just discard all insns refering to
+   references later. For now just discard all insns referring to
 stack pointer outside mem expressions. We would probably
 want to teach validate_replace to simplify expressions first.
 We can't just compare with STACK_POINTER_RTX because the
 reference to the stack pointer might be in some other mode.
 In particular, an explicit clobber in an asm statement will
-result in a QImode clober. */
+result in a QImode clobber. */
 if (REGNO (x) == STACK_POINTER_REGNUM)
 return 1;
 break;

reload.c

@@ -5319,7 +5319,7 @@ find_reloads_address_1 (mode, x, context, loc, opnum, type, ind_levels, insn)
 GET_MODE (orig_op1))));
 }
 /* Plus in the index register may be created only as a result of
-   register remateralization for expresion like &localvar*4. Reload it.
+   register remateralization for expression like &localvar*4. Reload it.
 It may be possible to combine the displacement on the outer level,
 but it is probably not worthwhile to do so. */
 if (context)

rtlanal.c

@@ -3600,7 +3600,7 @@ hoist_test_store (x, val, live)
 if (rtx_equal_p (x, val))
 return true;
-/* Allow subreg of X in case it is not writting just part of multireg pseudo.
+/* Allow subreg of X in case it is not writing just part of multireg pseudo.
 Then we would need to update all users to care hoisting the store too.
 Caller may represent that by specifying whole subreg as val. */
@@ -3621,7 +3621,7 @@ hoist_test_store (x, val, live)
 if (!REG_P (x))
 return false;
-/* Pseudo registers can be allways replaced by another pseudo to avoid
+/* Pseudo registers can be always replaced by another pseudo to avoid
 the side effect, for hard register we must ensure that they are dead.
 Eventually we may want to add code to try turn pseudos to hards, but it
 is unlikely useful. */

sched-ebb.c

@@ -206,7 +206,7 @@ static struct sched_info ebb_sched_info =
 0, 1
 };
-/* It is possible that ebb scheduling elliminated some blocks.
+/* It is possible that ebb scheduling eliminated some blocks.
 Place blocks from FIRST to LAST before BEFORE. */
 static void
@@ -268,7 +268,7 @@ fix_basic_block_boundaries (bb, last, head, tail)
 last_inside = insn;
 }
 /* Control flow instruction terminate basic block. It is possible
-   that we've elliminated some basic blocks (made them empty).
+   that we've eliminated some basic blocks (made them empty).
 Find the proper basic block using BLOCK_FOR_INSN and arrange things in
 a sensible way by inserting empty basic blocks as needed. */
 if (control_flow_insn_p (insn) || (insn == tail && last_inside))
@@ -303,7 +303,7 @@ fix_basic_block_boundaries (bb, last, head, tail)
 h = curr_bb->head;
 curr_bb->head = head;
 curr_bb->end = insn;
-/* Edge splitting created missplaced BASIC_BLOCK note, kill
+/* Edge splitting created misplaced BASIC_BLOCK note, kill
 it. */
 delete_insn (h);
 }
@@ -453,7 +453,7 @@ add_deps_for_risky_insns (head, tail)
 case TRAP_RISKY:
 case IRISKY:
 case PRISKY_CANDIDATE:
-/* ??? We could implement better checking PRISKY_CANDIATEs
+/* ??? We could implement better checking PRISKY_CANDIDATEs
 analogous to sched-rgn.c. */
 /* We can not change the mode of the backward
 dependency because REG_DEP_ANTI has the lowest

sched-int.h

@@ -188,7 +188,7 @@ struct haifa_insn_data
 int priority;
 /* The number of incoming edges in the forward dependency graph.
-   As scheduling proceds, counts are decreased. An insn moves to
+   As scheduling proceeds, counts are decreased. An insn moves to
 the ready queue when its counter reaches zero. */
 int dep_count;

sched-vis.c

@@ -544,7 +544,7 @@ print_exp (buf, x, verbose)
 cur = safe_concat (buf, cur, ")");
 } /* print_exp */
-/* Prints rtxes, I customly classified as values. They're constants,
+/* Prints rtxes, I customarily classified as values. They're constants,
 registers, labels, symbols and memory accesses. */
 static void

sreal.c

@@ -34,7 +34,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 otherwise two HOST_WIDE_INTs are used for the significant.
 Only a half of significant bits is used (in normalized sreals) so that we do
 not have problems with overflow, for example when c->sig = a->sig * b->sig.
-So the precission for 64-bit and 32-bit machines is 32-bit.
+So the precision for 64-bit and 32-bit machines is 32-bit.
 Invariant: The numbers are normalized before and after each call of sreal_*.

ssa-ccp.c

@@ -338,7 +338,7 @@ visit_expression (insn, block)
 blocks as executable if they have not already been
 marked.
-One day we may try do better with swtich tables and
+One day we may try do better with switch tables and
 other computed jumps. */
 for (curredge = block->succ; curredge;
 curredge = curredge->succ_next)

ssa.c

@@ -499,7 +499,7 @@ find_evaluations (evals, nregs)
 /* Computing the Dominance Frontier:
-   As decribed in Morgan, section 3.5, this may be done simply by
+   As described in Morgan, section 3.5, this may be done simply by
 walking the dominator tree bottom-up, computing the frontier for
 the children before the parent. When considering a block B,
 there are two cases:

toplev.c

@@ -3280,7 +3280,7 @@ rest_of_handle_loop_optimize (tree decl, rtx insns)
 ggc_collect ();
 }
-/* Perform loop optimalizations. It might be better to do them a bit
+/* Perform loop optimizations. It might be better to do them a bit
 sooner, but we want the profile feedback to work more
 efficiently. */
 static void
@@ -3296,7 +3296,7 @@ rest_of_handle_loop2 (tree decl, rtx insns)
 if (loops)
 {
-/* The optimalizations: */
+/* The optimizations: */
 if (flag_unswitch_loops)
 unswitch_loops (loops);

tree-inline.c

@@ -1017,7 +1017,7 @@ inlinable_function_p (fn, id, nolimit)
 && currfn_insns > max_inline_insns_single)
 inlinable = 0;
 /* We can't inline functions that call __builtin_longjmp at all.
-   The non-local goto machenery really requires the destination
+   The non-local goto machinery really requires the destination
 be in a different function. If we allow the function calling
 __builtin_longjmp to be inlined into the function calling
 __builtin_setjmp, Things will Go Awry. */

value-prof.c

@@ -40,7 +40,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 insn_values_to_profile function. This function is called from branch_prob
 in profile.c and the requested values are instrumented by it in the first
 compilation with -fprofile-arcs. The optimization may then read the
-gathered data in the second compilation with -fbranch-probablities (the
+gathered data in the second compilation with -fbranch-probabilities (the
 description of an exact way how to do it will be added here once the
 code responsible for reading of the data is merged). */

value-prof.h

@@ -35,10 +35,10 @@ struct histogram_value
 {
 rtx value; /* The value to profile. */
 enum machine_mode mode; /* And its mode. */
-rtx seq; /* Insns requiered to count the profiled value. */
+rtx seq; /* Insns required to count the profiled value. */
 rtx insn; /* Insn before that to measure. */
 enum hist_type type; /* Type of information to measure. */
-unsigned n_counters; /* Number of requiered counters. */
+unsigned n_counters; /* Number of required counters. */
 union
 {
 struct