ifcvt.c: Fix comment typos.

* ifcvt.c: Fix comment typos.
	* lcm.c: Likewise.
	* libgcc2.c: Likewise.
	* local-alloc.c: Likewise.
	* loop.c: Likewise.
	* predict.c: Likewise.
	* ra-build.c: Likewise.
	* ra.c: Likewise.
	* ra-colorize.c: Likewise.
	* ra.h: Likewise.
	* ra-rewrite.c: Likewise.
	* regmove.c: Likewise.
	* reload.h: Likewise.
	* rtlanal.c: Likewise.
	* toplev.c: Likewise.
	* tree.h: Likewise.
	* unwind-dw2-fde-glibc.c: Likewise.
	* vmsdbgout.c: Likewise.

From-SVN: r61421
This commit is contained in:
Kazu Hirata 2003-01-17 03:28:11 +00:00 committed by Kazu Hirata
parent fd2190ca49
commit 3d042e770b
19 changed files with 57 additions and 36 deletions

View File

@@ -1,3 +1,24 @@
2003-01-16 Kazu Hirata <kazu@cs.umass.edu>
* ifcvt.c: Fix comment typos.
* lcm.c: Likewise.
* libgcc2.c: Likewise.
* local-alloc.c: Likewise.
* loop.c: Likewise.
* predict.c: Likewise.
* ra-build.c: Likewise.
* ra.c: Likewise.
* ra-colorize.c: Likewise.
* ra.h: Likewise.
* ra-rewrite.c: Likewise.
* regmove.c: Likewise.
* reload.h: Likewise.
* rtlanal.c: Likewise.
* toplev.c: Likewise.
* tree.h: Likewise.
* unwind-dw2-fde-glibc.c: Likewise.
* vmsdbgout.c: Likewise.
2003-01-16 Richard Henderson <rth@redhat.com>
* dwarf2out.c (struct file_table): Remove.

View File

@@ -2282,7 +2282,7 @@ find_if_block (ce_info)
int max_insns = MAX_CONDITIONAL_EXECUTE;
int n_insns;
/* Determine if the preceeding block is an && or || block. */
/* Determine if the preceding block is an && or || block. */
if ((n_insns = block_jumps_and_fallthru_p (bb, else_bb)) >= 0)
{
ce_info->and_and_p = TRUE;
@@ -2877,7 +2877,7 @@ dead_or_predicable (test_bb, merge_bb, other_bb, new_dest, reversep)
if (HAVE_conditional_execution)
{
/* In the conditional execution case, we have things easy. We know
the condition is reversable. We don't have to check life info,
the condition is reversible. We don't have to check life info,
becase we're going to conditionally execute the code anyway.
All that's left is making sure the insns involved can actually
be predicated. */

View File

@@ -307,7 +307,7 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
qin = worklist;
/* Note that we do not use the last allocated element for our queue,
as EXIT_BLOCK is never inserted into it. In fact the above allocation
of n_basic_blocks + 1 elements is not encessary. */
of n_basic_blocks + 1 elements is not necessary. */
qend = &worklist[n_basic_blocks];
qlen = n_basic_blocks;
@@ -849,9 +849,9 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
The LCM algorithm is then run over the flow graph to determine where to
place the sets to the highest-priority value in respect of first the first
insn in any one block. Any adjustments required to the transparancy
insn in any one block. Any adjustments required to the transparency
vectors are made, then the next iteration starts for the next-lower
priority mode, till for each entity all modes are exhasted.
priority mode, till for each entity all modes are exhausted.
More details are located in the code for optimize_mode_switching(). */

View File

@@ -234,7 +234,7 @@ __mulvdi3 (DWtype u, DWtype v)
#endif
/* Unless shift functions are defined whith full ANSI prototypes,
/* Unless shift functions are defined with full ANSI prototypes,
parameter b will be promoted to int if word_type is smaller than an int. */
#ifdef L_lshrdi3
DWtype
@@ -1347,7 +1347,7 @@ gcov_exit (void)
#if defined (TARGET_HAS_F_SETLKW)
/* After a fork, another process might try to read and/or write
the same file simultanously. So if we can, lock the file to
the same file simultaneously. So if we can, lock the file to
avoid race conditions. */
while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
&& errno == EINTR)

View File

@@ -1179,7 +1179,7 @@ update_equiv_regs ()
}
/* Mark REG as having no known equivalence.
Some instructions might have been proceessed before and furnished
Some instructions might have been processed before and furnished
with REG_EQUIV notes for this register; these notes will have to be
removed.
STORE is the piece of RTL that does the non-constant / conflicting
@@ -1327,7 +1327,7 @@ block_alloc (b)
must match operand zero. In that case, skip any
operand that doesn't list operand 0 since we know that
the operand always conflicts with operand 0. We
ignore commutatity in this case to keep things simple. */
ignore commutativity in this case to keep things simple. */
if (n_matching_alts == recog_data.n_alternatives
&& 0 == requires_inout (recog_data.constraints[i]))
continue;

View File

@@ -82,7 +82,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
/* Parameterize some prefetch heuristics so they can be turned on and off
easily for performance testing on new architecures. These can be
easily for performance testing on new architectures. These can be
defined in target-dependent files. */
/* Prefetch is worthwhile only when loads/stores are dense. */
@@ -793,7 +793,7 @@ scan_loop (loop, flags)
}
}
/* For parallels, add any possible uses to the depencies, as
/* For parallels, add any possible uses to the dependencies, as
we can't move the insn without resolving them first. */
if (GET_CODE (PATTERN (p)) == PARALLEL)
{
@@ -3620,7 +3620,7 @@ check_store (x, pat, data)
/* Like rtx_equal_p, but attempts to swap commutative operands. This is
important to get some addresses combined. Later more sophisticated
transformations can be added when necesary.
transformations can be added when necessary.
??? Same trick with swapping operand is done at several other places.
It can be nice to develop some common way to handle this. */
@@ -5537,7 +5537,7 @@ valid_initial_value_p (x, insn, call_seen, loop_start)
as a possible giv. INSN is the insn whose pattern X comes from.
NOT_EVERY_ITERATION is 1 if the insn might not be executed during
every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
more thanonce in each loop iteration. */
more than once in each loop iteration. */
static void
find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
@@ -5742,7 +5742,7 @@ record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
rtx set = single_set (insn);
rtx temp;
/* Attempt to prove constantness of the values. Don't let simplity_rtx
/* Attempt to prove constantness of the values. Don't let simplify_rtx
undo the MULT canonicalization that we performed earlier. */
temp = simplify_rtx (add_val);
if (temp
@@ -6686,7 +6686,7 @@ simplify_giv_expr (loop, x, ext_val, benefit)
arg1)),
ext_val, benefit);
}
/* Porpagate the MULT expressions to the intermost nodes. */
/* Propagate the MULT expressions to the intermost nodes. */
else if (GET_CODE (arg0) == PLUS)
{
/* (invar_0 + invar_1) * invar_2. Distribute. */
@@ -7372,7 +7372,7 @@ check_ext_dependent_givs (bl, loop_info)
constants in order to be certain of no overflow. */
/* ??? An unknown iteration count with an increment of +-1
combined with friendly exit tests of against an invariant
value is also ameanable to optimization. Not implemented. */
value is also amenable to optimization. Not implemented. */
if (loop_info->n_iterations > 0
&& bl->initial_value
&& GET_CODE (bl->initial_value) == CONST_INT
@@ -7394,7 +7394,7 @@ check_ext_dependent_givs (bl, loop_info)
neg_incr = 1, abs_incr = -abs_incr;
total_incr = abs_incr * loop_info->n_iterations;
/* Check for host arithmatic overflow. */
/* Check for host arithmetic overflow. */
if (total_incr / loop_info->n_iterations == abs_incr)
{
unsigned HOST_WIDE_INT u_max;
@@ -7407,7 +7407,7 @@ check_ext_dependent_givs (bl, loop_info)
/* Check zero extension of biv ok. */
if (start_val >= 0
/* Check for host arithmatic overflow. */
/* Check for host arithmetic overflow. */
&& (neg_incr
? u_end_val < u_start_val
: u_end_val > u_start_val)
@@ -7425,7 +7425,7 @@ check_ext_dependent_givs (bl, loop_info)
keep this fact in mind -- myself included on occasion.
So leave alone with the signed overflow optimizations. */
if (start_val >= -s_max - 1
/* Check for host arithmatic overflow. */
/* Check for host arithmetic overflow. */
&& (neg_incr
? s_end_val < start_val
: s_end_val > start_val)
@@ -10541,7 +10541,7 @@ loop_insn_sink (loop, pattern)
}
/* bl->final_value can be eighter general_operand or PLUS of general_operand
and constant. Emit sequence of intructions to load it into REG */
and constant. Emit sequence of instructions to load it into REG. */
static rtx
gen_load_of_final_value (reg, final_value)
rtx reg, final_value;

View File

@@ -570,7 +570,7 @@ estimate_probability (loops_info)
if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
;
/* Comparisons with 0 are often used for booleans and there is
nothing usefull to predict about them. */
nothing useful to predict about them. */
else if (XEXP (cond, 1) == const0_rtx
|| XEXP (cond, 0) == const0_rtx)
;
@@ -586,7 +586,7 @@ estimate_probability (loops_info)
if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
;
/* Comparisons with 0 are often used for booleans and there is
nothing usefull to predict about them. */
nothing useful to predict about them. */
else if (XEXP (cond, 1) == const0_rtx
|| XEXP (cond, 0) == const0_rtx)
;

View File

@@ -2653,7 +2653,7 @@ detect_remat_webs ()
oldwebs can't have their references changed. The
incremental machinery barfs on that. */
|| (!rtx_unstable_p (src) && !contains_pseudo (src))
/* Additionally also memrefs to stack-slots are usefull, when
/* Additionally also memrefs to stack-slots are useful, when
we created them ourself. They might not have set their
unchanging flag set, but nevertheless they are stable across
the livetime in question. */

View File

@@ -1511,7 +1511,7 @@ colorize_one_web (web, hard)
struct web *aw = alias (w);
/* If we are a spill-temp, we also look at webs coalesced
to precolored ones. Otherwise we only look at webs which
themself were colored, or coalesced to one. */
themselves were colored, or coalesced to one. */
if (aw->type == PRECOLORED && w != aw && web->spill_temp
&& flag_ra_optimistic_coalescing)
{

View File

@@ -1489,7 +1489,7 @@ detect_web_parts_to_rebuild ()
sbitmap_zero (already_webs);
/* We need to recheck all uses of all webs involved in spilling (and the
uses added by spill insns, but those are not analyzed yet).
Those are the spilled webs themself, webs coalesced to spilled ones,
Those are the spilled webs themselves, webs coalesced to spilled ones,
and webs conflicting with any of them. */
for (pass = 0; pass < 2; pass++)
for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)

View File

@@ -681,7 +681,7 @@ reg_alloc ()
/* Setup debugging levels. */
switch (0)
{
/* Some usefull presets of the debug level, I often use. */
/* Some useful presets of the debug level, I often use. */
case 0: debug_new_regalloc = DUMP_EVER; break;
case 1: debug_new_regalloc = DUMP_COSTS; break;
case 2: debug_new_regalloc = DUMP_IGRAPH_M; break;
@@ -807,7 +807,7 @@ reg_alloc ()
/* Those new pseudos need to have their REFS count set. */
reg_scan_update (get_insns (), NULL, max_regno);
max_regno = max_reg_num ();
/* And they need usefull classes too. */
/* And they need useful classes too. */
regclass (get_insns (), max_reg_num (), rtl_dump_file);
rtl_dump_file = ra_dump_file;

View File

@@ -258,7 +258,7 @@ struct web
/* Number of usable colors in usable_regs. */
int num_freedom;
/* After successfull coloring the graph each web gets a new reg rtx,
/* After successful coloring the graph each web gets a new reg rtx,
with which the original uses and defs are replaced. This is it. */
rtx reg_rtx;

View File

@@ -647,7 +647,7 @@ optimize_reg_copy_2 (insn, dest, src)
}
/* INSN is a ZERO_EXTEND or SIGN_EXTEND of SRC to DEST.
Look if SRC dies there, and if it is only set once, by loading
it from memory. If so, try to encorporate the zero/sign extension
it from memory. If so, try to incorporate the zero/sign extension
into the memory read, change SRC to the mode of DEST, and alter
the remaining accesses to use the appropriate SUBREG. This allows
SRC and DEST to be tied later. */

View File

@@ -269,8 +269,8 @@ extern void transfer_replacements PARAMS ((int, int));
/* IN_RTX is the value loaded by a reload that we now decided to inherit,
or a subpart of it. If we have any replacements registered for IN_RTX,
chancel the reloads that were supposed to load them.
Return nonzero if we chanceled any reloads. */
cancel the reloads that were supposed to load them.
Return nonzero if we canceled any reloads. */
extern int remove_address_replacements PARAMS ((rtx in_rtx));
/* Like rtx_equal_p except that it allows a REG and a SUBREG to match

View File

@@ -3420,7 +3420,7 @@ hoist_test_store (x, val, live)
/* Pseudo registers can be allways replaced by another pseudo to avoid
the side effect, for hard register we must ensure that they are dead.
Eventually we may want to add code to try turn pseudos to hards, but it
is unlikely usefull. */
is unlikely useful. */
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
{

View File

@@ -3150,7 +3150,7 @@ rest_of_compilation (decl)
= combine_instructions (insns, max_reg_num ());
/* Combining insns may have turned an indirect jump into a
direct jump. Rebuid the JUMP_LABEL fields of jumping
direct jump. Rebuild the JUMP_LABEL fields of jumping
instructions. */
if (rebuild_jump_labels_after_combine)
{

View File

@@ -682,7 +682,7 @@ extern void tree_vec_elt_check_failed PARAMS ((int, int, const char *,
bounded pointer. It is insufficient to determine the boundedness
of an expression EXP with BOUNDED_POINTER_TYPE_P (TREE_TYPE (EXP)),
since we allow pointer to be temporarily cast to integer for
rounding up to an alignment boudary in a way that preserves the
rounding up to an alignment boundary in a way that preserves the
pointer's bounds.
In an IDENTIFIER_NODE, nonzero means that the name is prefixed with

View File

@@ -155,7 +155,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr)
data->dbase = NULL;
if (p_dynamic)
{
/* For dynamicly linked executables and shared libraries,
/* For dynamically linked executables and shared libraries,
DT_PLTGOT is the gp value for that object. */
ElfW(Dyn) *dyn = (ElfW(Dyn) *) (p_dynamic->p_vaddr + load_base);
for (; dyn->d_tag != DT_NULL ; dyn++)

View File

@@ -352,7 +352,7 @@ static char text_end_label[MAX_ARTIFICIAL_LABEL_BYTES];
#endif
/* This is similar to the default ASM_OUTPUT_ASCII, except that no trailing
newline is produced. When flag_verbose_asm is asserted, we add commnetary
newline is produced. When flag_verbose_asm is asserted, we add commentary
at the end of the line, so we must avoid output of a newline here. */
#ifndef ASM_OUTPUT_DEBUG_STRING
#define ASM_OUTPUT_DEBUG_STRING(FILE,P) \