basic-block.h, [...]: Fix comment typos.

* basic-block.h, config/i386/winnt.c, config/pa/pa.c,
	config/s390/s390.c, dfp.c, expr.c, fold-const.c, params.def,
	reload.c, struct-equiv.c, tree-ssa-ccp.c, tree-ssa-pre.c,
	tree-ssa-reassoc.c, tree-ssa-structalias.c: Fix comment typos.
	* doc/invoke.texi, doc/tm.texi: Fix typos.

From-SVN: r108626
Kazu Hirata 2005-12-16 06:09:31 +00:00 committed by Kazu Hirata
parent bcf51da29c
commit 6416ae7f28
17 changed files with 30 additions and 22 deletions

gcc/ChangeLog View File

@@ -1,3 +1,11 @@
+2005-12-16 Kazu Hirata <kazu@codesourcery.com>
+	* basic-block.h, config/i386/winnt.c, config/pa/pa.c,
+	config/s390/s390.c, dfp.c, expr.c, fold-const.c, params.def,
+	reload.c, struct-equiv.c, tree-ssa-ccp.c, tree-ssa-pre.c,
+	tree-ssa-reassoc.c, tree-ssa-structalias.c: Fix comment typos.
+	* doc/invoke.texi, doc/tm.texi: Fix typos.
2005-12-16 Ben Elliston <bje@au.ibm.com>
* real.c (decimal_quad_format): Correct values for emin, emax.

gcc/basic-block.h View File

@@ -1115,7 +1115,7 @@ struct equiv_info
NEED_RERUN is set. This has to be tested by the caller to re-run
the comparison if the match appears otherwise sound. The state kept in
x_start, y_start, equiv_used and check_input_conflict ensures that
-we won't loop indefinetly. */
+we won't loop indefinitely. */
bool need_rerun;
/* If there is indication of an input conflict at the end,
CHECK_INPUT_CONFLICT is set so that we'll check for input conflicts
@@ -1156,7 +1156,7 @@ struct equiv_info
that are being compared. A final jump insn will not be included. */
rtx x_end, y_end;
-/* If we are matching tablejumps, X_LABEL in X_BLOCK coresponds to
+/* If we are matching tablejumps, X_LABEL in X_BLOCK corresponds to
Y_LABEL in Y_BLOCK. */
rtx x_label, y_label;

gcc/config/i386/winnt.c View File

@@ -155,7 +155,7 @@ i386_pe_dllimport_p (tree decl)
/* The DECL_DLLIMPORT_P flag was set for decls in the class definition
by targetm.cxx.adjust_class_at_definition. Check again to emit
-warnings if the class attribute has been overriden by an
+warnings if the class attribute has been overridden by an
out-of-class definition. */
if (associated_type (decl)
&& lookup_attribute ("dllimport",

gcc/config/pa/pa.c View File

@@ -5304,7 +5304,7 @@ output_deferred_plabels (void)
/* If we have some deferred plabels, then we need to switch into the
data or readonly data section, and align it to a 4 byte boundary
-before outputing the deferred plabels. */
+before outputting the deferred plabels. */
if (n_deferred_plabels)
{
switch_to_section (flag_pic ? data_section : readonly_data_section);

gcc/config/s390/s390.c View File

@@ -3998,7 +3998,7 @@ struct alignment_context
rtx shift; /* Bit offset with regard to lsb. */
rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
rtx modemaski; /* ~modemask */
-bool aligned; /* True if memory is aliged, false else. */
+bool aligned; /* True if memory is aligned, false else. */
};
/* A subroutine of s390_expand_cs_hqi and s390_expand_atomic to initialize

gcc/dfp.c View File

@@ -333,7 +333,7 @@ decimal_from_binary (REAL_VALUE_TYPE *to, const REAL_VALUE_TYPE *from)
}
/* Helper function to real.c:do_compare() to handle decimal internal
-represenation including when one of the operands is still in the
+representation including when one of the operands is still in the
binary internal representation. */
int

gcc/doc/invoke.texi View File

@@ -5853,7 +5853,7 @@ A, the growth of unit is 300\% and yet such inlining is very sane. For very
large units consisting of small inlininable functions however the overall unit
growth limit is needed to avoid exponential explosion of code size. Thus for
smaller units, the size is increased to @option{--param large-unit-insns}
-before aplying @option{--param inline-unit-growth}. The default is 10000
+before applying @option{--param inline-unit-growth}. The default is 10000
@item inline-unit-growth
Specifies maximal overall growth of the compilation unit caused by inlining.

gcc/doc/tm.texi View File

@@ -3032,7 +3032,7 @@ The final value should conincide with that calculated by
Normally the CFA is calculated as an offset from the argument pointer,
via @code{ARG_POINTER_CFA_OFFSET}, but if the argument pointer is
variable due to the ABI, this may not be possible. If this macro is
-defined, it imples that the virtual register instantiation should be
+defined, it implies that the virtual register instantiation should be
based on the frame pointer instead of the argument pointer. Only one
of @code{FRAME_POINTER_CFA_OFFSET} and @code{ARG_POINTER_CFA_OFFSET}
should be defined.

gcc/expr.c View File

@@ -8090,7 +8090,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
else
comparison_code = unsignedp ? LEU : LE;
-/* Canonicalize to comparsions against 0. */
+/* Canonicalize to comparisons against 0. */
if (op1 == const1_rtx)
{
/* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)

gcc/fold-const.c View File

@@ -6809,7 +6809,7 @@ fold_unary (enum tree_code code, tree type, tree op0)
if (TREE_TYPE (op0) == type)
return op0;
-/* If we have (type) (a CMP b) and type is an integal type, return
+/* If we have (type) (a CMP b) and type is an integral type, return
new expression involving the new type. */
if (COMPARISON_CLASS_P (op0) && INTEGRAL_TYPE_P (type))
return fold_build2 (TREE_CODE (op0), type, TREE_OPERAND (op0, 0),
@@ -8408,7 +8408,7 @@ fold_binary (enum tree_code code, tree type, tree op0, tree op1)
}
/* Optimize tan(x)/sin(x) as 1.0/cos(x) if we don't care about
-NaNs or Infintes. */
+NaNs or Infinities. */
if (((fcode0 == BUILT_IN_TAN && fcode1 == BUILT_IN_SIN)
|| (fcode0 == BUILT_IN_TANF && fcode1 == BUILT_IN_SINF)
|| (fcode0 == BUILT_IN_TANL && fcode1 == BUILT_IN_SINL)))

gcc/params.def View File

@@ -321,7 +321,7 @@ DEFPARAM(HOT_BB_FREQUENCY_FRACTION,
the other loops cold that is not usually the case. So we need to artificially
flatten the profile.
-We need to cut the maximal predicted iterations to large enought iterations
+We need to cut the maximal predicted iterations to large enough iterations
so the loop appears important, but safely within HOT_BB_COUNT_FRACTION
range. */

gcc/reload.c View File

@@ -366,7 +366,7 @@ push_secondary_reload (int in_p, rtx x, int opnum, int optional,
gcc_assert (insn_data[(int) icode].n_operands == 3);
/* ??? We currently have no way to represent a reload that needs
-an icode to reload from an intermediate tertiaty reload register.
+an icode to reload from an intermediate tertiary reload register.
We should probably have a new field in struct reload to tag a
chain of scratch operand reloads onto. */
gcc_assert (class == NO_REGS);

gcc/struct-equiv.c View File

@@ -54,7 +54,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
the number of inputs an miss an input conflict. Sufficient information
is gathered so that when we make another pass, we won't have to backtrack
at the same point.
-Another issue is that information in memory atttributes and/or REG_NOTES
+Another issue is that information in memory attributes and/or REG_NOTES
might have to be merged or discarded to make a valid match. We don't want
to discard such information when we are not certain that we want to merge
the two (partial) blocks.
@@ -99,7 +99,7 @@ static bool resolve_input_conflict (struct equiv_info *info);
SECONDARY_MEMORY_NEEDED, cannot be done directly. For our purposes, we
consider them impossible to generate after reload (even though some
might be synthesized when you throw enough code at them).
-Since we don't know while procesing a cross-jump if a local register
+Since we don't know while processing a cross-jump if a local register
that is currently live will eventually be live and thus be an input,
we keep track of potential inputs that would require an impossible move
by using a prohibitively high cost for them.
@@ -201,7 +201,7 @@ merge_memattrs (rtx x, rtx y)
}
/* In SET, assign the bit for the register number of REG the value VALUE.
-If REG is a hard register, do so for all its consituent registers.
+If REG is a hard register, do so for all its constituent registers.
Return the number of registers that have become included (as a positive
number) or excluded (as a negative number). */
static int
@@ -1128,7 +1128,7 @@ struct_equiv_block_eq (int mode, struct equiv_info *info)
if (mode & STRUCT_EQUIV_MATCH_JUMPS)
{
-/* The caller is expected to have comapred the jumps already, but we
+/* The caller is expected to have compared the jumps already, but we
need to match them again to get any local registers and inputs. */
gcc_assert (!info->cur.x_start == !info->cur.y_start);
if (info->cur.x_start)

gcc/tree-ssa-ccp.c View File

@@ -276,7 +276,7 @@ debug_lattice_value (prop_value_t val)
/* The regular is_gimple_min_invariant does a shallow test of the object.
It assumes that full gimplification has happened, or will happen on the
object. For a value coming from DECL_INITIAL, this is not true, so we
-have to be more strict outselves. */
+have to be more strict ourselves. */
static bool
ccp_decl_initial_min_invariant (tree t)

gcc/tree-ssa-pre.c View File

@@ -555,7 +555,7 @@ bitmap_set_copy (bitmap_set_t dest, bitmap_set_t orig)
bitmap_copy (dest->values, orig->values);
}
-/* Perform bitmapped set rperation DEST &= ORIG. */
+/* Perform bitmapped set operation DEST &= ORIG. */
static void
bitmap_set_and (bitmap_set_t dest, bitmap_set_t orig)

gcc/tree-ssa-reassoc.c View File

@@ -69,7 +69,7 @@ Boston, MA 02110-1301, USA. */
In order to promote the most redundancy elimination, you want
binary expressions whose operands are the same rank (or
-preferrably, the same value) exposed to the redundancy eliminator,
+preferably, the same value) exposed to the redundancy eliminator,
for possible elimination.
So the way to do this if we really cared, is to build the new op

gcc/tree-ssa-structalias.c View File

@@ -3294,7 +3294,7 @@ find_func_aliases (tree origt)
gcc_assert (found);
}
-/* Assign all the passed arguments to the approriate incoming
+/* Assign all the passed arguments to the appropriate incoming
parameters of the function. */
fi = get_varinfo (varid);
arglist = TREE_OPERAND (rhsop, 1);
@@ -3683,7 +3683,7 @@ create_function_info_for (tree decl, const char *name)
arg = DECL_ARGUMENTS (decl);
-/* Set up varirables for each argument. */
+/* Set up variables for each argument. */
for (i = 1; i < vi->fullsize; i++)
{
varinfo_t argvi;