fixed-bit.c, [...]: Fix comment typos.

* config/fixed-bit.c, config/i386/cpuid.h, config/i386/i386.c,
	config/i386/i386.md, config/i386/sse.md, function.c, jump.c,
	modulo-sched.c, ra-conflict.c, toplev.c, tree-eh.c, tree-sra.c,
	tree-ssa-dse.c, tree-vect-analyze.c, tree-vect-patterns.c,
	tree-vect-transform.c: Fix comment typos.
	* doc/extend.texi: Fix a typo.

From-SVN: r129291
This commit is contained in:
Kazu Hirata 2007-10-14 01:36:18 +00:00 committed by Kazu Hirata
parent d06a846ba9
commit 84fbffb2c2
18 changed files with 32 additions and 23 deletions

View File

@@ -1,3 +1,12 @@
2007-10-14 Kazu Hirata <kazu@codesourcery.com>
* config/fixed-bit.c, config/i386/cpuid.h, config/i386/i386.c,
config/i386/i386.md, config/i386/sse.md, function.c, jump.c,
modulo-sched.c, ra-conflict.c, toplev.c, tree-eh.c, tree-sra.c,
tree-ssa-dse.c, tree-vect-analyze.c, tree-vect-patterns.c,
tree-vect-transform.c: Fix comment typos.
* doc/extend.texi: Fix a typo.
2007-10-13 David Edelsohn <edelsohn@gnu.org>
* config/rs6000/aix53.h: New file.

View File

@@ -465,7 +465,7 @@ FIXED_DIVHELPER (FIXED_C_TYPE a, FIXED_C_TYPE b, word_type satp)
r = pos_a >> (FIXED_WIDTH - FBITS);
#endif
/* Unsigned divide r by pos_b to quo_r. The remanider is in mod. */
/* Unsigned divide r by pos_b to quo_r. The remainder is in mod. */
quo_r = (UINT_C_TYPE)r / (UINT_C_TYPE)pos_b;
mod = (UINT_C_TYPE)r % (UINT_C_TYPE)pos_b;
quo_s = 0;

View File

@@ -117,7 +117,7 @@ __get_cpuid_max (unsigned int __ext, unsigned int *__sig)
/* Return cpuid data for requested cpuid level, as found in returned
eax, ebx, ecx and edx registers. The function checks if cpuid is
supported and returns 1 for valid cpuid information or 0 for
unsupported cpuid level. All pointers are requred to be non-null. */
unsupported cpuid level. All pointers are required to be non-null. */
static __inline int
__get_cpuid (unsigned int __level,

View File

@@ -1429,7 +1429,7 @@ unsigned int ix86_tune_features[X86_TUNE_LAST] = {
replacement is long decoded, so this split helps here as well. */
m_K6,
/* X86_TUNE_USE_VECTOR_CONVERTS: Preffer vector packed SSE conversion
/* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
from integer to FP. */
m_AMDFAM10,
};
@@ -13442,8 +13442,8 @@ ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
#define PPERM_REV_INV 0x60 /* bit reverse & invert src */
#define PPERM_ZERO 0x80 /* all 0's */
#define PPERM_ONES 0xa0 /* all 1's */
#define PPERM_SIGN 0xc0 /* propigate sign bit */
#define PPERM_INV_SIGN 0xe0 /* invert & propigate sign */
#define PPERM_SIGN 0xc0 /* propagate sign bit */
#define PPERM_INV_SIGN 0xe0 /* invert & propagate sign */
#define PPERM_SRC1 0x00 /* use first source byte */
#define PPERM_SRC2 0x10 /* use second source byte */
@@ -24879,7 +24879,7 @@ ix86_expand_round (rtx operand0, rtx operand1)
/* Validate whether a SSE5 instruction is valid or not.
OPERANDS is the array of operands.
NUM is the number of operands.
USES_OC0 is true if the instruction uses OC0 and provides 4 varients.
USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
NUM_MEMORY is the maximum number of memory operands to accept. */
bool ix86_sse5_valid_op_p (rtx operands[], rtx insn, int num, bool uses_oc0, int num_memory)
{
@@ -24960,7 +24960,7 @@ bool ix86_sse5_valid_op_p (rtx operands[], rtx insn, int num, bool uses_oc0, int
else if (num == 4 && num_memory == 2)
{
/* If there are two memory operations, we can load one of the memory ops
into the destination register. This is for optimizating the
into the destination register. This is for optimizing the
multiply/add ops, which the combiner has optimized both the multiply
and the add insns to have a memory operation. We have to be careful
that the destination doesn't overlap with the inputs. */

View File

@@ -207,7 +207,7 @@
(UNSPECV_PROLOGUE_USE 14)
])
;; Constants to represent pcomtrue/pcomfalse varients
;; Constants to represent pcomtrue/pcomfalse variants
(define_constants
[(PCOM_FALSE 0)
(PCOM_TRUE 1)
@@ -4840,7 +4840,7 @@
}
/* Offload operand of cvtsi2ss and cvtsi2sd into memory for
!TARGET_INTER_UNIT_CONVERSIONS
It is neccesary for the patterns to not accept nonemmory operands
It is necessary for the patterns to not accept nonmemory operands
as we would optimize out later. */
else if (!TARGET_INTER_UNIT_CONVERSIONS
&& TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (operands[0]))

View File

@@ -7749,7 +7749,7 @@
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
;; SSE5 parallel integer mutliply/add instructions for the intrinisics
;; SSE5 parallel integer multiply/add instructions for the intrinisics
(define_insn "sse5_pmacsswd"
[(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
(ss_plus:V4SI

View File

@@ -8143,7 +8143,7 @@ v2di __builtin_ia32_pshlq (v2di, v2di)
v8hi __builtin_ia32_pshlw (v8hi, v8hi)
@end smallexample
The following builtin-in functions are avaialble when @option{-msse5}
The following builtin-in functions are available when @option{-msse5}
is used. The second argument must be an integer constant and generate
the machine instruction that is part of the name with the @samp{_imm}
suffix removed.

View File

@@ -5702,7 +5702,7 @@ match_asm_constraints_1 (rtx insn, rtx *p_sets, int noutputs)
asm ("" : "=r" (output), "=m" (input) : "0" (input))
Here 'input' is used in two occurences as input (once for the
Here 'input' is used in two occurrences as input (once for the
input operand, once for the address in the second output operand).
If we would replace only the occurence of the input operand (to
make the matching) we would be left with this:
@@ -5714,7 +5714,7 @@ match_asm_constraints_1 (rtx insn, rtx *p_sets, int noutputs)
value, but different pseudos) where we formerly had only one.
With more complicated asms this might lead to reload failures
which wouldn't have happen without this pass. So, iterate over
all operands and replace all occurences of the register used. */
all operands and replace all occurrences of the register used. */
for (j = 0; j < noutputs; j++)
if (!rtx_equal_p (SET_DEST (p_sets[j]), input)
&& reg_overlap_mentioned_p (input, SET_DEST (p_sets[j])))

View File

@@ -975,7 +975,7 @@ mark_jump_label (rtx x, rtx insn, int in_mem)
(insn != NULL && x == PATTERN (insn) && JUMP_P (insn)));
}
/* Worker function for mark_jump_label. IN_MEM is TRUE when X occurrs
/* Worker function for mark_jump_label. IN_MEM is TRUE when X occurs
within a (MEM ...). IS_TARGET is TRUE when X is to be treated as a
jump-target; when the JUMP_LABEL field of INSN should be set or a
REG_LABEL_TARGET note should be added, not a REG_LABEL_OPERAND

View File

@@ -1760,7 +1760,7 @@ ps_insert_empty_row (partial_schedule_ptr ps, int split_row,
/* Given U_NODE which is the node that failed to be scheduled; LOW and
UP which are the boundaries of it's scheduling window; compute using
SCHED_NODES and II a row in the partial schedule that can be splitted
SCHED_NODES and II a row in the partial schedule that can be split
which will separate a critical predecessor from a critical successor
thereby expanding the window, and return it. */
static int

View File

@@ -1086,7 +1086,7 @@ global_conflicts (void)
}
/* Early clobbers, by definition, need to not only
clobber the registers that are live accross the insn
clobber the registers that are live across the insn
but need to clobber the registers that die within the
insn. The clobbering for registers live across the
insn is handled above. */

View File

@@ -2152,7 +2152,7 @@ lang_dependent_init (const char *name)
void
target_reinit (void)
{
/* Reinitialise RTL backend. */
/* Reinitialize RTL backend. */
backend_init_target ();
/* Reinitialize lang-dependent parts. */

View File

@@ -2173,7 +2173,7 @@ optimize_double_finally (tree one, tree two)
}
/* Perform EH refactoring optimizations that are simpler to do when code
flow has been lowered but EH structurs haven't. */
flow has been lowered but EH structures haven't. */
static void
refactor_eh_r (tree t)

View File

@@ -2876,7 +2876,7 @@ struct bitfield_overlap_info
};
/* Return true if a BIT_FIELD_REF<(FLD->parent), BLEN, BPOS>
expression (refereced as BF below) accesses any of the bits in FLD,
expression (referenced as BF below) accesses any of the bits in FLD,
false if it doesn't. If DATA is non-null, its field_len and
field_pos are filled in such that BIT_FIELD_REF<(FLD->parent),
field_len, field_pos> (referenced as BFLD below) represents the

View File

@@ -653,7 +653,7 @@ execute_simple_dse (void)
bitmap_ior_into (variables_loaded,
LOADED_SYMS (bsi_stmt (bsi)));
/* Look for statements writting into the write only variables.
/* Look for statements writing into the write only variables.
And try to remove them. */
FOR_EACH_BB (bb)

View File

@@ -2279,7 +2279,7 @@ vect_analyze_group_access (struct data_reference *dr)
/* Analyze the access pattern of the data-reference DR.
In case of non-consecutive accesse call vect_analyze_group_access() to
In case of non-consecutive accesses call vect_analyze_group_access() to
analyze groups of strided accesses. */
static bool

View File

@@ -545,7 +545,7 @@ vect_recog_pow_pattern (tree last_stmt, tree *type_in, tree *type_out)
stmts that constitute the pattern. In this case it will be:
WIDEN_SUM <x_t, sum_0>
Note: The widneing-sum idiom is a widening reduction pattern that is
Note: The widening-sum idiom is a widening reduction pattern that is
vectorized without preserving all the intermediate results. It
produces only N/2 (widened) results (by summing up pairs of
intermediate results) rather than all N results. Therefore, we

View File

@@ -1381,7 +1381,7 @@ vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
}
/* Get vectorized defintions from SLP_NODE that contains corresponding
/* Get vectorized definitions from SLP_NODE that contains corresponding
vectorized def-stmts. */
static void