builtins.c, [...]: Fix comment typos.

* builtins.c, c-pragma.h, c-typeck.c, cgraph.c, cgraphunit.c,
	combine.c, common.opt, config/dfp-bit.c, config/i386/i386.c,
	config/m68k/m68k.c, config/m68k/m68k.md, config/mt/mt.c,
	config/mt/mt.h, config/s390/s390.md, df-core.c, df-problems.c,
	df-scan.c, df.h, diagnostic.c, expr.c, function.h, gimplify.c,
	loop-invariant.c, omp-low.c, opts.c, passes.c,
	rtl-factoring.c, rtlanal.c, struct-equiv.c, tree-cfgcleanup.c,
	tree-ssa-loop-niter.c, tree-ssa-loop-prefetch.c,
	tree-ssa-structalias.c, tree-ssa-threadedge.c,
	tree-ssa-threadupdate.c, tree-vect-patterns.c,
	tree-vect-transform.c, tree-vectorizer.h, tree-vrp.c,
	unwind-dw2.c: Fix comment typos.  Follow spelling conventions.

From-SVN: r111721
Kazu Hirata 2006-03-04 23:05:24 +00:00 committed by Kazu Hirata
parent 8156c8b0b9
commit c0220ea4c5
41 changed files with 84 additions and 69 deletions

ChangeLog

@@ -1,3 +1,18 @@
+2006-03-04  Kazu Hirata  <kazu@codesourcery.com>
+
+	* builtins.c, c-pragma.h, c-typeck.c, cgraph.c, cgraphunit.c,
+	combine.c, common.opt, config/dfp-bit.c, config/i386/i386.c,
+	config/m68k/m68k.c, config/m68k/m68k.md, config/mt/mt.c,
+	config/mt/mt.h, config/s390/s390.md, df-core.c, df-problems.c,
+	df-scan.c, df.h, diagnostic.c, expr.c, function.h, gimplify.c,
+	loop-invariant.c, omp-low.c, opts.c, passes.c,
+	rtl-factoring.c, rtlanal.c, struct-equiv.c, tree-cfgcleanup.c,
+	tree-ssa-loop-niter.c, tree-ssa-loop-prefetch.c,
+	tree-ssa-structalias.c, tree-ssa-threadedge.c,
+	tree-ssa-threadupdate.c, tree-vect-patterns.c,
+	tree-vect-transform.c, tree-vectorizer.h, tree-vrp.c,
+	unwind-dw2.c: Fix comment typos.  Follow spelling conventions.
+
 2006-03-04  Kazu Hirata  <kazu@codesourcery.com>
 
 	* dwarf2out.c (dwarf2out_add_library_unit_info): Remove.

builtins.c

@@ -498,7 +498,7 @@ expand_builtin_return_addr (enum built_in_function fndecl_code, int count)
   /* For a zero count, we don't care what frame address we return, so frame
      pointer elimination is OK, and using the soft frame pointer is OK.
-     For a non-zero count, we require a stable offset from the current frame
+     For a nonzero count, we require a stable offset from the current frame
      pointer to the previous one, so we must use the hard frame pointer, and
      we must disable frame pointer elimination.  */
   if (count == 0)
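The comment fixed here documents expand_builtin_return_addr, the expander behind __builtin_return_address and __builtin_frame_address: a count of 0 names the current frame, while a nonzero count walks back through caller frames, which is only reliable relative to a hard frame pointer. A minimal user-level sketch (my own example, not from the patch):

  #include <stdio.h>

  /* count 0 is always safe; a nonzero count is what forces the hard
     frame pointer, per the comment above.  GCC may warn that nonzero
     counts are unreliable in general.  */
  void
  report (void)
  {
    void *ra = __builtin_return_address (0);   /* this frame */
    void *fp = __builtin_frame_address (1);    /* caller's frame */
    printf ("return address %p, caller frame %p\n", ra, fp);
  }

  int
  main (void)
  {
    report ();
    return 0;
  }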

c-pragma.h

@@ -25,7 +25,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #include <cpplib.h> /* For enum cpp_ttype.  */
 
 /* Pragma identifiers built in to the front end parsers.  Identifiers
-   for anciliary handlers will follow these.  */
+   for ancillary handlers will follow these.  */
 typedef enum pragma_kind {
   PRAGMA_NONE = 0,

c-typeck.c

@@ -8456,7 +8456,7 @@ c_expr_to_decl (tree expr, bool *tc ATTRIBUTE_UNUSED,
 }
 
-/* Like c_begin_compound_stmt, except force the retension of the BLOCK.  */
+/* Like c_begin_compound_stmt, except force the retention of the BLOCK.  */
 
 tree
 c_begin_omp_parallel (void)

cgraph.c

@@ -114,7 +114,7 @@ struct cgraph_node *cgraph_nodes;
 struct cgraph_node *cgraph_nodes_queue;
 
 /* Queue of cgraph nodes scheduled to be expanded.  This is a
-   secondary queue used during optimization to accomodate passes that
+   secondary queue used during optimization to accommodate passes that
    may generate new functions that need to be optimized and expanded.  */
 struct cgraph_node *cgraph_expand_queue;

cgraphunit.c

@@ -1692,7 +1692,7 @@ save_inline_function_body (struct cgraph_node *node)
   cgraph_lower_function (node);
 
   /* In non-unit-at-a-time we construct full fledged clone we never output to
-     assembly file.  This clone is pointed out by inline_decl of orginal function
+     assembly file.  This clone is pointed out by inline_decl of original function
      and inlining infrastructure knows how to deal with this.  */
   if (!flag_unit_at_a_time)
     {

combine.c

@@ -11123,7 +11123,7 @@ record_truncated_value (rtx x)
       x = SUBREG_REG (x);
     }
-  /* ??? For hard-regs we now record everthing.  We might be able to
+  /* ??? For hard-regs we now record everything.  We might be able to
      optimize this using last_set_mode.  */
   else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
     truncated_mode = GET_MODE (x);

common.opt

@@ -557,7 +557,7 @@ Give external symbols a leading underscore
 
 floop-optimize
 Common
-Does nothing.  Preserved for backward compatability.
+Does nothing.  Preserved for backward compatibility.
 
 fmath-errno
 Common Report Var(flag_errno_math) Init(1)
@@ -723,7 +723,7 @@ Add a common subexpression elimination pass after loop optimizations
 
 frerun-loop-opt
 Common
-Does nothing.  Preserved for backward compatability.
+Does nothing.  Preserved for backward compatibility.
 
 frounding-math
 Common Report Var(flag_rounding_math)
@@ -842,7 +842,7 @@ Use a stack protection method for every function
 
 fstrength-reduce
 Common
-Does nothing.  Preserved for backward compatability.
+Does nothing.  Preserved for backward compatibility.
 
 ; Nonzero if we should do (language-dependent) alias analysis.
 ; Typically, this analysis will assume that expressions of certain

config/dfp-bit.c

@@ -415,7 +415,7 @@ DFP_TO_INT (DFP_C_TYPE x)
   decNumberFromString (&qval, (char *) "1.0", &context);
   /* Force the exponent to zero.  */
   decNumberQuantize (&n1, &n2, &qval, &context);
-  /* This is based on text in N1107 secton 5.1; it might turn out to be
+  /* This is based on text in N1107 section 5.1; it might turn out to be
      undefined behavior instead.  */
   if (context.status & DEC_Invalid_operation)
     {
@@ -510,7 +510,7 @@ BFP_TO_DFP (BFP_TYPE x)
   /* Use a C library function to write the floating point value to a string.  */
 #ifdef BFP_VIA_TYPE
-  /* FIXME: Is threre a better way to output an XFmode variable in C?  */
+  /* FIXME: Is there a better way to output an XFmode variable in C?  */
   sprintf (buf, BFP_FMT, (BFP_VIA_TYPE) x);
 #else
   sprintf (buf, BFP_FMT, x);

config/i386/i386.c

@@ -742,7 +742,7 @@ const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_G
 /* We probably ought to watch for partial register stalls on Generic32
    compilation setting as well.  However in current implementation the
    partial register stalls are not eliminated very well - they can
-   be introduced via subregs synthetized by combine and can happen
+   be introduced via subregs synthesized by combine and can happen
    in caller/callee saving sequences.
    Because this option pays back little on PPro based chips and is in conflict
    with partial reg. dependencies used by Athlon/P4 based chips, it is better
@@ -777,7 +777,7 @@ const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_GENERIC;
 const int x86_shift1 = ~m_486;
 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_GENERIC;
-/* In Generic model we have an confict here in between PPro/Pentium4 based chips
+/* In Generic model we have an conflict here in between PPro/Pentium4 based chips
    that thread 128bit SSE registers as single units versus K8 based chips that
    divide SSE registers to two 64bit halves.
    x86_sse_partial_reg_dependency promote all store destinations to be 128bit

config/m68k/m68k.c

@@ -2342,7 +2342,7 @@ fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
    normally.
 
    Note SCRATCH_REG may not be in the proper mode depending on how it
-   will be used.  This routine is resposible for creating a new copy
+   will be used.  This routine is responsible for creating a new copy
    of SCRATCH_REG in the proper mode.  */
 
 int
@@ -3647,7 +3647,7 @@ m68k_regno_mode_ok (int regno, enum machine_mode mode)
 /* Return floating point values in a 68881 register.  This makes 68881 code
    a little bit faster.  It also makes -msoft-float code incompatible with
    hard-float code, so people have to be careful not to mix the two.
-   For ColdFire it was decided the ABI incopmatibility is undesirable.
+   For ColdFire it was decided the ABI incompatibility is undesirable.
    If there is need for a hard-float ABI it is probably worth doing it
    properly and also passing function arguments in FP registers.  */
 rtx

config/m68k/m68k.md

@@ -138,7 +138,7 @@
 (define_mode_attr round [(SF "%$") (DF "%&") (XF "")])
 ;; Mnemonic infix to round result for mul or div instruction
 (define_mode_attr round_mul [(SF "sgl") (DF "%&") (XF "")])
-;; Suffix specifiying source operand format
+;; Suffix specifying source operand format
 (define_mode_attr prec [(SF "s") (DF "d") (XF "x")])
 ;; Allowable D registers
 (define_mode_attr dreg [(SF "d") (DF "") (XF "")])

config/mt/mt.c

@@ -683,7 +683,7 @@ mt_legitimate_simple_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
 }
 
-/* Helper function of GO_IF_LEGITIMATE_ADDRESS.  Return non-zero if
+/* Helper function of GO_IF_LEGITIMATE_ADDRESS.  Return nonzero if
    XINSN is a legitimate address on MT.  */
 int
 mt_legitimate_address_p (enum machine_mode mode, rtx xinsn, int strict)
@@ -1648,9 +1648,9 @@ void mt_add_loop (void)
 }
 
-/* Maxium loop nesting depth.  */
+/* Maximum loop nesting depth.  */
 #define MAX_LOOP_DEPTH 4
 
-/* Maxium size of a loop (allows some headroom for delayed branch slot
+/* Maximum size of a loop (allows some headroom for delayed branch slot
    filling.  */
 #define MAX_LOOP_LENGTH (200 * 4)
@@ -1850,7 +1850,7 @@ mt_scan_loop (loop_info loop, rtx reg, rtx dbnz)
    loop iterations.  It can be nested with an automatically maintained
    stack of counter and end address registers.  It's an ideal
    candidate for doloop.  Unfortunately, gcc presumes that loops
-   always end with an explicit instriction, and the doloop_begin
+   always end with an explicit instruction, and the doloop_begin
    instruction is not a flow control instruction so it can be
    scheduled earlier than just before the start of the loop.  To make
    matters worse, the optimization pipeline can duplicate loop exit
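For readers unfamiliar with doloop: the optimization rewrites counted loops to use a hardware decrement-and-branch instruction, which is what the MT port's zero-overhead loop support hooks into. A minimal C loop of the shape doloop targets (illustrative only, not from the patch):

  /* The trip count is computable before the body runs, so the
     bottom-of-loop compare-and-branch can become a single
     decrement-and-branch-if-nonzero instruction.  */
  void
  scale_by_two (int *a, int n)
  {
    int i;
    for (i = 0; i < n; i++)
      a[i] *= 2;
  }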

config/mt/mt.h

@@ -512,7 +512,7 @@ extern struct mt_frame_info current_frame_info;
  {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM} \
 }
 
-/* A C expression that returns non-zero if the compiler is allowed to try to
+/* A C expression that returns nonzero if the compiler is allowed to try to
    replace register number FROM with register number TO.  */
 #define CAN_ELIMINATE(FROM, TO) \
  ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM \

config/s390/s390.md

@@ -454,7 +454,7 @@
 ; Load-and-Test instructions
 ;
 
-; tst(di|si) intruction pattern(s).
+; tst(di|si) instruction pattern(s).
 
 (define_insn "*tstdi_sign"
   [(set (reg CC_REGNUM)
@@ -542,7 +542,7 @@
   "lt<g>r\t%0,%0"
   [(set_attr "op_type" "RR<E>")])
 
-; tst(hi|qi) intruction pattern(s).
+; tst(hi|qi) instruction pattern(s).
 
 (define_insn "*tst<mode>CCT"
   [(set (reg CC_REGNUM)

df-core.c

@@ -80,12 +80,12 @@ calls to add a problem for a given instance of df must occur before
 the first call to DF_RESCAN_BLOCKS or DF_ANALYZE.
 
 For all of the problems defined in df-problems.c, there are
-convienence functions named DF_*_ADD_PROBLEM.
+convenience functions named DF_*_ADD_PROBLEM.
 
 Problems can be dependent on other problems.  For instance, solving
 def-use or use-def chains is dependant on solving reaching
-definitions.  As long as these dependancies are listed in the problem
+definitions.  As long as these dependencies are listed in the problem
 definition, the order of adding the problems is not material.
 Otherwise, the problems will be solved in the order of calls to
 df_add_problem.  Note that it is not necessary to have a problem.  In
@@ -100,7 +100,7 @@ to analyze the entire function and no call to df_set_blocks is made.
 When a subset is given, the analysis behaves as if the function only
 contains those blocks and any edges that occur directly between the
 blocks in the set.  Care should be taken to call df_set_blocks right
-before the call to analyze in order to eliminate the possiblity that
+before the call to analyze in order to eliminate the possibility that
 optimizations that reorder blocks invalidate the bitvector.
@@ -220,7 +220,7 @@ There are 4 ways to obtain access to refs:
 register and are put there to keep the code from forgetting about
 them.
 
-Artifical defs occur at the end of the entry block.  These arise
+Artificial defs occur at the end of the entry block.  These arise
 from registers that are live at entry to the function.
 
 2) All of the uses and defs associated with each pseudo or hard

df-problems.c

@@ -184,7 +184,7 @@ df_get_live_out (struct df *df, basic_block bb)
 ----------------------------------------------------------------------------*/
 
 /* Generic versions to get the void* version of the block info.  Only
-   used inside the problem instace vectors.  */
+   used inside the problem instance vectors.  */
 
 /* Grow the bb_info array.  */
@@ -2781,7 +2781,7 @@ df_chain_bb_reset (struct dataflow *dflow, unsigned int bb_index)
 	}
     }
 
-  /* Get rid of any chains in artifical uses or defs.  */
+  /* Get rid of any chains in artificial uses or defs.  */
   if (problem_data->flags & DF_DU_CHAIN)
     {
       struct df_ref *def;

df-scan.c

@@ -549,7 +549,7 @@ df_ref_create (struct df *df, rtx reg, rtx *loc, rtx insn,
 ----------------------------------------------------------------------------*/
 
-/* Get the artifical uses for a basic block.  */
+/* Get the artificial uses for a basic block.  */
 
 struct df_ref *
 df_get_artificial_defs (struct df *df, unsigned int bb_index)
@@ -559,7 +559,7 @@ df_get_artificial_defs (struct df *df, unsigned int bb_index)
 }
 
-/* Get the artifical uses for a basic block.  */
+/* Get the artificial uses for a basic block.  */
 
 struct df_ref *
 df_get_artificial_uses (struct df *df, unsigned int bb_index)
@@ -797,7 +797,7 @@ df_bb_refs_delete (struct dataflow *dflow, int bb_index)
 	}
     }
 
-  /* Get rid of any artifical uses or defs.  */
+  /* Get rid of any artificial uses or defs.  */
   if (bb_info)
     {
       def = bb_info->artificial_defs;

df.h

@@ -271,7 +271,7 @@ struct df_ref_info
   unsigned int bitmap_size;	/* Number of refs seen.  */
 
   /* True if refs table is organized so that every reference for a
-     pseudo is contigious.  */
+     pseudo is contiguous.  */
   bool refs_organized;
 
   /* True if the next refs should be added immediately or false to
      defer to later to reorganize the table.  */

diagnostic.c

@@ -383,7 +383,7 @@ diagnostic_report_diagnostic (diagnostic_context *context,
      option.  */
   if (context->classify_diagnostic[diagnostic->option_index] != DK_UNSPECIFIED)
     diagnostic->kind = context->classify_diagnostic[diagnostic->option_index];
-  /* This allows for future extenions, like temporarily disabling
+  /* This allows for future extensions, like temporarily disabling
      warnings for ranges of source code.  */
   if (diagnostic->kind == DK_IGNORED)
     return;
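The "future extensions" this comment anticipates are recognizable in the #pragma GCC diagnostic machinery that later GCC releases shipped; a sketch of today's usage (not part of this 2006 commit):

  /* Suppress one warning over a range of source code.  */
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wunused-variable"
  void
  demo (void)
  {
    int unused;  /* no -Wunused-variable report here */
  }
  #pragma GCC diagnostic pop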

expr.c

@@ -6282,7 +6282,7 @@ expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
 }
 
-/* Return a MEM that constains constant EXP.  DEFER is as for
+/* Return a MEM that contains constant EXP.  DEFER is as for
    output_constant_def and MODIFIER is as for expand_expr.  */
 
 static rtx

function.h

@@ -386,7 +386,7 @@ struct function GTY(())
   unsigned int calls_alloca : 1;
 
   /* Nonzero if function being compiled called builtin_return_addr or
-     builtin_frame_address with non-zero count.  */
+     builtin_frame_address with nonzero count.  */
   unsigned int accesses_prior_frames : 1;
 
   /* Nonzero if the function calls __builtin_eh_return.  */

gimplify.c

@@ -404,8 +404,8 @@ find_single_pointer_decl (tree t)
 
   if (walk_tree (&t, find_single_pointer_decl_1, &decl, NULL))
     {
-      /* find_single_pointer_decl_1 returns a non-zero value, causing
-	 walk_tree to return a non-zero value, to indicate that it
+      /* find_single_pointer_decl_1 returns a nonzero value, causing
+	 walk_tree to return a nonzero value, to indicate that it
	 found more than one pointer DECL.  */
       return NULL_TREE;
     }
@@ -4884,7 +4884,7 @@ gimplify_omp_atomic_fetch_op (tree *expr_p, tree addr, tree rhs, int index)
 }
 
 /* A subroutine of gimplify_omp_atomic_pipeline.  Walk *EXPR_P and replace
-   appearences of *LHS_ADDR with LHS_VAR.  If an expression does not involve
+   appearances of *LHS_ADDR with LHS_VAR.  If an expression does not involve
    the lhs, evaluate it into a temporary.  Return 1 if the lhs appeared as
    a subexpression, 0 if it did not, or -1 if an error was encountered.  */
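The helper described here serves gimplification of OpenMP atomics; the source construct involved looks like this (illustrative example of mine, compiled with -fopenmp):

  /* The gimplifier must find the single appearance of the lhs (*x)
     inside the rhs so the update can map onto an atomic fetch-op.  */
  void
  bump (int *x, int delta)
  {
  #pragma omp atomic
    *x += delta;
  }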

loop-invariant.c

@@ -936,7 +936,7 @@ get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed)
     {
       /* Hoisting constant pool constants into stack regs may cost more than
	 just single register.  On x87, the balance is affected both by the
-	 small number of FP registers, and by its register stack organisation,
+	 small number of FP registers, and by its register stack organization,
	 that forces us to add compensation code in and around the loop to
	 shuffle the operands to the top of stack before use, and pop them
	 from the stack after the loop finishes.

omp-low.c

@@ -47,7 +47,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
    phases.  The first phase scans the function looking for OMP statements
    and then for variables that must be replaced to satisfy data sharing
    clauses.  The second phase expands code for the constructs, as well as
-   re-gimplifing things when variables have been replaced with complex
+   re-gimplifying things when variables have been replaced with complex
    expressions.
 
    Final code generation is done by pass_expand_omp.  The flowgraph is
@@ -2042,7 +2042,7 @@ expand_parallel_call (struct omp_region *region, basic_block bb, tree ws_args)
   clauses = OMP_PARALLEL_CLAUSES (region->entry);
   push_gimplify_context ();
 
-  /* Determine what flavour of GOMP_parallel_start we will be
+  /* Determine what flavor of GOMP_parallel_start we will be
      emitting.  */
   start_ix = BUILT_IN_GOMP_PARALLEL_START;
   if (is_combined_parallel (region))
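GOMP_parallel_start is the libgomp entry point behind a parallel region; the source-level construct being expanded is simply (my own example, compiled with -fopenmp):

  #include <stdio.h>

  /* GCC outlines the region body into its own function and emits a
     call to GOMP_parallel_start -- or a combined variant when, e.g.,
     the region is a parallel work-sharing loop.  */
  int
  main (void)
  {
  #pragma omp parallel
    printf ("hello from one thread\n");
    return 0;
  }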

opts.c

@@ -1085,7 +1085,7 @@ common_handle_option (size_t scode, const char *arg, int value,
 
     case OPT_floop_optimize:
     case OPT_frerun_loop_opt:
     case OPT_fstrength_reduce:
-      /* These are no-ops, preserved for backward compatability.  */
+      /* These are no-ops, preserved for backward compatibility.  */
       break;
 
     default:

passes.c

@@ -364,7 +364,7 @@ register_dump_files_1 (struct tree_opt_pass *pass, bool ipa, int properties)
 
 /* Register the dump files for the pipeline starting at PASS.  IPA is
    true if the pass is inter-procedural, and PROPERTIES reflects the
-   properties that are guarenteed to be available at the beginning of
+   properties that are guaranteed to be available at the beginning of
    the pipeline.  */
 
 static void

rtl-factoring.c

@@ -125,7 +125,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
    TODO:
    - Use REG_ALLOC_ORDER when choosing link register.
    - Handle JUMP_INSNs.  Also handle volatile function calls (handle them
-     simmilar to unconditional jumps.)
+     similar to unconditional jumps.)
    - Test command line option -fpic.
 */
@@ -227,7 +227,7 @@ typedef struct seq_block_def
   struct seq_block_def *next_seq_block;
 } *seq_block;
 
-/* Contains same sequence candidates for futher searching.  */
+/* Contains same sequence candidates for further searching.  */
 typedef struct hash_bucket_def
 {
   /* The hash value of the group.  */
@@ -888,7 +888,7 @@ determine_seq_blocks (void)
 	}
 
       /* Ensure that SB contains a seq_block with the appropriate length.
-	 Insert a new seq_block if neccessary.  */
+	 Insert a new seq_block if necessary.  */
       if (!seq_blocks || ((*mseq)->abstracted_length < seq_blocks->length))
	{
	  sb = (seq_block) xmalloc (sizeof (struct seq_block_def));
@@ -979,7 +979,7 @@ split_blocks_after_seqs (void)
     }
 }
 
-/* Splits the best pattern sequence accoring to SEQ_BLOCKS.  Emits pseudo-call
+/* Splits the best pattern sequence according to SEQ_BLOCKS.  Emits pseudo-call
    and -return insns before and after the sequence.  */
 
 static void
@@ -1373,7 +1373,7 @@ rtl_seqabstr (void)
   /* Iterate until there are no sequences to abstract.  */
   for (iter = 1;; iter++)
     {
-      /* Recompute gain for sequences if neccessary and select sequence with
+      /* Recompute gain for sequences if necessary and select sequence with
	 biggest gain.  */
       recompute_gain ();
       if (!pattern_seqs)
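rtl-factoring performs sequence abstraction: identical instruction sequences are replaced by pseudo-calls into a single shared copy, trading speed for code size. A C-level caricature of the effect (the pass actually works on RTL; all names here are invented):

  /* Before: the same computation appears in two functions.  */
  int f1 (int a, int b) { return (a * 4 + b) ^ 0xff; }
  int f2 (int a, int b) { return ((a * 4 + b) ^ 0xff) + 1; }

  /* After, conceptually: one shared copy, both sites branch to it.  */
  static int shared (int a, int b) { return (a * 4 + b) ^ 0xff; }
  int g1 (int a, int b) { return shared (a, b); }
  int g2 (int a, int b) { return shared (a, b) + 1; }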

rtlanal.c

@@ -2196,7 +2196,7 @@ may_trap_after_code_motion_p (rtx x)
   return may_trap_p_1 (x, MTP_AFTER_MOVE);
 }
 
-/* Same as above, but additionally return non-zero if evaluating rtx X might
+/* Same as above, but additionally return nonzero if evaluating rtx X might
    cause a fault.  We define a fault for the purpose of this function as a
    erroneous execution condition that cannot be encountered during the normal
    execution of a valid program; the typical example is an unaligned memory

struct-equiv.c

@@ -483,7 +483,7 @@ rtx_equiv_p (rtx *xp, rtx y, int rvalue, struct equiv_info *info)
	     pair, use the old one.  If the width is the same, use the
	     old one if the modes match, but the new if they don't.
	     We don't want to get too fancy with subreg_regno_offset
-	     here, so we just test two straightforwad cases each.  */
+	     here, so we just test two straightforward cases each.  */
	  if (info->live_update
	      && (x_mode != GET_MODE (info->x_local[i])
		  ? size >= size_i : size > size_i))

tree-cfgcleanup.c

@@ -782,7 +782,7 @@ merge_phi_nodes (void)
	    break;
	}
 
-      /* If the loop above iterated thorugh all the PHI nodes
+      /* If the loop above iterated through all the PHI nodes
	 in BB, then we can merge the PHIs from BB into DEST.  */
       if (!phi)
	*current++ = bb;

tree-ssa-loop-niter.c

@@ -623,7 +623,7 @@ number_of_iterations_cond (tree type, affine_iv *iv0, enum tree_code code,
       return false;
     }
 
-  /* If the loop exits immediatelly, there is nothing to do.  */
+  /* If the loop exits immediately, there is nothing to do.  */
   if (zero_p (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
     {
       niter->niter = build_int_cst_type (unsigned_type_for (type), 0);

tree-ssa-loop-prefetch.c

@@ -115,7 +115,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 /* Magic constants follow.  These should be replaced by machine specific
    numbers.  */
 
-/* A number that should rouhgly correspond to the number of instructions
+/* A number that should roughly correspond to the number of instructions
    executed before the prefetch is completed.  */
 
 #ifndef PREFETCH_LATENCY
@@ -768,7 +768,7 @@ schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
   /* For now we just take memory references one by one and issue
      prefetches for as many as possible.  The groups are sorted
      starting with the largest step, since the references with
-     large step are more likely to cause many cache mises.  */
+     large step are more likely to cause many cache misses.  */
   for (; groups; groups = groups->next)
     for (ref = groups->refs; ref; ref = ref->next)
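The decisions this pass makes are materialized as __builtin_prefetch calls ahead of the real accesses, roughly as if one had written the following by hand (sketch; the lookahead distance 64 is made up):

  /* Fetch a line that will be needed many iterations from now, far
     enough ahead to hide PREFETCH_LATENCY.  Prefetching past the end
     of the array is harmless.  */
  long
  sum_array (const long *a, long n)
  {
    long i, sum = 0;
    for (i = 0; i < n; i++)
      {
        __builtin_prefetch (&a[i + 64], /*rw=*/0, /*locality=*/3);
        sum += a[i];
      }
    return sum;
  }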

tree-ssa-structalias.c

@@ -1040,7 +1040,7 @@ merge_graph_nodes (constraint_graph_t graph, unsigned int to,
				 graph->zero_weight_succs[from]);
     }
 
-  /* Merge all the non-zero weighted predecessor edges.  */
+  /* Merge all the nonzero weighted predecessor edges.  */
   for (i = 0; VEC_iterate (constraint_edge_t, predvec, i, c); i++)
     {
       unsigned int d = c->dest;
@@ -1064,7 +1064,7 @@ merge_graph_nodes (constraint_graph_t graph, unsigned int to,
     }
 
-  /* Merge all the non-zero weighted successor edges.  */
+  /* Merge all the nonzero weighted successor edges.  */
   for (i = 0; VEC_iterate (constraint_edge_t, succvec, i, c); i++)
     {
       unsigned int d = c->dest;
@@ -4049,7 +4049,7 @@ intra_create_variable_infos (void)
       lhs.type = SCALAR;
       lhs.var = create_variable_info_for (t, alias_get_name (t));
 
-      /* With flag_argument_noalias greater than one means that the incomming
+      /* With flag_argument_noalias greater than one means that the incoming
	 argument cannot alias anything except for itself so create a HEAP
	 variable.  */
       if (POINTER_TYPE_P (TREE_TYPE (t))

tree-ssa-threadedge.c

@@ -120,7 +120,7 @@ remove_temporary_equivalences (VEC(tree, heap) **stack)
       dest = VEC_pop (tree, *stack);
 
-      /* A NULL value indicates we should stop unwinding, oherwise
+      /* A NULL value indicates we should stop unwinding, otherwise
	 pop off the next entry as they're recorded in pairs.  */
       if (dest == NULL)
	break;

tree-ssa-threadupdate.c

@@ -811,7 +811,7 @@ thread_block (basic_block bb)
 }
 
 /* Walk through the registered jump threads and convert them into a
-   form convienent for this pass.
+   form convenient for this pass.
 
    Any block which has incoming edges threaded to outgoing edges
    will have its entry in THREADED_BLOCK set.
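Jump threading, for context, reroutes CFG edges past a conditional whose outcome is already known on a given incoming path. A toy source-level picture (my own example):

  /* Whichever way the first branch went determines the second test,
     so each incoming edge can be threaded straight to the correct
     successor, deleting the redundant compare on that path.  */
  int
  count_flag (int a)
  {
    int x = 0;
    if (a)
      x = 1;
    /* ... code that does not modify a ... */
    if (a)
      x++;
    return x;
  }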

tree-vect-patterns.c

@@ -41,7 +41,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #include "recog.h"
 #include "toplev.h"
 
-/* Funcion prototypes */
+/* Function prototypes */
 static void vect_pattern_recog_1
   (tree (* ) (tree, tree *, tree *), block_stmt_iterator);
 static bool widened_name_p (tree, tree, tree *, tree *);
@@ -133,7 +133,7 @@ widened_name_p (tree name, tree use_stmt, tree *half_type, tree *def_stmt)
         S7  sum_1 = prod + sum_0;
 
    where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
-   same size of 'TYPE1' or bigger.  This is a sepcial case of a reduction
+   same size of 'TYPE1' or bigger.  This is a special case of a reduction
    computation.
 
    Input:
@@ -455,7 +455,7 @@ vect_recog_widen_sum_pattern (tree last_stmt, tree *type_in, tree *type_out)
    If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
    to the available target pattern.
 
-   This function also does some bookeeping, as explained in the documentation
+   This function also does some bookkeeping, as explained in the documentation
    for vect_recog_pattern.  */
 
 static void
@@ -578,7 +578,7 @@ vect_pattern_recog_1 (
    remain irrelevant unless used by stmts other than S4.
 
    If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
-   (because they are marked as irrelevent).  It will vectorize S6, and record
+   (because they are marked as irrelevant).  It will vectorize S6, and record
    a pointer to the new vector stmt VS6 both from S6 (as usual), and also
    from S4.  We do that so that when we get to vectorizing stmts that use the
    def of S4 (like S5 that uses a_0), we'll know where to take the relevant
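The widening-sum idiom the recognizer matches (the S1..S7 sequence above) is easy to write down in C (illustrative function of mine):

  /* Each short element is widened to int before being accumulated,
     so TYPE1 (int) is exactly double the width of 'type' (short) --
     the special case of a reduction the comment describes.  */
  int
  widen_sum (const short *x, int n)
  {
    int i, sum = 0;
    for (i = 0; i < n; i++)
      sum += x[i];
    return sum;
  }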

tree-vect-transform.c

@@ -857,7 +857,7 @@ vect_create_epilog_for_reduction (tree vect_def, tree stmt,
   exit_bsi = bsi_start (exit_bb);
 
   /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
-	 (i.e. when reduc_code is not available) and in the final adjusment code
+	 (i.e. when reduc_code is not available) and in the final adjustment code
	 (if needed).  Also get the original scalar reduction variable as
	 defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
	 represents a reduction pattern), the tree-code and scalar-def are
@@ -2945,7 +2945,7 @@ vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
   append_to_statement_list_force (and_stmt, cond_expr_stmt_list);
 
   /* Make and_tmp the left operand of the conditional test against zero.
-     if and_tmp has a non-zero bit then some address is unaligned.  */
+     if and_tmp has a nonzero bit then some address is unaligned.  */
   ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
   return build2 (EQ_EXPR, boolean_type_node,
		 and_tmp_name, ptrsize_zero);
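In plain C, the runtime test vect_create_cond_for_align_checks builds amounts to OR-ing the candidate addresses together and masking the low bits (sketch; 16 stands in for the target's vector alignment):

  #include <stdint.h>

  /* A nonzero bit in and_tmp means some address is misaligned; the
     vectorized path is taken only when the result compares equal to
     zero, exactly as the comment above states.  */
  static int
  all_aligned (const void *p, const void *q)
  {
    uintptr_t and_tmp = (uintptr_t) p | (uintptr_t) q;
    return (and_tmp & (16 - 1)) == 0;
  }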

tree-vectorizer.h

@@ -208,7 +208,7 @@ typedef struct _stmt_vec_info {
   /* Stmt is part of some pattern (computation idiom)  */
   bool in_pattern_p;
 
-  /* Used for various bookeeping purposes, generally holding a pointer to
+  /* Used for various bookkeeping purposes, generally holding a pointer to
      some other stmt S that is in some way "related" to this stmt.
      Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is

tree-vrp.c

@@ -1058,7 +1058,7 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
	 there are three cases to consider.
 
-	   1. The VR_ANTI_RANGE range is competely within the
+	   1. The VR_ANTI_RANGE range is completely within the
	      VR_RANGE and the endpoints of the ranges are
	      different.  In that case the resulting range
	      should be whichever range is more precise.

unwind-dw2.c

@@ -228,7 +228,7 @@ _Unwind_SetGRValue (struct _Unwind_Context *context, int index,
   context->reg[index] = (void *) (_Unwind_Internal_Ptr) val;
 }
 
-/* Return non-zero if register INDEX is stored by value rather than
+/* Return nonzero if register INDEX is stored by value rather than
    by reference.  */
 
 static inline int