diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 0d6d87b2326..cff6712fe0b 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,24 @@
+2003-01-16  Kazu Hirata
+
+	* ifcvt.c: Fix comment typos.
+	* lcm.c: Likewise.
+	* libgcc2.c: Likewise.
+	* local-alloc.c: Likewise.
+	* loop.c: Likewise.
+	* predict.c: Likewise.
+	* ra-build.c: Likewise.
+	* ra.c: Likewise.
+	* ra-colorize.c: Likewise.
+	* ra.h: Likewise.
+	* ra-rewrite.c: Likewise.
+	* regmove.c: Likewise.
+	* reload.h: Likewise.
+	* rtlanal.c: Likewise.
+	* toplev.c: Likewise.
+	* tree.h: Likewise.
+	* unwind-dw2-fde-glibc.c: Likewise.
+	* vmsdbgout.c: Likewise.
+
 2003-01-16  Richard Henderson
 
 	* dwarf2out.c (struct file_table): Remove.
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index 21a7e648319..72dc2e12f65 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -2282,7 +2282,7 @@ find_if_block (ce_info)
   int max_insns = MAX_CONDITIONAL_EXECUTE;
   int n_insns;
 
-  /* Determine if the preceeding block is an && or || block.  */
+  /* Determine if the preceding block is an && or || block.  */
   if ((n_insns = block_jumps_and_fallthru_p (bb, else_bb)) >= 0)
     {
       ce_info->and_and_p = TRUE;
@@ -2877,7 +2877,7 @@ dead_or_predicable (test_bb, merge_bb, other_bb, new_dest, reversep)
   if (HAVE_conditional_execution)
     {
       /* In the conditional execution case, we have things easy.  We know
-	 the condition is reversable.  We don't have to check life info,
+	 the condition is reversible.  We don't have to check life info,
 	 becase we're going to conditionally execute the code anyway.
 	 All that's left is making sure the insns involved can actually
 	 be predicated.  */
diff --git a/gcc/lcm.c b/gcc/lcm.c
index c43b1b1da66..8bbe893c823 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -307,7 +307,7 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
   qin = worklist;
   /* Note that we do not use the last allocated element for our queue,
      as EXIT_BLOCK is never inserted into it.  In fact the above allocation
-     of n_basic_blocks + 1 elements is not encessary.  */
+     of n_basic_blocks + 1 elements is not necessary.  */
   qend = &worklist[n_basic_blocks];
   qlen = n_basic_blocks;
 
@@ -849,9 +849,9 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
 
    The LCM algorithm is then run over the flow graph to determine where to
    place the sets to the highest-priority value in respect of first the first
-   insn in any one block.  Any adjustments required to the transparancy
+   insn in any one block.  Any adjustments required to the transparency
    vectors are made, then the next iteration starts for the next-lower
-   priority mode, till for each entity all modes are exhasted.
+   priority mode, till for each entity all modes are exhausted.
 
    More details are located in the code for optimize_mode_switching().  */
 
diff --git a/gcc/libgcc2.c b/gcc/libgcc2.c
index ac978ab48ad..6867c0d1430 100644
--- a/gcc/libgcc2.c
+++ b/gcc/libgcc2.c
@@ -234,7 +234,7 @@ __mulvdi3 (DWtype u, DWtype v)
 #endif
 
 
-/* Unless shift functions are defined whith full ANSI prototypes,
+/* Unless shift functions are defined with full ANSI prototypes,
    parameter b will be promoted to int if word_type is smaller than an int.  */
 #ifdef L_lshrdi3
 DWtype
@@ -1347,7 +1347,7 @@ gcov_exit (void)
 
 #if defined (TARGET_HAS_F_SETLKW)
       /* After a fork, another process might try to read and/or write
-	 the same file simultanously.  So if we can, lock the file to
+	 the same file simultaneously.  So if we can, lock the file to
	 avoid race conditions.  */
       while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
	      && errno == EINTR)
diff --git a/gcc/local-alloc.c b/gcc/local-alloc.c
index 36dca9be193..2e91ae1b8d0 100644
--- a/gcc/local-alloc.c
+++ b/gcc/local-alloc.c
@@ -1179,7 +1179,7 @@ update_equiv_regs ()
	    }
 
	  /* Mark REG as having no known equivalence.
-	     Some instructions might have been proceessed before and furnished
+	     Some instructions might have been processed before and furnished
	     with REG_EQUIV notes for this register; these notes will have to
	     be removed.
	     STORE is the piece of RTL that does the non-constant / conflicting
@@ -1327,7 +1327,7 @@ block_alloc (b)
	     must match operand zero.  In that case, skip any operand
	     that doesn't list operand 0 since we know that the operand
	     always conflicts with operand 0.  We
-	     ignore commutatity in this case to keep things simple.  */
+	     ignore commutativity in this case to keep things simple.  */
	  if (n_matching_alts == recog_data.n_alternatives
	      && 0 == requires_inout (recog_data.constraints[i]))
	    continue;
diff --git a/gcc/loop.c b/gcc/loop.c
index 8c3ddc751da..55c2c42b78b 100644
--- a/gcc/loop.c
+++ b/gcc/loop.c
@@ -82,7 +82,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
 
 /* Parameterize some prefetch heuristics so they can be turned on and off
-   easily for performance testing on new architecures.  These can be
+   easily for performance testing on new architectures.  These can be
    defined in target-dependent files.  */
 
 /* Prefetch is worthwhile only when loads/stores are dense.  */
@@ -793,7 +793,7 @@ scan_loop (loop, flags)
		}
	    }
 
-	  /* For parallels, add any possible uses to the depencies, as
+	  /* For parallels, add any possible uses to the dependencies, as
	     we can't move the insn without resolving them first.  */
	  if (GET_CODE (PATTERN (p)) == PARALLEL)
	    {
@@ -3620,7 +3620,7 @@ check_store (x, pat, data)
 
 /* Like rtx_equal_p, but attempts to swap commutative operands.  This is
    important to get some addresses combined.  Later more sophisticated
-   transformations can be added when necesary.
+   transformations can be added when necessary.
 
    ??? Same trick with swapping operand is done at several other places.
    It can be nice to develop some common way to handle this.  */
@@ -5537,7 +5537,7 @@ valid_initial_value_p (x, insn, call_seen, loop_start)
    as a possible giv.  INSN is the insn whose pattern X comes from.
    NOT_EVERY_ITERATION is 1 if the insn might not be executed during
    every loop iteration.  MAYBE_MULTIPLE is 1 if the insn might be executed
-   more thanonce in each loop iteration.  */
+   more than once in each loop iteration.  */
 
 static void
 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
@@ -5742,7 +5742,7 @@ record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
   rtx set = single_set (insn);
   rtx temp;
 
-  /* Attempt to prove constantness of the values.  Don't let simplity_rtx
+  /* Attempt to prove constantness of the values.  Don't let simplify_rtx
     undo the MULT canonicalization that we performed earlier.  */
   temp = simplify_rtx (add_val);
   if (temp
@@ -6686,7 +6686,7 @@ simplify_giv_expr (loop, x, ext_val, benefit)
						  arg1)),
			       ext_val, benefit);
	}
 
-      /* Porpagate the MULT expressions to the intermost nodes.  */
+      /* Propagate the MULT expressions to the intermost nodes.  */
       else if (GET_CODE (arg0) == PLUS)
	{
	  /* (invar_0 + invar_1) * invar_2.  Distribute.  */
@@ -7372,7 +7372,7 @@ check_ext_dependent_givs (bl, loop_info)
      constants in order to be certain of no overflow.  */
   /* ??? An unknown iteration count with an increment of +-1
      combined with friendly exit tests of against an invariant
-     value is also ameanable to optimization.  Not implemented.  */
+     value is also amenable to optimization.  Not implemented.  */
   if (loop_info->n_iterations > 0
       && bl->initial_value
       && GET_CODE (bl->initial_value) == CONST_INT
@@ -7394,7 +7394,7 @@ check_ext_dependent_givs (bl, loop_info)
	neg_incr = 1, abs_incr = -abs_incr;
       total_incr = abs_incr * loop_info->n_iterations;
 
-      /* Check for host arithmatic overflow.  */
+      /* Check for host arithmetic overflow.  */
       if (total_incr / loop_info->n_iterations == abs_incr)
	{
	  unsigned HOST_WIDE_INT u_max;
@@ -7407,7 +7407,7 @@ check_ext_dependent_givs (bl, loop_info)
 
	  /* Check zero extension of biv ok.  */
	  if (start_val >= 0
-	      /* Check for host arithmatic overflow.  */
+	      /* Check for host arithmetic overflow.  */
	      && (neg_incr
		  ? u_end_val < u_start_val
		  : u_end_val > u_start_val)
@@ -7425,7 +7425,7 @@ check_ext_dependent_givs (bl, loop_info)
	     keep this fact in mind -- myself included on occasion.
	     So leave alone with the signed overflow optimizations.  */
	  if (start_val >= -s_max - 1
-	      /* Check for host arithmatic overflow.  */
+	      /* Check for host arithmetic overflow.  */
	      && (neg_incr
		  ? s_end_val < start_val
		  : s_end_val > start_val)
@@ -10541,7 +10541,7 @@ loop_insn_sink (loop, pattern)
 }
 
 /* bl->final_value can be eighter general_operand or PLUS of general_operand
-   and constant.  Emit sequence of intructions to load it into REG */
+   and constant.  Emit sequence of instructions to load it into REG.  */
 static rtx
 gen_load_of_final_value (reg, final_value)
      rtx reg, final_value;
diff --git a/gcc/predict.c b/gcc/predict.c
index 4ca71d3b0d2..71009faafdb 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -570,7 +570,7 @@ estimate_probability (loops_info)
	      if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
		;
	      /* Comparisons with 0 are often used for booleans and there is
-		 nothing usefull to predict about them.  */
+		 nothing useful to predict about them.  */
	      else if (XEXP (cond, 1) == const0_rtx
		       || XEXP (cond, 0) == const0_rtx)
		;
@@ -586,7 +586,7 @@ estimate_probability (loops_info)
	      if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
		;
	      /* Comparisons with 0 are often used for booleans and there is
-		 nothing usefull to predict about them.  */
+		 nothing useful to predict about them.  */
	      else if (XEXP (cond, 1) == const0_rtx
		       || XEXP (cond, 0) == const0_rtx)
		;
diff --git a/gcc/ra-build.c b/gcc/ra-build.c
index fa90ab4f98f..d5d923eba02 100644
--- a/gcc/ra-build.c
+++ b/gcc/ra-build.c
@@ -2653,7 +2653,7 @@ detect_remat_webs ()
		 oldwebs can't have their references changed.  The
		 incremental machinery barfs on that.  */
	      || (!rtx_unstable_p (src) && !contains_pseudo (src))
-	      /* Additionally also memrefs to stack-slots are usefull, when
+	      /* Additionally also memrefs to stack-slots are useful, when
		 we created them ourself.  They might not have set their
		 unchanging flag set, but nevertheless they are stable across
		 the livetime in question.  */
diff --git a/gcc/ra-colorize.c b/gcc/ra-colorize.c
index b1da016e78a..074f7356dac 100644
--- a/gcc/ra-colorize.c
+++ b/gcc/ra-colorize.c
@@ -1511,7 +1511,7 @@ colorize_one_web (web, hard)
	  struct web *aw = alias (w);
	  /* If we are a spill-temp, we also look at webs coalesced
	     to precolored ones.  Otherwise we only look at webs which
-	     themself were colored, or coalesced to one.  */
+	     themselves were colored, or coalesced to one.  */
	  if (aw->type == PRECOLORED && w != aw && web->spill_temp
	      && flag_ra_optimistic_coalescing)
	    {
diff --git a/gcc/ra-rewrite.c b/gcc/ra-rewrite.c
index e10ddd3551d..22381967d71 100644
--- a/gcc/ra-rewrite.c
+++ b/gcc/ra-rewrite.c
@@ -1489,7 +1489,7 @@ detect_web_parts_to_rebuild ()
   sbitmap_zero (already_webs);
   /* We need to recheck all uses of all webs involved in spilling (and the
      uses added by spill insns, but those are not analyzed yet).
-     Those are the spilled webs themself, webs coalesced to spilled ones,
+     Those are the spilled webs themselves, webs coalesced to spilled ones,
      and webs conflicting with any of them.  */
   for (pass = 0; pass < 2; pass++)
     for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
diff --git a/gcc/ra.c b/gcc/ra.c
index 785ef949bc2..dfd4ef5b519 100644
--- a/gcc/ra.c
+++ b/gcc/ra.c
@@ -681,7 +681,7 @@ reg_alloc ()
   /* Setup debugging levels.  */
   switch (0)
     {
-      /* Some usefull presets of the debug level, I often use.  */
+      /* Some useful presets of the debug level, I often use.  */
       case 0: debug_new_regalloc = DUMP_EVER; break;
       case 1: debug_new_regalloc = DUMP_COSTS; break;
       case 2: debug_new_regalloc = DUMP_IGRAPH_M; break;
@@ -807,7 +807,7 @@ reg_alloc ()
   /* Those new pseudos need to have their REFS count set.  */
   reg_scan_update (get_insns (), NULL, max_regno);
   max_regno = max_reg_num ();
-  /* And they need usefull classes too.  */
+  /* And they need useful classes too.  */
   regclass (get_insns (), max_reg_num (), rtl_dump_file);
   rtl_dump_file = ra_dump_file;
 
diff --git a/gcc/ra.h b/gcc/ra.h
index 04962df941b..522b77a7586 100644
--- a/gcc/ra.h
+++ b/gcc/ra.h
@@ -258,7 +258,7 @@ struct web
   /* Number of usable colors in usable_regs.  */
   int num_freedom;
 
-  /* After successfull coloring the graph each web gets a new reg rtx,
+  /* After successful coloring the graph each web gets a new reg rtx,
      with which the original uses and defs are replaced.  This is it.  */
   rtx reg_rtx;
 
diff --git a/gcc/regmove.c b/gcc/regmove.c
index 0e6a595ced9..3553a40bc3a 100644
--- a/gcc/regmove.c
+++ b/gcc/regmove.c
@@ -647,7 +647,7 @@ optimize_reg_copy_2 (insn, dest, src)
 }
 /* INSN is a ZERO_EXTEND or SIGN_EXTEND of SRC to DEST.
    Look if SRC dies there, and if it is only set once, by loading
-   it from memory.  If so, try to encorporate the zero/sign extension
+   it from memory.  If so, try to incorporate the zero/sign extension
    into the memory read, change SRC to the mode of DEST, and alter the
    remaining accesses to use the appropriate SUBREG.  This allows SRC and
    DEST to be tied later.  */
diff --git a/gcc/reload.h b/gcc/reload.h
index 04632245912..adc2984c0b3 100644
--- a/gcc/reload.h
+++ b/gcc/reload.h
@@ -269,8 +269,8 @@ extern void transfer_replacements PARAMS ((int, int));
 
 /* IN_RTX is the value loaded by a reload that we now decided to inherit,
    or a subpart of it.  If we have any replacements registered for IN_RTX,
-   chancel the reloads that were supposed to load them.
-   Return nonzero if we chanceled any reloads.  */
+   cancel the reloads that were supposed to load them.
+   Return nonzero if we canceled any reloads.  */
 extern int remove_address_replacements PARAMS ((rtx in_rtx));
 
 /* Like rtx_equal_p except that it allows a REG and a SUBREG to match
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index 030682e43f9..5e0677066a8 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -3420,7 +3420,7 @@ hoist_test_store (x, val, live)
       /* Pseudo registers can be allways replaced by another pseudo to avoid
	 the side effect, for hard register we must ensure that they are dead.
	 Eventually we may want to add code to try turn pseudos to hards, but it
-	 is unlikely usefull.  */
+	 is unlikely useful.  */
 
       if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	{
diff --git a/gcc/toplev.c b/gcc/toplev.c
index b860500a871..80ddd8eb42c 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -3150,7 +3150,7 @@ rest_of_compilation (decl)
	= combine_instructions (insns, max_reg_num ());
 
       /* Combining insns may have turned an indirect jump into a
-	 direct jump.  Rebuid the JUMP_LABEL fields of jumping
+	 direct jump.  Rebuild the JUMP_LABEL fields of jumping
	 instructions.  */
       if (rebuild_jump_labels_after_combine)
	{
diff --git a/gcc/tree.h b/gcc/tree.h
index 269d05e8305..212cfa427c7 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -682,7 +682,7 @@ extern void tree_vec_elt_check_failed PARAMS ((int, int, const char *,
    bounded pointer.  It is insufficient to determine the boundedness
    of an expression EXP with BOUNDED_POINTER_TYPE_P (TREE_TYPE (EXP)),
    since we allow pointer to be temporarily cast to integer for
-   rounding up to an alignment boudary in a way that preserves the
+   rounding up to an alignment boundary in a way that preserves the
    pointer's bounds.
 
    In an IDENTIFIER_NODE, nonzero means that the name is prefixed with
diff --git a/gcc/unwind-dw2-fde-glibc.c b/gcc/unwind-dw2-fde-glibc.c
index ba9e3d27d03..d4b62349fca 100644
--- a/gcc/unwind-dw2-fde-glibc.c
+++ b/gcc/unwind-dw2-fde-glibc.c
@@ -155,7 +155,7 @@ _Unwind_IteratePhdrCallback (struct dl_phdr_info *info, size_t size, void *ptr)
   data->dbase = NULL;
   if (p_dynamic)
     {
-      /* For dynamicly linked executables and shared libraries,
+      /* For dynamically linked executables and shared libraries,
	 DT_PLTGOT is the gp value for that object.  */
      ElfW(Dyn) *dyn = (ElfW(Dyn) *) (p_dynamic->p_vaddr + load_base);
      for (; dyn->d_tag != DT_NULL ; dyn++)
diff --git a/gcc/vmsdbgout.c b/gcc/vmsdbgout.c
index 278d16b6458..a5cb8a2524d 100644
--- a/gcc/vmsdbgout.c
+++ b/gcc/vmsdbgout.c
@@ -352,7 +352,7 @@ static char text_end_label[MAX_ARTIFICIAL_LABEL_BYTES];
 #endif
 
 /* This is similar to the default ASM_OUTPUT_ASCII, except that no trailing
-   newline is produced.  When flag_verbose_asm is asserted, we add commnetary
+   newline is produced.  When flag_verbose_asm is asserted, we add commentary
    at the end of the line, so we must avoid output of a newline here.  */
 #ifndef ASM_OUTPUT_DEBUG_STRING
 #define ASM_OUTPUT_DEBUG_STRING(FILE,P) \