From bd4b6b0d78e2ab3ffbd4e53f534eb1f85c1b203c Mon Sep 17 00:00:00 2001
From: Jeff Law <law@redhat.com>
Date: Tue, 12 Jan 2016 15:38:06 -0700
Subject: [PATCH] * tree-ssa-threadupdate.c: Various whitespace and typo
 fixes.

From-SVN: r232297
---
 gcc/ChangeLog               |  4 ++
 gcc/tree-ssa-threadupdate.c | 78 ++++++++++++++++++-------------------
 2 files changed, 43 insertions(+), 39 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 8df676c7d67..3c2cf3f9e8c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,7 @@
+2016-01-12  Jeff Law  <law@redhat.com>
+
+	* tree-ssa-threadupdate.c: Various whitespace and typo fixes.
+
 2016-01-12  Olivier Hainque  <hainque@adacore.com>
 
 	* gcc.c (spec_undefvar_allowed): New global.
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index e118c497e96..1bf9ae66772 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -353,7 +353,7 @@ lookup_redirection_data (edge e, enum insert_option insert)
   struct redirection_data *elt;
   vec<jump_thread_edge *> *path = THREAD_PATH (e);
 
-  /* Build a hash table element so we can see if E is already 
+  /* Build a hash table element so we can see if E is already
      in the table.  */
   elt = XNEW (struct redirection_data);
   elt->path = path;
@@ -635,21 +635,21 @@ any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
    are not part of any jump threading path, but add profile counts along
    the path.
 
-   In the aboe example, after all jump threading is complete, we will
+   In the above example, after all jump threading is complete, we will
    end up with the following control flow:
 
-             A          B            C
-             |          |            |
-           Ea|          |Eb          |Ec
-             |          |            |
-             v          v            v
-            Ja          J            Jc
-            / \        / \Eon'      / \
-       Eona/   \   ---/---\--------    \Eonc
-          /     \ /  /     \            \
+             A          B            C
+             |          |            |
+           Ea|          |Eb          |Ec
+             |          |            |
+             v          v            v
+            Ja          J            Jc
+            / \        / \Eon'      / \
+       Eona/   \   ---/---\--------    \Eonc
+          /     \ /  /     \            \
           v      v v       v             v
          Sona    Soff      Son          Sonc
-           \                /\           /
+           \                /\           /
            \___________    /  \  _____/
                        \  /    \/
                         vv      v
@@ -793,19 +793,19 @@ compute_path_counts (struct redirection_data *rd,
 	 coming into the path that will contribute to the count flowing
 	 into the path successor.  */
       if (has_joiner && epath != elast)
-        {
-          /* Look for other incoming edges after joiner.  */
-          FOR_EACH_EDGE (ein, ei, epath->dest->preds)
-            {
-              if (ein != epath
-                  /* Ignore in edges from blocks we have duplicated for a
-                     threading path, which have duplicated edge counts until
-                     they are redirected by an invocation of this routine.  */
-                  && !bitmap_bit_p (local_info->duplicate_blocks,
-                                    ein->src->index))
-                nonpath_count += ein->count;
-            }
-        }
+	{
+	  /* Look for other incoming edges after joiner.  */
+	  FOR_EACH_EDGE (ein, ei, epath->dest->preds)
+	    {
+	      if (ein != epath
+		  /* Ignore in edges from blocks we have duplicated for a
+		     threading path, which have duplicated edge counts until
+		     they are redirected by an invocation of this routine.  */
+		  && !bitmap_bit_p (local_info->duplicate_blocks,
+				    ein->src->index))
+		nonpath_count += ein->count;
+	    }
+	}
       if (cur_count < path_out_count)
 	path_out_count = cur_count;
       if (epath->count < min_path_count)
@@ -827,14 +827,14 @@ compute_path_counts (struct redirection_data *rd,
      difference between elast->count and nonpath_count.  Otherwise the edge
      counts after threading will not be sane.  */
   if (has_joiner && path_out_count < elast->count - nonpath_count)
-    {
-      path_out_count = elast->count - nonpath_count;
-      /* But neither can we go above the minimum count along the path
-         we are duplicating.  This can be an issue due to profile
-         insanities coming in to this pass.  */
-      if (path_out_count > min_path_count)
-        path_out_count = min_path_count;
-    }
+    {
+      path_out_count = elast->count - nonpath_count;
+      /* But neither can we go above the minimum count along the path
+	 we are duplicating.  This can be an issue due to profile
+	 insanities coming in to this pass.  */
+      if (path_out_count > min_path_count)
+	path_out_count = min_path_count;
+    }
 
   *path_in_count_ptr = path_in_count;
   *path_out_count_ptr = path_out_count;
@@ -1268,17 +1268,17 @@ ssa_fix_duplicate_block_edges (struct redirection_data *rd,
 	     thread path (path_in_freq).  If we had a joiner, it would have
 	     been updated at the end of that handling to the edge frequency
 	     along the duplicated joiner path edge.  */
-          update_profile (epath, NULL, path_out_count, path_out_count,
-                          cur_path_freq);
+	  update_profile (epath, NULL, path_out_count, path_out_count,
+			  cur_path_freq);
 	}
 
       /* Increment the index into the duplicated path when we processed
 	 a duplicated block.  */
       if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
 	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
-        {
+	{
 	  count++;
-        }
+	}
     }
 
   /* Now walk orig blocks and update their probabilities, since the
@@ -2383,7 +2383,7 @@ valid_jump_thread_path (vec<jump_thread_edge *> *path)
       struct loop *loop = e->dest->loop_father;
 
       if (e->dest != (*path)[j+1]->e->src)
-        return false;
+	return false;
 
       /* If we're threading through the loop latch back into the
 	 same loop and the destination does not dominate the loop
@@ -2705,7 +2705,7 @@ register_jump_thread (vec<jump_thread_edge *> *path)
   for (unsigned int i = 0; i < path->length (); i++)
     {
       if ((*path)[i]->e == NULL)
-        {
+	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    {
 	      fprintf (dump_file,
@@ -2715,7 +2715,7 @@ register_jump_thread (vec<jump_thread_edge *> *path)
 
 	  delete_jump_thread_path (path);
 	  return;
-        }
+	}
 
       /* Only the FSM threader is allowed to thread across
 	 backedges in the CFG.  */
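
The two compute_path_counts hunks above only reindent the clamping logic,
but the arithmetic they touch is worth seeing in isolation: when the path
starts with a joiner block, the count flowing out of the duplicated path
must cover whatever part of elast->count the non-path incoming edges
cannot explain, yet must not exceed the minimum count seen anywhere along
the path.  Below is a minimal standalone sketch of that clamping, assuming
a gcov_type-like signed 64-bit counter; the helper name
clamp_path_out_count is hypothetical, not a function in
tree-ssa-threadupdate.c.

/* Hypothetical standalone sketch of the clamping performed inside
   compute_path_counts; count_t stands in for GCC's gcov_type.  */
#include <stdint.h>

typedef int64_t count_t;

static count_t
clamp_path_out_count (count_t path_out_count, count_t min_path_count,
		      count_t elast_count, count_t nonpath_count,
		      int has_joiner)
{
  /* With a joiner, the outgoing count must cover the part of
     elast_count that the non-path edges cannot explain ...  */
  if (has_joiner && path_out_count < elast_count - nonpath_count)
    {
      path_out_count = elast_count - nonpath_count;
      /* ... but it may not exceed the minimum count along the
	 duplicated path, which an insane input profile could
	 otherwise make it do.  */
      if (path_out_count > min_path_count)
	path_out_count = min_path_count;
    }
  return path_out_count;
}

For example, with elast_count = 100, nonpath_count = 30 and
min_path_count = 60, an initial path_out_count of 50 is first raised to
70 (= 100 - 30) and then clamped back down to 60.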