* tree-ssa-threadupdate.c: Various whitespace and typo fixes.

From-SVN: r232297
Jeff Law 2016-01-12 15:38:06 -07:00 committed by Jeff Law
parent 26b5ace7ea
commit bd4b6b0d78
2 changed files with 43 additions and 39 deletions

gcc/ChangeLog

@@ -1,3 +1,7 @@
2016-01-12  Jeff Law  <law@redhat.com>

	* tree-ssa-threadupdate.c: Various whitespace and typo fixes.

2016-01-12  Olivier Hainque  <hainque@adacore.com>

	* gcc.c (spec_undefvar_allowed): New global.

gcc/tree-ssa-threadupdate.c

@@ -353,7 +353,7 @@ lookup_redirection_data (edge e, enum insert_option insert)
struct redirection_data *elt;
vec<jump_thread_edge *> *path = THREAD_PATH (e);
/* Build a hash table element so we can see if E is already
in the table. */
elt = XNEW (struct redirection_data);
elt->path = path;
@@ -635,21 +635,21 @@ any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
are not part of any jump threading path, but add profile counts along
the path.
In the above example, after all jump threading is complete, we will
end up with the following control flow:
A B C
| | |
Ea| |Eb |Ec
| | |
v v v
Ja J Jc
/ \ / \Eon' / \
Eona/ \ ---/---\-------- \Eonc
/ \ / / \ \
v v v v v
Sona Soff Son Sonc
\ /\ /
\___________ / \ _____/
\ / \/
vv v
@@ -793,19 +793,19 @@ compute_path_counts (struct redirection_data *rd,
coming into the path that will contribute to the count flowing
into the path successor. */
if (has_joiner && epath != elast)
{
/* Look for other incoming edges after joiner. */
FOR_EACH_EDGE (ein, ei, epath->dest->preds)
{
if (ein != epath
/* Ignore in edges from blocks we have duplicated for a
threading path, which have duplicated edge counts until
they are redirected by an invocation of this routine. */
&& !bitmap_bit_p (local_info->duplicate_blocks,
ein->src->index))
nonpath_count += ein->count;
}
}
if (cur_count < path_out_count)
path_out_count = cur_count;
if (epath->count < min_path_count)
@@ -827,14 +827,14 @@ compute_path_counts (struct redirection_data *rd,
difference between elast->count and nonpath_count. Otherwise the edge
counts after threading will not be sane. */
if (has_joiner && path_out_count < elast->count - nonpath_count)
{
path_out_count = elast->count - nonpath_count;
/* But neither can we go above the minimum count along the path
we are duplicating. This can be an issue due to profile
insanities coming in to this pass. */
if (path_out_count > min_path_count)
path_out_count = min_path_count;
}
*path_in_count_ptr = path_in_count;
*path_out_count_ptr = path_out_count;
@@ -1268,17 +1268,17 @@ ssa_fix_duplicate_block_edges (struct redirection_data *rd,
thread path (path_in_freq). If we had a joiner, it would have
been updated at the end of that handling to the edge frequency
along the duplicated joiner path edge. */
update_profile (epath, NULL, path_out_count, path_out_count,
cur_path_freq);
}
/* Increment the index into the duplicated path when we processed
a duplicated block. */
if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
|| (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
{
count++;
}
}
/* Now walk orig blocks and update their probabilities, since the
@@ -2383,7 +2383,7 @@ valid_jump_thread_path (vec<jump_thread_edge *> *path)
struct loop *loop = e->dest->loop_father;
if (e->dest != (*path)[j+1]->e->src)
return false;
/* If we're threading through the loop latch back into the
same loop and the destination does not dominate the loop
@@ -2705,7 +2705,7 @@ register_jump_thread (vec<jump_thread_edge *> *path)
for (unsigned int i = 0; i < path->length (); i++)
{
if ((*path)[i]->e == NULL)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file,
@@ -2715,7 +2715,7 @@ register_jump_thread (vec<jump_thread_edge *> *path)
delete_jump_thread_path (path);
return;
}
/* Only the FSM threader is allowed to thread across
backedges in the CFG. */