re PR rtl-optimization/89768 (ICE in compare_and_jump_seq at loop-unroll.c:838)

PR rtl-optimization/89768
	* loop-unroll.c (unroll_loop_constant_iterations): Use gen_int_mode
	instead of GEN_INT.
	(unroll_loop_runtime_iterations): Likewise.

From-SVN: r269812
Author: Jakub Jelinek <jakub@redhat.com>
Date:   2019-03-19 20:04:14 +01:00 (committed by Jakub Jelinek)
commit 2a23a1c39f (parent ea5ac5a69b)
2 changed files with 11 additions and 4 deletions
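
Why the one-liner matters: GEN_INT wraps a raw HOST_WIDE_INT in a CONST_INT without consulting any mode, but canonical RTL requires a CONST_INT used in a given mode to be the sign-extension of its low mode-precision bits; feeding compare_and_jump_seq a non-canonical constant is what triggered the ICE. gen_int_mode truncates and sign-extends the value for desc->mode first. A minimal C sketch of that canonicalization, assuming a 64-bit host (the helper and main here are illustrative, not GCC internals):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for what gen_int_mode does with a host value
   destined for a `prec`-bit mode: truncate to the low bits, then
   sign-extend back to the host width.  GEN_INT would store `val`
   unchanged.  Hypothetical helper, not GCC's implementation.  */
static int64_t
canonical_for_precision (int64_t val, unsigned prec)
{
  uint64_t mask = prec < 64 ? ((uint64_t) 1 << prec) - 1 : ~(uint64_t) 0;
  uint64_t low = (uint64_t) val & mask;
  uint64_t sign = (uint64_t) 1 << (prec - 1);
  return (int64_t) ((low ^ sign) - sign);  /* sign-extend from bit prec-1 */
}

int
main (void)
{
  /* A 32-bit iteration count with the high bit set: the raw value is
     0x0000000080000000, but the canonical 32-bit (SImode) CONST_INT
     carries the sign-extended 0xffffffff80000000.  */
  int64_t raw = 0x80000000;
  printf ("raw:       %#018llx\n", (unsigned long long) raw);
  printf ("canonical: %#018llx\n",
          (unsigned long long) canonical_for_precision (raw, 32));
  return 0;
}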

gcc/ChangeLog

@@ -1,3 +1,10 @@
+2019-03-19  Jakub Jelinek  <jakub@redhat.com>
+
+	PR rtl-optimization/89768
+	* loop-unroll.c (unroll_loop_constant_iterations): Use gen_int_mode
+	instead of GEN_INT.
+	(unroll_loop_runtime_iterations): Likewise.
+
 2019-03-19  Martin Sebor  <msebor@redhat.com>
 
 	PR tree-optimization/89644

gcc/loop-unroll.c

@@ -652,7 +652,7 @@ unroll_loop_constant_iterations (struct loop *loop)
   if (loop->any_likely_upper_bound)
     loop->nb_iterations_likely_upper_bound
       = wi::udiv_trunc (loop->nb_iterations_likely_upper_bound, max_unroll + 1);
-  desc->niter_expr = GEN_INT (desc->niter);
+  desc->niter_expr = gen_int_mode (desc->niter, desc->mode);
 
   /* Remove the edges.  */
   FOR_EACH_VEC_ELT (remove_edges, i, e)
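
Concretely, in terms of the sketch above: if desc->niter were, say, 0x80000000 with desc->mode of SImode, the old GEN_INT call stored the raw zero-extended bits, while gen_int_mode stores the sign-extended form the rest of the RTL machinery expects.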
@@ -1020,9 +1020,9 @@ unroll_loop_runtime_iterations (struct loop *loop)
       preheader = split_edge (loop_preheader_edge (loop));
       /* Add in count of edge from switch block.  */
       preheader->count += iter_count;
-      branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
-                                          block_label (preheader), p,
-                                          NULL);
+      branch_code = compare_and_jump_seq (copy_rtx (niter),
+                                          gen_int_mode (j, desc->mode), EQ,
+                                          block_label (preheader), p, NULL);
 
       /* We rely on the fact that the compare and jump cannot be optimized out,
          and hence the cfg we create is correct.  */
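
The hazard in this second hunk is the same one in miniature: CONST_INTs are modeless and shared by host value, so a comparison built from a raw GEN_INT constant against an already-canonical niter can misbehave even when both denote the same mode-sized quantity. A hypothetical illustration in plain C, not RTL (the variable names are ours, not GCC's):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* The same 8-bit pattern 0x90 under the two encodings a 64-bit
     host could choose.  */
  int64_t canonical = (int8_t) 0x90;  /* sign-extended: -112 */
  int64_t raw = 0x90;                 /* what GEN_INT stores: 144 */

  /* RTL shares and compares CONST_INTs by host value, so mixing the
     two encodings makes identical narrow-mode constants look
     different and can trip checks in the expanders that
     compare_and_jump_seq relies on.  */
  printf ("equal as host values? %s\n", canonical == raw ? "yes" : "no");
  return 0;
}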