tree-vect-loop.c (get_reduction_op): New function.

2015-05-22  Richard Biener  <rguenther@suse.de>

	* tree-vect-loop.c (get_reduction_op): New function.
	(vect_model_reduction_cost): Use it, add reduc_index parameter.
	Make ready for BB reductions.
	(vect_create_epilog_for_reduction): Use get_reduction_op.
	(vectorizable_reduction): Init reduc_index to a valid value.
	Adjust vect_model_reduction_cost call.
	* tree-vect-slp.c (vect_get_constant_vectors): Use the proper
	operand for reduction defaults.  Add SAD_EXPR support.
	Assert we have a neutral op for SLP reductions.
	* tree-vect-stmts.c (vect_mark_stmts_to_be_vectorized): When
	walking pattern stmt ops only recurse to SSA names.

From-SVN: r223526
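
For context on the entries above: the routines touched here vectorize reduction
statements. Two illustrative scalar loops of the kind involved (not from the
patch; the second is the sum-of-absolute-differences shape that the
tree-vect-slp.c change handles via SAD_EXPR, assuming the usual unsigned-char
operands):

/* Illustrative only; these loops are not part of the patch.  */

int
sum (int *a, int n)
{
  int s = 0;
  for (int i = 0; i < n; i++)
    s += a[i];				/* PLUS_EXPR reduction, neutral value 0.  */
  return s;
}

int
sad (unsigned char *a, unsigned char *b, int n)
{
  int s = 0;
  for (int i = 0; i < n; i++)
    s += __builtin_abs (a[i] - b[i]);	/* the SAD_EXPR shape.  */
  return s;
}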

--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,17 @@
+2015-05-22  Richard Biener  <rguenther@suse.de>
+
+	* tree-vect-loop.c (get_reduction_op): New function.
+	(vect_model_reduction_cost): Use it, add reduc_index parameter.
+	Make ready for BB reductions.
+	(vect_create_epilog_for_reduction): Use get_reduction_op.
+	(vectorizable_reduction): Init reduc_index to a valid value.
+	Adjust vect_model_reduction_cost call.
+	* tree-vect-slp.c (vect_get_constant_vectors): Use the proper
+	operand for reduction defaults.  Add SAD_EXPR support.
+	Assert we have a neutral op for SLP reductions.
+	* tree-vect-stmts.c (vect_mark_stmts_to_be_vectorized): When
+	walking pattern stmt ops only recurse to SSA names.
+
 2015-05-22  Richard Biener  <rguenther@suse.de>
 
 	* tree-vect-patterns.c (vect_recog_dot_prod_pattern): Replace
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -3166,6 +3166,29 @@ have_whole_vector_shift (enum machine_mode mode)
   return true;
 }
 
+/* Return the reduction operand (with index REDUC_INDEX) of STMT.  */
+
+static tree
+get_reduction_op (gimple stmt, int reduc_index)
+{
+  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
+    {
+    case GIMPLE_SINGLE_RHS:
+      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
+		  == ternary_op);
+      return TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
+    case GIMPLE_UNARY_RHS:
+      return gimple_assign_rhs1 (stmt);
+    case GIMPLE_BINARY_RHS:
+      return (reduc_index
+	      ? gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt));
+    case GIMPLE_TERNARY_RHS:
+      return gimple_op (stmt, reduc_index + 1);
+    default:
+      gcc_unreachable ();
+    }
+}
+
 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
    functions.  Design better to avoid maintenance issues.  */
@@ -3177,7 +3200,7 @@ have_whole_vector_shift (enum machine_mode mode)
 
 static bool
 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
-			   int ncopies)
+			   int ncopies, int reduc_index)
 {
   int prologue_cost = 0, epilogue_cost = 0;
   enum tree_code code;
@@ -3187,32 +3210,23 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
   tree reduction_op;
   machine_mode mode;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
-  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
-  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
+  struct loop *loop = NULL;
+  void *target_cost_data;
+
+  if (loop_vinfo)
+    {
+      loop = LOOP_VINFO_LOOP (loop_vinfo);
+      target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
+    }
+  else
+    target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));
 
   /* Cost of reduction op inside loop.  */
   unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
 					stmt_info, 0, vect_body);
 
   stmt = STMT_VINFO_STMT (stmt_info);
 
-  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
-    {
-    case GIMPLE_SINGLE_RHS:
-      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
-      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
-      break;
-    case GIMPLE_UNARY_RHS:
-      reduction_op = gimple_assign_rhs1 (stmt);
-      break;
-    case GIMPLE_BINARY_RHS:
-      reduction_op = gimple_assign_rhs2 (stmt);
-      break;
-    case GIMPLE_TERNARY_RHS:
-      reduction_op = gimple_assign_rhs3 (stmt);
-      break;
-    default:
-      gcc_unreachable ();
-    }
+  reduction_op = get_reduction_op (stmt, reduc_index);
 
   vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
   if (!vectype)
@@ -3245,7 +3259,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
 	 We have a reduction operator that will reduce the vector in one statement.
 	 Also requires scalar extract.  */
 
-      if (!nested_in_vect_loop_p (loop, orig_stmt))
+      if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
 	{
 	  if (reduc_code != ERROR_MARK)
 	    {
@@ -3992,26 +4006,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
       gcc_assert (!slp_node);
     }
 
-  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
-    {
-    case GIMPLE_SINGLE_RHS:
-      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
-		  == ternary_op);
-      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
-      break;
-    case GIMPLE_UNARY_RHS:
-      reduction_op = gimple_assign_rhs1 (stmt);
-      break;
-    case GIMPLE_BINARY_RHS:
-      reduction_op = reduc_index ?
-		     gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
-      break;
-    case GIMPLE_TERNARY_RHS:
-      reduction_op = gimple_op (stmt, reduc_index + 1);
-      break;
-    default:
-      gcc_unreachable ();
-    }
+  reduction_op = get_reduction_op (stmt, reduc_index);
 
   vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
   gcc_assert (vectype);
@@ -4845,8 +4840,6 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
   tree ops[3];
   bool nested_cycle = false, found_nested_cycle_def = false;
   gimple reduc_def_stmt = NULL;
-  /* The default is that the reduction variable is the last in statement.  */
-  int reduc_index = 2;
   bool double_reduc = false, dummy;
   basic_block def_bb;
   struct loop * def_stmt_loop, *outer_loop = NULL;
@@ -4951,6 +4944,8 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
     default:
       gcc_unreachable ();
     }
+  /* The default is that the reduction variable is the last in statement.  */
+  int reduc_index = op_type - 1;
 
   if (code == COND_EXPR && slp_node)
     return false;
@@ -5248,7 +5243,8 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
 
   if (!vec_stmt) /* transformation not required.  */
     {
-      if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
+      if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies,
+				      reduc_index))
 	return false;
       STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
       return true;

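The operand indexing that get_reduction_op centralizes can be checked in
isolation. A minimal standalone sketch, with model_stmt and
model_get_reduction_op as hypothetical stand-ins for GCC's gimple types: in a
GIMPLE assignment, operand 0 is the lhs, so rhs operand REDUC_INDEX of a
ternary statement lives at gimple_op (stmt, reduc_index + 1).

/* Minimal model of get_reduction_op's operand indexing; the types here
   are hypothetical simplifications, not GCC internals.  */
#include <assert.h>
#include <stddef.h>

enum model_rhs_class { MODEL_UNARY_RHS, MODEL_BINARY_RHS, MODEL_TERNARY_RHS };

struct model_stmt
{
  enum model_rhs_class klass;
  const char *ops[4];		/* ops[0] plays the role of the lhs.  */
};

static const char *
model_get_reduction_op (const struct model_stmt *s, int reduc_index)
{
  switch (s->klass)
    {
    case MODEL_UNARY_RHS:
      return s->ops[1];
    case MODEL_BINARY_RHS:
      return reduc_index ? s->ops[2] : s->ops[1];
    case MODEL_TERNARY_RHS:
      return s->ops[reduc_index + 1];	/* mirrors gimple_op (stmt, reduc_index + 1) */
    default:
      return NULL;
    }
}

int
main (void)
{
  /* sum_1 = DOT_PROD_EXPR <x_2, y_3, sum_0>: with reduc_index 2 the
     reduction input is the third rhs operand, sum_0.  */
  struct model_stmt s = { MODEL_TERNARY_RHS, { "sum_1", "x_2", "y_3", "sum_0" } };
  assert (model_get_reduction_op (&s, 2) == s.ops[3]);
  return 0;
}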
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -2664,11 +2664,14 @@ vect_get_constant_vectors (tree op, slp_tree slp_node,
   struct loop *loop;
   gimple_seq ctor_seq = NULL;
 
+  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
+  nunits = TYPE_VECTOR_SUBPARTS (vector_type);
+
   if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
       && reduc_index != -1)
     {
-      op_num = reduc_index - 1;
-      op = gimple_op (stmt, reduc_index);
+      op_num = reduc_index;
+      op = gimple_op (stmt, op_num + 1);
       /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
          we need either neutral operands or the original operands.  See
          get_initial_def_for_reduction() for details.  */
@@ -2676,6 +2679,7 @@ vect_get_constant_vectors (tree op, slp_tree slp_node,
         {
           case WIDEN_SUM_EXPR:
           case DOT_PROD_EXPR:
+          case SAD_EXPR:
           case PLUS_EXPR:
           case MINUS_EXPR:
           case BIT_IOR_EXPR:
@@ -2716,6 +2720,7 @@ vect_get_constant_vectors (tree op, slp_tree slp_node,
             break;
 
           default:
+            gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
             neutral_op = NULL;
         }
     }
@@ -2735,10 +2740,6 @@ vect_get_constant_vectors (tree op, slp_tree slp_node,
   else
     constant_p = false;
 
-  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
-  gcc_assert (vector_type);
-  nunits = TYPE_VECTOR_SUBPARTS (vector_type);
-
   /* NUMBER_OF_COPIES is the number of times we need to use the same values in
      created vectors.  It is greater than 1 if unrolling is performed.

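The default branch now asserts that grouped SLP reductions have a neutral
operand: 0 for the additive and bitwise or/xor codes (including the new
SAD_EXPR), 1 for MULT_EXPR, all-ones for BIT_AND_EXPR. A standalone check of
the property this relies on (illustrative C, not GCC code): lanes padded with
the neutral value leave the reduced result unchanged.

/* Illustrative only: padding reduction lanes with the operation's
   neutral value does not change the result, which is what makes the
   SLP reduction defaults above safe.  */
#include <assert.h>

int
main (void)
{
  int init = 7;

  /* PLUS_EXPR (and WIDEN_SUM/DOT_PROD/SAD): neutral value 0.  */
  int add_lanes[4] = { init, 0, 0, 0 };
  int s = 0;
  for (int i = 0; i < 4; i++)
    s += add_lanes[i];
  assert (s == init);

  /* MULT_EXPR: neutral value 1.  */
  int mul_lanes[4] = { init, 1, 1, 1 };
  int p = 1;
  for (int i = 0; i < 4; i++)
    p *= mul_lanes[i];
  assert (p == init);

  return 0;
}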
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -812,8 +812,9 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
 	  for (; i < gimple_num_ops (stmt); i++)
 	    {
 	      op = gimple_op (stmt, i);
-	      if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
-				&worklist, false))
+	      if (TREE_CODE (op) == SSA_NAME
+		  && !process_use (stmt, op, loop_vinfo, live_p, relevant,
+				   &worklist, false))
 		return false;
 	    }
 	}
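
On the tree-vect-stmts.c hunk: only SSA_NAME operands have defining statements
that can be added to the worklist; constants and other non-SSA operands of a
pattern statement have none. A standalone model of the guard's effect
(hypothetical op_kind/operand types, not GCC internals):

/* Standalone model, not GCC code: only SSA names have defining
   statements to recurse into, so the guard skips constants and
   other invariants.  */
#include <stdio.h>

enum op_kind { OP_SSA_NAME, OP_CONSTANT };

struct operand
{
  enum op_kind kind;
  const char *name;
};

int
main (void)
{
  /* Model of a pattern statement whose last operand is a constant,
     e.g. an accumulator seeded with 0.  */
  struct operand ops[3] = {
    { OP_SSA_NAME, "x_1" }, { OP_SSA_NAME, "y_2" }, { OP_CONSTANT, "0" }
  };

  for (int i = 0; i < 3; i++)
    if (ops[i].kind == OP_SSA_NAME)	/* the new guard */
      printf ("process_use (%s)\n", ops[i].name);
  return 0;
}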