dumpfile.c (dump_enabled_p): Make it inline and move the definition to dumpfile.h.

2012-10-24  Sharad Singhai  <singhai@google.com>

	* dumpfile.c (dump_enabled_p): Make it inline and move the definition
	to dumpfile.h.
	(dump_kind_p): Deleted. Functionality replaced by dump_enabled_p.
	Make alt_dump_file extern.
	* dumpfile.h (dump_enabled_p): Move inline definition here.
	(dump_kind_p): Delete declaration.
	Add extern declaration of alt_dump_file.
	* toplev.c: Move dump_file and dump_file_name to dumpfile.c.
	* tree-vect-loop-manip.c: Replace all uses of dump_kind_p with
	dump_enabled_p.
	* tree-vectorizer.c: Likewise.
	* tree-vect-loop.c: Likewise.
	* tree-vect-data-refs.c: Likewise.
	* tree-vect-patterns.c: Likewise.
	* tree-vect-stmts.c: Likewise.
	* tree-vect-slp.c: Likewise.

From-SVN: r192773
commit 73fbfcad22 (parent c1a4d0b580)
Author: Sharad Singhai <singhai@google.com>
Date:   2012-10-24 17:58:14 +0000
11 changed files with 515 additions and 506 deletions
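
The bulk of the patch is a mechanical rewrite of the vectorizer's dump guards. A minimal before/after sketch of the call-site pattern, taken from the hunks below:

  /* Before: the guard itself filtered on the message kind.  */
  if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                     "not vectorized: unhandled data-ref ");

  /* After: the guard only asks whether any dump stream is open at all.
     The MSG_* kind passed to dump_printf_loc is then what selects which
     open stream(s) actually print -- see the sketch after the dumpfile.c
     hunks.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                     "not vectorized: unhandled data-ref ");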

gcc/ChangeLog

@@ -1,3 +1,22 @@
+2012-10-24  Sharad Singhai  <singhai@google.com>
+
+	* dumpfile.c (dump_enabled_p): Make it inline and move the definition
+	to dumpfile.h.
+	(dump_kind_p): Deleted. Functionality replaced by dump_enabled_p.
+	Make alt_dump_file extern.
+	* dumpfile.h (dump_enabled_p): Move inline definition here.
+	(dump_kind_p): Delete declaration.
+	Add extern declaration of alt_dump_file.
+	* toplev.c: Move dump_file and dump_file_name to dumpfile.c.
+	* tree-vect-loop-manip.c: Replace all uses of dump_kind_p with
+	dump_enabled_p.
+	* tree-vectorizer.c: Likewise.
+	* tree-vect-loop.c: Likewise.
+	* tree-vect-data-refs.c: Likewise.
+	* tree-vect-patterns.c: Likewise.
+	* tree-vect-stmts.c: Likewise.
+	* tree-vect-slp.c: Likewise.
+
 2012-10-24  Richard Sandiford  <rdsandiford@googlemail.com>
 
 	* expmed.c (lowpart_bit_field_p): Add missing == 0 check.

gcc/dumpfile.c

@@ -32,12 +32,18 @@ along with GCC; see the file COPYING3.  If not see
 static int pflags;		/* current dump_flags */
 static int alt_flags;		/* current opt_info flags */
-static FILE *alt_dump_file = NULL;
 static void dump_loc (int, FILE *, source_location);
 static int dump_phase_enabled_p (int);
 static FILE *dump_open_alternate_stream (struct dump_file_info *);
 
+/* These are currently used for communicating between passes.
+   However, instead of accessing them directly, the passes can use
+   dump_printf () for dumps.  */
+FILE *dump_file = NULL;
+FILE *alt_dump_file = NULL;
+const char *dump_file_name;
+
 /* Table of tree dump switches.  This must be consistent with the
    TREE_DUMP_INDEX enumeration in dumpfile.h.  */
 static struct dump_file_info dump_files[TDI_end] =
@@ -514,14 +520,6 @@ dump_phase_enabled_p (int phase)
     }
 }
 
-/* Return true if any of the dumps are enabled, false otherwise.  */
-
-inline bool
-dump_enabled_p (void)
-{
-  return (dump_file || alt_dump_file);
-}
-
 /* Returns nonzero if tree dump PHASE has been initialized.  */
 
 int
@@ -836,16 +834,6 @@ opt_info_switch_p (const char *arg)
   return opt_info_enable_all ((TDF_TREE | TDF_RTL | TDF_IPA), flags, filename);
 }
 
-/* Return true if any dumps are enabled for the given MSG_TYPE, false
-   otherwise.  */
-
-bool
-dump_kind_p (int msg_type)
-{
-  return (dump_file && (msg_type & pflags))
-    || (alt_dump_file && (msg_type & alt_flags));
-}
-
 /* Print basic block on the dump streams.  */
 
 void
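
With dump_kind_p deleted, the per-kind filtering it performed has to happen inside the dump_printf* routines at print time. A minimal sketch of that dispatch, assuming the routines apply the same per-stream flag masks the deleted predicate used; dump_printf_sketch is a hypothetical name, not GCC's actual implementation:

  #include <stdarg.h>
  #include <stdio.h>

  static void
  dump_printf_sketch (int msg_type, const char *fmt, ...)
  {
    va_list ap;

    /* The same tests the deleted dump_kind_p made, now applied per
       stream at print time rather than once at the call site.  Uses
       the file-scope dump_file/alt_dump_file and pflags/alt_flags
       shown in the hunks above.  */
    if (dump_file && (msg_type & pflags))
      {
        va_start (ap, fmt);
        vfprintf (dump_file, fmt, ap);
        va_end (ap);
      }
    if (alt_dump_file && (msg_type & alt_flags))
      {
        va_start (ap, fmt);
        vfprintf (alt_dump_file, fmt, ap);
        va_end (ap);
      }
  }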

gcc/dumpfile.h

@@ -108,7 +108,6 @@ struct dump_file_info
   int num;			/* dump file number */
 };
 
-
 /* In dumpfile.c */
 extern char *get_dump_file_name (int);
 extern int dump_initialized_p (int);
@@ -120,8 +119,6 @@ extern void dump_node (const_tree, int, FILE *);
 extern int dump_switch_p (const char *);
 extern int opt_info_switch_p (const char *);
 extern const char *dump_flag_name (int);
-extern bool dump_kind_p (int);
-extern inline bool dump_enabled_p (void);
 extern void dump_printf (int, const char *, ...) ATTRIBUTE_PRINTF_2;
 extern void dump_printf_loc (int, source_location,
 			     const char *, ...) ATTRIBUTE_PRINTF_3;
@@ -142,10 +139,19 @@ extern void dump_bb (FILE *, basic_block, int, int);
 
 /* Global variables used to communicate with passes.  */
 extern FILE *dump_file;
+extern FILE *alt_dump_file;
 extern int dump_flags;
 extern const char *dump_file_name;
 
 /* Return the dump_file_info for the given phase.  */
 extern struct dump_file_info *get_dump_file_info (int);
 
+/* Return true if any of the dumps are enabled, false otherwise.  */
+
+static inline bool
+dump_enabled_p (void)
+{
+  return (dump_file || alt_dump_file);
+}
+
 #endif /* GCC_DUMPFILE_H */
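
Note what the move buys: the header previously carried only "extern inline bool dump_enabled_p (void);" while the body lived in dumpfile.c, so no other translation unit could actually inline the predicate. With the static inline definition in the header, every caller can fold the guard to two pointer tests, which matters because these guards sit on hot analysis paths in front of dump sequences that are not free to evaluate, e.g. (from a tree-vect-data-refs.c hunk below):

  if (dump_enabled_p ())   /* after inlining: dump_file || alt_dump_file */
    {
      dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
    }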

gcc/toplev.c

@@ -169,8 +169,6 @@ const char *user_label_prefix;
 FILE *asm_out_file;
 FILE *aux_info_file;
 FILE *stack_usage_file = NULL;
-FILE *dump_file = NULL;
-const char *dump_file_name;
 
 /* The current working directory of a translation.  It's generally the
    directory from which compilation was initiated, but a preprocessed
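
The toplev.c hunk above is pure relocation, but it follows from the header change: the inline dump_enabled_p reads both streams, which is why alt_dump_file loses its static qualifier, and moving dump_file and dump_file_name out of toplev.c keeps all three globals defined in dumpfile.c next to the code that manages them. The resulting single point of declaration, as in the dumpfile.h hunk above:

  extern FILE *dump_file;          /* primary -fdump-* stream */
  extern FILE *alt_dump_file;      /* -fopt-info stream */
  extern const char *dump_file_name;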

gcc/tree-vect-data-refs.c

@ -60,7 +60,7 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab,
if (array_mode == BLKmode)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]",
GET_MODE_NAME (mode), count);
@ -69,14 +69,14 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab,
if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot use %s<%s><%s>", name,
GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
return false;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"can use %s<%s><%s>", name, GET_MODE_NAME (array_mode),
GET_MODE_NAME (mode));
@ -439,7 +439,7 @@ vect_check_interleaving (struct data_reference *dra,
if (diff_mod_size == 0)
{
vect_update_interleaving_chain (drb, dra);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving ");
@ -462,7 +462,7 @@ vect_check_interleaving (struct data_reference *dra,
if (diff_mod_size == 0)
{
vect_update_interleaving_chain (dra, drb);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving ");
@ -524,7 +524,7 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
return false;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"mark for run-time aliasing test between ");
@ -535,7 +535,7 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
if (optimize_loop_nest_for_size_p (loop))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not supported when optimizing for size.");
return false;
@ -544,7 +544,7 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
/* FORNOW: We don't support versioning with outer-loop vectorization. */
if (loop->inner)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for outer-loops.");
return false;
@ -555,7 +555,7 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
|| TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for non-constant "
"step");
@ -611,7 +611,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if (loop_vinfo)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
@ -637,7 +637,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if (DR_IS_READ (dra) && DR_IS_READ (drb))
return false;
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine dependence between ");
@ -666,7 +666,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if (dra != drb && vect_check_interleaving (dra, drb))
return false;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"determined dependence between ");
@ -686,7 +686,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
/* Loop-based vectorization and known data dependence. */
if (DDR_NUM_DIST_VECTS (ddr) == 0)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
@ -704,13 +704,13 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
{
int dist = dist_v[loop_depth];
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.", dist);
if (dist == 0)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance == 0 between ");
@ -737,7 +737,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
/* If DDR_REVERSED_P the order of the data-refs in DDR was
reversed (to make distance vector positive), and the actual
distance is negative. */
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"dependence distance negative.");
continue;
@ -749,7 +749,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
/* The dependence distance requires reduction of the maximal
vectorization factor. */
*max_vf = abs (dist);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"adjusting maximal vectorization factor to %i",
*max_vf);
@ -759,13 +759,13 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
{
/* Dependence distance does not create dependence, as far as
vectorization is concerned, in this case. */
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance >= VF.");
continue;
}
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized, possible dependence "
@ -795,7 +795,7 @@ vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
VEC (ddr_p, heap) *ddrs = NULL;
struct data_dependence_relation *ddr;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_dependences ===");
if (loop_vinfo)
@ -837,7 +837,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
tree misalign;
tree aligned_to, alignment;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_compute_data_ref_alignment:");
@ -870,7 +870,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"inner step divides the vector-size.");
misalign = STMT_VINFO_DR_INIT (stmt_info);
@ -879,7 +879,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
}
else
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner step doesn't divide the vector-size.");
misalign = NULL_TREE;
@ -898,7 +898,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP: step doesn't divide the vector-size.");
misalign = NULL_TREE;
@ -911,7 +911,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
|| !misalign)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown alignment for access: ");
@ -941,7 +941,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
|| (TREE_STATIC (base) && flag_section_anchors))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"can't force alignment of ref: ");
@ -953,7 +953,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
/* Force the alignment of the decl.
NOTE: This is the only change to the code we make during
the analysis phase, before deciding to vectorize the loop. */
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
@ -987,7 +987,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (!host_integerp (misalign, 1))
{
/* Negative or overflowed misalignment value. */
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unexpected misalign value");
return false;
@ -995,7 +995,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
@ -1095,7 +1095,7 @@ vect_update_misalignment_for_peel (struct data_reference *dr,
return;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.");
SET_DR_MISALIGNMENT (dr, -1);
}
@ -1142,7 +1142,7 @@ vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
if (!supportable_dr_alignment)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
if (DR_IS_READ (dr))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@ -1157,8 +1157,7 @@ vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
}
return false;
}
if (supportable_dr_alignment != dr_aligned
&& dump_kind_p (MSG_NOTE))
if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Vectorizing an unaligned access.");
}
@ -1215,7 +1214,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
{
HOST_WIDE_INT elmsize =
int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
@ -1224,7 +1223,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
}
if (DR_MISALIGNMENT (dr) % elmsize)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"data size does not divide the misalignment.\n");
return false;
@ -1235,7 +1234,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
{
tree type = TREE_TYPE (DR_REF (dr));
bool is_packed = not_size_aligned (DR_REF (dr));
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown misalignment, is_packed = %d",is_packed);
if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
@ -1269,7 +1268,7 @@ vect_get_data_access_cost (struct data_reference *dr,
else
vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_data_access_cost: inside_cost = %d, "
"outside_cost = %d.", *inside_cost, *outside_cost);
@ -1567,7 +1566,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
unsigned int nelements, mis, same_align_drs_max = 0;
stmt_vector_for_cost body_cost_vec = NULL;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_enhance_data_refs_alignment ===");
@ -1622,7 +1621,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
and so we can't generate the new base for the pointer. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"strided load prevents peeling");
do_peeling = false;
@ -1738,7 +1737,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
if (!aligned_access_p (dr))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vector alignment may not be reachable");
break;
@ -1879,7 +1878,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
npeel /= GROUP_SIZE (stmt_info);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Try peeling by %d", npeel);
}
@ -1951,7 +1950,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
else
LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
SET_DR_MISALIGNMENT (dr0, 0);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using peeling.");
@ -2077,12 +2076,12 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
dr = STMT_VINFO_DATA_REF (stmt_info);
SET_DR_MISALIGNMENT (dr, 0);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using versioning.");
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Versioning for alignment will be applied.");
@ -2148,7 +2147,7 @@ vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
{
int dist = dist_v[loop_depth];
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.", dist);
@ -2159,7 +2158,7 @@ vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
/* Two references with distance zero have the same alignment. */
VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"accesses have the same alignment.");
@ -2183,7 +2182,7 @@ bool
vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_refs_alignment ===");
@ -2201,7 +2200,7 @@ vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't calculate alignment "
"for data ref.");
@ -2254,7 +2253,7 @@ vect_analyze_group_access (struct data_reference *dr)
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected single element interleaving ");
@ -2265,13 +2264,13 @@ vect_analyze_group_access (struct data_reference *dr)
if (loop_vinfo)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Data access with gaps requires scalar "
"epilogue loop");
if (loop->inner)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not"
" supported");
@ -2284,7 +2283,7 @@ vect_analyze_group_access (struct data_reference *dr)
return true;
}
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access ");
@ -2324,7 +2323,7 @@ vect_analyze_group_access (struct data_reference *dr)
{
if (DR_IS_WRITE (data_ref))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two store stmts share the same dr.");
return false;
@ -2335,7 +2334,7 @@ vect_analyze_group_access (struct data_reference *dr)
if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
|| GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"READ_WRITE dependence in interleaving.");
return false;
@ -2355,7 +2354,7 @@ vect_analyze_group_access (struct data_reference *dr)
next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
if (tree_int_cst_compare (step, next_step))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access in interleaving");
return false;
@ -2372,7 +2371,7 @@ vect_analyze_group_access (struct data_reference *dr)
slp_impossible = true;
if (DR_IS_WRITE (data_ref))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps");
return false;
@ -2401,7 +2400,7 @@ vect_analyze_group_access (struct data_reference *dr)
greater than STEP. */
if (dr_step && dr_step < count_in_bytes + gaps * type_size)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaving size is greater than step for ");
@ -2424,7 +2423,7 @@ vect_analyze_group_access (struct data_reference *dr)
}
else
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps");
return false;
@ -2434,7 +2433,7 @@ vect_analyze_group_access (struct data_reference *dr)
/* Check that STEP is a multiple of type size. */
if (dr_step && (dr_step % type_size) != 0)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step is not a multiple of type size: step ");
@ -2450,7 +2449,7 @@ vect_analyze_group_access (struct data_reference *dr)
groupsize = count;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving of size %d", (int)groupsize);
@ -2469,13 +2468,13 @@ vect_analyze_group_access (struct data_reference *dr)
/* There is a gap in the end of the group. */
if (groupsize - last_accessed_element > 0 && loop_vinfo)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Data access with gaps requires scalar "
"epilogue loop");
if (loop->inner)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not supported");
return false;
@ -2508,7 +2507,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
if (loop_vinfo && !step)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data-ref access in loop");
return false;
@ -2531,7 +2530,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
step = STMT_VINFO_DR_STEP (stmt_info);
if (integer_zerop (step))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in outer loop.");
if (DR_IS_READ (dr))
@ -2557,7 +2556,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
if (loop && nested_in_vect_loop_p (loop, stmt))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"grouped access in outer loop.");
return false;
@ -2588,7 +2587,7 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
VEC (data_reference_p, heap) *datarefs;
struct data_reference *dr;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_ref_accesses ===");
@ -2601,7 +2600,7 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_analyze_data_ref_access (dr))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated access pattern.");
@ -2631,7 +2630,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
unsigned i, j;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_prune_runtime_alias_test_list ===");
@ -2649,7 +2648,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
if (vect_vfa_range_equal (ddr_i, ddr_j))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"found equal ranges ");
@ -2677,7 +2676,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
if (VEC_length (ddr_p, ddrs) >
(unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"disable versioning for alias - max number of "
@ -2964,7 +2963,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
tree scalar_type;
bool res, stop_bb_analysis = false;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_refs ===\n");
@ -2979,7 +2978,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (!res)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function calls"
" or data references that cannot be analyzed");
@ -3011,7 +3010,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
&BB_VINFO_DDRS (bb_vinfo), NULL, true))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: basic block contains function"
" calls or data references that cannot be"
@ -3035,7 +3034,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (!dr || !DR_REF (dr))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data-ref ");
return false;
@ -3081,7 +3080,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (!gather)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data ref analysis "
@ -3102,7 +3101,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: base addr of dr is a "
"constant");
@ -3121,7 +3120,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (TREE_THIS_VOLATILE (DR_REF (dr)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: volatile type ");
@ -3140,7 +3139,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (stmt_can_throw_internal (stmt))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: statement can throw an "
@ -3163,7 +3162,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
&& DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: statement is bitfield "
@ -3189,7 +3188,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (is_gimple_call (stmt))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: dr in a call ");
@ -3232,7 +3231,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
tree inner_base = build_fold_indirect_ref
(fold_build_pointer_plus (base, init));
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"analyze in outer-loop: ");
@ -3245,7 +3244,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (pbitpos % BITS_PER_UNIT != 0)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"failed: bit offset alignment.\n");
return false;
@ -3255,7 +3254,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
&base_iv, false))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"failed: evolution of base is not affine.\n");
return false;
@ -3278,7 +3277,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
&offset_iv, false))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"evolution of offset is not affine.\n");
return false;
@ -3303,7 +3302,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
size_int (highest_pow2_factor (offset_iv.base));
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"\touter base_address: ");
@ -3327,7 +3326,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (STMT_VINFO_DATA_REF (stmt_info))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: more than one data ref "
@ -3355,7 +3354,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
get_vectype_for_scalar_type (scalar_type);
if (!STMT_VINFO_VECTYPE (stmt_info))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: no vectype for stmt: ");
@ -3406,7 +3405,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
{
STMT_VINFO_DATA_REF (stmt_info) = NULL;
free_data_ref (dr);
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not suitable for gather "
@ -3459,7 +3458,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
if (bad)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data dependence conflict"
@ -3480,7 +3479,7 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
= vect_check_strided_load (stmt, loop_vinfo, NULL, NULL);
if (!strided_load)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not suitable for strided "
@ -3668,7 +3667,7 @@ vect_create_addr_base_for_vector_ref (gimple stmt,
mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (vec_stmt));
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "created ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vec_stmt);
@ -3790,7 +3789,7 @@ vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
in LOOP. */
base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
tree data_ref_base = base_name;
dump_printf_loc (MSG_NOTE, vect_location,
@ -4120,7 +4119,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
/* vect_permute_store_chain requires the group size to be a power of two. */
if (exact_log2 (count) == -1)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2");
@ -4146,7 +4145,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
}
}
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION,
"interleave op not supported by target.");
return false;
@ -4564,7 +4563,7 @@ vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
/* vect_permute_load_chain requires the group size to be a power of two. */
if (exact_log2 (count) == -1)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2");
@ -4588,7 +4587,7 @@ vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
}
}
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"extract even/odd not supported by target");
return false;

gcc/tree-vect-loop-manip.c

@ -792,7 +792,7 @@ slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
free_stmt_vec_info (orig_cond);
loop_loc = find_loop_location (loop);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
if (LOCATION_LOCUS (loop_loc) != UNKNOWN_LOC)
dump_printf (MSG_NOTE, "\nloop at %s:%d: ", LOC_FILE (loop_loc),
@ -1683,7 +1683,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
/* Analyze phi functions of the loop header. */
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "vect_can_advance_ivs_p:");
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
@ -1691,7 +1691,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
tree evolution_part;
phi = gsi_stmt (gsi);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@ -1702,7 +1702,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
if (virtual_operand_p (PHI_RESULT (phi)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"virtual phi. skip.");
continue;
@ -1712,7 +1712,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc phi. skip.");
continue;
@ -1725,13 +1725,13 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
if (!access_fn)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"No Access function.");
return false;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Access function of PHI: ");
@ -1742,7 +1742,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
if (evolution_part == NULL_TREE)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION, "No evolution.");
return false;
}
@ -1827,7 +1827,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
phi = gsi_stmt (gsi);
phi1 = gsi_stmt (gsi1);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_update_ivs_after_vectorizer: phi: ");
@ -1837,7 +1837,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
/* Skip virtual phi's. */
if (virtual_operand_p (PHI_RESULT (phi)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"virtual phi. skip.");
continue;
@ -1847,7 +1847,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
stmt_info = vinfo_for_stmt (phi);
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc phi. skip.");
continue;
@ -1910,7 +1910,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
tree cond_expr = NULL_TREE;
gimple_seq cond_expr_stmt_list = NULL;
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"=== vect_do_peeling_for_loop_bound ===");
@ -2022,7 +2022,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters, int
{
int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"known peeling = %d.", npeel);
@ -2076,7 +2076,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters, int
if (TREE_CODE (loop_niters) != INTEGER_CST)
iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"niters for prolog loop: ");
@ -2134,7 +2134,7 @@ vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct data_reference *dr;
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"=== vect_update_inits_of_dr ===");
@ -2163,7 +2163,7 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo,
int max_iter;
int bound = 0;
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"=== vect_do_peeling_for_alignment ===");
@ -2475,7 +2475,7 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"create runtime check for data references ");
@ -2506,7 +2506,7 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
*cond_expr = part_cond_expr;
}
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"created %u versioning for alias checks.\n",
VEC_length (ddr_p, may_alias_ddrs));

gcc/tree-vect-loop.c (diff suppressed because it is too large)

gcc/tree-vect-patterns.c

@ -416,7 +416,7 @@ vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in,
pattern_stmt = gimple_build_assign_with_ops (DOT_PROD_EXPR, var,
oprnd00, oprnd01, oprnd1);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_dot_prod_pattern: detected: ");
@ -676,7 +676,7 @@ vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
return NULL;
/* Pattern detected. */
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_widen_mult_pattern: detected: ");
@ -699,7 +699,7 @@ vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
oprnd1);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
VEC_safe_push (gimple, heap, *stmts, last_stmt);
@ -912,7 +912,7 @@ vect_recog_widen_sum_pattern (VEC (gimple, heap) **stmts, tree *type_in,
pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
oprnd0, oprnd1);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_widen_sum_pattern: detected: ");
@ -1217,7 +1217,7 @@ vect_recog_over_widening_pattern (VEC (gimple, heap) **stmts,
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
new_pattern_def_seq (vinfo_for_stmt (stmt), new_def_stmt);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"created pattern stmt: ");
@ -1285,7 +1285,7 @@ vect_recog_over_widening_pattern (VEC (gimple, heap) **stmts,
return NULL;
/* Pattern detected. */
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_over_widening_pattern: detected: ");
@ -1421,7 +1421,7 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts,
return NULL;
/* Pattern detected. */
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_widen_shift_pattern: detected: ");
@ -1445,7 +1445,7 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts,
pattern_stmt =
gimple_build_assign_with_ops (WIDEN_LSHIFT_EXPR, var, oprnd0, oprnd1);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
VEC_safe_push (gimple, heap, *stmts, last_stmt);
@ -1567,7 +1567,7 @@ vect_recog_vector_vector_shift_pattern (VEC (gimple, heap) **stmts,
}
/* Pattern detected. */
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_vector_vector_shift_pattern: detected: ");
@ -1575,7 +1575,7 @@ vect_recog_vector_vector_shift_pattern (VEC (gimple, heap) **stmts,
var = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
pattern_stmt = gimple_build_assign_with_ops (rhs_code, var, oprnd0, def);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
VEC_safe_push (gimple, heap, *stmts, last_stmt);
@ -1685,7 +1685,7 @@ vect_recog_divmod_pattern (VEC (gimple, heap) **stmts,
return NULL;
/* Pattern detected. */
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_divmod_pattern: detected: ");
@ -1789,7 +1789,7 @@ vect_recog_divmod_pattern (VEC (gimple, heap) **stmts,
signmask);
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt,
0);
@ -2031,7 +2031,7 @@ vect_recog_divmod_pattern (VEC (gimple, heap) **stmts,
}
/* Pattern detected. */
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_divmod_pattern: detected: ");
@ -2199,7 +2199,7 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in,
*type_in = vecitype;
*type_out = vectype;
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_mixed_size_cond_pattern: detected: ");
@ -2592,7 +2592,7 @@ vect_recog_bool_pattern (VEC (gimple, heap) **stmts, tree *type_in,
*type_out = vectype;
*type_in = vectype;
VEC_safe_push (gimple, heap, *stmts, last_stmt);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_bool_pattern: detected: ");
@ -2638,7 +2638,7 @@ vect_recog_bool_pattern (VEC (gimple, heap) **stmts, tree *type_in,
*type_out = vectype;
*type_in = vectype;
VEC_safe_push (gimple, heap, *stmts, last_stmt);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"vect_recog_bool_pattern: detected: ");
return pattern_stmt;
@ -2788,7 +2788,7 @@ vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
}
/* Found a vectorizable pattern. */
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"pattern recognized: ");
@ -2814,7 +2814,7 @@ vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
{
stmt_info = vinfo_for_stmt (stmt);
pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"additional pattern stmt: ");
@ -2915,7 +2915,7 @@ vect_pattern_recog (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
VEC (gimple, heap) *stmts_to_replace = VEC_alloc (gimple, heap, 1);
gimple stmt;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_pattern_recog ===");

gcc/tree-vect-slp.c

@ -238,7 +238,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
&def, &dt)
|| (!def_stmt && dt != vect_constant_def))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: can't find def for ");
@ -263,7 +263,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
pattern = true;
if (!first && !oprnd_info->first_pattern)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: some of the stmts"
@ -279,7 +279,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (dt == vect_unknown_def_type)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported pattern.");
return false;
@ -296,7 +296,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
break;
default:
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported defining stmt: ");
return false;
@ -361,7 +361,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
if (number_of_oprnds != 2)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different types ");
@ -388,7 +388,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
&& !types_compatible_p (oprnd_info->first_def_type,
TREE_TYPE (def_op0))))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Swapping operands of ");
@ -400,7 +400,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
}
else
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different types ");
@ -435,7 +435,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
default:
/* FORNOW: Not supported. */
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: illegal type of def ");
@ -504,7 +504,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
/* For every stmt in NODE find its def stmt/s. */
FOR_EACH_VEC_ELT (gimple, stmts, i, stmt)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@ -513,7 +513,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
/* Fail to vectorize statements marked as unvectorizable. */
if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unvectorizable statement ");
@ -527,7 +527,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
lhs = gimple_get_lhs (stmt);
if (lhs == NULL_TREE)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: not GIMPLE_ASSIGN nor "
@ -544,7 +544,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
&& (cond = gimple_assign_rhs1 (stmt))
&& !COMPARISON_CLASS_P (cond))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: condition is not "
@ -560,7 +560,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data-type ");
@ -591,7 +591,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
|| !gimple_call_nothrow_p (stmt)
|| gimple_call_chain (stmt))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported call type ");
@ -631,7 +631,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (!optab)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: no optab.");
vect_free_oprnd_info (&oprnds_info);
@ -640,7 +640,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
icode = (int) optab_handler (optab, vec_mode);
if (icode == CODE_FOR_nothing)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: "
"op not supported by target.");
@ -674,7 +674,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
|| first_stmt_code == COMPONENT_REF
|| first_stmt_code == MEM_REF)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different operation "
@ -689,7 +689,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (need_same_oprnds
&& !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different shift "
@ -710,7 +710,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
|| gimple_call_fntype (first_stmt)
!= gimple_call_fntype (stmt))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different calls in ");
@ -749,7 +749,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
|| (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
&& GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: grouped "
@ -767,7 +767,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (loop_vinfo
&& GROUP_SIZE (vinfo_for_stmt (stmt)) > ncopies * group_size)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: the number "
@ -792,7 +792,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
&& rhs_code != REALPART_EXPR
&& rhs_code != IMAGPART_EXPR)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
@ -817,7 +817,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (vect_supportable_dr_alignment (first_dr, false)
== dr_unaligned_unsupported)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
@ -857,7 +857,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
{
/* Not grouped load. */
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: not grouped load ");
@ -875,7 +875,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
&& rhs_code != COND_EXPR
&& rhs_code != CALL_EXPR)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: operation");
@ -895,7 +895,7 @@ vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
first_cond_code = TREE_CODE (cond_expr);
else if (first_cond_code != TREE_CODE (cond_expr))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different"
@ -1080,7 +1080,7 @@ vect_supported_slp_permutation_p (slp_instance instance)
/* Check that the loads are all in the same interleaving chain. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (scalar_stmt)) != first_load)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data "
@ -1169,7 +1169,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
if (!slp_instn)
return false;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
FOR_EACH_VEC_ELT (int, load_permutation, i, next)
@ -1376,7 +1376,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
if (vect_supportable_dr_alignment (dr, false)
== dr_unaligned_unsupported)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
@ -1536,7 +1536,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (!vectype)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data-type ");
@ -1556,7 +1556,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
if (unrolling_factor != 1 && !loop_vinfo)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unrolling required in basic"
" block SLP");
@ -1618,7 +1618,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (unrolling_factor != 1 && !loop_vinfo)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unrolling required in basic"
" block SLP");
@ -1645,7 +1645,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (!vect_supported_load_permutation_p (new_instance, group_size,
load_permutation))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported load "
@ -1685,7 +1685,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
VEC_safe_push (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo),
new_instance);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
vect_print_slp_tree (MSG_NOTE, node);
return true;
@ -1717,7 +1717,7 @@ vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
gimple first_element;
bool ok = false;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===");
if (loop_vinfo)
@ -1736,7 +1736,7 @@ vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
if (bb_vinfo && !ok)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Failed to SLP the basic block.");
@ -1780,7 +1780,7 @@ vect_make_slp_decision (loop_vec_info loop_vinfo)
slp_instance instance;
int decided_to_slp = 0;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ===");
FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
@ -1798,7 +1798,7 @@ vect_make_slp_decision (loop_vec_info loop_vinfo)
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
if (decided_to_slp && dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (decided_to_slp && dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"Decided to SLP %d instances. Unrolling factor %d",
decided_to_slp, unrolling_factor);
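
Where the old predicate appeared as one conjunct of a larger condition, as above, it swaps in place; since the inlined predicate is just two pointer tests, keeping it as the second, short-circuited conjunct stays cheap:

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d",
                     decided_to_slp, unrolling_factor);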
@ -1863,7 +1863,7 @@ vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
slp_instance instance;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ===");
FOR_EACH_VEC_ELT (slp_instance, slp_instances, i, instance)
@ -2060,7 +2060,7 @@ vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
@ -2097,7 +2097,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data-ref in basic "
"block.\n");
@ -2109,7 +2109,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
ddrs = BB_VINFO_DDRS (bb_vinfo);
if (!VEC_length (ddr_p, ddrs))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not enough data-refs in "
"basic block.\n");
@ -2123,7 +2123,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
if (!vect_analyze_data_ref_dependences (NULL, bb_vinfo, &max_vf)
|| min_vf > max_vf)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data dependence "
"in basic block.\n");
@ -2134,7 +2134,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: bad data alignment in basic "
"block.\n");
@ -2145,7 +2145,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data access in "
"basic block.\n");
@ -2158,7 +2158,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
trees. */
if (!vect_analyze_slp (NULL, bb_vinfo))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: failed to find SLP opportunities "
"in basic block.\n");
@ -2179,7 +2179,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported alignment in basic "
"block.\n");
@ -2189,7 +2189,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
if (!vect_slp_analyze_operations (bb_vinfo))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: bad operation in basic block.\n");
@ -2201,7 +2201,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
if (flag_vect_cost_model
&& !vect_bb_vectorization_profitable_p (bb_vinfo))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization is not "
"profitable.\n");
@ -2210,7 +2210,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
return NULL;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Basic block will be vectorized using SLP\n");
@ -2226,7 +2226,7 @@ vect_slp_analyze_bb (basic_block bb)
gimple_stmt_iterator gsi;
unsigned int vector_sizes;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
@ -2240,7 +2240,7 @@ vect_slp_analyze_bb (basic_block bb)
if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: too many instructions in "
"basic block.\n");
@ -2267,7 +2267,7 @@ vect_slp_analyze_bb (basic_block bb)
/* Try the next biggest vector size. */
current_vector_size = 1 << floor_log2 (vector_sizes);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"***** Re-trying analysis with "
"vector size %d\n", current_vector_size);
@ -2292,7 +2292,7 @@ vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
stmt_info_for_cost *si;
void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_update_slp_costs_according_to_vf ===");
@ -2800,7 +2800,7 @@ vect_get_mask_element (gimple stmt, int first_mask_element, int m,
the next vector as well. */
if (only_one_vec && *current_mask_element >= mask_nunits)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"permutation requires at least two vectors ");
@ -2818,7 +2818,7 @@ vect_get_mask_element (gimple stmt, int first_mask_element, int m,
/* We either need the first vector too or have already moved to the
next vector. In both cases, this permutation needs three
vectors. */
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"permutation requires at "
@ -2884,7 +2884,7 @@ vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain,
if (!can_vec_perm_p (mode, false, NULL))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vect permute for ");
@ -2964,7 +2964,7 @@ vect_transform_slp_perm_load (gimple stmt, VEC (tree, heap) *dr_chain,
if (!can_vec_perm_p (mode, false, mask))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION,
vect_location,
@ -3068,7 +3068,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE,vect_location,
"------>vectorizing SLP node starting from: ");
@ -3177,7 +3177,7 @@ vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
/* Schedule the tree of INSTANCE. */
is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
instance, vf);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vectorizing stmts using SLP.");
}
@ -3222,7 +3222,7 @@ vect_slp_transform_bb (basic_block bb)
gcc_assert (bb_vinfo);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
@ -3230,7 +3230,7 @@ vect_slp_transform_bb (basic_block bb)
gimple stmt = gsi_stmt (si);
stmt_vec_info stmt_info;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"------>SLPing statement: ");
@ -3248,7 +3248,7 @@ vect_slp_transform_bb (basic_block bb)
}
}
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf (MSG_OPTIMIZED_LOCATIONS, "BASIC BLOCK VECTORIZED\n");
destroy_bb_vec_info (bb_vinfo);
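Note on the transformation in the hunks above (these are in tree-vect-slp.c): every call-site guard of the form dump_kind_p (MSG_*) becomes the kind-agnostic dump_enabled_p (), which only asks whether any dump stream is currently open; selecting which MSG_* kinds actually reach a stream is left to the dump_printf* routines themselves. A minimal before/after sketch of the idiom, with identifiers copied from the first hunk:

  /* Before: the guard itself filtered on the message kind.  */
  if (dump_kind_p (MSG_NOTE))
    dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");

  /* After: a cheap inline check that some dump stream is active;
     the MSG_NOTE filtering now happens inside dump_printf_loc.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");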


@ -190,7 +190,7 @@ vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
gimple pattern_stmt;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"mark relevant %d, live %d.", relevant, live_p);
@ -246,7 +246,7 @@ vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"last stmt in pattern. don't mark"
" relevant/live.");
@ -265,7 +265,7 @@ vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
&& STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"already marked relevant/live.");
return;
@ -310,7 +310,7 @@ vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
if (gimple_code (stmt) != GIMPLE_PHI)
if (gimple_vdef (stmt))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: stmt has vdefs.");
*relevant = vect_used_in_scope;
@ -324,7 +324,7 @@ vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
basic_block bb = gimple_bb (USE_STMT (use_p));
if (!flow_bb_inside_loop_p (loop, bb))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: used out of loop.");
@ -437,7 +437,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported use in stmt.");
return false;
@ -449,7 +449,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
def_bb = gimple_bb (def_stmt);
if (!flow_bb_inside_loop_p (loop, def_bb))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.");
return true;
}
@ -467,7 +467,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
&& STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
&& bb->loop_father == def_bb->loop_father)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"reduc-stmt defining reduc-phi in the same nest.");
if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
@ -487,7 +487,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
... */
if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop def-stmt defining inner-loop stmt.");
@ -525,7 +525,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
stmt # use (d) */
else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"inner-loop def-stmt defining outer-loop stmt.");
@ -589,7 +589,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
enum vect_relevant relevant, tmp_relevant;
enum vect_def_type def_type;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_mark_stmts_to_be_vectorized ===");
@ -602,7 +602,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
phi = gsi_stmt (si);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
@ -614,7 +614,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
stmt = gsi_stmt (si);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@ -632,7 +632,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
ssa_op_iter iter;
stmt = VEC_pop (gimple, worklist);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@ -677,7 +677,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
/* fall through */
default:
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of reduction.");
VEC_free (gimple, heap, worklist);
@ -692,7 +692,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
&& tmp_relevant != vect_used_in_outer_by_reduction
&& tmp_relevant != vect_used_in_outer)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of nested cycle.");
@ -707,7 +707,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
if (tmp_relevant != vect_unused_in_scope
&& tmp_relevant != vect_used_by_reduction)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of double reduction.");
@ -830,7 +830,7 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
stmt_info, 0, vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_simple_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
@ -876,7 +876,7 @@ vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
stmt_info, 0, vect_prologue);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_promotion_demotion_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
@ -960,7 +960,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: strided group_size = %d .",
group_size);
@ -969,7 +969,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
/* Costs of the stores. */
vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
@ -994,7 +994,7 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
vector_store, stmt_info, 0,
vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: aligned.");
break;
@ -1006,7 +1006,7 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
unaligned_store, stmt_info,
DR_MISALIGNMENT (dr), vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: unaligned supported by "
"hardware.");
@ -1017,7 +1017,7 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
{
*inside_cost = VECT_MAX_COST;
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vect_model_store_cost: unsupported access.");
break;
@ -1076,7 +1076,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
inside_cost += record_stmt_cost (body_cost_vec, nstmts, vec_perm,
stmt_info, 0, vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: strided group_size = %d .",
group_size);
@ -1100,7 +1100,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
&inside_cost, &prologue_cost,
prologue_cost_vec, body_cost_vec, true);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
@ -1127,7 +1127,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
stmt_info, 0, vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: aligned.");
@ -1140,7 +1140,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
unaligned_load, stmt_info,
DR_MISALIGNMENT (dr), vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: unaligned supported by "
"hardware.");
@ -1161,7 +1161,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
*inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
stmt_info, 0, vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: explicit realign");
@ -1169,7 +1169,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
}
case dr_explicit_realign_optimized:
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: unaligned software "
"pipelined.");
@ -1197,7 +1197,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
stmt_info, 0, vect_body);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: explicit realign optimized");
@ -1208,7 +1208,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
{
*inside_cost = VECT_MAX_COST;
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vect_model_load_cost: unsupported access.");
break;
@ -1258,7 +1258,7 @@ vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
}
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"created new init_stmt: ");
@ -1340,7 +1340,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
bool is_simple_use;
tree vector_type;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_vec_def_for_operand: ");
@ -1350,7 +1350,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
&def_stmt, &def, &dt);
gcc_assert (is_simple_use);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
int loc_printed = 0;
if (def)
@ -1382,7 +1382,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
*scalar_def = op;
/* Create 'vect_cst_ = {cst,cst,...,cst}' */
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Create vector_cst. nunits = %d", nunits);
@ -1399,7 +1399,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
*scalar_def = def;
/* Create 'vec_inv = {inv,inv,..,inv}' */
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.");
return vect_init_vector (stmt, def, vector_type, NULL);
@ -1661,7 +1661,7 @@ vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
bb_vinfo));
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
@ -1764,7 +1764,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (rhs_type
&& !types_compatible_p (rhs_type, TREE_TYPE (op)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"argument types differ.");
return false;
@ -1775,7 +1775,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[i], &opvectype))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -1786,7 +1786,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
else if (opvectype
&& opvectype != vectype_in)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"argument vector types differ.");
return false;
@ -1800,7 +1800,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
gcc_assert (vectype_in);
if (!vectype_in)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
@ -1829,7 +1829,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
if (fndecl == NULL_TREE)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"function is not vectorizable.");
@ -1852,7 +1852,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ===");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
return true;
@ -1860,7 +1860,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/** Transform. **/
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform call.");
/* Handle def. */
@ -2375,7 +2375,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
&& (TYPE_PRECISION (rhs_type)
!= GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"type conversion to/from bit-precision unsupported.");
return false;
@ -2385,7 +2385,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -2407,7 +2407,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
if (!ok)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -2422,7 +2422,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
gcc_assert (vectype_in);
if (!vectype_in)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
@ -2466,7 +2466,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
break;
/* FALLTHRU */
unsupported:
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"conversion not supported by target.");
return false;
@ -2565,7 +2565,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
if (!vec_stmt) /* transformation not required. */
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_conversion ===");
if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
@ -2588,7 +2588,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
}
/** Transform. **/
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform conversion. ncopies = %d.", ncopies);
@ -2941,7 +2941,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -2970,7 +2970,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
> TYPE_PRECISION (TREE_TYPE (op)))
&& TYPE_UNSIGNED (TREE_TYPE (op))))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"type conversion to/from bit-precision "
"unsupported.");
@ -2980,7 +2980,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_assignment ===");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
@ -2988,7 +2988,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
}
/** Transform. **/
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.");
/* Handle def. */
@ -3135,7 +3135,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bit-precision shifts not supported.");
return false;
@ -3145,7 +3145,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -3158,7 +3158,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
gcc_assert (vectype);
if (!vectype)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
return false;
@ -3173,7 +3173,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt[1], &op1_vectype))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -3218,7 +3218,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
}
else
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"operand mode requires invariant argument.");
return false;
@ -3228,7 +3228,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (!scalar_shift_arg)
{
optab = optab_for_tree_code (code, vectype, optab_vector);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/vector shift/rotate found.");
@ -3237,7 +3237,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (op1_vectype == NULL_TREE
|| TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unusable type for last operand in"
" vector/vector shift/rotate.");
@ -3252,7 +3252,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (optab
&& optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/scalar shift/rotate found.");
}
@ -3265,7 +3265,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
scalar_shift_arg = false;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/vector shift/rotate found.");
@ -3282,7 +3282,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
&& TYPE_MODE (TREE_TYPE (vectype))
!= TYPE_MODE (TREE_TYPE (op1)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unusable type for last operand in"
" vector/vector shift/rotate.");
@ -3302,7 +3302,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
/* Supportable by target? */
if (!optab)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.");
return false;
@ -3311,7 +3311,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
icode = (int) optab_handler (optab, vec_mode);
if (icode == CODE_FOR_nothing)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.");
/* Check only during analysis. */
@ -3319,7 +3319,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
|| (vf < vect_min_worthwhile_factor (code)
&& !vec_stmt))
return false;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "proceeding using word mode.");
}
@ -3328,7 +3328,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
&& vf < vect_min_worthwhile_factor (code)
&& !vec_stmt)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.");
return false;
@ -3337,7 +3337,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_shift ===");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
return true;
@ -3345,7 +3345,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
/** Transform. **/
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform binary/unary operation.");
@ -3382,7 +3382,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
optab_op2_mode = insn_data[icode].operand[2].mode;
if (!VECTOR_MODE_P (optab_op2_mode))
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"operand 1 using scalar mode.");
vec_oprnd1 = op1;
@ -3510,7 +3510,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
op_type = TREE_CODE_LENGTH (code);
if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"num. args = %d (not unary/binary/ternary op).",
op_type);
@ -3529,7 +3529,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
&& code != BIT_XOR_EXPR
&& code != BIT_AND_EXPR)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bit-precision arithmetic not supported.");
return false;
@ -3539,7 +3539,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -3552,7 +3552,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
gcc_assert (vectype);
if (!vectype)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
@ -3574,7 +3574,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt[1]))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -3586,7 +3586,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt[2]))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -3628,7 +3628,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
optab = optab_for_tree_code (code, vectype, optab_default);
if (!optab)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.");
return false;
@ -3638,14 +3638,14 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
if (icode == CODE_FOR_nothing)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.");
/* Check only during analysis. */
if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
|| (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
return false;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "proceeding using word mode.");
}
@ -3654,7 +3654,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
&& !vec_stmt
&& vf < vect_min_worthwhile_factor (code))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.");
return false;
@ -3663,7 +3663,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_operation ===");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
@ -3672,7 +3672,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
/** Transform. **/
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform binary/unary operation.");
@ -3860,7 +3860,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* FORNOW. This restriction should be relaxed. */
if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.");
return false;
@ -3894,7 +3894,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
&def, &dt))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -3915,7 +3915,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
size_zero_node) < 0)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step for store.");
return false;
@ -3946,7 +3946,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
return false;
@ -4008,7 +4008,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
group_size = vec_num = 1;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform store. ncopies = %d", ncopies);
@ -4396,7 +4396,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/* FORNOW. This restriction should be relaxed. */
if (nested_in_vect_loop && ncopies > 1)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.");
return false;
@ -4436,7 +4436,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
(e.g. - data copies). */
if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Aligned load, but unsupported type.");
return false;
@ -4472,7 +4472,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
&def_stmt, &def, &gather_dt,
&gather_off_vectype))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"gather index use not simple.");
return false;
@ -4492,7 +4492,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
size_zero_node) < 0;
if (negative && ncopies > 1)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types with negative step.");
return false;
@ -4505,14 +4505,14 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (alignment_support_scheme != dr_aligned
&& alignment_support_scheme != dr_unaligned_supported)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step but alignment required.");
return false;
}
if (!perm_mask_for_reverse (vectype))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step and reversing not supported.");
return false;
@ -4527,7 +4527,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
return true;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform load. ncopies = %d", ncopies);
@ -5334,7 +5334,7 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
/* FORNOW: not yet supported. */
if (STMT_VINFO_LIVE_P (stmt_info))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"value used after loop.");
return false;
@ -5534,7 +5534,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
gimple pattern_stmt;
gimple_seq pattern_def_seq;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
@ -5542,7 +5542,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
if (gimple_has_volatile_ops (stmt))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: stmt has volatile operands");
@ -5575,7 +5575,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
/* Analyze PATTERN_STMT instead of the original stmt. */
stmt = pattern_stmt;
stmt_info = vinfo_for_stmt (pattern_stmt);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
@ -5584,7 +5584,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
}
else
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.");
return true;
@ -5597,7 +5597,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
{
/* Analyze PATTERN_STMT too. */
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
@ -5621,7 +5621,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
{
/* Analyze def stmt of STMT if it's a pattern stmt. */
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def statement: ");
@ -5660,7 +5660,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
gcc_assert (PURE_SLP_STMT (stmt_info));
scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
@ -5670,7 +5670,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not SLPed: unsupported data-type ");
@ -5680,7 +5680,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
return false;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
@ -5724,7 +5724,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
if (!ok)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: relevant stmt not ");
@ -5746,7 +5746,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
if (!ok)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: live stmt not ");
@ -5846,7 +5846,7 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
default:
if (!STMT_VINFO_LIVE_P (stmt_info))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"stmt not supported.");
gcc_unreachable ();
@ -5871,7 +5871,7 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
tree scalar_dest;
gimple exit_phi;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Record the vdef for outer-loop vectorization.");
@ -6108,7 +6108,7 @@ get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
return NULL_TREE;
vectype = build_vector_type (scalar_type, nunits);
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype with %d units of type ", nunits);
@ -6118,7 +6118,7 @@ get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
if (!vectype)
return NULL_TREE;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
@ -6127,7 +6127,7 @@ get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
if (!VECTOR_MODE_P (TYPE_MODE (vectype))
&& !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"mode not supported by target.");
return NULL_TREE;
@ -6198,7 +6198,7 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
*def_stmt = NULL;
*def = NULL_TREE;
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vect_is_simple_use: operand ");
@ -6220,14 +6220,14 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
if (TREE_CODE (operand) == PAREN_EXPR)
{
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.");
operand = TREE_OPERAND (operand, 0);
}
if (TREE_CODE (operand) != SSA_NAME)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not ssa-name.");
return false;
@ -6236,13 +6236,13 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
*def_stmt = SSA_NAME_DEF_STMT (operand);
if (*def_stmt == NULL)
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no def_stmt.");
return false;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
@ -6274,13 +6274,13 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
&& *dt == vect_double_reduction_def
&& gimple_code (stmt) != GIMPLE_PHI))
{
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported pattern.");
return false;
}
if (dump_kind_p (MSG_NOTE))
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "type of def: %d.", *dt);
switch (gimple_code (*def_stmt))
@ -6299,7 +6299,7 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
break;
/* FALLTHRU */
default:
if (dump_kind_p (MSG_MISSED_OPTIMIZATION))
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported defining stmt: ");
return false;
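The hunks above (tree-vect-stmts.c) apply the same substitution, and also show the composite-dump idiom: when one diagnostic is assembled from several dump_* calls, a single dump_enabled_p () guard wraps the whole block rather than each call re-testing the streams. Sketch of the pattern as it appears in vect_finish_stmt_generation above:

  /* One guard around the whole composite dump.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }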


@ -107,7 +107,7 @@ vectorize_loops (void)
loop_vec_info loop_vinfo;
vect_location = find_loop_location (loop);
if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOC
&& dump_kind_p (MSG_ALL))
&& dump_enabled_p ())
dump_printf (MSG_ALL, "\nAnalyzing loop at %s:%d\n",
LOC_FILE (vect_location), LOC_LINE (vect_location));
@ -118,7 +118,7 @@ vectorize_loops (void)
continue;
if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOC
&& dump_kind_p (MSG_ALL))
&& dump_enabled_p ())
dump_printf (MSG_ALL, "\n\nVectorizing loop at %s:%d\n",
LOC_FILE (vect_location), LOC_LINE (vect_location));
vect_transform_loop (loop_vinfo);
@ -128,8 +128,8 @@ vectorize_loops (void)
vect_location = UNKNOWN_LOC;
statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
if (dump_kind_p (MSG_ALL)
|| (num_vectorized_loops > 0 && dump_kind_p (MSG_ALL)))
if (dump_enabled_p ()
|| (num_vectorized_loops > 0 && dump_enabled_p ()))
dump_printf_loc (MSG_ALL, vect_location,
"vectorized %u loops in function.\n",
num_vectorized_loops);
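An aside on the guard just above: after the substitution it reads dump_enabled_p () || (num_vectorized_loops > 0 && dump_enabled_p ()), which has the shape A || (B && A) and by the absorption law is equivalent to plain A. A possible follow-up simplification (not part of this commit) would be:

  /* Equivalent guard: the second disjunct is redundant.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_ALL, vect_location,
                     "vectorized %u loops in function.\n",
                     num_vectorized_loops);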
@ -170,7 +170,7 @@ execute_vect_slp (void)
if (vect_slp_analyze_bb (bb))
{
vect_slp_transform_bb (bb);
if (dump_kind_p (MSG_OPTIMIZED_LOCATIONS))
if (dump_enabled_p ())
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
"basic block vectorized using SLP\n");
}