Enhances the gcov program summary by adding a histogram of arc counter entries.

Enhances the gcov program summary by adding a histogram of arc counter
entries. This is used to compute working set information in the compiler
for use by optimizations that need information on hot vs cold counter
values or the rough working set size in terms of the number of counters.
Each working set data point is the minimum counter value and number of
counters required to reach a given percentage of the cumulative counter
sum across the profiled execution (sum_all in the program summary).
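
For illustration (hypothetical numbers): with sum_all = 1000 and arc
counters 500, 300, 150, 30 and 20, the 80% working set data point would be
num_counters = 2 and min_counter = 300, since the two hottest counters
(500 + 300 = 800) form the smallest set whose sum reaches 80% of sum_all.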

2012-09-04  Teresa Johnson  <tejohnson@google.com>

	* libgcc/libgcov.c (struct gcov_summary_buffer): New structure.
	(gcov_histogram_insert): New function.
	(gcov_compute_histogram): Ditto.
	(gcov_exit): Invoke gcov_compute_histogram, and perform merging of
	histograms during summary merging.
	* gcc/gcov-io.c (gcov_write_summary): Write out non-zero histogram
	entries to function summary along with an occupancy bit vector.
	(gcov_read_summary): Read in the histogram entries.
	(gcov_histo_index): New function.
	(gcov_histogram_merge): Ditto.
	* gcc/gcov-io.h (gcov_type_unsigned): New type.
	(struct gcov_bucket_type): Ditto.
	(struct gcov_ctr_summary): Include histogram.
	(GCOV_TAG_SUMMARY_LENGTH): Update to include histogram entries.
	(GCOV_HISTOGRAM_SIZE): New macro.
	(GCOV_HISTOGRAM_BITVECTOR_SIZE): Ditto.
	* gcc/profile.c (NUM_GCOV_WORKING_SETS): New macro.
	(gcov_working_sets): New global variable.
	(compute_working_sets): New function.
	(find_working_set): Ditto.
	(get_exec_counts): Invoke compute_working_sets.
	* gcc/coverage.c (read_counts_file): Merge histograms, and
	fix bug with accessing summary info for non-summable counters.
	* gcc/basic-block.h (gcov_type_unsigned): New type.
	(struct gcov_working_set_info): Ditto.
	(find_working_set): Declare.
	* gcc/gcov-dump.c (tag_summary): Dump out histogram.

From-SVN: r190952

gcc/basic-block.h

@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see
flow graph is manipulated by various optimizations. A signed type
makes those easy to detect. */
typedef HOST_WIDEST_INT gcov_type;
typedef unsigned HOST_WIDEST_INT gcov_type_unsigned;
/* Control flow edge information. */
struct GTY((user)) edge_def {
@ -91,6 +92,16 @@ enum cfg_edge_flags {
profile.c. */
extern const struct gcov_ctr_summary *profile_info;
/* Working set size statistics for a given percentage of the entire
profile (sum_all from the counter summary). */
typedef struct gcov_working_set_info
{
/* Number of hot counters included in this working set. */
unsigned num_counters;
/* Smallest counter included in this working set. */
gcov_type min_counter;
} gcov_working_set_t;
/* Declared in cfgloop.h. */
struct loop;
@ -897,4 +908,7 @@ extern void rtl_profile_for_bb (basic_block);
extern void rtl_profile_for_edge (edge);
extern void default_rtl_profile (void);
/* In profile.c. */
extern gcov_working_set_t *find_working_set(unsigned pct_times_10);
#endif /* GCC_BASIC_BLOCK_H */

gcc/coverage.c

@ -248,6 +248,13 @@ read_counts_file (void)
summary.ctrs[ix].run_max = sum.ctrs[ix].run_max;
summary.ctrs[ix].sum_max += sum.ctrs[ix].sum_max;
}
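/* The first program summary seen simply initializes the merged arc
histogram; histograms from subsequent summaries are merged into it. */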
if (new_summary)
memcpy (summary.ctrs[GCOV_COUNTER_ARCS].histogram,
sum.ctrs[GCOV_COUNTER_ARCS].histogram,
sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
else
gcov_histogram_merge (summary.ctrs[GCOV_COUNTER_ARCS].histogram,
sum.ctrs[GCOV_COUNTER_ARCS].histogram);
new_summary = 0;
}
else if (GCOV_TAG_IS_COUNTER (tag) && fn_ident)
@ -268,8 +275,9 @@ read_counts_file (void)
entry->ctr = elt.ctr;
entry->lineno_checksum = lineno_checksum;
entry->cfg_checksum = cfg_checksum;
entry->summary = summary.ctrs[elt.ctr];
entry->summary.num = n_counts;
if (elt.ctr < GCOV_COUNTERS_SUMMABLE)
entry->summary = summary.ctrs[elt.ctr];
entry->summary.num = n_counts;
entry->counts = XCNEWVEC (gcov_type, n_counts);
}
else if (entry->lineno_checksum != lineno_checksum

gcc/gcov-dump.c

@ -447,7 +447,8 @@ tag_summary (const char *filename ATTRIBUTE_UNUSED,
unsigned tag ATTRIBUTE_UNUSED, unsigned length ATTRIBUTE_UNUSED)
{
struct gcov_summary summary;
unsigned ix;
unsigned ix, h_ix;
gcov_bucket_type *histo_bucket;
gcov_read_summary (&summary);
printf (" checksum=0x%08x", summary.checksum);
@ -465,5 +466,24 @@ tag_summary (const char *filename ATTRIBUTE_UNUSED,
(HOST_WIDEST_INT)summary.ctrs[ix].run_max);
printf (", sum_max=" HOST_WIDEST_INT_PRINT_DEC,
(HOST_WIDEST_INT)summary.ctrs[ix].sum_max);
if (ix != GCOV_COUNTER_ARCS)
continue;
printf ("\n");
print_prefix (filename, 0, 0);
printf ("\t\tcounter histogram:");
for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
{
histo_bucket = &summary.ctrs[ix].histogram[h_ix];
if (!histo_bucket->num_counters)
continue;
printf ("\n");
print_prefix (filename, 0, 0);
printf ("\t\t%d: num counts=%u, min counter="
HOST_WIDEST_INT_PRINT_DEC ", cum_counter="
HOST_WIDEST_INT_PRINT_DEC,
h_ix, histo_bucket->num_counters,
(HOST_WIDEST_INT)histo_bucket->min_value,
(HOST_WIDEST_INT)histo_bucket->cum_value);
}
}
}

gcc/gcov-io.c

@ -368,10 +368,25 @@ gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
GCOV_LINKAGE void
gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
{
unsigned ix;
unsigned ix, h_ix, bv_ix, h_cnt = 0;
const struct gcov_ctr_summary *csum;
unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH);
/* Count number of non-zero histogram entries, and fill in a bit vector
of non-zero indices. The histogram is only currently computed for arc
counters. */
for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
histo_bitvector[bv_ix] = 0;
csum = &summary->ctrs[GCOV_COUNTER_ARCS];
for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
{
if (csum->histogram[h_ix].num_counters > 0)
{
histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
h_cnt++;
}
}
gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH(h_cnt));
gcov_write_unsigned (summary->checksum);
for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
{
@ -380,6 +395,22 @@ gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
gcov_write_counter (csum->sum_all);
gcov_write_counter (csum->run_max);
gcov_write_counter (csum->sum_max);
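/* Counters other than arcs have no histogram; emit an all-zero bit
vector for them so every counter summary keeps the same layout. */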
if (ix != GCOV_COUNTER_ARCS)
{
for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
gcov_write_unsigned (0);
continue;
}
for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
gcov_write_unsigned (histo_bitvector[bv_ix]);
for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
{
if (!csum->histogram[h_ix].num_counters)
continue;
gcov_write_unsigned (csum->histogram[h_ix].num_counters);
gcov_write_counter (csum->histogram[h_ix].min_value);
gcov_write_counter (csum->histogram[h_ix].cum_value);
}
}
}
#endif /* IN_LIBGCOV */
@ -488,8 +519,10 @@ gcov_read_string (void)
GCOV_LINKAGE void
gcov_read_summary (struct gcov_summary *summary)
{
unsigned ix;
unsigned ix, h_ix, bv_ix, h_cnt = 0;
struct gcov_ctr_summary *csum;
unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
unsigned cur_bitvector;
summary->checksum = gcov_read_unsigned ();
for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
@ -499,6 +532,43 @@ gcov_read_summary (struct gcov_summary *summary)
csum->sum_all = gcov_read_counter ();
csum->run_max = gcov_read_counter ();
csum->sum_max = gcov_read_counter ();
memset (csum->histogram, 0,
sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
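/* Read the occupancy bit vector; its population count gives the number
of non-zero histogram entries that follow. */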
for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
{
histo_bitvector[bv_ix] = gcov_read_unsigned ();
h_cnt += __builtin_popcountll (histo_bitvector[bv_ix]);
}
bv_ix = 0;
h_ix = 0;
cur_bitvector = 0;
while (h_cnt--)
{
/* Find the index corresponding to the next entry we will read in.
First find the next non-zero bitvector and re-initialize
the histogram index accordingly, then right shift and increment
the index until we find a set bit. */
while (!cur_bitvector)
{
h_ix = bv_ix * 32;
cur_bitvector = histo_bitvector[bv_ix++];
gcc_assert(bv_ix <= GCOV_HISTOGRAM_BITVECTOR_SIZE);
}
while (!(cur_bitvector & 0x1))
{
h_ix++;
cur_bitvector >>= 1;
}
gcc_assert(h_ix < GCOV_HISTOGRAM_SIZE);
csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
csum->histogram[h_ix].min_value = gcov_read_counter ();
csum->histogram[h_ix].cum_value = gcov_read_counter ();
/* Shift off the index we are done with and increment to the
corresponding next histogram entry. */
cur_bitvector >>= 1;
h_ix++;
}
}
}
@ -550,3 +620,184 @@ gcov_time (void)
return status.st_mtime;
}
#endif /* IN_GCOV */
#if IN_LIBGCOV || !IN_GCOV
/* Determine the index into histogram for VALUE. */
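/* For example, a value of 3 maps to bucket 3; a value of 4 (highest set
bit at position 2, next two bits 00) maps to bucket 4; a value of 10
(binary 1010, highest set bit at position 3, next two bits 01) maps to
bucket (3 - 1) * 4 + 1 == 9; and the largest 64-bit values map to
buckets 248 through 251, below GCOV_HISTOGRAM_SIZE. */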
static unsigned
gcov_histo_index(gcov_type value)
{
gcov_type_unsigned v = (gcov_type_unsigned)value;
unsigned r = 0;
unsigned prev2bits = 0;
/* Find index into log2 scale histogram, where each of the log2
sized buckets is divided into 4 linear sub-buckets for better
focus in the higher buckets. */
/* Find the place of the most-significant bit set. */
if (v > 0)
r = 63 - __builtin_clzll (v);
/* If at most the 2 least significant bits are set (value is
0 - 3) then that value is our index into the lowest set of
four buckets. */
if (r < 2)
return (unsigned)value;
gcc_assert (r < 64);
/* Find the two next most significant bits to determine which
of the four linear sub-buckets to select. */
prev2bits = (v >> (r - 2)) & 0x3;
/* Finally, compose the final bucket index from the log2 index and
the next 2 bits. The minimum r value at this point is 2 since we
returned above if r was 2 or more, so the minimum bucket at this
point is 4. */
return (r - 1) * 4 + prev2bits;
}
/* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
the same relative order in both histograms, and are matched up
and merged in reverse order. Each counter is assigned an equal portion of
its entry's original cumulative counter value when computing the
new merged cum_value. */
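/* For example, merging a target entry with num_counters 2, min_value 5
and cum_value 20 against a source entry with num_counters 2, min_value 7
and cum_value 30 yields 2 merged counters with min_value 12 and
cum_value 50, recorded in bucket gcov_histo_index (12) == 10. */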
static void gcov_histogram_merge(gcov_bucket_type *tgt_histo,
gcov_bucket_type *src_histo)
{
int src_i, tgt_i, tmp_i = 0;
unsigned src_num, tgt_num, merge_num;
gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
gcov_type merge_min;
gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
int src_done = 0;
memset(tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
/* Assume that the counters are in the same relative order in both
histograms. Walk the histograms from largest to smallest entry,
matching up and combining counters in order. */
src_num = 0;
src_cum = 0;
src_i = GCOV_HISTOGRAM_SIZE - 1;
for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
{
tgt_num = tgt_histo[tgt_i].num_counters;
tgt_cum = tgt_histo[tgt_i].cum_value;
/* Keep going until all of the target histogram's counters at this
position have been matched and merged with counters from the
source histogram. */
while (tgt_num > 0 && !src_done)
{
/* If this is either the first time through this loop or we just
exhausted the previous non-zero source histogram entry, look
for the next non-zero source histogram entry. */
if (!src_num)
{
/* Locate the next non-zero entry. */
while (src_i >= 0 && !src_histo[src_i].num_counters)
src_i--;
/* If source histogram has fewer counters, then just copy over the
remaining target counters and quit. */
if (src_i < 0)
{
tmp_histo[tgt_i].num_counters += tgt_num;
tmp_histo[tgt_i].cum_value += tgt_cum;
if (!tmp_histo[tgt_i].min_value ||
tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
while (--tgt_i >= 0)
{
tmp_histo[tgt_i].num_counters
+= tgt_histo[tgt_i].num_counters;
tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
if (!tmp_histo[tgt_i].min_value ||
tgt_histo[tgt_i].min_value
< tmp_histo[tgt_i].min_value)
tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
}
src_done = 1;
break;
}
src_num = src_histo[src_i].num_counters;
src_cum = src_histo[src_i].cum_value;
}
/* The number of counters to merge on this pass is the minimum
of the remaining counters from the current target and source
histogram entries. */
merge_num = tgt_num;
if (src_num < merge_num)
merge_num = src_num;
/* The merged min_value is the sum of the min_values from target
and source. */
merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;
/* Compute the portion of source and target entries' cum_value
that will be apportioned to the counters being merged.
The total remaining cum_value from each entry is divided
equally among the counters from that histogram entry if we
are not merging all of them. */
merge_src_cum = src_cum;
if (merge_num < src_num)
merge_src_cum = merge_num * src_cum / src_num;
merge_tgt_cum = tgt_cum;
if (merge_num < tgt_num)
merge_tgt_cum = merge_num * tgt_cum / tgt_num;
/* The merged cum_value is the sum of the source and target
components. */
merge_cum = merge_src_cum + merge_tgt_cum;
/* Update the remaining number of counters and cum_value left
to be merged from this source and target entry. */
src_cum -= merge_src_cum;
tgt_cum -= merge_tgt_cum;
src_num -= merge_num;
tgt_num -= merge_num;
/* The merged counters get placed in the new merged histogram
at the entry for the merged min_value. */
tmp_i = gcov_histo_index(merge_min);
gcc_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
tmp_histo[tmp_i].num_counters += merge_num;
tmp_histo[tmp_i].cum_value += merge_cum;
if (!tmp_histo[tmp_i].min_value ||
merge_min < tmp_histo[tmp_i].min_value)
tmp_histo[tmp_i].min_value = merge_min;
/* Ensure the search for the next non-zero src_histo entry starts
at the next smallest histogram bucket. */
if (!src_num)
src_i--;
}
}
gcc_assert (tgt_i < 0);
/* In the case where there were more counters in the source histogram,
accumulate the remaining unmerged cumulative counter values. Add
those to the smallest non-zero target histogram entry. Otherwise,
the total cumulative counter values in the histogram will be smaller
than the sum_all stored in the summary, which will complicate
computing the working set information from the histogram later on. */
if (src_num)
src_i--;
while (src_i >= 0)
{
src_cum += src_histo[src_i].cum_value;
src_i--;
}
/* At this point, tmp_i should be the smallest non-zero entry in the
tmp_histo. */
gcc_assert(tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
&& tmp_histo[tmp_i].num_counters > 0);
tmp_histo[tmp_i].cum_value += src_cum;
/* Finally, copy the merged histogram into tgt_histo. */
memcpy(tgt_histo, tmp_histo, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
}
#endif /* IN_LIBGCOV || !IN_GCOV */

gcc/gcov-io.h

@ -139,7 +139,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
counts: header int64:count*
summary: int32:checksum {count-summary}GCOV_COUNTERS_SUMMABLE
count-summary: int32:num int32:runs int64:sum
int64:max int64:sum_max
int64:max int64:sum_max histogram
histogram: {int32:bitvector}8 histogram-buckets*
histogram-buckets: int32:num int64:min int64:sum
The ANNOUNCE_FUNCTION record is the same as that in the note file,
but without the source location. The COUNTS gives the
@ -171,8 +173,10 @@ typedef unsigned gcov_unsigned_t __attribute__ ((mode (SI)));
typedef unsigned gcov_position_t __attribute__ ((mode (SI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (DI)));
typedef unsigned gcov_type_unsigned __attribute__ ((mode (DI)));
#else
typedef signed gcov_type __attribute__ ((mode (SI)));
typedef unsigned gcov_type_unsigned __attribute__ ((mode (SI)));
#endif
#else
#if BITS_PER_UNIT == 16
@ -180,16 +184,20 @@ typedef unsigned gcov_unsigned_t __attribute__ ((mode (HI)));
typedef unsigned gcov_position_t __attribute__ ((mode (HI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (SI)));
typedef unsigned gcov_type_unsigned __attribute__ ((mode (SI)));
#else
typedef signed gcov_type __attribute__ ((mode (HI)));
typedef unsigned gcov_type_unsigned __attribute__ ((mode (HI)));
#endif
#else
typedef unsigned gcov_unsigned_t __attribute__ ((mode (QI)));
typedef unsigned gcov_position_t __attribute__ ((mode (QI)));
#if LONG_LONG_TYPE_SIZE > 32
typedef signed gcov_type __attribute__ ((mode (HI)));
typedef unsigned gcov_type_unsigned __attribute__ ((mode (HI)));
#else
typedef signed gcov_type __attribute__ ((mode (QI)));
typedef unsigned gcov_type_unsigned __attribute__ ((mode (QI)));
#endif
#endif
#endif
@ -210,6 +218,7 @@ typedef unsigned gcov_position_t;
#if IN_GCOV
#define GCOV_LINKAGE static
typedef HOST_WIDEST_INT gcov_type;
typedef unsigned HOST_WIDEST_INT gcov_type_unsigned;
#if IN_GCOV > 0
#include <sys/types.h>
#endif
@ -309,8 +318,9 @@ typedef HOST_WIDEST_INT gcov_type;
#define GCOV_TAG_COUNTER_NUM(LENGTH) ((LENGTH) / 2)
#define GCOV_TAG_OBJECT_SUMMARY ((gcov_unsigned_t)0xa1000000) /* Obsolete */
#define GCOV_TAG_PROGRAM_SUMMARY ((gcov_unsigned_t)0xa3000000)
#define GCOV_TAG_SUMMARY_LENGTH \
(1 + GCOV_COUNTERS_SUMMABLE * (2 + 3 * 2))
#define GCOV_TAG_SUMMARY_LENGTH(NUM) \
(1 + GCOV_COUNTERS_SUMMABLE * (10 + 3 * 2) + (NUM) * 5)
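/* The leading 1 is the summary checksum word. Each counter summary
contributes 2 words (num, runs), 8 words for the histogram bit vector
and 3 gcov_type values (sum_all, run_max, sum_max) of 2 words each;
each non-zero histogram entry adds 1 word (num_counters) plus 2
gcov_type values (min_value, cum_value), i.e. 5 words. */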
/* Counters that are collected. */
#define GCOV_COUNTER_ARCS 0 /* Arc transitions. */
@ -389,6 +399,29 @@ typedef HOST_WIDEST_INT gcov_type;
/* Structured records. */
/* Structure used for each bucket of the log2 histogram of counter values. */
typedef struct
{
/* Number of counters whose profile count falls within the bucket. */
gcov_unsigned_t num_counters;
/* Smallest profile count included in this bucket. */
gcov_type min_value;
/* Cumulative value of the profile counts in this bucket. */
gcov_type cum_value;
} gcov_bucket_type;
/* For a log2 scale histogram with each range split into 4
linear sub-ranges, there will be at most 64 (the maximum gcov_type bit size) - 1
log2 ranges, since the lowest 2 log2 values share the lowest set of 4 linear
sub-ranges (values 0 - 3). This gives 252 total entries (63 * 4). */
#define GCOV_HISTOGRAM_SIZE 252
/* How many unsigned ints are required to hold a bit vector of non-zero
histogram entries when the histogram is written to the gcov file.
This is essentially a ceiling divide by 32 bits. */
#define GCOV_HISTOGRAM_BITVECTOR_SIZE (GCOV_HISTOGRAM_SIZE + 31) / 32
/* Cumulative counter data. */
struct gcov_ctr_summary
{
@ -397,6 +430,8 @@ struct gcov_ctr_summary
gcov_type sum_all; /* sum of all counters accumulated. */
gcov_type run_max; /* maximum value on a single run. */
gcov_type sum_max; /* sum of individual run max values. */
gcov_bucket_type histogram[GCOV_HISTOGRAM_SIZE]; /* histogram of
counter values. */
};
/* Object & program summary record. */

gcc/profile.c

@ -84,6 +84,15 @@ struct bb_info {
const struct gcov_ctr_summary *profile_info;
/* Number of data points in the working set summary array. Using 128
provides information for at least every 1% increment of the total
profile size. The last entry is hardwired to 99.9% of the total. */
#define NUM_GCOV_WORKING_SETS 128
/* Counter working set information computed from the current counter
summary. Not initialized unless profile_info summary is non-NULL. */
static gcov_working_set_t gcov_working_sets[NUM_GCOV_WORKING_SETS];
/* Collect statistics on the performance of this pass for the entire source
file. */
@ -192,6 +201,152 @@ instrument_values (histogram_values values)
}
/* Compute the working set information from the counter histogram in
the profile summary. This is an array of information corresponding to a
range of percentages of the total execution count (sum_all), and includes
the number of counters required to cover that working set percentage and
the minimum counter value in that working set. */
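/* For example (hypothetical numbers), with sum_all == 1048576 and 128
working set entries, ws_cum_hotness_incr is 8192 and the target
cumulative values are 8192, 16384, ..., with the last entry replaced by
1048576 - 1024 == 1047552, i.e. roughly 99.9% of sum_all. */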
static void
compute_working_sets (void)
{
gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
gcov_type ws_cum_hotness_incr;
gcov_type cum, tmp_cum;
const gcov_bucket_type *histo_bucket;
unsigned ws_ix, c_num, count, pctinc, pct;
int h_ix;
gcov_working_set_t *ws_info;
if (!profile_info)
return;
/* Compute the amount of sum_all that the cumulative hotness grows
by in each successive working set entry, which depends on the
number of working set entries. */
ws_cum_hotness_incr = profile_info->sum_all / NUM_GCOV_WORKING_SETS;
/* Next fill in an array of the cumulative hotness values corresponding
to each working set summary entry we are going to compute below.
Skip 0% statistics, which can be extrapolated from the
rest of the summary data. */
cum = ws_cum_hotness_incr;
for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
ws_ix++, cum += ws_cum_hotness_incr)
working_set_cum_values[ws_ix] = cum;
/* The last summary entry is reserved for (roughly) 99.9% of the
working set. Divide by 1024 so it becomes a shift, which gives
almost exactly 99.9%. */
working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
= profile_info->sum_all - profile_info->sum_all/1024;
/* Next, walk through the histogram in descending order of hotness
and compute the statistics for the working set summary array.
As histogram entries are accumulated, we check to see which
working set entries have had their expected cum_value reached
and fill them in, walking the working set entries in increasing
size of cum_value. */
ws_ix = 0; /* The current entry into the working set array. */
cum = 0; /* The current accumulated counter sum. */
count = 0; /* The current accumulated count of block counters. */
for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
{
histo_bucket = &profile_info->histogram[h_ix];
/* If we haven't reached the required cumulative counter value for
the current working set percentage, simply accumulate this histogram
entry into the running sums and continue to the next histogram
entry. */
if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
{
cum += histo_bucket->cum_value;
count += histo_bucket->num_counters;
continue;
}
/* If adding the current histogram entry's cumulative counter value
causes us to exceed the current working set size, then estimate
how many of this histogram entry's counter values are required to
reach the working set size, and fill in working set entries
as we reach their expected cumulative value. */
for (c_num = 0, tmp_cum = cum;
c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
c_num++)
{
count++;
/* If we haven't reached the last histogram entry counter, add
in the minimum value again. This will underestimate the
cumulative sum so far, because many of the counter values in this
entry may have been larger than the minimum. We could add in the
average value every time, but that would require an expensive
divide operation. */
if (c_num + 1 < histo_bucket->num_counters)
tmp_cum += histo_bucket->min_value;
/* If we have reached the last histogram entry counter, then add
in the entire cumulative value. */
else
tmp_cum = cum + histo_bucket->cum_value;
/* Next walk through successive working set entries and fill in
the statistics for any whose size we have reached by accumulating
this histogram counter. */
while (tmp_cum >= working_set_cum_values[ws_ix]
&& ws_ix < NUM_GCOV_WORKING_SETS)
{
gcov_working_sets[ws_ix].num_counters = count;
gcov_working_sets[ws_ix].min_counter
= histo_bucket->min_value;
ws_ix++;
}
}
/* Finally, update the running cumulative value since we were
using a temporary above. */
cum += histo_bucket->cum_value;
}
gcc_assert (ws_ix == NUM_GCOV_WORKING_SETS);
if (dump_file)
{
fprintf (dump_file, "Counter working sets:\n");
/* Multiply the percentage by 100 to avoid float. */
pctinc = 100 * 100 / NUM_GCOV_WORKING_SETS;
for (ws_ix = 0, pct = pctinc; ws_ix < NUM_GCOV_WORKING_SETS;
ws_ix++, pct += pctinc)
{
if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
pct = 9990;
ws_info = &gcov_working_sets[ws_ix];
/* Print out the percentage using int arithmetic to avoid float. */
fprintf (dump_file, "\t\t%u.%02u%%: num counts=%u, min counter="
HOST_WIDEST_INT_PRINT_DEC "\n",
pct / 100, pct - (pct / 100 * 100),
ws_info->num_counters,
(HOST_WIDEST_INT)ws_info->min_counter);
}
}
}
/* Given the desired percentage of the full profile (sum_all from the
summary), multiplied by 10 to avoid float in PCT_TIMES_10, returns
the corresponding working set information. If an exact match for
the percentage isn't found, the closest value is used. */
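/* For example, find_working_set (500) returns the entry for roughly 50%
of sum_all (500 * 128 / 1000 == 64, so gcov_working_sets[63]), while
find_working_set (999) or above returns the final, 99.9% entry. */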
gcov_working_set_t *
find_working_set (unsigned pct_times_10)
{
unsigned i;
if (!profile_info)
return NULL;
gcc_assert (pct_times_10 <= 1000);
if (pct_times_10 >= 999)
return &gcov_working_sets[NUM_GCOV_WORKING_SETS - 1];
i = pct_times_10 * NUM_GCOV_WORKING_SETS / 1000;
if (!i)
return &gcov_working_sets[0];
return &gcov_working_sets[i - 1];
}
/* Computes hybrid profile for all matching entries in da_file.
CFG_CHECKSUM is the precomputed checksum for the CFG. */
@ -219,6 +374,8 @@ get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
if (!counts)
return NULL;
compute_working_sets();
if (dump_file && profile_info)
fprintf(dump_file, "Merged %u profiles with maximal count %u.\n",
profile_info->runs, (unsigned) profile_info->sum_max);

libgcc/libgcov.c

@ -97,6 +97,12 @@ struct gcov_fn_buffer
/* note gcov_fn_info ends in a trailing array. */
};
struct gcov_summary_buffer
{
struct gcov_summary_buffer *next;
struct gcov_summary summary;
};
/* Chain of per-object gcov structures. */
static struct gcov_info *gcov_list;
@ -276,6 +282,76 @@ gcov_version (struct gcov_info *ptr, gcov_unsigned_t version,
return 1;
}
/* Insert counter VALUE into HISTOGRAM. */
static void
gcov_histogram_insert(gcov_bucket_type *histogram, gcov_type value)
{
unsigned i;
i = gcov_histo_index(value);
histogram[i].num_counters++;
histogram[i].cum_value += value;
if (value < histogram[i].min_value)
histogram[i].min_value = value;
}
/* Computes a histogram of the arc counters to place in the summary SUM. */
static void
gcov_compute_histogram (struct gcov_summary *sum)
{
struct gcov_info *gi_ptr;
const struct gcov_fn_info *gfi_ptr;
const struct gcov_ctr_info *ci_ptr;
struct gcov_ctr_summary *cs_ptr;
unsigned t_ix, f_ix, ctr_info_ix, ix;
int h_ix;
/* This currently only applies to arc counters. */
t_ix = GCOV_COUNTER_ARCS;
/* First check if there are any counts recorded for this counter. */
cs_ptr = &(sum->ctrs[t_ix]);
if (!cs_ptr->num)
return;
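/* Reset the histogram. min_value starts at run_max, an upper bound on
any arc counter from this run, so the first counter inserted into a
bucket establishes that bucket's minimum. */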
for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
{
cs_ptr->histogram[h_ix].num_counters = 0;
cs_ptr->histogram[h_ix].min_value = cs_ptr->run_max;
cs_ptr->histogram[h_ix].cum_value = 0;
}
/* Walk through all the per-object structures and record each of
the count values in histogram. */
for (gi_ptr = gcov_list; gi_ptr; gi_ptr = gi_ptr->next)
{
if (!gi_ptr->merge[t_ix])
continue;
/* Find the appropriate index into the gcov_ctr_info array
for the counter we are currently working on based on the
existence of the merge function pointer for this object. */
for (ix = 0, ctr_info_ix = 0; ix < t_ix; ix++)
{
if (gi_ptr->merge[ix])
ctr_info_ix++;
}
for (f_ix = 0; f_ix != gi_ptr->n_functions; f_ix++)
{
gfi_ptr = gi_ptr->functions[f_ix];
if (!gfi_ptr || gfi_ptr->key != gi_ptr)
continue;
ci_ptr = &gfi_ptr->ctrs[ctr_info_ix];
for (ix = 0; ix < ci_ptr->num; ix++)
gcov_histogram_insert (cs_ptr->histogram, ci_ptr->values[ix]);
}
}
}
/* Dump the coverage counts. We merge with existing counts when
possible, to avoid growing the .da files ad infinitum. We use this
program's checksum to make sure we only accumulate whole program
@ -347,6 +423,7 @@ gcov_exit (void)
}
}
}
gcov_compute_histogram (&this_prg);
{
/* Check if the level of dirs to strip off specified. */
@ -400,6 +477,8 @@ gcov_exit (void)
const char *fname, *s;
struct gcov_fn_buffer *fn_buffer = 0;
struct gcov_fn_buffer **fn_tail = &fn_buffer;
struct gcov_summary_buffer *next_sum_buffer, *sum_buffer = 0;
struct gcov_summary_buffer **sum_tail = &sum_buffer;
fname = gi_ptr->filename;
@ -482,17 +561,29 @@ gcov_exit (void)
f_ix--;
length = gcov_read_unsigned ();
if (length != GCOV_TAG_SUMMARY_LENGTH)
goto read_mismatch;
gcov_read_summary (&tmp);
if ((error = gcov_is_error ()))
goto read_error;
if (summary_pos || tmp.checksum != crc32)
goto next_summary;
if (summary_pos)
{
/* Save all summaries after the one that will be
merged into below. These will need to be rewritten
as histogram merging may change the number of non-zero
histogram entries that will be emitted, and thus the
size of the merged summary. */
(*sum_tail) = (struct gcov_summary_buffer *)
malloc (sizeof(struct gcov_summary_buffer));
(*sum_tail)->summary = tmp;
(*sum_tail)->next = 0;
sum_tail = &((*sum_tail)->next);
goto next_summary;
}
if (tmp.checksum != crc32)
goto next_summary;
for (t_ix = 0; t_ix != GCOV_COUNTERS_SUMMABLE; t_ix++)
if (tmp.ctrs[t_ix].num != this_prg.ctrs[t_ix].num)
goto next_summary;
goto next_summary;
prg = tmp;
summary_pos = eof_pos;
@ -598,11 +689,16 @@ gcov_exit (void)
if (gi_ptr->merge[t_ix])
{
if (!cs_prg->runs++)
cs_prg->num = cs_tprg->num;
cs_prg->num = cs_tprg->num;
cs_prg->sum_all += cs_tprg->sum_all;
if (cs_prg->run_max < cs_tprg->run_max)
cs_prg->run_max = cs_tprg->run_max;
cs_prg->sum_max += cs_tprg->run_max;
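/* On the first run adopt this run's histogram directly; on subsequent
runs merge it into the accumulated program histogram. */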
if (cs_prg->runs == 1)
memcpy (cs_prg->histogram, cs_tprg->histogram,
sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
else
gcov_histogram_merge (cs_prg->histogram, cs_tprg->histogram);
}
else if (cs_prg->runs)
goto read_mismatch;
@ -635,8 +731,18 @@ gcov_exit (void)
/* Generate whole program statistics. */
gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, &prg);
if (summary_pos < eof_pos)
gcov_seek (eof_pos);
/* Rewrite all the summaries that were after the summary we merged
into. This is necessary as the merged summary may have a different
size due to the number of non-zero histogram entries changing after
merging. */
while (sum_buffer)
{
gcov_write_summary (GCOV_TAG_PROGRAM_SUMMARY, &sum_buffer->summary);
next_sum_buffer = sum_buffer->next;
free (sum_buffer);
sum_buffer = next_sum_buffer;
}
/* Write execution counts for each function. */
for (f_ix = 0; (unsigned)f_ix != gi_ptr->n_functions; f_ix++)