author     tejohnson <tejohnson@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-04-03 20:51:28 +0000
committer  tejohnson <tejohnson@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-04-03 20:51:28 +0000
commit     8515a84d85b234ea831bb275a6c01fbd3cdd4118 (patch)
tree       adc203a589f57f6641e15408a2579a630f743208 /gcc/gcov-io.c
parent     687a1ea1e7115e06810cc1e7b59d3831eabdb0dc (diff)
This patch enables the gcov-dump tool to optionally compute and dump
the working set information from the counter histogram, via a new -w
option. This is useful to help understand and tune how the compiler
will use the counter histogram, since it first computes the working
set and selects thresholds based on that.

This required moving the bulk of the compute_working_sets functionality
into gcov-io.c so that it was accessible by gcov-dump.c.

2013-04-03  Teresa Johnson  <tejohnson@google.com>

	* gcov-io.c (compute_working_sets): Moved most of body of old
	compute_working_sets here from profile.c.
	* gcov-io.h (NUM_GCOV_WORKING_SETS): Moved here from profile.c.
	(gcov_working_set_t): Moved typedef here from basic-block.h.
	(compute_working_sets): Declare.
	* profile.c (NUM_GCOV_WORKING_SETS): Moved to gcov-io.h.
	(get_working_sets): Renamed from compute_working_sets; replaced
	most of body with call to new compute_working_sets.
	(get_exec_counts): Replaced call to compute_working_sets with
	get_working_sets.
	* profile.h (get_working_sets): Renamed from compute_working_sets.
	* lto-cgraph.c (input_symtab): Replaced call to compute_working_sets
	with get_working_sets.
	* basic-block.h (gcov_working_set_t): Moved to gcov-io.h.
	* gcov-dump.c (dump_working_sets): New function.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@197457 138bc75d-0d04-0410-961f-82ee72b054a4
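For illustration, here is a minimal sketch of how a consumer such as gcov-dump's
new dump_working_sets might use the relocated routine. It assumes gcov-io.h and
<stdio.h> are already included; the helper name print_working_sets and the output
format are hypothetical, not part of the patch. Only compute_working_sets,
gcov_working_set_t and NUM_GCOV_WORKING_SETS come from the change itself.

  /* Hypothetical caller sketch: compute the working sets from a counter
     summary and print each entry.  */
  static void
  print_working_sets (const struct gcov_ctr_summary *summary)
  {
    gcov_working_set_t gcov_working_sets[NUM_GCOV_WORKING_SETS];
    unsigned ws_ix;

    compute_working_sets (summary, gcov_working_sets);
    for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS; ws_ix++)
      printf ("entry %u: %u counters, min counter value %lld\n",
              ws_ix, gcov_working_sets[ws_ix].num_counters,
              (long long) gcov_working_sets[ws_ix].min_counter);
  }

Per the commit message, the new -w option of gcov-dump triggers this kind of
per-entry summary from the histogram in a profile data file's program summary.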
Diffstat (limited to 'gcc/gcov-io.c')
-rw-r--r--  gcc/gcov-io.c  107
1 file changed, 107 insertions, 0 deletions
diff --git a/gcc/gcov-io.c b/gcc/gcov-io.c
index 116cc03ac9f..441aad92833 100644
--- a/gcc/gcov-io.c
+++ b/gcc/gcov-io.c
@@ -837,3 +837,110 @@ static void gcov_histogram_merge (gcov_bucket_type *tgt_histo,
memcpy(tgt_histo, tmp_histo, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
}
#endif /* !IN_GCOV */
+
+/* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
+ (!IN_GCOV && !IN_LIBGCOV). */
+#if IN_GCOV <= 0 && !IN_LIBGCOV
+/* Compute the working set information from the counter histogram in
+ the profile summary. This is an array of information corresponding to a
+ range of percentages of the total execution count (sum_all), and includes
+ the number of counters required to cover that working set percentage and
+ the minimum counter value in that working set. */
+
+GCOV_LINKAGE void
+compute_working_sets (const struct gcov_ctr_summary *summary,
+ gcov_working_set_t *gcov_working_sets)
+{
+ gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
+ gcov_type ws_cum_hotness_incr;
+ gcov_type cum, tmp_cum;
+ const gcov_bucket_type *histo_bucket;
+ unsigned ws_ix, c_num, count;
+ int h_ix;
+
+ /* Compute the amount of sum_all that the cumulative hotness grows
+ by in each successive working set entry, which depends on the
+ number of working set entries. */
+ ws_cum_hotness_incr = summary->sum_all / NUM_GCOV_WORKING_SETS;
+
+ /* Next fill in an array of the cumulative hotness values corresponding
+ to each working set summary entry we are going to compute below.
+ Skip 0% statistics, which can be extrapolated from the
+ rest of the summary data. */
+ cum = ws_cum_hotness_incr;
+ for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
+ ws_ix++, cum += ws_cum_hotness_incr)
+ working_set_cum_values[ws_ix] = cum;
+ /* The last summary entry is reserved for (roughly) 99.9% of the
+ working set. Divide by 1024 so it becomes a shift, which gives
+ almost exactly 99.9%. */
+ working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
+ = summary->sum_all - summary->sum_all/1024;
+
+ /* Next, walk through the histogram in descending order of hotness
+ and compute the statistics for the working set summary array.
+ As histogram entries are accumulated, we check to see which
+ working set entries have had their expected cum_value reached
+ and fill them in, walking the working set entries in increasing
+ size of cum_value. */
+ ws_ix = 0; /* The current entry into the working set array. */
+ cum = 0; /* The current accumulated counter sum. */
+ count = 0; /* The current accumulated count of block counters. */
+ for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
+ h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
+ {
+ histo_bucket = &summary->histogram[h_ix];
+
+ /* If we haven't reached the required cumulative counter value for
+ the current working set percentage, simply accumulate this histogram
+ entry into the running sums and continue to the next histogram
+ entry. */
+ if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
+ {
+ cum += histo_bucket->cum_value;
+ count += histo_bucket->num_counters;
+ continue;
+ }
+
+ /* If adding the current histogram entry's cumulative counter value
+ causes us to exceed the current working set size, then estimate
+ how many of this histogram entry's counter values are required to
+ reach the working set size, and fill in working set entries
+ as we reach their expected cumulative value. */
+ for (c_num = 0, tmp_cum = cum;
+ c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
+ c_num++)
+ {
+ count++;
+ /* If we haven't reached the last histogram entry counter, add
+ in the minimum value again. This will underestimate the
+ cumulative sum so far, because many of the counter values in this
+ entry may have been larger than the minimum. We could add in the
+ average value every time, but that would require an expensive
+ divide operation. */
+ if (c_num + 1 < histo_bucket->num_counters)
+ tmp_cum += histo_bucket->min_value;
+ /* If we have reached the last histogram entry counter, then add
+ in the entire cumulative value. */
+ else
+ tmp_cum = cum + histo_bucket->cum_value;
+
+ /* Next walk through successive working set entries and fill in
+ the statistics for any whose size we have reached by accumulating
+ this histogram counter. */
+ while (ws_ix < NUM_GCOV_WORKING_SETS
+ && tmp_cum >= working_set_cum_values[ws_ix])
+ {
+ gcov_working_sets[ws_ix].num_counters = count;
+ gcov_working_sets[ws_ix].min_counter
+ = histo_bucket->min_value;
+ ws_ix++;
+ }
+ }
+ /* Finally, update the running cumulative value since we were
+ using a temporary above. */
+ cum += histo_bucket->cum_value;
+ }
+ gcc_assert (ws_ix == NUM_GCOV_WORKING_SETS);
+}
+#endif /* IN_GCOV <= 0 && !IN_LIBGCOV */
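Each working set entry therefore corresponds to a fixed slice of sum_all:
entry i is filled in once roughly (i + 1) / NUM_GCOV_WORKING_SETS of the total
execution count has been accumulated, with the last entry reserved for about
99.9%. A small sketch of that mapping follows; the helper working_set_pct is
hypothetical, not part of the patch.

  /* Hypothetical helper: approximate percentage of sum_all covered by
     working set entry WS_IX, mirroring the cumulative increments used
     in compute_working_sets above.  */
  static double
  working_set_pct (unsigned ws_ix)
  {
    if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
      /* Last entry: sum_all - sum_all / 1024, i.e. roughly 99.9%.  */
      return 100.0 * (1.0 - 1.0 / 1024.0);
    return 100.0 * (ws_ix + 1) / NUM_GCOV_WORKING_SETS;
  }

If NUM_GCOV_WORKING_SETS were, say, 128, entry 63 would mark the point where
half of sum_all is covered, and entry 127 the ~99.9% point.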