author     Simon Marlow <marlowsd@gmail.com>    2013-09-04 10:37:10 +0100
committer  Simon Marlow <marlowsd@gmail.com>    2013-09-04 11:00:32 +0100
commit     aa779e092c4f4d6a6691f3a4fc4074e6359337f8 (patch)
tree       f4c4e22da3aa71eff569b01af603836d7b5fd6a5 /rts/Stats.c
parent     5a3918febb7354e0900c4f04151599d833716032 (diff)
Don't move Capabilities in setNumCapabilities (#8209)
We have various problems with reallocating the array of Capabilities,
due to threads in waitForReturnCapability that are already holding a
pointer to a Capability.
Rather than add more locking to make this safer, I decided it would be
easier to ensure that we never move the Capabilities at all. The
capabilities array is now an array of pointers to Capability. There
are extra indirections, but it rarely matters: we don't often access
Capabilities via the array, since normally we already have a pointer to
one. I ran the parallel benchmarks and didn't see any difference.
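
To illustrate the shape of the change (a minimal sketch only, not the actual
RTS declarations: the simplified Capability struct and the growCapabilities
helper below are hypothetical), growing an array of Capability values with
realloc() may move every Capability, invalidating pointers already held by
blocked threads, whereas growing an array of pointers reallocates only the
pointer table and leaves each Capability at its original address. The diff
below is then the mechanical consequence: capabilities[i].field becomes
capabilities[i]->field.

#include <stdlib.h>

/* Simplified sketch; the real Capability struct is defined in
 * rts/Capability.h and carries far more state. */
typedef struct Capability_ {
    unsigned int  no;
    unsigned long total_allocated;
} Capability;

/* Before: a contiguous array of Capability values,
 *     Capability *capabilities;        // accessed as capabilities[i].field
 * which realloc() may move wholesale when the capability count changes.
 *
 * After: an array of pointers; only the pointer table is reallocated and
 * each Capability object keeps its address. */
static Capability **capabilities;       /* accessed as capabilities[i]->field */

/* Hypothetical helper (not the RTS API) showing why the Capability
 * objects themselves no longer move when the count grows. */
static void growCapabilities(unsigned int from, unsigned int to)
{
    capabilities = realloc(capabilities, to * sizeof(Capability *));
    for (unsigned int i = from; i < to; i++) {
        capabilities[i] = calloc(1, sizeof(Capability));
        capabilities[i]->no = i;
    }
}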
Diffstat (limited to 'rts/Stats.c')
-rw-r--r--   rts/Stats.c   34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/rts/Stats.c b/rts/Stats.c
index 3dc1ebe0fb..c19f23c59d 100644
--- a/rts/Stats.c
+++ b/rts/Stats.c
@@ -346,10 +346,10 @@ calcTotalAllocated(void)
     W_ tot_alloc = 0;
     W_ n;
     for (n = 0; n < n_capabilities; n++) {
-        tot_alloc += capabilities[n].total_allocated;
-        traceEventHeapAllocated(&capabilities[n],
+        tot_alloc += capabilities[n]->total_allocated;
+        traceEventHeapAllocated(capabilities[n],
                                 CAPSET_HEAP_DEFAULT,
-                                capabilities[n].total_allocated * sizeof(W_));
+                                capabilities[n]->total_allocated * sizeof(W_));
     }
 
     return tot_alloc;
@@ -730,12 +730,12 @@ stat_exit (void)
         nat i;
         SparkCounters sparks = { 0, 0, 0, 0, 0, 0};
         for (i = 0; i < n_capabilities; i++) {
-            sparks.created   += capabilities[i].spark_stats.created;
-            sparks.dud       += capabilities[i].spark_stats.dud;
-            sparks.overflowed+= capabilities[i].spark_stats.overflowed;
-            sparks.converted += capabilities[i].spark_stats.converted;
-            sparks.gcd       += capabilities[i].spark_stats.gcd;
-            sparks.fizzled   += capabilities[i].spark_stats.fizzled;
+            sparks.created   += capabilities[i]->spark_stats.created;
+            sparks.dud       += capabilities[i]->spark_stats.dud;
+            sparks.overflowed+= capabilities[i]->spark_stats.overflowed;
+            sparks.converted += capabilities[i]->spark_stats.converted;
+            sparks.gcd       += capabilities[i]->spark_stats.gcd;
+            sparks.fizzled   += capabilities[i]->spark_stats.fizzled;
         }
 
         statsPrintf(" SPARKS: %" FMT_Word " (%" FMT_Word " converted, %" FMT_Word " overflowed, %" FMT_Word " dud, %" FMT_Word " GC'd, %" FMT_Word " fizzled)\n\n",
@@ -900,10 +900,10 @@ statDescribeGens(void)
       mut = 0;
       for (i = 0; i < n_capabilities; i++) {
-          mut += countOccupied(capabilities[i].mut_lists[g]);
+          mut += countOccupied(capabilities[i]->mut_lists[g]);
 
           // Add the pinned object block.
-          bd = capabilities[i].pinned_object_block;
+          bd = capabilities[i]->pinned_object_block;
           if (bd != NULL) {
               gen_live   += bd->free - bd->start;
               gen_blocks += bd->blocks;
@@ -999,12 +999,12 @@ extern void getSparkStats( SparkCounters *s ) {
     s->gcd = 0;
     s->fizzled = 0;
     for (i = 0; i < n_capabilities; i++) {
-        s->created   += capabilities[i].spark_stats.created;
-        s->dud       += capabilities[i].spark_stats.dud;
-        s->overflowed+= capabilities[i].spark_stats.overflowed;
-        s->converted += capabilities[i].spark_stats.converted;
-        s->gcd       += capabilities[i].spark_stats.gcd;
-        s->fizzled   += capabilities[i].spark_stats.fizzled;
+        s->created   += capabilities[i]->spark_stats.created;
+        s->dud       += capabilities[i]->spark_stats.dud;
+        s->overflowed+= capabilities[i]->spark_stats.overflowed;
+        s->converted += capabilities[i]->spark_stats.converted;
+        s->gcd       += capabilities[i]->spark_stats.gcd;
+        s->fizzled   += capabilities[i]->spark_stats.fizzled;
     }
 }
 #endif