author     Ben Gamari <ben@smart-cactus.org>          2022-11-10 20:15:09 -0500
committer  Marge Bot <ben+marge-bot@smart-cactus.org> 2022-12-16 16:12:45 -0500
commit     e0affaa9fc3e6dc0e65808afa383426b7fe9420a (patch)
tree       b8315fa018b69b61cb15b71f8da856d0dd61d704
parent     f8e901dcc090ef81723fb9f3d8ea0a1baac4cbc3 (diff)
rts: Encapsulate access to capabilities array
-rw-r--r--  rts/Capability.c          | 44
-rw-r--r--  rts/Capability.h          |  5
-rw-r--r--  rts/IOManager.c           |  4
-rw-r--r--  rts/Printer.c             |  4
-rw-r--r--  rts/Profiling.c           |  2
-rw-r--r--  rts/Proftimer.c           |  5
-rw-r--r--  rts/RetainerProfile.c     |  4
-rw-r--r--  rts/RtsAPI.c              |  4
-rw-r--r--  rts/RtsStartup.c          |  2
-rw-r--r--  rts/Schedule.c            | 74
-rw-r--r--  rts/Stats.c               | 16
-rw-r--r--  rts/Threads.c             |  2
-rw-r--r--  rts/TraverseHeap.c        |  2
-rw-r--r--  rts/eventlog/EventLog.c   |  4
-rw-r--r--  rts/posix/Signals.c       |  4
-rw-r--r--  rts/sm/Compact.c          |  2
-rw-r--r--  rts/sm/GC.c               | 46
-rw-r--r--  rts/sm/MarkWeak.c         |  2
-rw-r--r--  rts/sm/NonMoving.c        | 14
-rw-r--r--  rts/sm/NonMovingMark.c    |  6
-rw-r--r--  rts/sm/NonMovingSweep.c   |  2
-rw-r--r--  rts/sm/Sanity.c           | 20
-rw-r--r--  rts/sm/Storage.c          | 36
23 files changed, 156 insertions(+), 148 deletions(-)
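
[Editorial note: every hunk below follows the same mechanical pattern. A direct index into the global capabilities array is replaced by a call to the new getCapability() accessor added in rts/Capability.h, and hunks that read the same capability several times hoist the lookup into a local. A minimal before/after sketch of a typical call site, illustrative only and not part of the patch:]

    /* Before: index the global array directly. */
    Capability *cap = capabilities[i];

    /* After: funnel the read through the accessor, which fetches the
     * array pointer with a relaxed atomic load before indexing. */
    Capability *cap = getCapability(i);
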
diff --git a/rts/Capability.c b/rts/Capability.c
index 35214a6470..26afddc4df 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -140,7 +140,7 @@ findSpark (Capability *cap)
/* visit cap.s 0..n-1 in sequence until a theft succeeds. We could
start at a random place instead of 0 as well. */
for ( i=0 ; i < getNumCapabilities() ; i++ ) {
- robbed = capabilities[i];
+ robbed = getCapability(i);
if (cap == robbed) // ourselves...
continue;
@@ -183,7 +183,7 @@ anySparks (void)
uint32_t i;
for (i=0; i < getNumCapabilities(); i++) {
- if (!emptySparkPoolCap(capabilities[i])) {
+ if (!emptySparkPoolCap(getCapability(i))) {
return true;
}
}
@@ -407,7 +407,7 @@ void initCapabilities (void)
// a worker Task to each Capability, which will quickly put the
// Capability on the free list when it finds nothing to do.
for (i = 0; i < n_numa_nodes; i++) {
- last_free_capability[i] = capabilities[0];
+ last_free_capability[i] = getCapability(0);
}
}
@@ -465,7 +465,7 @@ void contextSwitchAllCapabilities(void)
{
uint32_t i;
for (i=0; i < getNumCapabilities(); i++) {
- contextSwitchCapability(capabilities[i], true);
+ contextSwitchCapability(getCapability(i), true);
}
}
@@ -473,7 +473,7 @@ void interruptAllCapabilities(void)
{
uint32_t i;
for (i=0; i < getNumCapabilities(); i++) {
- interruptCapability(capabilities[i]);
+ interruptCapability(getCapability(i));
}
}
@@ -827,8 +827,7 @@ static Capability * find_capability_for_task(const Task * task)
{
if (task->preferred_capability != -1) {
// Does the task have a preferred capability? If so, use it
- return capabilities[task->preferred_capability %
- enabled_capabilities];
+ return getCapability(task->preferred_capability % enabled_capabilities);
} else {
// Try last_free_capability first
Capability *cap = RELAXED_LOAD(&last_free_capability[task->node]);
@@ -845,8 +844,8 @@ static Capability * find_capability_for_task(const Task * task)
i += n_numa_nodes) {
// visits all the capabilities on this node, because
// cap[i]->node == i % n_numa_nodes
- if (!RELAXED_LOAD(&capabilities[i]->running_task)) {
- return capabilities[i];
+ if (!RELAXED_LOAD(&getCapability(i)->running_task)) {
+ return getCapability(i);
}
}
@@ -1251,7 +1250,7 @@ shutdownCapabilities(Task *task, bool safe)
uint32_t i;
for (i=0; i < getNumCapabilities(); i++) {
ASSERT(task->incall->tso == NULL);
- shutdownCapability(capabilities[i], task, safe);
+ shutdownCapability(getCapability(i), task, safe);
}
#if defined(THREADED_RTS)
ASSERT(checkSparkCountInvariant());
@@ -1277,9 +1276,11 @@ freeCapabilities (void)
#if defined(THREADED_RTS)
uint32_t i;
for (i=0; i < getNumCapabilities(); i++) {
- freeCapability(capabilities[i]);
- if (capabilities[i] != &MainCapability)
- stgFree(capabilities[i]);
+ Capability *cap = getCapability(i);
+ freeCapability(cap);
+ if (cap != &MainCapability) {
+ stgFree(cap);
+ }
}
#else
freeCapability(&MainCapability);
@@ -1333,7 +1334,7 @@ markCapabilities (evac_fn evac, void *user)
{
uint32_t n;
for (n = 0; n < getNumCapabilities(); n++) {
- markCapability(evac, user, capabilities[n], false);
+ markCapability(evac, user, getCapability(n), false);
}
}
@@ -1345,13 +1346,14 @@ bool checkSparkCountInvariant (void)
uint32_t i;
for (i = 0; i < getNumCapabilities(); i++) {
- sparks.created += capabilities[i]->spark_stats.created;
- sparks.dud += capabilities[i]->spark_stats.dud;
- sparks.overflowed+= capabilities[i]->spark_stats.overflowed;
- sparks.converted += capabilities[i]->spark_stats.converted;
- sparks.gcd += capabilities[i]->spark_stats.gcd;
- sparks.fizzled += capabilities[i]->spark_stats.fizzled;
- remaining += sparkPoolSize(capabilities[i]->sparks);
+ Capability *cap = getCapability(i);
+ sparks.created += cap->spark_stats.created;
+ sparks.dud += cap->spark_stats.dud;
+ sparks.overflowed+= cap->spark_stats.overflowed;
+ sparks.converted += cap->spark_stats.converted;
+ sparks.gcd += cap->spark_stats.gcd;
+ sparks.fizzled += cap->spark_stats.fizzled;
+ remaining += sparkPoolSize(cap->sparks);
}
/* The invariant is
diff --git a/rts/Capability.h b/rts/Capability.h
index 9cbfbd4bdd..2681962be6 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -263,6 +263,11 @@ INLINE_HEADER void releaseCapability_ (Capability* cap STG_UNUSED,
// Array of all the capabilities
extern Capability **capabilities;
+INLINE_HEADER Capability *getCapability(uint32_t i)
+{
+ return RELAXED_LOAD(&capabilities)[i];
+}
+
//
// Types of global synchronisation
//
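
[Editorial note: the RELAXED_LOAD in the accessor above is presumably the point of the patch: setNumCapabilities() can reallocate and republish the capabilities array while other threads are still reading it, so a plain read of the pointer would be a data race under the C11 memory model. Relaxed ordering suffices because callers only need a consistent pointer value, not ordering against other stores. A hedged sketch of the iteration idiom the call sites converge on, modelled on contextSwitchAllCapabilities() from this patch:]

    /* Iterate all capabilities without touching the global array
     * directly.  Both the loop bound and each element go through
     * accessors that use relaxed atomic loads, so a concurrent
     * resize cannot tear the pointer read; the count may be stale,
     * which these bulk operations tolerate. */
    for (uint32_t i = 0; i < getNumCapabilities(); i++) {
        contextSwitchCapability(getCapability(i), true);
    }
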
diff --git a/rts/IOManager.c b/rts/IOManager.c
index 21407a7814..74d48ed541 100644
--- a/rts/IOManager.c
+++ b/rts/IOManager.c
@@ -177,8 +177,8 @@ void markCapabilityIOManager(evac_fn evac USED_IF_NOT_THREADS,
void
setIOManagerControlFd(uint32_t cap_no USED_IF_THREADS, int fd USED_IF_THREADS) {
#if defined(THREADED_RTS)
- if (cap_no < n_capabilities) {
- RELAXED_STORE(&capabilities[cap_no]->iomgr->control_fd, fd);
+ if (cap_no < getNumCapabilities()) {
+ RELAXED_STORE(&getCapability(cap_no)->iomgr->control_fd, fd);
} else {
errorBelch("warning: setIOManagerControlFd called with illegal capability number.");
}
diff --git a/rts/Printer.c b/rts/Printer.c
index 3dbb371a0b..ccd82be877 100644
--- a/rts/Printer.c
+++ b/rts/Printer.c
@@ -716,7 +716,7 @@ void printWeakLists()
for (uint32_t cap_idx = 0; cap_idx < getNumCapabilities(); ++cap_idx) {
debugBelch("Capability %d:\n", cap_idx);
- Capability *cap = capabilities[cap_idx];
+ Capability *cap = getCapability(cap_idx);
for (StgWeak *weak = cap->weak_ptr_list_hd; weak; weak = weak->link) {
printClosure((StgClosure*)weak);
}
@@ -742,7 +742,7 @@ void printLargeAndPinnedObjects()
debugBelch("====== PINNED OBJECTS ======\n");
for (uint32_t cap_idx = 0; cap_idx < getNumCapabilities(); ++cap_idx) {
- Capability *cap = capabilities[cap_idx];
+ Capability *cap = getCapability(cap_idx);
debugBelch("Capability %d: Current pinned object block: %p\n",
cap_idx, (void*)cap->pinned_object_block);
diff --git a/rts/Profiling.c b/rts/Profiling.c
index dd0f4bc915..9dde1f2860 100644
--- a/rts/Profiling.c
+++ b/rts/Profiling.c
@@ -154,7 +154,7 @@ void initProfiling (void)
{
uint32_t n;
for (n=0; n < getNumCapabilities(); n++) {
- capabilities[n]->r.rCCCS = CCS_SYSTEM;
+ getCapability(n)->r.rCCCS = CCS_SYSTEM;
}
}
diff --git a/rts/Proftimer.c b/rts/Proftimer.c
index 59758f8540..22ef1c4171 100644
--- a/rts/Proftimer.c
+++ b/rts/Proftimer.c
@@ -123,8 +123,9 @@ handleProfTick(void)
if (RELAXED_LOAD_ALWAYS(&do_prof_ticks)) {
uint32_t n;
for (n=0; n < getNumCapabilities(); n++) {
- capabilities[n]->r.rCCCS->time_ticks++;
- traceProfSampleCostCentre(capabilities[n], capabilities[n]->r.rCCCS, total_ticks);
+ Capability *cap = getCapability(n);
+ cap->r.rCCCS->time_ticks++;
+ traceProfSampleCostCentre(cap, cap->r.rCCCS, total_ticks);
}
}
#endif
diff --git a/rts/RetainerProfile.c b/rts/RetainerProfile.c
index a7e0cb0501..e47ec12942 100644
--- a/rts/RetainerProfile.c
+++ b/rts/RetainerProfile.c
@@ -394,8 +394,8 @@ computeRetainerSet( traverseState *ts )
for (n = 0; n < getNumCapabilities(); n++) {
// NB: after a GC, all nursery weak_ptr_lists have been migrated
// to the global lists living in the generations
- ASSERT(capabilities[n]->weak_ptr_list_hd == NULL);
- ASSERT(capabilities[n]->weak_ptr_list_tl == NULL);
+ ASSERT(getCapability(n)->weak_ptr_list_hd == NULL);
+ ASSERT(getCapability(n)->weak_ptr_list_tl == NULL);
}
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (weak = generations[g].weak_ptr_list; weak != NULL; weak = weak->link) {
diff --git a/rts/RtsAPI.c b/rts/RtsAPI.c
index 74e6924ae3..46cb8a91f8 100644
--- a/rts/RtsAPI.c
+++ b/rts/RtsAPI.c
@@ -808,7 +808,7 @@ static void assert_isPausedOnMyTask(const char *functionName)
// Check that we own all capabilities.
for (unsigned int i = 0; i < getNumCapabilities(); i++)
{
- Capability *cap = capabilities[i];
+ Capability *cap = getCapability(i);
if (cap->running_task != task)
{
errorBelch (
@@ -943,7 +943,7 @@ void hs_try_putmvar (/* in */ int capability,
capability = 0;
}
}
- cap = capabilities[capability % enabled_capabilities];
+ cap = getCapability(capability % enabled_capabilities);
#if !defined(THREADED_RTS)
diff --git a/rts/RtsStartup.c b/rts/RtsStartup.c
index f4ad851e12..83f43bf619 100644
--- a/rts/RtsStartup.c
+++ b/rts/RtsStartup.c
@@ -483,7 +483,7 @@ hs_exit_(bool wait_foreign)
/* run C finalizers for all active weak pointers */
for (i = 0; i < getNumCapabilities(); i++) {
- runAllCFinalizers(capabilities[i]->weak_ptr_list_hd);
+ runAllCFinalizers(getCapability(i)->weak_ptr_list_hd);
}
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
runAllCFinalizers(generations[g].weak_ptr_list);
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 80d573d975..9dfb08bdf5 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -390,7 +390,7 @@ schedule (Capability *initialCapability, Task *task)
// it was originally on.
#if defined(THREADED_RTS)
if (cap->disabled && !t->bound) {
- Capability *dest_cap = capabilities[cap->no % enabled_capabilities];
+ Capability *dest_cap = getCapability(cap->no % enabled_capabilities);
migrateThread(cap, t, dest_cap);
continue;
}
@@ -724,7 +724,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
{
#if defined(THREADED_RTS)
- Capability *free_caps[getNumCapabilities()], *cap0;
+ Capability *free_caps[getNumCapabilities()];
uint32_t i, n_wanted_caps, n_free_caps;
uint32_t spare_threads = cap->n_run_queue > 0 ? cap->n_run_queue - 1 : 0;
@@ -744,7 +744,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
for (i = (cap->no + 1) % getNumCapabilities(), n_free_caps=0;
n_free_caps < n_wanted_caps && i != cap->no;
i = (i + 1) % getNumCapabilities()) {
- cap0 = capabilities[i];
+ Capability *cap0 = getCapability(i);
if (cap != cap0 && !cap0->disabled && tryGrabCapability(cap0,task)) {
if (!emptyRunQueue(cap0)
|| RELAXED_LOAD(&cap0->n_returning_tasks) != 0
@@ -1535,7 +1535,7 @@ static void acquireAllCapabilities(Capability *cap, Task *task)
for (i=0; i < getNumCapabilities(); i++) {
debugTrace(DEBUG_sched, "grabbing all the capabilities (%d/%d)",
i, getNumCapabilities());
- tmpcap = capabilities[i];
+ tmpcap = getCapability(i);
if (tmpcap != cap) {
// we better hope this task doesn't get migrated to
// another Capability while we're waiting for this one.
@@ -1566,7 +1566,7 @@ void releaseAllCapabilities(uint32_t n, Capability *keep_cap, Task *task)
uint32_t i;
ASSERT( task != NULL);
for (i = 0; i < n; i++) {
- Capability *tmpcap = capabilities[i];
+ Capability *tmpcap = getCapability(i);
if (keep_cap != tmpcap) {
task->cap = tmpcap;
releaseCapability(tmpcap);
@@ -1687,10 +1687,10 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
// those first:
uint32_t n_idle = need_idle;
for (i=0; i < getNumCapabilities(); i++) {
- if (capabilities[i]->disabled) {
+ if (getCapability(i)->disabled) {
idle_cap[i] = true;
} else if (n_idle > 0 &&
- capabilities[i]->running_task == NULL) {
+ getCapability(i)->running_task == NULL) {
debugTrace(DEBUG_sched, "asking for cap %d to be idle", i);
n_idle--;
idle_cap[i] = true;
@@ -1766,14 +1766,14 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
collect_gen >= RtsFlags.ParFlags.parGcLoadBalancingGen))
{
for (i=0; i < n_capabilities; i++) {
- if (capabilities[i]->disabled) {
- idle_cap[i] = tryGrabCapability(capabilities[i], task);
+ if (getCapability(i)->disabled) {
+ idle_cap[i] = tryGrabCapability(getCapability(i), task);
if (idle_cap[i]) {
n_idle_caps++;
}
} else {
if (i != cap->no && idle_cap[i]) {
- Capability *tmpcap = capabilities[i];
+ Capability *tmpcap = getCapability(i);
task->cap = tmpcap;
waitForCapability(&tmpcap, task);
n_idle_caps++;
@@ -1784,15 +1784,15 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
else
{
for (i=0; i < n_capabilities; i++) {
- if (capabilities[i]->disabled) {
- idle_cap[i] = tryGrabCapability(capabilities[i], task);
+ if (getCapability(i)->disabled) {
+ idle_cap[i] = tryGrabCapability(getCapability(i), task);
if (idle_cap[i]) {
n_idle_caps++;
}
} else if (i != cap->no &&
- capabilities[i]->idle >=
+ getCapability(i)->idle >=
RtsFlags.ParFlags.parGcNoSyncWithIdle) {
- idle_cap[i] = tryGrabCapability(capabilities[i], task);
+ idle_cap[i] = tryGrabCapability(getCapability(i), task);
if (idle_cap[i]) {
n_idle_caps++;
} else {
@@ -1804,7 +1804,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
debugTrace(DEBUG_sched, "%d idle caps", n_idle_caps);
for (i=0; i < n_capabilities; i++) {
- NONATOMIC_ADD(&capabilities[i]->idle, 1);
+ NONATOMIC_ADD(&getCapability(i)->idle, 1);
}
// For all capabilities participating in this GC, wait until
@@ -1834,10 +1834,10 @@ delete_threads_and_gc:
// threads. It just avoids the GC having to do any work to
// figure out that any remaining sparks are garbage.
for (i = 0; i < n_capabilities; i++) {
- capabilities[i]->spark_stats.gcd +=
- sparkPoolSize(capabilities[i]->sparks);
+ getCapability(i)->spark_stats.gcd +=
+ sparkPoolSize(getCapability(i)->sparks);
// No race here since all Caps are stopped.
- discardSparksCap(capabilities[i]);
+ discardSparksCap(getCapability(i));
}
#endif
RELAXED_STORE(&sched_state, SCHED_SHUTTING_DOWN);
@@ -1853,10 +1853,10 @@ delete_threads_and_gc:
#if defined(THREADED_RTS)
for (i = enabled_capabilities; i < n_capabilities; i++) {
Capability *tmp_cap, *dest_cap;
- tmp_cap = capabilities[i];
+ tmp_cap = getCapability(i);
ASSERT(tmp_cap->disabled);
if (i != cap->no) {
- dest_cap = capabilities[i % enabled_capabilities];
+ dest_cap = getCapability(i % enabled_capabilities);
while (!emptyRunQueue(tmp_cap)) {
tso = popRunQueue(tmp_cap);
migrateThread(tmp_cap, tso, dest_cap);
@@ -1941,11 +1941,11 @@ delete_threads_and_gc:
for (i = 0; i < n_capabilities; i++) {
if (i != cap->no) {
if (idle_cap[i]) {
- ASSERT(capabilities[i]->running_task == task);
- task->cap = capabilities[i];
- releaseCapability(capabilities[i]);
+ ASSERT(getCapability(i)->running_task == task);
+ task->cap = getCapability(i);
+ releaseCapability(getCapability(i));
} else {
- ASSERT(capabilities[i]->running_task != task);
+ ASSERT(getCapability(i)->running_task != task);
}
}
}
@@ -2055,7 +2055,7 @@ forkProcess(HsStablePtr *entry
ACQUIRE_LOCK(&stable_name_mutex);
for (i=0; i < n_capabilities; i++) {
- ACQUIRE_LOCK(&capabilities[i]->lock);
+ ACQUIRE_LOCK(&getCapability(i)->lock);
}
// Take task lock after capability lock to avoid order inversion (#17275).
@@ -2089,8 +2089,8 @@ forkProcess(HsStablePtr *entry
#endif
for (i=0; i < n_capabilities; i++) {
- releaseCapability_(capabilities[i],false);
- RELEASE_LOCK(&capabilities[i]->lock);
+ releaseCapability_(getCapability(i),false);
+ RELEASE_LOCK(&getCapability(i)->lock);
}
exitMyTask();
@@ -2112,7 +2112,7 @@ forkProcess(HsStablePtr *entry
initMutex(&task->lock);
for (i=0; i < n_capabilities; i++) {
- initMutex(&capabilities[i]->lock);
+ initMutex(&getCapability(i)->lock);
}
initMutex(&all_tasks_mutex);
@@ -2148,7 +2148,7 @@ forkProcess(HsStablePtr *entry
discardTasksExcept(task);
for (i=0; i < n_capabilities; i++) {
- cap = capabilities[i];
+ cap = getCapability(i);
// Empty the run queue. It seems tempting to let all the
// killed threads stay on the run queue as zombies to be
@@ -2180,7 +2180,7 @@ forkProcess(HsStablePtr *entry
releaseCapability(cap);
}
}
- cap = capabilities[0];
+ cap = getCapability(0);
task->cap = cap;
// Empty the threads lists. Otherwise, the garbage
@@ -2300,8 +2300,8 @@ setNumCapabilities (uint32_t new_n_capabilities USED_IF_THREADS)
// structures, the nursery, etc.
//
for (n = new_n_capabilities; n < enabled_capabilities; n++) {
- capabilities[n]->disabled = true;
- traceCapDisable(capabilities[n]);
+ getCapability(n)->disabled = true;
+ traceCapDisable(getCapability(n));
}
enabled_capabilities = new_n_capabilities;
}
@@ -2312,8 +2312,8 @@ setNumCapabilities (uint32_t new_n_capabilities USED_IF_THREADS)
// enable any disabled capabilities, up to the required number
for (n = enabled_capabilities;
n < new_n_capabilities && n < n_capabilities; n++) {
- capabilities[n]->disabled = false;
- traceCapEnable(capabilities[n]);
+ getCapability(n)->disabled = false;
+ traceCapEnable(getCapability(n));
}
enabled_capabilities = n;
@@ -2612,9 +2612,9 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
} else {
- migrateThread(cap, tso, capabilities[cpu]);
+ migrateThread(cap, tso, getCapability(cpu));
}
- contextSwitchCapability(capabilities[cpu], false);
+ contextSwitchCapability(getCapability(cpu), false);
#else
appendToRunQueue(cap,tso);
contextSwitchCapability(cap, false);
@@ -2700,7 +2700,7 @@ startWorkerTasks (uint32_t from USED_IF_THREADS, uint32_t to USED_IF_THREADS)
Capability *cap;
for (i = from; i < to; i++) {
- cap = capabilities[i];
+ cap = getCapability(i);
ACQUIRE_LOCK(&cap->lock);
startWorkerTask(cap);
RELEASE_LOCK(&cap->lock);
diff --git a/rts/Stats.c b/rts/Stats.c
index cf6af35f30..a2701bd0c8 100644
--- a/rts/Stats.c
+++ b/rts/Stats.c
@@ -1351,14 +1351,14 @@ stat_exitReport (void)
sum.bound_task_count = taskCount - workerCount;
for (uint32_t i = 0; i < getNumCapabilities(); i++) {
- sum.sparks.created += capabilities[i]->spark_stats.created;
- sum.sparks.dud += capabilities[i]->spark_stats.dud;
+ sum.sparks.created += getCapability(i)->spark_stats.created;
+ sum.sparks.dud += getCapability(i)->spark_stats.dud;
sum.sparks.overflowed+=
- capabilities[i]->spark_stats.overflowed;
+ getCapability(i)->spark_stats.overflowed;
sum.sparks.converted +=
- capabilities[i]->spark_stats.converted;
- sum.sparks.gcd += capabilities[i]->spark_stats.gcd;
- sum.sparks.fizzled += capabilities[i]->spark_stats.fizzled;
+ getCapability(i)->spark_stats.converted;
+ sum.sparks.gcd += getCapability(i)->spark_stats.gcd;
+ sum.sparks.fizzled += getCapability(i)->spark_stats.fizzled;
}
sum.sparks_count = sum.sparks.created
@@ -1650,10 +1650,10 @@ statDescribeGens(void)
mut = 0;
for (i = 0; i < getNumCapabilities(); i++) {
- mut += countOccupied(capabilities[i]->mut_lists[g]);
+ mut += countOccupied(getCapability(i)->mut_lists[g]);
// Add the pinned object block.
- bd = capabilities[i]->pinned_object_block;
+ bd = getCapability(i)->pinned_object_block;
if (bd != NULL) {
gen_live += bd->free - bd->start;
gen_blocks += bd->blocks;
diff --git a/rts/Threads.c b/rts/Threads.c
index 8d59956b50..2dce11a901 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -988,7 +988,7 @@ printAllThreads(void)
debugBelch("all threads:\n");
for (i = 0; i < getNumCapabilities(); i++) {
- cap = capabilities[i];
+ cap = getCapability(i);
debugBelch("threads on capability %d:\n", cap->no);
for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
printThreadStatus(t);
diff --git a/rts/TraverseHeap.c b/rts/TraverseHeap.c
index 60b6b6fbcd..027f99ebe0 100644
--- a/rts/TraverseHeap.c
+++ b/rts/TraverseHeap.c
@@ -1098,7 +1098,7 @@ resetMutableObjects(traverseState* ts)
// because we can find MUT_VAR objects which have not been
// visited during heap traversal.
for (n = 0; n < getNumCapabilities(); n++) {
- for (bd = capabilities[n]->mut_lists[g]; bd != NULL; bd = bd->link) {
+ for (bd = getCapability(n)->mut_lists[g]; bd != NULL; bd = bd->link) {
for (ml = bd->start; ml < bd->free; ml++) {
traverseMaybeInitClosureData(ts, (StgClosure *)*ml);
}
diff --git a/rts/eventlog/EventLog.c b/rts/eventlog/EventLog.c
index 7b4150c72d..1522645112 100644
--- a/rts/eventlog/EventLog.c
+++ b/rts/eventlog/EventLog.c
@@ -1572,7 +1572,7 @@ void flushAllCapsEventsBufs()
RELEASE_LOCK(&eventBufMutex);
for (unsigned int i=0; i < getNumCapabilities(); i++) {
- flushLocalEventsBuf(capabilities[i]);
+ flushLocalEventsBuf(getCapability(i));
}
flushEventLogWriter();
}
@@ -1593,7 +1593,7 @@ void flushEventLog(Capability **cap USED_IF_THREADS)
flushAllCapsEventsBufs();
releaseAllCapabilities(getNumCapabilities(), cap ? *cap : NULL, task);
#else
- flushLocalEventsBuf(capabilities[0]);
+ flushLocalEventsBuf(getCapability(0));
#endif
flushEventLogWriter();
}
diff --git a/rts/posix/Signals.c b/rts/posix/Signals.c
index 8c29564ace..0d252d56b2 100644
--- a/rts/posix/Signals.c
+++ b/rts/posix/Signals.c
@@ -203,11 +203,11 @@ ioManagerDie (void)
{
// Shut down IO managers
for (i=0; i < getNumCapabilities(); i++) {
- const int fd = RELAXED_LOAD(&capabilities[i]->iomgr->control_fd);
+ const int fd = RELAXED_LOAD(&getCapability(i)->iomgr->control_fd);
if (0 <= fd) {
r = write(fd, &byte, 1);
if (r == -1) { sysErrorBelch("ioManagerDie: write"); }
- RELAXED_STORE(&capabilities[i]->iomgr->control_fd, -1);
+ RELAXED_STORE(&getCapability(i)->iomgr->control_fd, -1);
}
}
}
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index aa901ee172..f6e65ecc9a 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -994,7 +994,7 @@ compact(StgClosure *static_objects,
// mutable lists
for (W_ g = 1; g < RtsFlags.GcFlags.generations; g++) {
for (W_ n = 0; n < getNumCapabilities(); n++) {
- for (bdescr *bd = capabilities[n]->mut_lists[g];
+ for (bdescr *bd = getCapability(n)->mut_lists[g];
bd != NULL; bd = bd->link) {
for (P_ p = bd->start; p < bd->free; p++) {
thread((StgClosure **)p);
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 8776f80b51..2c1b680864 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -341,8 +341,8 @@ GarbageCollect (uint32_t collect_gen,
// attribute any costs to CCS_GC
#if defined(PROFILING)
for (n = 0; n < getNumCapabilities(); n++) {
- save_CCS[n] = capabilities[n]->r.rCCCS;
- capabilities[n]->r.rCCCS = CCS_GC;
+ save_CCS[n] = getCapability(n)->r.rCCCS;
+ getCapability(n)->r.rCCCS = CCS_GC;
}
#endif
@@ -506,18 +506,18 @@ GarbageCollect (uint32_t collect_gen,
if (!is_par_gc()) {
for (n = 0; n < getNumCapabilities(); n++) {
#if defined(THREADED_RTS)
- scavenge_capability_mut_Lists1(capabilities[n]);
+ scavenge_capability_mut_Lists1(getCapability(n));
#else
- scavenge_capability_mut_lists(capabilities[n]);
+ scavenge_capability_mut_lists(getCapability(n));
#endif
}
} else {
scavenge_capability_mut_lists(gct->cap);
for (n = 0; n < getNumCapabilities(); n++) {
if (idle_cap[n]) {
- markCapability(mark_root, gct, capabilities[n],
+ markCapability(mark_root, gct, getCapability(n),
true/*don't mark sparks*/);
- scavenge_capability_mut_lists(capabilities[n]);
+ scavenge_capability_mut_lists(getCapability(n));
}
}
}
@@ -530,7 +530,7 @@ GarbageCollect (uint32_t collect_gen,
gct->evac_gen_no = 0;
if (!is_par_gc()) {
for (n = 0; n < getNumCapabilities(); n++) {
- markCapability(mark_root, gct, capabilities[n],
+ markCapability(mark_root, gct, getCapability(n),
true/*don't mark sparks*/);
}
} else {
@@ -573,12 +573,12 @@ GarbageCollect (uint32_t collect_gen,
#if defined(THREADED_RTS)
if (!is_par_gc()) {
for (n = 0; n < getNumCapabilities(); n++) {
- pruneSparkQueue(false, capabilities[n]);
+ pruneSparkQueue(false, getCapability(n));
}
} else {
for (n = 0; n < getNumCapabilities(); n++) {
if (n == cap->no || idle_cap[n]) {
- pruneSparkQueue(false, capabilities[n]);
+ pruneSparkQueue(false, getCapability(n));
}
}
}
@@ -683,7 +683,7 @@ GarbageCollect (uint32_t collect_gen,
if (g > 0) {
W_ mut_list_size = 0;
for (n = 0; n < getNumCapabilities(); n++) {
- mut_list_size += countOccupied(capabilities[n]->mut_lists[g]);
+ mut_list_size += countOccupied(getCapability(n)->mut_lists[g]);
}
copied += mut_list_size;
@@ -847,7 +847,7 @@ GarbageCollect (uint32_t collect_gen,
if (RtsFlags.GcFlags.useNonmoving) {
RELEASE_SM_LOCK;
for (n = 0; n < getNumCapabilities(); n++) {
- nonmovingAddUpdRemSetBlocks(&capabilities[n]->upd_rem_set.queue);
+ nonmovingAddUpdRemSetBlocks(&getCapability(n)->upd_rem_set.queue);
}
ACQUIRE_SM_LOCK;
}
@@ -1079,7 +1079,7 @@ GarbageCollect (uint32_t collect_gen,
// restore enclosing cost centre
#if defined(PROFILING)
for (n = 0; n < getNumCapabilities(); n++) {
- capabilities[n]->r.rCCCS = save_CCS[n];
+ getCapability(n)->r.rCCCS = save_CCS[n];
}
#endif
@@ -1128,7 +1128,7 @@ new_gc_thread (uint32_t n, gc_thread *t)
uint32_t g;
gen_workspace *ws;
- t->cap = capabilities[n];
+ t->cap = getCapability(n);
#if defined(THREADED_RTS)
t->id = 0;
@@ -1479,9 +1479,9 @@ waitForGcThreads (Capability *cap, bool idle_cap[])
for(i = 0; i < getNumCapabilities(); ++i) {
if (i == me || idle_cap[i]) { continue; }
if (SEQ_CST_LOAD(&gc_threads[i]->wakeup) != GC_THREAD_STANDING_BY) {
- prodCapability(capabilities[i], cap->running_task);
+ prodCapability(getCapability(i), cap->running_task);
write_barrier();
- interruptCapability(capabilities[i]);
+ interruptCapability(getCapability(i));
}
}
// this 1ms timeout is not well justified. It's the shortest timeout we
@@ -1632,18 +1632,18 @@ prepare_collected_gen (generation *gen)
if (RtsFlags.GcFlags.useNonmoving && g == oldest_gen->no) {
// Nonmoving heap's mutable list is always a root.
for (i = 0; i < getNumCapabilities(); i++) {
- stash_mut_list(capabilities[i], g);
+ stash_mut_list(getCapability(i), g);
}
} else if (g != 0) {
// Otherwise throw away the current mutable list. Invariant: the
// mutable list always has at least one block; this means we can avoid
// a check for NULL in recordMutable().
for (i = 0; i < getNumCapabilities(); i++) {
- bdescr *old = RELAXED_LOAD(&capabilities[i]->mut_lists[g]);
+ bdescr *old = RELAXED_LOAD(&getCapability(i)->mut_lists[g]);
freeChain(old);
bdescr *new = allocBlockOnNode(capNoToNumaNode(i));
- RELAXED_STORE(&capabilities[i]->mut_lists[g], new);
+ RELAXED_STORE(&getCapability(i)->mut_lists[g], new);
}
}
@@ -1775,7 +1775,7 @@ prepare_uncollected_gen (generation *gen)
// allocate a fresh block for each one. We'll traverse these
// mutable lists as roots early on in the GC.
for (i = 0; i < getNumCapabilities(); i++) {
- stash_mut_list(capabilities[i], gen->no);
+ stash_mut_list(getCapability(i), gen->no);
}
ASSERT(gen->scavenged_large_objects == NULL);
@@ -1852,7 +1852,7 @@ collect_pinned_object_blocks (void)
bdescr *last = NULL;
if (use_nonmoving && gen == oldest_gen) {
// Mark objects as belonging to the nonmoving heap
- for (bdescr *bd = RELAXED_LOAD(&capabilities[n]->pinned_object_blocks); bd != NULL; bd = bd->link) {
+ for (bdescr *bd = RELAXED_LOAD(&getCapability(n)->pinned_object_blocks); bd != NULL; bd = bd->link) {
bd->flags |= BF_NONMOVING;
bd->gen = oldest_gen;
bd->gen_no = oldest_gen->no;
@@ -1861,7 +1861,7 @@ collect_pinned_object_blocks (void)
last = bd;
}
} else {
- for (bdescr *bd = capabilities[n]->pinned_object_blocks; bd != NULL; bd = bd->link) {
+ for (bdescr *bd = getCapability(n)->pinned_object_blocks; bd != NULL; bd = bd->link) {
last = bd;
}
}
@@ -1871,8 +1871,8 @@ collect_pinned_object_blocks (void)
if (gen->large_objects != NULL) {
gen->large_objects->u.back = last;
}
- gen->large_objects = RELAXED_LOAD(&capabilities[n]->pinned_object_blocks);
- RELAXED_STORE(&capabilities[n]->pinned_object_blocks, NULL);
+ gen->large_objects = RELAXED_LOAD(&getCapability(n)->pinned_object_blocks);
+ RELAXED_STORE(&getCapability(n)->pinned_object_blocks, NULL);
}
}
}
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 9883def2a8..99383ebd42 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -386,7 +386,7 @@ void collectFreshWeakPtrs()
uint32_t i;
// move recently allocated weak_ptr_list to the old list as well
for (i = 0; i < getNumCapabilities(); i++) {
- Capability *cap = capabilities[i];
+ Capability *cap = getCapability(i);
if (cap->weak_ptr_list_tl != NULL) {
IF_DEBUG(sanity, checkWeakPtrSanity(cap->weak_ptr_list_hd, cap->weak_ptr_list_tl));
cap->weak_ptr_list_tl->link = g0->weak_ptr_list;
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index f1e2e73fff..0f4af4ed69 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -794,7 +794,7 @@ void nonmovingAddCapabilities(uint32_t new_n_caps)
// Initialize current segments for the new capabilities
for (unsigned int j = old_n_caps; j < new_n_caps; j++) {
- allocs[i]->current[j] = nonmovingAllocSegment(capabilities[j]->node);
+ allocs[i]->current[j] = nonmovingAllocSegment(getCapability(j)->node);
nonmovingInitSegment(allocs[i]->current[j], NONMOVING_ALLOCA0 + i);
SET_SEGMENT_STATE(allocs[i]->current[j], CURRENT);
allocs[i]->current[j]->link = NULL;
@@ -946,7 +946,7 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
markCAFs((evac_fn)markQueueAddRoot, mark_queue);
for (unsigned int n = 0; n < getNumCapabilities(); ++n) {
markCapability((evac_fn)markQueueAddRoot, mark_queue,
- capabilities[n], true/*don't mark sparks*/);
+ getCapability(n), true/*don't mark sparks*/);
}
nonmovingMarkWeakPtrList(mark_queue, *dead_weaks);
markStablePtrTable((evac_fn)markQueueAddRoot, mark_queue);
@@ -1147,7 +1147,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
#if defined(THREADED_RTS)
// Just pick a random capability. Not sure if this is a good idea -- we use
// only one capability for all finalizers.
- scheduleFinalizers(capabilities[0], *dead_weaks);
+ scheduleFinalizers(getCapability(0), *dead_weaks);
// Note that this mutates heap and causes running write barriers.
// See Note [Unintentional marking in resurrectThreads] in NonMovingMark.c
// for how we deal with this.
@@ -1189,7 +1189,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
// See Note [Spark management under the nonmoving collector].
#if defined(THREADED_RTS)
for (uint32_t n = 0; n < getNumCapabilities(); n++) {
- pruneSparkQueue(true, capabilities[n]);
+ pruneSparkQueue(true, getCapability(n));
}
#endif
@@ -1265,7 +1265,7 @@ void assert_in_nonmoving_heap(StgPtr p)
if (bd->flags & BF_LARGE) {
// It should be in a capability (if it's not filled yet) or in non-moving heap
for (uint32_t cap = 0; cap < getNumCapabilities(); ++cap) {
- if (bd == capabilities[cap]->pinned_object_block) {
+ if (bd == getCapability(cap)->pinned_object_block) {
return;
}
}
@@ -1498,10 +1498,10 @@ void nonmovingPrintSweepList()
void check_in_mut_list(StgClosure *p)
{
for (uint32_t cap_n = 0; cap_n < getNumCapabilities(); ++cap_n) {
- for (bdescr *bd = capabilities[cap_n]->mut_lists[oldest_gen->no]; bd; bd = bd->link) {
+ for (bdescr *bd = getCapability(cap_n)->mut_lists[oldest_gen->no]; bd; bd = bd->link) {
for (StgPtr q = bd->start; q < bd->free; ++q) {
if (*((StgPtr**)q) == (StgPtr*)p) {
- debugBelch("Object is in mut list of cap %d: %p\n", cap_n, capabilities[cap_n]->mut_lists[oldest_gen->no]);
+ debugBelch("Object is in mut list of cap %d: %p\n", cap_n, getCapability(cap_n)->mut_lists[oldest_gen->no]);
return;
}
}
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index 35780fcd49..d9758b943f 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -324,7 +324,7 @@ void nonmovingBeginFlush(Task *task)
// logic won't have been hit. Make sure that everyone so far has flushed.
// Ideally we want to mark asynchronously with syncing.
for (uint32_t i = 0; i < getNumCapabilities(); i++) {
- nonmovingFlushCapUpdRemSetBlocks(capabilities[i]);
+ nonmovingFlushCapUpdRemSetBlocks(getCapability(i));
}
}
@@ -399,7 +399,7 @@ void nonmovingFinishFlush(Task *task)
{
// See Note [Unintentional marking in resurrectThreads]
for (uint32_t i = 0; i < getNumCapabilities(); i++) {
- reset_upd_rem_set(&capabilities[i]->upd_rem_set);
+ reset_upd_rem_set(&getCapability(i)->upd_rem_set);
}
// Also reset upd_rem_set_block_list in case some of the UpdRemSets were
// filled and we flushed them.
@@ -1362,7 +1362,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
#if defined(DEBUG)
bool found_it = false;
for (uint32_t i = 0; i < getNumCapabilities(); ++i) {
- if (capabilities[i]->pinned_object_block == bd) {
+ if (getCapability(i)->pinned_object_block == bd) {
found_it = true;
break;
}
diff --git a/rts/sm/NonMovingSweep.c b/rts/sm/NonMovingSweep.c
index 977af49b04..ad2b422307 100644
--- a/rts/sm/NonMovingSweep.c
+++ b/rts/sm/NonMovingSweep.c
@@ -280,7 +280,7 @@ dirty_BLOCKING_QUEUE:
void nonmovingSweepMutLists()
{
for (uint32_t n = 0; n < getNumCapabilities(); n++) {
- Capability *cap = capabilities[n];
+ Capability *cap = getCapability(n);
bdescr *old_mut_list = cap->mut_lists[oldest_gen->no];
cap->mut_lists[oldest_gen->no] = allocBlockOnNode_lock(cap->node);
for (bdescr *bd = old_mut_list; bd; bd = bd->link) {
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 461edd2171..a77eb08d7a 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -835,7 +835,7 @@ checkLocalMutableLists (uint32_t cap_no)
{
uint32_t g;
for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
- checkMutableList(capabilities[cap_no]->mut_lists[g], g);
+ checkMutableList(getCapability(cap_no)->mut_lists[g], g);
}
}
@@ -1036,7 +1036,7 @@ findMemoryLeak (void)
uint32_t g, i, j;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (i = 0; i < getNumCapabilities(); i++) {
- markBlocks(capabilities[i]->mut_lists[g]);
+ markBlocks(getCapability(i)->mut_lists[g]);
markBlocks(gc_threads[i]->gens[g].part_list);
markBlocks(gc_threads[i]->gens[g].scavd_list);
markBlocks(gc_threads[i]->gens[g].todo_bd);
@@ -1052,8 +1052,8 @@ findMemoryLeak (void)
for (i = 0; i < getNumCapabilities(); i++) {
markBlocks(gc_threads[i]->free_blocks);
- markBlocks(capabilities[i]->pinned_object_block);
- markBlocks(capabilities[i]->upd_rem_set.queue.blocks);
+ markBlocks(getCapability(i)->pinned_object_block);
+ markBlocks(getCapability(i)->upd_rem_set.queue.blocks);
}
if (RtsFlags.GcFlags.useNonmoving) {
@@ -1220,7 +1220,7 @@ memInventory (bool show)
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
gen_blocks[g] = 0;
for (i = 0; i < getNumCapabilities(); i++) {
- gen_blocks[g] += countBlocks(capabilities[i]->mut_lists[g]);
+ gen_blocks[g] += countBlocks(getCapability(i)->mut_lists[g]);
gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
@@ -1235,11 +1235,11 @@ memInventory (bool show)
for (i = 0; i < getNumCapabilities(); i++) {
W_ n = countBlocks(gc_threads[i]->free_blocks);
gc_free_blocks += n;
- if (capabilities[i]->pinned_object_block != NULL) {
- nursery_blocks += capabilities[i]->pinned_object_block->blocks;
+ if (getCapability(i)->pinned_object_block != NULL) {
+ nursery_blocks += getCapability(i)->pinned_object_block->blocks;
}
- nursery_blocks += countBlocks(capabilities[i]->pinned_object_blocks);
- free_pinned_blocks += countBlocks(capabilities[i]->pinned_object_empty);
+ nursery_blocks += countBlocks(getCapability(i)->pinned_object_blocks);
+ free_pinned_blocks += countBlocks(getCapability(i)->pinned_object_empty);
}
#if defined(PROFILING)
@@ -1259,7 +1259,7 @@ memInventory (bool show)
// count UpdRemSet blocks
for (i = 0; i < getNumCapabilities(); ++i) {
- upd_rem_set_blocks += countBlocks(capabilities[i]->upd_rem_set.queue.blocks);
+ upd_rem_set_blocks += countBlocks(getCapability(i)->upd_rem_set.queue.blocks);
}
upd_rem_set_blocks += countBlocks(upd_rem_set_block_list);
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 99a8c18033..1760fed51a 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -293,8 +293,8 @@ void storageAddCapabilities (uint32_t from, uint32_t to)
// we've moved the nurseries, so we have to update the rNursery
// pointers from the Capabilities.
for (i = 0; i < from; i++) {
- uint32_t index = capabilities[i]->r.rNursery - old_nurseries;
- capabilities[i]->r.rNursery = &nurseries[index];
+ uint32_t index = getCapability(i)->r.rNursery - old_nurseries;
+ getCapability(i)->r.rNursery = &nurseries[index];
}
/* The allocation area. Policy: keep the allocation area
@@ -316,7 +316,7 @@ void storageAddCapabilities (uint32_t from, uint32_t to)
// allocate a block for each mut list
for (n = from; n < to; n++) {
for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
- capabilities[n]->mut_lists[g] =
+ getCapability(n)->mut_lists[g] =
allocBlockOnNode(capNoToNumaNode(n));
}
}
@@ -325,7 +325,7 @@ void storageAddCapabilities (uint32_t from, uint32_t to)
if (RtsFlags.GcFlags.useNonmoving) {
nonmovingAddCapabilities(to);
for (i = 0; i < to; ++i) {
- init_upd_rem_set(&capabilities[i]->upd_rem_set);
+ init_upd_rem_set(&getCapability(i)->upd_rem_set);
}
}
@@ -376,7 +376,7 @@ void listAllBlocks (ListBlocksCb cb, void *user)
uint32_t g, i;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (i = 0; i < getNumCapabilities(); i++) {
- cb(user, capabilities[i]->mut_lists[g]);
+ cb(user, getCapability(i)->mut_lists[g]);
cb(user, gc_threads[i]->gens[g].part_list);
cb(user, gc_threads[i]->gens[g].scavd_list);
cb(user, gc_threads[i]->gens[g].todo_bd);
@@ -388,11 +388,11 @@ void listAllBlocks (ListBlocksCb cb, void *user)
cb(user, nurseries[i].blocks);
}
for (i = 0; i < getNumCapabilities(); i++) {
- if (capabilities[i]->pinned_object_block != NULL) {
- cb(user, capabilities[i]->pinned_object_block);
+ if (getCapability(i)->pinned_object_block != NULL) {
+ cb(user, getCapability(i)->pinned_object_block);
}
- cb(user, capabilities[i]->pinned_object_blocks);
- cb(user, capabilities[i]->pinned_object_empty);
+ cb(user, getCapability(i)->pinned_object_blocks);
+ cb(user, getCapability(i)->pinned_object_empty);
}
}
@@ -784,8 +784,8 @@ assignNurseriesToCapabilities (uint32_t from, uint32_t to)
uint32_t i, node;
for (i = from; i < to; i++) {
- node = capabilities[i]->node;
- assignNurseryToCapability(capabilities[i], next_nursery[node]);
+ node = getCapability(i)->node;
+ assignNurseryToCapability(getCapability(i), next_nursery[node]);
next_nursery[node] += n_numa_nodes;
}
}
@@ -1569,11 +1569,11 @@ calcTotalAllocated (void)
W_ n;
for (n = 0; n < getNumCapabilities(); n++) {
- tot_alloc += capabilities[n]->total_allocated;
+ tot_alloc += getCapability(n)->total_allocated;
- traceEventHeapAllocated(capabilities[n],
+ traceEventHeapAllocated(getCapability(n),
CAPSET_HEAP_DEFAULT,
- capabilities[n]->total_allocated * sizeof(W_));
+ getCapability(n)->total_allocated * sizeof(W_));
}
return tot_alloc;
@@ -1592,10 +1592,10 @@ updateNurseriesStats (void)
for (i = 0; i < getNumCapabilities(); i++) {
// The current nursery block and the current allocate block have not
// yet been accounted for in cap->total_allocated, so we add them here.
- bd = capabilities[i]->r.rCurrentNursery;
- if (bd) finishedNurseryBlock(capabilities[i], bd);
- bd = capabilities[i]->r.rCurrentAlloc;
- if (bd) finishedNurseryBlock(capabilities[i], bd);
+ bd = getCapability(i)->r.rCurrentNursery;
+ if (bd) finishedNurseryBlock(getCapability(i), bd);
+ bd = getCapability(i)->r.rCurrentAlloc;
+ if (bd) finishedNurseryBlock(getCapability(i), bd);
}
}