path: root/rts/Schedule.c
Diffstat (limited to 'rts/Schedule.c')
-rw-r--r--  rts/Schedule.c  87
1 file changed, 35 insertions(+), 52 deletions(-)
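
Every hunk in this diff follows from one representation change: the global capabilities table stops being a flat array of Capability structs and becomes an array of pointers to individually allocated Capability structs (the declaration itself lives outside this file, in the Capability module), so each &capabilities[i] becomes capabilities[i] and each capabilities[i].field becomes capabilities[i]->field. A minimal, self-contained C sketch of the new layout, using hypothetical names (Cap, caps) rather than the real RTS types:

    #include <stdlib.h>

    typedef struct { int no; int idle; } Cap;   /* stand-in for Capability */

    /* New scheme: an array of pointers. Resizing reallocates only this
     * pointer array; each Cap is allocated once, so its address stays
     * stable for its lifetime. Under the old scheme (a flat Cap caps[]),
     * resizing could move every struct at once. */
    static Cap **caps;
    static int n_caps;

    static void init_caps(int n)
    {
        caps = malloc(n * sizeof(Cap *));
        for (int i = 0; i < n; i++) {
            caps[i] = calloc(1, sizeof(Cap));
            caps[i]->no = i;
        }
        n_caps = n;
    }

    int main(void)
    {
        init_caps(4);
        Cap *c = caps[2];   /* was: &caps[2] with the flat array */
        c->idle++;          /* was: caps[2].idle++ */
        return 0;
    }

The point of the indirection is pointer stability: growing the table touches only the pointer array, so outstanding Capability pointers stay valid, which is what the setNumCapabilities hunk further down exploits.
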
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 408146f195..07ebec62b1 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -376,7 +376,7 @@ schedule (Capability *initialCapability, Task *task)
// it was originally on.
#ifdef THREADED_RTS
if (cap->disabled && !t->bound) {
- Capability *dest_cap = &capabilities[cap->no % enabled_capabilities];
+ Capability *dest_cap = capabilities[cap->no % enabled_capabilities];
migrateThread(cap, t, dest_cap);
continue;
}
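
A disabled capability keeps its number but must not run unbound threads, so the scheduler migrates them to a fixed enabled capability chosen by reducing the capability number modulo enabled_capabilities. A toy illustration of that mapping, with a hypothetical migration_target helper (not RTS code):

    #include <stdio.h>

    /* Destination for threads stranded on a disabled capability: a fixed
     * enabled capability picked by cap number modulo the enabled count
     * (mirrors the dest_cap computation above, in toy form). */
    static int migration_target(int cap_no, int enabled)
    {
        return cap_no % enabled;
    }

    int main(void)
    {
        /* e.g. shrinking from 6 caps to 4: caps 4 and 5 are disabled */
        for (int no = 4; no < 6; no++)
            printf("cap %d -> cap %d\n", no, migration_target(no, 4));
        return 0;
    }
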
@@ -716,7 +716,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
// First grab as many free Capabilities as we can.
for (i=0, n_free_caps=0; i < n_capabilities; i++) {
- cap0 = &capabilities[i];
+ cap0 = capabilities[i];
if (cap != cap0 && !cap0->disabled && tryGrabCapability(cap0,task)) {
if (!emptyRunQueue(cap0)
|| cap0->returning_tasks_hd != NULL
@@ -1395,7 +1395,7 @@ static void acquireAllCapabilities(Capability *cap, Task *task)
for (i=0; i < n_capabilities; i++) {
debugTrace(DEBUG_sched, "grabbing all the capabilities (%d/%d)", i, n_capabilities);
- tmpcap = &capabilities[i];
+ tmpcap = capabilities[i];
if (tmpcap != cap) {
// we better hope this task doesn't get migrated to
// another Capability while we're waiting for this one.
@@ -1418,8 +1418,8 @@ static void releaseAllCapabilities(nat n, Capability *cap, Task *task)
for (i = 0; i < n; i++) {
if (cap->no != i) {
- task->cap = &capabilities[i];
- releaseCapability(&capabilities[i]);
+ task->cap = capabilities[i];
+ releaseCapability(capabilities[i]);
}
}
task->cap = cap;
@@ -1540,21 +1540,21 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
|| (RtsFlags.ParFlags.parGcLoadBalancingEnabled &&
collect_gen >= RtsFlags.ParFlags.parGcLoadBalancingGen)) {
for (i=0; i < n_capabilities; i++) {
- if (capabilities[i].disabled) {
- idle_cap[i] = tryGrabCapability(&capabilities[i], task);
+ if (capabilities[i]->disabled) {
+ idle_cap[i] = tryGrabCapability(capabilities[i], task);
} else {
idle_cap[i] = rtsFalse;
}
}
} else {
for (i=0; i < n_capabilities; i++) {
- if (capabilities[i].disabled) {
- idle_cap[i] = tryGrabCapability(&capabilities[i], task);
+ if (capabilities[i]->disabled) {
+ idle_cap[i] = tryGrabCapability(capabilities[i], task);
} else if (i == cap->no ||
- capabilities[i].idle < RtsFlags.ParFlags.parGcNoSyncWithIdle) {
+ capabilities[i]->idle < RtsFlags.ParFlags.parGcNoSyncWithIdle) {
idle_cap[i] = rtsFalse;
} else {
- idle_cap[i] = tryGrabCapability(&capabilities[i], task);
+ idle_cap[i] = tryGrabCapability(capabilities[i], task);
if (!idle_cap[i]) {
n_failed_trygrab_idles++;
} else {
@@ -1575,7 +1575,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
for (i=0; i < n_capabilities; i++) {
gc_threads[i]->idle = idle_cap[i];
- capabilities[i].idle++;
+ capabilities[i]->idle++;
}
// For all capabilities participating in this GC, wait until
@@ -1606,10 +1606,10 @@ delete_threads_and_gc:
// threads. It just avoids the GC having to do any work to
// figure out that any remaining sparks are garbage.
for (i = 0; i < n_capabilities; i++) {
- capabilities[i].spark_stats.gcd +=
- sparkPoolSize(capabilities[i].sparks);
+ capabilities[i]->spark_stats.gcd +=
+ sparkPoolSize(capabilities[i]->sparks);
// No race here since all Caps are stopped.
- discardSparksCap(&capabilities[i]);
+ discardSparksCap(capabilities[i]);
}
#endif
sched_state = SCHED_SHUTTING_DOWN;
@@ -1625,10 +1625,10 @@ delete_threads_and_gc:
#if defined(THREADED_RTS)
for (i = enabled_capabilities; i < n_capabilities; i++) {
Capability *tmp_cap, *dest_cap;
- tmp_cap = &capabilities[i];
+ tmp_cap = capabilities[i];
ASSERT(tmp_cap->disabled);
if (i != cap->no) {
- dest_cap = &capabilities[i % enabled_capabilities];
+ dest_cap = capabilities[i % enabled_capabilities];
while (!emptyRunQueue(tmp_cap)) {
tso = popRunQueue(tmp_cap);
migrateThread(tmp_cap, tso, dest_cap);
@@ -1703,11 +1703,11 @@ delete_threads_and_gc:
for (i = 0; i < n_capabilities; i++) {
if (i != cap->no) {
if (idle_cap[i]) {
- ASSERT(capabilities[i].running_task == task);
- task->cap = &capabilities[i];
- releaseCapability(&capabilities[i]);
+ ASSERT(capabilities[i]->running_task == task);
+ task->cap = capabilities[i];
+ releaseCapability(capabilities[i]);
} else {
- ASSERT(capabilities[i].running_task != task);
+ ASSERT(capabilities[i]->running_task != task);
}
}
}
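
Together with the scheduleDoGC hunks above, this release loop closes a grab/release pair around the collection: before a parallel GC the initiating task tries to grab every capability that is disabled, or that has been idle for at least parGcNoSyncWithIdle consecutive GCs, records the result in idle_cap[], and afterwards hands back exactly the capabilities it grabbed. A stripped-down sketch of that pairing, assuming toy try_grab/release stand-ins for tryGrabCapability/releaseCapability (the real functions take a Task and do considerably more):

    #include <stdbool.h>

    #define N_CAPS 4

    typedef struct { bool disabled; int idle; bool held; } Cap;

    static Cap caps[N_CAPS];
    static bool idle_cap[N_CAPS];

    /* Toy stand-ins for tryGrabCapability/releaseCapability. */
    static bool try_grab(Cap *c) { if (c->held) return false; c->held = true; return true; }
    static void release(Cap *c)  { c->held = false; }

    /* Before GC: grab disabled caps unconditionally; grab others only if
     * they have sat idle for at least 'threshold' consecutive GCs. */
    static void grab_idle_caps(int me, int threshold)
    {
        for (int i = 0; i < N_CAPS; i++) {
            if (caps[i].disabled)
                idle_cap[i] = try_grab(&caps[i]);
            else if (i == me || caps[i].idle < threshold)
                idle_cap[i] = false;
            else
                idle_cap[i] = try_grab(&caps[i]);
        }
    }

    /* After GC: hand back exactly the capabilities we grabbed. */
    static void release_idle_caps(int me)
    {
        for (int i = 0; i < N_CAPS; i++)
            if (i != me && idle_cap[i])
                release(&caps[i]);
    }

    int main(void)
    {
        caps[3].disabled = true;   /* grabbed unconditionally */
        caps[2].idle = 10;         /* grabbed: idle past threshold */
        grab_idle_caps(0, 4);
        release_idle_caps(0);
        return 0;
    }
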
@@ -1799,7 +1799,7 @@ forkProcess(HsStablePtr *entry
ACQUIRE_LOCK(&task->lock);
for (i=0; i < n_capabilities; i++) {
- ACQUIRE_LOCK(&capabilities[i].lock);
+ ACQUIRE_LOCK(&capabilities[i]->lock);
}
stopTimer(); // See #4074
@@ -1820,8 +1820,8 @@ forkProcess(HsStablePtr *entry
RELEASE_LOCK(&task->lock);
for (i=0; i < n_capabilities; i++) {
- releaseCapability_(&capabilities[i],rtsFalse);
- RELEASE_LOCK(&capabilities[i].lock);
+ releaseCapability_(capabilities[i],rtsFalse);
+ RELEASE_LOCK(&capabilities[i]->lock);
}
boundTaskExiting(task);
@@ -1837,7 +1837,7 @@ forkProcess(HsStablePtr *entry
initMutex(&task->lock);
for (i=0; i < n_capabilities; i++) {
- initMutex(&capabilities[i].lock);
+ initMutex(&capabilities[i]->lock);
}
#endif
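
These forkProcess hunks implement the usual fork-safety discipline for a multithreaded process: acquire every capability lock before fork() so no lock is held mid-operation at the instant of the fork, release them all in the parent, and reinitialise the mutexes in the child, where only the forking thread survives. A minimal pthread sketch of the pattern, independent of the RTS:

    #include <pthread.h>
    #include <sys/types.h>
    #include <unistd.h>

    #define N 4
    static pthread_mutex_t lock[N];

    int main(void)
    {
        for (int i = 0; i < N; i++)
            pthread_mutex_init(&lock[i], NULL);

        /* Quiesce: hold every lock across the fork so the child never
         * inherits a mutex that some other thread held at fork time. */
        for (int i = 0; i < N; i++)
            pthread_mutex_lock(&lock[i]);

        pid_t pid = fork();

        if (pid != 0) {
            /* Parent: simply release everything. */
            for (int i = 0; i < N; i++)
                pthread_mutex_unlock(&lock[i]);
        } else {
            /* Child: only the forking thread survives; mirror the
             * initMutex calls above and reset the mutexes to a known
             * state instead of relying on state copied from the parent. */
            for (int i = 0; i < N; i++)
                pthread_mutex_init(&lock[i], NULL);
        }
        return 0;
    }
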
@@ -1871,7 +1871,7 @@ forkProcess(HsStablePtr *entry
discardTasksExcept(task);
for (i=0; i < n_capabilities; i++) {
- cap = &capabilities[i];
+ cap = capabilities[i];
// Empty the run queue. It seems tempting to let all the
// killed threads stay on the run queue as zombies to be
@@ -1900,7 +1900,7 @@ forkProcess(HsStablePtr *entry
releaseCapability(cap);
}
}
- cap = &capabilities[0];
+ cap = capabilities[0];
task->cap = cap;
// Empty the threads lists. Otherwise, the garbage
@@ -1965,8 +1965,7 @@ setNumCapabilities (nat new_n_capabilities USED_IF_THREADS)
Task *task;
Capability *cap;
nat sync;
- StgTSO* t;
- nat g, n;
+ nat n;
Capability *old_capabilities = NULL;
nat old_n_capabilities = n_capabilities;
@@ -2013,8 +2012,8 @@ setNumCapabilities (nat new_n_capabilities USED_IF_THREADS)
// structures, the nursery, etc.
//
for (n = new_n_capabilities; n < enabled_capabilities; n++) {
- capabilities[n].disabled = rtsTrue;
- traceCapDisable(&capabilities[n]);
+ capabilities[n]->disabled = rtsTrue;
+ traceCapDisable(capabilities[n]);
}
enabled_capabilities = new_n_capabilities;
}
@@ -2025,8 +2024,8 @@ setNumCapabilities (nat new_n_capabilities USED_IF_THREADS)
// enable any disabled capabilities, up to the required number
for (n = enabled_capabilities;
n < new_n_capabilities && n < n_capabilities; n++) {
- capabilities[n].disabled = rtsFalse;
- traceCapEnable(&capabilities[n]);
+ capabilities[n]->disabled = rtsFalse;
+ traceCapEnable(capabilities[n]);
}
enabled_capabilities = n;
@@ -2042,23 +2041,10 @@ setNumCapabilities (nat new_n_capabilities USED_IF_THREADS)
// Resize the capabilities array
// NB. after this, capabilities points somewhere new. Any pointers
// of type (Capability *) are now invalid.
- old_capabilities = moreCapabilities(n_capabilities, new_n_capabilities);
-
- // update our own cap pointer
- cap = &capabilities[cap->no];
+ moreCapabilities(n_capabilities, new_n_capabilities);
// Resize and update storage manager data structures
storageAddCapabilities(n_capabilities, new_n_capabilities);
-
- // Update (Capability *) refs in the Task manager.
- updateCapabilityRefs();
-
- // Update (Capability *) refs from TSOs
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (t = generations[g].threads; t != END_TSO_QUEUE; t = t->global_link) {
- t->cap = &capabilities[t->cap->no];
- }
- }
}
}
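
The deletions in this hunk are the payoff of the pointer-array representation: the capabilities array itself still moves when resized (as the retained comment notes), but the individual Capability structs no longer do, so Capability pointers held by the current cap, the Task manager, and every TSO stay valid and the old fixup pass has nothing left to fix. A sketch of what such a grow function can look like under that assumption (hypothetical grow_caps; the real moreCapabilities is defined elsewhere in the RTS):

    #include <stdlib.h>

    typedef struct { int no; } Cap;

    static Cap **caps;
    static int n_caps;

    /* Grow from 'from' to 'to' capabilities. Only the pointer array is
     * reallocated; existing Cap structs never move, so any (Cap *) held
     * elsewhere (tasks, threads) stays valid across the resize. */
    static void grow_caps(int from, int to)
    {
        Cap **new_caps = malloc(to * sizeof(Cap *));
        for (int i = 0; i < from; i++)
            new_caps[i] = caps[i];              /* reuse existing structs */
        for (int i = from; i < to; i++) {
            new_caps[i] = calloc(1, sizeof(Cap));
            new_caps[i]->no = i;
        }
        free(caps);                             /* old pointer array only */
        caps = new_caps;
        n_caps = to;
    }

    int main(void)
    {
        grow_caps(0, 2);
        Cap *keep = caps[1];
        grow_caps(2, 4);
        return keep == caps[1] ? 0 : 1;   /* address survived the resize */
    }
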
@@ -2324,7 +2310,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
if (cpu == cap->no) {
appendToRunQueue(cap,tso);
} else {
- migrateThread(cap, tso, &capabilities[cpu]);
+ migrateThread(cap, tso, capabilities[cpu]);
}
#else
appendToRunQueue(cap,tso);
@@ -2407,7 +2393,7 @@ startWorkerTasks (nat from USED_IF_THREADS, nat to USED_IF_THREADS)
Capability *cap;
for (i = from; i < to; i++) {
- cap = &capabilities[i];
+ cap = capabilities[i];
ACQUIRE_LOCK(&cap->lock);
startWorkerTask(cap);
RELEASE_LOCK(&cap->lock);
@@ -2506,9 +2492,6 @@ freeScheduler( void )
// Capability).
if (still_running == 0) {
freeCapabilities();
- if (n_capabilities != 1) {
- stgFree(capabilities);
- }
}
RELEASE_LOCK(&sched_mutex);
#if defined(THREADED_RTS)
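
With each Capability allocated individually, ownership of both the structs and the pointer array presumably moves into freeCapabilities, so Schedule.c no longer special-cases freeing the array for n_capabilities != 1. An illustrative sketch of that consolidated teardown (hypothetical free_caps; not the actual RTS code):

    #include <stdlib.h>

    typedef struct { int no; } Cap;

    static Cap **caps;
    static int n_caps;

    /* Free each individually allocated Cap, then the pointer array itself;
     * callers no longer free the array separately (cf. the deletion above). */
    static void free_caps(void)
    {
        for (int i = 0; i < n_caps; i++)
            free(caps[i]);
        free(caps);
        caps = NULL;
        n_caps = 0;
    }

    int main(void)
    {
        caps = malloc(2 * sizeof(Cap *));
        for (int i = 0; i < 2; i++)
            caps[i] = calloc(1, sizeof(Cap));
        n_caps = 2;
        free_caps();
        return 0;
    }
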