Diffstat (limited to 'rts/sm/Scav.c')
-rw-r--r-- | rts/sm/Scav.c | 160
1 file changed, 80 insertions(+), 80 deletions(-)
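The change below is a mechanical substitution: the RTS-specific rtsBool type and its rtsTrue/rtsFalse constants are replaced by C99's bool, true and false from <stdbool.h>. As a rough, stand-alone illustration of the pattern (not GHC code; demo_gc_thread and its fields are invented stand-ins, loosely modelled on the gc_thread flags touched in this file):

/* Illustrative sketch only: mirrors the rtsBool -> bool substitution made
 * throughout this patch.  demo_gc_thread is an invented stand-in, not the
 * RTS's real gc_thread structure. */
#include <stdbool.h>
#include <stdio.h>

struct demo_gc_thread {
    bool eager_promotion;   /* was: rtsBool, assigned rtsTrue/rtsFalse */
    bool failed_to_evac;
};

static void scavenge_demo(struct demo_gc_thread *gct)
{
    bool saved_eager = gct->eager_promotion;   /* was: rtsBool saved_eager */

    gct->eager_promotion = false;              /* was: rtsFalse */
    /* ... evacuate the object's pointer fields here ... */
    gct->eager_promotion = saved_eager;

    gct->failed_to_evac = true;                /* was: rtsTrue */
}

int main(void)
{
    struct demo_gc_thread gct = { true, false };
    scavenge_demo(&gct);
    printf("failed_to_evac = %d\n", (int)gct.failed_to_evac);
    return 0;
}

The sketch compiles with any C99 compiler (e.g. cc -std=c99 demo.c).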
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 595d8275cf..940f11fea4 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -51,7 +51,7 @@ static void scavenge_large_bitmap (StgPtr p,
 static void
 scavengeTSO (StgTSO *tso)
 {
-    rtsBool saved_eager;
+    bool saved_eager;
 
     debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
 
@@ -66,7 +66,7 @@ scavengeTSO (StgTSO *tso)
     }
 
     saved_eager = gct->eager_promotion;
-    gct->eager_promotion = rtsFalse;
+    gct->eager_promotion = false;
 
     evacuate((StgClosure **)&tso->blocked_exceptions);
     evacuate((StgClosure **)&tso->bq);
@@ -107,10 +107,10 @@ scavengeTSO (StgTSO *tso)
 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
 {
     W_ m;
-    rtsBool any_failed;
+    bool any_failed;
     StgPtr p, q;
 
-    any_failed = rtsFalse;
+    any_failed = false;
     p = (StgPtr)&a->payload[0];
 
     for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++) {
@@ -119,9 +119,9 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
             evacuate((StgClosure**)p);
         }
         if (gct->failed_to_evac) {
-            any_failed = rtsTrue;
+            any_failed = true;
             *mutArrPtrsCard(a,m) = 1;
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
         } else {
             *mutArrPtrsCard(a,m) = 0;
         }
@@ -133,9 +133,9 @@ static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
             evacuate((StgClosure**)p);
         }
         if (gct->failed_to_evac) {
-            any_failed = rtsTrue;
+            any_failed = true;
             *mutArrPtrsCard(a,m) = 1;
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
         } else {
             *mutArrPtrsCard(a,m) = 0;
         }
@@ -150,9 +150,9 @@ static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
 {
     W_ m;
     StgPtr p, q;
-    rtsBool any_failed;
+    bool any_failed;
 
-    any_failed = rtsFalse;
+    any_failed = false;
 
     for (m = 0; m < mutArrPtrsCards(a->ptrs); m++) {
         if (*mutArrPtrsCard(a,m) != 0) {
@@ -163,8 +163,8 @@ static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
                 evacuate((StgClosure**)p);
             }
             if (gct->failed_to_evac) {
-                any_failed = rtsTrue;
-                gct->failed_to_evac = rtsFalse;
+                any_failed = true;
+                gct->failed_to_evac = false;
             } else {
                 *mutArrPtrsCard(a,m) = 0;
             }
@@ -408,7 +408,7 @@ scavenge_block (bdescr *bd)
 {
     StgPtr p, q;
     const StgInfoTable *info;
-    rtsBool saved_eager_promotion;
+    bool saved_eager_promotion;
     gen_workspace *ws;
 
     debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
@@ -417,7 +417,7 @@ scavenge_block (bdescr *bd)
     gct->scan_bd = bd;
     gct->evac_gen_no = bd->gen_no;
     saved_eager_promotion = gct->eager_promotion;
-    gct->failed_to_evac = rtsFalse;
+    gct->failed_to_evac = false;
 
     ws = &gct->gens[bd->gen->no];
 
@@ -441,7 +441,7 @@ scavenge_block (bdescr *bd)
     case MVAR_DIRTY:
     {
         StgMVar *mvar = ((StgMVar *)p);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&mvar->head);
         evacuate((StgClosure **)&mvar->tail);
         evacuate((StgClosure **)&mvar->value);
@@ -459,7 +459,7 @@ scavenge_block (bdescr *bd)
     case TVAR:
     {
         StgTVar *tvar = ((StgTVar *)p);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&tvar->current_value);
         evacuate((StgClosure **)&tvar->first_watch_queue_entry);
         gct->eager_promotion = saved_eager_promotion;
@@ -590,7 +590,7 @@ scavenge_block (bdescr *bd)
 
     case MUT_VAR_CLEAN:
     case MUT_VAR_DIRTY:
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate(&((StgMutVar *)p)->var);
         gct->eager_promotion = saved_eager_promotion;
 
@@ -606,7 +606,7 @@ scavenge_block (bdescr *bd)
     {
         StgBlockingQueue *bq = (StgBlockingQueue *)p;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate(&bq->bh);
         evacuate((StgClosure**)&bq->owner);
         evacuate((StgClosure**)&bq->queue);
@@ -661,7 +661,7 @@ scavenge_block (bdescr *bd)
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
         // avoid traversing it during minor GCs.
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
 
@@ -672,7 +672,7 @@ scavenge_block (bdescr *bd)
         }
 
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+        gct->failed_to_evac = true; // always put it on the mutable list.
         break;
     }
 
@@ -702,7 +702,7 @@ scavenge_block (bdescr *bd)
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
         // avoid traversing it during minor GCs.
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
         for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
             evacuate((StgClosure **)p);
@@ -715,7 +715,7 @@ scavenge_block (bdescr *bd)
             ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
         }
 
-        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
+        gct->failed_to_evac = true; // always put it on the mutable list.
         break;
     }
 
@@ -751,7 +751,7 @@ scavenge_block (bdescr *bd)
     {
         StgStack *stack = (StgStack*)p;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         scavenge_stack(stack->sp, stack->stack + stack->stack_size);
         stack->dirty = gct->failed_to_evac;
@@ -765,7 +765,7 @@ scavenge_block (bdescr *bd)
     {
         StgPtr end;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
         for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -774,7 +774,7 @@ scavenge_block (bdescr *bd)
         p += info->layout.payload.nptrs;
 
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue; // mutable
+        gct->failed_to_evac = true; // mutable
         break;
     }
 
@@ -783,7 +783,7 @@ scavenge_block (bdescr *bd)
         StgWord i;
         StgTRecChunk *tc = ((StgTRecChunk *) p);
         TRecEntry *e = &(tc -> entries[0]);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&tc->prev_chunk);
         for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
             evacuate((StgClosure **)&e->tvar);
@@ -791,7 +791,7 @@ scavenge_block (bdescr *bd)
             evacuate((StgClosure **)&e->new_value);
         }
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue; // mutable
+        gct->failed_to_evac = true; // mutable
         p += sizeofW(StgTRecChunk);
         break;
     }
@@ -816,7 +816,7 @@ scavenge_block (bdescr *bd)
      * the current object points to into the current generation.
      */
     if (gct->failed_to_evac) {
-        gct->failed_to_evac = rtsFalse;
+        gct->failed_to_evac = false;
         if (bd->gen_no > 0) {
             recordMutableGen_GC((StgClosure *)q, bd->gen_no);
         }
@@ -856,7 +856,7 @@ scavenge_mark_stack(void)
 {
     StgPtr p, q;
     const StgInfoTable *info;
-    rtsBool saved_eager_promotion;
+    bool saved_eager_promotion;
 
     gct->evac_gen_no = oldest_gen->no;
     saved_eager_promotion = gct->eager_promotion;
@@ -873,7 +873,7 @@ scavenge_mark_stack(void)
         case MVAR_DIRTY:
         {
             StgMVar *mvar = ((StgMVar *)p);
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate((StgClosure **)&mvar->head);
             evacuate((StgClosure **)&mvar->tail);
             evacuate((StgClosure **)&mvar->value);
@@ -890,7 +890,7 @@ scavenge_mark_stack(void)
         case TVAR:
         {
             StgTVar *tvar = ((StgTVar *)p);
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate((StgClosure **)&tvar->current_value);
             evacuate((StgClosure **)&tvar->first_watch_queue_entry);
             gct->eager_promotion = saved_eager_promotion;
@@ -997,7 +997,7 @@ scavenge_mark_stack(void)
         case MUT_VAR_CLEAN:
         case MUT_VAR_DIRTY:
         {
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate(&((StgMutVar *)p)->var);
             gct->eager_promotion = saved_eager_promotion;
 
@@ -1013,7 +1013,7 @@ scavenge_mark_stack(void)
         {
             StgBlockingQueue *bq = (StgBlockingQueue *)p;
 
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate(&bq->bh);
             evacuate((StgClosure**)&bq->owner);
             evacuate((StgClosure**)&bq->queue);
@@ -1064,7 +1064,7 @@ scavenge_mark_stack(void)
             // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
             // avoid traversing it during minor GCs.
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
 
             scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
 
@@ -1075,7 +1075,7 @@ scavenge_mark_stack(void)
             }
 
             gct->eager_promotion = saved_eager_promotion;
-            gct->failed_to_evac = rtsTrue; // mutable anyhow.
+            gct->failed_to_evac = true; // mutable anyhow.
             break;
         }
 
@@ -1102,14 +1102,14 @@ scavenge_mark_stack(void)
             // follow everything
         {
             StgPtr next;
-            rtsBool saved_eager;
+            bool saved_eager;
 
             // We don't eagerly promote objects pointed to by a mutable
             // array, but if we find the array only points to objects in
             // the same or an older generation, we mark it "clean" and
             // avoid traversing it during minor GCs.
             saved_eager = gct->eager_promotion;
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
             for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
                 evacuate((StgClosure **)p);
@@ -1122,7 +1122,7 @@ scavenge_mark_stack(void)
                 ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
             }
 
-            gct->failed_to_evac = rtsTrue; // mutable anyhow.
+            gct->failed_to_evac = true; // mutable anyhow.
             break;
         }
 
@@ -1157,7 +1157,7 @@ scavenge_mark_stack(void)
         {
             StgStack *stack = (StgStack*)p;
 
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
 
             scavenge_stack(stack->sp, stack->stack + stack->stack_size);
             stack->dirty = gct->failed_to_evac;
@@ -1170,7 +1170,7 @@ scavenge_mark_stack(void)
         {
             StgPtr end;
 
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
 
             end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
             for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -1178,7 +1178,7 @@ scavenge_mark_stack(void)
             }
 
             gct->eager_promotion = saved_eager_promotion;
-            gct->failed_to_evac = rtsTrue; // mutable
+            gct->failed_to_evac = true; // mutable
             break;
         }
 
@@ -1187,7 +1187,7 @@ scavenge_mark_stack(void)
             StgWord i;
             StgTRecChunk *tc = ((StgTRecChunk *) p);
             TRecEntry *e = &(tc -> entries[0]);
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
             evacuate((StgClosure **)&tc->prev_chunk);
             for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                 evacuate((StgClosure **)&e->tvar);
@@ -1195,7 +1195,7 @@ scavenge_mark_stack(void)
                 evacuate((StgClosure **)&e->new_value);
             }
             gct->eager_promotion = saved_eager_promotion;
-            gct->failed_to_evac = rtsTrue; // mutable
+            gct->failed_to_evac = true; // mutable
             break;
         }
 
@@ -1205,7 +1205,7 @@ scavenge_mark_stack(void)
         }
 
         if (gct->failed_to_evac) {
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
             if (gct->evac_gen_no) {
                 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
             }
@@ -1221,12 +1221,12 @@ scavenge_mark_stack(void)
    objects can have this property.
    -------------------------------------------------------------------------- */
 
-static rtsBool
+static bool
 scavenge_one(StgPtr p)
 {
     const StgInfoTable *info;
-    rtsBool no_luck;
-    rtsBool saved_eager_promotion;
+    bool no_luck;
+    bool saved_eager_promotion;
 
     saved_eager_promotion = gct->eager_promotion;
 
@@ -1239,7 +1239,7 @@ scavenge_one(StgPtr p)
     case MVAR_DIRTY:
     {
         StgMVar *mvar = ((StgMVar *)p);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&mvar->head);
         evacuate((StgClosure **)&mvar->tail);
         evacuate((StgClosure **)&mvar->value);
@@ -1256,7 +1256,7 @@ scavenge_one(StgPtr p)
     case TVAR:
     {
         StgTVar *tvar = ((StgTVar *)p);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&tvar->current_value);
         evacuate((StgClosure **)&tvar->first_watch_queue_entry);
         gct->eager_promotion = saved_eager_promotion;
@@ -1321,7 +1321,7 @@ scavenge_one(StgPtr p)
     case MUT_VAR_DIRTY:
     {
         StgPtr q = p;
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate(&((StgMutVar *)p)->var);
         gct->eager_promotion = saved_eager_promotion;
 
@@ -1337,7 +1337,7 @@ scavenge_one(StgPtr p)
     {
         StgBlockingQueue *bq = (StgBlockingQueue *)p;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate(&bq->bh);
         evacuate((StgClosure**)&bq->owner);
         evacuate((StgClosure**)&bq->queue);
@@ -1388,7 +1388,7 @@ scavenge_one(StgPtr p)
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
         // avoid traversing it during minor GCs.
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
 
@@ -1399,7 +1399,7 @@ scavenge_one(StgPtr p)
         }
 
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue;
+        gct->failed_to_evac = true;
         break;
     }
 
@@ -1423,14 +1423,14 @@ scavenge_one(StgPtr p)
     case SMALL_MUT_ARR_PTRS_DIRTY:
     {
         StgPtr next, q;
-        rtsBool saved_eager;
+        bool saved_eager;
 
         // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
         // avoid traversing it during minor GCs.
         saved_eager = gct->eager_promotion;
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         q = p;
         next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
         for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
@@ -1444,7 +1444,7 @@ scavenge_one(StgPtr p)
            ((StgClosure *)q)->header.info = &stg_SMALL_MUT_ARR_PTRS_CLEAN_info;
         }
 
-        gct->failed_to_evac = rtsTrue;
+        gct->failed_to_evac = true;
         break;
     }
 
@@ -1479,7 +1479,7 @@ scavenge_one(StgPtr p)
     {
         StgStack *stack = (StgStack*)p;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         scavenge_stack(stack->sp, stack->stack + stack->stack_size);
         stack->dirty = gct->failed_to_evac;
@@ -1492,7 +1492,7 @@ scavenge_one(StgPtr p)
     {
         StgPtr end;
 
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
 
         end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
         for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
@@ -1500,7 +1500,7 @@ scavenge_one(StgPtr p)
         }
 
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue; // mutable
+        gct->failed_to_evac = true; // mutable
         break;
     }
 
@@ -1510,7 +1510,7 @@ scavenge_one(StgPtr p)
         StgWord i;
         StgTRecChunk *tc = ((StgTRecChunk *) p);
         TRecEntry *e = &(tc -> entries[0]);
-        gct->eager_promotion = rtsFalse;
+        gct->eager_promotion = false;
         evacuate((StgClosure **)&tc->prev_chunk);
         for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
             evacuate((StgClosure **)&e->tvar);
@@ -1518,7 +1518,7 @@ scavenge_one(StgPtr p)
            evacuate((StgClosure **)&e->new_value);
         }
         gct->eager_promotion = saved_eager_promotion;
-        gct->failed_to_evac = rtsTrue; // mutable
+        gct->failed_to_evac = true; // mutable
         break;
     }
 
@@ -1562,7 +1562,7 @@ scavenge_one(StgPtr p)
     }
 
     no_luck = gct->failed_to_evac;
-    gct->failed_to_evac = rtsFalse;
+    gct->failed_to_evac = false;
     return (no_luck);
 }
 
@@ -1636,9 +1636,9 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
             continue;
         case MUT_ARR_PTRS_DIRTY:
         {
-            rtsBool saved_eager_promotion;
+            bool saved_eager_promotion;
             saved_eager_promotion = gct->eager_promotion;
-            gct->eager_promotion = rtsFalse;
+            gct->eager_promotion = false;
 
             scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
 
@@ -1649,7 +1649,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
            }
 
            gct->eager_promotion = saved_eager_promotion;
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
             recordMutableGen_GC((StgClosure *)p,gen_no);
             continue;
         }
@@ -1743,7 +1743,7 @@ scavenge_static(void)
          * in case we visit this object again.
          */
         if (gct->failed_to_evac) {
-            gct->failed_to_evac = rtsFalse;
+            gct->failed_to_evac = false;
             recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
         }
         break;
@@ -1779,7 +1779,7 @@ scavenge_static(void)
         barf("scavenge_static: strange closure %d", (int)(info->type));
     }
 
-    ASSERT(gct->failed_to_evac == rtsFalse);
+    ASSERT(gct->failed_to_evac == false);
     }
 }
 
@@ -2009,20 +2009,20 @@ scavenge_large (gen_workspace *ws)
    is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
 
-static rtsBool
+static bool
 scavenge_find_work (void)
 {
     int g;
     gen_workspace *ws;
-    rtsBool did_something, did_anything;
+    bool did_something, did_anything;
     bdescr *bd;
 
     gct->scav_find_work++;
 
-    did_anything = rtsFalse;
+    did_anything = false;
 
 loop:
-    did_something = rtsFalse;
+    did_something = false;
 
     for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
         ws = &gct->gens[g];
@@ -2033,26 +2033,26 @@ loop:
 
         if (ws->todo_bd->u.scan < ws->todo_free) {
             scavenge_block(ws->todo_bd);
-            did_something = rtsTrue;
+            did_something = true;
             break;
         }
 
         // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
             scavenge_large(ws);
-            did_something = rtsTrue;
+            did_something = true;
             break;
         }
 
         if ((bd = grab_local_todo_block(ws)) != NULL) {
             scavenge_block(bd);
-            did_something = rtsTrue;
+            did_something = true;
             break;
         }
     }
 
     if (did_something) {
-        did_anything = rtsTrue;
+        did_anything = true;
         goto loop;
     }
 
@@ -2062,13 +2062,13 @@ loop:
         for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
             if ((bd = steal_todo_block(g)) != NULL) {
                 scavenge_block(bd);
-                did_something = rtsTrue;
+                did_something = true;
                 break;
             }
         }
 
         if (did_something) {
-            did_anything = rtsTrue;
+            did_anything = true;
             goto loop;
         }
     }
@@ -2086,10 +2086,10 @@ loop:
 void
 scavenge_loop(void)
 {
-    rtsBool work_to_do;
+    bool work_to_do;
 
 loop:
-    work_to_do = rtsFalse;
+    work_to_do = false;
 
     // scavenge static objects
     if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
@@ -2100,7 +2100,7 @@ loop:
     // scavenge objects in compacted generation
     if (mark_stack_bd != NULL && !mark_stack_empty()) {
         scavenge_mark_stack();
-        work_to_do = rtsTrue;
+        work_to_do = true;
     }
 
     // Order is important here: we want to deal in full blocks as
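Several of the hunks above carry the same in-line comment: a mutable array is not eagerly promoted, and it is marked "clean" only when everything it points to already lives in the same or an older generation, so minor GCs can skip it. A much-simplified sketch of that clean/dirty decision follows; all names are invented for illustration, and it omits the per-card granularity the real scavenge_mut_arr_ptrs code uses.

/* Simplified illustration of the clean/dirty decision made while
 * scavenging a mutable pointer array.  All names are invented; the real
 * RTS works per card and reports failure via gct->failed_to_evac. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_array {
    size_t  n_ptrs;
    void  **payload;
    bool    dirty;        /* stand-in for the CLEAN/DIRTY info table */
};

/* Pretend evacuation: returns true when the target could not be moved
 * out of a younger generation (what failed_to_evac records). */
static bool evacuate_demo(void **slot)
{
    (void)slot;
    return false;         /* for the demo, assume promotion succeeds */
}

static void scavenge_array_demo(struct demo_array *arr)
{
    bool any_failed = false;              /* mirrors any_failed above */

    for (size_t i = 0; i < arr->n_ptrs; i++) {
        if (evacuate_demo(&arr->payload[i])) {
            any_failed = true;
        }
    }

    /* Stay dirty (rescanned on minor GCs) only if something still
     * points into a younger generation; otherwise mark it clean. */
    arr->dirty = any_failed;
}

int main(void)
{
    void *slots[2] = { NULL, NULL };
    struct demo_array arr = { 2, slots, true };
    scavenge_array_demo(&arr);
    printf("dirty after scavenge = %d\n", (int)arr.dirty);
    return 0;
}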