// scavenge only the marked areas of a MUT_ARR_PTRS static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a) { lnat m; StgPtr p, q; rtsBool any_failed; any_failed = rtsFalse; for (m = 0; m < mutArrPtrsCards(a->ptrs); m++) { if (*mutArrPtrsCard(a,m) != 0) { p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS]; q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS), (StgPtr)&a->payload[a->ptrs]); for (; p < q; p++) { evacuate((StgClosure**)p); } if (gct->failed_to_evac) { any_failed = rtsTrue; gct->failed_to_evac = rtsFalse; } else { *mutArrPtrsCard(a,m) = 0; } } } gct->failed_to_evac = any_failed; return (StgPtr)a + mut_arr_ptrs_sizeW(a); }
// Thread (prepare forwarding for) every large object on the block
// chain 'bd'.  Each large-object block holds exactly one closure,
// starting at bd->start.  Objects with no interior pointers
// (ARR_WORDS) are skipped; everything else has its pointer fields
// threaded, either inline or via a type-specific helper.
static void update_fwd_large( bdescr *bd )
{
    StgPtr obj;
    const StgInfoTable* info;

    while (bd != NULL) {
        obj = bd->start;
        info = get_itbl((StgClosure *)obj);

        switch (info->type) {

        case ARR_WORDS:
            // nothing to follow
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
        {
            // follow everything: thread each element of the array
            StgPtr end = obj + mut_arr_ptrs_sizeW((StgMutArrPtrs*)obj);
            StgPtr cursor;
            for (cursor = (P_)((StgMutArrPtrs *)obj)->payload;
                 cursor < end;
                 cursor++) {
                thread((StgClosure **)cursor);
            }
            break;
        }

        case TSO:
            thread_TSO((StgTSO *)obj);
            break;

        case AP_STACK:
            thread_AP_STACK((StgAP_STACK *)obj);
            break;

        case PAP:
            thread_PAP((StgPAP *)obj);
            break;

        default:
            barf("update_fwd_large: unknown/strange object %d", (int)(info->type));
        }

        bd = bd->link;
    }
}
// Scavenge every element of a MUT_ARR_PTRS closure, maintaining the
// card table as we go: a card is set to 1 if any element under it
// failed to evacuate to the requested generation, 0 otherwise.
// Returns a pointer just past the array closure.
static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    lnat m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    // Walk all full cards except the last one.  The (int) casts keep
    // the comparison signed so that a card count of zero doesn't
    // underflow to a huge unsigned value.
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    // The last card: may cover fewer elements than a full card, so it
    // is bounded by the end of the payload rather than the card size.
    // 'm' still indexes the last card here (loop left it one short).
    q = (StgPtr)&a->payload[a->ptrs];
    if (p < q) {
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    // Report a single aggregate failure flag for the whole array.
    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
/* Evacuate the closure pointed to by *p: copy it to to-space (or link,
 * mark, or short-cut it, depending on its kind) and overwrite *p with
 * its new address.  The pointer's tag bits are stripped on entry and
 * re-applied to the result.  If the object could not be moved into the
 * generation requested by gct->evac_gen_no, gct->failed_to_evac is set
 * (this is the generational write barrier).
 */
REGPARM1 GNUC_ATTR_HOT void evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  nat gen_no;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

  /* Static (non-heap) closures are never copied; during a major GC
   * they are instead chained onto the static-object list so they get
   * scavenged exactly once.
   */
  if (!HEAP_ALLOCED_GC(q)) {
      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          // only needs scavenging if it has a non-empty SRT
          if (info->srt_bitmap != 0) {
              evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(FUN_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
          return;

      case CONSTR_STATIC:
          evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {
      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list. */
      if (bd->flags & BF_LARGE) {
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a gen that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          push_mark_stack((P_)q);
      }
      return;
  }

  gen_no = bd->dest_no;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
      /* Already evacuated, just return the forwarding address.
       * HOWEVER: if the requested destination generation (gct->evac_gen) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the gct->failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       */
      /*
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  gen is the lowest generation that the
       * current object would be evacuated to, so we only do the full
       * check if gen is too low.
       */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (gen_no < gct->evac_gen_no) {  // optimisation
          if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }

  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      // Another thread is in the middle of evacuating this object;
      // retry until the forwarding pointer appears.
      goto loop;

  // For ints and chars of low value, save space by replacing references to
  //    these with closures with references to common, shared ones in the RTS.
  //
  // * Except when compiling into Windows DLLs which don't support cross-package
  //    data references very well.
  //
  case CONSTR_0_1:
  {
#if defined(COMPILING_WINDOWS_DLL)
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
#else
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:
          (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag, (StgClosure *)CHARLIKE_CLOSURE((StgChar)w) );
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag, (StgClosure *)INTLIKE_CLOSURE((StgInt)w) );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      }
#endif
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
    copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
    return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case FUN:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
      return;

  case BLACKHOLE:
  {
      // A BLACKHOLE's indirectee tells us what it currently is: if it
      // points to a TSO / blocking queue (or is being evacuated by
      // someone else), copy the blackhole itself; otherwise it has
      // effectively been updated, so shortcut to the indirectee.
      StgClosure *r;
      const StgInfoTable *i;
      r = ((StgInd*)q)->indirectee;
      if (GET_CLOSURE_TAG(r) == 0) {
          i = r->header.info;
          if (IS_FORWARDING_PTR(i)) {
              r = (StgClosure *)UN_FORWARDING_PTR(i);
              i = r->header.info;
          }
          if (i == &stg_TSO_info
              || i == &stg_WHITEHOLE_info
              || i == &stg_BLOCKING_QUEUE_CLEAN_info
              || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
              copy(p,info,q,sizeofW(StgInd),gen_no);
              return;
          }
          ASSERT(i != &stg_IND_info);
      }
      q = r;
      *p = r;
      goto loop;
  }

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
  case TVAR:
  case BLOCKING_QUEUE:
  case WEAK:
  case PRIM:
  case MUT_PRIM:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case UPDATE_FRAME:
  case UNDERFLOW_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
      return;

  case SMALL_MUT_ARR_PTRS_CLEAN:
  case SMALL_MUT_ARR_PTRS_DIRTY:
  case SMALL_MUT_ARR_PTRS_FROZEN:
  case SMALL_MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
      return;

  case TSO:
      copy(p,info,q,sizeofW(StgTSO),gen_no);
      return;

  case STACK:
    {
      StgStack *stack = (StgStack *)q;

      /* To evacuate a small STACK, we need to adjust the stack pointer
       */
      {
          StgStack *new_stack;
          StgPtr r, s;
          rtsBool mine;

          // copyPart copies only the header; if we "own" the copy
          // (mine), we relocate and copy the live stack contents too.
          mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                          sizeofW(StgStack), gen_no);
          if (mine) {
              new_stack = (StgStack *)*p;
              move_STACK(stack, new_stack);
              for (r = stack->sp, s = new_stack->sp;
                   r < stack->stack + stack->stack_size;) {
                  *s++ = *r++;
              }
          }
          return;
      }
    }

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
      return;

  default:
    barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}
// Verify one block of a compact region: walk every closure between the
// start of the block and bd->free, checking that it looks like a valid
// closure and that every pointer it contains stays inside the same
// compact region (via check_object_in_compact).  Barfs on any closure
// type that may not legally appear in a compact.
static void
verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
{
    bdescr *bd;
    StgPtr p;
    const StgInfoTable *info;
    StgClosure *q;

    p = (P_)firstBlockGetCompact(block);
    bd = Bdescr((P_)block);
    while (p < bd->free) {
        q = (StgClosure*)p;

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

        info = get_itbl(q);
        switch (info->type) {
        // NB: the CONSTR_N_M cases below deliberately fall through:
        // each case checks one extra pointer field, then shares the
        // size computation of the case beneath it.
        case CONSTR_1_0:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0]));
            /* fallthrough */
        case CONSTR_0_1:
            p += sizeofW(StgClosure) + 1;
            break;

        case CONSTR_2_0:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[1]));
            /* fallthrough */
        case CONSTR_1_1:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0]));
            /* fallthrough */
        case CONSTR_0_2:
            p += sizeofW(StgClosure) + 2;
            break;

        case CONSTR:
        case PRIM:
        case CONSTR_NOCAF:
        {
            uint32_t i;

            // check all pointer fields, then skip pointers + non-pointers
            for (i = 0; i < info->layout.payload.ptrs; i++) {
                check_object_in_compact(str, UNTAG_CLOSURE(q->payload[i]));
            }
            p += sizeofW(StgClosure) + info->layout.payload.ptrs +
                info->layout.payload.nptrs;
            break;
        }

        case ARR_WORDS:
            // raw bytes, no pointers to check
            p += arr_words_sizeW((StgArrBytes*)p);
            break;

        case MUT_ARR_PTRS_FROZEN_CLEAN:
        case MUT_ARR_PTRS_FROZEN_DIRTY:
            verify_mut_arr_ptrs(str, (StgMutArrPtrs*)p);
            p += mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            break;

        case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
        case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
        {
            uint32_t i;
            StgSmallMutArrPtrs *arr = (StgSmallMutArrPtrs*)p;

            for (i = 0; i < arr->ptrs; i++)
                check_object_in_compact(str, UNTAG_CLOSURE(arr->payload[i]));

            p += sizeofW(StgSmallMutArrPtrs) + arr->ptrs;
            break;
        }

        case COMPACT_NFDATA:
            // the region's own header closure; no pointers to verify here
            p += sizeofW(StgCompactNFData);
            break;

        default:
            barf("verify_consistency_block");
        }
    }

    return;
}
// Walk every closure in the given chain of heap blocks, and for each
// non-primitive closure record its info-table address in 'addrs' via
// checkAddress.  Stack-like payloads (STACK, AP_STACK) are searched
// with searchStackChunk.  The per-type switch computes each closure's
// size so the scan can step to the next one.
static void
searchHeapBlocks (HashTable *addrs, bdescr *bd)
{
    StgPtr p;
    StgInfoTable *info;
    nat size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        if (bd->flags & BF_PINNED) {
            // Assume that objects in PINNED blocks cannot refer to
            // anything we are searching for.
            // NOTE(review): original comment was truncated here — confirm
            // the intended justification against the upstream source.
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case BLACKHOLE:
            case BLOCKING_QUEUE:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                prim = rtsTrue;
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                prim = rtsTrue;
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                prim = rtsTrue;
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
            {
                StgAP_STACK *ap = (StgAP_STACK *)p;
                prim = rtsTrue;
                size = ap_stack_sizeW(ap);
                // the payload is a stack chunk; search it as such
                searchStackChunk(addrs, (StgPtr)ap->payload,
                                 (StgPtr)ap->payload + ap->size);
                break;
            }

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrBytes*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
                size = sizeofW(StgTSO);
                break;

            case STACK:
            {
                StgStack *stack = (StgStack*)p;
                prim = rtsTrue;
                // search only the live part of the stack (sp..end)
                searchStackChunk(addrs, stack->sp,
                                 stack->stack + stack->stack_size);
                size = stack_sizeW(stack);
                break;
            }

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            if (!prim) {
                checkAddress(addrs,info);
            }

            p += size;
        }
    }
}
// ToDo: too big to inline
//
// Thread all pointer fields of the single closure at 'p' (for the
// compacting collector's forwarding scheme) and return the address of
// the word immediately after the closure, i.e. the start of the next
// closure in the block.
//
// NB: several cases reuse 'p' as the loop cursor over the pointer
// payload, so the final "return p + ...nptrs" is computed relative to
// the END of the pointer section — this is intentional.
static /* STATIC_INLINE */ StgPtr
thread_obj (StgInfoTable *info, StgPtr p)
{
    switch (info->type) {
    case THUNK_0_1:
        return p + sizeofW(StgThunk) + 1;

    case FUN_0_1:
    case CONSTR_0_1:
        return p + sizeofW(StgHeader) + 1;

    case FUN_1_0:
    case CONSTR_1_0:
        thread(&((StgClosure *)p)->payload[0]);
        return p + sizeofW(StgHeader) + 1;

    case THUNK_1_0:
        thread(&((StgThunk *)p)->payload[0]);
        return p + sizeofW(StgThunk) + 1;

    case THUNK_0_2:
        return p + sizeofW(StgThunk) + 2;

    case FUN_0_2:
    case CONSTR_0_2:
        return p + sizeofW(StgHeader) + 2;

    case THUNK_1_1:
        thread(&((StgThunk *)p)->payload[0]);
        return p + sizeofW(StgThunk) + 2;

    case FUN_1_1:
    case CONSTR_1_1:
        thread(&((StgClosure *)p)->payload[0]);
        return p + sizeofW(StgHeader) + 2;

    case THUNK_2_0:
        thread(&((StgThunk *)p)->payload[0]);
        thread(&((StgThunk *)p)->payload[1]);
        return p + sizeofW(StgThunk) + 2;

    case FUN_2_0:
    case CONSTR_2_0:
        thread(&((StgClosure *)p)->payload[0]);
        thread(&((StgClosure *)p)->payload[1]);
        return p + sizeofW(StgHeader) + 2;

#ifdef ALLOW_INTERPRETER
    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        thread_(&bco->instrs);
        thread_(&bco->literals);
        thread_(&bco->ptrs);
        return p + bco_sizeW(bco);
    }
#endif // ALLOW_INTERPRETER

    case THUNK:
    {
        StgPtr end;

        // thread the pointer section; 'p' ends up just past it
        end = (P_)((StgThunk *)p)->payload +
            info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            thread((StgClosure **)p);
        }
        return p + info->layout.payload.nptrs;
    }

    case FUN:
    case CONSTR:
    case STABLE_NAME:
    case IND_PERM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload +
            info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            thread((StgClosure **)p);
        }
        return p + info->layout.payload.nptrs;
    }

    case WEAK:
    {
        StgWeak *w = (StgWeak *)p;
        thread(&w->key);
        thread(&w->value);
        thread(&w->finalizer);
        if (w->link != NULL) {
            thread_(&w->link);
        }
        return p + sizeofW(StgWeak);
    }

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
        thread(&((StgInd *)p)->indirectee);
        return p + sizeofW(StgInd);

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        thread(&s->selectee);
        return p + THUNK_SELECTOR_sizeW();
    }

    case AP_STACK:
        return thread_AP_STACK((StgAP_STACK *)p);

    case PAP:
        return thread_PAP((StgPAP *)p);

    case AP:
        return thread_AP((StgAP *)p);

    case ARR_WORDS:
        return p + arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgPtr next;

        // 'next' is the end of the whole array; the loop cursor 'p'
        // reaches it exactly, so returning 'p' is returning 'next'.
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            thread((StgClosure **)p);
        }
        return p;
    }

    case TSO:
        return thread_TSO((StgTSO *)p);

    default:
        barf("update_fwd: unknown/strange object  %d", (int)(info->type));
        return NULL;
    }
}
// Sanity-check a single closure: assert that all of its pointer fields
// look like valid closure pointers (and a few type-specific structural
// invariants), then return the closure's size in words so the caller
// can step to the next closure.  Barfs on forwarding pointers and on
// stack frames, which must never be encountered here.
StgOffset
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
        ASSERT(closure_STATIC(p));
    } else {
        ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;
    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
      {
        StgMVar *mvar = (StgMVar *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
        return sizeofW(StgMVar);
      }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
      {
        nat i;
        for (i = 0; i < info->layout.payload.ptrs; i++) {
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
        }
        return thunk_sizeW_fromITBL(info);
      }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
        {
            // generic case: the info table describes the pointer layout
            nat i;
            for (i = 0; i < info->layout.payload.ptrs; i++) {
                ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
            }
            return sizeW_fromITBL(info);
        }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
        return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
      ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
      return sizeW_fromITBL(info);

    case WEAK:
      /* deal with these specially - the info table isn't
       * representative of the actual layout.
       */
      { StgWeak *w = (StgWeak *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
        if (w->link) {
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
        }
        return sizeW_fromITBL(info);
      }

    case THUNK_SELECTOR:
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
            return THUNK_SELECTOR_sizeW();

    case IND:
        {
            /* we don't expect to see any of these after GC
             * but they might appear during execution
             */
            StgInd *ind = (StgInd *)p;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
            return sizeofW(StgInd);
        }

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
            barf("checkClosure: stack frame");

    case AP:
    {
        StgAP* ap = (StgAP *)p;
        checkPAP (ap->fun, ap->payload, ap->n_args);
        return ap_sizeW(ap);
    }

    case PAP:
    {
        StgPAP* pap = (StgPAP *)p;
        checkPAP (pap->fun, pap->payload, pap->n_args);
        return pap_sizeW(pap);
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
        // the payload is a stack chunk and is checked as such
        checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
            return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        {
            StgMutArrPtrs* a = (StgMutArrPtrs *)p;
            nat i;
            for (i = 0; i < a->ptrs; i++) {
                ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
            }
            return mut_arr_ptrs_sizeW(a);
        }

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
      {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc -> next_entry_idx; i ++) {
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
      }

    default:
            barf("checkClosure (closure type %d)", info->type);
    }
}