static void
thread_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    // highly similar to scavenge_stack, but we do pointer threading here.

    while (p < stack_end) {

        // *p must be the info pointer of an activation
        // record.  All activation records have 'bitmap' style layout
        // info.
        //
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

            // Dynamic bitmap: the mask is stored on the stack
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            while (size > 0) {
                if ((bitmap & 1) == 0) {
                    thread((StgClosure **)p);
                }
                p++;
                bitmap = bitmap >> 1;
                size--;
            }

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                thread((StgClosure **)p);
                p++;
            }
            continue;
        }

            // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
        case UPDATE_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            p++;
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            while (size > 0) {
                if ((bitmap & 1) == 0) {
                    thread((StgClosure **)p);
                }
                p++;
                bitmap = bitmap >> 1;
                size--;
            }
            continue;

#ifdef ALLOW_INTERPRETER
        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            bco = (StgBCO *)*p;
            thread((StgClosure **)p);
            p++;
            size = BCO_BITMAP_SIZE(bco);
            thread_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
#endif // ALLOW_INTERPRETER

            // large bitmap (> 32 entries, or 64 on a 64-bit machine)
        case RET_BIG:
            p++;
            size = GET_LARGE_BITMAP(&info->i)->size;
            thread_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            continue;

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            fun_info = FUN_INFO_PTR_TO_STRUCT(UNTAG_CLOSURE((StgClosure *)
                           get_threaded_info((StgPtr)ret_fun->fun)));
                 // *before* threading it!
            thread(&ret_fun->fun);
            p = thread_arg_block(fun_info, ret_fun->payload);
            continue;
        }

        default:
            barf("thread_stack: weird activation record found on stack: %d",
                 (int)(info->i.type));
        }
    }
}
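// thread_stack leans on a helper, thread(), for the actual pointer
// threading.  Below is a minimal sketch of just the chaining step, with
// simplified types and a hypothetical name: the real thread() in the
// compacting collector also handles pointer tags and only threads fields
// whose target block is marked (BF_MARKED).  The idea: every field that
// points at a closure is linked into a chain rooted at the closure's
// info word, so that once the closure's final address is known, the
// chain can be unthreaded and every field updated in one pass.
static void
thread_sketch (StgClosure **p)   // hypothetical name; illustration only
{
    StgPtr q = (StgPtr)*p;       // the closure this field points at
    StgWord head = *q;           // info ptr on first visit, otherwise
                                 // the previously threaded field
    *q = (StgWord)p;             // this field becomes the new chain head
    *p = (StgClosure *)head;     // field temporarily holds the old head
}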
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //                  blackhole T
            //     update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we
            // check whether the value after evacuation is a BLACKHOLE,
            // and if not, we change the update frame to an stg_enter
            // frame that simply returns the value.  Hence, blackholing is
            // compulsory (otherwise we would have to check for thunks
            // too).
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND.  In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND.  Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            StgUpdateFrame *frame = (StgUpdateFrame *)p;
            StgClosure *v;

            evacuate(&frame->updatee);
            v = frame->updatee;
            if (GET_CLOSURE_TAG(v) != 0 ||
                (get_itbl(v)->type != BLACKHOLE)) {
                // blackholing is compulsory, see above.
                frame->header.info =
                    (const StgInfoTable*)&stg_enter_checkbh_info;
            }
            ASSERT(v->header.info != &stg_TSO_info);
            p += sizeofW(StgUpdateFrame);
            continue;
        }

            // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case UNDERFLOW_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info),
                             info->i.srt_bitmap);
            continue;

        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

            // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }

            // Dynamic bitmap: the mask is stored on the stack, and
            // there are a number of non-pointers followed by a number
            // of pointers above the bitmapped area.  (see StgMacros.h,
            // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d",
                 (int)(info->i.type));
        }
    }
}
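// The small-bitmap cases above call scavenge_small_bitmap rather than
// inlining the loop the way thread_stack does.  A sketch consistent with
// its call sites (p = scavenge_small_bitmap(p, size, bitmap)): a clear
// bit in the liveness bitmap marks a pointer word, which must be
// evacuated; a set bit marks a non-pointer, which is skipped.
static StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;   // first word past the bitmapped area
}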
// check an individual stack object
StgOffset
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {

    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
        StgWord dyn;
        StgPtr p;
        StgRetDyn* r;

        r = (StgRetDyn *)c;
        dyn = r->liveness;

        p = (P_)(r->payload);
        checkSmallBitmap(p, RET_DYN_LIVENESS(r->liveness),
                         RET_DYN_BITMAP_SIZE);
        p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

        // skip over the non-pointers
        p += RET_DYN_NONPTRS(dyn);

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            checkClosureShallow((StgClosure *)*p);
            p++;
        }

        return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
            RET_DYN_NONPTR_REGS_SIZE +
            RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
        // fall through to the small-bitmap handling
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
        // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
        size = BITMAP_SIZE(info->i.layout.bitmap);
        checkSmallBitmap((StgPtr)c + 1,
                         BITMAP_BITS(info->i.layout.bitmap), size);
        return 1 + size;

    case RET_BCO: {
        StgBCO *bco;
        nat size;

        bco = (StgBCO *)*(c+1);
        size = BCO_BITMAP_SIZE(bco);
        checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
        return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
        size = GET_LARGE_BITMAP(&info->i)->size;
        checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
        return 1 + size;

    case RET_FUN:
    {
        StgFunInfoTable *fun_info;
        StgRetFun *ret_fun;

        ret_fun = (StgRetFun *)c;
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        size = ret_fun->size;
        switch (fun_info->f.fun_type) {
        case ARG_GEN:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(fun_info->f.b.bitmap), size);
            break;
        case ARG_GEN_BIG:
            checkLargeBitmap((StgPtr)ret_fun->payload,
                             GET_FUN_LARGE_BITMAP(fun_info), size);
            break;
        default:
            checkSmallBitmap((StgPtr)ret_fun->payload,
                             BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
                             size);
            break;
        }
        return sizeofW(StgRetFun) + size;
    }

    default:
        barf("checkStackFrame: weird activation record found on stack (%p %d).",
             c, info->i.type);
    }
}
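// checkStackFrame returns the frame's size in words, so a caller can
// walk an entire stack chunk frame by frame.  A sketch of such a driver
// (in the sanity checker this role is played by checkStackChunk):
void
checkStackChunk( StgPtr sp, StgPtr stack_end )
{
    StgPtr p = sp;

    while (p < stack_end) {
        p += checkStackFrame( p );   // step over one complete frame
    }
    // every frame should be accounted for exactly:
    // ASSERT(p == stack_end);
}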