STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, nat gen_no, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen_no);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#if defined(PARALLEL_GC)
    {
        const StgInfoTable *new_info;
        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info,
                                             MK_FORWARDING_PTR(to));
        if (new_info != info) {
#ifdef PROFILING
            // We copied this object at the same time as another
            // thread.  We'll evacuate the object again and the copy
            // we just made will be discarded at the next GC, but we
            // may have copied it after the other thread called
            // SET_EVACUAEE_FOR_LDV(), which would confuse the LDV
            // profiler when it encounters this closure in
            // processHeapClosureForDead.  So we reset the LDVW field
            // here.
            LDVW(to) = 0;
#endif
            return evacuate(p); // does the failed_to_evac stuff
        } else {
            *p = TAG_CLOSURE(tag,(StgClosure*)to);
        }
    }
#else
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    // This is safe only if we are sure that no other thread evacuates
    // the object again, so we cannot use copy_tag_nolock when PROFILING.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
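/* For reference, a minimal sketch of the forwarding-pointer encoding that
 * MK_FORWARDING_PTR / IS_FORWARDING_PTR / UN_FORWARDING_PTR are assumed to
 * implement: closures are word-aligned, so the low bit of a genuine info
 * pointer is always clear, and setting it repurposes the header word as a
 * forwarding pointer.  The real definitions live in the RTS headers, not
 * here; this is illustrative only:
 *
 *   #define MK_FORWARDING_PTR(p) (((StgWord)(p)) | 1)
 *   #define IS_FORWARDING_PTR(p) ((((StgWord)(p)) & 1) != 0)
 *   #define UN_FORWARDING_PTR(p) (((StgWord)(p)) - 1)
 *
 * This is why the CAS above can distinguish "still has its info table"
 * from "already forwarded by another GC thread" with a single word.
 */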
STATIC_INLINE void
copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
                StgClosure *src, nat size, nat gen_no, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen_no);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    // if somebody else reads the forwarding pointer, we better make
    // sure there's a closure at the end of it.
    write_barrier();
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
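/* Why the write_barrier() above matters -- an illustrative interleaving,
 * not code from this file.  Without the barrier, a racing GC thread that
 * scans src could observe the forwarding pointer before the payload copy
 * is visible to it:
 *
 *   // racing reader (sketch):
 *   const StgInfoTable *i = src->header.info;
 *   if (IS_FORWARDING_PTR(i)) {
 *       StgClosure *e = (StgClosure *)UN_FORWARDING_PTR(i);
 *       // e must already look like a complete closure here, so the
 *       // payload stores above must be ordered before the store of the
 *       // forwarding pointer into the header word.
 *   }
 *
 * copy_tag() avoids this with a CAS on the header; this variant only
 * needs the cheaper store ordering because no other thread will try to
 * evacuate the same object.
 */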
/*
 * get at the real stuff...remove indirections.
 * It untags pointers before dereferencing and
 * retags the real stuff with its tag (if there
 * is any) when returning.
 *
 * ToDo: move to a better home.
 */
static StgClosure*
removeIndirections (StgClosure* p)
{
    StgWord tag = GET_CLOSURE_TAG(p);
    StgClosure* q = UNTAG_CLOSURE(p);

    while (get_itbl(q)->type == IND ||
           get_itbl(q)->type == IND_STATIC ||
           get_itbl(q)->type == IND_PERM) {
        q = ((StgInd *)q)->indirectee;
        tag = GET_CLOSURE_TAG(q);
        q = UNTAG_CLOSURE(q);
    }

    return TAG_CLOSURE(tag,q);
}
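/* Hypothetical usage sketch (the surrounding code is illustrative, not
 * from this file): a caller that needs the ultimate referent of a
 * possibly-tagged, possibly-indirected pointer can write
 *
 *   StgClosure *val = removeIndirections(p);
 *   switch (get_itbl(UNTAG_CLOSURE(val))->type) {
 *       case CONSTR:
 *           // guaranteed not to be an IND/IND_STATIC/IND_PERM here
 *           break;
 *       ...
 *   }
 *
 * The returned pointer carries the tag of the final indirectee, so it can
 * be compared or dereferenced exactly like the original (untag first).
 */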
REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  nat gen_no;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

  if (!HEAP_ALLOCED_GC(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(FUN_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
          return;

      case CONSTR_STATIC:
          evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a gen that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          push_mark_stack((P_)q);
      }
      return;
  }

  gen_no = bd->dest_no;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
      /* Already evacuated, just return the forwarding address.
       * HOWEVER: if the requested destination generation (gct->evac_gen) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the gct->failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       */
      /*
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  gen is the lowest generation that the
       * current object would be evacuated to, so we only do the full
       * check if gen is too low.
       */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (gen_no < gct->evac_gen_no) {  // optimisation
          if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }

  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      goto loop;

  // For ints and chars of low value, save space by replacing references to
  // these with closures with references to common, shared ones in the RTS.
  // (An illustrative sketch of the shared-closure lookup follows this
  // function.)
  //
  // * Except when compiling into Windows DLLs which don't support cross-package
  //   data references very well.
  //
  case CONSTR_0_1:
  {
#if defined(COMPILING_WINDOWS_DLL)
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
#else
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                          );
      }
      else if (info == Izh_con_info &&
               (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                          );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      }
#endif
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
      copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
      return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case FUN:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
      return;

  case BLACKHOLE:
  {
      StgClosure *r;
      const StgInfoTable *i;
      r = ((StgInd*)q)->indirectee;
      if (GET_CLOSURE_TAG(r) == 0) {
          i = r->header.info;
          if (IS_FORWARDING_PTR(i)) {
              r = (StgClosure *)UN_FORWARDING_PTR(i);
              i = r->header.info;
          }
          if (i == &stg_TSO_info
              || i == &stg_WHITEHOLE_info
              || i == &stg_BLOCKING_QUEUE_CLEAN_info
              || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
              copy(p,info,q,sizeofW(StgInd),gen_no);
              return;
          }
          ASSERT(i != &stg_IND_info);
      }
      q = r;
      *p = r;
      goto loop;
  }

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
  case TVAR:
  case BLOCKING_QUEUE:
  case WEAK:
  case PRIM:
  case MUT_PRIM:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case UPDATE_FRAME:
  case UNDERFLOW_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
      return;

  case SMALL_MUT_ARR_PTRS_CLEAN:
  case SMALL_MUT_ARR_PTRS_DIRTY:
  case SMALL_MUT_ARR_PTRS_FROZEN:
  case SMALL_MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
      return;

  case TSO:
      copy(p,info,q,sizeofW(StgTSO),gen_no);
      return;

  case STACK:
  {
      StgStack *stack = (StgStack *)q;

      /* To evacuate a small STACK, we need to adjust the stack pointer
       */
      {
          StgStack *new_stack;
          StgPtr r, s;
          rtsBool mine;

          mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                          sizeofW(StgStack), gen_no);
          if (mine) {
              new_stack = (StgStack *)*p;
              move_STACK(stack, new_stack);
              for (r = stack->sp, s = new_stack->sp;
                   r < stack->stack + stack->stack_size;) {
                  *s++ = *r++;
              }
          }
          return;
      }
  }

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
      return;

  default:
      barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}
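/* A sketch of how the CHARLIKE/INTLIKE sharing in the CONSTR_0_1 case
 * above is assumed to work (illustrative; the real closures and macros
 * are defined in the RTS headers and compiled-in static data, not here).
 * The RTS ships a static array of pre-built boxed closures, one per small
 * value, so evacuation can redirect the pointer instead of copying:
 *
 *   // hypothetical shape of the lookup:
 *   #define CHARLIKE_CLOSURE(n) \
 *       ((P_)&stg_CHARLIKE_closure[(n) - MIN_CHARLIKE])
 *   #define INTLIKE_CLOSURE(n) \
 *       ((P_)&stg_INTLIKE_closure[(n) - MIN_INTLIKE])
 *
 * Since these closures live outside the heap, the resulting pointer never
 * needs evacuating again, and duplicate small Int#/Char# boxes collapse
 * into shared ones as a side effect of GC.
 */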
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;
  StgWord tag;
  StgClosure *q;

  while (1) {
    /* The tag and the pointer are split, to be merged later when needed. */
    tag = GET_CLOSURE_TAG(p);
    q = UNTAG_CLOSURE(p);

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    // ignore static closures
    //
    // ToDo: This means we never look through IND_STATIC, which means
    // isRetainer needs to handle the IND_STATIC case rather than
    // raising an error.
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED_GC(q)) {
        return p;
    }

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)q);

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
        return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_MARKED) && is_marked((P_)q,bd)) {
        return p;
    }

    info = q->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // alive!
        return TAG_CLOSURE(tag,(StgClosure*)UN_FORWARDING_PTR(info));
    }

    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
        // follow indirections
        p = ((StgInd *)q)->indirectee;
        continue;

    case BLACKHOLE:
        p = ((StgInd*)q)->indirectee;
        if (GET_CLOSURE_TAG(p) != 0) {
            continue;
        } else {
            return NULL;
        }

    default:
        // dead.
        return NULL;
    }
  }
}
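/* Hypothetical usage sketch (the field and variable names are
 * illustrative, not from this file): weak-pointer processing can use
 * isAlive() after evacuation to decide whether a key survived this GC:
 *
 *   StgClosure *k = isAlive(w->key);
 *   if (k != NULL) {
 *       w->key = k;   // key is alive; update to its new location
 *   } else {
 *       // key is dead: queue the finalizer and mark the weak ptr dead
 *   }
 *
 * Note that a non-NULL result is the (possibly tagged) new address of the
 * closure, not necessarily the pointer that was passed in, so callers
 * should store the result back rather than keep the old pointer.
 */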