static void
thread_static( StgClosure* p )
{
    const StgInfoTable *info;

    // keep going until we've threaded all the objects on the linked
    // list...
    while (p != END_OF_STATIC_LIST) {

        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
            thread(&((StgInd *)p)->indirectee);
            p = *IND_STATIC_LINK(p);
            continue;

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK(p);
            continue;
        case FUN_STATIC:
            p = *FUN_STATIC_LINK(p);
            continue;
        case CONSTR_STATIC:
            p = *STATIC_LINK(info,p);
            continue;

        default:
            barf("thread_static: strange closure %d", (int)(info->type));
        }
    }
}
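/* For reference: the *_STATIC_LINK macros used above (and in the functions
 * below) locate the static-link word of a static closure.  A minimal sketch
 * of the idea, assuming the link word sits just past the pointer and
 * non-pointer payload as in GHC's ClosureMacros.h; the exact definitions and
 * payload offsets vary between RTS versions, so treat this as illustration
 * rather than the authoritative definitions:
 */
#if 0   /* illustrative sketch only */
#define STATIC_LINK(info,p) \
    (&(p)->payload[(info)->layout.payload.ptrs + (info)->layout.payload.nptrs])
#define FUN_STATIC_LINK(p)    (&(p)->payload[0])
#define THUNK_STATIC_LINK(p)  (&(p)->payload[1])
#define IND_STATIC_LINK(p)    (&(p)->payload[1])
#endif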
/* Check the static objects list. */
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    const StgInfoTable *info;

    while (p != END_OF_STATIC_OBJECT_LIST) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
          {
            const StgClosure *indirectee;

            indirectee = UNTAG_CONST_CLOSURE(((StgIndStatic *)p)->indirectee);
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
          }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        case CONSTR:
        case CONSTR_NOCAF:
        case CONSTR_1_0:
        case CONSTR_2_0:
        case CONSTR_1_1:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p, info_type(p));
        }
    }
}
/* Check the static objects list. */
void
checkStaticObjects ( StgClosure* static_objects )
{
    StgClosure *p = static_objects;
    StgInfoTable *info;

    while (p != END_OF_STATIC_LIST) {
        checkClosure(p);
        info = get_itbl(p);
        switch (info->type) {

        case IND_STATIC:
          {
            StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
            ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
            p = *IND_STATIC_LINK((StgClosure *)p);
            break;
          }

        case THUNK_STATIC:
            p = *THUNK_STATIC_LINK((StgClosure *)p);
            break;

        case FUN_STATIC:
            p = *FUN_STATIC_LINK((StgClosure *)p);
            break;

        case CONSTR_STATIC:
            p = *STATIC_LINK(info,(StgClosure *)p);
            break;

        default:
            barf("checkStaticObjects: strange closure %p (%s)",
                 p,
#ifndef HaLVM_TARGET_OS
                 info_type(p)
#else
                 "[HaLVM has no info_type()]"
#endif
                 );
        }
    }
}
REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
    bdescr *bd = NULL;
    nat gen_no;
    StgClosure *q;
    const StgInfoTable *info;
    StgWord tag;

    q = *p;

loop:
    /* The tag and the pointer are split, to be merged after evacing */
    tag = GET_CLOSURE_TAG(q);
    q = UNTAG_CLOSURE(q);

    ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

    if (!HEAP_ALLOCED_GC(q)) {
        if (!major_gc) return;

        info = get_itbl(q);
        switch (info->type) {

        case THUNK_STATIC:
            if (info->srt_bitmap != 0) {
                evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
            }
            return;

        case FUN_STATIC:
            if (info->srt_bitmap != 0) {
                evacuate_static_object(FUN_STATIC_LINK((StgClosure *)q), q);
            }
            return;

        case IND_STATIC:
            /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
             * on the CAF list, so don't do anything with it here (we'll
             * scavenge it later).
             */
            evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
            return;

        case CONSTR_STATIC:
            evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
            return;

        case CONSTR_NOCAF_STATIC:
            /* no need to put these on the static linked list, they don't need
             * to be scavenged.
             */
            return;

        default:
            barf("evacuate(static): strange closure type %d", (int)(info->type));
        }
    }

    bd = Bdescr((P_)q);

    if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

        // pointer into to-space: just return it.  It might be a pointer
        // into a generation that we aren't collecting (> N), or it
        // might just be a pointer into to-space.  The latter doesn't
        // happen often, but allowing it makes certain things a bit
        // easier; e.g. scavenging an object is idempotent, so it's OK to
        // have an object on the mutable list multiple times.
        if (bd->flags & BF_EVACUATED) {
            // We aren't copying this object, so we have to check
            // whether it is already in the target generation.  (this is
            // the write barrier).
            if (bd->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }

        /* evacuate large objects by re-linking them onto a different list.
         */
        if (bd->flags & BF_LARGE) {
            evacuate_large((P_)q);
            return;
        }

        /* If the object is in a gen that we're compacting, then we
         * need to use an alternative evacuate procedure.
         */
        if (!is_marked((P_)q,bd)) {
            mark((P_)q,bd);
            push_mark_stack((P_)q);
        }
        return;
    }

    gen_no = bd->dest_no;

    info = q->header.info;
    if (IS_FORWARDING_PTR(info))
    {
        /* Already evacuated, just return the forwarding address.
         * HOWEVER: if the requested destination generation (gct->evac_gen) is
         * older than the actual generation (because the object was
         * already evacuated to a younger generation) then we have to
         * set the gct->failed_to_evac flag to indicate that we couldn't
         * manage to promote the object to the desired generation.
         */
        /*
         * Optimisation: the check is fairly expensive, but we can often
         * shortcut it if either the required generation is 0, or the
         * current object (the EVACUATED) is in a high enough generation.
         * We know that an EVACUATED always points to an object in the
         * same or an older generation.  gen is the lowest generation that the
         * current object would be evacuated to, so we only do the full
         * check if gen is too low.
         */
        StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
        *p = TAG_CLOSURE(tag,e);
        if (gen_no < gct->evac_gen_no) {  // optimisation
            if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return;
    }

    switch (INFO_PTR_TO_STRUCT(info)->type) {

    case WHITEHOLE:
        goto loop;

    // For ints and chars of low value, save space by replacing references to
    // these with closures with references to common, shared ones in the RTS.
    //
    // * Except when compiling into Windows DLLs which don't support cross-package
    //   data references very well.
    //
    case CONSTR_0_1:
    {
#if defined(COMPILING_WINDOWS_DLL)
        copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
#else
        StgWord w = (StgWord)q->payload[0];
        if (info == Czh_con_info &&
            // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
            (StgChar)w <= MAX_CHARLIKE) {
            *p = TAG_CLOSURE(tag,
                             (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                            );
        }
        else if (info == Izh_con_info &&
                 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
            *p = TAG_CLOSURE(tag,
                             (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                            );
        }
        else {
            copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
        }
#endif
        return;
    }

    case FUN_0_1:
    case FUN_1_0:
    case CONSTR_1_0:
        copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
        return;

    case THUNK_1_0:
    case THUNK_0_1:
        copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
        return;

    case THUNK_1_1:
    case THUNK_2_0:
    case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
        copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
        return;

    case FUN_1_1:
    case FUN_2_0:
    case FUN_0_2:
    case CONSTR_1_1:
    case CONSTR_2_0:
        copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
        return;

    case CONSTR_0_2:
        copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
        return;

    case THUNK:
        copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
        return;

    case FUN:
    case CONSTR:
        copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
        return;

    case BLACKHOLE:
    {
        StgClosure *r;
        const StgInfoTable *i;
        r = ((StgInd*)q)->indirectee;
        if (GET_CLOSURE_TAG(r) == 0) {
            i = r->header.info;
            if (IS_FORWARDING_PTR(i)) {
                r = (StgClosure *)UN_FORWARDING_PTR(i);
                i = r->header.info;
            }
            if (i == &stg_TSO_info
                || i == &stg_WHITEHOLE_info
                || i == &stg_BLOCKING_QUEUE_CLEAN_info
                || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
                copy(p,info,q,sizeofW(StgInd),gen_no);
                return;
            }
            ASSERT(i != &stg_IND_info);
        }
        q = r;
        *p = r;
        goto loop;
    }

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    case TVAR:
    case BLOCKING_QUEUE:
    case WEAK:
    case PRIM:
    case MUT_PRIM:
        copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
        return;

    case BCO:
        copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
        return;

    case THUNK_SELECTOR:
        eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
        return;

    case IND:
        // follow chains of indirections, don't evacuate them
        q = ((StgInd*)q)->indirectee;
        *p = q;
        goto loop;

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
        // shouldn't see these
        barf("evacuate: stack frame at %p\n", q);

    case PAP:
        copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
        return;

    case AP:
        copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
        return;

    case AP_STACK:
        copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
        return;

    case ARR_WORDS:
        // just copy the block
        copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
        return;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // just copy the block
        copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
        return;

    case SMALL_MUT_ARR_PTRS_CLEAN:
    case SMALL_MUT_ARR_PTRS_DIRTY:
    case SMALL_MUT_ARR_PTRS_FROZEN:
    case SMALL_MUT_ARR_PTRS_FROZEN0:
        // just copy the block
        copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
        return;

    case TSO:
        copy(p,info,q,sizeofW(StgTSO),gen_no);
        return;

    case STACK:
    {
        StgStack *stack = (StgStack *)q;

        /* To evacuate a small STACK, we need to adjust the stack pointer
         */
        {
            StgStack *new_stack;
            StgPtr r, s;
            rtsBool mine;

            mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                            sizeofW(StgStack), gen_no);
            if (mine) {
                new_stack = (StgStack *)*p;
                move_STACK(stack, new_stack);
                for (r = stack->sp, s = new_stack->sp;
                     r < stack->stack + stack->stack_size;) {
                    *s++ = *r++;
                }
            }
            return;
        }
    }

    case TREC_CHUNK:
        copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
        return;

    default:
        barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
    }

    barf("evacuate");
}
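/* The IS_FORWARDING_PTR / UN_FORWARDING_PTR tests in evacuate above rely on
 * the info pointer of an already-evacuated closure having been overwritten
 * with its to-space address, tagged in the low bit.  A minimal sketch of that
 * convention, assuming the low-bit encoding used by GHC's ClosureMacros.h
 * (consult the headers of your RTS version for the authoritative definitions):
 */
#if 0   /* illustrative sketch only */
#define IS_FORWARDING_PTR(p)  ((((StgWord)p) & 1) != 0)
#define MK_FORWARDING_PTR(p)  (((StgWord)p) | 1)
#define UN_FORWARDING_PTR(p)  (((StgWord)p) - 1)
#endif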
//
// Check whether we can unload any object code.  This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
void checkUnload (StgClosure *static_objects)
{
    nat g, n;
    HashTable *addrs;
    StgClosure* p;
    const StgInfoTable *info;
    ObjectCode *oc, *prev, *next;
    gen_workspace *ws;
    StgClosure* link;

    if (unloaded_objects == NULL) return;

    ACQUIRE_LOCK(&linker_unloaded_mutex);

    // Mark every unloadable object as unreferenced initially
    for (oc = unloaded_objects; oc; oc = oc->next) {
        IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                    oc->fileName));
        oc->referenced = rtsFalse;
    }

    addrs = allocHashTable();

    for (p = static_objects; p != END_OF_STATIC_OBJECT_LIST; p = link) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkAddress(addrs, p);
        info = get_itbl(p);
        link = *STATIC_LINK(info, p);
    }

    // CAFs on revertible_caf_list are not on static_objects
    for (p = (StgClosure*)revertible_caf_list;
         p != END_OF_CAF_LIST;
         p = ((StgIndStatic *)p)->static_link) {
        p = UNTAG_STATIC_LIST_PTR(p);
        checkAddress(addrs, p);
    }

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        searchHeapBlocks (addrs, generations[g].blocks);
        searchHeapBlocks (addrs, generations[g].large_objects);

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            searchHeapBlocks(addrs, ws->todo_bd);
            searchHeapBlocks(addrs, ws->part_list);
            searchHeapBlocks(addrs, ws->scavd_list);
        }
    }

#ifdef PROFILING
    /* Traverse the cost centre tree, calling checkAddress on each CCS/CC */
    searchCostCentres(addrs, CCS_MAIN);

    /* Also check each cost centre in the CC_LIST */
    CostCentre *cc;
    for (cc = CC_LIST; cc != NULL; cc = cc->link) {
        checkAddress(addrs, cc);
    }
#endif /* PROFILING */

    // Look through the unloadable objects, and any object that is still
    // marked as unreferenced can be physically unloaded, because we
    // have no references to it.
    prev = NULL;
    for (oc = unloaded_objects; oc; oc = next) {
        next = oc->next;
        if (oc->referenced == 0) {
            if (prev == NULL) {
                unloaded_objects = oc->next;
            } else {
                prev->next = oc->next;
            }
            IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                        oc->fileName));
            freeObjectCode(oc);
        } else {
            IF_DEBUG(linker, debugBelch("Object file still in use: %" PATH_FMT "\n",
                                        oc->fileName));
            prev = oc;
        }
    }

    freeHashTable(addrs, NULL);

    RELEASE_LOCK(&linker_unloaded_mutex);
}
//
// Check whether we can unload any object code.  This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
void checkUnload (StgClosure *static_objects)
{
    nat g, n;
    HashTable *addrs;
    StgClosure* p;
    const StgInfoTable *info;
    ObjectCode *oc, *prev, *next;
    gen_workspace *ws;
    StgClosure* link;

    if (unloaded_objects == NULL) return;

    // Mark every unloadable object as unreferenced initially
    for (oc = unloaded_objects; oc; oc = oc->next) {
        IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                    oc->fileName));
        oc->referenced = rtsFalse;
    }

    addrs = allocHashTable();

    for (p = static_objects; p != END_OF_STATIC_LIST; p = link) {
        checkAddress(addrs, p);
        info = get_itbl(p);
        link = *STATIC_LINK(info, p);
    }

    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        searchHeapBlocks (addrs, generations[g].blocks);
        searchHeapBlocks (addrs, generations[g].large_objects);

        for (n = 0; n < n_capabilities; n++) {
            ws = &gc_threads[n]->gens[g];
            searchHeapBlocks(addrs, ws->todo_bd);
            searchHeapBlocks(addrs, ws->part_list);
            searchHeapBlocks(addrs, ws->scavd_list);
        }
    }

    // Look through the unloadable objects, and any object that is still
    // marked as unreferenced can be physically unloaded, because we
    // have no references to it.
    prev = NULL;
    for (oc = unloaded_objects; oc; oc = next) {
        next = oc->next;
        if (oc->referenced == 0) {
            if (prev == NULL) {
                unloaded_objects = oc->next;
            } else {
                prev->next = oc->next;
            }
            IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                        oc->fileName));
            freeObjectCode(oc);
        } else {
            IF_DEBUG(linker, debugBelch("Object file still in use: %" PATH_FMT "\n",
                                        oc->fileName));
            // only advance prev past nodes we keep; advancing it past a freed
            // node would leave it dangling and break later unlinks
            prev = oc;
        }
    }

    freeHashTable(addrs, NULL);
}
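/* checkAddress, used by both versions of checkUnload above, records each
 * address it is given in the hash table (so an address is only examined once)
 * and marks any unloadable object whose image contains that address as still
 * referenced.  A simplified sketch of that idea, not the actual RTS code; the
 * image/fileSize field names and hash-table helpers are assumptions here and
 * differ between RTS versions:
 */
#if 0   /* illustrative sketch only */
static void checkAddress (HashTable *addrs, void *addr)
{
    ObjectCode *oc;

    if (!lookupHashTable(addrs, (StgWord)addr)) {
        insertHashTable(addrs, (StgWord)addr, addr);

        // does the address fall inside an object we are considering unloading?
        for (oc = unloaded_objects; oc; oc = oc->next) {
            if ((char *)addr >= oc->image &&
                (char *)addr <  oc->image + oc->fileSize) {
                oc->referenced = rtsTrue;
                break;
            }
        }
    }
}
#endif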
static void
scavenge_static(void)
{
    StgClosure* p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen_no = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        /*
            if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        */
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info -> type) {

        case IND_STATIC:
          {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
          }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
          {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
          }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}