// Debug check: assert that closure 'p' lives inside the compact 'str'.
// Static closures are accepted without further checks: only certain
// static closures are allowed to be referenced from a compact, but we
// are generous here and assume that all static closures are OK.
STATIC_INLINE void
check_object_in_compact (StgCompactNFData *str, StgClosure *p)
{
    if (!HEAP_ALLOCED(p)) {
        return;
    }

    // For a heap-allocated closure, its block must be a compact block
    // and that compact must be the one we were given.
    bdescr *block = Bdescr((P_)p);
    ASSERT((block->flags & BF_COMPACT) != 0 &&
           objectGetCompact(p) == str);
}
// compactContains(str, what): 1 if 'what' points into the compact 'str',
// 0 otherwise.  Passing str == NULL asks the weaker question "does
// 'what' point into *any* compact?".
//
// This check is the reason why this needs to be implemented in C
// instead of (possibly faster) Cmm.
StgWord
compactContains (StgCompactNFData *str, StgPtr what)
{
    if (!HEAP_ALLOCED(what)) {
        return 0;
    }

    // Note that we don't care about tags, they are eaten
    // away by the Bdescr operation anyway.
    bdescr *block = Bdescr((P_)what);
    if ((block->flags & BF_COMPACT) == 0) {
        return 0;
    }
    return str == NULL || objectGetCompact((StgClosure*)what) == str;
}
// // shouldCompact(c,p): returns: // SHOULDCOMPACT_IN_CNF if the object is in c // SHOULDCOMPACT_STATIC if the object is static // SHOULDCOMPACT_NOTIN_CNF if the object is dynamic and not in c // StgWord shouldCompact (StgCompactNFData *str, StgClosure *p) { bdescr *bd; if (!HEAP_ALLOCED(p)) return SHOULDCOMPACT_STATIC; // we have to copy static closures too bd = Bdescr((P_)p); if (bd->flags & BF_PINNED) { return SHOULDCOMPACT_PINNED; } if ((bd->flags & BF_COMPACT) && objectGetCompact(p) == str) { return SHOULDCOMPACT_IN_CNF; } else { return SHOULDCOMPACT_NOTIN_CNF; } }
/* ----------------------------------------------------------------------------
   Evacuate an object inside a CompactNFData

   These are treated in a similar way to large objects.  We remove the block
   from the compact_objects list of the generation it is on, and link it onto
   the live_compact_objects list of the destination generation.

   It is assumed that objects in the struct live in the same generation
   as the struct itself all the time.
   ------------------------------------------------------------------------- */
STATIC_INLINE void
evacuate_compact (StgPtr p)
{
    StgCompactNFData *str;
    bdescr *bd;
    generation *gen, *new_gen;
    uint32_t gen_no, new_gen_no;

    // We need to find the Compact# corresponding to this pointer, because it
    // will give us the first block in the compact chain, which is the one we
    // that gets linked onto the compact_objects list.
    str = objectGetCompact((StgClosure*)p);
    ASSERT(get_itbl((StgClosure*)str)->type == COMPACT_NFDATA);

    bd = Bdescr((StgPtr)str);
    gen_no = bd->gen_no;

    // already evacuated? (we're about to do the same check,
    // but we avoid taking the spin-lock)
    // NOTE(review): classic double-checked pattern — this unlocked read is
    // an optimisation; the authoritative re-check happens below under
    // gen->sync.
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = true;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }

    gen = bd->gen;
    gen_no = bd->gen_no;
    ACQUIRE_SPIN_LOCK(&gen->sync);

    // already evacuated?  (re-checked under the lock: another GC thread may
    // have evacuated this compact between the unlocked check and here)
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = true;
            TICK_GC_FAILED_PROMOTION();
        }
        RELEASE_SPIN_LOCK(&gen->sync);
        return;
    }

    // remove from compact_objects list (intrusive doubly-linked list:
    // u.back is the previous element, link is the next)
    if (bd->u.back) {
        bd->u.back->link = bd->link;
    } else { // first object in the list
        gen->compact_objects = bd->link;
    }
    if (bd->link) {
        bd->link->u.back = bd->u.back;
    }

    /* link it on to the evacuated compact object list of the destination gen
     */
    new_gen_no = bd->dest_no;

    // If we can't promote to the generation the collector wants, record the
    // failure so evacuate()'s caller knows (see comments in evacuate()).
    if (new_gen_no < gct->evac_gen_no) {
        if (gct->eager_promotion) {
            new_gen_no = gct->evac_gen_no;
        } else {
            gct->failed_to_evac = true;
        }
    }

    new_gen = &generations[new_gen_no];

    // Note: for speed we only update the generation of the first block here
    // This means that bdescr of subsequent blocks will think they are in
    // the wrong generation
    // (This should not be a problem because there is no code that checks
    // for that - the only code touching the generation of the block is
    // in the GC, and that should never see blocks other than the first)
    bd->flags |= BF_EVACUATED;
    initBdescr(bd, new_gen, new_gen->to);

    if (str->hash) {
        // NOTE(review): str->hash is presumably the sharing hash table used
        // while the compact is being built — TODO confirm.  In that case the
        // compact is queued on the large-objects todo list so it gets
        // scavenged, instead of going straight to the live list below.
        gen_workspace *ws = &gct->gens[new_gen_no];
        bd->link = ws->todo_large_objects;
        ws->todo_large_objects = bd;
    } else {
        // Take the destination generation's lock only when it differs from
        // the source generation (whose lock we already hold).
        if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); }
        dbl_link_onto(bd, &new_gen->live_compact_objects);
        new_gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
        if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); }
    }

    RELEASE_SPIN_LOCK(&gen->sync);

    // Note: the object did not move in memory, because it lives
    // in pinned (BF_COMPACT) allocation, so we do not need to rewrite it
    // or muck with forwarding pointers
    // Also there is no tag to worry about on the struct (tags are used
    // for constructors and functions, but a struct is neither). There
    // might be a tag on the object pointer, but again we don't change
    // the pointer because we don't move the object so we don't need to
    // rewrite the tag.
}