Example #1
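// Send the asynchronous exception 'exception' from 'source' to 'target'.
// Returns NULL if the exception was delivered immediately, or the (still
// locked) MessageThrowTo if the target could not receive it yet; the caller
// is then responsible for unlocking the message when its own state is tidy.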
MessageThrowTo *
throwTo (Capability *cap,       // the Capability we hold
         StgTSO *source,        // the TSO sending the exception (or NULL)
         StgTSO *target,        // the TSO receiving the exception
         StgClosure *exception) // the exception closure
{
    MessageThrowTo *msg;

    msg = (MessageThrowTo *) allocate(cap, sizeofW(MessageThrowTo));
    // the message starts locked; see below
    SET_HDR(msg, &stg_WHITEHOLE_info, CCS_SYSTEM);
    msg->source      = source;
    msg->target      = target;
    msg->exception   = exception;

    switch (throwToMsg(cap, msg))
    {
    case THROWTO_SUCCESS:
        // unlock the message now, otherwise we leave a WHITEHOLE in
        // the heap (#6103)
        SET_HDR(msg, &stg_MSG_THROWTO_info, CCS_SYSTEM);
        return NULL;

    case THROWTO_BLOCKED:
    default:
        // the caller will unlock the message when it is ready.  We
        // cannot unlock it yet, because the calling thread will need
        // to tidy up its state first.
        return msg;
    }
}
Example #2
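// Allocate a fresh block for the compact region 'str', link it onto the end
// of the region's block chain, initialise its bdescr's free pointer to just
// past the block header, and account for its size in str->totalW.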
static StgCompactNFDataBlock *
compactAppendBlock (Capability       *cap,
                    StgCompactNFData *str,
                    StgWord           aligned_size)
{
    StgCompactNFDataBlock *block;
    bdescr *bd;

    block = compactAllocateBlockInternal(cap, aligned_size,
                                         compactGetFirstBlock(str),
                                         ALLOCATE_APPEND);
    block->owner = str;
    block->next = NULL;

    ASSERT(str->last->next == NULL);
    str->last->next = block;
    str->last = block;

    bd = Bdescr((P_)block);
    bd->free = (StgPtr)((W_)block + sizeof(StgCompactNFDataBlock));
    ASSERT(bd->free == (StgPtr)block + sizeofW(StgCompactNFDataBlock));

    str->totalW += bd->blocks * BLOCK_SIZE_W;

    return block;
}
Example #3
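// Compacting GC: thread the 'fun' field and the saved stack payload of an
// AP_STACK closure, then return a pointer just past the whole closure.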
STATIC_INLINE StgPtr
thread_AP_STACK (StgAP_STACK *ap)
{
    thread(&ap->fun);
    thread_stack((P_)ap->payload, (P_)ap->payload + ap->size);
    return (P_)ap + sizeofW(StgAP_STACK) + ap->size;
}
Example #4
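// LDV (lag-drag-void) profiling: record the death of closure 'c' of the
// given size (in words), adjusting the void/drag totals of the affected
// censuses.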
void
LDV_recordDead( StgClosure *c, nat size )
{
    void *id;
    nat t;
    counter *ctr;

    if (era > 0 && closureSatisfiesConstraints(c)) {
        size -= sizeofW(StgProfHeader);
        ASSERT(LDVW(c) != 0);
        if ((LDVW((c)) & LDV_STATE_MASK) == LDV_STATE_CREATE) {
            t = (LDVW((c)) & LDV_CREATE_MASK) >> LDV_SHIFT;
            if (t < era) {
                if (RtsFlags.ProfFlags.bioSelector == NULL) {
                    censuses[t].void_total   += (long)size;
                    censuses[era].void_total -= (long)size;
                    ASSERT(censuses[t].void_total < censuses[t].not_used);
                } else {
                    id = closureIdentity(c);
                    ctr = lookupHashTable(censuses[t].hash, (StgWord)id);
                    ASSERT( ctr != NULL );
                    ctr->c.ldv.void_total += (long)size;
                    ctr = lookupHashTable(censuses[era].hash, (StgWord)id);
                    if (ctr == NULL) {
                        ctr = arenaAlloc(censuses[era].arena, sizeof(counter));
                        initLDVCtr(ctr);
                        insertHashTable(censuses[era].hash, (StgWord)id, ctr);
                        ctr->identity = id;
                        ctr->next = censuses[era].ctrs;
                        censuses[era].ctrs = ctr;
                    }
                    ctr->c.ldv.void_total -= (long)size;
                }
            }
        } else {
Example #5
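// Box a HsDouble: allocate a D# constructor with sizeofW(StgDouble) words of
// payload and store the value in it.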
HaskellObj
rts_mkDouble (Capability *cap, HsDouble d)
{
  StgClosure *p = (StgClosure *)allocate(cap,CONSTR_sizeW(0,sizeofW(StgDouble)));
  SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
  ASSIGN_DBL((P_)p->payload, (StgDouble)d);
  return p;
}
Example #6
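// Box a HsFunPtr: allocate a constructor with a single word of payload and
// store the function pointer in it.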
HaskellObj
rts_mkFunPtr (Capability *cap, HsFunPtr a)
{
  StgClosure *p = (StgClosure *)allocate(cap,sizeofW(StgHeader)+1);
  SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
  p->payload[0]  = (StgClosure *)a;
  return p;
}
Example #7
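// Specialised version of evacuate() for a closure known to be a
// heap-allocated BLACKHOLE, which is always sizeofW(StgInd) words.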
void
evacuate_BLACKHOLE(StgClosure **p)
{
    bdescr *bd;
    uint32_t gen_no;
    StgClosure *q;
    const StgInfoTable *info;
    q = *p;

    // closure is required to be a heap-allocated BLACKHOLE
    ASSERT(HEAP_ALLOCED_GC(q));
    ASSERT(GET_CLOSURE_TAG(q) == 0);

    bd = Bdescr((P_)q);

    // blackholes can't be in a compact
    ASSERT((bd->flags & BF_COMPACT) == 0);

    // blackholes *can* be in a large object: when raiseAsync() creates an
    // AP_STACK the payload might be large enough to create a large object.
    // See #14497.
    if (bd->flags & BF_LARGE) {
        evacuate_large((P_)q);
        return;
    }
    if (bd->flags & BF_EVACUATED) {
        if (bd->gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = true;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }
    if (bd->flags & BF_MARKED) {
        if (!is_marked((P_)q,bd)) {
            mark((P_)q,bd);
            push_mark_stack((P_)q);
        }
        return;
    }
    gen_no = bd->dest_no;
    info = q->header.info;
    if (IS_FORWARDING_PTR(info))
    {
        StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
        *p = e;
        if (gen_no < gct->evac_gen_no) {  // optimisation
            if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = true;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return;
    }

    ASSERT(INFO_PTR_TO_STRUCT(info)->type == BLACKHOLE);
    copy(p,info,q,sizeofW(StgInd),gen_no);
}
Example #8
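// Sanity-check every closure in the address range [start, end).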
void
checkHeapChunk(StgPtr start, StgPtr end)
{
  StgPtr p;
  nat size;

  for (p=start; p<end; p+=size) {
    ASSERT(LOOKS_LIKE_INFO_PTR(*p));
    size = checkClosure((StgClosure *)p);
    /* This is the smallest size of closure that can live in the heap. */
    ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
  }
}
Example #9
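// Given a run of 'count' adjacent update frames, with 'upd' pointing at the
// highest-addressed one, make every other frame's updatee an indirection to
// the master updatee and turn the redundant frames into a single stack gap.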
static struct stack_gap *
updateAdjacentFrames (Capability *cap, StgTSO *tso, StgUpdateFrame *upd,
                      uint32_t count, struct stack_gap *next)
{
    StgClosure *updatee;
    struct stack_gap *gap;
    uint32_t i;

    // The first one (highest address) is the frame we take the
    // "master" updatee from; all the others will be made indirections
    // to this one.  It is essential that we do it this way around: we
    // used to make the lowest-addressed frame the "master" frame and
    // shuffle it down, but a bad case cropped up (#5505) where this
    // happened repeatedly, generating a chain of indirections which
    // the GC repeatedly traversed (indirection chains longer than one
    // are not supposed to happen).  So now after identifying a block
    // of adjacent update frames we walk downwards again updating them
    // all to point to the highest one, before squeezing out all but
    // the highest one.
    updatee = upd->updatee;
    count--;

    upd--;
    gap = (struct stack_gap*)upd;

    for (i = count; i > 0; i--, upd--) {
        /*
         * Check two things: that the two update frames
         * don't point to the same object, and that the
         * updatee isn't already an indirection.
         * Both of these cases only happen when we're in a
         * black hole-style loop (and there are multiple
         * update frames on the stack pointing to the same
         * closure), but they can both screw us up if we
         * don't check.
         */
        if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
            updateThunk(cap, tso, upd->updatee, updatee);
        }
    }

    gap->gap_size = count * sizeofW(StgUpdateFrame);
    gap->next_gap = next;

    return gap;
}
Example #10
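// Claim a CAF for evaluation.  Under THREADED_RTS the claim is made by
// CAS'ing the CAF's info pointer to a WHITEHOLE; if another thread got there
// first, NULL is returned and the caller re-enters the CAF.  Otherwise a
// CAF_BLACKHOLE owned by the current TSO is allocated, the CAF is pointed at
// it, and the blackhole is returned.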
STATIC_INLINE StgInd *
lockCAF (StgRegTable *reg, StgIndStatic *caf)
{
    const StgInfoTable *orig_info;
    Capability *cap = regTableToCapability(reg);
    StgInd *bh;

    orig_info = caf->header.info;

#ifdef THREADED_RTS
    const StgInfoTable *cur_info;

    if (orig_info == &stg_IND_STATIC_info ||
        orig_info == &stg_WHITEHOLE_info) {
        // already claimed by another thread; re-enter the CAF
        return NULL;
    }

    cur_info = (const StgInfoTable *)
        cas((StgVolatilePtr)&caf->header.info,
            (StgWord)orig_info,
            (StgWord)&stg_WHITEHOLE_info);

    if (cur_info != orig_info) {
        // already claimed by another thread; re-enter the CAF
        return NULL;
    }

    // successfully claimed by us; overwrite with IND_STATIC
#endif

    // For the benefit of revertCAFs(), save the original info pointer
    caf->saved_info = orig_info;

    // Allocate the blackhole indirection closure
    bh = (StgInd *)allocate(cap, sizeofW(*bh));
    SET_HDR(bh, &stg_CAF_BLACKHOLE_info, caf->header.prof.ccs);
    bh->indirectee = (StgClosure *)cap->r.rCurrentTSO;

    caf->indirectee = (StgClosure *)bh;
    write_barrier();
    SET_INFO((StgClosure*)caf,&stg_IND_STATIC_info);

    return bh;
}
Example #11
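// Sanity-check every closure in a chain of heap blocks, skipping blocks that
// have already been swept and skipping any slop between closures.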
void checkHeapChain (bdescr *bd)
{
    StgPtr p;

    for (; bd != NULL; bd = bd->link) {
        if(!(bd->flags & BF_SWEPT)) {
            p = bd->start;
            while (p < bd->free) {
                nat size = checkClosure((StgClosure *)p);
                /* This is the smallest size of closure that can live in the heap */
                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
                p += size;

                /* skip over slop */
                while (p < bd->free &&
                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
            }
        }
    }
}
Example #12
static void *
stgAllocStable(size_t size_in_bytes, StgStablePtr *stable)
{
  StgArrWords* arr;
  nat data_size_in_words, total_size_in_words;
  
  /* round up to a whole number of words */
  data_size_in_words  = ROUNDUP_BYTES_TO_WDS(size_in_bytes);
  total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
  
  /* allocate and fill it in */
  arr = (StgArrWords *)allocate(total_size_in_words);
  SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, size_in_bytes);
 
  /* obtain a stable ptr */
  *stable = getStablePtr((StgPtr)arr);

  /* and return a ptr to the goods inside the array */
  return(&(arr->payload));
}
Example #13
/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
  StgTSO *tso;
  nat g;

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (tso=generations[g].threads; tso != END_TSO_QUEUE; 
           tso = tso->global_link) {
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
          ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
          if (checkTSOs)
              checkTSO(tso);

          // If this TSO is dirty and in an old generation, it had better
          // be on the mutable list.
          if (tso->dirty) {
              ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
              tso->flags &= ~TSO_MARKED;
          }

          {
              StgStack *stack;
              StgUnderflowFrame *frame;

              stack = tso->stackobj;
              while (1) {
                  if (stack->dirty & 1) {
                      ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                      stack->dirty &= ~TSO_MARKED;
                  }
                  frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                - sizeofW(StgUnderflowFrame));
                  if (frame->info != &stg_stack_underflow_frame_info
                      || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                  stack = frame->next_chunk;
              }
          }
      }
  }
}
Example #14
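// Walk a chain of heap blocks, calling checkAddress() on the info pointer of
// every non-primitive closure and searching the payload of stack-like
// objects; presumably part of checking that no heap object still refers to
// object code that is about to be unloaded.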
static void searchHeapBlocks (HashTable *addrs, bdescr *bd)
{
    StgPtr p;
    StgInfoTable *info;
    nat size;
    rtsBool prim;

    for (; bd != NULL; bd = bd->link) {

        if (bd->flags & BF_PINNED) {
            // Assume that objects in PINNED blocks cannot refer to
            // unloadable object code, so we can skip them.
            continue;
        }

        p = bd->start;
        while (p < bd->free) {
            info = get_itbl((StgClosure *)p);
            prim = rtsFalse;

            switch (info->type) {

            case THUNK:
                size = thunk_sizeW_fromITBL(info);
                break;

            case THUNK_1_1:
            case THUNK_0_2:
            case THUNK_2_0:
                size = sizeofW(StgThunkHeader) + 2;
                break;

            case THUNK_1_0:
            case THUNK_0_1:
            case THUNK_SELECTOR:
                size = sizeofW(StgThunkHeader) + 1;
                break;

            case CONSTR:
            case FUN:
            case FUN_1_0:
            case FUN_0_1:
            case FUN_1_1:
            case FUN_0_2:
            case FUN_2_0:
            case CONSTR_1_0:
            case CONSTR_0_1:
            case CONSTR_1_1:
            case CONSTR_0_2:
            case CONSTR_2_0:
                size = sizeW_fromITBL(info);
                break;

            case BLACKHOLE:
            case BLOCKING_QUEUE:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case IND:
                // Special case/Delicate Hack: INDs don't normally
                // appear, since we're doing this heap census right
                // after GC.  However, GarbageCollect() also does
                // resurrectThreads(), which can update some
                // blackholes when it calls raiseAsync() on the
                // resurrected threads.  So we know that any IND will
                // be the size of a BLACKHOLE.
                prim = rtsTrue;
                size = BLACKHOLE_sizeW();
                break;

            case BCO:
                prim = rtsTrue;
                size = bco_sizeW((StgBCO *)p);
                break;

            case MVAR_CLEAN:
            case MVAR_DIRTY:
            case TVAR:
            case WEAK:
            case PRIM:
            case MUT_PRIM:
            case MUT_VAR_CLEAN:
            case MUT_VAR_DIRTY:
                prim = rtsTrue;
                size = sizeW_fromITBL(info);
                break;

            case AP:
                prim = rtsTrue;
                size = ap_sizeW((StgAP *)p);
                break;

            case PAP:
                prim = rtsTrue;
                size = pap_sizeW((StgPAP *)p);
                break;

            case AP_STACK:
            {
                StgAP_STACK *ap = (StgAP_STACK *)p;
                prim = rtsTrue;
                size = ap_stack_sizeW(ap);
                searchStackChunk(addrs, (StgPtr)ap->payload,
                                 (StgPtr)ap->payload + ap->size);
                break;
            }

            case ARR_WORDS:
                prim = rtsTrue;
                size = arr_words_sizeW((StgArrBytes*)p);
                break;

            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
                break;

            case SMALL_MUT_ARR_PTRS_CLEAN:
            case SMALL_MUT_ARR_PTRS_DIRTY:
            case SMALL_MUT_ARR_PTRS_FROZEN:
            case SMALL_MUT_ARR_PTRS_FROZEN0:
                prim = rtsTrue;
                size = small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)p);
                break;

            case TSO:
                prim = rtsTrue;
                size = sizeofW(StgTSO);
                break;

            case STACK: {
                StgStack *stack = (StgStack*)p;
                prim = rtsTrue;
                searchStackChunk(addrs, stack->sp,
                                 stack->stack + stack->stack_size);
                size = stack_sizeW(stack);
                break;
            }

            case TREC_CHUNK:
                prim = rtsTrue;
                size = sizeofW(StgTRecChunk);
                break;

            default:
                barf("heapCensus, unknown object: %d", info->type);
            }

            if (!prim) {
                checkAddress(addrs,info);
            }

            p += size;
        }
    }
}
Example #15
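// Derived-constants generator: emits C preprocessor definitions for header
// sizes and for the offsets of RTS structure fields, for use by the code
// generator (this appears to be GHC's mkDerivedConstants utility).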
int
main(int argc, char *argv[])
{
#ifndef GEN_HASKELL
    printf("/* This file is created automatically.  Do not edit by hand.*/\n\n");

    printf("#define STD_HDR_SIZE   %" FMT_SizeT "\n", (size_t)sizeofW(StgHeader) - sizeofW(StgProfHeader));
    /* grrr.. PROFILING is on so we need to subtract sizeofW(StgProfHeader) */
    printf("#define PROF_HDR_SIZE  %" FMT_SizeT "\n", (size_t)sizeofW(StgProfHeader));

    printf("#define BLOCK_SIZE   %u\n", BLOCK_SIZE);
    printf("#define MBLOCK_SIZE   %u\n", MBLOCK_SIZE);
    printf("#define BLOCKS_PER_MBLOCK  %" FMT_SizeT "\n", (lnat)BLOCKS_PER_MBLOCK);
    // could be derived, but better to save doing the calculation twice

    printf("\n\n");
#endif

    field_offset(StgRegTable, rR1);
    field_offset(StgRegTable, rR2);
    field_offset(StgRegTable, rR3);
    field_offset(StgRegTable, rR4);
    field_offset(StgRegTable, rR5);
    field_offset(StgRegTable, rR6);
    field_offset(StgRegTable, rR7);
    field_offset(StgRegTable, rR8);
    field_offset(StgRegTable, rR9);
    field_offset(StgRegTable, rR10);
    field_offset(StgRegTable, rF1);
    field_offset(StgRegTable, rF2);
    field_offset(StgRegTable, rF3);
    field_offset(StgRegTable, rF4);
    field_offset(StgRegTable, rD1);
    field_offset(StgRegTable, rD2);
    field_offset(StgRegTable, rL1);
    field_offset(StgRegTable, rSp);
    field_offset(StgRegTable, rSpLim);
    field_offset(StgRegTable, rHp);
    field_offset(StgRegTable, rHpLim);
    field_offset(StgRegTable, rCCCS);
    field_offset(StgRegTable, rCurrentTSO);
    field_offset(StgRegTable, rCurrentNursery);
    field_offset(StgRegTable, rHpAlloc);
    struct_field(StgRegTable, rRet);
    struct_field(StgRegTable, rNursery);

    def_offset("stgEagerBlackholeInfo", FUN_OFFSET(stgEagerBlackholeInfo));
    def_offset("stgGCEnter1", FUN_OFFSET(stgGCEnter1));
    def_offset("stgGCFun", FUN_OFFSET(stgGCFun));

    field_offset(Capability, r);
    field_offset(Capability, lock);
    struct_field(Capability, no);
    struct_field(Capability, mut_lists);
    struct_field(Capability, context_switch);
    struct_field(Capability, interrupt);
    struct_field(Capability, sparks);

    struct_field(bdescr, start);
    struct_field(bdescr, free);
    struct_field(bdescr, blocks);
    struct_field(bdescr, gen_no);
    struct_field(bdescr, link);

    struct_size(generation);
    struct_field(generation, n_new_large_words);

    struct_size(CostCentreStack);
    struct_field(CostCentreStack, ccsID);
    struct_field(CostCentreStack, mem_alloc);
    struct_field(CostCentreStack, scc_count);
    struct_field(CostCentreStack, prevStack);

    struct_field(CostCentre, ccID);
    struct_field(CostCentre, link);

    struct_field(StgHeader, info);
    struct_field_("StgHeader_ccs",  StgHeader, prof.ccs);
    struct_field_("StgHeader_ldvw", StgHeader, prof.hp.ldvw);

    struct_size(StgSMPThunkHeader);

    closure_payload(StgClosure,payload);

    struct_field(StgEntCounter, allocs);
    struct_field(StgEntCounter, registeredp);
    struct_field(StgEntCounter, link);
    struct_field(StgEntCounter, entry_count);

    closure_size(StgUpdateFrame);
    closure_size(StgCatchFrame);
    closure_size(StgStopFrame);

    closure_size(StgMutArrPtrs);
    closure_field(StgMutArrPtrs, ptrs);
    closure_field(StgMutArrPtrs, size);

    closure_size(StgArrWords);
    closure_field(StgArrWords, bytes);
    closure_payload(StgArrWords, payload);

    closure_field(StgTSO, _link);
    closure_field(StgTSO, global_link);
    closure_field(StgTSO, what_next);
    closure_field(StgTSO, why_blocked);
    closure_field(StgTSO, block_info);
    closure_field(StgTSO, blocked_exceptions);
    closure_field(StgTSO, id);
    closure_field(StgTSO, cap);
    closure_field(StgTSO, saved_errno);
    closure_field(StgTSO, trec);
    closure_field(StgTSO, flags);
    closure_field(StgTSO, dirty);
    closure_field(StgTSO, bq);
    closure_field_("StgTSO_cccs", StgTSO, prof.cccs);
    closure_field(StgTSO, stackobj);

    closure_field(StgStack, sp);
    closure_field_offset(StgStack, stack);
    closure_field(StgStack, stack_size);
    closure_field(StgStack, dirty);

    struct_size(StgTSOProfInfo);

    opt_struct_size(StgTSOProfInfo,PROFILING);

    closure_field(StgUpdateFrame, updatee);

    closure_field(StgCatchFrame, handler);
    closure_field(StgCatchFrame, exceptions_blocked);

    closure_size(StgPAP);
    closure_field(StgPAP, n_args);
    closure_field_gcptr(StgPAP, fun);
    closure_field(StgPAP, arity);
    closure_payload(StgPAP, payload);

    thunk_size(StgAP);
    closure_field(StgAP, n_args);
    closure_field_gcptr(StgAP, fun);
    closure_payload(StgAP, payload);

    thunk_size(StgAP_STACK);
    closure_field(StgAP_STACK, size);
    closure_field_gcptr(StgAP_STACK, fun);
    closure_payload(StgAP_STACK, payload);

    thunk_size(StgSelector);

    closure_field_gcptr(StgInd, indirectee);

    closure_size(StgMutVar);
    closure_field(StgMutVar, var);

    closure_size(StgAtomicallyFrame);
    closure_field(StgAtomicallyFrame, code);
    closure_field(StgAtomicallyFrame, next_invariant_to_check);
    closure_field(StgAtomicallyFrame, result);

    closure_field(StgInvariantCheckQueue, invariant);
    closure_field(StgInvariantCheckQueue, my_execution);
    closure_field(StgInvariantCheckQueue, next_queue_entry);

    closure_field(StgAtomicInvariant, code);

    closure_field(StgTRecHeader, enclosing_trec);

    closure_size(StgCatchSTMFrame);
    closure_field(StgCatchSTMFrame, handler);
    closure_field(StgCatchSTMFrame, code);

    closure_size(StgCatchRetryFrame);
    closure_field(StgCatchRetryFrame, running_alt_code);
    closure_field(StgCatchRetryFrame, first_code);
    closure_field(StgCatchRetryFrame, alt_code);

    closure_field(StgTVarWatchQueue, closure);
    closure_field(StgTVarWatchQueue, next_queue_entry);
    closure_field(StgTVarWatchQueue, prev_queue_entry);

    closure_field(StgTVar, current_value);

    closure_size(StgWeak);
    closure_field(StgWeak,link);
    closure_field(StgWeak,key);
    closure_field(StgWeak,value);
    closure_field(StgWeak,finalizer);
    closure_field(StgWeak,cfinalizer);

    closure_size(StgDeadWeak);
    closure_field(StgDeadWeak,link);

    closure_size(StgMVar);
    closure_field(StgMVar,head);
    closure_field(StgMVar,tail);
    closure_field(StgMVar,value);

    closure_size(StgMVarTSOQueue);
    closure_field(StgMVarTSOQueue, link);
    closure_field(StgMVarTSOQueue, tso);

    closure_size(StgBCO);
    closure_field(StgBCO, instrs);
    closure_field(StgBCO, literals);
    closure_field(StgBCO, ptrs);
    closure_field(StgBCO, arity);
    closure_field(StgBCO, size);
    closure_payload(StgBCO, bitmap);

    closure_size(StgStableName);
    closure_field(StgStableName,sn);

    closure_size(StgBlockingQueue);
    closure_field(StgBlockingQueue, bh);
    closure_field(StgBlockingQueue, owner);
    closure_field(StgBlockingQueue, queue);
    closure_field(StgBlockingQueue, link);

    closure_size(MessageBlackHole);
    closure_field(MessageBlackHole, link);
    closure_field(MessageBlackHole, tso);
    closure_field(MessageBlackHole, bh);

    struct_field_("RtsFlags_ProfFlags_showCCSOnException",
		  RTS_FLAGS, ProfFlags.showCCSOnException);
    struct_field_("RtsFlags_DebugFlags_apply",
		  RTS_FLAGS, DebugFlags.apply);
    struct_field_("RtsFlags_DebugFlags_sanity",
		  RTS_FLAGS, DebugFlags.sanity);
    struct_field_("RtsFlags_DebugFlags_weak",
		  RTS_FLAGS, DebugFlags.weak);
    struct_field_("RtsFlags_GcFlags_initialStkSize",
		  RTS_FLAGS, GcFlags.initialStkSize);
    struct_field_("RtsFlags_MiscFlags_tickInterval",
		  RTS_FLAGS, MiscFlags.tickInterval);

    struct_size(StgFunInfoExtraFwd);
    struct_field(StgFunInfoExtraFwd, slow_apply);
    struct_field(StgFunInfoExtraFwd, fun_type);
    struct_field(StgFunInfoExtraFwd, arity);
    struct_field_("StgFunInfoExtraFwd_bitmap", StgFunInfoExtraFwd, b.bitmap);

    struct_size(StgFunInfoExtraRev);
    struct_field(StgFunInfoExtraRev, slow_apply_offset);
    struct_field(StgFunInfoExtraRev, fun_type);
    struct_field(StgFunInfoExtraRev, arity);
    struct_field_("StgFunInfoExtraRev_bitmap", StgFunInfoExtraRev, b.bitmap);

    struct_field(StgLargeBitmap, size);
    field_offset(StgLargeBitmap, bitmap);

    struct_size(snEntry);
    struct_field(snEntry,sn_obj);
    struct_field(snEntry,addr);

#ifdef mingw32_HOST_OS
    struct_size(StgAsyncIOResult);
    struct_field(StgAsyncIOResult, reqID);
    struct_field(StgAsyncIOResult, len);
    struct_field(StgAsyncIOResult, errCode);
#endif

    return 0;
}
Example #16
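// Scavenge a stack chunk in the range [p, stack_end): walk each activation
// record, evacuating the pointers described by its bitmap (or bitmap-like
// layout) and following SRTs during a major GC.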
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  const StgRetInfoTable* info;
  StgWord bitmap;
  nat size;

  /* 
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with an activation record. 
   */

  while (p < stack_end) {
    info  = get_ret_itbl((StgClosure *)p);
      
    switch (info->i.type) {
	
    case UPDATE_FRAME:
	// In SMP, we can get update frames that point to indirections
	// when two threads evaluate the same thunk.  We do attempt to
	// discover this situation in threadPaused(), but it's
	// possible that the following sequence occurs:
	//
	//        A             B
	//                  enter T
	//     enter T
	//     blackhole T
	//                  update T
	//     GC
	//
	// Now T is an indirection, and the update frame is already
	// marked on A's stack, so we won't traverse it again in
	// threadPaused().  We could traverse the whole stack again
	// before GC, but that seems like overkill.
	//
	// Scavenging this update frame as normal would be disastrous;
	// the updatee would end up pointing to the value.  So we
	// check whether the value after evacuation is a BLACKHOLE,
	// and if not, we change the update frame to an stg_enter
	// frame that simply returns the value.  Hence, blackholing is
        // compulsory (otherwise we would have to check for thunks
        // too).
        //
        // Note [upd-black-hole]
        // One slight hiccup is that the THUNK_SELECTOR machinery can
        // overwrite the updatee with an IND.  In parallel GC, this
        // could even be happening concurrently, so we can't check for
        // the IND.  Fortunately if we assume that blackholing is
        // happening (either lazy or eager), then we can be sure that
        // the updatee is never a THUNK_SELECTOR and we're ok.
        // NB. this is a new invariant: blackholing is not optional.
    {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *v;

        evacuate(&frame->updatee);
        v = frame->updatee;
        if (GET_CLOSURE_TAG(v) != 0 ||
            (get_itbl(v)->type != BLACKHOLE)) {
            // blackholing is compulsory, see above.
            frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
        }
        ASSERT(v->header.info != &stg_TSO_info);
        p += sizeofW(StgUpdateFrame);
        continue;
    }

      // small bitmap (< 32 entries, or 64 on a 64-bit machine) 
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
	bitmap = BITMAP_BITS(info->i.layout.bitmap);
	size   = BITMAP_SIZE(info->i.layout.bitmap);
	// NOTE: the payload starts immediately after the info-ptr, we
	// don't have an StgHeader in the same sense as a heap closure.
	p++;
	p = scavenge_small_bitmap(p, size, bitmap);

    follow_srt:
	if (major_gc) 
	    scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
	continue;

    case RET_BCO: {
	StgBCO *bco;
	nat size;

	p++;
	evacuate((StgClosure **)p);
	bco = (StgBCO *)*p;
	p++;
	size = BCO_BITMAP_SIZE(bco);
	scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
	p += size;
	continue;
    }

      // large bitmap (> 32 entries, or > 64 on a 64-bit machine) 
    case RET_BIG:
    {
	nat size;

	size = GET_LARGE_BITMAP(&info->i)->size;
	p++;
	scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
	p += size;
	// and don't forget to follow the SRT 
	goto follow_srt;
    }

      // Dynamic bitmap: the mask is stored on the stack, and
      // there are a number of non-pointers followed by a number
      // of pointers above the bitmapped area.  (see StgMacros.h,
      // HEAP_CHK_GEN).
    case RET_DYN:
    {
	StgWord dyn;
	dyn = ((StgRetDyn *)p)->liveness;

	// traverse the bitmap first
	bitmap = RET_DYN_LIVENESS(dyn);
	p      = (P_)&((StgRetDyn *)p)->payload[0];
	size   = RET_DYN_BITMAP_SIZE;
	p = scavenge_small_bitmap(p, size, bitmap);

	// skip over the non-ptr words
	p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
	
	// follow the ptr words
	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
	    evacuate((StgClosure **)p);
	    p++;
	}
	continue;
    }

    case RET_FUN:
    {
	StgRetFun *ret_fun = (StgRetFun *)p;
	StgFunInfoTable *fun_info;

	evacuate(&ret_fun->fun);
 	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
	p = scavenge_arg_block(fun_info, ret_fun->payload);
	goto follow_srt;
    }

    default:
	barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
    }
  }		     
}
Example #17
// check an individual stack object
StgOffset 
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
	StgWord dyn;
	StgPtr p;
	StgRetDyn* r;
	
	r = (StgRetDyn *)c;
	dyn = r->liveness;
	
	p = (P_)(r->payload);
	checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

	// skip over the non-pointers
	p += RET_DYN_NONPTRS(dyn);
	
	// follow the ptr words
	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
	    checkClosureShallow((StgClosure *)*p);
	    p++;
	}
	
	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
	    RET_DYN_NONPTR_REGS_SIZE +
	    RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
      ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
	size = BITMAP_SIZE(info->i.layout.bitmap);
	checkSmallBitmap((StgPtr)c + 1, 
			 BITMAP_BITS(info->i.layout.bitmap), size);
	return 1 + size;

    case RET_BCO: {
	StgBCO *bco;
	nat size;
	bco = (StgBCO *)*(c+1);
	size = BCO_BITMAP_SIZE(bco);
	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
	return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
	size = GET_LARGE_BITMAP(&info->i)->size;
	checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
	return 1 + size;

    case RET_FUN:
    {
	StgFunInfoTable *fun_info;
	StgRetFun *ret_fun;

	ret_fun = (StgRetFun *)c;
	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
	size = ret_fun->size;
	switch (fun_info->f.fun_type) {
	case ARG_GEN:
	    checkSmallBitmap((StgPtr)ret_fun->payload, 
			     BITMAP_BITS(fun_info->f.b.bitmap), size);
	    break;
	case ARG_GEN_BIG:
	    checkLargeBitmap((StgPtr)ret_fun->payload,
			     GET_FUN_LARGE_BITMAP(fun_info), size);
	    break;
	default:
	    checkSmallBitmap((StgPtr)ret_fun->payload,
			     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
			     size);
	    break;
	}
	return sizeofW(StgRetFun) + size;
    }

    default:
	barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}
Example #18
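// Debug check for compact regions: walk every closure in 'block' and assert
// that each of its pointer fields points back into the same compact 'str'.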
static void
verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
{
    bdescr *bd;
    StgPtr p;
    const StgInfoTable *info;
    StgClosure *q;

    p = (P_)firstBlockGetCompact(block);
    bd = Bdescr((P_)block);
    while (p < bd->free) {
        q = (StgClosure*)p;

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

        info = get_itbl(q);
        switch (info->type) {
        case CONSTR_1_0:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0]));
            /* fallthrough */
        case CONSTR_0_1:
            p += sizeofW(StgClosure) + 1;
            break;

        case CONSTR_2_0:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[1]));
            /* fallthrough */
        case CONSTR_1_1:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0]));
            /* fallthrough */
        case CONSTR_0_2:
            p += sizeofW(StgClosure) + 2;
            break;

        case CONSTR:
        case PRIM:
        case CONSTR_NOCAF:
        {
            uint32_t i;

            for (i = 0; i < info->layout.payload.ptrs; i++) {
                check_object_in_compact(str, UNTAG_CLOSURE(q->payload[i]));
            }
            p += sizeofW(StgClosure) + info->layout.payload.ptrs +
                info->layout.payload.nptrs;
            break;
        }

        case ARR_WORDS:
            p += arr_words_sizeW((StgArrBytes*)p);
            break;

        case MUT_ARR_PTRS_FROZEN_CLEAN:
        case MUT_ARR_PTRS_FROZEN_DIRTY:
            verify_mut_arr_ptrs(str, (StgMutArrPtrs*)p);
            p += mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            break;

        case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
        case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
        {
            uint32_t i;
            StgSmallMutArrPtrs *arr = (StgSmallMutArrPtrs*)p;

            for (i = 0; i < arr->ptrs; i++)
                check_object_in_compact(str, UNTAG_CLOSURE(arr->payload[i]));

            p += sizeofW(StgSmallMutArrPtrs) + arr->ptrs;
            break;
        }

        case COMPACT_NFDATA:
            p += sizeofW(StgCompactNFData);
            break;

        default:
            barf("verify_consistency_block");
        }
    }

    return;
}
Example #19
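// Scavenge one to-space block: walk every closure between bd->u.scan and
// bd->free, evacuating everything each closure points to, and record mutable
// objects (or objects pointing to younger generations) on the mutable list.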
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
  StgPtr p, q;
  StgInfoTable *info;
  rtsBool saved_eager_promotion;
  gen_workspace *ws;

  debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
	     bd->start, bd->gen_no, bd->u.scan);

  gct->scan_bd = bd;
  gct->evac_gen_no = bd->gen_no;
  saved_eager_promotion = gct->eager_promotion;
  gct->failed_to_evac = rtsFalse;

  ws = &gct->gens[bd->gen->no];

  p = bd->u.scan;
  
  // we might be evacuating into the very object that we're
  // scavenging, so we have to check the real bd->free pointer each
  // time around the loop.
  while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

    ASSERT(bd->link == NULL);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);
    
    ASSERT(gct->thunk_selector_depth == 0);

    q = p;
    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    { 
	StgMVar *mvar = ((StgMVar *)p);
	gct->eager_promotion = rtsFalse;
	evacuate((StgClosure **)&mvar->head);
	evacuate((StgClosure **)&mvar->tail);
	evacuate((StgClosure **)&mvar->value);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    mvar->header.info = &stg_MVAR_DIRTY_info;
	} else {
	    mvar->header.info = &stg_MVAR_CLEAN_info;
	}
	p += sizeofW(StgMVar);
	break;
    }

    case FUN_2_0:
	scavenge_fun_srt(info);
	evacuate(&((StgClosure *)p)->payload[1]);
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_2_0:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[1]);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 2;
	break;

    case CONSTR_2_0:
	evacuate(&((StgClosure *)p)->payload[1]);
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
	
    case THUNK_1_0:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 1;
	break;
	
    case FUN_1_0:
	scavenge_fun_srt(info);
    case CONSTR_1_0:
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;
	
    case THUNK_0_1:
	scavenge_thunk_srt(info);
	p += sizeofW(StgThunk) + 1;
	break;
	
    case FUN_0_1:
	scavenge_fun_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;
	
    case THUNK_0_2:
	scavenge_thunk_srt(info);
	p += sizeofW(StgThunk) + 2;
	break;
	
    case FUN_0_2:
	scavenge_fun_srt(info);
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;
	
    case THUNK_1_1:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 2;
	break;

    case FUN_1_1:
	scavenge_fun_srt(info);
    case CONSTR_1_1:
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
	
    case FUN:
	scavenge_fun_srt(info);
	goto gen_obj;

    case THUNK:
    {
	StgPtr end;

	scavenge_thunk_srt(info);
	end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;
	break;
    }
	
    gen_obj:
    case CONSTR:
    case WEAK:
    case PRIM:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;
	break;
    }

    case BCO: {
	StgBCO *bco = (StgBCO *)p;
	evacuate((StgClosure **)&bco->instrs);
	evacuate((StgClosure **)&bco->literals);
	evacuate((StgClosure **)&bco->ptrs);
	p += bco_sizeW(bco);
	break;
    }

    case IND_PERM:
    case BLACKHOLE:
	evacuate(&((StgInd *)p)->indirectee);
	p += sizeofW(StgInd);
	break;

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
	gct->eager_promotion = rtsFalse;
	evacuate(&((StgMutVar *)p)->var);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
	}
	p += sizeofW(StgMutVar);
	break;

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;
        
	gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
	} else {
	    bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
	}
        p += sizeofW(StgBlockingQueue);
        break;
    }

    case THUNK_SELECTOR:
    { 
	StgSelector *s = (StgSelector *)p;
	evacuate(&s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }

    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
	StgAP_STACK *ap = (StgAP_STACK *)p;

	evacuate(&ap->fun);
	scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	p = (StgPtr)ap->payload + ap->size;
	break;
    }

    case PAP:
	p = scavenge_PAP((StgPAP *)p);
	break;

    case AP:
	p = scavenge_AP((StgAP *)p);
	break;

    case ARR_WORDS:
	// nothing to follow 
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

	if (gct->failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
	}

	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	// follow everything 
    {
        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

	// If we're going to put this object on the mutable list, then
	// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
	if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
	}
	break;
    }

    case TSO:
    { 
        scavengeTSO((StgTSO *)p);
        p += sizeofW(StgTSO);
	break;
    }

    case STACK:
    {
        StgStack *stack = (StgStack*)p;

        gct->eager_promotion = rtsFalse;

        scavenge_stack(stack->sp, stack->stack + stack->stack_size);
        stack->dirty = gct->failed_to_evac;
        p += stack_sizeW(stack);

        gct->eager_promotion = saved_eager_promotion;
        break;
    }

    case MUT_PRIM:
      {
	StgPtr end;

	gct->eager_promotion = rtsFalse;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;

	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // mutable
	break;
      }

    case TREC_CHUNK:
      {
	StgWord i;
	StgTRecChunk *tc = ((StgTRecChunk *) p);
	TRecEntry *e = &(tc -> entries[0]);
	gct->eager_promotion = rtsFalse;
	evacuate((StgClosure **)&tc->prev_chunk);
	for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
	  evacuate((StgClosure **)&e->tvar);
	  evacuate((StgClosure **)&e->expected_value);
	  evacuate((StgClosure **)&e->new_value);
	}
	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // mutable
	p += sizeofW(StgTRecChunk);
	break;
      }

    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p", 
	     info->type, p);
    }

    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or 
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (gct->failed_to_evac) {
	gct->failed_to_evac = rtsFalse;
	if (bd->gen_no > 0) {
	    recordMutableGen_GC((StgClosure *)q, bd->gen_no);
	}
    }
  }

  if (p > bd->free)  {
      gct->copied += ws->todo_free - bd->free;
      bd->free = p;
  }

  debugTrace(DEBUG_gc, "   scavenged %ld bytes",
             (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

  // update stats: this is a block that has been scavenged
  gct->scanned += bd->free - bd->u.scan;
  bd->u.scan = bd->free;

  if (bd != ws->todo_bd) {
      // we're not going to evac any more objects into
      // this block, so push it now.
      push_scanned_block(bd, ws);
  }

  gct->scan_bd = NULL;
}
Example #20
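// Constants generator: prints closure-header, info-table and update-frame
// sizes (in words), together with block and megablock sizes, as C
// preprocessor definitions.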
int
main(int argc, char *argv[])
{
    printf("-- This file is created automatically.  Do not edit by hand.\n\n");

    printf("#define STD_HDR_SIZE   %d\n", sizeofW(StgHeader));
    printf("#define PROF_HDR_SIZE  %d\n", sizeofW(StgProfHeader));
    printf("#define GRAN_HDR_SIZE  %d\n", sizeofW(StgGranHeader));

    printf("#define ARR_WORDS_HDR_SIZE  %d\n", 
	   sizeofW(StgArrWords) - sizeofW(StgHeader));

    printf("#define ARR_PTRS_HDR_SIZE   %d\n", 
	   sizeofW(StgMutArrPtrs) - sizeofW(StgHeader));

    printf("#define STD_ITBL_SIZE   %d\n", sizeofW(StgInfoTable));
    printf("#define RET_ITBL_SIZE   %d\n", sizeofW(StgRetInfoTable) - sizeofW(StgInfoTable));
    printf("#define PROF_ITBL_SIZE  %d\n", sizeofW(StgProfInfo));
    printf("#define GRAN_ITBL_SIZE  %d\n", 0);
    printf("#define TICKY_ITBL_SIZE %d\n", sizeofW(StgTickyInfo));

    printf("#define STD_UF_SIZE   %d\n", sizeofW(StgUpdateFrame));
    printf("#define GRAN_UF_SIZE   %d\n",  
	   sizeofW(StgUpdateFrame) + sizeofW(StgGranHeader));
    printf("#define PROF_UF_SIZE   %d\n",  
	   sizeofW(StgUpdateFrame) + sizeofW(StgProfHeader));

    printf("#define UF_RET     %d\n",
	   OFFSET(StgUpdateFrame,header.info));

    printf("#define UF_UPDATEE %d\n",
	   OFFSET(StgUpdateFrame,updatee) / sizeof(W_));

    printf("#define BLOCK_SIZE   %d\n", BLOCK_SIZE);
    printf("#define MBLOCK_SIZE   %d\n", MBLOCK_SIZE);  
    return 0;
}
Example #21
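// Squeeze the stack of 'tso': stage 1 replaces each run of adjacent update
// frames with a single frame plus a stack gap; stage 2 then slides the
// remaining stack contents over the gaps to close them.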
static void
stackSqueeze(Capability *cap, StgTSO *tso, StgPtr bottom)
{
    StgPtr frame;
    uint32_t adjacent_update_frames;
    struct stack_gap *gap;

    // Stage 1:
    //    Traverse the stack upwards, replacing adjacent update frames
    //    with a single update frame and a "stack gap".  A stack gap
    //    contains two values: the size of the gap, and the distance
    //    to the next gap (or the stack top).

    frame = tso->stackobj->sp;

    ASSERT(frame < bottom);

    adjacent_update_frames = 0;
    gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));

    while (frame <= bottom)
    {
        switch (get_ret_itbl((StgClosure *)frame)->i.type) {

        case UPDATE_FRAME:
        {
            if (adjacent_update_frames > 0) {
                TICK_UPD_SQUEEZED();
            }
            adjacent_update_frames++;

            frame += sizeofW(StgUpdateFrame);
            continue;
        }

        default:
            // we're not in a gap... check whether this is the end of a gap
            // (an update frame can't be the end of a gap).
            if (adjacent_update_frames > 1) {
                gap = updateAdjacentFrames(cap, tso,
                                           (StgUpdateFrame*)(frame - sizeofW(StgUpdateFrame)),
                                           adjacent_update_frames, gap);
            }
            adjacent_update_frames = 0;

            frame += stack_frame_sizeW((StgClosure *)frame);
            continue;
        }
    }

    if (adjacent_update_frames > 1) {
        gap = updateAdjacentFrames(cap, tso,
                                   (StgUpdateFrame*)(frame - sizeofW(StgUpdateFrame)),
                                   adjacent_update_frames, gap);
    }

    // Now we have a stack with gap-structs in it, and we have to walk down
    // shoving the stack up to fill in the gaps.  A diagram might
    // help:
    //
    //    +| ********* |
    //     | ********* | <- sp
    //     |           |
    //     |           | <- gap_start
    //     | ......... |                |
    //     | stack_gap | <- gap         | chunk_size
    //     | ......... |                |
    //     | ......... | <- gap_end     v
    //     | ********* |
    //     | ********* |
    //     | ********* |
    //    -| ********* |
    //
    // 'sp'  points to the current top-of-stack
    // 'gap' points to the stack_gap structure inside the gap
    // *****   indicates real stack data
    // .....   indicates gap
    // <empty> indicates unused
    //
    {
        StgWord8 *sp;
        StgWord8 *gap_start, *next_gap_start, *gap_end;
        uint32_t chunk_size;

        next_gap_start = (StgWord8*)gap + sizeof(StgUpdateFrame);
        sp = next_gap_start;

        while ((StgPtr)gap > tso->stackobj->sp) {

            // we're working in *bytes* now...
            gap_start = next_gap_start;
            gap_end = gap_start - gap->gap_size * sizeof(W_);

            gap = gap->next_gap;
            next_gap_start = (StgWord8*)gap + sizeof(StgUpdateFrame);

            chunk_size = gap_end - next_gap_start;
            sp -= chunk_size;
            memmove(sp, next_gap_start, chunk_size);
        }

        tso->stackobj->sp = (StgPtr)sp;
    }
}
Example #22
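// Allocate space for a non-updatable closure, rounding the request up to the
// minimum heap-object size (header plus MIN_PAYLOAD_SIZE payload words).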
STATIC_INLINE StgPtr
allocate_NONUPD (Capability *cap, int n_words)
{
    return allocate(cap, stg_max(sizeofW(StgHeader)+MIN_PAYLOAD_SIZE, n_words));
}
Example #23
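// evacuate(): copy the closure pointed to by *p into to-space (or deal with
// the static, large-object, compacted and already-evacuated cases) and
// update *p to point to the new location.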
REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  nat gen_no;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

  if (!HEAP_ALLOCED_GC(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(FUN_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
          return;

      case CONSTR_STATIC:
          evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a gen that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          push_mark_stack((P_)q);
      }
      return;
  }

  gen_no = bd->dest_no;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (gct->evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the gct->failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    /*
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  gen is the lowest generation that the
     * current object would be evacuated to, so we only do the full
     * check if gen is too low.
     */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (gen_no < gct->evac_gen_no) {  // optimisation
          if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }

  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      goto loop;

  // For ints and chars of low value, save space by replacing references to
  //    these closures with references to common, shared ones in the RTS.
  //
  // * Except when compiling into Windows DLLs which don't support cross-package
  //    data references very well.
  //
  case CONSTR_0_1:
  {
#if defined(COMPILING_WINDOWS_DLL)
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
#else
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p =  TAG_CLOSURE(tag,
                            (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                           );
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                             (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                             );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      }
#endif
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
    copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
    return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case FUN:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
      return;

  case BLACKHOLE:
  {
      StgClosure *r;
      const StgInfoTable *i;
      r = ((StgInd*)q)->indirectee;
      if (GET_CLOSURE_TAG(r) == 0) {
          i = r->header.info;
          if (IS_FORWARDING_PTR(i)) {
              r = (StgClosure *)UN_FORWARDING_PTR(i);
              i = r->header.info;
          }
          if (i == &stg_TSO_info
              || i == &stg_WHITEHOLE_info
              || i == &stg_BLOCKING_QUEUE_CLEAN_info
              || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
              copy(p,info,q,sizeofW(StgInd),gen_no);
              return;
          }
          ASSERT(i != &stg_IND_info);
      }
      q = r;
      *p = r;
      goto loop;
  }

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
  case TVAR:
  case BLOCKING_QUEUE:
  case WEAK:
  case PRIM:
  case MUT_PRIM:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    *p = q;
    goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case UPDATE_FRAME:
  case UNDERFLOW_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
      return;

  case SMALL_MUT_ARR_PTRS_CLEAN:
  case SMALL_MUT_ARR_PTRS_DIRTY:
  case SMALL_MUT_ARR_PTRS_FROZEN:
  case SMALL_MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
      return;

  case TSO:
      copy(p,info,q,sizeofW(StgTSO),gen_no);
      return;

  case STACK:
    {
      StgStack *stack = (StgStack *)q;

      /* To evacuate a small STACK, we need to adjust the stack pointer
       */
      {
          StgStack *new_stack;
          StgPtr r, s;
          rtsBool mine;

          mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                          sizeofW(StgStack), gen_no);
          if (mine) {
              new_stack = (StgStack *)*p;
              move_STACK(stack, new_stack);
              for (r = stack->sp, s = new_stack->sp;
                   r < stack->stack + stack->stack_size;) {
                  *s++ = *r++;
              }
          }
          return;
      }
    }

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
      return;

  default:
    barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}
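
A note on the pattern above: every branch of the switch reduces to the same three steps — work out the closure's size in words, copy that many words into to-space, and leave a tagged forwarding pointer in the old header so that later evacuations of the same object simply follow the forwarder. A minimal standalone sketch of that pattern (Word, Closure, FWD_TAG and copy_closure are illustrative stand-ins, not the RTS definitions):

#include <stdint.h>
#include <string.h>

typedef uintptr_t Word;

typedef struct {
    Word info;              /* info pointer, or forwarding pointer once copied */
    Word payload[];         /* size varies by closure type */
} Closure;

#define FWD_TAG ((Word)1)   /* assumed low-bit tag marking a forwarding pointer */

static Word   to_space[4096];   /* toy to-space (no overflow check here) */
static size_t to_hp = 0;        /* bump pointer, in words */

/* Copy 'size_w' words of 'src' into to-space and forward the old copy. */
static Closure *copy_closure(Closure *src, size_t size_w)
{
    Word *dst = &to_space[to_hp];
    to_hp += size_w;
    memcpy(dst, src, size_w * sizeof(Word));
    src->info = (Word)dst | FWD_TAG;   /* later references follow this pointer */
    return (Closure *)dst;
}

int main(void)
{
    Word obj[3] = { 0xC0FFEE, 1, 2 };              /* toy 3-word closure */
    Closure *to = copy_closure((Closure *)obj, 3);
    return ((obj[0] & FWD_TAG) && to->payload[0] == 1) ? 0 : 1;
}
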
Example #24
StgOffset 
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
	ASSERT(closure_STATIC(p));
    } else {
	ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
      { 
	StgMVar *mvar = (StgMVar *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
	return sizeofW(StgMVar);
      }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
      {
	nat i;
	for (i = 0; i < info->layout.payload.ptrs; i++) {
	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
	}
	return thunk_sizeW_fromITBL(info);
      }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
	{
	    nat i;
	    for (i = 0; i < info->layout.payload.ptrs; i++) {
		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
	    }
	    return sizeW_fromITBL(info);
	}

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE 
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE || 
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
	StgBCO *bco = (StgBCO *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
	return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
      ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
      return sizeW_fromITBL(info);

    case WEAK:
      /* deal with these specially - the info table isn't
       * representative of the actual layout.
       */
      { StgWeak *w = (StgWeak *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
	if (w->link) {
	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
	}
	return sizeW_fromITBL(info);
      }

    case THUNK_SELECTOR:
	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
	    return THUNK_SELECTOR_sizeW();

    case IND:
	{ 
  	    /* we don't expect to see any of these after GC
	     * but they might appear during execution
	     */
	    StgInd *ind = (StgInd *)p;
	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
	    return sizeofW(StgInd);
	}

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
	    barf("checkClosure: stack frame");

    case AP:
    {
	StgAP* ap = (StgAP *)p;
	checkPAP (ap->fun, ap->payload, ap->n_args);
	return ap_sizeW(ap);
    }

    case PAP:
    {
	StgPAP* pap = (StgPAP *)p;
	checkPAP (pap->fun, pap->payload, pap->n_args);
	return pap_sizeW(pap);
    }

    case AP_STACK:
    { 
	StgAP_STACK *ap = (StgAP_STACK *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
	    return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	{
	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
	    nat i;
	    for (i = 0; i < a->ptrs; i++) {
		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
	    }
	    return mut_arr_ptrs_sizeW(a);
	}

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
      {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc -> next_entry_idx; i ++) {
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
      }
      
    default:
	    barf("checkClosure (closure type %d)", info->type);
    }
}
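
checkClosure both validates a closure and returns its size in words, which is what lets the sanity checker walk a heap block linearly: check the object under the scan pointer, then advance by the reported size. A standalone sketch of that scan loop over a toy object format (check_one, check_region and the size-in-word-zero layout are assumptions made up for the sketch):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

typedef uintptr_t Word;

/* Toy object layout: word 0 of every object holds its total size in words. */
static size_t check_one(const Word *obj)
{
    size_t size_w = (size_t)obj[0];
    assert(size_w >= 1);          /* every object covers at least its header */
    return size_w;
}

/* Walk a region [start, end) object by object, the way the sanity checker
 * uses the sizes returned by checkClosure. */
static void check_region(const Word *start, const Word *end)
{
    const Word *p = start;
    while (p < end) {
        p += check_one(p);
    }
}

int main(void)
{
    Word heap[] = { 2, 42, 3, 1, 2, 1 };   /* three toy objects: 2, 3 and 1 words */
    check_region(heap, heap + sizeof(heap) / sizeof(heap[0]));
    return 0;
}
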
Example #25
File: Weak.c  Project: A1kmm/ghc
void
scheduleFinalizers(Capability *cap, StgWeak *list)
{
    StgWeak *w;
    StgTSO *t;
    StgMutArrPtrs *arr;
    StgWord size;
    nat n, i;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = rtsTrue;
    }

    // count number of finalizers, and kill all the weak pointers first...
    n = 0;
    for (w = list; w; w = w->link) { 
	StgArrWords *farr;

	// Better not be a DEAD_WEAK at this stage; the garbage
	// collector removes DEAD_WEAKs from the weak pointer list.
	ASSERT(w->header.info != &stg_DEAD_WEAK_info);

	if (w->finalizer != &stg_NO_FINALIZER_closure) {
	    n++;
	}

	farr = (StgArrWords *)UNTAG_CLOSURE(w->cfinalizer);

	if ((StgClosure *)farr != &stg_NO_FINALIZER_closure)
	    runCFinalizer((void *)farr->payload[0],
	                  (void *)farr->payload[1],
	                  (void *)farr->payload[2],
	                  farr->payload[3]);

#ifdef PROFILING
        // A weak pointer is inherently used, so we do not need to call
        // LDV_recordDead().
	//
        // Furthermore, when PROFILING is turned on, dead weak
        // pointers are exactly as large as weak pointers, so there is
        // no need to fill the slop, either.  See stg_DEAD_WEAK_info
        // in StgMiscClosures.hc.
#endif
	SET_HDR(w, &stg_DEAD_WEAK_info, w->header.prof.ccs);
    }
	
    if (task != NULL) {
        task->running_finalizers = rtsFalse;
    }

    // No finalizers to run?
    if (n == 0) return;

    debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);

    size = n + mutArrPtrsCardTableSize(n);
    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + size);
    TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
    SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_info, CCS_SYSTEM);
    arr->ptrs = n;
    arr->size = size;

    n = 0;
    for (w = list; w; w = w->link) {
	if (w->finalizer != &stg_NO_FINALIZER_closure) {
	    arr->payload[n] = w->finalizer;
	    n++;
	}
    }
    // set all the cards to 1
    for (i = n; i < size; i++) {
        arr->payload[i] = (StgClosure *)(W_)(-1);
    }

    t = createIOThread(cap, 
		       RtsFlags.GcFlags.initialStkSize, 
		       rts_apply(cap,
			   rts_apply(cap,
			       (StgClosure *)runFinalizerBatch_closure,
			       rts_mkInt(cap,n)), 
			   (StgClosure *)arr)
	);
    scheduleThread(cap,t);
}
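
The shape of scheduleFinalizers is worth spelling out: one pass over the weak-pointer list counts the Haskell finalizers (C finalizers are run on the spot), then the finalizers are collected into a single array and that whole batch is handed to a single thread. A much-reduced standalone sketch of that count/collect/batch shape (Weak, run_batch and schedule_finalizers are invented for the sketch, not the RTS versions):

#include <stdio.h>
#include <stdlib.h>

typedef struct Weak {
    void        (*finalizer)(void);   /* NULL plays the role of NO_FINALIZER */
    struct Weak  *link;
} Weak;

static void run_batch(void (**fins)(void), size_t n)
{
    for (size_t i = 0; i < n; i++) fins[i]();   /* one worker runs them all */
}

static void schedule_finalizers(Weak *list)
{
    size_t n = 0;
    for (Weak *w = list; w != NULL; w = w->link)
        if (w->finalizer) n++;
    if (n == 0) return;                         /* no finalizers to run */

    void (**batch)(void) = malloc(n * sizeof *batch);
    if (batch == NULL) return;
    size_t i = 0;
    for (Weak *w = list; w != NULL; w = w->link)
        if (w->finalizer) batch[i++] = w->finalizer;

    run_batch(batch, n);   /* the RTS instead spawns one IO thread for the batch */
    free(batch);
}

static void hello(void) { puts("finalizer ran"); }

int main(void)
{
    Weak b = { hello, NULL }, a = { NULL, &b };
    schedule_finalizers(&a);
    return 0;
}
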
Example #26
/* ---------------------------------------------------------------------------
   Create a new thread.

   The new thread starts with the given stack size.  Before the
   scheduler can run, however, this thread needs to have a closure
   (and possibly some arguments) pushed on its stack.  See
   pushClosure() in Schedule.h.

   createGenThread() and createIOThread() (in SchedAPI.h) are
   convenient packaged versions of this function.
   ------------------------------------------------------------------------ */
StgTSO *
createThread(Capability *cap, W_ size)
{
    StgTSO *tso;
    StgStack *stack;
    nat stack_size;

    /* sched_mutex is *not* required */

    /* catch ridiculously small stack sizes */
    if (size < MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO)) {
        size = MIN_STACK_WORDS + sizeofW(StgStack) + sizeofW(StgTSO);
    }

    /* The size argument we are given includes all the per-thread
     * overheads:
     *
     *    - The TSO structure
     *    - The STACK header
     *
     * This is so that we can use a nice round power of 2 for the
     * default stack size (e.g. 1k), and if we're allocating lots of
     * threads back-to-back they'll fit nicely in a block.  It's a bit
     * of a benchmark hack, but it doesn't do any harm.
     */
    stack_size = round_to_mblocks(size - sizeofW(StgTSO));
    stack = (StgStack *)allocate(cap, stack_size);
    TICK_ALLOC_STACK(stack_size);
    SET_HDR(stack, &stg_STACK_info, cap->r.rCCCS);
    stack->stack_size   = stack_size - sizeofW(StgStack);
    stack->sp           = stack->stack + stack->stack_size;
    stack->dirty        = 1;

    tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
    TICK_ALLOC_TSO();
    SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);

    // Always start with the compiled code evaluator
    tso->what_next = ThreadRunGHC;
    tso->why_blocked  = NotBlocked;
    tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    tso->blocked_exceptions = END_BLOCKED_EXCEPTIONS_QUEUE;
    tso->bq = (StgBlockingQueue *)END_TSO_QUEUE;
    tso->flags = 0;
    tso->dirty = 1;
    tso->_link = END_TSO_QUEUE;

    tso->saved_errno = 0;
    tso->bound = NULL;
    tso->cap = cap;

    tso->stackobj       = stack;
    tso->tot_stack_size = stack->stack_size;

    ASSIGN_Int64((W_*)&(tso->alloc_limit), 0);

    tso->trec = NO_TREC;

#ifdef PROFILING
    tso->prof.cccs = CCS_MAIN;
#endif

    // put a stop frame on the stack
    stack->sp -= sizeofW(StgStopFrame);
    SET_HDR((StgClosure*)stack->sp,
            (StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);

    /* Link the new thread on the global thread list.
     */
    ACQUIRE_LOCK(&sched_mutex);
    tso->id = next_thread_id++;  // while we have the mutex
    tso->global_link = g0->threads;
    g0->threads = tso;
    RELEASE_LOCK(&sched_mutex);

    // ToDo: report the stack size in the event?
    traceEventCreateThread(cap, tso);

    return tso;
}
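
One detail worth pulling out of createThread is the size arithmetic: the requested size already includes the TSO and the STACK header, so it is first clamped to a sane minimum and the overheads are then subtracted to get the words the thread can actually use (the additional rounding the RTS performs is omitted here). A small sketch with made-up constants; TSO_WORDS, STACK_HDR_WORDS and MIN_STACK_WORDS are assumptions, not the real sizes:

#include <stdio.h>

#define TSO_WORDS        20   /* stand-in for sizeofW(StgTSO)   */
#define STACK_HDR_WORDS   4   /* stand-in for sizeofW(StgStack) */
#define MIN_STACK_WORDS  32

static size_t usable_stack_words(size_t requested_words)
{
    size_t min_total = MIN_STACK_WORDS + STACK_HDR_WORDS + TSO_WORDS;
    if (requested_words < min_total)
        requested_words = min_total;   /* catch ridiculously small stack sizes */

    size_t stack_alloc = requested_words - TSO_WORDS;   /* allocated as the STACK object */
    return stack_alloc - STACK_HDR_WORDS;               /* words the thread can actually use */
}

int main(void)
{
    printf("usable words for a 1024-word request: %zu\n", usable_stack_words(1024));
    return 0;
}
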
Example #27
void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now, if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}
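
The invariant that drives the first branch of tryWakeupThread is ownership: a capability never manipulates a TSO owned by another capability; it posts a wakeup message to the owner's inbox instead, and only unblocks threads it owns itself. A standalone sketch of that dispatch (Cap, Tso, try_wakeup and send_wakeup_message are invented stand-ins, not the RTS API):

#include <stdbool.h>
#include <stdio.h>

typedef struct Cap Cap;
typedef struct Tso { Cap *cap; bool blocked; } Tso;
struct Cap { int no; };

static void send_wakeup_message(Cap *from, Cap *to, Tso *t)
{
    (void)t;   /* a real message would carry the TSO with it */
    printf("cap %d -> cap %d: try-wakeup message\n", from->no, to->no);
}

static void try_wakeup(Cap *cap, Tso *t)
{
    if (t->cap != cap) {                    /* not ours: defer to the owner */
        send_wakeup_message(cap, t->cap, t);
        return;
    }
    t->blocked = false;                     /* ours: unblock and run it */
    printf("cap %d: thread appended to run queue\n", cap->no);
}

int main(void)
{
    Cap c0 = { 0 }, c1 = { 1 };
    Tso local = { &c0, true }, remote = { &c1, true };
    try_wakeup(&c0, &local);
    try_wakeup(&c0, &remote);
    return 0;
}
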
Example #28
// ToDo: too big to inline
static /* STATIC_INLINE */ StgPtr
thread_obj (StgInfoTable *info, StgPtr p)
{
    switch (info->type) {
    case THUNK_0_1:
	return p + sizeofW(StgThunk) + 1;

    case FUN_0_1:
    case CONSTR_0_1:
	return p + sizeofW(StgHeader) + 1;
	
    case FUN_1_0:
    case CONSTR_1_0:
	thread(&((StgClosure *)p)->payload[0]);
	return p + sizeofW(StgHeader) + 1;
	
    case THUNK_1_0:
	thread(&((StgThunk *)p)->payload[0]);
	return p + sizeofW(StgThunk) + 1;
	
    case THUNK_0_2:
	return p + sizeofW(StgThunk) + 2;

    case FUN_0_2:
    case CONSTR_0_2:
	return p + sizeofW(StgHeader) + 2;
	
    case THUNK_1_1:
	thread(&((StgThunk *)p)->payload[0]);
	return p + sizeofW(StgThunk) + 2;

    case FUN_1_1:
    case CONSTR_1_1:
	thread(&((StgClosure *)p)->payload[0]);
	return p + sizeofW(StgHeader) + 2;
	
    case THUNK_2_0:
	thread(&((StgThunk *)p)->payload[0]);
	thread(&((StgThunk *)p)->payload[1]);
	return p + sizeofW(StgThunk) + 2;

    case FUN_2_0:
    case CONSTR_2_0:
	thread(&((StgClosure *)p)->payload[0]);
	thread(&((StgClosure *)p)->payload[1]);
	return p + sizeofW(StgHeader) + 2;
	
#ifdef ALLOW_INTERPRETER
    case BCO: {
	StgBCO *bco = (StgBCO *)p;
	thread_(&bco->instrs);
	thread_(&bco->literals);
	thread_(&bco->ptrs);
	return p + bco_sizeW(bco);
    }
#endif // ALLOW_INTERPRETER

    case THUNK:
    {
	StgPtr end;
	
	end = (P_)((StgThunk *)p)->payload + 
	    info->layout.payload.ptrs;
	for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
	    thread((StgClosure **)p);
	}
	return p + info->layout.payload.nptrs;
    }

    case FUN:
    case CONSTR:
    case STABLE_NAME:
    case IND_PERM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
    {
	StgPtr end;
	
	end = (P_)((StgClosure *)p)->payload + 
	    info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    thread((StgClosure **)p);
	}
	return p + info->layout.payload.nptrs;
    }
    
    case WEAK:
    {
	StgWeak *w = (StgWeak *)p;
	thread(&w->key);
	thread(&w->value);
	thread(&w->finalizer);
	if (w->link != NULL) {
	    thread_(&w->link);
	}
	return p + sizeofW(StgWeak);
    }
    
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
	thread(&((StgInd *)p)->indirectee);
	return p + sizeofW(StgInd);

    case THUNK_SELECTOR:
    { 
	StgSelector *s = (StgSelector *)p;
	thread(&s->selectee);
	return p + THUNK_SELECTOR_sizeW();
    }
    
    case AP_STACK:
	return thread_AP_STACK((StgAP_STACK *)p);
	
    case PAP:
	return thread_PAP((StgPAP *)p);

    case AP:
	return thread_AP((StgAP *)p);
	
    case ARR_WORDS:
	return p + arr_words_sizeW((StgArrWords *)p);
	
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	// follow everything 
    {
	StgPtr next;
	
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    thread((StgClosure **)p);
	}
	return p;
    }
    
    case TSO:
	return thread_TSO((StgTSO *)p);
    
    default:
	barf("update_fwd: unknown/strange object  %d", (int)(info->type));
	return NULL;
    }
}
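
thread_obj leans on the standard closure layout: all pointer fields come first in the payload, followed by the non-pointer words, so the scanner can thread exactly layout.payload.ptrs fields and then skip layout.payload.nptrs words to land on the next object. A minimal standalone sketch of that walk (Layout, visit and scan_payload are illustrative names only):

#include <stdint.h>
#include <stddef.h>

typedef uintptr_t Word;

typedef struct {
    size_t ptrs;    /* number of pointer words in the payload  */
    size_t nptrs;   /* number of non-pointer words that follow */
} Layout;

/* visit() stands in for thread(): record or adjust one pointer field. */
static void visit(Word **field) { (void)field; }

/* Scan one object's payload; return a pointer just past the object. */
static Word *scan_payload(Word *payload, Layout l)
{
    for (size_t i = 0; i < l.ptrs; i++)
        visit((Word **)&payload[i]);     /* thread each pointer field */
    return payload + l.ptrs + l.nptrs;   /* skip the raw words        */
}

int main(void)
{
    Word obj[5] = { 0 };                 /* toy payload: 2 pointers + 3 raw words */
    Layout l = { 2, 3 };
    return (scan_payload(obj, l) == obj + 5) ? 0 : 1;
}
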
Example #29
nat messageBlackHole(Capability *cap, MessageBlackHole *msg)
{
    const StgInfoTable *info;
    StgClosure *p;
    StgBlockingQueue *bq;
    StgClosure *bh = UNTAG_CLOSURE(msg->bh);
    StgTSO *owner;

    debugTraceCap(DEBUG_sched, cap, "message: thread %d blocking on blackhole %p", 
                  (lnat)msg->tso->id, msg->bh);

    info = bh->header.info;

    // If we got this message in our inbox, it might be that the
    // BLACKHOLE has already been updated, and GC has shorted out the
    // indirection, so the pointer no longer points to a BLACKHOLE at
    // all.
    if (info != &stg_BLACKHOLE_info && 
        info != &stg_CAF_BLACKHOLE_info && 
        info != &__stg_EAGER_BLACKHOLE_info &&
        info != &stg_WHITEHOLE_info) {
        // if it is a WHITEHOLE, then a thread is in the process of
        // trying to BLACKHOLE it.  But we know that it was once a
        // BLACKHOLE, so there is at least a valid pointer in the
        // payload, so we can carry on.
        return 0;
    }

    // The blackhole must indirect to a TSO, a BLOCKING_QUEUE, an IND,
    // or a value.
loop:
    // NB. VOLATILE_LOAD(), because otherwise gcc hoists the load
    // and turns this into an infinite loop.
    p = UNTAG_CLOSURE((StgClosure*)VOLATILE_LOAD(&((StgInd*)bh)->indirectee));
    info = p->header.info;

    if (info == &stg_IND_info)
    {
        // This could happen, if e.g. we got a BLOCKING_QUEUE that has
        // just been replaced with an IND by another thread in
        // updateThunk().  In which case, if we read the indirectee
        // again we should get the value.
        goto loop;
    }

    else if (info == &stg_TSO_info)
    {
        owner = (StgTSO*)p;

#ifdef THREADED_RTS
        if (owner->cap != cap) {
            sendMessage(cap, owner->cap, (Message*)msg);
            debugTraceCap(DEBUG_sched, cap, "forwarding message to cap %d", owner->cap->no);
            return 1;
        }
#endif
        // owner is the owner of the BLACKHOLE, and resides on this
        // Capability.  msg->tso is the first thread to block on this
        // BLACKHOLE, so we first create a BLOCKING_QUEUE object.

        bq = (StgBlockingQueue*)allocate(cap, sizeofW(StgBlockingQueue));
            
        // initialise the BLOCKING_QUEUE object
        SET_HDR(bq, &stg_BLOCKING_QUEUE_DIRTY_info, CCS_SYSTEM);
        bq->bh = bh;
        bq->queue = msg;
        bq->owner = owner;
        
        msg->link = (MessageBlackHole*)END_TSO_QUEUE;
        
        // All BLOCKING_QUEUES are linked in a list on owner->bq, so
        // that we can search through them in the event that there is
        // a collision to update a BLACKHOLE and a BLOCKING_QUEUE
        // becomes orphaned (see updateThunk()).
        bq->link = owner->bq;
        owner->bq = bq;
        dirty_TSO(cap, owner); // we modified owner->bq

        // If the owner of the blackhole is currently runnable, then
        // bump it to the front of the run queue.  This gives the
        // blocked-on thread a little boost which should help unblock
        // this thread, and may avoid a pile-up of other threads
        // becoming blocked on the same BLACKHOLE (#3838).
        //
        // NB. we check to make sure that the owner is not the same as
        // the current thread, since in that case it will not be on
        // the run queue.
        if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
            removeFromRunQueue(cap, owner);
            pushOnRunQueue(cap,owner);
        }

        // point to the BLOCKING_QUEUE from the BLACKHOLE
        write_barrier(); // make the BQ visible
        ((StgInd*)bh)->indirectee = (StgClosure *)bq;
        recordClosureMutated(cap,bh); // bh was mutated

        debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d", 
                      (lnat)msg->tso->id, (lnat)owner->id);

        return 1; // blocked
    }
    else if (info == &stg_BLOCKING_QUEUE_CLEAN_info || 
             info == &stg_BLOCKING_QUEUE_DIRTY_info)
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        ASSERT(bq->bh == bh);

        owner = bq->owner;

        ASSERT(owner != END_TSO_QUEUE);

#ifdef THREADED_RTS
        if (owner->cap != cap) {
            sendMessage(cap, owner->cap, (Message*)msg);
            debugTraceCap(DEBUG_sched, cap, "forwarding message to cap %d", owner->cap->no);
            return 1;
        }
#endif

        msg->link = bq->queue;
        bq->queue = msg;
        recordClosureMutated(cap,(StgClosure*)msg);

        if (info == &stg_BLOCKING_QUEUE_CLEAN_info) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            recordClosureMutated(cap,(StgClosure*)bq);
        }

        debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d", 
                      (lnat)msg->tso->id, (lnat)owner->id);

        // See above, #3838
        if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
            removeFromRunQueue(cap, owner);
            pushOnRunQueue(cap,owner);
        }

        return 1; // blocked
    }
    
    return 0; // not blocked
}
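
Stripped of locking, write barriers and the cross-capability forwarding, messageBlackHole does one of two things: the first blocker replaces the black hole's indirectee (the owning TSO) with a freshly built blocking queue holding its message, and every later blocker pushes its message onto the front of that queue. A simplified standalone model of those two cases (Msg, Tso, BlackHole and block_on are invented for the sketch):

#include <stddef.h>

typedef struct Msg { struct Msg *link; int waiter_id; } Msg;
typedef struct Tso { int id; } Tso;

typedef enum { POINTS_TO_TSO, POINTS_TO_QUEUE } Kind;

typedef struct BlackHole {
    Kind kind;
    union {
        Tso *owner;                            /* kind == POINTS_TO_TSO   */
        struct { Tso *owner; Msg *queue; } bq; /* kind == POINTS_TO_QUEUE */
    } u;
} BlackHole;

/* Returns 1 when the message was queued and the caller should block. */
static int block_on(BlackHole *bh, Msg *msg)
{
    if (bh->kind == POINTS_TO_TSO) {
        Tso *owner = bh->u.owner;    /* first blocker: build the queue */
        msg->link = NULL;
        bh->u.bq.owner = owner;
        bh->u.bq.queue = msg;
        bh->kind = POINTS_TO_QUEUE;
        return 1;
    }
    msg->link = bh->u.bq.queue;      /* later blockers: push onto it */
    bh->u.bq.queue = msg;
    return 1;
}

int main(void)
{
    Tso owner = { 1 };
    BlackHole bh = { POINTS_TO_TSO, { .owner = &owner } };
    Msg m1 = { NULL, 2 }, m2 = { NULL, 3 };
    block_on(&bh, &m1);
    block_on(&bh, &m2);   /* queue is now m2 -> m1 */
    return 0;
}
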
Example #30
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
void
threadPaused(Capability *cap, StgTSO *tso)
{
    StgClosure *frame;
    const StgRetInfoTable *info;
    const StgInfoTable *bh_info;
    const StgInfoTable *cur_bh_info USED_IF_THREADS;
    StgClosure *bh;
    StgPtr stack_end;
    uint32_t words_to_squeeze = 0;
    uint32_t weight           = 0;
    uint32_t weight_pending   = 0;
    bool prev_was_update_frame = false;
    StgWord heuristic_says_squeeze;

    // Check to see whether we have threads waiting to raise
    // exceptions, and we're not blocking exceptions, or are blocked
    // interruptibly.  This is important; if a thread is running with
    // TSO_BLOCKEX and becomes blocked interruptibly, this is the only
    // place we ensure that the blocked_exceptions get a chance.
    maybePerformBlockedException (cap, tso);
    if (tso->what_next == ThreadKilled) { return; }

    // NB. Updatable thunks *must* be blackholed, either by eager blackholing or
    // lazy blackholing.  See Note [upd-black-hole] in sm/Scav.c.

    stack_end = tso->stackobj->stack + tso->stackobj->stack_size;

    frame = (StgClosure *)tso->stackobj->sp;

    while ((P_)frame < stack_end) {
        info = get_ret_itbl(frame);

        switch (info->i.type) {

        case UPDATE_FRAME:

            // If we've already marked this frame, then stop here.
            if (frame->header.info == (StgInfoTable *)&stg_marked_upd_frame_info) {
                if (prev_was_update_frame) {
                    words_to_squeeze += sizeofW(StgUpdateFrame);
                    weight += weight_pending;
                    weight_pending = 0;
                }
                goto end;
            }

            SET_INFO(frame, (StgInfoTable *)&stg_marked_upd_frame_info);

            bh = ((StgUpdateFrame *)frame)->updatee;
            bh_info = bh->header.info;

#if defined(THREADED_RTS)
        retry:
#endif
            // Note [suspend duplicate work]
            //
            // If the info table is a WHITEHOLE or a BLACKHOLE, then
            // another thread has claimed it (via the SET_INFO()
            // below), or is in the process of doing so.  In that case
            // we want to suspend the work that the current thread has
            // done on this thunk and wait until the other thread has
            // finished.
            //
            // If eager blackholing is taking place, it could be the
            // case that the blackhole points to the current
            // TSO. e.g.:
            //
            //    this thread                   other thread
            //    --------------------------------------------------------
            //                                  c->indirectee = other_tso;
            //                                  c->header.info = EAGER_BH
            //                                  threadPaused():
            //                                    c->header.info = WHITEHOLE
            //                                    c->indirectee = other_tso
            //    c->indirectee = this_tso;
            //    c->header.info = EAGER_BH
            //                                    c->header.info = BLACKHOLE
            //    threadPaused()
            //    *** c->header.info is now BLACKHOLE,
            //        c->indirectee  points to this_tso
            //
            // So in this case do *not* suspend the work of the
            // current thread, because the current thread will become
            // deadlocked on itself.  See #5226 for an instance of
            // this bug.
            //
            // Note that great care is required when entering computations
            // suspended by this mechanism. See Note [AP_STACKs must be eagerly
            // blackholed] for details.
            if (((bh_info == &stg_BLACKHOLE_info)
                 && ((StgInd*)bh)->indirectee != (StgClosure*)tso)
                || (bh_info == &stg_WHITEHOLE_info))
            {
                debugTrace(DEBUG_squeeze,
                           "suspending duplicate work: %ld words of stack",
                           (long)((StgPtr)frame - tso->stackobj->sp));

                // If this closure is already an indirection, then
                // suspend the computation up to this point.
                // NB. check raiseAsync() to see what happens when
                // we're in a loop (#2783).
                suspendComputation(cap,tso,(StgUpdateFrame*)frame);

                // Now drop the update frame, and arrange to return
                // the value to the frame underneath:
                tso->stackobj->sp = (StgPtr)frame + sizeofW(StgUpdateFrame) - 2;
                tso->stackobj->sp[1] = (StgWord)bh;
                ASSERT(bh->header.info != &stg_TSO_info);
                tso->stackobj->sp[0] = (W_)&stg_enter_info;

                // And continue with threadPaused; there might be
                // yet more computation to suspend.
                frame = (StgClosure *)(tso->stackobj->sp + 2);
                prev_was_update_frame = false;
                continue;
            }

            // We should never have made it here in the event of blackholes that
            // we already own; they should have been marked when we blackholed
            // them and consequently we should have stopped our stack walk
            // above.
            ASSERT(!((bh_info == &stg_BLACKHOLE_info)
                     && (((StgInd*)bh)->indirectee == (StgClosure*)tso)));

            // zero out the slop so that the sanity checker can tell
            // where the next closure is.
            OVERWRITING_CLOSURE(bh);

            // an EAGER_BLACKHOLE or CAF_BLACKHOLE gets turned into a
            // BLACKHOLE here.
#if defined(THREADED_RTS)
            // first we turn it into a WHITEHOLE to claim it, and if
            // successful we write our TSO and then the BLACKHOLE info pointer.
            cur_bh_info = (const StgInfoTable *)
                cas((StgVolatilePtr)&bh->header.info,
                    (StgWord)bh_info,
                    (StgWord)&stg_WHITEHOLE_info);

            if (cur_bh_info != bh_info) {
                bh_info = cur_bh_info;
                goto retry;
            }
#endif

            // The payload of the BLACKHOLE points to the TSO
            ((StgInd *)bh)->indirectee = (StgClosure *)tso;
            write_barrier();
            SET_INFO(bh,&stg_BLACKHOLE_info);

            // .. and we need a write barrier, since we just mutated the closure:
            recordClosureMutated(cap,bh);

            // We pretend that bh has just been created.
            LDV_RECORD_CREATE(bh);

            frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
            if (prev_was_update_frame) {
                words_to_squeeze += sizeofW(StgUpdateFrame);
                weight += weight_pending;
                weight_pending = 0;
            }
            prev_was_update_frame = true;
            break;

        case UNDERFLOW_FRAME:
        case STOP_FRAME:
            goto end;

            // normal stack frames; do nothing except advance the pointer
        default:
        {
            uint32_t frame_size = stack_frame_sizeW(frame);
            weight_pending += frame_size;
            frame = (StgClosure *)((StgPtr)frame + frame_size);
            prev_was_update_frame = false;
        }
        }
    }

end:
    // Should we squeeze or not?  Arbitrary heuristic: we squeeze if
    // the number of words we have to shift down is less than the
    // number of stack words we squeeze away by doing so.
    // The threshold was bumped from 5 to 8 as a result of #2797
    heuristic_says_squeeze = ((weight <= 8 && words_to_squeeze > 0)
                            || weight < words_to_squeeze);

    debugTrace(DEBUG_squeeze,
        "words_to_squeeze: %d, weight: %d, squeeze: %s",
        words_to_squeeze, weight,
        heuristic_says_squeeze ? "YES" : "NO");

    if (RtsFlags.GcFlags.squeezeUpdFrames == true &&
        heuristic_says_squeeze) {
        stackSqueeze(cap, tso, (StgPtr)frame);
        tso->flags |= TSO_SQUEEZED;
        // This flag tells threadStackOverflow() that the stack was
        // squeezed, because it may not need to be expanded.
    } else {
        tso->flags &= ~TSO_SQUEEZED;
    }
}
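
The squeeze decision at the end of threadPaused is compact enough to restate as a pure function: squeeze when the pending update frames are cheap to shift (weight <= 8, provided there is anything to squeeze at all), or when squeezing removes more words than it would have to shift down. A trivial standalone restatement with a few worked inputs (should_squeeze is just a stand-in name for the expression in the code above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool should_squeeze(uint32_t words_to_squeeze, uint32_t weight)
{
    return (weight <= 8 && words_to_squeeze > 0) || weight < words_to_squeeze;
}

int main(void)
{
    printf("%d\n", should_squeeze(3, 5));     /* cheap to shift: squeeze          */
    printf("%d\n", should_squeeze(100, 40));  /* removes more than it shifts: yes */
    printf("%d\n", should_squeeze(0, 12));    /* nothing to gain: no              */
    return 0;
}
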