Example #1
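This appears to be eval_thunk_selector from the GHC run-time system's storage manager. During garbage collection it tries to evaluate a THUNK_SELECTOR (a thunk that merely selects one field of a constructor, e.g. fst x) instead of copying it, so that the thunk can be updated with the selected value and the rest of the selectee reclaimed.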
static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a WHITEHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED_GC(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            // shortcut, behave as for:  if (evac) evacuate(q);
            if (evac && bd->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result; this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }


    // WHITEHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);
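        // Reading back WHITEHOLE means another thread currently
        // holds the lock; spin until the owner releases it.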

        // make sure someone else didn't get here first...
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->layout.selector_offset;
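    // field is the index of the constructor argument this selector
    // extracts (0 for the first field).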

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so evaluating this selector
        // thunk won't save any space anyway.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).

      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
          {
              // check that the field index is in range
              ASSERT(field <  (StgWord32)(info->layout.payload.ptrs +
                                          info->layout.payload.nptrs));

              // Select the right field from the constructor
              val = selectee->payload[field];

#ifdef PROFILING
              // For the purposes of LDV profiling, we have destroyed
              // the original selector thunk, p.
              if (era > 0) {
                  // Only modify the info pointer when LDV profiling is
                  // enabled.  Note that this is incompatible with parallel GC,
                  // because it would allow other threads to start evaluating
                  // the same selector thunk.
                  SET_INFO((StgClosure*)p, (StgInfoTable *)info_ptr);
                  OVERWRITING_CLOSURE((StgClosure*)p);
                  SET_INFO((StgClosure*)p, &stg_WHITEHOLE_info);
              }
#endif

              // the closure in val is now the "value" of the
              // THUNK_SELECTOR in p.  However, val may itself be a
              // THUNK_SELECTOR, in which case we want to continue
              // evaluating until we find the real value, and then
              // update the whole chain to point to the value.
          val_loop:
              info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
              if (!IS_FORWARDING_PTR(info_ptr))
              {
                  info = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr);
                  switch (info->type) {
                  case IND:
                  case IND_STATIC:
                      val = ((StgInd *)val)->indirectee;
                      goto val_loop;
                  case THUNK_SELECTOR:
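                      // val is itself a selector: link p onto the
                      // update chain and restart with val.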
                      ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                      prev_thunk_selector = p;
                      p = (StgSelector*)val;
                      goto selector_chain;
                  default:
                      break;
                  }
              }
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;

              *q = val;

              // update the other selectors in the chain *before*
              // evacuating the value.  This is necessary in the case
              // where the value turns out to be one of the selectors
              // in the chain (i.e. we have a loop), and evacuating it
              // would corrupt the chain.
              unchain_thunk_selectors(prev_thunk_selector, val);

              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
              if (evac) evacuate(q);
              return;
          }

      case IND:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case BLACKHOLE:
      {
          StgClosure *r;
          const StgInfoTable *i;
          r = ((StgInd*)selectee)->indirectee;

          // establish whether this BH has been updated, and is now an
          // indirection, as in evacuate().
          if (GET_CLOSURE_TAG(r) == 0) {
              i = r->header.info;
              if (IS_FORWARDING_PTR(i)) {
                  r = (StgClosure *)UN_FORWARDING_PTR(i);
                  i = r->header.info;
              }
              if (i == &stg_TSO_info
                  || i == &stg_WHITEHOLE_info
                  || i == &stg_BLOCKING_QUEUE_CLEAN_info
                  || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
                  goto bale_out;
              }
              ASSERT(i != &stg_IND_info);
          }

          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;
      }

      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

      case AP:
      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
          // not evaluated yet
          goto bale_out;

      default:
        barf("eval_thunk_selector: strange selectee %d",
             (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}
Example #2
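A heap-sanity routine, apparently from the same GHC RTS sources: checkClosure validates the pointer fields of a single heap closure and returns its size in words, so a caller can walk an entire heap block one closure at a time.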
StgOffset 
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
	ASSERT(closure_STATIC(p));
    } else {
	ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);
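    // Each case below checks the fields appropriate to its closure
    // type and returns the closure's size in words.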

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
      { 
	StgMVar *mvar = (StgMVar *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
	return sizeofW(StgMVar);
      }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
      {
	nat i;
	for (i = 0; i < info->layout.payload.ptrs; i++) {
	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
	}
	return thunk_sizeW_fromITBL(info);
      }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
	{
	    nat i;
	    for (i = 0; i < info->layout.payload.ptrs; i++) {
		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
	    }
	    return sizeW_fromITBL(info);
	}

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE 
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE || 
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
	StgBCO *bco = (StgBCO *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
	return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
      ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
      return sizeW_fromITBL(info);

    case WEAK:
      /* deal with these specially - the info table isn't
       * representative of the actual layout.
       */
      { StgWeak *w = (StgWeak *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
	if (w->link) {
	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
	}
	return sizeW_fromITBL(info);
      }

    case THUNK_SELECTOR:
	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
	    return THUNK_SELECTOR_sizeW();

    case IND:
	{ 
  	    /* we don't expect to see any of these after GC
	     * but they might appear during execution
	     */
	    StgInd *ind = (StgInd *)p;
	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
	    return sizeofW(StgInd);
	}

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
	    barf("checkClosure: stack frame");

    case AP:
    {
	StgAP* ap = (StgAP *)p;
	checkPAP (ap->fun, ap->payload, ap->n_args);
	return ap_sizeW(ap);
    }

    case PAP:
    {
	StgPAP* pap = (StgPAP *)p;
	checkPAP (pap->fun, pap->payload, pap->n_args);
	return pap_sizeW(pap);
    }

    case AP_STACK:
    { 
	StgAP_STACK *ap = (StgAP_STACK *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
	    return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	{
	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
	    nat i;
	    for (i = 0; i < a->ptrs; i++) {
		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
	    }
	    return mut_arr_ptrs_sizeW(a);
	}

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
      {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc -> next_entry_idx; i ++) {
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
      }
      
    default:
	    barf("checkClosure (closure type %d)", info->type);
    }
}
Example #3
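Seemingly part of the compacting collector's forward pass in the GHC RTS: thread_obj "threads" each pointer field of the object at p, linking the field into the chain rooted at the object it points to, and returns the address just past the object.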
// ToDo: too big to inline
static /* STATIC_INLINE */ StgPtr
thread_obj (StgInfoTable *info, StgPtr p)
{
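    // Thread each pointer field of the object at p and return the
    // address just past the object.  A _M_N closure type has M
    // pointer words followed by N non-pointer words in its payload.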
    switch (info->type) {
    case THUNK_0_1:
	return p + sizeofW(StgThunk) + 1;

    case FUN_0_1:
    case CONSTR_0_1:
	return p + sizeofW(StgHeader) + 1;
	
    case FUN_1_0:
    case CONSTR_1_0:
	thread(&((StgClosure *)p)->payload[0]);
	return p + sizeofW(StgHeader) + 1;
	
    case THUNK_1_0:
	thread(&((StgThunk *)p)->payload[0]);
	return p + sizeofW(StgThunk) + 1;
	
    case THUNK_0_2:
	return p + sizeofW(StgThunk) + 2;

    case FUN_0_2:
    case CONSTR_0_2:
	return p + sizeofW(StgHeader) + 2;
	
    case THUNK_1_1:
	thread(&((StgThunk *)p)->payload[0]);
	return p + sizeofW(StgThunk) + 2;

    case FUN_1_1:
    case CONSTR_1_1:
	thread(&((StgClosure *)p)->payload[0]);
	return p + sizeofW(StgHeader) + 2;
	
    case THUNK_2_0:
	thread(&((StgThunk *)p)->payload[0]);
	thread(&((StgThunk *)p)->payload[1]);
	return p + sizeofW(StgThunk) + 2;

    case FUN_2_0:
    case CONSTR_2_0:
	thread(&((StgClosure *)p)->payload[0]);
	thread(&((StgClosure *)p)->payload[1]);
	return p + sizeofW(StgHeader) + 2;
	
#ifdef ALLOW_INTERPRETER
    case BCO: {
	StgBCO *bco = (StgBCO *)p;
	thread_(&bco->instrs);
	thread_(&bco->literals);
	thread_(&bco->ptrs);
	return p + bco_sizeW(bco);
    }
#endif // ALLOW_INTERPRETER

    case THUNK:
    {
	StgPtr end;
	
	end = (P_)((StgThunk *)p)->payload + 
	    info->layout.payload.ptrs;
	for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
	    thread((StgClosure **)p);
	}
	return p + info->layout.payload.nptrs;
    }

    case FUN:
    case CONSTR:
    case STABLE_NAME:
    case IND_PERM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
    {
	StgPtr end;
	
	end = (P_)((StgClosure *)p)->payload + 
	    info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    thread((StgClosure **)p);
	}
	return p + info->layout.payload.nptrs;
    }
    
    case WEAK:
    {
	StgWeak *w = (StgWeak *)p;
	thread(&w->key);
	thread(&w->value);
	thread(&w->finalizer);
	if (w->link != NULL) {
	    thread_(&w->link);
	}
	return p + sizeofW(StgWeak);
    }
    
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
	thread(&((StgInd *)p)->indirectee);
	return p + sizeofW(StgInd);

    case THUNK_SELECTOR:
    { 
	StgSelector *s = (StgSelector *)p;
	thread(&s->selectee);
	return p + THUNK_SELECTOR_sizeW();
    }
    
    case AP_STACK:
	return thread_AP_STACK((StgAP_STACK *)p);
	
    case PAP:
	return thread_PAP((StgPAP *)p);

    case AP:
	return thread_AP((StgAP *)p);
	
    case ARR_WORDS:
	return p + arr_words_sizeW((StgArrWords *)p);
	
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	// follow everything 
    {
	StgPtr next;
	
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    thread((StgClosure **)p);
	}
	return p;
    }
    
    case TSO:
	return thread_TSO((StgTSO *)p);
    
    default:
	barf("update_fwd: unknown/strange object  %d", (int)(info->type));
	return NULL;
    }
}
Example #4
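The copying collector's block scavenger, again apparently from the GHC RTS: scavenge_block walks every object in a block, evacuates everything those objects point to, and records an object on the mutable list when it is mutable or still points into a younger generation.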
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
  StgPtr p, q;
  StgInfoTable *info;
  rtsBool saved_eager_promotion;
  gen_workspace *ws;

  debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
	     bd->start, bd->gen_no, bd->u.scan);

  gct->scan_bd = bd;
  gct->evac_gen_no = bd->gen_no;
  saved_eager_promotion = gct->eager_promotion;
  gct->failed_to_evac = rtsFalse;

  ws = &gct->gens[bd->gen->no];

  p = bd->u.scan;
  
  // we might be evacuating into the very object that we're
  // scavenging, so we have to check the real bd->free pointer each
  // time around the loop.
  while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

    ASSERT(bd->link == NULL);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);
    
    ASSERT(gct->thunk_selector_depth == 0);

    q = p;
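    // q remembers the start of the object; the switch below
    // advances p past it.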
    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    { 
	StgMVar *mvar = ((StgMVar *)p);
	gct->eager_promotion = rtsFalse;
	evacuate((StgClosure **)&mvar->head);
	evacuate((StgClosure **)&mvar->tail);
	evacuate((StgClosure **)&mvar->value);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    mvar->header.info = &stg_MVAR_DIRTY_info;
	} else {
	    mvar->header.info = &stg_MVAR_CLEAN_info;
	}
	p += sizeofW(StgMVar);
	break;
    }

    case FUN_2_0:
	scavenge_fun_srt(info);
	evacuate(&((StgClosure *)p)->payload[1]);
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_2_0:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[1]);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 2;
	break;

    case CONSTR_2_0:
	evacuate(&((StgClosure *)p)->payload[1]);
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
	
    case THUNK_1_0:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 1;
	break;
	
    case FUN_1_0:
	scavenge_fun_srt(info);
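	// fall through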
    case CONSTR_1_0:
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;
	
    case THUNK_0_1:
	scavenge_thunk_srt(info);
	p += sizeofW(StgThunk) + 1;
	break;
	
    case FUN_0_1:
	scavenge_fun_srt(info);
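	// fall through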
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;
	
    case THUNK_0_2:
	scavenge_thunk_srt(info);
	p += sizeofW(StgThunk) + 2;
	break;
	
    case FUN_0_2:
	scavenge_fun_srt(info);
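	// fall through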
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;
	
    case THUNK_1_1:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 2;
	break;

    case FUN_1_1:
	scavenge_fun_srt(info);
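	// fall through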
    case CONSTR_1_1:
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
	
    case FUN:
	scavenge_fun_srt(info);
	goto gen_obj;

    case THUNK:
    {
	StgPtr end;

	scavenge_thunk_srt(info);
	end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;
	break;
    }
	
    gen_obj:
    case CONSTR:
    case WEAK:
    case PRIM:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;
	break;
    }

    case BCO: {
	StgBCO *bco = (StgBCO *)p;
	evacuate((StgClosure **)&bco->instrs);
	evacuate((StgClosure **)&bco->literals);
	evacuate((StgClosure **)&bco->ptrs);
	p += bco_sizeW(bco);
	break;
    }

    case IND_PERM:
    case BLACKHOLE:
	evacuate(&((StgInd *)p)->indirectee);
	p += sizeofW(StgInd);
	break;

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
	gct->eager_promotion = rtsFalse;
	evacuate(&((StgMutVar *)p)->var);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
	}
	p += sizeofW(StgMutVar);
	break;

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;
        
	gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
	} else {
	    bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
	}
        p += sizeofW(StgBlockingQueue);
        break;
    }

    case THUNK_SELECTOR:
    { 
	StgSelector *s = (StgSelector *)p;
	evacuate(&s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }

    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
	StgAP_STACK *ap = (StgAP_STACK *)p;

	evacuate(&ap->fun);
	scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	p = (StgPtr)ap->payload + ap->size;
	break;
    }

    case PAP:
	p = scavenge_PAP((StgPAP *)p);
	break;

    case AP:
	p = scavenge_AP((StgAP *)p);
	break;

    case ARR_WORDS:
	// nothing to follow 
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

	if (gct->failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
	}

	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	// follow everything 
    {
        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

	// If we're going to put this object on the mutable list, then
	// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
	if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
	}
	break;
    }

    case TSO:
    { 
        scavengeTSO((StgTSO *)p);
        p += sizeofW(StgTSO);
	break;
    }

    case STACK:
    {
        StgStack *stack = (StgStack*)p;

        gct->eager_promotion = rtsFalse;

        scavenge_stack(stack->sp, stack->stack + stack->stack_size);
        stack->dirty = gct->failed_to_evac;
        p += stack_sizeW(stack);

        gct->eager_promotion = saved_eager_promotion;
        break;
    }

    case MUT_PRIM:
      {
	StgPtr end;

	gct->eager_promotion = rtsFalse;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;

	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // mutable
	break;
      }

    case TREC_CHUNK:
      {
	StgWord i;
	StgTRecChunk *tc = ((StgTRecChunk *) p);
	TRecEntry *e = &(tc -> entries[0]);
	gct->eager_promotion = rtsFalse;
	evacuate((StgClosure **)&tc->prev_chunk);
	for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
	  evacuate((StgClosure **)&e->tvar);
	  evacuate((StgClosure **)&e->expected_value);
	  evacuate((StgClosure **)&e->new_value);
	}
	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // mutable
	p += sizeofW(StgTRecChunk);
	break;
      }

    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p", 
	     info->type, p);
    }

    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or 
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (gct->failed_to_evac) {
	gct->failed_to_evac = rtsFalse;
	if (bd->gen_no > 0) {
	    recordMutableGen_GC((StgClosure *)q, bd->gen_no);
	}
    }
  }

  if (p > bd->free)  {
      gct->copied += ws->todo_free - bd->free;
      bd->free = p;
  }

  debugTrace(DEBUG_gc, "   scavenged %ld bytes",
             (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

  // update stats: this is a block that has been scavenged
  gct->scanned += bd->free - bd->u.scan;
  bd->u.scan = bd->free;

  if (bd != ws->todo_bd) {
      // we're not going to evac any more objects into
      // this block, so push it now.
      push_scanned_block(bd, ws);
  }

  gct->scan_bd = NULL;
}