Code example #1
File: Scav.c  Project: bogiebro/ghc
STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
	size = BITMAP_SIZE(fun_info->f.b.bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	size = GET_FUN_LARGE_BITMAP(fun_info)->size;
	scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
	size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
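	// in these bitmaps a 0 bit marks a pointer word (evacuate it)
	// and a 1 bit marks a non-pointer word (skip it)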
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		evacuate((StgClosure **)p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
Code example #2
File: Scav.c  Project: bogiebro/ghc
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
    lnat m;
    StgPtr p, q;
    rtsBool any_failed;

    any_failed = rtsFalse;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
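            // a non-zero card byte marks a region of the array that has
            // been written to and may still hold pointers we must trace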
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = rtsTrue;
                gct->failed_to_evac = rtsFalse;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
Code example #3
File: Scav.c  Project: bogiebro/ghc
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;
    
    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
	if ((bitmap & 1) != 0) {
	    evacuate(p);
	}
	i++;
	p++;
	// only fetch the next bitmap word if there are entries left;
	// otherwise we would read one word past the end of the bitmap
	if (i % BITS_IN(W_) == 0 && i < size) {
	    b++;
	    bitmap = large_srt->l.bitmap[b];
	} else {
	    bitmap = bitmap >> 1;
	}
    }
}
Code example #4
File: copy_gc.c  Project: dec9ue/copy_gc
void scavenge_small(void *obj, struct s_gc* s_gc){
	void** obj_ptr = (void**)obj;
	unsigned int nptrs = GET_NPTR(obj);
	debug("%s : obj   %08x nptr: %d (s:%d n:%d)\n",__FUNCTION__,(unsigned int)(obj),nptrs,GET_SIZE(obj),GET_NPTR(obj));
	for(;nptrs > 0;nptrs--){
		evacuate(obj_ptr,s_gc);
		obj_ptr++;
	}
}
Code example #5
File: copy_gc.c  Project: dec9ue/copy_gc
void scavenge_big(void *obj, struct s_gc* s_gc){
	struct big_bdescr* big_block = (struct big_bdescr*)GET_BLOCK(obj);
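	/* a big object's pointer count is kept in its block descriptor */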
	unsigned int nptrs = big_block->nptrs;
	void** obj_ptr = (void**)obj;
	debug("%s : obj   %08x\n",__FUNCTION__,(unsigned int)(obj));
	for(;nptrs > 0;nptrs--){
		evacuate(obj_ptr,s_gc);
		obj_ptr++;
	}
}
Code example #6
File: Evac.c  Project: 23Skidoo/ghc
STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, nat gen_no, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen_no);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#if defined(PARALLEL_GC)
    {
        const StgInfoTable *new_info;
        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
        if (new_info != info) {
#ifdef PROFILING
            // We copied this object at the same time as another
            // thread.  We'll evacuate the object again and the copy
            // we just made will be discarded at the next GC, but we
            // may have copied it after the other thread called
            // SET_EVACUAEE_FOR_LDV(), which would confuse the LDV
            // profiler when it encounters this closure in
            // processHeapClosureForDead.  So we reset the LDVW field
            // here.
            LDVW(to) = 0;
#endif
            return evacuate(p); // does the failed_to_evac stuff
        } else {
            *p = TAG_CLOSURE(tag,(StgClosure*)to);
        }
    }
#else
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    // This is safe only if we are sure that no other thread evacuates
    // the object again, so we cannot use copy_tag_nolock when PROFILING.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
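Note: the tag that copy_tag re-applies is GHC's pointer tagging: the low bits of a closure pointer encode information such as a constructor tag, so the to-space pointer written back through *p must carry the same tag bits as the original reference.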
Code example #7
File: Scav.c  Project: bogiebro/ghc
static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    lnat m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    q = (StgPtr)&a->payload[a->ptrs];
    if (p < q) {
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
Code example #8
File: Scav.c  Project: bogiebro/ghc
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
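    // one bit per word: 0 = pointer (evacuate), 1 = non-pointer (skip)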
    while (size > 0) {
	if ((bitmap & 1) == 0) {
	    evacuate((StgClosure **)p);
	}
	p++;
	bitmap = bitmap >> 1;
	size--;
    }
    return p;
}
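A quick worked example (illustrative values, not taken from the sources above): with size = 3 and bitmap = 0b110, the first iteration sees bit 0 clear and evacuates word 0 as a pointer, while the next two iterations see set bits and skip words 1 and 2 as non-pointers.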
Code example #9
File: Scav.c  Project: bogiebro/ghc
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
  nat bitmap;
  StgClosure **p;

  bitmap = srt_bitmap;
  p = srt;

  if (bitmap == (StgHalfWord)(-1)) {  
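      // an all-ones srt_bitmap is the sentinel for a large SRT, whose
      // real bitmap is stored out of line in an StgLargeSRT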
      scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
      return;
  }

  while (bitmap != 0) {
      if ((bitmap & 1) != 0) {
#if defined(COMPILING_WINDOWS_DLL)
	  // Special-case to handle references to closures hiding out in DLLs, since
	  // double indirections required to get at those. The code generator knows
	  // which is which when generating the SRT, so it stores the (indirect)
	  // reference to the DLL closure in the table by first adding one to it.
	  // We check for this here, and undo the addition before evacuating it.
	  // 
	  // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
	  // closure that's fixed at link-time, and no extra magic is required.
	  if ( (lnat)(*srt) & 0x1 ) {
	      evacuate( (StgClosure**) ((lnat) (*srt) & ~0x1));
	  } else {
	      evacuate(p);
	  }
#else
	  evacuate(p);
#endif
      }
      p++;
      bitmap = bitmap >> 1;
  }
}
Code example #10
File: Evac.c  Project: DJacquard/ghc
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of TSOs.
 */
static bool
copyPart(StgClosure **p, StgClosure *src, uint32_t size_to_reserve,
         uint32_t size_to_copy, uint32_t gen_no)
{
    StgPtr to, from;
    uint32_t i;
    StgWord info;

#if defined(PARALLEL_GC)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
#if defined(PROF_SPIN)
        whitehole_gc_spin++;
#endif
        busy_wait_nop();
        goto spin;
    }
    if (IS_FORWARDING_PTR(info)) {
        src->header.info = (const StgInfoTable *)info;
        evacuate(p); // does the failed_to_evac stuff
        return false;
    }
#else
    info = (W_)src->header.info;
#endif

    to = alloc_for_copy(size_to_reserve, gen_no);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

    write_barrier();
    src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
    *p = (StgClosure *)to;

#if defined(PROFILING)
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
#endif

    return true;
}
Code example #11
File: Scav.c  Project: bogiebro/ghc
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, j, b;
    StgWord bitmap;
    
    b = 0;

    for (i = 0; i < size; b++) {
        bitmap = large_bitmap->bitmap[b];
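        // process one bitmap word per pass: all BITS_IN(W_) bits,
        // except possibly fewer in the final partial word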
        j = stg_min(size-i, BITS_IN(W_));
        i += j;
        for (; j > 0; j--, p++) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
	    bitmap = bitmap >> 1;
        }            
    }
}
Code example #12
File: Evac.c  Project: Mikolaj/ghc
STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info, 
         StgClosure *src, nat size, nat gen_no, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen_no);
    
    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
	to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#if defined(PARALLEL_GC)
    {
        const StgInfoTable *new_info;
        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
        if (new_info != info) {
            return evacuate(p); // does the failed_to_evac stuff
        } else {
            *p = TAG_CLOSURE(tag,(StgClosure*)to);
        }
    }
#else
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
Code example #13
File: Scav.c  Project: bogiebro/ghc
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;
    
    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
	p += size;
	break;
    case ARG_BCO:
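	// byte-code objects carry their own (large) bitmap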
	scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		evacuate((StgClosure **)p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
Code example #14
File: Scav.c  Project: bogiebro/ghc
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    evacuate((StgClosure **)&tso->blocked_exceptions);
    evacuate((StgClosure **)&tso->bq);
    
    // scavenge the current transaction record
    evacuate((StgClosure **)&tso->trec);

    evacuate((StgClosure **)&tso->stackobj);

    evacuate((StgClosure **)&tso->_link);
    if (   tso->why_blocked == BlockedOnMVar
	|| tso->why_blocked == BlockedOnBlackHole
	|| tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
	) {
	evacuate(&tso->block_info.closure);
    }
#ifdef THREADED_RTS
    // in the THREADED_RTS, block_info.closure must always point to a
    // valid closure, because we assume this in throwTo().  In the
    // non-threaded RTS it might be a FD (for
    // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
    else {
        tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    }
#endif

    tso->dirty = gct->failed_to_evac;

    gct->eager_promotion = saved_eager;
}
Code example #15
File: Evac.c  Project: 23Skidoo/ghc
static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a WHITEHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED_GC(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            // shortcut, behave as for:  if (evac) evacuate(q);
            if (evac && bd->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }


    // WHITEHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);

        // make sure someone else didn't get here first...
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).

      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
          {
              // check that the size is in range
              ASSERT(field <  (StgWord32)(info->layout.payload.ptrs +
                                          info->layout.payload.nptrs));

              // Select the right field from the constructor
              val = selectee->payload[field];

#ifdef PROFILING
              // For the purposes of LDV profiling, we have destroyed
              // the original selector thunk, p.
              if (era > 0) {
                  // Only modify the info pointer when LDV profiling is
                  // enabled.  Note that this is incompatible with parallel GC,
                  // because it would allow other threads to start evaluating
                  // the same selector thunk.
                  SET_INFO((StgClosure*)p, (StgInfoTable *)info_ptr);
                  OVERWRITING_CLOSURE((StgClosure*)p);
                  SET_INFO((StgClosure*)p, &stg_WHITEHOLE_info);
              }
#endif

              // the closure in val is now the "value" of the
              // THUNK_SELECTOR in p.  However, val may itself be a
              // THUNK_SELECTOR, in which case we want to continue
              // evaluating until we find the real value, and then
              // update the whole chain to point to the value.
          val_loop:
              info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
              if (!IS_FORWARDING_PTR(info_ptr))
              {
                  info = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr);
                  switch (info->type) {
                  case IND:
                  case IND_STATIC:
                      val = ((StgInd *)val)->indirectee;
                      goto val_loop;
                  case THUNK_SELECTOR:
                      ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                      prev_thunk_selector = p;
                      p = (StgSelector*)val;
                      goto selector_chain;
                  default:
                      break;
                  }
              }
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;

              *q = val;

              // update the other selectors in the chain *before*
              // evacuating the value.  This is necessary in the case
              // where the value turns out to be one of the selectors
              // in the chain (i.e. we have a loop), and evacuating it
              // would corrupt the chain.
              unchain_thunk_selectors(prev_thunk_selector, val);

              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
              if (evac) evacuate(q);
              return;
          }

      case IND:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case BLACKHOLE:
      {
          StgClosure *r;
          const StgInfoTable *i;
          r = ((StgInd*)selectee)->indirectee;

          // establish whether this BH has been updated, and is now an
          // indirection, as in evacuate().
          if (GET_CLOSURE_TAG(r) == 0) {
              i = r->header.info;
              if (IS_FORWARDING_PTR(i)) {
                  r = (StgClosure *)UN_FORWARDING_PTR(i);
                  i = r->header.info;
              }
              if (i == &stg_TSO_info
                  || i == &stg_WHITEHOLE_info
                  || i == &stg_BLOCKING_QUEUE_CLEAN_info
                  || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
                  goto bale_out;
              }
              ASSERT(i != &stg_IND_info);
          }

          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;
      }

      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

      case AP:
      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
          // not evaluated yet
          goto bale_out;

      default:
        barf("eval_thunk_selector: strange selectee %d",
             (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}
Code example #16
File: Scav.c  Project: bogiebro/ghc
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  const StgRetInfoTable* info;
  StgWord bitmap;
  nat size;

  /* 
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with an activation record. 
   */

  while (p < stack_end) {
    info  = get_ret_itbl((StgClosure *)p);
      
    switch (info->i.type) {
	
    case UPDATE_FRAME:
	// In SMP, we can get update frames that point to indirections
	// when two threads evaluate the same thunk.  We do attempt to
	// discover this situation in threadPaused(), but it's
	// possible that the following sequence occurs:
	//
	//        A             B
	//                  enter T
	//     enter T
	//     blackhole T
	//                  update T
	//     GC
	//
	// Now T is an indirection, and the update frame is already
	// marked on A's stack, so we won't traverse it again in
	// threadPaused().  We could traverse the whole stack again
	// before GC, but that seems like overkill.
	//
	// Scavenging this update frame as normal would be disastrous;
	// the updatee would end up pointing to the value.  So we
	// check whether the value after evacuation is a BLACKHOLE,
	// and if not, we change the update frame to an stg_enter
	// frame that simply returns the value.  Hence, blackholing is
        // compulsory (otherwise we would have to check for thunks
        // too).
        //
        // Note [upd-black-hole]
        // One slight hiccup is that the THUNK_SELECTOR machinery can
        // overwrite the updatee with an IND.  In parallel GC, this
        // could even be happening concurrently, so we can't check for
        // the IND.  Fortunately if we assume that blackholing is
        // happening (either lazy or eager), then we can be sure that
        // the updatee is never a THUNK_SELECTOR and we're ok.
        // NB. this is a new invariant: blackholing is not optional.
    {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *v;

        evacuate(&frame->updatee);
        v = frame->updatee;
        if (GET_CLOSURE_TAG(v) != 0 ||
            (get_itbl(v)->type != BLACKHOLE)) {
            // blackholing is compulsory, see above.
            frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
        }
        ASSERT(v->header.info != &stg_TSO_info);
        p += sizeofW(StgUpdateFrame);
        continue;
    }

      // small bitmap (< 32 entries, or 64 on a 64-bit machine) 
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
	bitmap = BITMAP_BITS(info->i.layout.bitmap);
	size   = BITMAP_SIZE(info->i.layout.bitmap);
	// NOTE: the payload starts immediately after the info-ptr, we
	// don't have an StgHeader in the same sense as a heap closure.
	p++;
	p = scavenge_small_bitmap(p, size, bitmap);

    follow_srt:
	if (major_gc) 
	    scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
	continue;

    case RET_BCO: {
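	// skip the return address; the word after it is the BCO itself,
	// and the words after that are described by the BCO's own bitmap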
	StgBCO *bco;
	nat size;

	p++;
	evacuate((StgClosure **)p);
	bco = (StgBCO *)*p;
	p++;
	size = BCO_BITMAP_SIZE(bco);
	scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
	p += size;
	continue;
    }

      // large bitmap (> 32 entries, or > 64 on a 64-bit machine) 
    case RET_BIG:
    {
	nat size;

	size = GET_LARGE_BITMAP(&info->i)->size;
	p++;
	scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
	p += size;
	// and don't forget to follow the SRT 
	goto follow_srt;
    }

      // Dynamic bitmap: the mask is stored on the stack, and
      // there are a number of non-pointers followed by a number
      // of pointers above the bitmapped area.  (see StgMacros.h,
      // HEAP_CHK_GEN).
    case RET_DYN:
    {
	StgWord dyn;
	dyn = ((StgRetDyn *)p)->liveness;

	// traverse the bitmap first
	bitmap = RET_DYN_LIVENESS(dyn);
	p      = (P_)&((StgRetDyn *)p)->payload[0];
	size   = RET_DYN_BITMAP_SIZE;
	p = scavenge_small_bitmap(p, size, bitmap);

	// skip over the non-ptr words
	p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
	
	// follow the ptr words
	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
	    evacuate((StgClosure **)p);
	    p++;
	}
	continue;
    }

    case RET_FUN:
    {
	StgRetFun *ret_fun = (StgRetFun *)p;
	StgFunInfoTable *fun_info;

	evacuate(&ret_fun->fun);
 	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
	p = scavenge_arg_block(fun_info, ret_fun->payload);
	goto follow_srt;
    }

    default:
	barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
    }
  }
}
Code example #17
File: Scav.c  Project: bogiebro/ghc
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}
Code example #18
File: Scav.c  Project: bogiebro/ghc
static void
scavenge_static(void)
{
  StgClosure* p;
  const StgInfoTable *info;

  debugTrace(DEBUG_gc, "scavenging static objects");

  /* Always evacuate straight to the oldest generation for static
   * objects */
  gct->evac_gen_no = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
     list... */

  while (1) {
      
    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list after each evacuation...
     * (static_objects is a global)
     */
    p = gct->static_objects;
    if (p == END_OF_STATIC_LIST) {
    	  break;
    }
    
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);
    /*
    	if (info->type==RBH)
    	info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    */
    // make sure the info pointer is into text space 
    
    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    gct->static_objects = *STATIC_LINK(info,p);
    *STATIC_LINK(info,p) = gct->scavenged_static_objects;
    gct->scavenged_static_objects = p;
    
    switch (info -> type) {
      
    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	evacuate(&ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list of the oldest generation.  We
	 * leave it *on* the scavenged_static_objects list, though,
	 * in case we visit this object again.
	 */
	if (gct->failed_to_evac) {
	  gct->failed_to_evac = rtsFalse;
	  recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
	}
	break;
      }
      
    case THUNK_STATIC:
      scavenge_thunk_srt(info);
      break;

    case FUN_STATIC:
      scavenge_fun_srt(info);
      break;
      
    case CONSTR_STATIC:
      {	
	StgPtr q, next;
	
	next = (P_)p->payload + info->layout.payload.ptrs;
	// evacuate the pointers 
	for (q = (P_)p->payload; q < next; q++) {
	    evacuate((StgClosure **)q);
	}
	break;
      }
      
    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(gct->failed_to_evac == rtsFalse);
  }
}
Code example #19
File: copy_gc.c  Project: dec9ue/copy_gc
void perform_gc(struct s_arena* arena){
	struct s_gc s_gc;
	init_gc(&s_gc,arena);

	/* phase 1 mark or evac root */
	evacuate(arena->root,&s_gc);
	
	/* phase 2 process scavenge queue */
	do{
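		/* Cheney-style scavenging: keep draining copied-but-unscanned
		 * objects; evacuating their fields may enqueue more work */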
		void* scav_obj;
		scav_obj  = find_small_object(&s_gc);
		if( scav_obj != NULL){
			scavenge_small(scav_obj,&s_gc);
			continue;
		}
		scav_obj  = find_big_object(&s_gc);
		if( scav_obj != NULL){
			scavenge_big(scav_obj,&s_gc);
			continue;
		}
		/* no scavenging object */
		break;
	}while( 1 );

	debug("========================================================\n",NULL);
	debug("%s : small %016x(%d) big %016x(%d)\n",__FUNCTION__,s_gc.small_evaced,s_gc.small_evaced,s_gc.big_evaced,s_gc.big_evaced);
	debug("========================================================\n",NULL);
	
	/* phase 3 adjust objects */
	/* big dead objects : free */
	struct bdescr* block;
	block =  TAILQ_FIRST(&(arena->big_blocks));
	while(block){
		debug("-",NULL);
		struct bdescr* new_block = TAILQ_NEXT(block,link);
		debug("%s : %08x dead big\n",__FUNCTION__,(unsigned int)block);
		/* finalize */
		/* TODO if any finalizer, call here (before small blocks released)*/
		free_big_block(arena,(struct big_bdescr*)block);
		block = new_block;
	}

	/* big live objects : clear used bit & move to arena->blocks */
	TAILQ_INIT(&(arena->big_blocks));
	TAILQ_CONCAT(&(arena->big_blocks),&(s_gc.big_live_queue),link);

	block =  TAILQ_FIRST(&(arena->big_blocks));
	while(block){
		debug("+",NULL);
		struct bdescr* new_block = TAILQ_NEXT(block,link);
		debug("%s : %08x live big\n",__FUNCTION__,(unsigned int)block);
		((struct big_bdescr*)block)-> used = 0;
		block = new_block;
	}

	/* free old spaces */
	block = TAILQ_FIRST(&(arena->blocks));
	while(block){
		debug("-",NULL);
		struct bdescr* next_block = TAILQ_NEXT(block,link);
		free_single_block(arena,(struct single_bdescr*)block);
		debug("%s : %08x dead small\n",__FUNCTION__,(unsigned int)block);
		block = next_block;
	}

	/* move live small area */
	TAILQ_INIT(&(arena->blocks));
	TAILQ_CONCAT(&(arena->blocks),&(s_gc.to_space_queue),link);

#ifdef CGC_DEBUG
	block = TAILQ_FIRST(&(arena->blocks));
	while(block){
		debug("+",NULL);
		struct bdescr* next_block = TAILQ_NEXT(block,link);
		debug("%s : %08x live small\n",__FUNCTION__,(unsigned int)block);
		block = next_block;
	}
#endif
	debug("========================================================\n",NULL);
	debug("%s : end\n",__FUNCTION__);
	debug("========================================================\n",NULL);
}
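A minimal sketch of how this collector might be driven, assuming a hypothetical allocation hook (try_alloc and gc_alloc below are illustrative names, not functions from copy_gc.c):

void* gc_alloc(struct s_arena* arena, unsigned int size_in_words){
	/* try_alloc is a hypothetical hook that returns NULL when the arena is full */
	void* obj = try_alloc(arena, size_in_words);
	if(obj == NULL){
		/* collect: evacuate roots, drain the scavenge queues, free old space */
		perform_gc(arena);
		/* retry; failure here would mean the heap is genuinely exhausted */
		obj = try_alloc(arena, size_in_words);
	}
	return obj;
}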
Code example #20
File: Scav.c  Project: bogiebro/ghc
STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
Code example #21
File: Scav.c  Project: bogiebro/ghc
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    rtsBool no_luck;
    rtsBool saved_eager_promotion;
    
    saved_eager_promotion = gct->eager_promotion;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);
    
    switch (info->type) {
	
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    { 
	StgMVar *mvar = ((StgMVar *)p);
	gct->eager_promotion = rtsFalse;
	evacuate((StgClosure **)&mvar->head);
	evacuate((StgClosure **)&mvar->tail);
	evacuate((StgClosure **)&mvar->value);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    mvar->header.info = &stg_MVAR_DIRTY_info;
	} else {
	    mvar->header.info = &stg_MVAR_CLEAN_info;
	}
	break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
	StgPtr q, end;
	
	end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
	    evacuate((StgClosure **)q);
	}
	break;
    }

    case FUN:
    case FUN_1_0:			// hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case PRIM:
    case IND_PERM:
    {
	StgPtr q, end;
	
	end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
	    evacuate((StgClosure **)q);
	}
	break;
    }
    
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
	StgPtr q = p;

	gct->eager_promotion = rtsFalse;
	evacuate(&((StgMutVar *)p)->var);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
	}
	break;
    }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;
        
        gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
        gct->eager_promotion = saved_eager_promotion;
        
        if (gct->failed_to_evac) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        } else {
            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        }
        break;
    }

    case THUNK_SELECTOR:
    { 
	StgSelector *s = (StgSelector *)p;
	evacuate(&s->selectee);
	break;
    }
    
    case AP_STACK:
    {
	StgAP_STACK *ap = (StgAP_STACK *)p;

	evacuate(&ap->fun);
	scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	p = (StgPtr)ap->payload + ap->size;
	break;
    }

    case PAP:
	p = scavenge_PAP((StgPAP *)p);
	break;

    case AP:
	p = scavenge_AP((StgAP *)p);
	break;

    case ARR_WORDS:
	// nothing to follow 
	break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
	// We don't eagerly promote objects pointed to by a mutable
	// array, but if we find the array only points to objects in
	// the same or an older generation, we mark it "clean" and
	// avoid traversing it during minor GCs.
	gct->eager_promotion = rtsFalse;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

	if (gct->failed_to_evac) {
	    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
	} else {
	    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
	}

	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue;
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
	// follow everything 
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
        
	// If we're going to put this object on the mutable list, then
	// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
	if (gct->failed_to_evac) {
	    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
	} else {
	    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
	}
	break;
    }

    case TSO:
    {
	scavengeTSO((StgTSO*)p);
	break;
    }
  
    case STACK:
    {
        StgStack *stack = (StgStack*)p;

        gct->eager_promotion = rtsFalse;

        scavenge_stack(stack->sp, stack->stack + stack->stack_size);
        stack->dirty = gct->failed_to_evac;

        gct->eager_promotion = saved_eager_promotion;
        break;
    }

    case MUT_PRIM:
    {
	StgPtr end;
        
	gct->eager_promotion = rtsFalse;
        
	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}

	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // mutable
	break;

    }

    case TREC_CHUNK:
      {
	StgWord i;
	StgTRecChunk *tc = ((StgTRecChunk *) p);
	TRecEntry *e = &(tc -> entries[0]);
	gct->eager_promotion = rtsFalse;
	evacuate((StgClosure **)&tc->prev_chunk);
	for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
	  evacuate((StgClosure **)&e->tvar);
	  evacuate((StgClosure **)&e->expected_value);
	  evacuate((StgClosure **)&e->new_value);
	}
	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // mutable
	break;
      }

    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
    case BLACKHOLE:
    case IND_STATIC:
	evacuate(&((StgInd *)p)->indirectee);

#if 0 && defined(DEBUG)
      if (RtsFlags.DebugFlags.gc) 
      /* Debugging code to print out the size of the thing we just
       * promoted 
       */
      { 
	StgPtr start = gen->scan;
	bdescr *start_bd = gen->scan_bd;
	nat size = 0;
	scavenge(&gen);
	if (start_bd != gen->scan_bd) {
	  size += (P_)BLOCK_ROUND_UP(start) - start;
	  start_bd = start_bd->link;
	  while (start_bd != gen->scan_bd) {
	    size += BLOCK_SIZE_W;
	    start_bd = start_bd->link;
	  }
	  size += gen->scan -
	    (P_)BLOCK_ROUND_DOWN(gen->scan);
	} else {
	  size = gen->scan - start;
	}
	debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
      }
#endif
      break;

    default:
	barf("scavenge_one: strange object %d", (int)(info->type));
    }    

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
Code example #22
File: Scav.c  Project: bogiebro/ghc
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    rtsBool saved_eager_promotion;

    gct->evac_gen_no = oldest_gen->no;
    saved_eager_promotion = gct->eager_promotion;

    while ((p = pop_mark_stack())) {
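	// each mark-stack entry is a closure whose fields still need scavenging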

	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
	info = get_itbl((StgClosure *)p);
	
	q = p;
        switch (info->type) {
	    
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        { 
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;
            
            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

	case FUN_2_0:
	    scavenge_fun_srt(info);
	    evacuate(&((StgClosure *)p)->payload[1]);
	    evacuate(&((StgClosure *)p)->payload[0]);
	    break;

	case THUNK_2_0:
	    scavenge_thunk_srt(info);
	    evacuate(&((StgThunk *)p)->payload[1]);
	    evacuate(&((StgThunk *)p)->payload[0]);
	    break;

	case CONSTR_2_0:
	    evacuate(&((StgClosure *)p)->payload[1]);
	    evacuate(&((StgClosure *)p)->payload[0]);
	    break;
	
	case FUN_1_0:
	case FUN_1_1:
	    scavenge_fun_srt(info);
	    evacuate(&((StgClosure *)p)->payload[0]);
	    break;

	case THUNK_1_0:
	case THUNK_1_1:
	    scavenge_thunk_srt(info);
	    evacuate(&((StgThunk *)p)->payload[0]);
	    break;

	case CONSTR_1_0:
	case CONSTR_1_1:
	    evacuate(&((StgClosure *)p)->payload[0]);
	    break;
	
	case FUN_0_1:
	case FUN_0_2:
	    scavenge_fun_srt(info);
	    break;

	case THUNK_0_1:
	case THUNK_0_2:
	    scavenge_thunk_srt(info);
	    break;

	case CONSTR_0_1:
	case CONSTR_0_2:
	    break;
	
	case FUN:
	    scavenge_fun_srt(info);
	    goto gen_obj;

	case THUNK:
	{
	    StgPtr end;
	    
	    scavenge_thunk_srt(info);
	    end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
		evacuate((StgClosure **)p);
	    }
	    break;
	}
	
	gen_obj:
	case CONSTR:
	case WEAK:
	case PRIM:
	{
	    StgPtr end;
	    
	    end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		evacuate((StgClosure **)p);
	    }
	    break;
	}

	case BCO: {
	    StgBCO *bco = (StgBCO *)p;
	    evacuate((StgClosure **)&bco->instrs);
	    evacuate((StgClosure **)&bco->literals);
	    evacuate((StgClosure **)&bco->ptrs);
	    break;
	}

	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;

	case IND:
        case BLACKHOLE:
	    evacuate(&((StgInd *)p)->indirectee);
	    break;

	case MUT_VAR_CLEAN:
	case MUT_VAR_DIRTY: {
	    gct->eager_promotion = rtsFalse;
	    evacuate(&((StgMutVar *)p)->var);
	    gct->eager_promotion = saved_eager_promotion;
	    
	    if (gct->failed_to_evac) {
		((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
	    } else {
		((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
	    }
	    break;
	}

        case BLOCKING_QUEUE:
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)p;
            
            gct->eager_promotion = rtsFalse;
            evacuate(&bq->bh);
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;
            
            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }
            break;
        }

	case ARR_WORDS:
	    break;

	case THUNK_SELECTOR:
	{ 
	    StgSelector *s = (StgSelector *)p;
	    evacuate(&s->selectee);
	    break;
	}

	// A chunk of stack saved in a heap object
	case AP_STACK:
	{
	    StgAP_STACK *ap = (StgAP_STACK *)p;
	    
	    evacuate(&ap->fun);
	    scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	    break;
	}

	case PAP:
	    scavenge_PAP((StgPAP *)p);
	    break;

	case AP:
	    scavenge_AP((StgAP *)p);
	    break;
      
	case MUT_ARR_PTRS_CLEAN:
	case MUT_ARR_PTRS_DIRTY:
	    // follow everything 
	{
	    // We don't eagerly promote objects pointed to by a mutable
	    // array, but if we find the array only points to objects in
	    // the same or an older generation, we mark it "clean" and
	    // avoid traversing it during minor GCs.
	    gct->eager_promotion = rtsFalse;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

	    gct->eager_promotion = saved_eager_promotion;
	    gct->failed_to_evac = rtsTrue; // mutable anyhow.
	    break;
	}

	case MUT_ARR_PTRS_FROZEN:
	case MUT_ARR_PTRS_FROZEN0:
	    // follow everything 
	{
	    StgPtr q = p;
	    
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

	    // If we're going to put this object on the mutable list, then
	    // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
	    if (gct->failed_to_evac) {
		((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
	    } else {
		((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
	    }
	    break;
	}

	case TSO:
	{ 
            scavengeTSO((StgTSO*)p);
	    break;
	}

        case STACK:
        {
            StgStack *stack = (StgStack*)p;

            gct->eager_promotion = rtsFalse;

            scavenge_stack(stack->sp, stack->stack + stack->stack_size);
            stack->dirty = gct->failed_to_evac;

            gct->eager_promotion = saved_eager_promotion;
            break;
        }

        case MUT_PRIM:
        {
            StgPtr end;
            
            gct->eager_promotion = rtsFalse;
            
            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

	case TREC_CHUNK:
	  {
	    StgWord i;
	    StgTRecChunk *tc = ((StgTRecChunk *) p);
	    TRecEntry *e = &(tc -> entries[0]);
	    gct->eager_promotion = rtsFalse;
	    evacuate((StgClosure **)&tc->prev_chunk);
	    for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
	      evacuate((StgClosure **)&e->tvar);
	      evacuate((StgClosure **)&e->expected_value);
	      evacuate((StgClosure **)&e->new_value);
	    }
	    gct->eager_promotion = saved_eager_promotion;
	    gct->failed_to_evac = rtsTrue; // mutable
	    break;
	  }

	default:
	    barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p", 
		 info->type, p);
	}

	if (gct->failed_to_evac) {
	    gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen_no) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
	    }
	}
    } // while (p = pop_mark_stack())
}
Code example #23
File: Scav.c  Project: bogiebro/ghc
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
  StgPtr p, q;
  StgInfoTable *info;
  rtsBool saved_eager_promotion;
  gen_workspace *ws;

  debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
	     bd->start, bd->gen_no, bd->u.scan);

  gct->scan_bd = bd;
  gct->evac_gen_no = bd->gen_no;
  saved_eager_promotion = gct->eager_promotion;
  gct->failed_to_evac = rtsFalse;

  ws = &gct->gens[bd->gen->no];

  p = bd->u.scan;
  
  // we might be evacuating into the very object that we're
  // scavenging, so we have to check the real bd->free pointer each
  // time around the loop.
  while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

    ASSERT(bd->link == NULL);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);
    
    ASSERT(gct->thunk_selector_depth == 0);

    q = p;
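    // q remembers the start of this closure; p advances past it below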
    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    { 
	StgMVar *mvar = ((StgMVar *)p);
	gct->eager_promotion = rtsFalse;
	evacuate((StgClosure **)&mvar->head);
	evacuate((StgClosure **)&mvar->tail);
	evacuate((StgClosure **)&mvar->value);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    mvar->header.info = &stg_MVAR_DIRTY_info;
	} else {
	    mvar->header.info = &stg_MVAR_CLEAN_info;
	}
	p += sizeofW(StgMVar);
	break;
    }

    case FUN_2_0:
	scavenge_fun_srt(info);
	evacuate(&((StgClosure *)p)->payload[1]);
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_2_0:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[1]);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 2;
	break;

    case CONSTR_2_0:
	evacuate(&((StgClosure *)p)->payload[1]);
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
	
    case THUNK_1_0:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 1;
	break;
	
    case FUN_1_0:
	scavenge_fun_srt(info);
    case CONSTR_1_0:
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;
	
    case THUNK_0_1:
	scavenge_thunk_srt(info);
	p += sizeofW(StgThunk) + 1;
	break;
	
    case FUN_0_1:
	scavenge_fun_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;
	
    case THUNK_0_2:
	scavenge_thunk_srt(info);
	p += sizeofW(StgThunk) + 2;
	break;
	
    case FUN_0_2:
	scavenge_fun_srt(info);
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;
	
    case THUNK_1_1:
	scavenge_thunk_srt(info);
	evacuate(&((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 2;
	break;

    case FUN_1_1:
	scavenge_fun_srt(info);
    case CONSTR_1_1:
	evacuate(&((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
	
    case FUN:
	scavenge_fun_srt(info);
	goto gen_obj;

    case THUNK:
    {
	StgPtr end;

	scavenge_thunk_srt(info);
	end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;
	break;
    }
	
    gen_obj:
    case CONSTR:
    case WEAK:
    case PRIM:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;
	break;
    }

    case BCO: {
	StgBCO *bco = (StgBCO *)p;
	evacuate((StgClosure **)&bco->instrs);
	evacuate((StgClosure **)&bco->literals);
	evacuate((StgClosure **)&bco->ptrs);
	p += bco_sizeW(bco);
	break;
    }

    case IND_PERM:
    case BLACKHOLE:
	evacuate(&((StgInd *)p)->indirectee);
	p += sizeofW(StgInd);
	break;

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
	gct->eager_promotion = rtsFalse;
	evacuate(&((StgMutVar *)p)->var);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
	}
	p += sizeofW(StgMutVar);
	break;

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;
        
	gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
	gct->eager_promotion = saved_eager_promotion;

	if (gct->failed_to_evac) {
	    bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
	} else {
	    bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
	}
        p += sizeofW(StgBlockingQueue);
        break;
    }

    case THUNK_SELECTOR:
    { 
	StgSelector *s = (StgSelector *)p;
	evacuate(&s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }

    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
	StgAP_STACK *ap = (StgAP_STACK *)p;

	evacuate(&ap->fun);
	scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	p = (StgPtr)ap->payload + ap->size;
	break;
    }

    case PAP:
	p = scavenge_PAP((StgPAP *)p);
	break;

    case AP:
	p = scavenge_AP((StgAP *)p);
	break;

    case ARR_WORDS:
	// nothing to follow 
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

	if (gct->failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
	}

	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	// follow everything 
    {
        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

	// If we're going to put this object on the mutable list, then
	// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
	if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
	}
	break;
    }

    case TSO:
    { 
        scavengeTSO((StgTSO *)p);
        p += sizeofW(StgTSO);
	break;
    }

    case STACK:
    {
        StgStack *stack = (StgStack*)p;

        gct->eager_promotion = rtsFalse;

        scavenge_stack(stack->sp, stack->stack + stack->stack_size);
        stack->dirty = gct->failed_to_evac;
        p += stack_sizeW(stack);

        gct->eager_promotion = saved_eager_promotion;
        break;
    }

    case MUT_PRIM:
      {
	StgPtr end;

	gct->eager_promotion = rtsFalse;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    evacuate((StgClosure **)p);
	}
	p += info->layout.payload.nptrs;

	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // mutable
	break;
      }

    case TREC_CHUNK:
      {
	StgWord i;
	StgTRecChunk *tc = ((StgTRecChunk *) p);
	TRecEntry *e = &(tc -> entries[0]);
	gct->eager_promotion = rtsFalse;
	evacuate((StgClosure **)&tc->prev_chunk);
	for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
	  evacuate((StgClosure **)&e->tvar);
	  evacuate((StgClosure **)&e->expected_value);
	  evacuate((StgClosure **)&e->new_value);
	}
	gct->eager_promotion = saved_eager_promotion;
	gct->failed_to_evac = rtsTrue; // mutable
	p += sizeofW(StgTRecChunk);
	break;
      }

    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p", 
	     info->type, p);
    }

    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or 
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (gct->failed_to_evac) {
	gct->failed_to_evac = rtsFalse;
	if (bd->gen_no > 0) {
	    recordMutableGen_GC((StgClosure *)q, bd->gen_no);
	}
    }
  }

  if (p > bd->free)  {
      gct->copied += ws->todo_free - bd->free;
      bd->free = p;
  }

  debugTrace(DEBUG_gc, "   scavenged %ld bytes",
             (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

  // update stats: this is a block that has been scavenged
  gct->scanned += bd->free - bd->u.scan;
  bd->u.scan = bd->free;

  if (bd != ws->todo_bd) {
      // we're not going to evac any more objects into
      // this block, so push it now.
      push_scanned_block(bd, ws);
  }

  gct->scan_bd = NULL;
}