Example #1
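// Sanity-check a PAP/AP payload: check each argument word against the
// function's argument bitmap, and check that the pointer tag on the
// tagged function closure matches its arity (or is zero when the arity
// does not fit in the tag bits).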
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{ 
    StgClosure *fun;
    StgFunInfoTable *fun_info;
    
    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);
    
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
	checkSmallBitmap( (StgPtr)payload, 
			  BITMAP_BITS(fun_info->f.b.bitmap), n_args );
	break;
    case ARG_GEN_BIG:
	checkLargeBitmap( (StgPtr)payload, 
			  GET_FUN_LARGE_BITMAP(fun_info), 
			  n_args );
	break;
    case ARG_BCO:
	checkLargeBitmap( (StgPtr)payload, 
			  BCO_BITMAP(fun), 
			  n_args );
	break;
    default:
	checkSmallBitmap( (StgPtr)payload, 
			  BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
			  n_args );
	break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
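The checkPAP example above, like every other snippet in this listing, leans on the RTS pointer-tagging operations. The sketch below is only an illustration of roughly how they behave, not the authoritative definitions: in GHC they live in the RTS headers (ClosureMacros.h) as inline functions, and TAG_BITS is 2 on 32-bit and 3 on 64-bit platforms, so the constant assumed here is for illustration.

/* Sketch of the pointer-tagging operations, assuming a 64-bit platform. */
#define TAG_BITS  3
#define TAG_MASK  ((1 << TAG_BITS) - 1)

/* extract the tag bits from a closure pointer */
#define GET_CLOSURE_TAG(p)  ((StgWord)(p) & TAG_MASK)
/* strip the tag, giving the real closure address */
#define UNTAG_CLOSURE(p)    ((StgClosure *)((StgWord)(p) & ~TAG_MASK))
/* re-attach a tag to an (untagged) closure pointer */
#define TAG_CLOSURE(tag, p) ((StgClosure *)((StgWord)(p) | (tag)))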
Example #2
/*
 * get at the real stuff...remove indirections.
 */
static StgClosure*
removeIndirections (StgClosure* p)
{
    StgClosure* q;

    while (1)
    {
        q = UNTAG_CLOSURE(p);

        switch (get_itbl(q)->type) {
        case IND:
        case IND_STATIC:
            p = ((StgInd *)q)->indirectee;
            continue;

        case BLACKHOLE:
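            // a BLACKHOLE that has been updated holds a tagged pointer
            // to its value in the indirectee field, so keep following it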
            p = ((StgInd *)q)->indirectee;
            if (GET_CLOSURE_TAG(p) != 0) {
                continue;
            } else {
                break;
            }

        default:
            break;
        }
        return p;
    }
}
Example #3
// Traverse a threaded chain and pull out the info pointer at the end.
// The info pointer is also tagged with the appropriate pointer tag
// for this closure, which should be attached to the pointer
// subsequently passed to unthread().
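// Entries in the chain are distinguished by their low tag bits: tag 0
// marks a word that still holds a real (untagged) info pointer, tag 1
// marks a pointer to the word holding the saved info pointer (the end of
// the chain), and tag 2 marks a pointer to the next entry in the chain.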
STATIC_INLINE StgWord
get_threaded_info( StgPtr p )
{
    StgWord q;
    
    q = (W_)GET_INFO(UNTAG_CLOSURE((StgClosure *)p));

loop:
    switch (GET_CLOSURE_TAG((StgClosure *)q)) 
    {
    case 0:
        ASSERT(LOOKS_LIKE_INFO_PTR(q));
        return q;
    case 1:
    {
        StgWord r = *(StgPtr)(q-1);
        ASSERT(LOOKS_LIKE_INFO_PTR(UNTAG_CLOSURE((StgClosure *)r)));
        return r;
    }
    case 2:
        q = *(StgPtr)(q-2);
        goto loop;
    default:
        barf("get_threaded_info");
    }
}
Example #4
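// Walk the threaded chain hanging off the closure at p: write 'free' (the
// closure's new address, already combined with its pointer tag by the
// caller) into every field on the chain, and finally put the original,
// untagged info pointer back into the closure's header word.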
STATIC_INLINE void
unthread( StgPtr p, StgWord free )
{
    StgWord q, r;
    StgPtr q0;

    q = *p;
loop:
    switch (GET_CLOSURE_TAG((StgClosure *)q))
    {
    case 0:
        // nothing to do; the chain is length zero
        return;
    case 1:
        q0 = (StgPtr)(q-1);
        r = *q0;  // r is the info ptr, tagged with the pointer-tag
        *q0 = free;
        *p = (StgWord)UNTAG_CLOSURE((StgClosure *)r);
        return;
    case 2:
        q0 = (StgPtr)(q-2);
        r = *q0;
        *q0 = free;
        q = r;
        goto loop;
    default:
        barf("unthread");
    }
}
Example #5
File: Stable.c  Project: Eufavn/ghc
/*
 * get at the real stuff...remove indirections.
 * It untags pointers before dereferencing and
 * retags the real stuff with its tag (if there
 * is any) when returning.
 *
 * ToDo: move to a better home.
 */
static
StgClosure*
removeIndirections(StgClosure* p)
{
  StgWord tag = GET_CLOSURE_TAG(p);
  StgClosure* q = UNTAG_CLOSURE(p);

  while (get_itbl(q)->type == IND ||
         get_itbl(q)->type == IND_STATIC ||
         get_itbl(q)->type == IND_PERM) {
      q = ((StgInd *)q)->indirectee;
      tag = GET_CLOSURE_TAG(q);
      q = UNTAG_CLOSURE(q);
  }

  return TAG_CLOSURE(tag,q);
}
Example #6
File: Evac.c  Project: DJacquard/ghc
void
evacuate_BLACKHOLE(StgClosure **p)
{
    bdescr *bd;
    uint32_t gen_no;
    StgClosure *q;
    const StgInfoTable *info;
    q = *p;

    // closure is required to be a heap-allocated BLACKHOLE
    ASSERT(HEAP_ALLOCED_GC(q));
    ASSERT(GET_CLOSURE_TAG(q) == 0);

    bd = Bdescr((P_)q);

    // blackholes can't be in a compact
    ASSERT((bd->flags & BF_COMPACT) == 0);

    // blackholes *can* be in a large object: when raiseAsync() creates an
    // AP_STACK the payload might be large enough to create a large object.
    // See #14497.
    if (bd->flags & BF_LARGE) {
        evacuate_large((P_)q);
        return;
    }
    if (bd->flags & BF_EVACUATED) {
        if (bd->gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = true;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }
    if (bd->flags & BF_MARKED) {
        if (!is_marked((P_)q,bd)) {
            mark((P_)q,bd);
            push_mark_stack((P_)q);
        }
        return;
    }
    gen_no = bd->dest_no;
    info = q->header.info;
    if (IS_FORWARDING_PTR(info))
    {
        StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
        *p = e;
        if (gen_no < gct->evac_gen_no) {  // optimisation
            if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = true;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return;
    }

    ASSERT(INFO_PTR_TO_STRUCT(info)->type == BLACKHOLE);
    copy(p,info,q,sizeofW(StgInd),gen_no);
}
Example #7
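// Thread the reference at p: if the target closure lives in the area
// being compacted, make its header word point back at this field (tagged
// 1 for the first reference, which takes over the saved info pointer, or
// 2 for later ones), while the field itself takes over the previous head
// of the chain.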
STATIC_INLINE void
thread (StgClosure **p)
{
    StgClosure *q0;
    StgPtr q;
    StgWord iptr;
    bdescr *bd;

    q0  = *p;
    q   = (StgPtr)UNTAG_CLOSURE(q0);

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
    
    if (HEAP_ALLOCED(q)) {
	bd = Bdescr(q); 
	// a handy way to discover whether the ptr is in the
	// compacted area of the old gen is that the EVACUATED flag
	// is zero (it's non-zero for all the other areas of live
	// memory).
	if ((bd->flags & BF_EVACUATED) == 0)
        {
            iptr = *q;
            switch (GET_CLOSURE_TAG((StgClosure *)iptr))
            {
            case 0: 
                // this is the info pointer; we are creating a new chain.
                // save the original tag at the end of the chain.
                *p = (StgClosure *)((StgWord)iptr + GET_CLOSURE_TAG(q0));
                *q = (StgWord)p + 1;
                break;
            case 1:
            case 2:
                // this is a chain of length 1 or more
                *p = (StgClosure *)iptr;
                *q = (StgWord)p + 2;
                break;
            }
        }
    }
}
Example #8
File: Compact.c  Project: Lemmih/ghc
STATIC_INLINE void
thread (StgClosure **p)
{
    StgClosure *q0;
    StgPtr q;
    StgWord iptr;
    bdescr *bd;

    q0  = *p;
    q   = (StgPtr)UNTAG_CLOSURE(q0);

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
    
    if (HEAP_ALLOCED(q)) {
	bd = Bdescr(q); 

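	// in this variant the compacted area of the old generation is
	// identified by the BF_MARKED flag (cf. the BF_EVACUATED test in
	// the previous version of thread() above)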
	if (bd->flags & BF_MARKED)
        {
            iptr = *q;
            switch (GET_CLOSURE_TAG((StgClosure *)iptr))
            {
            case 0: 
                // this is the info pointer; we are creating a new chain.
                // save the original tag at the end of the chain.
                *p = (StgClosure *)((StgWord)iptr + GET_CLOSURE_TAG(q0));
                *q = (StgWord)p + 1;
                break;
            case 1:
            case 2:
                // this is a chain of length 1 or more
                *p = (StgClosure *)iptr;
                *q = (StgWord)p + 2;
                break;
            }
        }
    }
}
Example #9
File: Evac.c  Project: 23Skidoo/ghc
static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a WHITEHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED_GC(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            // shortcut, behave as for:  if (evac) evacuate(q);
            if (evac && bd->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }


    // WHITEHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);

        // make sure someone else didn't get here first...
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).

      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
          {
              // check that the size is in range
              ASSERT(field <  (StgWord32)(info->layout.payload.ptrs +
                                          info->layout.payload.nptrs));

              // Select the right field from the constructor
              val = selectee->payload[field];

#ifdef PROFILING
              // For the purposes of LDV profiling, we have destroyed
              // the original selector thunk, p.
              if (era > 0) {
                  // Only modify the info pointer when LDV profiling is
                  // enabled.  Note that this is incompatible with parallel GC,
                  // because it would allow other threads to start evaluating
                  // the same selector thunk.
                  SET_INFO((StgClosure*)p, (StgInfoTable *)info_ptr);
                  OVERWRITING_CLOSURE((StgClosure*)p);
                  SET_INFO((StgClosure*)p, &stg_WHITEHOLE_info);
              }
#endif

              // the closure in val is now the "value" of the
              // THUNK_SELECTOR in p.  However, val may itself be a
              // THUNK_SELECTOR, in which case we want to continue
              // evaluating until we find the real value, and then
              // update the whole chain to point to the value.
          val_loop:
              info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
              if (!IS_FORWARDING_PTR(info_ptr))
              {
                  info = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr);
                  switch (info->type) {
                  case IND:
                  case IND_STATIC:
                      val = ((StgInd *)val)->indirectee;
                      goto val_loop;
                  case THUNK_SELECTOR:
                      ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                      prev_thunk_selector = p;
                      p = (StgSelector*)val;
                      goto selector_chain;
                  default:
                      break;
                  }
              }
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;

              *q = val;

              // update the other selectors in the chain *before*
              // evacuating the value.  This is necessary in the case
              // where the value turns out to be one of the selectors
              // in the chain (i.e. we have a loop), and evacuating it
              // would corrupt the chain.
              unchain_thunk_selectors(prev_thunk_selector, val);

              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
              if (evac) evacuate(q);
              return;
          }

      case IND:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case BLACKHOLE:
      {
          StgClosure *r;
          const StgInfoTable *i;
          r = ((StgInd*)selectee)->indirectee;

          // establish whether this BH has been updated, and is now an
          // indirection, as in evacuate().
          if (GET_CLOSURE_TAG(r) == 0) {
              i = r->header.info;
              if (IS_FORWARDING_PTR(i)) {
                  r = (StgClosure *)UN_FORWARDING_PTR(i);
                  i = r->header.info;
              }
              if (i == &stg_TSO_info
                  || i == &stg_WHITEHOLE_info
                  || i == &stg_BLOCKING_QUEUE_CLEAN_info
                  || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
                  goto bale_out;
              }
              ASSERT(i != &stg_IND_info);
          }

          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;
      }

      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

      case AP:
      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
          // not evaluated yet
          goto bale_out;

      default:
        barf("eval_thunk_selector: strange selectee %d",
             (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}
Example #10
File: Evac.c  Project: 23Skidoo/ghc
REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  nat gen_no;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

  if (!HEAP_ALLOCED_GC(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(FUN_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
          return;

      case CONSTR_STATIC:
          evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a gen that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          push_mark_stack((P_)q);
      }
      return;
  }

  gen_no = bd->dest_no;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (gct->evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the gct->failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    /*
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  gen is the lowest generation that the
     * current object would be evacuated to, so we only do the full
     * check if gen is too low.
     */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (gen_no < gct->evac_gen_no) {  // optimisation
          if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }

  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
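      // another thread is busy overwriting this closure (e.g. locking a
      // thunk selector); retry until a real info pointer is visible again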
      goto loop;

  // For ints and chars of low value, save space by replacing references to
  //    these with closures with references to common, shared ones in the RTS.
  //
  // * Except when compiling into Windows DLLs which don't support cross-package
  //    data references very well.
  //
  case CONSTR_0_1:
  {
#if defined(COMPILING_WINDOWS_DLL)
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
#else
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p =  TAG_CLOSURE(tag,
                            (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                           );
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                             (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                             );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      }
#endif
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
    copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
    return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case FUN:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
      return;

  case BLACKHOLE:
  {
      StgClosure *r;
      const StgInfoTable *i;
      r = ((StgInd*)q)->indirectee;
      if (GET_CLOSURE_TAG(r) == 0) {
          i = r->header.info;
          if (IS_FORWARDING_PTR(i)) {
              r = (StgClosure *)UN_FORWARDING_PTR(i);
              i = r->header.info;
          }
          if (i == &stg_TSO_info
              || i == &stg_WHITEHOLE_info
              || i == &stg_BLOCKING_QUEUE_CLEAN_info
              || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
              copy(p,info,q,sizeofW(StgInd),gen_no);
              return;
          }
          ASSERT(i != &stg_IND_info);
      }
      q = r;
      *p = r;
      goto loop;
  }

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
  case TVAR:
  case BLOCKING_QUEUE:
  case WEAK:
  case PRIM:
  case MUT_PRIM:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    *p = q;
    goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case UPDATE_FRAME:
  case UNDERFLOW_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
      return;

  case SMALL_MUT_ARR_PTRS_CLEAN:
  case SMALL_MUT_ARR_PTRS_DIRTY:
  case SMALL_MUT_ARR_PTRS_FROZEN:
  case SMALL_MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
      return;

  case TSO:
      copy(p,info,q,sizeofW(StgTSO),gen_no);
      return;

  case STACK:
    {
      StgStack *stack = (StgStack *)q;

      /* To evacuate a small STACK, we need to adjust the stack pointer
       */
      {
          StgStack *new_stack;
          StgPtr r, s;
          rtsBool mine;

          mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                          sizeofW(StgStack), gen_no);
          if (mine) {
              new_stack = (StgStack *)*p;
              move_STACK(stack, new_stack);
              for (r = stack->sp, s = new_stack->sp;
                   r < stack->stack + stack->stack_size;) {
                  *s++ = *r++;
              }
          }
          return;
      }
    }

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
      return;

  default:
    barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}
Example #11
File: GCAux.c  Project: chansuke/ghc
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;
  StgWord tag;
  StgClosure *q;

  while (1) {
    /* The tag and the pointer are split, to be merged later when needed. */
    tag = GET_CLOSURE_TAG(p);
    q = UNTAG_CLOSURE(p);

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    // ignore static closures 
    //
    // ToDo: This means we never look through IND_STATIC, which means
    // isRetainer needs to handle the IND_STATIC case rather than
    // raising an error.
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED_GC(q)) {
	return p;
    }

    // ignore closures in generations that we're not collecting. 
    bd = Bdescr((P_)q);

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
	return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_MARKED) && is_marked((P_)q,bd)) {
	return p;
    }

    info = q->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // alive! 
        return TAG_CLOSURE(tag,(StgClosure*)UN_FORWARDING_PTR(info));
    }

    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
      // follow indirections 
      p = ((StgInd *)q)->indirectee;
      continue;

    case BLACKHOLE:
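        // a tagged indirectee means the BLACKHOLE has been updated with a
        // value, so follow it; an untagged one means it is still under
        // evaluation, and this check treats it as dead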
        p = ((StgInd*)q)->indirectee;
        if (GET_CLOSURE_TAG(p) != 0) {
            continue;
        } else {
            return NULL;
        }

    default:
      // dead. 
      return NULL;
    }
  }
}
Example #12
File: Sparks.c  Project: albertz/ghc
void
pruneSparkQueue (Capability *cap)
{ 
    SparkPool *pool;
    StgClosurePtr spark, tmp, *elements;
    nat n, pruned_sparks; // stats only
    StgWord botInd,oldBotInd,currInd; // indices in array (always < size)
    const StgInfoTable *info;
    
    n = 0;
    pruned_sparks = 0;
    
    pool = cap->sparks;
    
    // it is possible that top > bottom, indicating an empty pool.  We
    // fix that here; this is only necessary because the loop below
    // assumes it.
    if (pool->top > pool->bottom)
        pool->top = pool->bottom;

    // Take this opportunity to reset top/bottom modulo the size of
    // the array, to avoid overflow.  This is only possible because no
    // stealing is happening during GC.
    pool->bottom  -= pool->top & ~pool->moduloSize;
    pool->top     &= pool->moduloSize;
    pool->topBound = pool->top;

    debugTrace(DEBUG_sparks,
               "markSparkQueue: current spark queue len=%ld; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);

    ASSERT_WSDEQUE_INVARIANTS(pool);

    elements = (StgClosurePtr *)pool->elements;

    /* We have exclusive access to the structure here, so we can reset
       bottom and top counters, and prune invalid sparks. Contents are
       copied in-place if they are valuable, otherwise discarded. The
       routine uses "real" indices t and b, which it starts by computing
       from top and bottom modulo the size of the array.

       Copying:

       At the beginning, the pool structure can look like this:
       ( bottom % size >= top % size , no wrap-around)
                  t          b
       ___________***********_________________

       or like this ( bottom % size < top % size, wrap-around )
                  b         t
       ***********__________******************
       As we need to remove useless sparks anyway, we make one pass
       between t and b, moving valuable content to b and subsequent
       cells (wrapping around when the size is reached).

                     b      t
       ***********OOO_______XX_X__X?**********
                     ^____move?____/

       After this movement, botInd becomes the new bottom, and old
       bottom becomes the new top index, both as indices in the array
       size range.
    */
    // starting here
    currInd = (pool->top) & (pool->moduloSize); // mod

    // copies of evacuated closures go to space from botInd on
    // we keep oldBotInd to know when to stop
    oldBotInd = botInd = (pool->bottom) & (pool->moduloSize); // mod

    // on entry to loop, we are within the bounds
    ASSERT( currInd < pool->size && botInd  < pool->size );

    while (currInd != oldBotInd ) {
      /* must use != here, wrap-around at size
	 subtle: loop not entered if queue empty
       */

      /* check element at currInd. if valuable, evacuate and move to
	 botInd, otherwise move on */
      spark = elements[currInd];

      // We have to be careful here: in the parallel GC, another
      // thread might evacuate this closure while we're looking at it,
      // so grab the info pointer just once.
      if (GET_CLOSURE_TAG(spark) != 0) {
          // Tagged pointer is a value, so the spark has fizzled.  It
          // probably never happens that we get a tagged pointer in
          // the spark pool, because we would have pruned the spark
          // during the previous GC cycle if it turned out to be
          // evaluated, but it doesn't hurt to have this check for
          // robustness.
          pruned_sparks++;
          cap->sparks_fizzled++;
      } else {
          info = spark->header.info;
          if (IS_FORWARDING_PTR(info)) {
              tmp = (StgClosure*)UN_FORWARDING_PTR(info);
              /* if valuable work: shift inside the pool */
              if (closure_SHOULD_SPARK(tmp)) {
                  elements[botInd] = tmp; // keep entry (new address)
                  botInd++;
                  n++;
              } else {
                  pruned_sparks++; // discard spark
                  cap->sparks_fizzled++;
              }
          } else if (HEAP_ALLOCED(spark)) {
              if ((Bdescr((P_)spark)->flags & BF_EVACUATED)) {
                  if (closure_SHOULD_SPARK(spark)) {
                      elements[botInd] = spark; // keep entry (new address)
                      botInd++;
                      n++;
                  } else {
                      pruned_sparks++; // discard spark
                      cap->sparks_fizzled++;
                  }
              } else {
                  pruned_sparks++; // discard spark
                  cap->sparks_gcd++;
              }
          } else {
              if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
                  if (*THUNK_STATIC_LINK(spark) != NULL) {
                      elements[botInd] = spark; // keep entry (new address)
                      botInd++;
                      n++;
                  } else {
                      pruned_sparks++; // discard spark
                      cap->sparks_gcd++;
                  }
              } else {
                  pruned_sparks++; // discard spark
                  cap->sparks_fizzled++;
              }
          }
      }

      currInd++;

      // in the loop, we may reach the bounds, and instantly wrap around
      ASSERT( currInd <= pool->size && botInd <= pool->size );
      if ( currInd == pool->size ) { currInd = 0; }
      if ( botInd == pool->size )  { botInd = 0;  }

    } // while-loop over spark pool elements

    ASSERT(currInd == oldBotInd);

    pool->top = oldBotInd; // where we started writing
    pool->topBound = pool->top;

    pool->bottom = (oldBotInd <= botInd) ? botInd : (botInd + pool->size); 
    // first free place we did not use (corrected by wraparound)

    debugTrace(DEBUG_sparks, "pruned %d sparks", pruned_sparks);
    
    debugTrace(DEBUG_sparks,
               "new spark queue len=%ld; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);

    ASSERT_WSDEQUE_INVARIANTS(pool);
}
Example #13
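// Second pass of the compacting collector: walk the live (marked) objects
// of the step, unthread each object's chain so that every reference now
// holds the object's final address, and slide the object down to that
// address.  Returns the number of blocks still in use afterwards.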
static nat
update_bkwd_compact( step *stp )
{
    StgPtr p, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    nat size, free_blocks;
    StgWord iptr;

    bd = free_bd = stp->old_blocks;
    free = free_bd->start;
    free_blocks = 1;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
	p = bd->start;

	while (p < bd->free ) {

	    while ( p < bd->free && !is_marked(p,bd) ) {
		p++;
	    }
	    if (p >= bd->free) {
		break;
	    }

#if 0
    next:
	m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
	m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

	while ( p < bd->free ) {

	    if ((m & 1) == 0) {
		m >>= 1;
		p++;
		if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
		    goto next;
		} else {
		    continue;
		}
	    }
#endif

	    if (!is_marked(p+1,bd)) {
		// don't forget to update the free ptr in the block desc.
		free_bd->free = free;
		free_bd = free_bd->link;
		free = free_bd->start;
		free_blocks++;
	    }

            iptr = get_threaded_info(p);
	    unthread(p, (StgWord)free + GET_CLOSURE_TAG((StgClosure *)iptr));
	    ASSERT(LOOKS_LIKE_INFO_PTR(((StgClosure *)p)->header.info));
	    info = get_itbl((StgClosure *)p);
	    size = closure_sizeW_((StgClosure *)p,info);

	    if (free != p) {
		move(free,p,size);
	    }

	    // relocate TSOs
	    if (info->type == TSO) {
		move_TSO((StgTSO *)p, (StgTSO *)free);
	    }

	    free += size;
	    p += size;
#if 0
	    goto next;
#endif
	}
    }

    // free the remaining blocks and count what's left.
    free_bd->free = free;
    if (free_bd->link != NULL) {
	freeChain(free_bd->link);
	free_bd->link = NULL;
    }

    return free_blocks;
}
Example #14
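// Forward pass of the compacting collector over a compacted area: for
// each live (marked) object, fetch its info pointer from the threaded
// chain, thread the object's own pointer fields, work out its destination
// address, and unthread the chain built up so far so those references
// point at that destination.  An object that will not fit in the current
// destination block has its second mark bit cleared, telling the second
// pass to start a new block for it.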
static void
update_fwd_compact( bdescr *blocks )
{
    StgPtr p, q, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    nat size;
    StgWord iptr;

    bd = blocks;
    free_bd = blocks;
    free = free_bd->start;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
	p = bd->start;

	while (p < bd->free ) {

	    while ( p < bd->free && !is_marked(p,bd) ) {
		p++;
	    }
	    if (p >= bd->free) {
		break;
	    }

#if 0
    next:
	m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
	m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

	while ( p < bd->free ) {

	    if ((m & 1) == 0) {
		m >>= 1;
		p++;
		if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
		    goto next;
		} else {
		    continue;
		}
	    }
#endif

	    // Problem: we need to know the destination for this cell
	    // in order to unthread its info pointer.  But we can't
	    // know the destination without the size, because we may
	    // spill into the next block.  So we have to run down the 
	    // threaded list and get the info ptr first.
            //
            // ToDo: one possible avenue of attack is to use the fact
            // that if (p&BLOCK_MASK) >= (free&BLOCK_MASK), then we
            // definitely have enough room.  Also see bug #1147.
            iptr = get_threaded_info(p);
	    info = INFO_PTR_TO_STRUCT(UNTAG_CLOSURE((StgClosure *)iptr));

	    q = p;

	    p = thread_obj(info, p);

	    size = p - q;
	    if (free + size > free_bd->start + BLOCK_SIZE_W) {
		// unset the next bit in the bitmap to indicate that
		// this object needs to be pushed into the next
		// block.  This saves us having to run down the
		// threaded info pointer list twice during the next pass.
		unmark(q+1,bd);
		free_bd = free_bd->link;
		free = free_bd->start;
	    } else {
		ASSERT(is_marked(q+1,bd));
	    }

	    unthread(q,(StgWord)free + GET_CLOSURE_TAG((StgClosure *)iptr));
	    free += size;
#if 0
	    goto next;
#endif
	}
    }
}
Example #15
File: Scav.c  Project: bogiebro/ghc
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  const StgRetInfoTable* info;
  StgWord bitmap;
  nat size;

  /* 
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with an activation record. 
   */

  while (p < stack_end) {
    info  = get_ret_itbl((StgClosure *)p);
      
    switch (info->i.type) {
	
    case UPDATE_FRAME:
	// In SMP, we can get update frames that point to indirections
	// when two threads evaluate the same thunk.  We do attempt to
	// discover this situation in threadPaused(), but it's
	// possible that the following sequence occurs:
	//
	//        A             B
	//                  enter T
	//     enter T
	//     blackhole T
	//                  update T
	//     GC
	//
	// Now T is an indirection, and the update frame is already
	// marked on A's stack, so we won't traverse it again in
	// threadPaused().  We could traverse the whole stack again
	// before GC, but that seems like overkill.
	//
	// Scavenging this update frame as normal would be disastrous;
	// the updatee would end up pointing to the value.  So we
	// check whether the value after evacuation is a BLACKHOLE,
	// and if not, we change the update frame to an stg_enter
	// frame that simply returns the value.  Hence, blackholing is
        // compulsory (otherwise we would have to check for thunks
        // too).
        //
        // Note [upd-black-hole]
        // One slight hiccup is that the THUNK_SELECTOR machinery can
        // overwrite the updatee with an IND.  In parallel GC, this
        // could even be happening concurrently, so we can't check for
        // the IND.  Fortunately if we assume that blackholing is
        // happening (either lazy or eager), then we can be sure that
        // the updatee is never a THUNK_SELECTOR and we're ok.
        // NB. this is a new invariant: blackholing is not optional.
    {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *v;

        evacuate(&frame->updatee);
        v = frame->updatee;
        if (GET_CLOSURE_TAG(v) != 0 ||
            (get_itbl(v)->type != BLACKHOLE)) {
            // blackholing is compulsory, see above.
            frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
        }
        ASSERT(v->header.info != &stg_TSO_info);
        p += sizeofW(StgUpdateFrame);
        continue;
    }

      // small bitmap (< 32 entries, or 64 on a 64-bit machine) 
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
	bitmap = BITMAP_BITS(info->i.layout.bitmap);
	size   = BITMAP_SIZE(info->i.layout.bitmap);
	// NOTE: the payload starts immediately after the info-ptr, we
	// don't have an StgHeader in the same sense as a heap closure.
	p++;
	p = scavenge_small_bitmap(p, size, bitmap);

    follow_srt:
	if (major_gc) 
	    scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
	continue;

    case RET_BCO: {
	StgBCO *bco;
	nat size;

	p++;
	evacuate((StgClosure **)p);
	bco = (StgBCO *)*p;
	p++;
	size = BCO_BITMAP_SIZE(bco);
	scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
	p += size;
	continue;
    }

      // large bitmap (> 32 entries, or > 64 on a 64-bit machine) 
    case RET_BIG:
    {
	nat size;

	size = GET_LARGE_BITMAP(&info->i)->size;
	p++;
	scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
	p += size;
	// and don't forget to follow the SRT 
	goto follow_srt;
    }

      // Dynamic bitmap: the mask is stored on the stack, and
      // there are a number of non-pointers followed by a number
      // of pointers above the bitmapped area.  (see StgMacros.h,
      // HEAP_CHK_GEN).
    case RET_DYN:
    {
	StgWord dyn;
	dyn = ((StgRetDyn *)p)->liveness;

	// traverse the bitmap first
	bitmap = RET_DYN_LIVENESS(dyn);
	p      = (P_)&((StgRetDyn *)p)->payload[0];
	size   = RET_DYN_BITMAP_SIZE;
	p = scavenge_small_bitmap(p, size, bitmap);

	// skip over the non-ptr words
	p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
	
	// follow the ptr words
	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
	    evacuate((StgClosure **)p);
	    p++;
	}
	continue;
    }

    case RET_FUN:
    {
	StgRetFun *ret_fun = (StgRetFun *)p;
	StgFunInfoTable *fun_info;

	evacuate(&ret_fun->fun);
 	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
	p = scavenge_arg_block(fun_info, ret_fun->payload);
	goto follow_srt;
    }

    default:
	barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
    }
  }		     
}