Example #1
// Traverse a threaded chain and pull out the info pointer at the end.
// The info pointer is also tagged with the appropriate pointer tag
// for this closure, which should be attached to the pointer
// subsequently passed to unthread().
STATIC_INLINE StgWord
get_threaded_info( StgPtr p )
{
    StgWord q;
    
    q = (W_)GET_INFO(UNTAG_CLOSURE((StgClosure *)p));

loop:
    switch (GET_CLOSURE_TAG((StgClosure *)q)) 
    {
    case 0:
        ASSERT(LOOKS_LIKE_INFO_PTR(q));
        return q;
    case 1:
    {
        StgWord r = *(StgPtr)(q-1);
        ASSERT(LOOKS_LIKE_INFO_PTR(UNTAG_CLOSURE((StgClosure *)r)));
        return r;
    }
    case 2:
        q = *(StgPtr)(q-2);
        goto loop;
    default:
        barf("get_threaded_info");
    }
}
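
All of these examples lean on the same convention: heap closures are word-aligned, so the low bits of a closure pointer are free to carry a small tag, and UNTAG_CLOSURE, GET_CLOSURE_TAG and TAG_CLOSURE respectively mask it off, read it, and put it back. The following standalone sketch is not the GHC definitions, just a minimal model of that round trip assuming a 2-bit tag on an aligned pointer; all names are made up.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for UNTAG_CLOSURE / GET_CLOSURE_TAG / TAG_CLOSURE. */
#define MY_TAG_MASK   ((uintptr_t)3)
#define MY_TAG(p,t)   ((void *)((uintptr_t)(p) | (uintptr_t)(t)))
#define MY_UNTAG(p)   ((void *)((uintptr_t)(p) & ~MY_TAG_MASK))
#define MY_GET_TAG(p) ((uintptr_t)(p) & MY_TAG_MASK)

int main(void)
{
    static int closure;                  /* stands in for a word-aligned heap object */
    void *tagged = MY_TAG(&closure, 2);  /* tag 2 might mean "second constructor"    */

    assert(MY_GET_TAG(tagged) == 2);               /* the tag lives in the low bits  */
    assert(MY_UNTAG(tagged) == (void *)&closure);  /* masking recovers the address   */
    return 0;
}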
Example #2
void gtc_heap_view_closure_ptrs_in_pap_payload(StgClosure *ptrs[], StgWord *nptrs, StgClosure *fun, StgClosure **payload, StgWord size) {
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    // ASSERT(fun_info->i.type != PAP);
    StgClosure **p = payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        gtc_heap_view_closure_ptrs_in_large_bitmap(ptrs, nptrs, payload, GET_FUN_LARGE_BITMAP(fun_info), size);
        break;
    case ARG_BCO:
        gtc_heap_view_closure_ptrs_in_large_bitmap(ptrs, nptrs, payload, BCO_BITMAP(fun), size);
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                ptrs[(*nptrs)++] = *p;
            }
            bitmap = bitmap >> 1;
            p++;
            size--;
        }
        break;
    }
}
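
The default branch above shows the small-bitmap convention that several later examples repeat: each bit of the bitmap describes one payload word, and a clear bit marks a word that holds a pointer. Here is a self-contained sketch of that walk with invented names and a made-up bitmap; it illustrates the pattern and is not RTS code.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Collect the pointer words of a payload described by a small bitmap
 * (clear bit = pointer, set bit = non-pointer word). */
static size_t collect_ptrs(void **out, uintptr_t bitmap,
                           void **payload, size_t size)
{
    size_t n = 0;
    while (size > 0) {
        if ((bitmap & 1) == 0)     /* clear bit: this word is a pointer */
            out[n++] = *payload;
        bitmap >>= 1;              /* next bit describes the next word  */
        payload++;
        size--;
    }
    return n;
}

int main(void)
{
    int a, b;
    void *payload[4] = { &a, (void *)(uintptr_t)42, &b, (void *)(uintptr_t)7 };
    void *ptrs[4];

    /* bitmap 0b1010: words 1 and 3 are non-pointers */
    size_t n = collect_ptrs(ptrs, 0xA, payload, 4);
    assert(n == 2 && ptrs[0] == (void *)&a && ptrs[1] == (void *)&b);
    return 0;
}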
Example #3
STATIC_INLINE void
unthread( StgPtr p, StgWord free )
{
    StgWord q, r;
    StgPtr q0;

    q = *p;
loop:
    switch (GET_CLOSURE_TAG((StgClosure *)q))
    {
    case 0:
        // nothing to do; the chain is length zero
        return;
    case 1:
        q0 = (StgPtr)(q-1);
        r = *q0;  // r is the info ptr, tagged with the pointer-tag
        *q0 = free;
        *p = (StgWord)UNTAG_CLOSURE((StgClosure *)r);
        return;
    case 2:
        q0 = (StgPtr)(q-2);
        r = *q0;
        *q0 = free;
        q = r;
        goto loop;
    default:
        barf("unthread");
    }
}
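
Examples #1 and #3 are two ends of the same "threading" trick used by the compacting collector: every reference to an object gets linked into a chain hung off the object's info word, with the low bits of each word marking whether it is a chain link or the info pointer that ends the chain. Once the object's final address is known, one pass down the chain rewrites every reference. The sketch below is not GHC code, only a deliberately tiny model of that chain; it ignores the closure-tag bookkeeping the real code preserves, and all names are invented.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t W;

/* "object": slot 0 is the info word; static storage of W is assumed to be
 * at least 4-byte aligned, so the low 2 bits of addresses are free for tags. */
static W object[2];
static const W info = 0xdead0;   /* fake info pointer, low bits clear */

static W root_a, root_b;         /* two "references" to the object */

static void thread(W *ref)       /* link *ref into the object's chain */
{
    W *obj  = (W *)*ref;         /* the (untagged) object address */
    W  head = obj[0];            /* current chain head: info word or tagged link */
    obj[0] = (W)ref + (((head & 3) == 0) ? 1 : 2);  /* low bits mark a chain entry */
    *ref   = head;               /* the reference slot now stores the old head */
}

static void unthread(W *obj, W new_addr)  /* rewrite every reference, restore info */
{
    W q = obj[0];
    while ((q & 3) != 0) {       /* walk the chain until the info word appears */
        W *ref = (W *)(q & ~(W)3);
        q = *ref;                /* next link, or the info word at the end */
        *ref = new_addr;         /* point this reference at the new location */
    }
    obj[0] = q;                  /* put the info word back */
}

int main(void)
{
    object[0] = info;
    root_a = (W)object;
    root_b = (W)object;

    thread(&root_a);             /* chain: object -> root_a -> info           */
    thread(&root_b);             /* chain: object -> root_b -> root_a -> info */
    unthread(object, 0x1234);    /* pretend the object moved to 0x1234        */

    assert(object[0] == info && root_a == 0x1234 && root_b == 0x1234);
    printf("all references updated\n");
    return 0;
}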
Example #4
File: Sanity.c  Project: jweijers/ghc
static void
checkPAP (StgClosure *tagged_fun, StgClosure** payload, StgWord n_args)
{ 
    StgClosure *fun;
    StgFunInfoTable *fun_info;
    
    fun = UNTAG_CLOSURE(tagged_fun);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(fun));
    fun_info = get_fun_itbl(fun);
    
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
	checkSmallBitmap( (StgPtr)payload, 
			  BITMAP_BITS(fun_info->f.b.bitmap), n_args );
	break;
    case ARG_GEN_BIG:
	checkLargeBitmap( (StgPtr)payload, 
			  GET_FUN_LARGE_BITMAP(fun_info), 
			  n_args );
	break;
    case ARG_BCO:
	checkLargeBitmap( (StgPtr)payload, 
			  BCO_BITMAP(fun), 
			  n_args );
	break;
    default:
	checkSmallBitmap( (StgPtr)payload, 
			  BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
			  n_args );
	break;
    }

    ASSERT(fun_info->f.arity > TAG_MASK ? GET_CLOSURE_TAG(tagged_fun) == 0
           : GET_CLOSURE_TAG(tagged_fun) == fun_info->f.arity);
}
Example #5
File: Stable.c  Project: alexbiehl/ghc
/*
 * get at the real stuff...remove indirections.
 */
static StgClosure*
removeIndirections (StgClosure* p)
{
    StgClosure* q;

    while (1)
    {
        q = UNTAG_CLOSURE(p);

        switch (get_itbl(q)->type) {
        case IND:
        case IND_STATIC:
            p = ((StgInd *)q)->indirectee;
            continue;

        case BLACKHOLE:
            p = ((StgInd *)q)->indirectee;
            if (GET_CLOSURE_TAG(p) != 0) {
                continue;
            } else {
                break;
            }

        default:
            break;
        }
        return p;
    }
}
Example #6
File: Weak.c  Project: A1kmm/ghc
void
runAllCFinalizers(StgWeak *list)
{
    StgWeak *w;
    Task *task;

    task = myTask();
    if (task != NULL) {
        task->running_finalizers = rtsTrue;
    }

    for (w = list; w; w = w->link) {
	StgArrWords *farr;

	farr = (StgArrWords *)UNTAG_CLOSURE(w->cfinalizer);

	if ((StgClosure *)farr != &stg_NO_FINALIZER_closure)
	    runCFinalizer((void *)farr->payload[0],
	                  (void *)farr->payload[1],
	                  (void *)farr->payload[2],
	                  farr->payload[3]);
    }

    if (task != NULL) {
        task->running_finalizers = rtsFalse;
    }
}
Example #7
File: Stable.c  Project: Eufavn/ghc
/*
 * get at the real stuff...remove indirections.
 * It untags pointers before dereferencing and
 * retags the real stuff with its tag (if there
 * is any) when returning.
 *
 * ToDo: move to a better home.
 */
static
StgClosure*
removeIndirections(StgClosure* p)
{
  StgWord tag = GET_CLOSURE_TAG(p);
  StgClosure* q = UNTAG_CLOSURE(p);

  while (get_itbl(q)->type == IND ||
         get_itbl(q)->type == IND_STATIC ||
         get_itbl(q)->type == IND_PERM) {
      q = ((StgInd *)q)->indirectee;
      tag = GET_CLOSURE_TAG(q);
      q = UNTAG_CLOSURE(q);
  }

  return TAG_CLOSURE(tag,q);
}
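
This version spells out the untag/retag discipline in its comment: strip the tag before dereferencing, remember the tag of the pointer actually followed, and re-apply it to the final result. A toy rendering of the same loop is sketched below; the Node layout merely stands in for info tables and IND closures and is not how the RTS represents them.

#include <assert.h>
#include <stdint.h>

#define TAG(p,t)   ((void *)((uintptr_t)(p) | (uintptr_t)(t)))
#define UNTAG(p)   ((void *)((uintptr_t)(p) & ~(uintptr_t)3))
#define GET_TAG(p) ((uintptr_t)(p) & (uintptr_t)3)

typedef struct Node {
    int   is_indirection;    /* plays the role of the info table's closure type */
    void *indirectee;        /* only meaningful when is_indirection is set      */
} Node;

static void *remove_indirections(void *p)
{
    uintptr_t tag = GET_TAG(p);
    Node *q = UNTAG(p);                /* untag before dereferencing */

    while (q->is_indirection) {
        void *next = q->indirectee;
        tag = GET_TAG(next);           /* keep the tag of the pointer we follow */
        q = UNTAG(next);
    }
    return TAG(q, tag);                /* retag the real object */
}

int main(void)
{
    static Node value = { 0, 0 };
    static Node ind2  = { 1, 0 };
    static Node ind1  = { 1, 0 };

    ind2.indirectee = TAG(&value, 2);  /* a tagged pointer to the real object */
    ind1.indirectee = &ind2;

    void *r = remove_indirections(&ind1);
    assert(UNTAG(r) == (void *)&value && GET_TAG(r) == 2);
    return 0;
}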
Example #8
File: CNF.c  Project: goldfirere/ghc
static void
verify_mut_arr_ptrs (StgCompactNFData *str,
                     StgMutArrPtrs    *a)
{
    StgPtr p, q;

    p = (StgPtr)&a->payload[0];
    q = (StgPtr)&a->payload[a->ptrs];
    for (; p < q; p++) {
        check_object_in_compact(str, UNTAG_CLOSURE(*(StgClosure**)p));
    }

    return;
}
Example #9
File: Sanity.c  Project: jweijers/ghc
static void 
checkClosureShallow( StgClosure* p )
{
    StgClosure *q;

    q = UNTAG_CLOSURE(p);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    /* Is it a static closure? */
    if (!HEAP_ALLOCED(q)) {
	ASSERT(closure_STATIC(q));
    } else {
	ASSERT(!closure_STATIC(q));
    }
}
Example #10
File: Stable.c  Project: alexbiehl/ghc
StgWord
lookupStableName (StgPtr p)
{
  StgWord sn;
  const void* sn_tmp;

  stableLock();

  if (stable_name_free == NULL) {
    enlargeStableNameTable();
  }

  /* removing indirections increases the likelihood
   * of finding a match in the stable name hash table.
   */
  p = (StgPtr)removeIndirections((StgClosure*)p);

  // register the untagged pointer.  This just makes things simpler.
  p = (StgPtr)UNTAG_CLOSURE((StgClosure*)p);

  sn_tmp = lookupHashTable(addrToStableHash,(W_)p);
  sn = (StgWord)sn_tmp;

  if (sn != 0) {
    ASSERT(stable_name_table[sn].addr == p);
    debugTrace(DEBUG_stable, "cached stable name %ld at %p",sn,p);
    stableUnlock();
    return sn;
  }

  sn = stable_name_free - stable_name_table;
  stable_name_free  = (snEntry*)(stable_name_free->addr);
  stable_name_table[sn].addr = p;
  stable_name_table[sn].sn_obj = NULL;
  /* debugTrace(DEBUG_stable, "new stable name %d at %p\n",sn,p); */

  /* add the new stable name to the hash table */
  insertHashTable(addrToStableHash, (W_)p, (void *)sn);

  stableUnlock();

  return sn;
}
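
The detail worth noticing here is that the pointer is both shorted past indirections and untagged before it is used as the hash key, so differently-tagged references to the same closure end up with the same stable name. The sketch below illustrates only that policy; the tiny linear-scan table is made up and has nothing to do with the real snEntry / hash-table machinery.

#include <assert.h>
#include <stdint.h>

#define UNTAG(p) ((void *)((uintptr_t)(p) & ~(uintptr_t)3))

static void *keys[8];                  /* toy "stable name table" keys */
static int   n_keys = 0;

static int lookup_stable_id(void *p)
{
    p = UNTAG(p);                      /* always register the untagged pointer */
    for (int i = 0; i < n_keys; i++)   /* hit: reuse the existing id */
        if (keys[i] == p)
            return i;
    keys[n_keys] = p;                  /* miss: hand out a fresh id */
    return n_keys++;
}

int main(void)
{
    static int obj_a, obj_b;
    void *tagged = (void *)((uintptr_t)&obj_a | 1);   /* tagged reference to obj_a */

    int id1 = lookup_stable_id(&obj_a);
    int id2 = lookup_stable_id(tagged);   /* same object, different tag */
    int id3 = lookup_stable_id(&obj_b);

    assert(id1 == id2 && id3 != id1);
    return 0;
}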
Example #11
STATIC_INLINE StgPtr
thread_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = FUN_INFO_PTR_TO_STRUCT(UNTAG_CLOSURE((StgClosure *)
                        get_threaded_info((StgPtr)fun)));
    ASSERT(fun_info->i.type != PAP);

    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	thread_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
	p += size;
	break;
#ifdef ALLOW_INTERPRETER
    case ARG_BCO:
	thread_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
	p += size;
	break;
#endif // ALLOW_INTERPRETER
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		thread((StgClosure **)p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }

    return p;
}
Example #12
File: Sanity.c  Project: dmjio/ghc
/*
  Check the static objects list.
*/
void
checkStaticObjects ( StgClosure* static_objects )
{
  StgClosure *p = static_objects;
  StgInfoTable *info;

  while (p != END_OF_STATIC_LIST) {
    checkClosure(p);
    info = get_itbl(p);
    switch (info->type) {
    case IND_STATIC:
      { 
        StgClosure *indirectee = UNTAG_CLOSURE(((StgIndStatic *)p)->indirectee);

	ASSERT(LOOKS_LIKE_CLOSURE_PTR(indirectee));
	ASSERT(LOOKS_LIKE_INFO_PTR((StgWord)indirectee->header.info));
	p = *IND_STATIC_LINK((StgClosure *)p);
	break;
      }

    case THUNK_STATIC:
      p = *THUNK_STATIC_LINK((StgClosure *)p);
      break;

    case FUN_STATIC:
      p = *FUN_STATIC_LINK((StgClosure *)p);
      break;

    case CONSTR_STATIC:
      p = *STATIC_LINK(info,(StgClosure *)p);
      break;

    default:
      barf("checkStaticObjetcs: strange closure %p (%s)", p,
#ifndef HaLVM_TARGET_OS
          info_type(p)
#else    
          "[HaLVM has no info_type()]"
#endif
          );
    }
  }
}
Example #13
STATIC_INLINE void
thread (StgClosure **p)
{
    StgClosure *q0;
    StgPtr q;
    StgWord iptr;
    bdescr *bd;

    q0  = *p;
    q   = (StgPtr)UNTAG_CLOSURE(q0);

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
    
    if (HEAP_ALLOCED(q)) {
	bd = Bdescr(q); 
	// a handy way to discover whether the ptr is into the
	// compacted area of the old gen, is that the EVACUATED flag
	// is zero (it's non-zero for all the other areas of live
	// memory).
	if ((bd->flags & BF_EVACUATED) == 0)
        {
            iptr = *q;
            switch (GET_CLOSURE_TAG((StgClosure *)iptr))
            {
            case 0: 
                // this is the info pointer; we are creating a new chain.
                // save the original tag at the end of the chain.
                *p = (StgClosure *)((StgWord)iptr + GET_CLOSURE_TAG(q0));
                *q = (StgWord)p + 1;
                break;
            case 1:
            case 2:
                // this is a chain of length 1 or more
                *p = (StgClosure *)iptr;
                *q = (StgWord)p + 2;
                break;
            }
        }
    }
}
Example #14
File: Sparks.c  Project: albertz/ghc
StgInt
newSpark (StgRegTable *reg, StgClosure *p)
{
    Capability *cap = regTableToCapability(reg);
    SparkPool *pool = cap->sparks;

    /* I am not sure whether this is the right thing to do.
     * Maybe it is better to exploit the tag information
     * instead of throwing it away?
     */
    p = UNTAG_CLOSURE(p);

    if (closure_SHOULD_SPARK(p)) {
        pushWSDeque(pool,p);
        cap->sparks_created++;
    } else {
        cap->sparks_dud++;
    }

    return 1;
}
Example #15
File: Compact.c  Project: Lemmih/ghc
STATIC_INLINE void
thread (StgClosure **p)
{
    StgClosure *q0;
    StgPtr q;
    StgWord iptr;
    bdescr *bd;

    q0  = *p;
    q   = (StgPtr)UNTAG_CLOSURE(q0);

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
    
    if (HEAP_ALLOCED(q)) {
	bd = Bdescr(q); 

	if (bd->flags & BF_MARKED)
        {
            iptr = *q;
            switch (GET_CLOSURE_TAG((StgClosure *)iptr))
            {
            case 0: 
                // this is the info pointer; we are creating a new chain.
                // save the original tag at the end of the chain.
                *p = (StgClosure *)((StgWord)iptr + GET_CLOSURE_TAG(q0));
                *q = (StgWord)p + 1;
                break;
            case 1:
            case 2:
                // this is a chain of length 1 or more
                *p = (StgClosure *)iptr;
                *q = (StgWord)p + 2;
                break;
            }
        }
    }
}
Example #16
File: Scav.c  Project: bogiebro/ghc
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;
    
    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
	p += size;
	break;
    case ARG_BCO:
	scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		evacuate((StgClosure **)p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
Example #17
File: Messages.c  Project: Eufavn/ghc
StgTSO * blackHoleOwner (StgClosure *bh)
{
    const StgInfoTable *info;
    StgClosure *p;

    info = bh->header.info;

    if (info != &stg_BLACKHOLE_info &&
        info != &stg_CAF_BLACKHOLE_info && 
        info != &__stg_EAGER_BLACKHOLE_info &&
        info != &stg_WHITEHOLE_info) {
        return NULL;
    }

    // The blackhole must indirect to a TSO, a BLOCKING_QUEUE, an IND,
    // or a value.
loop:
    // NB. VOLATILE_LOAD(), because otherwise gcc hoists the load
    // and turns this into an infinite loop.
    p = UNTAG_CLOSURE((StgClosure*)VOLATILE_LOAD(&((StgInd*)bh)->indirectee));
    info = p->header.info;

    if (info == &stg_IND_info) goto loop;

    else if (info == &stg_TSO_info)
    {
        return (StgTSO*)p;
    }
    else if (info == &stg_BLOCKING_QUEUE_CLEAN_info || 
             info == &stg_BLOCKING_QUEUE_DIRTY_info)
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;
        return bq->owner;
    }
    
    return NULL; // not blocked
}
Example #18
static int
findPtrBlocks (StgPtr p, bdescr *bd, StgPtr arr[], int arr_size, int i)
{
    StgPtr q, r, end;
    for (; bd; bd = bd->link) {
        searched++;
        for (q = bd->start; q < bd->free; q++) {
            if (UNTAG_CLOSURE((StgClosure*)*q) == (StgClosure *)p) {
                if (i < arr_size) {
                    for (r = bd->start; r < bd->free; r = end) {
                        // skip over zeroed-out slop
                        while (*r == 0) r++;
                        if (!LOOKS_LIKE_CLOSURE_PTR(r)) {
                            debugBelch("%p found at %p, no closure at %p\n",
                                       p, q, r);
                            break;
                        }
                        end = r + closure_sizeW((StgClosure*)r);
                        if (q < end) {
                            debugBelch("%p = ", r);
                            printClosure((StgClosure *)r);
                            arr[i++] = r;
                            break;
                        }
                    }
                    if (r >= bd->free) {
                        debugBelch("%p found at %p, closure?", p, q);
                    }
                } else {
                    return i;
                }
            }
        }
    }
    return i;
}
Example #19
File: Evac.c  Project: 23Skidoo/ghc
REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  nat gen_no;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

  if (!HEAP_ALLOCED_GC(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(FUN_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
          return;

      case CONSTR_STATIC:
          evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a gen that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          push_mark_stack((P_)q);
      }
      return;
  }

  gen_no = bd->dest_no;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (gct->evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the gct->failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    /*
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  gen is the lowest generation that the
     * current object would be evacuated to, so we only do the full
     * check if gen is too low.
     */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (gen_no < gct->evac_gen_no) {  // optimisation
          if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }

  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      goto loop;

  // For ints and chars of low value, save space by replacing references to
  //    these with closures with references to common, shared ones in the RTS.
  //
  // * Except when compiling into Windows DLLs which don't support cross-package
  //    data references very well.
  //
  case CONSTR_0_1:
  {
#if defined(COMPILING_WINDOWS_DLL)
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
#else
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p =  TAG_CLOSURE(tag,
                            (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                           );
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                             (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                             );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      }
#endif
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
    copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
    return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case FUN:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
      return;

  case BLACKHOLE:
  {
      StgClosure *r;
      const StgInfoTable *i;
      r = ((StgInd*)q)->indirectee;
      if (GET_CLOSURE_TAG(r) == 0) {
          i = r->header.info;
          if (IS_FORWARDING_PTR(i)) {
              r = (StgClosure *)UN_FORWARDING_PTR(i);
              i = r->header.info;
          }
          if (i == &stg_TSO_info
              || i == &stg_WHITEHOLE_info
              || i == &stg_BLOCKING_QUEUE_CLEAN_info
              || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
              copy(p,info,q,sizeofW(StgInd),gen_no);
              return;
          }
          ASSERT(i != &stg_IND_info);
      }
      q = r;
      *p = r;
      goto loop;
  }

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
  case TVAR:
  case BLOCKING_QUEUE:
  case WEAK:
  case PRIM:
  case MUT_PRIM:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    *p = q;
    goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case UPDATE_FRAME:
  case UNDERFLOW_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
      return;

  case SMALL_MUT_ARR_PTRS_CLEAN:
  case SMALL_MUT_ARR_PTRS_DIRTY:
  case SMALL_MUT_ARR_PTRS_FROZEN:
  case SMALL_MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
      return;

  case TSO:
      copy(p,info,q,sizeofW(StgTSO),gen_no);
      return;

  case STACK:
    {
      StgStack *stack = (StgStack *)q;

      /* To evacuate a small STACK, we need to adjust the stack pointer
       */
      {
          StgStack *new_stack;
          StgPtr r, s;
          rtsBool mine;

          mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                          sizeofW(StgStack), gen_no);
          if (mine) {
              new_stack = (StgStack *)*p;
              move_STACK(stack, new_stack);
              for (r = stack->sp, s = new_stack->sp;
                   r < stack->stack + stack->stack_size;) {
                  *s++ = *r++;
              }
          }
          return;
      }
    }

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
      return;

  default:
    barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}
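
Near the top of evacuate() the tag is split off the pointer and re-applied to whatever address the closure ends up at, including the forwarding address when another reference already copied it. The sketch below isolates that copy-and-forward shape; the object layout and the "low bit marks a forwarding pointer" encoding are invented for the sketch and are not GHC's definitions.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define UNTAG(p)   ((void *)((uintptr_t)(p) & ~(uintptr_t)3))
#define GET_TAG(p) ((uintptr_t)(p) & (uintptr_t)3)
#define TAG(p,t)   ((void *)((uintptr_t)(p) | (uintptr_t)(t)))

typedef struct Obj {
    uintptr_t header;      /* "info pointer", or a forwarding pointer if bit 0 is set */
    int       payload;
} Obj;

static Obj to_space[16];
static int to_free = 0;

static void toy_evacuate(void **ref)
{
    uintptr_t tag = GET_TAG(*ref);     /* split the tag off the pointer */
    Obj *q = UNTAG(*ref);

    if (q->header & 1) {               /* already copied: follow the forwarding ptr */
        *ref = TAG((void *)(q->header & ~(uintptr_t)1), tag);
        return;
    }
    Obj *copy = &to_space[to_free++];  /* copy the object into to-space */
    memcpy(copy, q, sizeof *copy);
    q->header = (uintptr_t)copy | 1;   /* leave a forwarding pointer behind */
    *ref = TAG(copy, tag);             /* merge the tag back onto the new address */
}

int main(void)
{
    static Obj from = { 0x100, 41 };   /* 0x100 plays the part of an info pointer */

    void *r1 = TAG(&from, 1);          /* two tagged references to the same object */
    void *r2 = TAG(&from, 1);

    toy_evacuate(&r1);
    toy_evacuate(&r2);                 /* second call just follows the forwarding ptr */

    assert(UNTAG(r1) == UNTAG(r2));
    assert(GET_TAG(r1) == 1 && ((Obj *)UNTAG(r1))->payload == 41);
    return 0;
}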
Example #20
File: GCAux.c  Project: chansuke/ghc
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;
  StgWord tag;
  StgClosure *q;

  while (1) {
    /* The tag and the pointer are split, to be merged later when needed. */
    tag = GET_CLOSURE_TAG(p);
    q = UNTAG_CLOSURE(p);

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    // ignore static closures 
    //
    // ToDo: This means we never look through IND_STATIC, which means
    // isRetainer needs to handle the IND_STATIC case rather than
    // raising an error.
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED_GC(q)) {
	return p;
    }

    // ignore closures in generations that we're not collecting. 
    bd = Bdescr((P_)q);

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
	return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_MARKED) && is_marked((P_)q,bd)) {
	return p;
    }

    info = q->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // alive! 
        return TAG_CLOSURE(tag,(StgClosure*)UN_FORWARDING_PTR(info));
    }

    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
      // follow indirections 
      p = ((StgInd *)q)->indirectee;
      continue;

    case BLACKHOLE:
        p = ((StgInd*)q)->indirectee;
        if (GET_CLOSURE_TAG(p) != 0) {
            continue;
        } else {
            return NULL;
        }

    default:
      // dead. 
      return NULL;
    }
  }
}
Example #21
File: CNF.c  Project: goldfirere/ghc
static void
verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
{
    bdescr *bd;
    StgPtr p;
    const StgInfoTable *info;
    StgClosure *q;

    p = (P_)firstBlockGetCompact(block);
    bd = Bdescr((P_)block);
    while (p < bd->free) {
        q = (StgClosure*)p;

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

        info = get_itbl(q);
        switch (info->type) {
        case CONSTR_1_0:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0]));
            /* fallthrough */
        case CONSTR_0_1:
            p += sizeofW(StgClosure) + 1;
            break;

        case CONSTR_2_0:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[1]));
            /* fallthrough */
        case CONSTR_1_1:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0]));
            /* fallthrough */
        case CONSTR_0_2:
            p += sizeofW(StgClosure) + 2;
            break;

        case CONSTR:
        case PRIM:
        case CONSTR_NOCAF:
        {
            uint32_t i;

            for (i = 0; i < info->layout.payload.ptrs; i++) {
                check_object_in_compact(str, UNTAG_CLOSURE(q->payload[i]));
            }
            p += sizeofW(StgClosure) + info->layout.payload.ptrs +
                info->layout.payload.nptrs;
            break;
        }

        case ARR_WORDS:
            p += arr_words_sizeW((StgArrBytes*)p);
            break;

        case MUT_ARR_PTRS_FROZEN_CLEAN:
        case MUT_ARR_PTRS_FROZEN_DIRTY:
            verify_mut_arr_ptrs(str, (StgMutArrPtrs*)p);
            p += mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            break;

        case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
        case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
        {
            uint32_t i;
            StgSmallMutArrPtrs *arr = (StgSmallMutArrPtrs*)p;

            for (i = 0; i < arr->ptrs; i++)
                check_object_in_compact(str, UNTAG_CLOSURE(arr->payload[i]));

            p += sizeofW(StgSmallMutArrPtrs) + arr->ptrs;
            break;
        }

        case COMPACT_NFDATA:
            p += sizeofW(StgCompactNFData);
            break;

        default:
            barf("verify_consistency_block");
        }
    }

    return;
}
Example #22
File: Sanity.c  Project: jweijers/ghc
StgOffset 
checkClosure( StgClosure* p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

    p = UNTAG_CLOSURE(p);
    /* Is it a static closure (i.e. in the data segment)? */
    if (!HEAP_ALLOCED(p)) {
	ASSERT(closure_STATIC(p));
    } else {
	ASSERT(!closure_STATIC(p));
    }

    info = p->header.info;

    if (IS_FORWARDING_PTR(info)) {
        barf("checkClosure: found EVACUATED closure %d", info->type);
    }
    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
      { 
	StgMVar *mvar = (StgMVar *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->tail));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->value));
	return sizeofW(StgMVar);
      }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
      {
	nat i;
	for (i = 0; i < info->layout.payload.ptrs; i++) {
	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgThunk *)p)->payload[i]));
	}
	return thunk_sizeW_fromITBL(info);
      }

    case FUN:
    case FUN_1_0:
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case IND_PERM:
    case BLACKHOLE:
    case PRIM:
    case MUT_PRIM:
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
	{
	    nat i;
	    for (i = 0; i < info->layout.payload.ptrs; i++) {
		ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->payload[i]));
	    }
	    return sizeW_fromITBL(info);
	}

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        // NO: the BH might have been updated now
        // ASSERT(get_itbl(bq->bh)->type == BLACKHOLE);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(bq->bh));

        ASSERT(get_itbl((StgClosure *)(bq->owner))->type == TSO);
        ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE 
               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
        ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE || 
               get_itbl((StgClosure *)(bq->link))->type == IND ||
               get_itbl((StgClosure *)(bq->link))->type == BLOCKING_QUEUE);

        return sizeofW(StgBlockingQueue);
    }

    case BCO: {
	StgBCO *bco = (StgBCO *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->instrs));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->literals));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(bco->ptrs));
	return bco_sizeW(bco);
    }

    case IND_STATIC: /* (1, 0) closure */
      ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgIndStatic*)p)->indirectee));
      return sizeW_fromITBL(info);

    case WEAK:
      /* deal with these specially - the info table isn't
       * representative of the actual layout.
       */
      { StgWeak *w = (StgWeak *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->key));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->value));
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->finalizer));
	if (w->link) {
	  ASSERT(LOOKS_LIKE_CLOSURE_PTR(w->link));
	}
	return sizeW_fromITBL(info);
      }

    case THUNK_SELECTOR:
	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
	    return THUNK_SELECTOR_sizeW();

    case IND:
	{ 
  	    /* we don't expect to see any of these after GC
	     * but they might appear during execution
	     */
	    StgInd *ind = (StgInd *)p;
	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(ind->indirectee));
	    return sizeofW(StgInd);
	}

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
	    barf("checkClosure: stack frame");

    case AP:
    {
	StgAP* ap = (StgAP *)p;
	checkPAP (ap->fun, ap->payload, ap->n_args);
	return ap_sizeW(ap);
    }

    case PAP:
    {
	StgPAP* pap = (StgPAP *)p;
	checkPAP (pap->fun, pap->payload, pap->n_args);
	return pap_sizeW(pap);
    }

    case AP_STACK:
    { 
	StgAP_STACK *ap = (StgAP_STACK *)p;
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(ap->fun));
	checkStackChunk((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	return ap_stack_sizeW(ap);
    }

    case ARR_WORDS:
	    return arr_words_sizeW((StgArrWords *)p);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	{
	    StgMutArrPtrs* a = (StgMutArrPtrs *)p;
	    nat i;
	    for (i = 0; i < a->ptrs; i++) {
		ASSERT(LOOKS_LIKE_CLOSURE_PTR(a->payload[i]));
	    }
	    return mut_arr_ptrs_sizeW(a);
	}

    case TSO:
        checkTSO((StgTSO *)p);
        return sizeofW(StgTSO);

    case STACK:
        checkSTACK((StgStack*)p);
        return stack_sizeW((StgStack*)p);

    case TREC_CHUNK:
      {
        nat i;
        StgTRecChunk *tc = (StgTRecChunk *)p;
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->prev_chunk));
        for (i = 0; i < tc -> next_entry_idx; i ++) {
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].tvar));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].expected_value));
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tc->entries[i].new_value));
        }
        return sizeofW(StgTRecChunk);
      }
      
    default:
	    barf("checkClosure (closure type %d)", info->type);
    }
}
Example #23
File: Sanity.c  Project: jweijers/ghc
// check an individual stack object
StgOffset 
checkStackFrame( StgPtr c )
{
    nat size;
    const StgRetInfoTable* info;

    info = get_ret_itbl((StgClosure *)c);

    /* All activation records have 'bitmap' style layout info. */
    switch (info->i.type) {
    case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
    {
	StgWord dyn;
	StgPtr p;
	StgRetDyn* r;
	
	r = (StgRetDyn *)c;
	dyn = r->liveness;
	
	p = (P_)(r->payload);
	checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
	p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;

	// skip over the non-pointers
	p += RET_DYN_NONPTRS(dyn);
	
	// follow the ptr words
	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
	    checkClosureShallow((StgClosure *)*p);
	    p++;
	}
	
	return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
	    RET_DYN_NONPTR_REGS_SIZE +
	    RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
    }

    case UPDATE_FRAME:
      ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
    case ATOMICALLY_FRAME:
    case CATCH_RETRY_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_FRAME:
      // small bitmap cases (<= 32 entries)
    case UNDERFLOW_FRAME:
    case STOP_FRAME:
    case RET_SMALL:
	size = BITMAP_SIZE(info->i.layout.bitmap);
	checkSmallBitmap((StgPtr)c + 1, 
			 BITMAP_BITS(info->i.layout.bitmap), size);
	return 1 + size;

    case RET_BCO: {
	StgBCO *bco;
	nat size;
	bco = (StgBCO *)*(c+1);
	size = BCO_BITMAP_SIZE(bco);
	checkLargeBitmap((StgPtr)c + 2, BCO_BITMAP(bco), size);
	return 2 + size;
    }

    case RET_BIG: // large bitmap (> 32 entries)
	size = GET_LARGE_BITMAP(&info->i)->size;
	checkLargeBitmap((StgPtr)c + 1, GET_LARGE_BITMAP(&info->i), size);
	return 1 + size;

    case RET_FUN:
    {
	StgFunInfoTable *fun_info;
	StgRetFun *ret_fun;

	ret_fun = (StgRetFun *)c;
	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
	size = ret_fun->size;
	switch (fun_info->f.fun_type) {
	case ARG_GEN:
	    checkSmallBitmap((StgPtr)ret_fun->payload, 
			     BITMAP_BITS(fun_info->f.b.bitmap), size);
	    break;
	case ARG_GEN_BIG:
	    checkLargeBitmap((StgPtr)ret_fun->payload,
			     GET_FUN_LARGE_BITMAP(fun_info), size);
	    break;
	default:
	    checkSmallBitmap((StgPtr)ret_fun->payload,
			     BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]),
			     size);
	    break;
	}
	return sizeofW(StgRetFun) + size;
    }

    default:
	barf("checkStackFrame: weird activation record found on stack (%p %d).",c,info->i.type);
    }
}
Example #24
void
printClosure( StgClosure *obj )
{
    obj = UNTAG_CLOSURE(obj);

    StgInfoTable *info;
    info = get_itbl(obj);

    switch ( info->type ) {
    case INVALID_OBJECT:
            barf("Invalid object");

    case CONSTR:
    case CONSTR_1_0: case CONSTR_0_1:
    case CONSTR_1_1: case CONSTR_0_2: case CONSTR_2_0:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
        {
            StgWord i, j;
            StgConInfoTable *con_info = get_con_itbl (obj);

            debugBelch("%s(", GET_CON_DESC(con_info));
            for (i = 0; i < info->layout.payload.ptrs; ++i) {
		if (i != 0) debugBelch(", ");
                printPtr((StgPtr)obj->payload[i]);
            }
            for (j = 0; j < info->layout.payload.nptrs; ++j) {
		if (i != 0 || j != 0) debugBelch(", ");
                debugBelch("%p#", obj->payload[i+j]);
            }
            debugBelch(")\n");
            break;
        }

    case FUN:
    case FUN_1_0: case FUN_0_1: 
    case FUN_1_1: case FUN_0_2: case FUN_2_0:
    case FUN_STATIC:
	debugBelch("FUN/%d(",(int)itbl_to_fun_itbl(info)->f.arity);
	printPtr((StgPtr)obj->header.info);
#ifdef PROFILING
	debugBelch(", %s", obj->header.prof.ccs->cc->label);
#endif
	printStdObjPayload(obj);
	break;

    case PRIM:
	debugBelch("PRIM(");
	printPtr((StgPtr)obj->header.info);
	printStdObjPayload(obj);
	break;

    case THUNK:
    case THUNK_1_0: case THUNK_0_1:
    case THUNK_1_1: case THUNK_0_2: case THUNK_2_0:
    case THUNK_STATIC:
            /* ToDo: will this work for THUNK_STATIC too? */
#ifdef PROFILING
            printThunkObject((StgThunk *)obj,GET_PROF_DESC(info));
#else
            printThunkObject((StgThunk *)obj,"THUNK");
#endif
            break;

    case THUNK_SELECTOR:
	printStdObjHdr(obj, "THUNK_SELECTOR");
	debugBelch(", %p)\n", ((StgSelector *)obj)->selectee);
	break;

    case BCO:
            disassemble( (StgBCO*)obj );
            break;

    case AP:
        {
	    StgAP* ap = (StgAP*)obj;
            StgWord i;
            debugBelch("AP("); printPtr((StgPtr)ap->fun);
            for (i = 0; i < ap->n_args; ++i) {
                debugBelch(", ");
                printPtr((P_)ap->payload[i]);
            }
            debugBelch(")\n");
            break;
        }

    case PAP:
        {
	    StgPAP* pap = (StgPAP*)obj;
            StgWord i;
            debugBelch("PAP/%d(",(int)pap->arity); 
	    printPtr((StgPtr)pap->fun);
            for (i = 0; i < pap->n_args; ++i) {
                debugBelch(", ");
                printPtr((StgPtr)pap->payload[i]);
            }
            debugBelch(")\n");
            break;
        }

    case AP_STACK:
        {
	    StgAP_STACK* ap = (StgAP_STACK*)obj;
            StgWord i;
            debugBelch("AP_STACK("); printPtr((StgPtr)ap->fun);
            for (i = 0; i < ap->size; ++i) {
                debugBelch(", ");
                printPtr((P_)ap->payload[i]);
            }
            debugBelch(")\n");
            break;
        }

    case IND:
            debugBelch("IND("); 
            printPtr((StgPtr)((StgInd*)obj)->indirectee);
            debugBelch(")\n"); 
            break;

    case IND_PERM:
            debugBelch("IND("); 
            printPtr((StgPtr)((StgInd*)obj)->indirectee);
            debugBelch(")\n"); 
            break;

    case IND_STATIC:
            debugBelch("IND_STATIC("); 
            printPtr((StgPtr)((StgInd*)obj)->indirectee);
            debugBelch(")\n"); 
            break;

    case BLACKHOLE:
            debugBelch("BLACKHOLE("); 
            printPtr((StgPtr)((StgInd*)obj)->indirectee);
            debugBelch(")\n"); 
            break;

    /* Cannot happen -- use default case.
    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case RET_FUN:
    */

    case UPDATE_FRAME:
        {
            StgUpdateFrame* u = (StgUpdateFrame*)obj;
            debugBelch("UPDATE_FRAME(");
            printPtr((StgPtr)GET_INFO(u));
            debugBelch(",");
            printPtr((StgPtr)u->updatee);
            debugBelch(")\n"); 
            break;
        }

    case CATCH_FRAME:
        {
            StgCatchFrame* u = (StgCatchFrame*)obj;
            debugBelch("CATCH_FRAME(");
            printPtr((StgPtr)GET_INFO(u));
            debugBelch(",");
            printPtr((StgPtr)u->handler);
            debugBelch(")\n"); 
            break;
        }

    case UNDERFLOW_FRAME:
        {
            StgUnderflowFrame* u = (StgUnderflowFrame*)obj;
            debugBelch("UNDERFLOW_FRAME(");
            printPtr((StgPtr)u->next_chunk);
            debugBelch(")\n"); 
            break;
        }

    case STOP_FRAME:
        {
            StgStopFrame* u = (StgStopFrame*)obj;
            debugBelch("STOP_FRAME(");
            printPtr((StgPtr)GET_INFO(u));
            debugBelch(")\n"); 
            break;
        }

    case ARR_WORDS:
        {
            StgWord i;
            debugBelch("ARR_WORDS(\"");
	    for (i=0; i<arr_words_words((StgArrWords *)obj); i++)
	      debugBelch("%" FMT_Word, (W_)((StgArrWords *)obj)->payload[i]);
            debugBelch("\")\n");
            break;
        }

    case MUT_ARR_PTRS_CLEAN:
	debugBelch("MUT_ARR_PTRS_CLEAN(size=%" FMT_Word ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);
	break;

    case MUT_ARR_PTRS_DIRTY:
	debugBelch("MUT_ARR_PTRS_DIRTY(size=%" FMT_Word ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);
	break;

    case MUT_ARR_PTRS_FROZEN:
	debugBelch("MUT_ARR_PTRS_FROZEN(size=%" FMT_Word ")\n", (W_)((StgMutArrPtrs *)obj)->ptrs);
	break;

    case MVAR_CLEAN:
    case MVAR_DIRTY:
        {
	  StgMVar* mv = (StgMVar*)obj;
	  debugBelch("MVAR(head=%p, tail=%p, value=%p)\n", mv->head, mv->tail, mv->value);
          break;
        }

    case MUT_VAR_CLEAN:
        {
	  StgMutVar* mv = (StgMutVar*)obj;
	  debugBelch("MUT_VAR_CLEAN(var=%p)\n", mv->var);
          break;
        }

    case MUT_VAR_DIRTY:
        {
	  StgMutVar* mv = (StgMutVar*)obj;
	  debugBelch("MUT_VAR_DIRTY(var=%p)\n", mv->var);
          break;
        }

    case WEAK:
            debugBelch("WEAK("); 
	    debugBelch(" key=%p value=%p finalizer=%p", 
		    (StgPtr)(((StgWeak*)obj)->key),
		    (StgPtr)(((StgWeak*)obj)->value),
		    (StgPtr)(((StgWeak*)obj)->finalizer));
            debugBelch(")\n"); 
	    /* ToDo: chase 'link' ? */
            break;

    case TSO:
      debugBelch("TSO("); 
      debugBelch("%lu (%p)",(unsigned long)(((StgTSO*)obj)->id), (StgTSO*)obj);
      debugBelch(")\n"); 
      break;

#if 0
      /* Symptomatic of a problem elsewhere, have it fall-through & fail */
    case EVACUATED:
      debugBelch("EVACUATED("); 
      printClosure((StgEvacuated*)obj->evacuee);
      debugBelch(")\n"); 
      break;
#endif

    default:
            //barf("printClosure %d",get_itbl(obj)->type);
            debugBelch("*** printClosure: unknown type %d ****\n",
                    (int)get_itbl(obj)->type );
            barf("printClosure %d",get_itbl(obj)->type);
            return;
    }
}
Example #25
File: Messages.c  Project: Eufavn/ghc
nat messageBlackHole(Capability *cap, MessageBlackHole *msg)
{
    const StgInfoTable *info;
    StgClosure *p;
    StgBlockingQueue *bq;
    StgClosure *bh = UNTAG_CLOSURE(msg->bh);
    StgTSO *owner;

    debugTraceCap(DEBUG_sched, cap, "message: thread %d blocking on blackhole %p", 
                  (lnat)msg->tso->id, msg->bh);

    info = bh->header.info;

    // If we got this message in our inbox, it might be that the
    // BLACKHOLE has already been updated, and GC has shorted out the
    // indirection, so the pointer no longer points to a BLACKHOLE at
    // all.
    if (info != &stg_BLACKHOLE_info && 
        info != &stg_CAF_BLACKHOLE_info && 
        info != &__stg_EAGER_BLACKHOLE_info &&
        info != &stg_WHITEHOLE_info) {
        // if it is a WHITEHOLE, then a thread is in the process of
        // trying to BLACKHOLE it.  But we know that it was once a
        // BLACKHOLE, so there is at least a valid pointer in the
        // payload, so we can carry on.
        return 0;
    }

    // The blackhole must indirect to a TSO, a BLOCKING_QUEUE, an IND,
    // or a value.
loop:
    // NB. VOLATILE_LOAD(), because otherwise gcc hoists the load
    // and turns this into an infinite loop.
    p = UNTAG_CLOSURE((StgClosure*)VOLATILE_LOAD(&((StgInd*)bh)->indirectee));
    info = p->header.info;

    if (info == &stg_IND_info)
    {
        // This could happen, if e.g. we got a BLOCKING_QUEUE that has
        // just been replaced with an IND by another thread in
        // updateThunk().  In which case, if we read the indirectee
        // again we should get the value.
        goto loop;
    }

    else if (info == &stg_TSO_info)
    {
        owner = (StgTSO*)p;

#ifdef THREADED_RTS
        if (owner->cap != cap) {
            sendMessage(cap, owner->cap, (Message*)msg);
            debugTraceCap(DEBUG_sched, cap, "forwarding message to cap %d", owner->cap->no);
            return 1;
        }
#endif
        // owner is the owner of the BLACKHOLE, and resides on this
        // Capability.  msg->tso is the first thread to block on this
        // BLACKHOLE, so we first create a BLOCKING_QUEUE object.

        bq = (StgBlockingQueue*)allocate(cap, sizeofW(StgBlockingQueue));
            
        // initialise the BLOCKING_QUEUE object
        SET_HDR(bq, &stg_BLOCKING_QUEUE_DIRTY_info, CCS_SYSTEM);
        bq->bh = bh;
        bq->queue = msg;
        bq->owner = owner;
        
        msg->link = (MessageBlackHole*)END_TSO_QUEUE;
        
        // All BLOCKING_QUEUES are linked in a list on owner->bq, so
        // that we can search through them in the event that there is
        // a collision to update a BLACKHOLE and a BLOCKING_QUEUE
        // becomes orphaned (see updateThunk()).
        bq->link = owner->bq;
        owner->bq = bq;
        dirty_TSO(cap, owner); // we modified owner->bq

        // If the owner of the blackhole is currently runnable, then
        // bump it to the front of the run queue.  This gives the
        // blocked-on thread a little boost which should help unblock
        // this thread, and may avoid a pile-up of other threads
        // becoming blocked on the same BLACKHOLE (#3838).
        //
        // NB. we check to make sure that the owner is not the same as
        // the current thread, since in that case it will not be on
        // the run queue.
        if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
            removeFromRunQueue(cap, owner);
            pushOnRunQueue(cap,owner);
        }

        // point to the BLOCKING_QUEUE from the BLACKHOLE
        write_barrier(); // make the BQ visible
        ((StgInd*)bh)->indirectee = (StgClosure *)bq;
        recordClosureMutated(cap,bh); // bh was mutated

        debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d", 
                      (lnat)msg->tso->id, (lnat)owner->id);

        return 1; // blocked
    }
    else if (info == &stg_BLOCKING_QUEUE_CLEAN_info || 
             info == &stg_BLOCKING_QUEUE_DIRTY_info)
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        ASSERT(bq->bh == bh);

        owner = bq->owner;

        ASSERT(owner != END_TSO_QUEUE);

#ifdef THREADED_RTS
        if (owner->cap != cap) {
            sendMessage(cap, owner->cap, (Message*)msg);
            debugTraceCap(DEBUG_sched, cap, "forwarding message to cap %d", owner->cap->no);
            return 1;
        }
#endif

        msg->link = bq->queue;
        bq->queue = msg;
        recordClosureMutated(cap,(StgClosure*)msg);

        if (info == &stg_BLOCKING_QUEUE_CLEAN_info) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            recordClosureMutated(cap,(StgClosure*)bq);
        }

        debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d", 
                      (lnat)msg->tso->id, (lnat)owner->id);

        // See above, #3838
        if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
            removeFromRunQueue(cap, owner);
            pushOnRunQueue(cap,owner);
        }

        return 1; // blocked
    }
    
    return 0; // not blocked
}
Example #26
File: Interpreter.c  Project: A1kmm/ghc
Capability *
interpretBCO (Capability* cap)
{
    // Use of register here is primarily to make it clear to compilers
    // that these entities are non-aliasable.
    register StgPtr       Sp;    // local state -- stack pointer
    register StgPtr       SpLim; // local state -- stack lim pointer
    register StgClosure   *tagged_obj = 0, *obj;
    nat n, m;

    LOAD_STACK_POINTERS;

    cap->r.rHpLim = (P_)1; // HpLim is the context-switch flag; when it
                           // goes to zero we must return to the scheduler.

    // ------------------------------------------------------------------------
    // Case 1:
    // 
    //       We have a closure to evaluate.  Stack looks like:
    //       
    //      	|   XXXX_info   |
    //      	+---------------+
    //       Sp |      -------------------> closure
    //      	+---------------+
    //       
    if (Sp[0] == (W_)&stg_enter_info) {
       Sp++;
       goto eval;
    }

    // ------------------------------------------------------------------------
    // Case 2:
    // 
    //       We have a BCO application to perform.  Stack looks like:
    //
    //      	|     ....      |
    //      	+---------------+
    //      	|     arg1      |
    //      	+---------------+
    //      	|     BCO       |
    //      	+---------------+
    //       Sp |   RET_BCO     |
    //      	+---------------+
    //       
    else if (Sp[0] == (W_)&stg_apply_interp_info) {
	obj = UNTAG_CLOSURE((StgClosure *)Sp[1]);
	Sp += 2;
	goto run_BCO_fun;
    }

    // ------------------------------------------------------------------------
    // Case 3:
    //
    //       We have an unboxed value to return.  See comment before
    //       do_return_unboxed, below.
    //
    else {
	goto do_return_unboxed;
    }

    // Evaluate the object on top of the stack.
eval:
    tagged_obj = (StgClosure*)Sp[0]; Sp++;

eval_obj:
    obj = UNTAG_CLOSURE(tagged_obj);
    INTERP_TICK(it_total_evals);

    IF_DEBUG(interpreter,
             debugBelch(
             "\n---------------------------------------------------------------\n");
             debugBelch("Evaluating: "); printObj(obj);
             debugBelch("Sp = %p\n", Sp);
             debugBelch("\n" );

             printStackChunk(Sp,cap->r.rCurrentTSO->stackobj->stack+cap->r.rCurrentTSO->stackobj->stack_size);
             debugBelch("\n\n");
            );
Example #27
File: Evac.c  Project: 23Skidoo/ghc
static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a WHITEHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED_GC(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            // shortcut, behave as for:  if (evac) evacuate(q);
            if (evac && bd->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }


    // WHITEHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);

        // make sure someone else didn't get here first...
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).

      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
          {
              // check that the size is in range
              ASSERT(field <  (StgWord32)(info->layout.payload.ptrs +
                                          info->layout.payload.nptrs));

              // Select the right field from the constructor
              val = selectee->payload[field];

#ifdef PROFILING
              // For the purposes of LDV profiling, we have destroyed
              // the original selector thunk, p.
              if (era > 0) {
                  // Only modify the info pointer when LDV profiling is
                  // enabled.  Note that this is incompatible with parallel GC,
                  // because it would allow other threads to start evaluating
                  // the same selector thunk.
                  SET_INFO((StgClosure*)p, (StgInfoTable *)info_ptr);
                  OVERWRITING_CLOSURE((StgClosure*)p);
                  SET_INFO((StgClosure*)p, &stg_WHITEHOLE_info);
              }
#endif

              // the closure in val is now the "value" of the
              // THUNK_SELECTOR in p.  However, val may itself be a
              // THUNK_SELECTOR, in which case we want to continue
              // evaluating until we find the real value, and then
              // update the whole chain to point to the value.
          val_loop:
              info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
              if (!IS_FORWARDING_PTR(info_ptr))
              {
                  info = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr);
                  switch (info->type) {
                  case IND:
                  case IND_STATIC:
                      val = ((StgInd *)val)->indirectee;
                      goto val_loop;
                  case THUNK_SELECTOR:
                      ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                      prev_thunk_selector = p;
                      p = (StgSelector*)val;
                      goto selector_chain;
                  default:
                      break;
                  }
              }
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;

              *q = val;

              // update the other selectors in the chain *before*
              // evacuating the value.  This is necessary in the case
              // where the value turns out to be one of the selectors
              // in the chain (i.e. we have a loop), and evacuating it
              // would corrupt the chain.
              unchain_thunk_selectors(prev_thunk_selector, val);

              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
              if (evac) evacuate(q);
              return;
          }

      case IND:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case BLACKHOLE:
      {
          StgClosure *r;
          const StgInfoTable *i;
          r = ((StgInd*)selectee)->indirectee;

          // establish whether this BH has been updated, and is now an
          // indirection, as in evacuate().
          if (GET_CLOSURE_TAG(r) == 0) {
              i = r->header.info;
              if (IS_FORWARDING_PTR(i)) {
                  r = (StgClosure *)UN_FORWARDING_PTR(i);
                  i = r->header.info;
              }
              if (i == &stg_TSO_info
                  || i == &stg_WHITEHOLE_info
                  || i == &stg_BLOCKING_QUEUE_CLEAN_info
                  || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
                  goto bale_out;
              }
              ASSERT(i != &stg_IND_info);
          }

          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;
      }

      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

      case AP:
      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
          // not evaluated yet
          goto bale_out;

      default:
        barf("eval_thunk_selector: strange selectee %d",
             (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}
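For context, the example above repeatedly hands the chain it has built (linked through payload[0] of each locked selector) to unchain_thunk_selectors, which is not shown here. Below is a hedged sketch of what that walk amounts to, assuming the usual RTS headers are in scope and ignoring the selector-loop and LDV-profiling cases the real routine in GHC's Scav.c has to handle:
// Sketch only (not the real GHC routine): each selector in the chain was
// WHITEHOLE'd and linked through payload[0] by eval_thunk_selector above;
// here every one of them is overwritten with an indirection to the value.
static void
unchain_thunk_selectors_sketch (StgSelector *p, StgClosure *val)
{
    while (p != NULL) {
        // remember the next selector in the chain before we overwrite p
        StgSelector *prev = (StgSelector *)((StgClosure *)p)->payload[0];
        ((StgInd *)p)->indirectee = val;            // point p at the value...
        SET_INFO((StgClosure *)p, &stg_IND_info);   // ...and turn it into an IND
        p = prev;
    }
}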
Example #28
0
static void
thread_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;
    
    // highly similar to scavenge_stack, but we do pointer threading here.
    
    while (p < stack_end) {

	// *p must be the info pointer of an activation
	// record.  All activation records have 'bitmap' style layout
	// info.
	//
	info  = get_ret_itbl((StgClosure *)p);
	
	switch (info->i.type) {
	    
	    // Dynamic bitmap: the mask is stored on the stack 
	case RET_DYN:
	{
	    StgWord dyn;
	    dyn = ((StgRetDyn *)p)->liveness;

	    // traverse the bitmap first
	    bitmap = RET_DYN_LIVENESS(dyn);
	    p      = (P_)&((StgRetDyn *)p)->payload[0];
	    size   = RET_DYN_BITMAP_SIZE;
	    while (size > 0) {
		if ((bitmap & 1) == 0) {
		    thread((StgClosure **)p);
		}
		p++;
		bitmap = bitmap >> 1;
		size--;
	    }
	    
	    // skip over the non-ptr words
	    p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
	    
	    // follow the ptr words
	    for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
		thread((StgClosure **)p);
		p++;
	    }
	    continue;
	}
	    
	    // small bitmap (<= 32 entries, or 64 on a 64-bit machine) 
	case UPDATE_FRAME:
	case STOP_FRAME:
	case CATCH_FRAME:
	case RET_SMALL:
	    bitmap = BITMAP_BITS(info->i.layout.bitmap);
	    size   = BITMAP_SIZE(info->i.layout.bitmap);
	    p++;
	    // NOTE: the payload starts immediately after the info-ptr, we
	    // don't have an StgHeader in the same sense as a heap closure.
	    while (size > 0) {
		if ((bitmap & 1) == 0) {
		    thread((StgClosure **)p);
		}
		p++;
		bitmap = bitmap >> 1;
		size--;
	    }
	    continue;

#ifdef ALLOW_INTERPRETER
	case RET_BCO: {
	    StgBCO *bco;
	    nat size;
	    
	    p++;
	    bco = (StgBCO *)*p;
	    thread((StgClosure **)p);
	    p++;
	    size = BCO_BITMAP_SIZE(bco);
	    thread_large_bitmap(p, BCO_BITMAP(bco), size);
	    p += size;
	    continue;
	}
#endif // ALLOW_INTERPRETER

	    // large bitmap (> 32 entries, or 64 on a 64-bit machine) 
	case RET_BIG:
	    p++;
	    size = GET_LARGE_BITMAP(&info->i)->size;
	    thread_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
	    p += size;
	    continue;

	case RET_FUN:
	{
	    StgRetFun *ret_fun = (StgRetFun *)p;
	    StgFunInfoTable *fun_info;
	    
	    fun_info = FUN_INFO_PTR_TO_STRUCT(UNTAG_CLOSURE((StgClosure *)
                           get_threaded_info((StgPtr)ret_fun->fun)));
	         // NB: fetch fun_info *before* threading ret_fun->fun below!
	    thread(&ret_fun->fun);
	    p = thread_arg_block(fun_info, ret_fun->payload);
	    continue;
	}

	default:
	    barf("thread_stack: weird activation record found on stack: %d", 
		 (int)(info->i.type));
	}
    }
}
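thread_stack above hands RET_BCO and RET_BIG frames to thread_large_bitmap, which this example does not include. A hedged sketch of such a walk, mirroring the small-bitmap loop and assuming the standard StgLargeBitmap layout (one StgWord of bits per BITS_IN(W_) stack slots):
// Sketch only: thread every pointer-marked slot described by a large
// bitmap.  As in the small-bitmap loop above, a 0 bit means "pointer".
static void
thread_large_bitmap_sketch( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; i++) {
        if ((bitmap & 1) == 0) {
            thread((StgClosure **)p);
        }
        p++;
        if ((i + 1) % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];   // move on to the next bitmap word
        } else {
            bitmap = bitmap >> 1;
        }
    }
}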
Example #29
0
static void
gtk2hs_closure_marshal(GClosure *closure,
                       GValue *return_value,
                       guint n_param_values,
                       const GValue *param_values,
                       gpointer invocation_hint,
                       gpointer marshal_data)
{

    Gtk2HsClosure *hc = (Gtk2HsClosure *)closure;
    HaskellObj call, ret;
#ifdef GHC_RTS_USES_CAPABILITY
    Capability *cap;
#else
    SchedulerStatus cap;
#endif
    guint i;

    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): about to run callback, n_param_values=%d", hc->callback, n_param_values));
#ifdef GHC_RTS_USES_CAPABILITY
    cap = rts_lock();
#else
    rts_lock();
#endif

    call = (StgClosure *)deRefStablePtr(hc->callback);

    /* construct the function call */
    for (i = 0; i < n_param_values; i++) {
        WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): param_values[%d]=%s :: %s",
                           hc->callback,
                           i,
                           g_strdup_value_contents(&param_values[i]),
                           g_type_name(G_VALUE_TYPE(&param_values[i]))));
        call = rts_apply(CAP call, gtk2hs_value_as_haskellobj(CAP &param_values[i]));
    }

    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): about to rts_evalIO", hc->callback));

    /* perform the call */
#if __GLASGOW_HASKELL__>=704
    rts_evalIO(&cap, rts_apply(CAP (HaskellObj)runIO_closure, call),&ret);
#else
    cap=rts_evalIO(CAP rts_apply(CAP (HaskellObj)runIO_closure, call),&ret);
#endif

    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): about to rts_checkSchedStatus", hc->callback));

    /* barf if anything went wrong */
    /* TODO: pass a sensible value for call site so we get better error messages */
    /* or perhaps we can propagate any error? */
    rts_checkSchedStatus("gtk2hs_closure_marshal", cap);
    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): ret=%p", hc->callback, ret));

    if (return_value) {
        WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): return_value :: %s, ret=%p, UNTAG_CLOSURE(ret)=%p",
                           hc->callback,
                           /*                           g_strdup_value_contents(return_value), */
                           g_type_name(G_VALUE_TYPE(return_value)),
                           ret,
                           UNTAG_CLOSURE(ret)));
        gtk2hs_value_from_haskellobj(return_value, ret);
    }

#ifdef GHC_RTS_USES_CAPABILITY
    rts_unlock(cap);
#else
    rts_unlock();
#endif
    WHEN_DEBUG(g_debug("gtk2hs_closure_marshal(%p): done running callback", hc->callback));
}
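The marshaller above casts the incoming GClosure to a Gtk2HsClosure and dereferences hc->callback as a stable pointer, but the wrapper type itself is not shown. A plausible minimal layout, assuming gtk2hs extends GClosure with a single StablePtr field (the real definition in the gtk2hs sources may carry more):
/* Sketch of the wrapper the marshaller assumes: a GClosure extended with
 * a stable pointer to the Haskell callback. */
typedef struct _Gtk2HsClosure {
    GClosure closure;        /* must come first so the (Gtk2HsClosure *) cast works */
    HsStablePtr callback;    /* the Haskell callback, kept alive via a StablePtr */
} Gtk2HsClosure;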
Example #30
0
static void
update_fwd_compact( bdescr *blocks )
{
    StgPtr p, q, free;
#if 0
    StgWord m;
#endif
    bdescr *bd, *free_bd;
    StgInfoTable *info;
    nat size;
    StgWord iptr;

    bd = blocks;
    free_bd = blocks;
    free = free_bd->start;

    // cycle through all the blocks in the step
    for (; bd != NULL; bd = bd->link) {
	p = bd->start;

	while (p < bd->free ) {

	    while ( p < bd->free && !is_marked(p,bd) ) {
		p++;
	    }
	    if (p >= bd->free) {
		break;
	    }

#if 0
    next:
	m = * ((StgPtr)bd->u.bitmap + ((p - bd->start) / (BITS_IN(StgWord))));
	m >>= ((p - bd->start) & (BITS_IN(StgWord) - 1));

	while ( p < bd->free ) {

	    if ((m & 1) == 0) {
		m >>= 1;
		p++;
		if (((StgWord)p & (sizeof(W_) * BITS_IN(StgWord))) == 0) {
		    goto next;
		} else {
		    continue;
		}
	    }
#endif

	    // Problem: we need to know the destination for this cell
	    // in order to unthread its info pointer.  But we can't
	    // know the destination without the size, because we may
	    // spill into the next block.  So we have to run down the 
	    // threaded list and get the info ptr first.
            //
            // ToDo: one possible avenue of attack is to use the fact
            // that if (p&BLOCK_MASK) >= (free&BLOCK_MASK), then we
            // definitely have enough room.  Also see bug #1147.
            iptr = get_threaded_info(p);
	    info = INFO_PTR_TO_STRUCT(UNTAG_CLOSURE((StgClosure *)iptr));

	    q = p;

	    p = thread_obj(info, p);

	    size = p - q;
	    if (free + size > free_bd->start + BLOCK_SIZE_W) {
		// unset the next bit in the bitmap to indicate that
		// this object needs to be pushed into the next
		// block.  This saves us having to run down the
		// threaded info pointer list twice during the next pass.
		unmark(q+1,bd);
		free_bd = free_bd->link;
		free = free_bd->start;
	    } else {
		ASSERT(is_marked(q+1,bd));
	    }

	    unthread(q,(StgWord)free + GET_CLOSURE_TAG((StgClosure *)iptr));
	    free += size;
#if 0
	    goto next;
#endif
	}
    }
}
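update_fwd_compact relies on the per-block mark bitmap through is_marked and unmark, neither of which appears in this example. A hedged sketch of how such bit tests over bd->u.bitmap are typically written (the real inline helpers live in GHC's rts/sm and may differ in detail):
// Sketch only: test/clear the mark bit for heap word p in the bitmap
// attached to block descriptor bd.
STATIC_INLINE int
is_marked_sketch( StgPtr p, bdescr *bd )
{
    nat offset_within_block = p - bd->start;
    StgPtr bitmap_word = (StgPtr)bd->u.bitmap +
        (offset_within_block / BITS_IN(W_));
    StgWord bit_mask = (StgWord)1 << (offset_within_block & (BITS_IN(W_) - 1));
    return (*bitmap_word & bit_mask) != 0;
}

STATIC_INLINE void
unmark_sketch( StgPtr p, bdescr *bd )
{
    nat offset_within_block = p - bd->start;
    StgPtr bitmap_word = (StgPtr)bd->u.bitmap +
        (offset_within_block / BITS_IN(W_));
    StgWord bit_mask = (StgWord)1 << (offset_within_block & (BITS_IN(W_) - 1));
    *bitmap_word = *bitmap_word & ~bit_mask;   // clear the bit for this word
}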