Example #1
File: Storage.c Project: ForkBackups/ghc
void freeExec (void *addr)
{
    StgPtr p = (StgPtr)addr - 1;
    bdescr *bd = Bdescr((StgPtr)p);

    if ((bd->flags & BF_EXEC) == 0) {
        barf("freeExec: not executable");
    }

    if (*(StgPtr)p == 0) {
        barf("freeExec: already free?");
    }

    ACQUIRE_SM_LOCK;

    bd->gen_no -= *(StgPtr)p;
    *(StgPtr)p = 0;

    if (bd->gen_no == 0) {
        // Free the block if it is empty, but not if it is the block at
        // the head of the queue.
        if (bd != exec_block) {
            debugTrace(DEBUG_gc, "free exec block %p", bd->start);
            dbl_link_remove(bd, &exec_block);
            setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
            freeGroup(bd);
        } else {
            bd->free = bd->start;
        }
    }

    RELEASE_SM_LOCK;
}
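
Note: freeExec undoes a convention set up at allocation time: the word
just before the returned address holds the chunk's size in words, and
bd->gen_no doubles as a count of live words in the block. A minimal
sketch of the allocation side (hypothetical helper; the real
counterpart is allocateExec in the same file, with refill and locking
omitted here):

// Sketch only: assumes exec_block has room for n+1 words.
static void *allocExecSketch (W_ n)
{
    bdescr *bd = exec_block;
    *(bd->free) = n + 1;        // header word: total words in this chunk
    bd->gen_no += n + 1;        // gen_no doubles as a live-word counter
    void *ret = bd->free + 1;   // hand out the word after the header
    bd->free += n + 1;
    return ret;
}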
Example #2
File: Sanity.c Project: DJacquard/ghc
static void
checkCompactObjects(bdescr *bd)
{
    // Compact objects are similar to large objects,
    // but they have a StgCompactNFDataBlock at the beginning,
    // before the actual closure

    for ( ; bd != NULL; bd = bd->link) {
        StgCompactNFDataBlock *block, *last;
        StgCompactNFData *str;
        StgWord totalW;

        ASSERT(bd->flags & BF_COMPACT);

        block = (StgCompactNFDataBlock*)bd->start;
        str = block->owner;
        ASSERT((W_)str == (W_)block + sizeof(StgCompactNFDataBlock));

        totalW = 0;
        for ( ; block ; block = block->next) {
            last = block;
            ASSERT(block->owner == str);

            totalW += Bdescr((P_)block)->blocks * BLOCK_SIZE_W;
        }

        ASSERT(str->totalW == totalW);
        ASSERT(str->last == last);
    }
}
Example #3
File: Sanity.c Project: jweijers/ghc
/*
   Check that all TSOs have been evacuated.
   Optionally also check the sanity of the TSOs.
*/
void
checkGlobalTSOList (rtsBool checkTSOs)
{
  StgTSO *tso;
  nat g;

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (tso=generations[g].threads; tso != END_TSO_QUEUE; 
           tso = tso->global_link) {
          ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso));
          ASSERT(get_itbl((StgClosure *)tso)->type == TSO);
          if (checkTSOs)
              checkTSO(tso);

          // If this TSO is dirty and in an old generation, it better
          // be on the mutable list.
          if (tso->dirty) {
              ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
              tso->flags &= ~TSO_MARKED;
          }

          {
              StgStack *stack;
              StgUnderflowFrame *frame;

              stack = tso->stackobj;
              while (1) {
                  if (stack->dirty & 1) {
                      ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
                      stack->dirty &= ~TSO_MARKED;
                  }
                  frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
                                                - sizeofW(StgUnderflowFrame));
                  if (frame->info != &stg_stack_underflow_frame_info
                      || frame->next_chunk == (StgStack*)END_TSO_QUEUE) break;
                  stack = frame->next_chunk;
              }
          }
      }
  }
}
Example #4
File: CNF.c Project: goldfirere/ghc
void
compactMarkKnown(StgCompactNFData *str)
{
    bdescr *bd;
    StgCompactNFDataBlock *block;

    block = compactGetFirstBlock(str);
    for ( ; block; block = block->next) {
        bd = Bdescr((StgPtr)block);
        bd->flags |= BF_KNOWN;
    }
}
Example #5
File: CNF.c Project: goldfirere/ghc
STATIC_INLINE void
check_object_in_compact (StgCompactNFData *str, StgClosure *p)
{
    bdescr *bd;

    // Only certain static closures are allowed to be referenced from
    // a compact, but let's be generous here and assume that all
    // static closures are OK.
    if (!HEAP_ALLOCED(p))
        return;

    bd = Bdescr((P_)p);
    ASSERT((bd->flags & BF_COMPACT) != 0 && objectGetCompact(p) == str);
}
Example #6
File: CNF.c Project: goldfirere/ghc
void
compactFree(StgCompactNFData *str)
{
    StgCompactNFDataBlock *block, *next;
    bdescr *bd;

    block = compactGetFirstBlock(str);

    for ( ; block; block = next) {
        next = block->next;
        bd = Bdescr((StgPtr)block);
        ASSERT((bd->flags & BF_EVACUATED) == 0);
        freeGroup(bd);
    }
}
Example #7
File: CNF.c Project: goldfirere/ghc
static bool
block_is_full (StgCompactNFDataBlock *block)
{
    bdescr *bd;

    // We consider a block full if we could not fit
    // an entire closure with 7 payload items
    // (this leaves a slop of 64 bytes at most, but
    // it avoids leaving a block almost empty to fit
    // a large byte array, while at the same time
    // it avoids trying to allocate a large closure
    // in a chain of almost empty blocks)

    bd = Bdescr((StgPtr)block);
    return (!has_room_for(bd,7));
}
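
has_room_for is not among these examples; judging from its callers
(allocateForCompact in Example #26 does to = bd->free; bd->free += sizeW
after a successful check), a plausible sketch is:

// Hedged sketch of has_room_for: true if bd->free can advance by sizeW
// words without running past the end of the block group.
STATIC_INLINE bool
has_room_for (bdescr *bd, StgWord sizeW)
{
    return bd->free + sizeW <= bd->start + bd->blocks * BLOCK_SIZE_W;
}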
Example #8
File: CNF.c Project: goldfirere/ghc
StgWord
compactContains (StgCompactNFData *str, StgPtr what)
{
    bdescr *bd;

    // This check is the reason why this needs to be
    // implemented in C instead of (possibly faster) Cmm
    if (!HEAP_ALLOCED (what))
        return 0;

    // Note that we don't care about tags, they are eaten
    // away by the Bdescr operation anyway
    bd = Bdescr((P_)what);
    return (bd->flags & BF_COMPACT) != 0 &&
        (str == NULL || objectGetCompact((StgClosure*)what) == str);
}
Example #9
File: CNF.c Project: goldfirere/ghc
//
// shouldCompact(c,p): returns:
//    SHOULDCOMPACT_IN_CNF if the object is in c
//    SHOULDCOMPACT_STATIC if the object is static
//    SHOULDCOMPACT_PINNED if the object lives in a pinned block
//    SHOULDCOMPACT_NOTIN_CNF if the object is dynamic and not in c
//
StgWord shouldCompact (StgCompactNFData *str, StgClosure *p)
{
    bdescr *bd;

    if (!HEAP_ALLOCED(p))
        return SHOULDCOMPACT_STATIC;  // we have to copy static closures too

    bd = Bdescr((P_)p);
    if (bd->flags & BF_PINNED) {
        return SHOULDCOMPACT_PINNED;
    }
    if ((bd->flags & BF_COMPACT) && objectGetCompact(p) == str) {
        return SHOULDCOMPACT_IN_CNF;
    } else {
        return SHOULDCOMPACT_NOTIN_CNF;
    }
}
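
A hedged sketch of how a caller appending p to str might dispatch on the
answer (the case bodies are placeholders):

switch (shouldCompact(str, p)) {
case SHOULDCOMPACT_IN_CNF:    break; // already inside str: share the pointer
case SHOULDCOMPACT_STATIC:    break; // static closure: copy it anyway
case SHOULDCOMPACT_PINNED:    break; // pinned data cannot be moved: fail
case SHOULDCOMPACT_NOTIN_CNF: break; // ordinary heap object: copy it in
}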
Example #10
File: CNF.c Project: goldfirere/ghc
StgCompactNFDataBlock *
compactAllocateBlock(Capability            *cap,
                     StgWord                size,
                     StgCompactNFDataBlock *previous)
{
    StgWord aligned_size;
    StgCompactNFDataBlock *block;
    bdescr *bd;

    aligned_size = BLOCK_ROUND_UP(size);

    // We do not link the new object into the generation ever
    // - we cannot let the GC know about this object until we're done
    // importing it and we have fixed up all info tables and stuff
    //
    // but we do update n_compact_blocks, otherwise memInventory()
    // in Sanity will think we have a memory leak, because it compares
    // the blocks it knows about with the blocks obtained by the
    // block allocator
    // (if by chance a memory leak does happen due to a bug somewhere
    // else, memInventory will also report that all compact blocks
    // associated with this compact are leaked - but they are not really,
    // we have a pointer to them and we're not losing track of it, it's
    // just we can't use the GC until we're done with the import)
    //
    // (That btw means that the high level import code must be careful
    // not to lose the pointer, so don't use the primops directly
    // unless you know what you're doing!)

    // Other trickery: we pass NULL as first, which means our blocks
    // are always in generation 0.
    // This is correct because the GC has never seen the blocks, so
    // it had no chance of promoting them.

    block = compactAllocateBlockInternal(cap, aligned_size, NULL,
                                         previous != NULL ? ALLOCATE_IMPORT_APPEND : ALLOCATE_IMPORT_NEW);
    if (previous != NULL)
        previous->next = block;

    bd = Bdescr((P_)block);
    bd->free = (P_)((W_)bd->start + size);

    return block;
}
Example #11
STATIC_INLINE void
thread (StgClosure **p)
{
    StgClosure *q0;
    StgPtr q;
    StgWord iptr;
    bdescr *bd;

    q0  = *p;
    q   = (StgPtr)UNTAG_CLOSURE(q0);

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
    
    if (HEAP_ALLOCED(q)) {
        bd = Bdescr(q);
        // A handy way to discover whether the ptr points into the
        // compacted area of the old gen is that the BF_EVACUATED flag
        // is zero (it is non-zero for all the other areas of live
        // memory).
        if ((bd->flags & BF_EVACUATED) == 0)
        {
            iptr = *q;
            switch (GET_CLOSURE_TAG((StgClosure *)iptr))
            {
            case 0: 
                // this is the info pointer; we are creating a new chain.
                // save the original tag at the end of the chain.
                *p = (StgClosure *)((StgWord)iptr + GET_CLOSURE_TAG(q0));
                *q = (StgWord)p + 1;
                break;
            case 1:
            case 2:
                // this is a chain of length 1 or more
                *p = (StgClosure *)iptr;
                *q = (StgWord)p + 2;
                break;
            }
        }
    }
}
Example #12
File: Compact.c Project: Lemmih/ghc
STATIC_INLINE void
thread (StgClosure **p)
{
    StgClosure *q0;
    StgPtr q;
    StgWord iptr;
    bdescr *bd;

    q0  = *p;
    q   = (StgPtr)UNTAG_CLOSURE(q0);

    // It doesn't look like a closure at the moment, because the info
    // ptr is possibly threaded:
    // ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
    
    if (HEAP_ALLOCED(q)) {
        bd = Bdescr(q);

        if (bd->flags & BF_MARKED)
        {
            iptr = *q;
            switch (GET_CLOSURE_TAG((StgClosure *)iptr))
            {
            case 0: 
                // this is the info pointer; we are creating a new chain.
                // save the original tag at the end of the chain.
                *p = (StgClosure *)((StgWord)iptr + GET_CLOSURE_TAG(q0));
                *q = (StgWord)p + 1;
                break;
            case 1:
            case 2:
                // this is a chain of length 1 or more
                *p = (StgClosure *)iptr;
                *q = (StgWord)p + 2;
                break;
            }
        }
    }
}
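
The chains built by thread() are dismantled once an object's final
address is known. A hedged sketch of that inverse step (cf. unthread in
the same Compact.c): a link tagged 2 points at another threaded field,
a link tagged 1 at the field holding the saved, pointer-tagged info
pointer, which ends the chain.

STATIC_INLINE void
unthread_sketch (StgPtr p, StgWord free)
{
    StgWord q = *p;             // head of the chain at the info-word slot
    for (;;) {
        switch (GET_CLOSURE_TAG((StgClosure *)q)) {
        case 0:                 // no chain: *p is already an info pointer
            return;
        case 1: {               // last link: its field saved the info ptr
            StgPtr q0 = (StgPtr)(q - 1);
            StgWord info = *q0;
            *q0 = free;         // field now points at the object's new home
            *p = info;          // restore the (tagged) info word
            return;
        }
        case 2: {               // middle link: its field holds the next link
            StgPtr q0 = (StgPtr)(q - 2);
            q = *q0;
            *q0 = free;
            break;
        }
        default:
            barf("unthread_sketch: corrupt chain");
        }
    }
}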
Example #13
File: CNF.c Project: goldfirere/ghc
StgCompactNFData *
compactNew (Capability *cap, StgWord size)
{
    StgWord aligned_size;
    StgCompactNFDataBlock *block;
    StgCompactNFData *self;
    bdescr *bd;

    aligned_size = BLOCK_ROUND_UP(size + sizeof(StgCompactNFData)
                                  + sizeof(StgCompactNFDataBlock));

    // Don't allow sizes larger than a megablock, because we can't use the
    // memory after the first mblock for storing objects.
    if (aligned_size >= BLOCK_SIZE * BLOCKS_PER_MBLOCK)
        aligned_size = BLOCK_SIZE * BLOCKS_PER_MBLOCK;

    block = compactAllocateBlockInternal(cap, aligned_size, NULL,
                                         ALLOCATE_NEW);

    self = firstBlockGetCompact(block);
    SET_HDR((StgClosure*)self, &stg_COMPACT_NFDATA_CLEAN_info, CCS_SYSTEM);
    self->autoBlockW = aligned_size / sizeof(StgWord);
    self->nursery = block;
    self->last = block;
    self->hash = NULL;

    block->owner = self;

    bd = Bdescr((P_)block);
    bd->free = (StgPtr)((W_)self + sizeof(StgCompactNFData));
    self->hp = bd->free;
    self->hpLim = bd->start + bd->blocks * BLOCK_SIZE_W;

    self->totalW = bd->blocks * BLOCK_SIZE_W;

    debugTrace(DEBUG_compact, "compactNew: size %" FMT_Word, size);

    return self;
}
Example #14
File: Sanity.c Project: jweijers/ghc
static void
checkMutableList( bdescr *mut_bd, nat gen )
{
    bdescr *bd;
    StgPtr q;
    StgClosure *p;

    for (bd = mut_bd; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgClosure *)*q;
            ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
            checkClosure(p);

            switch (get_itbl(p)->type) {
            case TSO:
                ((StgTSO *)p)->flags |= TSO_MARKED;
                break;
            case STACK:
                ((StgStack *)p)->dirty |= TSO_MARKED;
                break;
            }
        }
    }
}
Example #15
File: CNF.c Project: goldfirere/ghc
StgWord
countCompactBlocks(bdescr *outer)
{
    StgCompactNFDataBlock *block;
    W_ count;

    count = 0;
    while (outer) {
        bdescr *inner;

        block = (StgCompactNFDataBlock*)(outer->start);
        do {
            inner = Bdescr((P_)block);
            ASSERT(inner->flags & BF_COMPACT);

            count += inner->blocks;
            block = block->next;
        } while(block);

        outer = outer->link;
    }

    return count;
}
Example #16
File: Storage.c Project: LeapYear/ghc
StgPtr
allocatePinned (Capability *cap, W_ n)
{
    StgPtr p;
    bdescr *bd;

    // If the request is for a large object, then allocate()
    // will give us a pinned object anyway.
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        p = allocate(cap, n);
        Bdescr(p)->flags |= BF_PINNED;
        return p;
    }

    TICK_ALLOC_HEAP_NOCTR(WDS(n));
    CCS_ALLOC(cap->r.rCCCS,n);
    if (cap->r.rCurrentTSO != NULL) {
        // cap->r.rCurrentTSO->alloc_limit -= n*sizeof(W_);
        ASSIGN_Int64((W_*)&(cap->r.rCurrentTSO->alloc_limit),
                     (PK_Int64((W_*)&(cap->r.rCurrentTSO->alloc_limit))
                      - n*sizeof(W_)));
    }

    bd = cap->pinned_object_block;
    
    // If we don't have a block of pinned objects yet, or the current
    // one isn't large enough to hold the new object, get a new one.
    if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {

        // stash the old block on cap->pinned_object_blocks.  On the
        // next GC cycle these objects will be moved to
        // g0->large_objects.
        if (bd != NULL) {
            // add it to the allocation stats when the block is full
            finishedNurseryBlock(cap, bd);
            dbl_link_onto(bd, &cap->pinned_object_blocks);
        }

        // We need to find another block.  We could just allocate one,
        // but that means taking a global lock and we really want to
        // avoid that (benchmarks that allocate a lot of pinned
        // objects scale really badly if we do this).
        //
        // So first, we try taking the next block from the nursery, in
        // the same way as allocate(), but note that we can only take
        // an *empty* block, because we're about to mark it as
        // BF_PINNED | BF_LARGE.
        bd = cap->r.rCurrentNursery->link;
        if (bd == NULL || bd->free != bd->start) { // must be empty!
            // The nursery is empty, or the next block is non-empty:
            // allocate a fresh block (we can't fail here).

            // XXX in the case when the next nursery block is
            // non-empty we aren't exerting any pressure to GC soon,
            // so if this case ever happens then we could in theory
            // keep allocating for ever without calling the GC. We
            // can't bump g0->n_new_large_words because that will be
            // counted towards allocation, and we're already counting
            // our pinned objects as allocation in
            // collect_pinned_object_blocks in the GC.
            ACQUIRE_SM_LOCK;
            bd = allocBlock();
            RELEASE_SM_LOCK;
            initBdescr(bd, g0, g0);
        } else {
            newNurseryBlock(bd);
            // we have a block in the nursery: steal it
            cap->r.rCurrentNursery->link = bd->link;
            if (bd->link != NULL) {
                bd->link->u.back = cap->r.rCurrentNursery;
            }
            cap->r.rNursery->n_blocks -= bd->blocks;
        }

        cap->pinned_object_block = bd;
        bd->flags  = BF_PINNED | BF_LARGE | BF_EVACUATED;

        // The pinned_object_block remains attached to the capability
        // until it is full, even if a GC occurs.  We want this
        // behaviour because otherwise the unallocated portion of the
        // block would be forever slop, and under certain workloads
        // (allocating a few ByteStrings per GC) we accumulate a lot
        // of slop.
        //
        // So, the pinned_object_block is initially marked
        // BF_EVACUATED so the GC won't touch it.  When it is full,
        // we place it on the large_objects list, and at the start of
        // the next GC the BF_EVACUATED flag will be cleared, and the
        // block will be promoted as usual (if anything in it is
        // live).
    }

    p = bd->free;
    bd->free += n;
    return p;
}
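
A hedged usage sketch (illustrative only; the real caller is the
newPinnedByteArray# primop): allocate a pinned ARR_WORDS big enough for
n payload bytes.

W_ n = 1024;   // payload size in bytes (example value)
W_ words = sizeofW(StgArrBytes) + (n + sizeof(W_) - 1) / sizeof(W_);
StgPtr arr = allocatePinned(cap, words);
SET_HDR((StgClosure *)arr, &stg_ARR_WORDS_info, CCS_SYSTEM);
((StgArrBytes *)arr)->bytes = n;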
Example #17
File: Storage.c Project: LeapYear/ghc
bdescr *
_bdescr (StgPtr p)
{
    return Bdescr(p);
}
Example #18
File: CNF.c Project: goldfirere/ghc
static void
verify_consistency_block (StgCompactNFData *str, StgCompactNFDataBlock *block)
{
    bdescr *bd;
    StgPtr p;
    const StgInfoTable *info;
    StgClosure *q;

    p = (P_)firstBlockGetCompact(block);
    bd = Bdescr((P_)block);
    while (p < bd->free) {
        q = (StgClosure*)p;

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

        info = get_itbl(q);
        switch (info->type) {
        case CONSTR_1_0:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0]));
            /* fallthrough */
        case CONSTR_0_1:
            p += sizeofW(StgClosure) + 1;
            break;

        case CONSTR_2_0:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[1]));
            /* fallthrough */
        case CONSTR_1_1:
            check_object_in_compact(str, UNTAG_CLOSURE(q->payload[0]));
            /* fallthrough */
        case CONSTR_0_2:
            p += sizeofW(StgClosure) + 2;
            break;

        case CONSTR:
        case PRIM:
        case CONSTR_NOCAF:
        {
            uint32_t i;

            for (i = 0; i < info->layout.payload.ptrs; i++) {
                check_object_in_compact(str, UNTAG_CLOSURE(q->payload[i]));
            }
            p += sizeofW(StgClosure) + info->layout.payload.ptrs +
                info->layout.payload.nptrs;
            break;
        }

        case ARR_WORDS:
            p += arr_words_sizeW((StgArrBytes*)p);
            break;

        case MUT_ARR_PTRS_FROZEN_CLEAN:
        case MUT_ARR_PTRS_FROZEN_DIRTY:
            verify_mut_arr_ptrs(str, (StgMutArrPtrs*)p);
            p += mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            break;

        case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
        case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
        {
            uint32_t i;
            StgSmallMutArrPtrs *arr = (StgSmallMutArrPtrs*)p;

            for (i = 0; i < arr->ptrs; i++)
                check_object_in_compact(str, UNTAG_CLOSURE(arr->payload[i]));

            p += sizeofW(StgSmallMutArrPtrs) + arr->ptrs;
            break;
        }

        case COMPACT_NFDATA:
            p += sizeofW(StgCompactNFData);
            break;

        default:
            barf("verify_consistency_block");
        }
    }

    return;
}
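
verify_mut_arr_ptrs is not shown above; by analogy with the
SMALL_MUT_ARR_PTRS case it presumably checks every element of the
frozen array, roughly:

// Hedged sketch: each array element must stay inside the compact.
static void
verify_mut_arr_ptrs (StgCompactNFData *str, StgMutArrPtrs *a)
{
    StgClosure **p   = &a->payload[0];
    StgClosure **end = &a->payload[a->ptrs];
    for (; p < end; p++) {
        check_object_in_compact(str, UNTAG_CLOSURE(*p));
    }
}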
Example #19
File: Sparks.c Project: albertz/ghc
void
pruneSparkQueue (Capability *cap)
{ 
    SparkPool *pool;
    StgClosurePtr spark, tmp, *elements;
    nat n, pruned_sparks; // stats only
    StgWord botInd,oldBotInd,currInd; // indices in array (always < size)
    const StgInfoTable *info;
    
    n = 0;
    pruned_sparks = 0;
    
    pool = cap->sparks;
    
    // it is possible that top > bottom, indicating an empty pool.  We
    // fix that here; this is only necessary because the loop below
    // assumes it.
    if (pool->top > pool->bottom)
        pool->top = pool->bottom;

    // Take this opportunity to reset top/bottom modulo the size of
    // the array, to avoid overflow.  This is only possible because no
    // stealing is happening during GC.
    pool->bottom  -= pool->top & ~pool->moduloSize;
    pool->top     &= pool->moduloSize;
    pool->topBound = pool->top;

    debugTrace(DEBUG_sparks,
               "pruneSparkQueue: current spark queue len=%ld; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);

    ASSERT_WSDEQUE_INVARIANTS(pool);

    elements = (StgClosurePtr *)pool->elements;

    /* We have exclusive access to the structure here, so we can reset
       bottom and top counters, and prune invalid sparks. Contents are
       copied in-place if they are valuable, otherwise discarded. The
       routine uses "real" indices t and b, computed at the start as
       top and bottom modulo the array size.

       Copying:

       At the beginning, the pool structure can look like this:
       ( bottom % size >= top % size , no wrap-around)
                  t          b
       ___________***********_________________

       or like this ( bottom % size < top % size, wrap-around )
                  b         t
       ***********__________******************
       As we need to remove useless sparks anyway, we make one pass
       between t and b, moving valuable content to b and subsequent
       cells (wrapping around when the size is reached).

                     b      t
       ***********OOO_______XX_X__X?**********
                     ^____move?____/

       After this movement, botInd becomes the new bottom, and old
       bottom becomes the new top index, both as indices in the array
       size range.
    */
    // starting here
    currInd = (pool->top) & (pool->moduloSize); // mod

    // copies of evacuated closures go to space from botInd on
    // we keep oldBotInd to know when to stop
    oldBotInd = botInd = (pool->bottom) & (pool->moduloSize); // mod

    // on entry to loop, we are within the bounds
    ASSERT( currInd < pool->size && botInd  < pool->size );

    while (currInd != oldBotInd) {
      /* must use != here, wrap-around at size
         subtle: loop not entered if queue empty
       */

      /* check element at currInd. if valuable, evacuate and move to
         botInd, otherwise move on */
      spark = elements[currInd];

      // We have to be careful here: in the parallel GC, another
      // thread might evacuate this closure while we're looking at it,
      // so grab the info pointer just once.
      if (GET_CLOSURE_TAG(spark) != 0) {
          // Tagged pointer is a value, so the spark has fizzled.  It
          // probably never happens that we get a tagged pointer in
          // the spark pool, because we would have pruned the spark
          // during the previous GC cycle if it turned out to be
          // evaluated, but it doesn't hurt to have this check for
          // robustness.
          pruned_sparks++;
          cap->sparks_fizzled++;
      } else {
          info = spark->header.info;
          if (IS_FORWARDING_PTR(info)) {
              tmp = (StgClosure*)UN_FORWARDING_PTR(info);
              /* if valuable work: shift inside the pool */
              if (closure_SHOULD_SPARK(tmp)) {
                  elements[botInd] = tmp; // keep entry (new address)
                  botInd++;
                  n++;
              } else {
                  pruned_sparks++; // discard spark
                  cap->sparks_fizzled++;
              }
          } else if (HEAP_ALLOCED(spark)) {
              if ((Bdescr((P_)spark)->flags & BF_EVACUATED)) {
                  if (closure_SHOULD_SPARK(spark)) {
                      elements[botInd] = spark; // keep entry (new address)
                      botInd++;
                      n++;
                  } else {
                      pruned_sparks++; // discard spark
                      cap->sparks_fizzled++;
                  }
              } else {
                  pruned_sparks++; // discard spark
                  cap->sparks_gcd++;
              }
          } else {
              if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
                  if (*THUNK_STATIC_LINK(spark) != NULL) {
                      elements[botInd] = spark; // keep entry (new address)
                      botInd++;
                      n++;
                  } else {
                      pruned_sparks++; // discard spark
                      cap->sparks_gcd++;
                  }
              } else {
                  pruned_sparks++; // discard spark
                  cap->sparks_fizzled++;
              }
          }
      }

      currInd++;

      // in the loop, we may reach the bounds, and instantly wrap around
      ASSERT( currInd <= pool->size && botInd <= pool->size );
      if ( currInd == pool->size ) { currInd = 0; }
      if ( botInd == pool->size )  { botInd = 0;  }

    } // while-loop over spark pool elements

    ASSERT(currInd == oldBotInd);

    pool->top = oldBotInd; // where we started writing
    pool->topBound = pool->top;

    pool->bottom = (oldBotInd <= botInd) ? botInd : (botInd + pool->size); 
    // first free place we did not use (corrected by wraparound)

    debugTrace(DEBUG_sparks, "pruned %d sparks", pruned_sparks);
    
    debugTrace(DEBUG_sparks,
               "new spark queue len=%ld; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);

    ASSERT_WSDEQUE_INVARIANTS(pool);
}
Example #20
File: Evac.c Project: DJacquard/ghc
/* ----------------------------------------------------------------------------
   Evacuate an object inside a CompactNFData

   These are treated in a similar way to large objects.  We remove the block
   from the compact_objects list of the generation it is on, and link it onto
   the live_compact_objects list of the destination generation.

   It is assumed that objects in the struct live in the same generation
   as the struct itself all the time.
   ------------------------------------------------------------------------- */
STATIC_INLINE void
evacuate_compact (StgPtr p)
{
    StgCompactNFData *str;
    bdescr *bd;
    generation *gen, *new_gen;
    uint32_t gen_no, new_gen_no;

    // We need to find the Compact# corresponding to this pointer, because it
    // will give us the first block in the compact chain, which is the one
    // that gets linked onto the compact_objects list.
    str = objectGetCompact((StgClosure*)p);
    ASSERT(get_itbl((StgClosure*)str)->type == COMPACT_NFDATA);

    bd = Bdescr((StgPtr)str);
    gen_no = bd->gen_no;

    // already evacuated? (we're about to do the same check,
    // but we avoid taking the spin-lock)
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = true;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }

    gen = bd->gen;
    gen_no = bd->gen_no;
    ACQUIRE_SPIN_LOCK(&gen->sync);

    // already evacuated?
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = true;
            TICK_GC_FAILED_PROMOTION();
        }
        RELEASE_SPIN_LOCK(&gen->sync);
        return;
    }

    // remove from compact_objects list
    if (bd->u.back) {
        bd->u.back->link = bd->link;
    } else { // first object in the list
        gen->compact_objects = bd->link;
    }
    if (bd->link) {
        bd->link->u.back = bd->u.back;
    }

    /* link it on to the evacuated compact object list of the destination gen
     */
    new_gen_no = bd->dest_no;

    if (new_gen_no < gct->evac_gen_no) {
        if (gct->eager_promotion) {
            new_gen_no = gct->evac_gen_no;
        } else {
            gct->failed_to_evac = true;
        }
    }

    new_gen = &generations[new_gen_no];

    // Note: for speed we only update the generation of the first block here
    // This means that bdescr of subsequent blocks will think they are in
    // the wrong generation
    // (This should not be a problem because there is no code that checks
    // for that - the only code touching the generation of the block is
    // in the GC, and that should never see blocks other than the first)
    bd->flags |= BF_EVACUATED;
    initBdescr(bd, new_gen, new_gen->to);

    if (str->hash) {
        gen_workspace *ws = &gct->gens[new_gen_no];
        bd->link = ws->todo_large_objects;
        ws->todo_large_objects = bd;
    } else {
        if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); }
        dbl_link_onto(bd, &new_gen->live_compact_objects);
        new_gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
        if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); }
    }

    RELEASE_SPIN_LOCK(&gen->sync);

    // Note: the object did not move in memory, because it lives
    // in pinned (BF_COMPACT) allocation, so we do not need to rewrite it
    // or muck with forwarding pointers
    // Also there is no tag to worry about on the struct (tags are used
    // for constructors and functions, but a struct is neither). There
    // might be a tag on the object pointer, but again we don't change
    // the pointer because we don't move the object so we don't need to
    // rewrite the tag.
}
Example #21
File: Evac.c Project: 23Skidoo/ghc
REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  nat gen_no;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

  if (!HEAP_ALLOCED_GC(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(THUNK_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0) {
              evacuate_static_object(FUN_STATIC_LINK((StgClosure *)q), q);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          evacuate_static_object(IND_STATIC_LINK((StgClosure *)q), q);
          return;

      case CONSTR_STATIC:
          evacuate_static_object(STATIC_LINK(info,(StgClosure *)q), q);
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a gen that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          push_mark_stack((P_)q);
      }
      return;
  }

  gen_no = bd->dest_no;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (gct->evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the gct->failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    /*
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  gen is the lowest generation that the
     * current object would be evacuated to, so we only do the full
     * check if gen is too low.
     */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (gen_no < gct->evac_gen_no) {  // optimisation
          if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }

  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      goto loop;

  // For ints and chars of low value, save space by replacing references to
  //    these with closures with references to common, shared ones in the RTS.
  //
  // * Except when compiling into Windows DLLs which don't support cross-package
  //    data references very well.
  //
  case CONSTR_0_1:
  {
#if defined(COMPILING_WINDOWS_DLL)
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
#else
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p =  TAG_CLOSURE(tag,
                            (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                           );
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                             (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                             );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      }
#endif
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,gen_no);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
    copy(p,info,q,sizeofW(StgThunk)+2,gen_no);
    return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag);
      return;

  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case FUN:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag);
      return;

  case BLACKHOLE:
  {
      StgClosure *r;
      const StgInfoTable *i;
      r = ((StgInd*)q)->indirectee;
      if (GET_CLOSURE_TAG(r) == 0) {
          i = r->header.info;
          if (IS_FORWARDING_PTR(i)) {
              r = (StgClosure *)UN_FORWARDING_PTR(i);
              i = r->header.info;
          }
          if (i == &stg_TSO_info
              || i == &stg_WHITEHOLE_info
              || i == &stg_BLOCKING_QUEUE_CLEAN_info
              || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
              copy(p,info,q,sizeofW(StgInd),gen_no);
              return;
          }
          ASSERT(i != &stg_IND_info);
      }
      q = r;
      *p = r;
      goto loop;
  }

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
  case TVAR:
  case BLOCKING_QUEUE:
  case WEAK:
  case PRIM:
  case MUT_PRIM:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    *p = q;
    goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case UPDATE_FRAME:
  case UNDERFLOW_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),gen_no);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrBytes *)q),gen_no);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no);
      return;

  case SMALL_MUT_ARR_PTRS_CLEAN:
  case SMALL_MUT_ARR_PTRS_DIRTY:
  case SMALL_MUT_ARR_PTRS_FROZEN:
  case SMALL_MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs *)q),gen_no);
      return;

  case TSO:
      copy(p,info,q,sizeofW(StgTSO),gen_no);
      return;

  case STACK:
    {
      StgStack *stack = (StgStack *)q;

      /* To evacuate a small STACK, we need to adjust the stack pointer
       */
      {
          StgStack *new_stack;
          StgPtr r, s;
          rtsBool mine;

          mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                          sizeofW(StgStack), gen_no);
          if (mine) {
              new_stack = (StgStack *)*p;
              move_STACK(stack, new_stack);
              for (r = stack->sp, s = new_stack->sp;
                   r < stack->stack + stack->stack_size;) {
                  *s++ = *r++;
              }
          }
          return;
      }
    }

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),gen_no);
      return;

  default:
    barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}
Example #22
File: GCAux.c Project: chansuke/ghc
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;
  StgWord tag;
  StgClosure *q;

  while (1) {
    /* The tag and the pointer are split, to be merged later when needed. */
    tag = GET_CLOSURE_TAG(p);
    q = UNTAG_CLOSURE(p);

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    // ignore static closures 
    //
    // ToDo: This means we never look through IND_STATIC, which means
    // isRetainer needs to handle the IND_STATIC case rather than
    // raising an error.
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED_GC(q)) {
        return p;
    }

    // ignore closures in generations that we're not collecting. 
    bd = Bdescr((P_)q);

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
        return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_MARKED) && is_marked((P_)q,bd)) {
        return p;
    }

    info = q->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // alive! 
        return TAG_CLOSURE(tag,(StgClosure*)UN_FORWARDING_PTR(info));
    }

    info = INFO_PTR_TO_STRUCT(info);

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
      // follow indirections 
      p = ((StgInd *)q)->indirectee;
      continue;

    case BLACKHOLE:
        p = ((StgInd*)q)->indirectee;
        if (GET_CLOSURE_TAG(p) != 0) {
            continue;
        } else {
            return NULL;
        }

    default:
      // dead. 
      return NULL;
    }
  }
}
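
isAlive is typically used after the evacuation phase, e.g. to decide
whether a weak pointer's key survived this GC. A hedged usage sketch
(w stands for some StgWeak being examined):

StgClosure *alive = isAlive(w->key);
if (alive != NULL) {
    w->key = alive;   // key survived; follow it to its new address
} else {
    // key is dead: queue w for finalization
}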
Example #23
File: Evac.c Project: 23Skidoo/ghc
STATIC_INLINE void
evacuate_large(StgPtr p)
{
  bdescr *bd;
  generation *gen, *new_gen;
  nat gen_no, new_gen_no;
  gen_workspace *ws;

  bd = Bdescr(p);
  gen = bd->gen;
  gen_no = bd->gen_no;
  ACQUIRE_SPIN_LOCK(&gen->sync);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the gct->failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (gen_no < gct->evac_gen_no) {
        gct->failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
    }
    RELEASE_SPIN_LOCK(&gen->sync);
    return;
  }

  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    gen->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination gen
   */
  new_gen_no = bd->dest_no;

  if (new_gen_no < gct->evac_gen_no) {
      if (gct->eager_promotion) {
          new_gen_no = gct->evac_gen_no;
      } else {
          gct->failed_to_evac = rtsTrue;
      }
  }

  ws = &gct->gens[new_gen_no];
  new_gen = &generations[new_gen_no];

  bd->flags |= BF_EVACUATED;
  initBdescr(bd, new_gen, new_gen->to);

  // If this is a block of pinned objects, we don't have to scan
  // these objects, because they aren't allowed to contain any
  // pointers.  For these blocks, we skip the scavenge stage and put
  // them straight on the scavenged_large_objects list.
  if (bd->flags & BF_PINNED) {
      ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
      if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); }
      dbl_link_onto(bd, &new_gen->scavenged_large_objects);
      new_gen->n_scavenged_large_blocks += bd->blocks;
      if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); }
  } else {
      bd->link = ws->todo_large_objects;
      ws->todo_large_objects = bd;
  }

  RELEASE_SPIN_LOCK(&gen->sync);
}
Example #24
File: CNF.c Project: goldfirere/ghc
static StgCompactNFDataBlock *
compactAllocateBlockInternal(Capability            *cap,
                             StgWord                aligned_size,
                             StgCompactNFDataBlock *first,
                             AllocateOp             operation)
{
    StgCompactNFDataBlock *self;
    bdescr *block, *head;
    uint32_t n_blocks;
    generation *g;

    n_blocks = aligned_size / BLOCK_SIZE;

    // Attempting to allocate an object larger than maxHeapSize
    // should definitely be disallowed.  (bug #1791)
    if ((RtsFlags.GcFlags.maxHeapSize > 0 &&
         n_blocks >= RtsFlags.GcFlags.maxHeapSize) ||
        n_blocks >= HS_INT32_MAX)   // avoid overflow when
                                    // calling allocGroup() below
    {
        reportHeapOverflow();
        // reportHeapOverflow() doesn't exit (see #2592), but we aren't
        // in a position to do a clean shutdown here: we
        // either have to allocate the memory or exit now.
        // Allocating the memory would be bad, because the user
        // has requested that we not exceed maxHeapSize, so we
        // just exit.
        stg_exit(EXIT_HEAPOVERFLOW);
    }

    // It is imperative that first is the first block in the compact
    // (or NULL if the compact does not exist yet)
    // because the evacuate code does not update the generation of
    // blocks other than the first (so we would get the statistics
    // wrong and crash in Sanity)
    if (first != NULL) {
        block = Bdescr((P_)first);
        g = block->gen;
    } else {
        g = g0;
    }

    ACQUIRE_SM_LOCK;
    block = allocGroup(n_blocks);
    switch (operation) {
    case ALLOCATE_NEW:
        ASSERT(first == NULL);
        ASSERT(g == g0);
        dbl_link_onto(block, &g0->compact_objects);
        g->n_compact_blocks += block->blocks;
        g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    case ALLOCATE_IMPORT_NEW:
        dbl_link_onto(block, &g0->compact_blocks_in_import);
        /* fallthrough */
    case ALLOCATE_IMPORT_APPEND:
        ASSERT(first == NULL);
        ASSERT(g == g0);
        g->n_compact_blocks_in_import += block->blocks;
        g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    case ALLOCATE_APPEND:
        g->n_compact_blocks += block->blocks;
        if (g == g0)
            g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    default:
#if defined(DEBUG)
        ASSERT(!"code should not be reached");
#else
        RTS_UNREACHABLE;
#endif
    }
    RELEASE_SM_LOCK;

    cap->total_allocated += aligned_size / sizeof(StgWord);

    self = (StgCompactNFDataBlock*) block->start;
    self->self = self;
    self->next = NULL;

    head = block;
    initBdescr(head, g, g);
    head->flags = BF_COMPACT;
    for (block = head + 1, n_blocks--; n_blocks > 0; block++, n_blocks--) {
        block->link = head;
        block->blocks = 0;
        block->flags = BF_COMPACT;
    }

    return self;
}
Example #25
File: Evac.c Project: 23Skidoo/ghc
static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a WHITEHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED_GC(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            // shortcut, behave as for:  if (evac) evacuate(q);
            if (evac && bd->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }


    // WHITEHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);

        // make sure someone else didn't get here first...
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO((StgClosure *)p,&stg_WHITEHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).

      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
          {
              // check that the size is in range
              ASSERT(field <  (StgWord32)(info->layout.payload.ptrs +
                                          info->layout.payload.nptrs));

              // Select the right field from the constructor
              val = selectee->payload[field];

#ifdef PROFILING
              // For the purposes of LDV profiling, we have destroyed
              // the original selector thunk, p.
              if (era > 0) {
                  // Only modify the info pointer when LDV profiling is
                  // enabled.  Note that this is incompatible with parallel GC,
                  // because it would allow other threads to start evaluating
                  // the same selector thunk.
                  SET_INFO((StgClosure*)p, (StgInfoTable *)info_ptr);
                  OVERWRITING_CLOSURE((StgClosure*)p);
                  SET_INFO((StgClosure*)p, &stg_WHITEHOLE_info);
              }
#endif

              // the closure in val is now the "value" of the
              // THUNK_SELECTOR in p.  However, val may itself be a
              // THUNK_SELECTOR, in which case we want to continue
              // evaluating until we find the real value, and then
              // update the whole chain to point to the value.
          val_loop:
              info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
              if (!IS_FORWARDING_PTR(info_ptr))
              {
                  info = INFO_PTR_TO_STRUCT((StgInfoTable *)info_ptr);
                  switch (info->type) {
                  case IND:
                  case IND_STATIC:
                      val = ((StgInd *)val)->indirectee;
                      goto val_loop;
                  case THUNK_SELECTOR:
                      ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                      prev_thunk_selector = p;
                      p = (StgSelector*)val;
                      goto selector_chain;
                  default:
                      break;
                  }
              }
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;

              *q = val;

              // update the other selectors in the chain *before*
              // evacuating the value.  This is necessary in the case
              // where the value turns out to be one of the selectors
              // in the chain (i.e. we have a loop), and evacuating it
              // would corrupt the chain.
              unchain_thunk_selectors(prev_thunk_selector, val);

              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
              if (evac) evacuate(q);
              return;
          }

      case IND:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case BLACKHOLE:
      {
          StgClosure *r;
          const StgInfoTable *i;
          r = ((StgInd*)selectee)->indirectee;

          // establish whether this BH has been updated, and is now an
          // indirection, as in evacuate().
          if (GET_CLOSURE_TAG(r) == 0) {
              i = r->header.info;
              if (IS_FORWARDING_PTR(i)) {
                  r = (StgClosure *)UN_FORWARDING_PTR(i);
                  i = r->header.info;
              }
              if (i == &stg_TSO_info
                  || i == &stg_WHITEHOLE_info
                  || i == &stg_BLOCKING_QUEUE_CLEAN_info
                  || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
                  goto bale_out;
              }
              ASSERT(i != &stg_IND_info);
          }

          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;
      }

      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

      case AP:
      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
          // not evaluated yet
          goto bale_out;

      default:
        barf("eval_thunk_selector: strange selectee %d",
             (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO((StgClosure *)p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}
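
unchain_thunk_selectors is called on every exit path above but is not
shown; conceptually it turns each WHITEHOLE'd selector on the chain into
an indirection to the final value. A hedged, simplified sketch (the real
one also handles forwarding pointers and asserts the WHITEHOLE state):

static void
unchain_thunk_selectors_sketch (StgSelector *p, StgClosure *val)
{
    while (p != NULL) {
        // the next selector in the chain was stashed in payload[0]
        StgSelector *prev = (StgSelector *)((StgClosure *)p)->payload[0];
        SET_INFO((StgClosure *)p, &stg_IND_info);
        ((StgInd *)p)->indirectee = val;
        p = prev;
    }
}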
Example #26
File: CNF.c Project: goldfirere/ghc
void *
allocateForCompact (Capability *cap,
                    StgCompactNFData *str,
                    StgWord sizeW)
{
    StgPtr to;
    StgWord next_size;
    StgCompactNFDataBlock *block;
    bdescr *bd;

    ASSERT(str->nursery != NULL);
    ASSERT(str->hp > Bdescr((P_)str->nursery)->start);
    ASSERT(str->hp <= Bdescr((P_)str->nursery)->start +
           Bdescr((P_)str->nursery)->blocks * BLOCK_SIZE_W);

 retry:
    if (str->hp + sizeW < str->hpLim) {
        to = str->hp;
        str->hp += sizeW;
        return to;
    }

    bd = Bdescr((P_)str->nursery);
    bd->free = str->hp;

    // We know it doesn't fit in the nursery.
    // If it is a large object, allocate a new block.
    if (sizeW > LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        next_size = BLOCK_ROUND_UP(sizeW*sizeof(W_) +
                                   sizeof(StgCompactNFData));
        block = compactAppendBlock(cap, str, next_size);
        bd = Bdescr((P_)block);
        to = bd->free;
        bd->free += sizeW;
        return to;
    }

    // move the nursery past full blocks
    if (block_is_full (str->nursery)) {
        do {
            str->nursery = str->nursery->next;
        } while (str->nursery && block_is_full(str->nursery));

        if (str->nursery == NULL) {
            str->nursery = compactAppendBlock(cap, str,
                                              str->autoBlockW * sizeof(W_));
        }
        bd = Bdescr((P_)str->nursery);
        str->hp = bd->free;
        str->hpLim = bd->start + bd->blocks * BLOCK_SIZE_W;
        goto retry;
    }

    // try subsequent blocks
    for (block = str->nursery->next; block != NULL; block = block->next) {
        bd = Bdescr((P_)block);
        if (has_room_for(bd,sizeW)) {
            to = bd->free;
            bd->free += sizeW;
            return to;
        }
    }

    // If all else fails, allocate a new block of the right size.
    next_size = stg_max(str->autoBlockW * sizeof(StgWord),
                    BLOCK_ROUND_UP(sizeW * sizeof(StgWord)
                                   + sizeof(StgCompactNFDataBlock)));

    block = compactAppendBlock(cap, str, next_size);
    bd = Bdescr((P_)block);
    to = bd->free;
    bd->free += sizeW;
    return to;
}
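
compactAppendBlock is used throughout this example but not shown; a
hedged sketch of what it must do, pieced together from
compactAllocateBlockInternal (Example #24) and compactNew (Example #13):

// Hedged sketch: append a fresh block to the compact's chain, with the
// usable area starting right after the StgCompactNFDataBlock header.
static StgCompactNFDataBlock *
compactAppendBlock (Capability *cap, StgCompactNFData *str, StgWord aligned_size)
{
    StgCompactNFDataBlock *block =
        compactAllocateBlockInternal(cap, aligned_size,
                                     compactGetFirstBlock(str),
                                     ALLOCATE_APPEND);
    block->owner = str;
    block->next = NULL;

    ASSERT(str->last->next == NULL);
    str->last->next = block;                 // link onto the chain
    str->last = block;
    str->totalW += aligned_size / sizeof(StgWord);

    bdescr *bd = Bdescr((P_)block);
    bd->free = (StgPtr)((W_)block + sizeof(StgCompactNFDataBlock));
    return block;
}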