Example #1
void sendMessage(Capability *from_cap, Capability *to_cap, Message *msg)
{
    ACQUIRE_LOCK(&to_cap->lock);

#ifdef DEBUG    
    {
        const StgInfoTable *i = msg->header.info;
        if (i != &stg_MSG_THROWTO_info &&
            i != &stg_MSG_BLACKHOLE_info &&
            i != &stg_MSG_TRY_WAKEUP_info &&
            i != &stg_IND_info && // can happen if a MSG_BLACKHOLE is revoked
            i != &stg_WHITEHOLE_info) {
            barf("sendMessage: %p", i);
        }
    }
#endif

    msg->link = to_cap->inbox;
    to_cap->inbox = msg;

    recordClosureMutated(from_cap,(StgClosure*)msg);

    if (to_cap->running_task == NULL) {
        to_cap->running_task = myTask();
            // precond for releaseCapability_()
        releaseCapability_(to_cap,rtsFalse);
    } else {
        interruptCapability(to_cap);
    }

    RELEASE_LOCK(&to_cap->lock);
}
Example #2
void
dirty_STACK (Capability *cap, StgStack *stack)
{
    if (stack->dirty == 0) {
        stack->dirty = 1;
        recordClosureMutated(cap,(StgClosure*)stack);
    }
}
Example #3
void
dirty_TSO (Capability *cap, StgTSO *tso)
{
    if (tso->dirty == 0) {
        tso->dirty = 1;
        recordClosureMutated(cap,(StgClosure*)tso);
    }
}
Example #4
void
dirty_TVAR(Capability *cap, StgTVar *p)
{
    if (p->header.info == &stg_TVAR_CLEAN_info) {
        p->header.info = &stg_TVAR_DIRTY_info;
        recordClosureMutated(cap,(StgClosure*)p);
    }
}
Example #5
void
setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target)
{
    if (tso->dirty == 0) {
        tso->dirty = 1;
        recordClosureMutated(cap,(StgClosure*)tso);
    }
    tso->block_info.prev = target;
}
Example #6
// Setting a TSO's link field with a write barrier.
// It is *not* necessary to call this function when
//    * setting the link field to END_TSO_QUEUE
//    * setting the link field of the currently running TSO, as it
//      will already be dirty.
void
setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
{
    if (tso->dirty == 0) {
        tso->dirty = 1;
        recordClosureMutated(cap,(StgClosure*)tso);
    }
    tso->_link = target;
}
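A hypothetical caller sketch (not taken from the GHC sources; `cap`, `queue_tail` and `new_tso` are assumed to be in scope) showing where the barrier matters and where, per the comment above, it does not:

/* Appending a TSO to a queue: go through setTSOLink so the old tail
 * is dirtied and recorded on the mutable list if it was clean. */
setTSOLink(cap, queue_tail, new_tso);

/* Terminating the queue: a plain store is fine, as the comment above
 * says -- presumably because END_TSO_QUEUE is a static closure, so no
 * old-to-young pointer can be created by this write. */
new_tso->_link = END_TSO_QUEUE;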
Example #7
/*
   This is the write barrier for MUT_VARs, a.k.a. IORefs.  A
   MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
   is.  When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
   and is put on the mutable list.
*/
void
dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
{
    Capability *cap = regTableToCapability(reg);
    if (p->header.info == &stg_MUT_VAR_CLEAN_info) {
        p->header.info = &stg_MUT_VAR_DIRTY_info;
        recordClosureMutated(cap,p);
    }
}
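The CLEAN/DIRTY scheme described in the comment above can be modelled outside the RTS. Below is a minimal, self-contained sketch of the same idea; the types and names (`Obj`, `rememberObject`, `writeObj`) are invented for illustration and are not GHC API:

#include <stdbool.h>
#include <stdio.h>

/* Model of the CLEAN/DIRTY write barrier: an object starts clean; the
 * first mutation flips it to dirty and records it in a remembered set
 * so a minor GC can later find any old-to-young pointers it holds. */
typedef struct Obj { bool dirty; int payload; } Obj;

#define REMEMBERED_MAX 1024
static Obj *remembered[REMEMBERED_MAX];
static int  remembered_len = 0;

static void rememberObject(Obj *o)       /* plays the role of recordClosureMutated */
{
    if (remembered_len < REMEMBERED_MAX)
        remembered[remembered_len++] = o;
}

static void writeObj(Obj *o, int v)      /* plays the role of dirty_MUT_VAR + store */
{
    if (!o->dirty) {                     /* only the first write pays the cost */
        o->dirty = true;
        rememberObject(o);
    }
    o->payload = v;
}

int main(void)
{
    Obj o = { false, 0 };
    writeObj(&o, 1);
    writeObj(&o, 2);                     /* already dirty: nothing recorded again */
    printf("remembered %d object(s)\n", remembered_len);   /* prints 1 */
    return 0;
}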
Example #8
File: CNF.c  Project: goldfirere/ghc
void
insertCompactHash (Capability *cap,
                   StgCompactNFData *str,
                   StgClosure *p, StgClosure *to)
{
    insertHashTable(str->hash, (StgWord)p, (const void*)to);
    if (str->header.info == &stg_COMPACT_NFDATA_CLEAN_info) {
        str->header.info = &stg_COMPACT_NFDATA_DIRTY_info;
        recordClosureMutated(cap, (StgClosure*)str);
    }
}
Example #9
/*
   This is the write barrier for MVARs.  An MVAR_CLEAN object is not
   on the mutable list; an MVAR_DIRTY is.  When written to, an
   MVAR_CLEAN turns into an MVAR_DIRTY and is put on the mutable list.
   The check for MVAR_CLEAN is inlined at the call site for speed;
   this really does make a difference on concurrency-heavy benchmarks
   such as Chameneos and cheap-concurrency.
*/
void
dirty_MVAR(StgRegTable *reg, StgClosure *p)
{
    recordClosureMutated(regTableToCapability(reg),p);
}
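dirty_MVAR itself does no CLEAN test because, as the comment says, that test is inlined at the call site. A rough sketch of such a call site follows; this is an assumption for illustration, not the actual RTS call-site code, and the exact point at which the info table is flipped is likewise assumed:

/* Hypothetical call-site sketch: the CLEAN check and the info-table
 * flip happen inline; only the rare slow path makes the out-of-line
 * call that records the closure as mutated. */
if (mvar->header.info == &stg_MVAR_CLEAN_info) {
    SET_INFO((StgClosure *)mvar, &stg_MVAR_DIRTY_info);
    dirty_MVAR(&cap->r, (StgClosure *)mvar);
}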
Example #10
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
void
threadPaused(Capability *cap, StgTSO *tso)
{
    StgClosure *frame;
    const StgRetInfoTable *info;
    const StgInfoTable *bh_info;
    const StgInfoTable *cur_bh_info USED_IF_THREADS;
    StgClosure *bh;
    StgPtr stack_end;
    uint32_t words_to_squeeze = 0;
    uint32_t weight           = 0;
    uint32_t weight_pending   = 0;
    bool prev_was_update_frame = false;
    StgWord heuristic_says_squeeze;

    // Check to see whether we have threads waiting to raise
    // exceptions, and we're not blocking exceptions, or are blocked
    // interruptibly.  This is important; if a thread is running with
    // TSO_BLOCKEX and becomes blocked interruptibly, this is the only
    // place we ensure that the blocked_exceptions get a chance.
    maybePerformBlockedException (cap, tso);
    if (tso->what_next == ThreadKilled) { return; }

    // NB. Updatable thunks *must* be blackholed, either by eager blackholing or
    // lazy blackholing.  See Note [upd-black-hole] in sm/Scav.c.

    stack_end = tso->stackobj->stack + tso->stackobj->stack_size;

    frame = (StgClosure *)tso->stackobj->sp;

    while ((P_)frame < stack_end) {
        info = get_ret_itbl(frame);

        switch (info->i.type) {

        case UPDATE_FRAME:

            // If we've already marked this frame, then stop here.
            if (frame->header.info == (StgInfoTable *)&stg_marked_upd_frame_info) {
                if (prev_was_update_frame) {
                    words_to_squeeze += sizeofW(StgUpdateFrame);
                    weight += weight_pending;
                    weight_pending = 0;
                }
                goto end;
            }

            SET_INFO(frame, (StgInfoTable *)&stg_marked_upd_frame_info);

            bh = ((StgUpdateFrame *)frame)->updatee;
            bh_info = bh->header.info;

#if defined(THREADED_RTS)
        retry:
#endif
            // Note [suspend duplicate work]
            //
            // If the info table is a WHITEHOLE or a BLACKHOLE, then
            // another thread has claimed it (via the SET_INFO()
            // below), or is in the process of doing so.  In that case
            // we want to suspend the work that the current thread has
            // done on this thunk and wait until the other thread has
            // finished.
            //
            // If eager blackholing is taking place, it could be the
            // case that the blackhole points to the current
            // TSO. e.g.:
            //
            //    this thread                   other thread
            //    --------------------------------------------------------
            //                                  c->indirectee = other_tso;
            //                                  c->header.info = EAGER_BH
            //                                  threadPaused():
            //                                    c->header.info = WHITEHOLE
            //                                    c->indirectee = other_tso
            //    c->indirectee = this_tso;
            //    c->header.info = EAGER_BH
            //                                    c->header.info = BLACKHOLE
            //    threadPaused()
            //    *** c->header.info is now BLACKHOLE,
            //        c->indirectee  points to this_tso
            //
            // So in this case do *not* suspend the work of the
            // current thread, because the current thread will become
            // deadlocked on itself.  See #5226 for an instance of
            // this bug.
            //
            // Note that great care is required when entering computations
            // suspended by this mechanism. See Note [AP_STACKs must be eagerly
            // blackholed] for details.
            if (((bh_info == &stg_BLACKHOLE_info)
                 && ((StgInd*)bh)->indirectee != (StgClosure*)tso)
                || (bh_info == &stg_WHITEHOLE_info))
            {
                debugTrace(DEBUG_squeeze,
                           "suspending duplicate work: %ld words of stack",
                           (long)((StgPtr)frame - tso->stackobj->sp));

                // If this closure is already an indirection, then
                // suspend the computation up to this point.
                // NB. check raiseAsync() to see what happens when
                // we're in a loop (#2783).
                suspendComputation(cap,tso,(StgUpdateFrame*)frame);

                // Now drop the update frame, and arrange to return
                // the value to the frame underneath:
                tso->stackobj->sp = (StgPtr)frame + sizeofW(StgUpdateFrame) - 2;
                tso->stackobj->sp[1] = (StgWord)bh;
                ASSERT(bh->header.info != &stg_TSO_info);
                tso->stackobj->sp[0] = (W_)&stg_enter_info;

                // And continue with threadPaused; there might be
                // yet more computation to suspend.
                frame = (StgClosure *)(tso->stackobj->sp + 2);
                prev_was_update_frame = false;
                continue;
            }

            // We should never have made it here in the event of blackholes that
            // we already own; they should have been marked when we blackholed
            // them and consequently we should have stopped our stack walk
            // above.
            ASSERT(!((bh_info == &stg_BLACKHOLE_info)
                     && (((StgInd*)bh)->indirectee == (StgClosure*)tso)));

            // zero out the slop so that the sanity checker can tell
            // where the next closure is.
            OVERWRITING_CLOSURE(bh);

            // an EAGER_BLACKHOLE or CAF_BLACKHOLE gets turned into a
            // BLACKHOLE here.
#if defined(THREADED_RTS)
            // first we turn it into a WHITEHOLE to claim it, and if
            // successful we write our TSO and then the BLACKHOLE info pointer.
            cur_bh_info = (const StgInfoTable *)
                cas((StgVolatilePtr)&bh->header.info,
                    (StgWord)bh_info,
                    (StgWord)&stg_WHITEHOLE_info);

            if (cur_bh_info != bh_info) {
                bh_info = cur_bh_info;
                goto retry;
            }
#endif

            // The payload of the BLACKHOLE points to the TSO
            ((StgInd *)bh)->indirectee = (StgClosure *)tso;
            write_barrier();
            SET_INFO(bh,&stg_BLACKHOLE_info);

            // .. and we need a write barrier, since we just mutated the closure:
            recordClosureMutated(cap,bh);

            // We pretend that bh has just been created.
            LDV_RECORD_CREATE(bh);

            frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
            if (prev_was_update_frame) {
                words_to_squeeze += sizeofW(StgUpdateFrame);
                weight += weight_pending;
                weight_pending = 0;
            }
            prev_was_update_frame = true;
            break;

        case UNDERFLOW_FRAME:
        case STOP_FRAME:
            goto end;

            // normal stack frames; do nothing except advance the pointer
        default:
        {
            uint32_t frame_size = stack_frame_sizeW(frame);
            weight_pending += frame_size;
            frame = (StgClosure *)((StgPtr)frame + frame_size);
            prev_was_update_frame = false;
        }
        }
    }

end:
    // Should we squeeze or not?  Arbitrary heuristic: we squeeze if
    // the number of words we have to shift down is less than the
    // number of stack words we squeeze away by doing so.
    // The threshold was bumped from 5 to 8 as a result of #2797
    heuristic_says_squeeze = ((weight <= 8 && words_to_squeeze > 0)
                            || weight < words_to_squeeze);

    debugTrace(DEBUG_squeeze,
        "words_to_squeeze: %d, weight: %d, squeeze: %s",
        words_to_squeeze, weight,
        heuristic_says_squeeze ? "YES" : "NO");

    if (RtsFlags.GcFlags.squeezeUpdFrames == true &&
        heuristic_says_squeeze) {
        stackSqueeze(cap, tso, (StgPtr)frame);
        tso->flags |= TSO_SQUEEZED;
        // This flag tells threadStackOverflow() that the stack was
        // squeezed, because it may not need to be expanded.
    } else {
        tso->flags &= ~TSO_SQUEEZED;
    }
}
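In the THREADED_RTS path above, the thunk is claimed by CAS-ing its info pointer to WHITEHOLE; only the winner stores the owning TSO and then the BLACKHOLE info pointer. The following is a small, self-contained model of that claim-then-publish step using C11 atomics; all names are invented for the example and none of this is RTS API:

#include <stdatomic.h>
#include <stdio.h>

/* Model of the claim step in threadPaused(): CAS the header from its
 * current value to a WHITEHOLE sentinel; the winner writes the owner
 * (the indirectee in the real code) and only then the final header. */
enum { THUNK = 0, WHITEHOLE = 1, BLACKHOLE = 2 };

typedef struct {
    _Atomic int header;    /* stands in for the info pointer          */
    int owner;             /* stands in for the indirectee (a TSO)    */
} Closure;

static int tryClaim(Closure *c, int me)
{
    int expected = atomic_load(&c->header);
    if (expected != THUNK)
        return 0;                                 /* already claimed          */
    if (!atomic_compare_exchange_strong(&c->header, &expected, WHITEHOLE))
        return 0;                                 /* lost the race            */
    c->owner = me;                                /* payload first ...        */
    atomic_store(&c->header, BLACKHOLE);          /* ... then publish header  */
    return 1;
}

int main(void)
{
    Closure c = { THUNK, -1 };
    printf("claimed=%d header=%d owner=%d\n",
           tryClaim(&c, 7), (int)atomic_load(&c.header), c.owner);
    return 0;
}

(The real code retries with the updated info pointer rather than simply giving up, and may instead suspend the duplicated work, as described in the Note above.)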
Example #11
nat messageBlackHole(Capability *cap, MessageBlackHole *msg)
{
    const StgInfoTable *info;
    StgClosure *p;
    StgBlockingQueue *bq;
    StgClosure *bh = UNTAG_CLOSURE(msg->bh);
    StgTSO *owner;

    debugTraceCap(DEBUG_sched, cap, "message: thread %d blocking on blackhole %p", 
                  (lnat)msg->tso->id, msg->bh);

    info = bh->header.info;

    // If we got this message in our inbox, it might be that the
    // BLACKHOLE has already been updated, and GC has shorted out the
    // indirection, so the pointer no longer points to a BLACKHOLE at
    // all.
    if (info != &stg_BLACKHOLE_info && 
        info != &stg_CAF_BLACKHOLE_info && 
        info != &__stg_EAGER_BLACKHOLE_info &&
        info != &stg_WHITEHOLE_info) {
        // if it is a WHITEHOLE, then a thread is in the process of
        // trying to BLACKHOLE it.  But we know that it was once a
        // BLACKHOLE, so there is at least a valid pointer in the
        // payload, so we can carry on.
        return 0;
    }

    // The blackhole must indirect to a TSO, a BLOCKING_QUEUE, an IND,
    // or a value.
loop:
    // NB. VOLATILE_LOAD(), because otherwise gcc hoists the load
    // and turns this into an infinite loop.
    p = UNTAG_CLOSURE((StgClosure*)VOLATILE_LOAD(&((StgInd*)bh)->indirectee));
    info = p->header.info;

    if (info == &stg_IND_info)
    {
        // This could happen, if e.g. we got a BLOCKING_QUEUE that has
        // just been replaced with an IND by another thread in
        // updateThunk().  In which case, if we read the indirectee
        // again we should get the value.
        goto loop;
    }

    else if (info == &stg_TSO_info)
    {
        owner = (StgTSO*)p;

#ifdef THREADED_RTS
        if (owner->cap != cap) {
            sendMessage(cap, owner->cap, (Message*)msg);
            debugTraceCap(DEBUG_sched, cap, "forwarding message to cap %d", owner->cap->no);
            return 1;
        }
#endif
        // owner is the owner of the BLACKHOLE, and resides on this
        // Capability.  msg->tso is the first thread to block on this
        // BLACKHOLE, so we first create a BLOCKING_QUEUE object.

        bq = (StgBlockingQueue*)allocate(cap, sizeofW(StgBlockingQueue));
            
        // initialise the BLOCKING_QUEUE object
        SET_HDR(bq, &stg_BLOCKING_QUEUE_DIRTY_info, CCS_SYSTEM);
        bq->bh = bh;
        bq->queue = msg;
        bq->owner = owner;
        
        msg->link = (MessageBlackHole*)END_TSO_QUEUE;
        
        // All BLOCKING_QUEUES are linked in a list on owner->bq, so
        // that we can search through them in the event that there is
        // a collision to update a BLACKHOLE and a BLOCKING_QUEUE
        // becomes orphaned (see updateThunk()).
        bq->link = owner->bq;
        owner->bq = bq;
        dirty_TSO(cap, owner); // we modified owner->bq

        // If the owner of the blackhole is currently runnable, then
        // bump it to the front of the run queue.  This gives the
        // blocked-on thread a little boost which should help unblock
        // this thread, and may avoid a pile-up of other threads
        // becoming blocked on the same BLACKHOLE (#3838).
        //
        // NB. we check to make sure that the owner is not the same as
        // the current thread, since in that case it will not be on
        // the run queue.
        if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
            removeFromRunQueue(cap, owner);
            pushOnRunQueue(cap,owner);
        }

        // point to the BLOCKING_QUEUE from the BLACKHOLE
        write_barrier(); // make the BQ visible
        ((StgInd*)bh)->indirectee = (StgClosure *)bq;
        recordClosureMutated(cap,bh); // bh was mutated

        debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d", 
                      (lnat)msg->tso->id, (lnat)owner->id);

        return 1; // blocked
    }
    else if (info == &stg_BLOCKING_QUEUE_CLEAN_info || 
             info == &stg_BLOCKING_QUEUE_DIRTY_info)
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        ASSERT(bq->bh == bh);

        owner = bq->owner;

        ASSERT(owner != END_TSO_QUEUE);

#ifdef THREADED_RTS
        if (owner->cap != cap) {
            sendMessage(cap, owner->cap, (Message*)msg);
            debugTraceCap(DEBUG_sched, cap, "forwarding message to cap %d", owner->cap->no);
            return 1;
        }
#endif

        msg->link = bq->queue;
        bq->queue = msg;
        recordClosureMutated(cap,(StgClosure*)msg);

        if (info == &stg_BLOCKING_QUEUE_CLEAN_info) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            recordClosureMutated(cap,(StgClosure*)bq);
        }

        debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d", 
                      (lnat)msg->tso->id, (lnat)owner->id);

        // See above, #3838
        if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
            removeFromRunQueue(cap, owner);
            pushOnRunQueue(cap,owner);
        }

        return 1; // blocked
    }
    
    return 0; // not blocked
}
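The TSO branch above follows a publish-after-initialise discipline: the BLOCKING_QUEUE is fully set up, write_barrier() orders those stores, and only then is the BLACKHOLE's indirectee pointed at it, so another thread that follows the indirection never sees a half-built queue. A self-contained model of that pattern with C11 atomics (names invented for the example; the release fence plays the role write_barrier() plays in the RTS):

#include <stdatomic.h>
#include <stdio.h>

/* Publish-after-initialise: fill in the object, fence, then publish
 * the pointer that other threads will follow. */
typedef struct Queue { int owner_id; int first_waiter; } Queue;

static Queue            queue_storage;
static _Atomic(Queue *) published;     /* stands in for bh->indirectee */

static void publishQueue(int owner_id, int first_waiter)
{
    Queue *q = &queue_storage;
    q->owner_id     = owner_id;                 /* 1. initialise fully         */
    q->first_waiter = first_waiter;
    atomic_thread_fence(memory_order_release);  /* 2. order the stores         */
    atomic_store_explicit(&published, q,        /* 3. then publish the pointer */
                          memory_order_relaxed);
}

int main(void)
{
    publishQueue(7, 42);
    Queue *q = atomic_load(&published);
    if (q != NULL)
        printf("owner=%d first=%d\n", q->owner_id, q->first_waiter);
    return 0;
}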