// If we update a closure that we know we BLACKHOLE'd, and the closure // no longer points to the current TSO as its owner, then there may be // an orphaned BLOCKING_QUEUE closure with blocked threads attached to // it. We therefore traverse the BLOCKING_QUEUEs attached to the // current TSO to see if any can now be woken up. void checkBlockingQueues (Capability *cap, StgTSO *tso) { StgBlockingQueue *bq, *next; StgClosure *p; debugTraceCap(DEBUG_sched, cap, "collision occurred; checking blocking queues for thread %ld", (W_)tso->id); for (bq = tso->bq; bq != (StgBlockingQueue*)END_TSO_QUEUE; bq = next) { next = bq->link; if (bq->header.info == &stg_IND_info) { // ToDo: could short it out right here, to avoid // traversing this IND multiple times. continue; } p = bq->bh; if (p->header.info != &stg_BLACKHOLE_info || ((StgInd *)p)->indirectee != (StgClosure*)bq) { wakeBlockingQueue(cap,bq); } } }
// Deliver an asynchronous exception described by a locked
// MSG_THROWTO message to its target thread.
//
// Returns:
//   THROWTO_SUCCESS  - the exception was raised in the target; the
//                      caller is responsible for the (now-done) message.
//   THROWTO_BLOCKED  - the exception could not be raised now; the
//                      message has either been forwarded to the
//                      target's Capability or queued on the target's
//                      blocked_exceptions list, and remains locked.
//
// Precondition: msg is locked by the caller (see executeMessage()).
//
// FIX(review): the conditional guarding the BlockedOnRead/Write/Delay
// cases previously tested the misspelled symbol "THREADEDED_RTS",
// which is never defined, so those cases were always compiled in —
// even in the threaded RTS, where removeFromQueues() does not handle
// them.  Corrected to THREADED_RTS.
nat throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    // If the target lives on another Capability, we cannot touch it
    // directly; forward the message to its owner instead.
    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    {
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            // exceptions are masked: park the message on the target
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked.  We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads).  To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
                // debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i == &stg_MSG_NULL_info) {
            // we know there's a MSG_TRY_WAKEUP on the way, so we
            // might as well just do it now.  The message will
            // be a no-op when it arrives.
            unlockClosure((StgClosure*)m, i);
            tryWakeupThread(cap, target);
            goto retry;
        }

        if (i != &stg_MSG_THROWTO_info) {
            // if it's a MSG_NULL, this TSO has been woken up by another Cap
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        doneWithMsgThrowTo(m);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl((StgClosure *)mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if ((target->why_blocked != BlockedOnMVar
             && target->why_blocked != BlockedOnMVarRead)
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if (target->_link == END_TSO_QUEUE) {
            // the MVar operation has already completed.  There is a
            // MSG_TRY_WAKEUP on the way, but we can just wake up the
            // thread now anyway and ignore the message when it
            // arrives.
            unlockClosure((StgClosure *)mvar, info);
            tryWakeupThread(cap, target);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            // revoke the MVar operation
            removeFromMVarBlockedQueue(target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        if (target->flags & TSO_BLOCKEX) {
            // BlockedOnBlackHole is not interruptible.
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            // Revoke the message by replacing it with IND.  We're not
            // locking anything here, so we might still get a TRY_WAKEUP
            // message from the owner of the blackhole some time in the
            // future, but that doesn't matter.
            ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
            OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall_Interruptible:
#ifdef THREADED_RTS
    {
        Task *task = NULL;
        // walk suspended_ccalls to find the correct worker thread
        InCall *incall;
        for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
            if (incall->suspended_tso == target) {
                task = incall->task;
                break;
            }
        }
        if (task != NULL) {
            blockedThrowTo(cap, target, msg);
            if (!((target->flags & TSO_BLOCKEX) &&
                  ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
                interruptWorkerTask(task);
            }
            return THROWTO_BLOCKED;
        } else {
            debugTraceCap(DEBUG_sched, cap, "throwTo: could not find worker thread to kill");
        }
        // fall to next
    }
#endif
    case BlockedOnCCall:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    case ThreadMigrating:
        // if is is ThreadMigrating and tso->cap is ours, then it
        // *must* be migrating *to* this capability.  If it were
        // migrating away from the capability, then tso->cap would
        // point to the destination.
        //
        // There is a MSG_WAKEUP in the message queue for this thread,
        // but we can just do it preemptively:
        tryWakeupThread(cap, target);
        // and now retry, the thread should be runnable.
        goto retry;

    default:
        barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
    }
    barf("throwTo");
}
// Process one inter-capability Message that arrived in this
// Capability's inbox.  Dispatches on the message's info table:
// wakeups, throwto requests, black-hole blocking requests, revoked
// messages, and in-flight (WHITEHOLE) messages are all handled here.
void executeMessage (Capability *cap, Message *m)
{
    const StgInfoTable *i;

loop:
    // The message header may be rewritten concurrently (e.g. revoked
    // to IND, or locked to WHITEHOLE), so re-read it on every retry.
    write_barrier(); // allow m->header to be modified by another thread
    i = m->header.info;
    if (i == &stg_MSG_TRY_WAKEUP_info)
    {
        StgTSO *tso = ((MessageWakeup *)m)->tso;
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld",
                      (lnat)tso->id);
        tryWakeupThread(cap, tso);
    }
    else if (i == &stg_MSG_THROWTO_info)
    {
        MessageThrowTo *t = (MessageThrowTo *)m;
        nat r;
        const StgInfoTable *i;  // NOTE: shadows the outer i

        // Lock the message so nobody revokes it while we act on it.
        i = lockClosure((StgClosure*)m);
        if (i != &stg_MSG_THROWTO_info) {
            // The message changed between the unlocked read and the
            // lock (e.g. it was revoked) — put it back and re-dispatch.
            unlockClosure((StgClosure*)m, i);
            goto loop;
        }

        debugTraceCap(DEBUG_sched, cap, "message: throwTo %ld -> %ld",
                      (lnat)t->source->id, (lnat)t->target->id);

        ASSERT(t->source->why_blocked == BlockedOnMsgThrowTo);
        ASSERT(t->source->block_info.closure == (StgClosure *)m);

        r = throwToMsg(cap, t);

        switch (r) {
        case THROWTO_SUCCESS: {
            // this message is done
            StgTSO *source = t->source;
            doneWithMsgThrowTo(t);
            // The sender was blocked waiting on this throwto; wake it.
            tryWakeupThread(cap, source);
            break;
        }
        case THROWTO_BLOCKED:
            // unlock the message
            unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
            break;
        }
    }
    else if (i == &stg_MSG_BLACKHOLE_info)
    {
        nat r;
        MessageBlackHole *b = (MessageBlackHole*)m;

        r = messageBlackHole(cap, b);
        if (r == 0) {
            // 0 means the thread did not end up blocked (the black
            // hole was already resolved), so wake it up here.
            tryWakeupThread(cap, b->tso);
        }
        return;
    }
    else if (i == &stg_IND_info || i == &stg_MSG_NULL_info)
    {
        // message was revoked
        return;
    }
    else if (i == &stg_WHITEHOLE_info)
    {
        // Another thread holds the message lock; spin until released.
        goto loop;
    }
    else
    {
        barf("executeMessage: %p", i);
    }
}
// Handle a MSG_BLACKHOLE: msg->tso wants to block on the black hole
// msg->bh.  Either attaches the message to a (possibly new)
// BLOCKING_QUEUE on the black hole's owner, or discovers the black
// hole has already been resolved.
//
// Returns 1 if the thread is now blocked (or the message was
// forwarded to the owner's Capability), 0 if the caller should wake
// the thread because the value is already available.
nat messageBlackHole(Capability *cap, MessageBlackHole *msg)
{
    const StgInfoTable *info;
    StgClosure *p;
    StgBlockingQueue *bq;
    StgClosure *bh = UNTAG_CLOSURE(msg->bh);
    StgTSO *owner;

    debugTraceCap(DEBUG_sched, cap, "message: thread %d blocking on blackhole %p",
                  (lnat)msg->tso->id, msg->bh);

    info = bh->header.info;

    // If we got this message in our inbox, it might be that the
    // BLACKHOLE has already been updated, and GC has shorted out the
    // indirection, so the pointer no longer points to a BLACKHOLE at
    // all.
    if (info != &stg_BLACKHOLE_info &&
        info != &stg_CAF_BLACKHOLE_info &&
        info != &__stg_EAGER_BLACKHOLE_info &&
        info != &stg_WHITEHOLE_info) {
        // if it is a WHITEHOLE, then a thread is in the process of
        // trying to BLACKHOLE it.  But we know that it was once a
        // BLACKHOLE, so there is at least a valid pointer in the
        // payload, so we can carry on.
        return 0;
    }

    // The blackhole must indirect to a TSO, a BLOCKING_QUEUE, an IND,
    // or a value.
loop:
    // NB. VOLATILE_LOAD(), because otherwise gcc hoists the load
    // and turns this into an infinite loop.
    p = UNTAG_CLOSURE((StgClosure*)VOLATILE_LOAD(&((StgInd*)bh)->indirectee));
    info = p->header.info;

    if (info == &stg_IND_info)
    {
        // This could happen, if e.g. we got a BLOCKING_QUEUE that has
        // just been replaced with an IND by another thread in
        // updateThunk().  In which case, if we read the indirectee
        // again we should get the value.
        goto loop;
    }

    else if (info == &stg_TSO_info)
    {
        // The indirectee is the owning TSO directly: nobody has
        // blocked on this black hole yet.
        owner = (StgTSO*)p;

#ifdef THREADED_RTS
        if (owner->cap != cap) {
            // The owner lives on another Capability; let it do the
            // queueing by forwarding this message.
            sendMessage(cap, owner->cap, (Message*)msg);
            debugTraceCap(DEBUG_sched, cap, "forwarding message to cap %d", owner->cap->no);
            return 1;
        }
#endif
        // owner is the owner of the BLACKHOLE, and resides on this
        // Capability.  msg->tso is the first thread to block on this
        // BLACKHOLE, so we first create a BLOCKING_QUEUE object.

        bq = (StgBlockingQueue*)allocate(cap, sizeofW(StgBlockingQueue));

        // initialise the BLOCKING_QUEUE object
        SET_HDR(bq, &stg_BLOCKING_QUEUE_DIRTY_info, CCS_SYSTEM);
        bq->bh = bh;
        bq->queue = msg;
        bq->owner = owner;
        msg->link = (MessageBlackHole*)END_TSO_QUEUE;

        // All BLOCKING_QUEUES are linked in a list on owner->bq, so
        // that we can search through them in the event that there is
        // a collision to update a BLACKHOLE and a BLOCKING_QUEUE
        // becomes orphaned (see updateThunk()).
        bq->link = owner->bq;
        owner->bq = bq;
        dirty_TSO(cap, owner); // we modified owner->bq

        // If the owner of the blackhole is currently runnable, then
        // bump it to the front of the run queue.  This gives the
        // blocked-on thread a little boost which should help unblock
        // this thread, and may avoid a pile-up of other threads
        // becoming blocked on the same BLACKHOLE (#3838).
        //
        // NB. we check to make sure that the owner is not the same as
        // the current thread, since in that case it will not be on
        // the run queue.
        if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
            removeFromRunQueue(cap, owner);
            pushOnRunQueue(cap,owner);
        }

        // point to the BLOCKING_QUEUE from the BLACKHOLE
        write_barrier(); // make the BQ visible
        ((StgInd*)bh)->indirectee = (StgClosure *)bq;
        recordClosureMutated(cap,bh); // bh was mutated

        debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d",
                      (lnat)msg->tso->id, (lnat)owner->id);

        return 1; // blocked
    }
    else if (info == &stg_BLOCKING_QUEUE_CLEAN_info ||
             info == &stg_BLOCKING_QUEUE_DIRTY_info)
    {
        // A BLOCKING_QUEUE already exists; append this message to it.
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        ASSERT(bq->bh == bh);

        owner = bq->owner;

        ASSERT(owner != END_TSO_QUEUE);

#ifdef THREADED_RTS
        if (owner->cap != cap) {
            sendMessage(cap, owner->cap, (Message*)msg);
            debugTraceCap(DEBUG_sched, cap, "forwarding message to cap %d", owner->cap->no);
            return 1;
        }
#endif

        msg->link = bq->queue;
        bq->queue = msg;
        recordClosureMutated(cap,(StgClosure*)msg);

        if (info == &stg_BLOCKING_QUEUE_CLEAN_info) {
            // Queue was clean; mark it dirty so GC scavenges the new link.
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            recordClosureMutated(cap,(StgClosure*)bq);
        }

        debugTraceCap(DEBUG_sched, cap, "thread %d blocked on thread %d",
                      (lnat)msg->tso->id, (lnat)owner->id);

        // See above, #3838
        if (owner->why_blocked == NotBlocked && owner->id != msg->tso->id) {
            removeFromRunQueue(cap, owner);
            pushOnRunQueue(cap,owner);
        }

        return 1; // blocked
    }

    return 0; // not blocked
}
// Attempt to wake up a thread.  If it belongs to another Capability,
// send a MSG_TRY_WAKEUP there; otherwise, if its blocking condition
// has actually been resolved, put it back on this Capability's run
// queue.  Harmless if the thread turns out to still be blocked: it
// will simply block again when it runs.
void tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        // Not ours: delegate the wakeup to the owning Capability.
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        if (tso->_link == END_TSO_QUEUE) {
            // _link cleared: the MVar operation completed and the
            // thread was removed from the MVar's queue.
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            // still on the MVar's blocked queue; leave it alone
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        // Probe the throwto message: lock/unlock to get a stable
        // read of its info table.
        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            // the message is still pending; the thread stays blocked
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now, if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}