void
throwToSelf (Capability *cap, StgTSO *tso, StgClosure *exception)
{
    MessageThrowTo *m;
    m = throwTo(cap, tso, tso, exception);
    if (m != NULL) {
        // throwTo leaves it locked
        unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
    }
}
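/* A minimal sketch (not part of the RTS sources) of the general
 * caller-side contract of throwTo(): when it returns a non-NULL
 * MessageThrowTo, the exception has not been raised yet and the
 * message is still locked.  throwToSelf() above simply unlocks and
 * forgets the message; a caller that wants synchronous semantics
 * instead parks the source thread on the locked message, which is
 * the state executeMessage() asserts below (why_blocked ==
 * BlockedOnMsgThrowTo).  blockOnThrowToMsg() is a hypothetical name
 * for that step, not a real RTS function, so the sketch is
 * compile-guarded.
 */
#if 0
static void
throwToBlocking (Capability *cap, StgTSO *source, StgTSO *target,
                 StgClosure *exception)
{
    MessageThrowTo *m;
    m = throwTo(cap, source, target, exception);
    if (m != NULL) {
        // exception not yet delivered: block the source thread on the
        // locked message until the target's capability handles it
        blockOnThrowToMsg(cap, source, m);   // hypothetical helper
    }
}
#endif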
nat
throwToMsg (Capability *cap, MessageThrowTo *msg)
{
    StgWord status;
    StgTSO *target = msg->target;
    Capability *target_cap;

    goto check_target;

retry:
    write_barrier();
    debugTrace(DEBUG_sched, "throwTo: retrying...");

check_target:
    ASSERT(target != END_TSO_QUEUE);

    // Thread already dead?
    if (target->what_next == ThreadComplete
        || target->what_next == ThreadKilled) {
        return THROWTO_SUCCESS;
    }

    debugTraceCap(DEBUG_sched, cap,
                  "throwTo: from thread %lu to thread %lu",
                  (unsigned long)msg->source->id,
                  (unsigned long)msg->target->id);

#ifdef DEBUG
    traceThreadStatus(DEBUG_sched, target);
#endif

    target_cap = target->cap;
    if (target->cap != cap) {
        throwToSendMsg(cap, target_cap, msg);
        return THROWTO_BLOCKED;
    }

    status = target->why_blocked;

    switch (status) {
    case NotBlocked:
    {
        if ((target->flags & TSO_BLOCKEX) == 0) {
            // It's on our run queue and not blocking exceptions
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        } else {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;
        MessageThrowTo *m;

        m = target->block_info.throwto;

        // target is local to this cap, but has sent a throwto
        // message to another cap.
        //
        // The source message is locked.  We need to revoke the
        // target's message so that we can raise the exception, so
        // we attempt to lock it.

        // There's a possibility of a deadlock if two threads are both
        // trying to throwTo each other (or more generally, a cycle of
        // threads).  To break the symmetry we compare the addresses
        // of the MessageThrowTo objects, and the one for which m <
        // msg gets to spin, while the other can only try to lock
        // once, but must then back off and unlock both before trying
        // again.
        if (m < msg) {
            i = lockClosure((StgClosure *)m);
        } else {
            i = tryLockClosure((StgClosure *)m);
            if (i == NULL) {
                // debugBelch("collision\n");
                throwToSendMsg(cap, target->cap, msg);
                return THROWTO_BLOCKED;
            }
        }

        if (i == &stg_MSG_NULL_info) {
            // we know there's a MSG_TRY_WAKEUP on the way, so we
            // might as well just do it now.  The message will
            // be a no-op when it arrives.
            unlockClosure((StgClosure*)m, i);
            tryWakeupThread(cap, target);
            goto retry;
        }

        if (i != &stg_MSG_THROWTO_info) {
            // MSG_NULL was handled above, so the message must have
            // been revoked (turned into an IND); retry
            unlockClosure((StgClosure*)m, i);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            unlockClosure((StgClosure*)m, i);
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        }

        // nobody else can wake up this TSO after we claim the message
        doneWithMsgThrowTo(m);

        raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
        return THROWTO_SUCCESS;
    }

    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        /*
          To establish ownership of this TSO, we need to acquire a
          lock on the MVar that it is blocked on.
        */
        StgMVar *mvar;
        StgInfoTable *info USED_IF_THREADS;

        mvar = (StgMVar *)target->block_info.closure;

        // ASSUMPTION: tso->block_info must always point to a
        // closure.  In the threaded RTS it does.
        switch (get_itbl((StgClosure *)mvar)->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
            break;
        default:
            goto retry;
        }

        info = lockClosure((StgClosure *)mvar);

        // we have the MVar, let's check whether the thread
        // is still blocked on the same MVar.
        if ((target->why_blocked != BlockedOnMVar
             && target->why_blocked != BlockedOnMVarRead)
            || (StgMVar *)target->block_info.closure != mvar) {
            unlockClosure((StgClosure *)mvar, info);
            goto retry;
        }

        if (target->_link == END_TSO_QUEUE) {
            // the MVar operation has already completed.  There is a
            // MSG_TRY_WAKEUP on the way, but we can just wake up the
            // thread now anyway and ignore the message when it
            // arrives.
            unlockClosure((StgClosure *)mvar, info);
            tryWakeupThread(cap, target);
            goto retry;
        }

        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_BLOCKED;
        } else {
            // revoke the MVar operation
            removeFromMVarBlockedQueue(target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockClosure((StgClosure *)mvar, info);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnBlackHole:
    {
        if (target->flags & TSO_BLOCKEX) {
            // BlockedOnBlackHole is not interruptible.
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            // Revoke the message by replacing it with IND.  We're not
            // locking anything here, so we might still get a TRY_WAKEUP
            // message from the owner of the blackhole some time in the
            // future, but that doesn't matter.
            ASSERT(target->block_info.bh->header.info == &stg_MSG_BLACKHOLE_info);
            OVERWRITE_INFO(target->block_info.bh, &stg_IND_info);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
    }

    case BlockedOnSTM:
        lockTSO(target);
        // Unblocking BlockedOnSTM threads requires the TSO to be
        // locked; see STM.c:unpark_tso().
        if (target->why_blocked != BlockedOnSTM) {
            unlockTSO(target);
            goto retry;
        }
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            unlockTSO(target);
            return THROWTO_BLOCKED;
        } else {
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            unlockTSO(target);
            return THROWTO_SUCCESS;
        }

    case BlockedOnCCall_Interruptible:
#ifdef THREADED_RTS
    {
        Task *task = NULL;
        // walk suspended_ccalls to find the correct worker thread
        InCall *incall;
        for (incall = cap->suspended_ccalls; incall != NULL; incall = incall->next) {
            if (incall->suspended_tso == target) {
                task = incall->task;
                break;
            }
        }
        if (task != NULL) {
            blockedThrowTo(cap, target, msg);
            if (!((target->flags & TSO_BLOCKEX) &&
                  ((target->flags & TSO_INTERRUPTIBLE) == 0))) {
                interruptWorkerTask(task);
            }
            return THROWTO_BLOCKED;
        } else {
            debugTraceCap(DEBUG_sched, cap,
                          "throwTo: could not find worker thread to kill");
        }
        // fall through to the BlockedOnCCall case below
    }
#endif
    case BlockedOnCCall:
        blockedThrowTo(cap,target,msg);
        return THROWTO_BLOCKED;

#ifndef THREADED_RTS
    case BlockedOnRead:
    case BlockedOnWrite:
    case BlockedOnDelay:
#if defined(mingw32_HOST_OS)
    case BlockedOnDoProc:
#endif
        if ((target->flags & TSO_BLOCKEX) &&
            ((target->flags & TSO_INTERRUPTIBLE) == 0)) {
            blockedThrowTo(cap,target,msg);
            return THROWTO_BLOCKED;
        } else {
            removeFromQueues(cap,target);
            raiseAsync(cap, target, msg->exception, rtsFalse, NULL);
            return THROWTO_SUCCESS;
        }
#endif

    case ThreadMigrating:
        // if it is ThreadMigrating and tso->cap is ours, then it
        // *must* be migrating *to* this capability.  If it were
        // migrating away from the capability, then tso->cap would
        // point to the destination.
        //
        // There is a MSG_TRY_WAKEUP in the message queue for this
        // thread, but we can just do it preemptively:
        tryWakeupThread(cap, target);
        // and now retry, the thread should be runnable.
        goto retry;

    default:
        barf("throwTo: unrecognised why_blocked (%d)", target->why_blocked);
    }
    barf("throwTo");
}
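/* An illustrative, self-contained sketch (not part of the RTS) of the
 * address-ordering trick used in the BlockedOnMsgThrowTo case above.
 * Each party already holds one lock and wants the other's; comparing
 * the two addresses decides who may block and who must only try once
 * and then back off, which breaks the symmetry and rules out
 * deadlock.  Plain pthreads stand in for the RTS closure locks, and
 * the includes would normally sit at the top of the file.
 */
#include <pthread.h>
#include <stdint.h>

/* Returns 0 if 'wanted' was acquired; -1 if the caller must release
 * 'held' and restart from scratch (the analogue of resending the
 * message and returning THROWTO_BLOCKED above). */
static int
lock_second (pthread_mutex_t *held, pthread_mutex_t *wanted)
{
    if ((uintptr_t)wanted < (uintptr_t)held) {
        // lower address: safe to block, because any peer contending
        // for 'held' is guaranteed to back off rather than wait on us
        pthread_mutex_lock(wanted);
        return 0;
    } else {
        // higher address: try once, never block while holding 'held'
        if (pthread_mutex_trylock(wanted) != 0) {
            return -1;   // collision: back off and retry later
        }
        return 0;
    }
}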
void
executeMessage (Capability *cap, Message *m)
{
    const StgInfoTable *i;

loop:
    write_barrier(); // allow m->header to be modified by another thread
    i = m->header.info;
    if (i == &stg_MSG_TRY_WAKEUP_info)
    {
        StgTSO *tso = ((MessageWakeup *)m)->tso;
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld",
                      (lnat)tso->id);
        tryWakeupThread(cap, tso);
    }
    else if (i == &stg_MSG_THROWTO_info)
    {
        MessageThrowTo *t = (MessageThrowTo *)m;
        nat r;
        const StgInfoTable *i;

        i = lockClosure((StgClosure*)m);
        if (i != &stg_MSG_THROWTO_info) {
            unlockClosure((StgClosure*)m, i);
            goto loop;
        }

        debugTraceCap(DEBUG_sched, cap, "message: throwTo %ld -> %ld",
                      (lnat)t->source->id, (lnat)t->target->id);

        ASSERT(t->source->why_blocked == BlockedOnMsgThrowTo);
        ASSERT(t->source->block_info.closure == (StgClosure *)m);

        r = throwToMsg(cap, t);

        switch (r) {
        case THROWTO_SUCCESS: {
            // this message is done
            StgTSO *source = t->source;
            doneWithMsgThrowTo(t);
            tryWakeupThread(cap, source);
            break;
        }
        case THROWTO_BLOCKED:
            // unlock the message
            unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
            break;
        }
    }
    else if (i == &stg_MSG_BLACKHOLE_info)
    {
        nat r;
        MessageBlackHole *b = (MessageBlackHole*)m;

        r = messageBlackHole(cap, b);
        if (r == 0) {
            tryWakeupThread(cap, b->tso);
        }
        return;
    }
    else if (i == &stg_IND_info || i == &stg_MSG_NULL_info)
    {
        // message was revoked
        return;
    }
    else if (i == &stg_WHITEHOLE_info)
    {
        goto loop;
    }
    else
    {
        barf("executeMessage: %p", i);
    }
}
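/* A simplified sketch (not the actual scheduler code) of where
 * executeMessage() fits: a capability drains its message inbox and
 * executes the messages one by one.  Field names follow the
 * THREADED_RTS Capability structure as I understand it; the locking
 * needed to detach the inbox safely is omitted, so the sketch is
 * compile-guarded.
 */
#if 0
static void
drainInbox (Capability *cap)
{
    Message *m, *next;
    // detach the whole inbox, then run each message in order
    m = cap->inbox;
    cap->inbox = (Message*)END_TSO_QUEUE;
    for (; m != (Message*)END_TSO_QUEUE; m = next) {
        next = m->link;
        executeMessage(cap, m);
    }
}
#endif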
void
tryWakeupThread (Capability *cap, StgTSO *tso)
{
    traceEventThreadWakeup (cap, tso, tso->cap->no);

#ifdef THREADED_RTS
    if (tso->cap != cap)
    {
        MessageWakeup *msg;
        msg = (MessageWakeup *)allocate(cap,sizeofW(MessageWakeup));
        SET_HDR(msg, &stg_MSG_TRY_WAKEUP_info, CCS_SYSTEM);
        msg->tso = tso;
        sendMessage(cap, tso->cap, (Message*)msg);
        debugTraceCap(DEBUG_sched, cap, "message: try wakeup thread %ld on cap %d",
                      (W_)tso->id, tso->cap->no);
        return;
    }
#endif

    switch (tso->why_blocked)
    {
    case BlockedOnMVar:
    case BlockedOnMVarRead:
    {
        if (tso->_link == END_TSO_QUEUE) {
            tso->block_info.closure = (StgClosure*)END_TSO_QUEUE;
            goto unblock;
        } else {
            return;
        }
    }

    case BlockedOnMsgThrowTo:
    {
        const StgInfoTable *i;

        i = lockClosure(tso->block_info.closure);
        unlockClosure(tso->block_info.closure, i);
        if (i != &stg_MSG_NULL_info) {
            debugTraceCap(DEBUG_sched, cap, "thread %ld still blocked on throwto (%p)",
                          (W_)tso->id, tso->block_info.throwto->header.info);
            return;
        }

        // remove the block frame from the stack (the stg_block_throwto
        // frame is 3 words: the frame's info pointer, the target TSO,
        // and the exception)
        ASSERT(tso->stackobj->sp[0] == (StgWord)&stg_block_throwto_info);
        tso->stackobj->sp += 3;
        goto unblock;
    }

    case BlockedOnBlackHole:
    case BlockedOnSTM:
    case ThreadMigrating:
        goto unblock;

    default:
        // otherwise, do nothing
        return;
    }

unblock:
    // just run the thread now, if the BH is not really available,
    // we'll block again.
    tso->why_blocked = NotBlocked;
    appendToRunQueue(cap,tso);

    // We used to set the context switch flag here, which would
    // trigger a context switch a short time in the future (at the end
    // of the current nursery block).  The idea is that we have just
    // woken up a thread, so we may need to load-balance and migrate
    // threads to other CPUs.  On the other hand, setting the context
    // switch flag here unfairly penalises the current thread by
    // yielding its time slice too early.
    //
    // The synthetic benchmark nofib/smp/chan can be used to show the
    // difference quite clearly.

    // cap->context_switch = 1;
}
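/* A minimal, self-contained sketch (assumptions: C11 atomics and a
 * distinguished WHITEHOLE value) of the closure-locking protocol that
 * lockClosure()/tryLockClosure()/unlockClosure() implement: the lock
 * is taken by atomically swapping the closure's info pointer for
 * WHITEHOLE, and released by storing an info pointer back.  This is
 * why the code above inspects the info table returned by the lock to
 * learn what the message had become by the time it was locked.
 */
#include <stdatomic.h>

typedef struct {
    _Atomic(const void *) info;        // stand-in for the closure header
} SketchClosure;

static const char sketch_whitehole;    // unique address playing WHITEHOLE

static const void *
sketch_lock (SketchClosure *c)
{
    const void *i;
    for (;;) {
        i = atomic_exchange(&c->info, (const void *)&sketch_whitehole);
        if (i != (const void *)&sketch_whitehole) {
            return i;                  // locked; i is the previous info
        }
        // someone else holds the lock: spin until they store it back
    }
}

static void
sketch_unlock (SketchClosure *c, const void *i)
{
    atomic_store(&c->info, i);         // publish the (possibly new) info
}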