void CThreadPool::_terminateThread() { _lock(6); VTHREAD_ID_TYPE nextThreadToSwitchTo=0; for (int i=0;i<int(_allThreadData.size());i++) { int fql=int(_threadQueue.size()); if (VThread::areThreadIDsSame(_allThreadData[i]->threadID,_threadQueue[fql-1])) { if (_showThreadSwitches) { std::string tmp("==q Terminating thread: "); tmp+=boost::lexical_cast<std::string>((unsigned long)_allThreadData[i]->threadID); tmp+="\n"; CDebugLogFile::addDebugText(false,tmp.c_str()); // printf("Terminating thread: %lu\n",(unsigned long)_allThreadData[i]->threadID); } _allThreadData[i]->threadID=VTHREAD_ID_DEAD; // To indicate we need clean-up nextThreadToSwitchTo=_threadQueue[fql-2]; // This will be the next thread we wanna switch to break; } } _unlock(6); switchToThread(nextThreadToSwitchTo); // We switch to the calling thread (previous thread) }
void chooseThread(void) { word_t prio; word_t dom; tcb_t *thread; if (CONFIG_NUM_DOMAINS > 1) { dom = ksCurDomain; } else { dom = 0; } if (likely(ksReadyQueuesL1Bitmap[dom])) { uint32_t l1index = (wordBits - 1) - CLZ(ksReadyQueuesL1Bitmap[dom]); uint32_t l2index = (wordBits - 1) - CLZ(ksReadyQueuesL2Bitmap[dom][l1index]); prio = l1index_to_prio(l1index) | l2index; thread = ksReadyQueues[ready_queues_index(dom, prio)].head; assert(thread); assert(isRunnable(thread)); switchToThread(thread); return; } switchToIdleThread(); }
void chooseThread(void) { word_t prio; word_t dom; tcb_t *thread; if (CONFIG_NUM_DOMAINS > 1) { dom = ksCurDomain; } else { dom = 0; } //printf("\n====In chooseThread=====\n"); //printf("domain is %d\n", dom); if (likely(ksReadyQueuesL1Bitmap[dom])) { uint32_t l1index = (wordBits - 1) - CLZ(ksReadyQueuesL1Bitmap[dom]); uint32_t l2index = (wordBits - 1) - CLZ(ksReadyQueuesL2Bitmap[dom][l1index]); prio = l1index_to_prio(l1index) | l2index; thread = ksReadyQueues[ready_queues_index(dom, prio)].head; assert(thread); assert(isRunnable(thread)); //printf("will call switchToThread(%p)\n", thread); //printf("its prio is %d\n", thread->tcbPriority); switchToThread(thread); return; } //printf(" IDLE THREAD\n"); switchToIdleThread(); }
// Replays buffered ops that belong to 'runThreadId'. When an op for a
// different thread is met (and thread ids are in use), control is handed to
// that thread; when the op stream is exhausted, control returns to thread 0
// (if we were running on behalf of a non-zero thread) and the call ends.
void Interpreter::doOnSameThread(ThreadId runThreadId)
{
    for (;;) {
        // Refill the op buffer when it is used up; a failed read ends replay.
        if (m_opsCursor >= m_opsInBuffer && !readOps()) {
            if (runThreadId)
                switchToThread(0);
            return;
        }
        while (m_opsCursor < m_opsInBuffer) {
            const Op op = m_ops[m_opsCursor];
            if (m_useThreadId && op.threadId != runThreadId) {
                // Not ours: hand off without consuming this op.
                switchToThread(op.threadId);
                break;
            }
            doMallocOp(op, m_currentThreadId);
            ++m_opsCursor;
        }
    }
}
/* Scan priorities from highest to lowest within the current domain and run
 * the first queued thread found; otherwise fall back to the idle thread. */
void chooseThread(void)
{
    for (int prio = seL4_MaxPrio; prio >= 0; prio--) {
        unsigned int idx = ksCurDomain * CONFIG_NUM_PRIORITIES + prio;
        tcb_t *head = ksReadyQueues[idx].head;
        if (head != NULL) {
            assert(isRunnable(head));
            switchToThread(head);
            return;
        }
    }
    /* No runnable thread at any priority. */
    switchToIdleThread();
}
// Resumes script threads whose resume location/order matches 'location'
// (three sub-orders, 3*location .. 3*location+2, executed in order).
// Pass location==-1 to resume every still-unhandled thread instead.
// Returns the number of threads that were switched to.
// NOTE(review): skips _allThreadData[0] (loop starts at i=1) — presumably the
// main thread's entry; confirm against the container's construction.
int CThreadPool::handleAllThreads_withResumeLocation(int location)
{
    int retVal=0;
    _lock(8);
    bool doAll=false;
    if (location==-1)
    { // Will resume all unhandled threads (to be called at the end of the main script)
        location=0;
        doAll=true;
    }
    for (int j=3*location;j<3*location+3;j++)
    {
        for (int i=1;i<int(_allThreadData.size());i++)
        {
            if ((_allThreadData[i]->threadResumeLocationAndOrder==j)||doAll)
            { // We first execute those with 0, then 1, then 2! (then 3 in the sensing phase!)
                // Only threads not yet handled this pass (threadExecutionTime==-1)
                // and belonging to the current instance are eligible:
                if ((_allThreadData[i]->threadExecutionTime==-1)&&(_allThreadData[i]->threadInstanceIndex==App::ct->getCurrentInstanceIndex()) )
                {
                    // Following is a special condition to support free-running mode:
                    if ( (!_allThreadData[i]->threadShouldRunFreely)&&(!_allThreadData[i]->threadSwitchShouldTriggerNoOtherThread) )
                    {
                        if (_showThreadSwitches)
                        {
                            std::string tmp("==. In fiber/thread handling routine (fiberID/threadID: ");
                            tmp+=boost::lexical_cast<std::string>((unsigned long)_threadQueue[_threadQueue.size()-1]);
                            tmp+=")\n";
                            CDebugLogFile::addDebugText(false,tmp.c_str());
                            // printf("In fiber/thread handling routine (fiberID/threadID: %lu)\n",(unsigned long)_threadQueue[_threadQueue.size()-1]);
                        }
                        // Drop the lock while the other thread runs; re-acquire
                        // once control switches back here.
                        _unlock(8);
                        switchToThread((VTHREAD_ID_TYPE)_allThreadData[i]->threadID);
                        _lock(8);
                        // The data may have changed while unlocked, so restart
                        // the inner scan (i becomes 1 after the i++).
                        i=0; // We re-check from the beginning
                        retVal++;
                    }
                }
            }
        }
        if (doAll)
            break; // doAll already covered every sub-order in one pass
    }
    _unlock(8);
    return(retVal);
}
bool CThreadPool::switchBackToPreviousThread() { _lock(4); int fql=int(_threadQueue.size()); if (fql>1) { // Switch back only if not main thread int totalTimeInMs=VDateTime::getTimeDiffInMs(_threadStartTime[fql-1]); for (int i=0;i<int(_allThreadData.size());i++) { if (VThread::areThreadIDsSame(_allThreadData[i]->threadID,_threadQueue[fql-1])) { _allThreadData[i]->threadExecutionTime=totalTimeInMs; break; } } _unlock(4); switchToThread(_threadQueue[fql-2]); // has its own locking / unlocking return(true); } _unlock(4); return(false); }
void schedule(void) { word_t action; action = (word_t)ksSchedulerAction; if (action == (word_t)SchedulerAction_ChooseNewThread) { if (isRunnable(ksCurThread)) { tcbSchedEnqueue(ksCurThread); } if (ksDomainTime == 0) { nextDomain(); } chooseThread(); ksSchedulerAction = SchedulerAction_ResumeCurrentThread; } else if (action != (word_t)SchedulerAction_ResumeCurrentThread) { if (isRunnable(ksCurThread)) { tcbSchedEnqueue(ksCurThread); } /* SwitchToThread */ switchToThread(ksSchedulerAction); ksSchedulerAction = SchedulerAction_ResumeCurrentThread; } }
void schedule(void) { word_t action; action = (word_t)ksSchedulerAction; //printf("\n=======In schedule======\n"); if (action == (word_t)SchedulerAction_ChooseNewThread) { //printf("in action_choosenewthread\n"); if (isRunnable(ksCurThread)) { tcbSchedEnqueue(ksCurThread); } if (CONFIG_NUM_DOMAINS > 1 && ksDomainTime == 0) { nextDomain(); } //printf("go to choosethread\n"); chooseThread(); ksSchedulerAction = SchedulerAction_ResumeCurrentThread; } else if (action != (word_t)SchedulerAction_ResumeCurrentThread) { if (isRunnable(ksCurThread)) { tcbSchedEnqueue(ksCurThread); } /* SwitchToThread */ switchToThread(ksSchedulerAction); ksSchedulerAction = SchedulerAction_ResumeCurrentThread; } //printf(" WILL RUN: %d of domain %d\n", ksCurThread->tcbPriority, ksCurThread->tcbDomain); }
// Performs a method return: unwinds the interpreter to the caller of the
// current method's home context. If the home context (or its caller) is
// gone, either longjmps all the way out of the interpreter or — when running
// inside a Routine — yields to parent threads to keep searching, finally
// reporting 'outOfContextReturn' if the caller frame cannot be found.
HOT void returnFromMethod(VMGlobals *g)
{
    PyrFrame *returnFrame, *curframe, *homeContext;
    PyrMethod *meth;
    PyrMethodRaw *methraw;
    curframe = g->frame;
    //assert(slotRawFrame(&curframe->context) == NULL);

    /*if (gTraceInterpreter) {
        post("returnFromMethod %s:%s\n", slotRawClass(&g->method->ownerclass)->name.us->name, g->slotRawSymbol(&method->name)->name);
        post("tailcall %d\n", g->tailCall);
    }*/
#ifdef GC_SANITYCHECK
    g->gc->SanityCheck();
#endif
    homeContext = slotRawFrame(&slotRawFrame(&curframe->context)->homeContext);
    if (homeContext == NULL) {
null_return:
#if TAILCALLOPTIMIZE
        if (g->tailCall)
            return; // do nothing.
#endif
        /*
        static bool once = true;
        if (once || gTraceInterpreter) {
            once = false;
            post("return all the way out. sd %d\n", g->sp - g->gc->Stack()->slots);
            postfl("%s:%s\n", slotRawClass(&g->method->ownerclass)->name.us->name, g->slotRawSymbol(&method->name)->name );
            post("tailcall %d\n", g->tailCall);
            post("homeContext %p\n", homeContext);
            post("returnFrame %p\n", returnFrame);
            dumpObjectSlot(&homeContext->caller);
            DumpStack(g, g->sp);
            DumpBackTrace(g);
        }
        gTraceInterpreter = false;
        */
        //if (IsNil(&homeContext->caller)) return; // do nothing.

        // return all the way out.
        PyrSlot *bottom = g->gc->Stack()->slots;
        slotCopy(bottom, g->sp); // keep the result at the bottom of the stack
        g->sp = bottom; // ??!! pop everybody
        g->method = NULL;
        g->block = NULL;
        g->frame = NULL;
        longjmp(g->escapeInterpreter, 2);
    } else {
        returnFrame = slotRawFrame(&homeContext->caller);
        if (returnFrame == NULL)
            goto null_return;
        // make sure returnFrame is a caller and find earliest stack frame
        {
            PyrFrame *tempFrame = curframe;
            while (tempFrame != returnFrame) {
                tempFrame = slotRawFrame(&tempFrame->caller);
                if (!tempFrame) {
                    if (isKindOf((PyrObject*)g->thread, class_routine) && NotNil(&g->thread->parent)) {
                        // not found, so yield to parent thread and continue searching.
                        PyrSlot value;
                        slotCopy(&value, g->sp);
                        int numArgsPushed = 1;
                        switchToThread(g, slotRawThread(&g->thread->parent), tSuspended, &numArgsPushed);
                        // on the other side of the looking glass, put the yielded value on the stack as the result..
                        g->sp -= numArgsPushed - 1;
                        slotCopy(g->sp, &value);
                        // resume the caller search in the parent thread's frame chain:
                        curframe = tempFrame = g->frame;
                    } else {
                        // No parent to search: raise outOfContextReturn with
                        // (receiver, original result, method) as arguments.
                        slotCopy(&g->sp[2], &g->sp[0]);
                        slotCopy(g->sp, &g->receiver);
                        g->sp++;
                        SetObject(g->sp, g->method);
                        g->sp++;
                        sendMessage(g, getsym("outOfContextReturn"), 3);
                        return;
                    }
                }
            }
        }
        // Clear the caller links of the frames being skipped over so they can
        // be reclaimed (heap contexts keep theirs only if they are the home).
        {
            PyrFrame *tempFrame = curframe;
            while (tempFrame != returnFrame) {
                meth = slotRawMethod(&tempFrame->method);
                methraw = METHRAW(meth);
                PyrFrame *nextFrame = slotRawFrame(&tempFrame->caller);
                if (!methraw->needsHeapContext) {
                    SetInt(&tempFrame->caller, 0);
                } else {
                    if (tempFrame != homeContext)
                        SetInt(&tempFrame->caller, 0);
                }
                tempFrame = nextFrame;
            }
        }

        // return to it
        g->ip = (unsigned char *)slotRawPtr(&returnFrame->ip);
        g->frame = returnFrame;
        g->block = slotRawBlock(&returnFrame->method);

        homeContext = slotRawFrame(&returnFrame->homeContext);
        meth = slotRawMethod(&homeContext->method);
        methraw = METHRAW(meth);

#if DEBUGMETHODS
        if (gTraceInterpreter) {
            postfl("%s:%s <- %s:%s\n",
                slotRawSymbol(&slotRawClass(&meth->ownerclass)->name)->name, slotRawSymbol(&meth->name)->name,
                slotRawSymbol(&slotRawClass(&g->method->ownerclass)->name)->name, slotRawSymbol(&g->method->name)->name
            );
        }
#endif

        g->method = meth;

        slotCopy(&g->receiver, &homeContext->vars[0]);
    }
#ifdef GC_SANITYCHECK
    g->gc->SanityCheck();
#endif
}
/* Creates and configures the initial user-level thread: allocates its TCB,
 * installs its CNode/VSpace/IPC-buffer capabilities, sets the entry point
 * and the boot-info frame argument, makes it runnable and current, and
 * writes its TCB cap into the root CNode. Returns false (with a diagnostic
 * printed) if TCB allocation or IPC-buffer cap derivation fails. */
BOOT_CODE bool_t create_initial_thread(
    cap_t root_cnode_cap,
    cap_t it_pd_cap,
    vptr_t ui_v_entry,
    vptr_t bi_frame_vptr,
    vptr_t ipcbuf_vptr,
    cap_t ipcbuf_cap
)
{
    pptr_t pptr;
    cap_t cap;
    tcb_t* tcb;
    deriveCap_ret_t dc_ret;

    /* allocate TCB */
    pptr = alloc_region(TCB_BLOCK_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failed: Unable to allocate tcb for initial thread\n");
        return false;
    }
    memzero((void*)pptr, 1 << TCB_BLOCK_SIZE_BITS);
    tcb = TCB_PTR(pptr + TCB_OFFSET);
    tcb->tcbTimeSlice = CONFIG_TIME_SLICE;
    Arch_initContext(&tcb->tcbArch.tcbContext);

    /* derive a copy of the IPC buffer cap for inserting */
    dc_ret = deriveCap(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF), ipcbuf_cap);
    if (dc_ret.status != EXCEPTION_NONE) {
        printf("Failed to derive copy of IPC Buffer\n");
        return false;
    }

    /* initialise TCB (corresponds directly to abstract specification) */
    cteInsert(
        root_cnode_cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_CNODE),
        SLOT_PTR(pptr, tcbCTable)
    );
    cteInsert(
        it_pd_cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE),
        SLOT_PTR(pptr, tcbVTable)
    );
    cteInsert(
        dc_ret.cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF),
        SLOT_PTR(pptr, tcbBuffer)
    );
    tcb->tcbIPCBuffer = ipcbuf_vptr;
    /* The boot-info frame pointer is handed to the thread in capRegister;
     * execution starts at the user image entry point. */
    setRegister(tcb, capRegister, bi_frame_vptr);
    setNextPC(tcb, ui_v_entry);

    /* initialise TCB */
    tcb->tcbPriority = seL4_MaxPrio;
    setupReplyMaster(tcb);
    setThreadState(tcb, ThreadState_Running);
    ksSchedulerAction = SchedulerAction_ResumeCurrentThread;
    /* Start from the idle thread so switchToThread() below has a valid
     * ksCurThread to switch away from; domain state comes from the first
     * entry of the domain schedule. */
    ksCurThread = ksIdleThread;
    ksCurDomain = ksDomSchedule[ksDomScheduleIdx].domain;
    ksDomainTime = ksDomSchedule[ksDomScheduleIdx].length;
    assert(ksCurDomain < CONFIG_NUM_DOMAINS && ksDomainTime > 0);

    /* initialise current thread pointer */
    switchToThread(tcb); /* initialises ksCurThread */

    /* create initial thread's TCB cap */
    cap = cap_thread_cap_new(TCB_REF(tcb));
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_TCB), cap);

#ifdef DEBUG
    setThreadName(tcb, "rootserver");
#endif

    return true;
}