static void fiber_yieldto(Fiber* curfib,Fiber* fiber){ ucontext_t* cur=&curfib->ctx; ucontext_t* newctx=&fiber->ctx; co_current=fiber; swapcontext(cur,newctx); }
inline void switch_to_fiber(fiber_t& fib, fiber_t& prev) { swapcontext(&prev, &fib); }
void _yield() { if(swapcontext(&_context_child, &_context_parent) == -1) throw std::runtime_error("yield: fail @ swapcontext"); }
void func2(void) { if (swapcontext(&ctx[2], &ctx[1]) != 0) err(3, 1); entered_func2++; }
/* NOTE: Assumes signal has been blocked before entering this call */ void swapcur(gtthread_t cur) { gtthread_int_t *cur_int, *target; cur_int = find_thread(cur); target = (gtthread_int_t *) steque_front(&run_queue); swapcontext(&cur_int->context, &target->context); }
int main (void) { atexit (check_called); char st1[32768]; puts ("making contexts"); if (getcontext (&ctx[0]) != 0) { if (errno == ENOSYS) { back_in_main = 1; exit (0); } printf ("%s: getcontext: %m\n", __FUNCTION__); exit (1); } if (getcontext (&ctx[1]) != 0) { printf ("%s: getcontext: %m\n", __FUNCTION__); exit (1); } ctx[1].uc_stack.ss_sp = st1; ctx[1].uc_stack.ss_size = sizeof st1; ctx[1].uc_link = &ctx[0]; errno = 0; makecontext (&ctx[1], (void (*) (void)) f1, 33, 0x00000001 << flag, 0x00000004 << flag, 0x00000012 << flag, 0x00000048 << flag, 0x00000123 << flag, 0x0000048d << flag, 0x00001234 << flag, 0x000048d1 << flag, 0x00012345 << flag, 0x00048d15 << flag, 0x00123456 << flag, 0x0048d159 << flag, 0x01234567 << flag, 0x048d159e << flag, 0x12345678 << flag, 0x48d159e2 << flag, 0x23456789 << flag, 0x8d159e26 << flag, 0x3456789a << flag, 0xd159e26a << flag, 0x456789ab << flag, 0x159e26af << flag, 0x56789abc << flag, 0x59e26af3 << flag, 0x6789abcd << flag, 0x9e26af37 << flag, 0x789abcde << flag, 0xe26af37b << flag, 0x89abcdef << flag, 0x26af37bc << flag, 0x9abcdef0 << flag, 0x6af37bc3 << flag, 0xabcdef0f << flag); /* Without this check, a stub makecontext can make us spin forever. */ if (errno == ENOSYS) { puts ("makecontext not implemented"); back_in_main = 1; return 0; } /* Play some tricks with this context. */ if (++global == 1) if (setcontext (&ctx[1]) != 0) { printf ("%s: setcontext: %m\n", __FUNCTION__); exit (1); } if (global != 2) { printf ("%s: 'global' not incremented twice\n", __FUNCTION__); exit (1); } if (getcontext (&ctx[2]) != 0) { printf ("%s: second getcontext: %m\n", __FUNCTION__); exit (1); } ctx[2].uc_stack.ss_sp = st2; ctx[2].uc_stack.ss_size = sizeof st2; ctx[2].uc_link = &ctx[1]; makecontext (&ctx[2], f2, 0); puts ("swapping contexts"); if (swapcontext (&ctx[0], &ctx[2]) != 0) { printf ("%s: swapcontext: %m\n", __FUNCTION__); exit (1); } puts ("back at main program"); back_in_main = 1; if (was_in_f1 < 2) { puts ("didn't reach f1 twice"); exit (1); } if (was_in_f2 == 0) { puts ("didn't reach f2"); exit (1); } puts ("test succeeded"); return 0; }
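/*
 * Editor's sketch, not part of the test above: the bare getcontext/makecontext/
 * swapcontext round trip that the test exercises, reduced to one worker context.
 * The names (worker, worker_stack, uctx_*) are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t uctx_main, uctx_worker;
static char worker_stack[64 * 1024];

static void worker(void)
{
    puts("worker: entered");
    /* Hand control back to main; execution resumes here on the next swap. */
    swapcontext(&uctx_worker, &uctx_main);
    puts("worker: resumed, returning");
    /* Returning ends this context; uc_link decides what runs next. */
}

int main(void)
{
    if (getcontext(&uctx_worker) == -1) {   /* initialise before makecontext */
        perror("getcontext");
        return EXIT_FAILURE;
    }
    uctx_worker.uc_stack.ss_sp = worker_stack;
    uctx_worker.uc_stack.ss_size = sizeof worker_stack;
    uctx_worker.uc_link = &uctx_main;       /* resume main when worker returns */
    makecontext(&uctx_worker, worker, 0);

    puts("main: first swap into worker");
    swapcontext(&uctx_main, &uctx_worker);
    puts("main: worker yielded, swapping back in");
    swapcontext(&uctx_main, &uctx_worker);
    puts("main: worker finished");
    return EXIT_SUCCESS;
}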
/*
 * Behaviour: yield() called from main hands control to the head of the queue,
 * otherwise it hands control back to main.
 */
extern int thread_yield(void)
{
    thread_init_function();
    thread_t tmp = threadList.currentThread;
    thread_t thread;

    tmp->current_priority--;

    /* Look for the first ready thread */
    if (!TAILQ_EMPTY(&threadList.list)) { /* there are threads in the ready list */
        do {
            thread = TAILQ_FIRST(&(threadList.list));
            if ((thread == tmp) && (TAILQ_NEXT(thread, entries) == NULL)) {
                return 0;
            }
            if (thread->current_priority <= thread->default_priority - threadList.max_priority) {
                thread->current_priority = thread->default_priority;
            } else if (thread->current_priority <= 0) {
                thread->current_priority--;
            }
            /*
            // if the first thread is the current thread, take the next one
            if ((thread == tmp) && (TAILQ_NEXT(thread, entries) != NULL)) {
                thread = TAILQ_NEXT(thread, entries);
            }
            */
            /* if the current thread is the only ready thread, keep running it */
            TAILQ_REMOVE(&(threadList.list), thread, entries);
            TAILQ_INSERT_TAIL(&(threadList.list), thread, entries);
        } while (thread->current_priority < 0);
    } else if (!TAILQ_EMPTY(&threadList.list_sleeping)) { /* only sleeping threads are left */
        thread = TAILQ_FIRST(&(threadList.list_sleeping));
        thread->state = READY;
        if (thread->default_priority > threadList.max_priority)
            threadList.max_priority = thread->default_priority;
        TAILQ_REMOVE(&(threadList.list_sleeping), thread, entries);
        TAILQ_INSERT_HEAD(&(threadList.list), thread, entries);
    } else { /* every thread is dead or sleeping */
        fprintf(stderr, "Done: no more ready or sleeping threads\n");
        return 0;
    }

    /* Update currentThread in the threadList */
    threadList.currentThread = thread;

#ifdef DEBUG_MODE
    thread->nb_calls++;
    //printf("Using thread %d (time %d) with priority: %d/%d\n", thread->id, thread->nb_calls, thread->current_priority, thread->default_priority);
#endif

    /* Context switch */
    swapcontext(&(tmp->context), &(threadList.currentThread->context));
    return 0;
}
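/*
 * Editor's sketch of the yield idea above with the priority aging and sleep
 * list stripped out: a fixed ring of contexts rotated round-robin, where
 * slot 0 is the main thread.  Slot count, stack size, and all names are
 * illustrative; this is not the library's API.
 */
#include <stdio.h>
#include <ucontext.h>

#define NSLOTS 3                             /* slot 0 = main, slots 1..2 = workers */

static ucontext_t ring[NSLOTS];
static char stacks[NSLOTS][32 * 1024];
static int current;                          /* index of the slot that is running */

/* Save the caller and switch to the next slot in the ring. */
static void rr_yield(void)
{
    int prev = current;

    current = (current + 1) % NSLOTS;
    swapcontext(&ring[prev], &ring[current]);
}

/* Workers just announce themselves and pass control on, forever. */
static void worker(void)
{
    for (;;) {
        printf("worker in slot %d\n", current);
        rr_yield();
    }
}

int main(void)
{
    for (int i = 1; i < NSLOTS; i++) {
        getcontext(&ring[i]);
        ring[i].uc_stack.ss_sp = stacks[i];
        ring[i].uc_stack.ss_size = sizeof stacks[i];
        ring[i].uc_link = &ring[0];          /* never reached: workers loop forever */
        makecontext(&ring[i], worker, 0);
    }

    for (int round = 0; round < 3; round++) {
        printf("main, round %d\n", round);
        rr_yield();                          /* lets every worker run once */
    }
    return 0;                                /* parked workers die with the process */
}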
static void f2 (void) { puts("start f2"); swapcontext(&ctx[2], &ctx[1]); printf("2"); puts("finish f2"); }
void schedule()
{
    //cs printf("activated\n");
    if (Current_Thread == NULL) { // Main Thread was executing previously..
        TERMINATED = 0;
        if (!isempty(&ready_queue)) { // If there are more threads...
            Current_Thread = top(&ready_queue);
            //c printf("yes!! %p\n", Current_Thread);
            Global_id = Current_Thread->thread_id;
            swapcontext(&Main, &(Current_Thread->threads_context));
        } else { // If all threads are dead...
            //c printf("main is getting set!\n");
            setcontext(&Main);
        }
    } else { // if some other thread was executing...
        struct Task* tmp = next(&ready_queue, Current_Thread);
        //c printf("other way around and %p and current:%p\n", tmp, Current_Thread);
        if (tmp == NULL) { // if this was the last thread in the queue, execute main
            //c printf("it was null\n");
            if (TERMINATED == 1) {
                //c printf("its gone!!\n");
                TERMINATED = 0;
                remov(&ready_queue, Global_id);
                Current_Thread = NULL;
                setcontext(&Main);
            } else {
                struct Task* tmp1 = Current_Thread;
                Current_Thread = NULL;
                swapcontext(&(tmp1->threads_context), &Main);
            }
        } else {
            struct Task* tmp2 = Current_Thread;
            Current_Thread = tmp;
            if (TERMINATED == 1) {
                TERMINATED = 0;
                remov(&ready_queue, Global_id);
                Global_id = tmp->thread_id;
                //c printf("context set for %p\n", tmp);
                setcontext(&(tmp->threads_context));
            } else {
                Global_id = tmp->thread_id;
                //c printf("running:%p\n", tmp);
                swapcontext(&(tmp2->threads_context), &(tmp->threads_context));
            }
        }
    }
}
void thread_defer_self(thread_context_t *ctx) { swapcontext(&ctx->self->context, &ctx->poller); }
static void f1 (void) { puts("start f1"); swapcontext(&ctx[1], &ctx[2]); printf("1"); puts("finish f1"); }
/* * stress_stackmmap * stress a file memory map'd stack */ static int stress_stackmmap(const args_t *args) { int fd; volatile int rc = EXIT_FAILURE; /* could be clobbered */ char filename[PATH_MAX]; page_size = args->page_size; page_mask = ~(page_size - 1); /* Create file back'd mmaping for the stack */ if (stress_temp_dir_mk_args(args) < 0) return EXIT_FAILURE; (void)stress_temp_filename_args(args, filename, sizeof(filename), mwc32()); fd = open(filename, O_SYNC | O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); if (fd < 0) { pr_fail_err("mmap'd stack file open"); goto tidy_dir; } (void)unlink(filename); if (ftruncate(fd, MMAPSTACK_SIZE) < 0) { pr_fail_err("ftruncate"); (void)close(fd); goto tidy_dir; } stack_mmap = mmap(NULL, MMAPSTACK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (stack_mmap == MAP_FAILED) { if (errno == ENXIO) { pr_inf("%s: skipping stressor, mmap not possible on file %s\n", args->name, filename); rc = EXIT_NO_RESOURCE; (void)close(fd); goto tidy_dir; } pr_fail_err("mmap"); (void)close(fd); goto tidy_dir; } (void)close(fd); if (shim_madvise(stack_mmap, MMAPSTACK_SIZE, MADV_RANDOM) < 0) { pr_dbg("%s: madvise failed: errno=%d (%s)\n", args->name, errno, strerror(errno)); } (void)memset(stack_mmap, 0, MMAPSTACK_SIZE); (void)memset(&c_test, 0, sizeof(c_test)); if (getcontext(&c_test) < 0) { pr_fail_err("getcontext"); goto tidy_mmap; } c_test.uc_stack.ss_sp = stack_mmap; c_test.uc_stack.ss_size = MMAPSTACK_SIZE; c_test.uc_link = &c_main; /* * set jmp handler to jmp back into the loop on a full * stack segfault. Use swapcontext to jump into a * new context using the new mmap'd stack */ do { pid_t pid; again: if (!g_keep_stressing_flag) break; pid = fork(); if (pid < 0) { if ((errno == EAGAIN) || (errno == ENOMEM)) goto again; pr_err("%s: fork failed: errno=%d (%s)\n", args->name, errno, strerror(errno)); } else if (pid > 0) { int status, waitret; /* Parent, wait for child */ (void)setpgid(pid, g_pgrp); waitret = shim_waitpid(pid, &status, 0); if (waitret < 0) { if (errno != EINTR) pr_dbg("%s: waitpid(): errno=%d (%s)\n", args->name, errno, strerror(errno)); (void)kill(pid, SIGTERM); (void)kill(pid, SIGKILL); (void)shim_waitpid(pid, &status, 0); } } else if (pid == 0) { /* Child */ (void)setpgid(0, g_pgrp); stress_parent_died_alarm(); /* Make sure this is killable by OOM killer */ set_oom_adjustment(args->name, true); (void)makecontext(&c_test, stress_stackmmap_push_start, 0); (void)swapcontext(&c_main, &c_test); _exit(0); } inc_counter(args); } while (keep_stressing()); rc = EXIT_SUCCESS; tidy_mmap: (void)munmap(stack_mmap, MMAPSTACK_SIZE); tidy_dir: (void)stress_temp_dir_rm_args(args); return rc; }
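/*
 * Editor's sketch of the core trick used by stress_stackmmap() above: give a
 * context a file-backed mmap'd region as its stack and swap into it.  The
 * file name and sizes are illustrative, error handling is minimal, and this
 * is not the stress-ng implementation.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>

#define STACK_SIZE (256 * 1024)

static ucontext_t uc_main, uc_mapped;

static void on_mapped_stack(void)
{
    puts("running on the mmap'd stack");
    /* returning here resumes uc_main via uc_link */
}

int main(void)
{
    int fd = open("stack.bin", O_RDWR | O_CREAT, 0600);
    void *stack;

    if (fd < 0 || ftruncate(fd, STACK_SIZE) < 0) {
        perror("stack file");
        return EXIT_FAILURE;
    }
    stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (stack == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    if (getcontext(&uc_mapped) == -1) {
        perror("getcontext");
        return EXIT_FAILURE;
    }
    uc_mapped.uc_stack.ss_sp = stack;
    uc_mapped.uc_stack.ss_size = STACK_SIZE;
    uc_mapped.uc_link = &uc_main;
    makecontext(&uc_mapped, on_mapped_stack, 0);

    if (swapcontext(&uc_main, &uc_mapped) == -1)   /* run on the mapped stack */
        perror("swapcontext");

    munmap(stack, STACK_SIZE);
    close(fd);
    unlink("stack.bin");
    return EXIT_SUCCESS;
}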
void mcos_resched(mInfo_t * mInfo)
{
    fcb_t * cfcb;
    fcb_t * nfcb;
    int16_t pid;
    uint32_t bitmap;

    // Grab and save the vector bitmap and clear the bitmap.
    bitmap = mcos_clear_interrupts(mInfo);

    // Look for interrupts and add the fcb_t to the ready queue.
    if ( unlikely(bitmap) ) {
        int32_t bit;

        dbgPrintf("Bitmap Vectors %08x\n", bitmap);

        // Loop testing the bitmap bits to see which vectors are active.
        while( (bit = ffs(bitmap)) ) {
            bit--;
            // If the vector array value is not EMPTY then make the pid ready
            if ( mInfo->vector[bit] != FCB_EMPTY ) {
                // Can not call mcos_resume inside of mcos_resched its recursive.
                mcos_ready(mInfo, mInfo->vector[bit], RESCHED_NO);
            }
            // Clear the bit and loop again
            bitmap &= ~(1 << bit);
        }
    }

    // If the current thread and we are the highest priority then continue running.
    if ( ((cfcb = mInfo->curr)->state == FCB_CURR) &&
         (mcos_lastkey(mInfo, mInfo->rdytail) < cfcb->priority) ) {
        return;
    }

    // Force context switch
    if ( unlikely(cfcb->state == FCB_CURR) ) {
        cfcb->state = FCB_READY;
        cfcb->semid = 0;
        mcos_insert(mInfo, cfcb->pid, mInfo->rdyhead, cfcb->priority);
    }

    // MCOS exits when it does not have any fibers to run.
    if ( unlikely((pid = mcos_getlast(mInfo, mInfo->rdytail)) == E_EMPTY) ) {
        dbgPrintf("No more fibers to run!\n");
        return;
    }

    nfcb = mcos_getfcb(mInfo, pid);

    // Setup for the new state.
    nfcb->state = FCB_CURR;
    mInfo->curr = nfcb;
    mInfo->currpid = pid;

    // Count the number of times the scheduler has been called.
    mInfo->scheduler_cnt++;

    dbgPrintf("Switch to (%s) state: %s\n", nfcb->name, mcos_state(nfcb->state));
    swapcontext(cfcb->uctx, nfcb->uctx);
}
/**
 * Helper function for yield.
 */
static void yield_helper(int is_alarm_safe)
{
  if(is_alarm_safe)
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);

  // Don't need to do anything if there's just one thread in the queue.
  if(steque_size(&g_threads_steque) == 1)
    return;

  gtthread_t *old_thread = steque_pop(&g_threads_steque);
  gtthread_t *new_thread = NULL;

  /* Find an eligible new thread - i.e., a thread that isn't queued for cancelation. */
  if(!is_alarm_safe)
    sigprocmask(SIG_BLOCK, &vtalrm, NULL);

  while(steque_size(&g_threads_steque) > 0) {
    new_thread = steque_front(&g_threads_steque);

    /* Cancels threads when it's their turn to run */
    int i;
    int canceled = 0;
    for(i = 0; i < steque_size(&g_cancelatorium); i++) {
      if((long) steque_front(&g_cancelatorium) == new_thread->id) {
        new_thread->is_finished = 1;
        new_thread->retval = (void *) -1;
        steque_pop(&g_cancelatorium);
        steque_pop(&g_threads_steque);
        steque_enqueue(&g_dead_threads_steque, new_thread);
        canceled = 1;
        joininator(new_thread);  // Attempt to join the thread you just canceled.
        break;
      }
      if(steque_size(&g_cancelatorium) > 0)
        steque_cycle(&g_cancelatorium);
    }

    if(!canceled)
      break;
  }

  /* If the thread that yielded finished executing, put it in the finished steque. */
  if(old_thread->is_finished) {
    steque_enqueue(&g_dead_threads_steque, old_thread);
    joininator(old_thread);
  } else {
    steque_enqueue(&g_threads_steque, old_thread);
  }

  if(!is_alarm_safe)
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);

  // All threads have finished running. Is this necessary?
  if(steque_size(&g_threads_steque) == 0)
    exit(0);

  // Don't context switch if the original thread is the only one left in the queue.
  if(gtthread_equal(*((gtthread_t *) steque_front(&g_threads_steque)), *old_thread))
    return;

  if(is_alarm_safe) {
    T.it_value.tv_usec = global_period;  // Reset timer so that the next period can start immediately.
    sigprocmask(SIG_UNBLOCK, &vtalrm, NULL);
  }

  swapcontext(old_thread->context, new_thread->context);
}
void timer_handler(int signum){ /*static int count =0;*/ dlink_t* next_run_dlink; dlink_t* old_running_link; BlockSignals(); /*Get next running link*/ next_run_dlink = NextRunningLink(schedule_queue); if(next_run_dlink==NULL){ if(schedule_queue->begin->thread_block->returnval != NULL){ exit(*(int *)(schedule_queue->begin->thread_block->returnval)); } else{ exit(0); } } /*Save the context of present running link //local_context = *(ucontext_t*)return_context; //schedule_queue->running->thread_block->thread_context.uc_mcontext = local_context.uc_mcontext;*/ /*Make present running link as running 0*/ schedule_queue->running->thread_block->runnning = 0; /*Make temp_dlink as the present running link*/ next_run_dlink->thread_block->runnning = 1; /*if((schedule_queue->running->thread_block->cancelled||schedule_queue->running->thread_block->exited||schedule_queue->running->thread_block->finished)){ schedule_queue->running = next_run_dlink; if() old_running_link }*/ old_running_link = schedule_queue->running; schedule_queue->running = next_run_dlink; InitTimer(); UnblockSignals(); swapcontext(&(old_running_link->thread_block->thread_context),&(schedule_queue->running->thread_block->thread_context)); /*while(1){ if(swapcontext(schedule_queue->running->thread_block->thread_context.uc_link,&(schedule_queue->running->thread_block->thread_context))==-1){ fprintf(stderr,"ERROR:swapcontext() Failed!!!"); exit(1); } //Dude I am done with my work BlockSignals(); //Get next running link next_run_dlink = NextRunningLink(schedule_queue); //If returned link was the running link if(next_run_dlink == schedule_queue->running){ //Dude I ran everything for you exit(0); } //Else execute the next running link else{ //Make schedule queue running go finished schedule_queue->running->thread_block->finished = 1; schedule_queue->running->thread_block->runnning = 0; //Make temp_dlink as the present running link next_run_dlink->thread_block->runnning = 1; schedule_queue->running = next_run_dlink; //Initialize timer, gonna give this guy full time stamp InitTimer(); //Unblock Signals UnblockSignals(); } }*/ }
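/*
 * Editor's sketch of the pattern used by timer_handler() above: a SIGVTALRM
 * handler that re-arms a one-shot virtual timer and swapcontext()s to the
 * other context, relying on the kernel's signal return to restore the mask.
 * The two spinning contexts, the 10 ms slice, and all names are illustrative;
 * this is not the original scheduler.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <ucontext.h>
#include <unistd.h>

#define NCTX 2

static ucontext_t ctxs[NCTX];
static char alt_stack[64 * 1024];
static volatile int running;                /* index of the context on the CPU */
static volatile int switches;

static void arm_timer(void)
{
    struct itimerval it;

    memset(&it, 0, sizeof it);
    it.it_value.tv_usec = 10 * 1000;        /* one-shot 10 ms slice */
    setitimer(ITIMER_VIRTUAL, &it, NULL);
}

static void preempt(int sig)
{
    int prev = running;

    (void)sig;
    fprintf(stderr, "preempting context %d\n", prev);
    if (++switches >= 10)                   /* enough demonstration, stop */
        _exit(0);
    running = (running + 1) % NCTX;         /* round robin to the other context */
    arm_timer();
    swapcontext(&ctxs[prev], &ctxs[running]);
}

static void spin(void)
{
    for (;;)                                /* burns CPU until preempted */
        ;
}

int main(void)
{
    struct sigaction sa;

    memset(&sa, 0, sizeof sa);
    sa.sa_handler = preempt;
    sigemptyset(&sa.sa_mask);               /* SIGVTALRM itself is blocked while handling */
    sigaction(SIGVTALRM, &sa, NULL);

    getcontext(&ctxs[1]);                   /* context 1 spins on its own stack */
    ctxs[1].uc_stack.ss_sp = alt_stack;
    ctxs[1].uc_stack.ss_size = sizeof alt_stack;
    ctxs[1].uc_link = NULL;
    makecontext(&ctxs[1], spin, 0);

    arm_timer();
    spin();                                 /* context 0 is the initial thread */
    return 0;
}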
void barrier() { // switch to next thread int old_thread_id = set_next_current_thread_id(); swapcontext(&thread_state[old_thread_id], &thread_state[current_thread_id()]); }
/* TODO: do not start task immediately */
void hb_taskResume( void * pTaskPtr )
{
   PHB_TASKINFO pTask = ( PHB_TASKINFO ) pTaskPtr;

   if( s_currTask != pTask )
   {
      switch( pTask->state )
      {
#if ! defined( HB_HAS_UCONTEXT )
         case TASK_INIT:
            /* save current execution context */
            if( setjmp( s_currTask->context ) == 0 )
            {
               s_currTask = pTask;
               hb_taskStart();
               /* unreachable code */
            }
            break;
#endif
         case TASK_SLEEPING:
            hb_taskWakeUp( pTask );
            /* no break */
#if defined( HB_HAS_UCONTEXT )
         case TASK_INIT:
#endif
         case TASK_SUSPEND:
            pTask->state = TASK_RUNNING;
            /* no break */
         case TASK_RUNNING:
#if defined( HB_HAS_UCONTEXT )
            {
               PHB_TASKINFO pCurrTask = s_currTask;
               s_currTask = pTask;
               /* save current execution context and switch to the new one */
               swapcontext( &pCurrTask->context, &pTask->context );
            }
#else
            /* save current execution context */
            if( setjmp( s_currTask->context ) == 0 )
            {
               s_currTask = pTask;
               /* switch execution context */
               longjmp( pTask->context, 1 );
               /* unreachable code */
            }
#endif
            break;
         case TASK_DONE:
            break;
         case TASK_ZOMBIE:
            /* It should not happen - it's a bug in user code */
            hb_errInternal( HB_EI_ERRUNRECOV, "TaskResume: zombie", NULL, NULL );
         /*
         default:
            hb_errInternal( HB_EI_ERRUNRECOV, "TaskResume: corrupt", NULL, NULL );
         */
      }
   }
}
void jump(context* ofc, context* nfc) { swapcontext(&ofc->value, &nfc->value); }
TestCaseResult _test_case_run_single_test(TestContext *self, TestCase *test) { GHashTable *leak_table = NULL; gpointer leak_handler = (self->m_leakwatch ? tinu_leakwatch_simple(&leak_table) : NULL); gpointer stack = g_malloc0(TEST_CTX_STACK_SIZE); ucontext_t main_ctx; g_test_case_current_result = TEST_NONE; if (self->m_sighandle) { _signal_on(); if (getcontext(&g_test_ucontext) == -1) { log_error("Cannot get main context", msg_tag_errno(), NULL); g_test_case_current_result = TEST_INTERNAL; goto test_case_run_done; } g_test_ucontext.uc_stack.ss_sp = stack; g_test_ucontext.uc_stack.ss_size = TEST_CTX_STACK_SIZE; g_test_ucontext.uc_link = &main_ctx; makecontext(&g_test_ucontext, (void (*)())(&_test_case_run_intern), 2, self, test); if (swapcontext(&main_ctx, &g_test_ucontext) == -1) { log_error("Cannot change context", msg_tag_errno(), NULL); g_test_case_current_result = TEST_INTERNAL; goto test_case_run_done; } g_free(stack); _signal_off(); } else _test_case_run_intern(self, test); test_case_run_done: switch (g_test_case_current_result) { case TEST_PASSED : log_notice("Test case run successfull", msg_tag_str("case", test->m_name), msg_tag_str("suite", test->m_suite->m_name), NULL); break; case TEST_FAILED : log_warn("Test case run failed", msg_tag_str("case", test->m_name), msg_tag_str("suite", test->m_suite->m_name), NULL); break; case TEST_ABORT : log_error("Test case run failed abruptly", msg_tag_str("case", test->m_name), msg_tag_str("suite", test->m_suite->m_name), NULL); break; case TEST_SEGFAULT : log_error("Test case run produced a segmentation fault", msg_tag_str("case", test->m_name), msg_tag_str("suite", test->m_suite->m_name), NULL); break; case TEST_INTERNAL : log_crit("Test case run failed due to an internal error", msg_tag_str("case", test->m_name), msg_tag_str("suite", test->m_suite->m_name), NULL); break; default : g_assert_not_reached(); } if (leak_handler) { tinu_unregister_watch(leak_handler); if (g_test_case_current_result == TEST_PASSED) tinu_leakwatch_simple_dump(leak_table, LOG_WARNING); g_hash_table_destroy(leak_table); } _test_run_hooks(TEST_HOOK_AFTER_TEST, test, g_test_case_current_result); return g_test_case_current_result; }
bool Context::SwapIn() { memcpy(impl_->shared_stack_ + impl_->shared_stack_cap_ - impl_->stack_size_, impl_->stack_, impl_->stack_size_); return 0 == swapcontext(&impl_->GetTlsContext(), &impl_->ctx_); }
uint32_t Processer::Run(ThreadLocalInfo &info, uint32_t &done_count) { info.current_task = NULL; done_count = 0; uint32_t c = 0; SList<Task> slist = runnable_list_.pop_all(); uint32_t do_count = slist.size(); DebugPrint(dbg_scheduler, "Run [Proc(%d) do_count:%u] --------------------------", id_, do_count); SList<Task>::iterator it = slist.begin(); for (; it != slist.end(); ++c) { Task* tk = &*it; info.current_task = tk; tk->state_ = TaskState::runnable; DebugPrint(dbg_switch, "enter task(%s)", tk->DebugInfo()); RestoreStack(tk); int ret = swapcontext(&info.scheduler, &tk->ctx_); if (ret) { fprintf(stderr, "swapcontext error:%s\n", strerror(errno)); runnable_list_.push(tk); ThrowError(eCoErrorCode::ec_swapcontext_failed); } DebugPrint(dbg_switch, "leave task(%s) state=%d", tk->DebugInfo(), tk->state_); info.current_task = NULL; switch (tk->state_) { case TaskState::runnable: ++it; break; case TaskState::io_block: it = slist.erase(it); g_Scheduler.io_wait_.SchedulerSwitch(tk); break; case TaskState::sleep: it = slist.erase(it); g_Scheduler.sleep_wait_.SchedulerSwitch(tk); break; case TaskState::sys_block: case TaskState::user_block: { if (tk->block_) { it = slist.erase(it); if (!tk->block_->AddWaitTask(tk)) runnable_list_.push(tk); tk->block_ = NULL; } else { std::unique_lock<LFLock> lock(g_Scheduler.user_wait_lock_); auto &zone = g_Scheduler.user_wait_tasks_[tk->user_wait_type_]; auto &wait_pair = zone[tk->user_wait_id_]; auto &task_queue = wait_pair.second; if (wait_pair.first) { --wait_pair.first; tk->state_ = TaskState::runnable; ++it; } else { it = slist.erase(it); task_queue.push(tk); } g_Scheduler.ClearWaitPairWithoutLock(tk->user_wait_type_, tk->user_wait_id_, zone, wait_pair); } } break; case TaskState::done: default: --task_count_; ++done_count; it = slist.erase(it); DebugPrint(dbg_task, "task(%s) done.", tk->DebugInfo()); if (tk->eptr_) { std::exception_ptr ep = tk->eptr_; runnable_list_.push(slist); tk->DecrementRef(); std::rethrow_exception(ep); } else tk->DecrementRef(); break; } } if (do_count) runnable_list_.push(slist); return c; }
/* Wrapper function to perform a context switch and handle any errors. */ void context_swap(ucontext_t *active, ucontext_t *other) { if (swapcontext(active, other) == -1) printf("ERROR: Error switching context.\n"); }
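/*
 * Editor's variant of the wrapper above: the same checked swapcontext, but it
 * reports errno and returns the result so callers can react.  Illustrative
 * only; not part of the original code.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

int context_swap_checked(ucontext_t *active, ucontext_t *other)
{
    int ret = swapcontext(active, other);

    if (ret == -1)
        fprintf(stderr, "ERROR: swapcontext failed: %s\n", strerror(errno));
    return ret;
}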
int main(void)
{
    start(51);
#ifdef __GNUC__
    printf("(GCC) ");
#else
    printf("(ACK) ");
#endif
    fflush(stdout);

    atexit(verify_main_reenter);

    /* Save current context in ctx[0] */
    if (getcontext(&ctx[0]) != 0) {
        /* Don't verify reentering main, not going to happen */
        atexit(just_exit);
        err(1, 1);
    }

    ctx[1] = ctx[0];
    ctx[1].uc_stack.ss_sp = st_stack;
    ctx[1].uc_stack.ss_size = SSIZE;
    ctx[1].uc_link = &ctx[0]; /* When done running, return here */

    /* ctx[1] is going to run func1 and then return here (uc_link). */
    /* We'll see later on whether makecontext worked. */
    makecontext(&ctx[1], (void (*) (void)) func1, 28,
        (0x0000001 << shift), (0x0000004 << shift), (0x0000010 << shift),
        (0x0000040 << shift), (0x0000100 << shift), (0x0000400 << shift),
        (0x0001000 << shift), (0x0004000 << shift), (0x0010000 << shift),
        (0x0040000 << shift), (0x0100000 << shift), (0x0400000 << shift),
        (0x1000000 << shift), (0x4000000 << shift), (0x0000002 << shift),
        (0x0000008 << shift), (0x0000020 << shift), (0x0000080 << shift),
        (0x0000200 << shift), (0x0000800 << shift), (0x0002000 << shift),
        (0x0008000 << shift), (0x0020000 << shift), (0x0080000 << shift),
        (0x0200000 << shift), (0x0800000 << shift), (0x2000000 << shift),
        (0x8000000 << shift));

    if (++global == 1) {
        /* First time we're here. Let's run ctx[1] and return to ctx[0] when
         * we're done. Note that we return to above the 'makecontext' call. */
        if (setcontext(&ctx[1]) != 0) err(1, 2);
    }

    if (global != 2) {
        /* When ++global was 1 we let ctx[1] run and returned to ctx[0], so the
         * ++global above is executed again and should've become 2. */
        err(1, 3);
    }

    /* Setup ctx[2] to run func2 */
    if (getcontext(&ctx[2]) != 0) err(1, 4);
    ctx[2].uc_stack.ss_sp = malloc(SSIZE);
    ctx[2].uc_stack.ss_size = SSIZE;
    ctx[2].uc_link = &ctx[1];
    makecontext(&ctx[2], (void (*) (void)) func2, 0);

    /* Now things become tricky. ctx[2] is set up such that when it finishes
     * running, it starts ctx[1] again. However, func1 swaps back to func2.
     * Then, when func2 has finished running, we continue with ctx[1] and,
     * finally, we return to ctx[0]. */
    if (swapcontext(&ctx[0], &ctx[2]) != 0) err(1, 5); /* makecontext failed? */
    reentered_main = 1;

    /* The call graph is as follows:
     *
     *                  ########
     *         /--------># main #
     *         7    /----########----\
     *         |    |       ^        |
     *         |    1       2        3
     *         |    V       |        V
     *     #########----/        #########
     *     # func1 #<-------4-------# func2 #
     *     #########--------5------>#########
     *         ^                        |
     *         |                        |
     *         \---------6--------------/
     *
     * Main calls func1, func1 increases entered_func1, and returns to main. Main
     * calls func2, swaps to func1, swaps to func2, which increases entered_func2,
     * continues with func1, which increases entered_func1 again, continues to
     * main, where reentered_main is set to 1. In effect, entered_func1 == 2,
     * entered_func2 == 1, reentered_main == 1. Verify that. */
    if (entered_func1 != 2) err(1, 6);
    if (entered_func2 != 1) err(1, 7);
    /* reentered_main == 1 is verified upon exit */

    /* Try to allocate too small a stack */
    free(ctx[2].uc_stack.ss_sp); /* Deallocate stack space first */
    if (getcontext(&ctx[2]) != 0) err(1, 8);
    ctx[2].uc_stack.ss_sp = malloc(MINSIGSTKSZ-1);
    ctx[2].uc_stack.ss_size = MINSIGSTKSZ-1;
    ctx[2].uc_link = &ctx[0];
    makecontext(&ctx[2], (void (*) (void)) fail, 0);

    /* Because makecontext is void, we can only detect an error by trying to
     * use the invalid context */
    if (swapcontext(&ctx[0], &ctx[2]) == 0) err(1, 9);

    /* Try to allocate a huge stack to force the usage of the brk/sbrk system
     * call to enlarge the data segment. Because we are fiddling with the stack
     * pointer, the OS might think the stack segment and data segment have
     * collided and kill us. This is wrong and therefore the following should
     * work. */
    free(ctx[2].uc_stack.ss_sp); /* Deallocate stack space first */
    if (getcontext(&ctx[2]) != 0) err(1, 14);
    ctx[2].uc_stack.ss_sp = malloc(8 * 1024 * 1024); /* 8 MB */
    ctx[2].uc_stack.ss_size = 8 * 1024 * 1024;
    ctx[2].uc_link = &ctx[0];
    makecontext(&ctx[2], (void (*) (void)) test_brk, 0);
    if (swapcontext(&ctx[0], &ctx[2]) != 0) err(1, 15);

    ctx[1].uc_link = &ctx[0];
    ctx[2].uc_link = NULL;
    makecontext(&ctx[1], (void (*) (void)) do_parent, 0);
    makecontext(&ctx[2], (void (*) (void)) do_child, 0);
    if (swapcontext(&ctx[0], &ctx[2]) == -1) err(1, 16);

    quit();
    return(-1);
}
void threadlet::switch_tasklet(tasklet& tasklet_) { tasklet_._pthreadlet = this; swapcontext(&_ucontext, &tasklet_._ucontext); }
int s_poll_wait(s_poll_set *fds, int sec, int msec) {
    CONTEXT *context; /* current context */
    static CONTEXT *to_free=NULL; /* delayed memory deallocation */

    /* FIXME: msec parameter is currently ignored with UCONTEXT threads */
    (void)msec; /* squash the unused parameter warning */

    /* remove the current context from ready queue */
    context=ready_head;
    ready_head=ready_head->next;
    if(!ready_head) /* the queue is empty */
        ready_tail=NULL;
    /* it is safe to s_log() after new ready_head is set */

    /* it is illegal to deallocate the stack of the current context */
    if(to_free) { /* a delayed deallocation is scheduled */
#ifdef DEBUG_UCONTEXT
        s_log(LOG_DEBUG, "Releasing context %ld", to_free->id);
#endif
        str_free(to_free->stack);
        str_free(to_free);
        to_free=NULL;
    }

    /* manage the current thread */
    if(fds) { /* something to wait for -> swap the context */
        context->fds=fds; /* set file descriptors to wait for */
        context->finish=sec<0 ? -1 : time(NULL)+sec;
        /* append the current context to the waiting queue */
        context->next=NULL;
        if(waiting_tail)
            waiting_tail->next=context;
        waiting_tail=context;
        if(!waiting_head)
            waiting_head=context;
    } else { /* nothing to wait for -> drop the context */
        to_free=context; /* schedule for delayed deallocation */
    }

    while(!ready_head) /* wait until there is a thread to switch to */
        scan_waiting_queue();

    /* switch threads */
    if(fds) { /* swap the current context */
        if(context->id!=ready_head->id) {
#ifdef DEBUG_UCONTEXT
            s_log(LOG_DEBUG, "Context swap: %ld -> %ld",
                context->id, ready_head->id);
#endif
            swapcontext(&context->context, &ready_head->context);
#ifdef DEBUG_UCONTEXT
            s_log(LOG_DEBUG, "Current context: %ld", ready_head->id);
#endif
        }
        return ready_head->ready;
    } else { /* drop the current context */
#ifdef DEBUG_UCONTEXT
        s_log(LOG_DEBUG, "Context set: %ld (dropped) -> %ld",
            context->id, ready_head->id);
#endif
        setcontext(&ready_head->context);
        ioerror("setcontext"); /* should not ever happen */
        return 0;
    }
}
void uthread_exit()
{
    // will reference the next node that contains the next thread to run when the current thread context exits
    node_t* next;
    // will reference the current thread context which is about to exit
    ucontext_t* old_thread;
    // will reference the next thread context which is about to run
    ucontext_t* next_thread;
    // will keep track of whether or not this function has been called yet
    static volatile int exit_called = 0;
    volatile int get_context_return_value = 0;

    // return if system_init() was not called yet
    if(!init_called)
        return;

    // special case - there are no other threads to execute
    if(is_empty(ready_queue)) {
        // special case - the user is done using the uthread library, so the main_context should be freed
        if(exit_called == 1) {
            free(main_context);
            return;
        } else {
            // special case - there are no more threads, but the user is not necessarily done using the library
            exit_called = 1;
            free(current_context);
            swapcontext(current_context, main_context);
            return;
        }
    }

    // threads still exist on the ready queue if code reaches here
    // pop off the next thread to run
    next = pop(ready_queue);
    // extract the node's thread context
    next_thread = next->uthread;
    // free the popped node as it is no longer needed
    free(next);
    // set the value of old_thread to the current_context
    old_thread = current_context;
    // set current context to the next_thread
    current_context = next_thread;
    // call getcontext() on old_thread to prepare for the context switch
    get_context_return_value = getcontext(old_thread);
    // if getcontext() returned SUCCESS, free the memory allocation of old_thread and switch to next_thread
    if (get_context_return_value == SUCCESS) {
        free(old_thread);
        setcontext(next_thread);
    }

    return;
}
void next() { if(_end) throw stop_iteration(); if(swapcontext(&_context_parent, &_context_child) == -1) throw std::runtime_error("next: fail @ swapcontext"); }
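/*
 * Editor's sketch of the parent/child generator pattern behind _yield() and
 * next() above, reduced to plain C with globals: the producer swaps back to
 * the consumer each time it has a value, and a done flag stands in for the
 * stop_iteration exception.  All names are illustrative.
 */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t ctx_parent, ctx_child;
static char child_stack[64 * 1024];
static int current_value;
static int done;

static void yield_value(int v)
{
    current_value = v;
    swapcontext(&ctx_child, &ctx_parent);   /* hand the value to the consumer */
}

static void producer(void)
{
    for (int i = 0; i < 3; i++)
        yield_value(i);
    done = 1;                               /* falling off resumes the parent via uc_link */
}

int main(void)
{
    getcontext(&ctx_child);
    ctx_child.uc_stack.ss_sp = child_stack;
    ctx_child.uc_stack.ss_size = sizeof child_stack;
    ctx_child.uc_link = &ctx_parent;
    makecontext(&ctx_child, producer, 0);

    for (;;) {
        swapcontext(&ctx_parent, &ctx_child);   /* resume the producer */
        if (done)
            break;
        printf("%d\n", current_value);          /* prints 0 1 2 */
    }
    return 0;
}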
/**
 * Name: uthread_create( void, int )
 * Description: This function creates a new user-level thread which runs func(), with the priority number specified by argument priority.
 * Params: func() - The function to be called when this to-be-created uthread starts execution
 *         priority - The priority number of this to-be-created uthread
 * Return: int - This function returns SUCCESS if it succeeds, or RETURN_ERROR otherwise.
 */
int uthread_create(void func(), int priority)
{
    // a boolean value used to keep track of whether this is the first time the user is calling this function
    static volatile int first_time = TRUE;
    // the context struct that will hold the to-be-created uthread
    ucontext_t* ucp;
    // a node that will hold 'ucp' and have a priority based on the 'priority' argument
    node_t *new_node;
    // the node that will be popped off if this is the first call to uthread_create()
    node_t* next_node;
    // the thread that will be started if this is the first call to uthread_create()
    ucontext_t* next_thread;

    // return RETURN_ERROR if system_init() was not called yet
    if(!init_called)
        return RETURN_ERROR;

    // allocate the new thread's context on the heap, returning RETURN_ERROR immediately if allocation fails
    ucp = (ucontext_t*)malloc(sizeof(ucontext_t));
    if(!ucp)
        return RETURN_ERROR;

    // allocate the new thread's stack on the heap, returning RETURN_ERROR immediately if allocation fails
    ucp->uc_stack.ss_sp = malloc(UTHREAD_STACK_SIZE);
    if(!ucp->uc_stack.ss_sp)
        return RETURN_ERROR;
    ucp->uc_stack.ss_size = UTHREAD_STACK_SIZE;

    // if getcontext() fails, return RETURN_ERROR immediately
    if(getcontext(ucp) == RETURN_ERROR)
        return RETURN_ERROR;

    // set up an alternate thread of control in ucp, which has previously been initialised using getcontext()
    makecontext(ucp, (void (*)(void)) func, 0);

    // allocate a node filled with 'ucp' and 'priority' to be placed on the ready queue, returning RETURN_ERROR immediately if allocation fails
    new_node = (node_t*) malloc(sizeof(node_t));
    if(!new_node)
        return RETURN_ERROR;

    // initialize the new node with the proper values
    new_node->priority = priority;
    new_node->next = NULL_NODE;
    new_node->prev = NULL_NODE;
    new_node->uthread = ucp;

    // push the new node onto the ready queue
    push(ready_queue, new_node);

    // if this is the first call to this function, pop the newly created thread off the ready queue
    // and start its execution by swapping to its context.
    if(first_time) {
        next_node = pop(ready_queue);
        next_thread = next_node->uthread;
        free(next_node);
        current_context = next_thread;
        first_time = FALSE;
        swapcontext(main_context, next_thread);
    }

    // if the PC reaches here, return 0 indicating success
    return SUCCESS;
}
int MyThreadJoin(MyThread thread)
{
    //printf("MyThreadJoin: START ... \n");
    if(thread==NULL) {
        //printf("NULL thread, returning from MyThreadJoin");
        return 0;
    }

    struct node *child_th;
    child_th=(struct node *)thread;
    //printf("MyThreadJoin: child thread= %d , its parent thread= %d , current invoking thread= %d \n",child_th->tid,child_th->pid,curr_th->tid);

    if(curr_th->tid != child_th->pid) {
        //printf("MyThreadJoin: FAILURE : specified thread is not an immediate child of invoking thread...\n");
        return -1;
    }

    //printf("Search for specified child thread in ready queue...\n");
    // If the child has already terminated, do not block.
    // Note: A child may have terminated without the parent having joined with it.
    int cid=child_th->tid;
    int child_exist=0;

    // check for the child thread in the Ready queue
    struct node *tmp=front;
    while(tmp!=NULL && tmp->tid != cid) {
        tmp=tmp->next;
    }
    if(tmp!=NULL && tmp->tid ==cid) {
        child_exist=1;
        //printf("Specified child thread exists in Ready queue... \n");
    }

    // If the child was not found in the Ready queue, check the Blocked queue
    if(child_exist==0) {
        //printf("Specified child thread not in ready queue...search in blocked queue\n");
        tmp=bfront;
        while(tmp!=NULL && tmp->tid != cid) {
            tmp=tmp->next;
        }
        if(tmp!=NULL && tmp->tid == cid) {
            child_exist=1;
            //printf("Specified child thread exists in Blocked queue... \n");
        }
    }

    if(child_exist==0) {
        //printf("Specified child thread has already terminated. Not blocking the invoking thread...\n");
        return 0;
    } else if(child_exist == 1) {
        //printf("Specified child thread exists ... \n");
    }

    //todo: remove this
    ////printf("MyThreadJoin: RETURN ... \n");
    //return 0;

    // The child thread exists, so block the invoking thread, move it into the Blocked queue,
    // and then run the first ready thread.

    // saving join pointers for future failure recovery
    struct join_list *old_jfront=curr_th->join_th;
    struct join_list *old_jrear=curr_th->join_th_rear;

    // Before blocking, update the join_list of the current thread
    //printf("MyThreadJoin : need to update join_list of this current thread:%d \n",curr_th->tid);
    struct join_list *jtmp=(struct join_list *)malloc(sizeof(struct join_list));
    if(jtmp==NULL) {
        //printf("No memory allocated... Maximum thread limit reached...returning gracefully...\n");
        swapcontext(&dmy_cnxt,&init_cnxt);
    }
    jtmp->ch_tid=cid;
    jtmp->link=NULL;
    if((curr_th->join_th)==NULL) {
        curr_th->join_th=jtmp;
        curr_th->join_th_rear=curr_th->join_th;
    } else {
        curr_th->join_th_rear->link=jtmp;
        curr_th->join_th_rear=jtmp;
    }

    struct node * blocked_th=binsert_q(curr_th->cnxt,curr_th->tid,curr_th->pid,curr_th->join_th,curr_th->join_th_rear);
    //printf("MyThreadJoin : current thread:%d entered in BQ , but actual current thread is= %d\n",blocked_th->tid, curr_th->tid);

    int old_th=curr_th->tid;
    int old_pr=curr_th->pid;
    ucontext_t old_cnxt=curr_th->cnxt;
    ucontext_t new_cnxt;

    struct node * new_th=pop_q();
    if(new_th == NULL) {
        //printf(" MyThreadJoin: ready queue empty ... \n");
        swapcontext(&dmy_cnxt,&init_cnxt);
    }
    new_cnxt=new_th->cnxt;
    curr_th->tid=new_th->tid;
    curr_th->pid=new_th->pid;
    curr_th->cnxt=new_th->cnxt;
    curr_th->join_th=new_th->join_th;
    curr_th->join_th_rear=new_th->join_th_rear;

    //printf("MyThreadJoin: swap context from thread : %d to first ready thread : %d ... \n",old_th,curr_th->tid);
    if(swapcontext(&(blocked_th->cnxt),&new_cnxt)==-1) {
        curr_th->tid=old_th;
        curr_th->pid=old_pr;
        curr_th->cnxt=old_cnxt;
        curr_th->join_th=old_jfront;
        curr_th->join_th_rear=old_jrear;
        bremoveNode(curr_th->tid);
        //printf("MyThreadJoin: Blocking swapcontext error...");
    }

    //printf("MyThreadJoin: curr_thread= %d , parent_th = %d ,EXIT... \n",curr_th->tid,curr_th->pid);
    return 0;
}
int poller_resume(mrkthr_ctx_t *ctx)
{
    int res;

    /*
     * Can only be the result of yield or start, ie, the state cannot be
     * dormant or resumed.
     */
    if (!(ctx->co.state & CO_STATE_RESUMABLE)) {
        /* This is an error (currently no reason is known, though) */
        sleepq_remove(ctx);
        /* not sure if we can push it here ... */
        push_free_ctx(ctx);
        TRRET(RESUME + 1);
    }

    ctx->co.state = CO_STATE_RESUMED;
    me = ctx;

#ifdef TRACE_VERBOSE
    CTRACE("resuming >>>");
    //mrkthr_dump(ctx);
#endif

    PROFILE_STOP(mrkthr_sched0_p);
    PROFILE_START(mrkthr_swap_p);
    res = swapcontext(&main_uc, &me->co.uc);
    PROFILE_STOP(mrkthr_swap_p);
    PROFILE_START(mrkthr_sched0_p);

#ifdef TRACE_VERBOSE
    CTRACE("back from resume <<<");
    //mrkthr_dump(me);
#endif

    if (errno == EINTR) {
        CTRACE("ignoring EINTR");
#ifdef TRACE_VERBOSE
        //mrkthr_dump(ctx);
#endif
        errno = 0;
        return 0;
    }

    /* no one in the thread context may touch me */
    assert(me == ctx);
    me = NULL;

    if (ctx->co.state & CO_STATE_RESUMABLE) {
        return ctx->co.rc;

    } else if (ctx->co.state == CO_STATE_RESUMED) {
        /*
         * This is the case of the exited (dead) thread.
         */
#ifdef TRACE_VERBOSE
        CTRACE("Assuming exited (dead) ...");
        //mrkthr_dump(ctx);
#endif
        sleepq_remove(ctx);
        push_free_ctx(ctx);
        //TRRET(RESUME + 2);
        //return MRKTHR_CO_RC_EXITED;
        return ctx->co.rc;

    } else {
        CTRACE("Unknown case:");
        mrkthr_dump(ctx);
        FAIL("resume");
    }

    return res;
}