void tpl_switch_context_from_it(
    tpl_context *old_context,
    tpl_context *new_context)
{
    assert( *new_context != co_current() );

    if( *new_context == &idle_task_context )
    {
        /* idle_task activation */
        co_call( idle_task_context );
    }
    else
        co_call( *new_context );
}
void tpl_switch_context(
    tpl_context *old_context,
    tpl_context *new_context)
{
    assert( *new_context != co_current() );

    tpl_release_task_lock();

    if( *new_context == &idle_task_context )
    {
        /* idle_task activation */
        co_call( idle_task_context );
    }
    else
        co_call( *new_context );

    tpl_get_task_lock();
}
/* Returns 0 if the user is successfully authenticated */
static int pam_auth_pass(void *ctx, const char *pass, unsigned pass_len)
{
    struct pam_ctx_st *pctx = ctx;

    if (pass == NULL || pass_len + 1 > sizeof(pctx->password))
        return -1;

    if (pctx->state != PAM_S_WAIT_FOR_PASS) {
        syslog(LOG_AUTH, "PAM auth: conversation in wrong state (%d/expecting %d)",
               pctx->state, PAM_S_WAIT_FOR_PASS);
        return ERR_AUTH_FAIL;
    }

    memcpy(pctx->password, pass, pass_len);
    pctx->password[pass_len] = 0;

    pctx->cr_ret = PAM_CONV_ERR;
    co_call(pctx->cr);

    if (pctx->cr_ret != PAM_SUCCESS) {
        syslog(LOG_AUTH, "PAM-auth pam_auth_pass: %s",
               pam_strerror(pctx->ph, pctx->cr_ret));
        return ERR_AUTH_FAIL;
    }

    if (pctx->state != PAM_S_COMPLETE)
        return ERR_AUTH_CONTINUE;

    return 0;
}
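/*
 * For context: pctx->cr is a coroutine that runs the blocking
 * pam_authenticate() call; each co_call(pctx->cr) resumes it until its
 * conversation callback suspends again. A minimal sketch of the other side,
 * assuming a PCL-style co_resume(); pam_conv_handler, pam_auth_thread and
 * their wiring are illustrative, not ocserv's actual code.
 */
static int pam_conv_handler(int n, const struct pam_message **msg,
                            struct pam_response **resp, void *ctx)
{
    struct pam_ctx_st *pctx = ctx;

    pctx->state = PAM_S_WAIT_FOR_PASS;
    co_resume();                      /* back to the last co_call() */

    /* pam_auth_pass() has now filled in pctx->password */
    *resp = calloc(n, sizeof(struct pam_response));
    if (*resp == NULL)
        return PAM_CONV_ERR;
    (*resp)[0].resp = strdup(pctx->password);
    return PAM_SUCCESS;
}

/* hypothetical coroutine body started with co_create() */
static void pam_auth_thread(void *ctx)
{
    struct pam_ctx_st *pctx = ctx;

    pctx->cr_ret = pam_authenticate(pctx->ph, 0);
    if (pctx->cr_ret == PAM_SUCCESS)
        pctx->state = PAM_S_COMPLETE;
    co_resume();                      /* final hand-off back to the driver */
}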
void gsoc_task_scheduler_loop()
{
    int i = 0;

    while (1) {
        /* Never return from this function, so the caller (the task
           scheduling point ABI) doesn't have to think about a context
           switch back from it. */
        gsoc_task *next_task;
        int victim;

        ++i;

        next_task = gsoc_taskqueue_pop(_workers[_thread_id].taskq);
        if (__builtin_expect(next_task == NULL, 0)) {
            victim = gsoc_worker_choose_victim(_thread_id, _num_workers);
            next_task = gsoc_taskqueue_take(_workers[victim].taskq);
            if (!next_task) {
                if (_num_team_tasks == 0)
                    return;  /* to main */
                continue;    /* try again */
            }
        }

        _workers[_thread_id].current_task = next_task;
        co_call(_workers[_thread_id].current_task);
    }
}
void co_resume(void)
{
    cothread_ctx *tctx = co_get_thread_ctx();

    co_call(tctx->co_curr->restarget);
    tctx->co_curr->restarget = tctx->co_curr->caller;
}
void *co_resume(void *data)
{
    data = co_call(co_current->resumeto, data);
    co_current->resumeto = co_current->caller;
    return data;
}
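/*
 * This older variant threads a value through every switch: co_call(co, data)
 * returns whatever the coroutine passes to its next co_resume(data), and the
 * first co_call()'s argument arrives as the coroutine function's parameter
 * (see del_helper below). A minimal generator sketch under that assumed API;
 * counter, drive and the stack sizing are illustrative:
 */
static void counter(void *data)
{
    long i, n = (long) data;               /* argument of the first co_call() */

    for (i = 0; i < n; i++)
        co_resume((void *) i);             /* yield i back to the caller */
}

static char counter_stk[4096];

void drive(void)
{
    struct coroutine *gen = co_create(counter, counter_stk, sizeof(counter_stk));
    void *v = co_call(gen, (void *) 10L);  /* runs until the first co_resume() */
    /* each further co_call(gen, NULL) fetches the next value in v */
    (void) v;
}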
void thread_exit(void *ret)
{
    thread_t *t = current_thread;

    sanity_check_threadcounts();
    tdebug("current=%s\n", current_thread ? current_thread->name : "NULL");

    if (current_thread == main_thread && main_exited == 0) {
        // The case when the user calls thread_exit() in the main thread is
        // complicated: we cannot simply terminate the main thread, because we
        // need that stack to terminate the whole program normally.  So we call
        // exit() to make the C runtime help us get the stack context where we
        // can just return to terminate the whole program.  This will call
        // exit_func() and in turn call thread_exit() again.
        main_exited = 1;
        exit(0);
    }

    // note the thread exit in the blocking graph
    t->curr_stats.node = bg_exit_node;
    current_thread->prev_stats.node->num_here--;
    current_thread->curr_stats.node->num_here++;
    if( bg_save_stats ) {
        bg_update_stats();
    }

    // update thread counts
    num_runnable_threads--;
    if( t->daemon ) num_daemon_threads--;

    t->state = ZOMBIE;
    num_zombie_threads++;

    // Deallocate the TCB; keep the thread if it is joinable and we want the
    // return value for something.
    if ( !( t->joinable ) ) {
        // tell the scheduler thread to delete the current one
        current_thread_exited = 1;
    } else {
        t->ret = ret;
        if (t->join_thread)
            thread_resume(t->join_thread);
    }

    sanity_check_threadcounts();

    // squirrel away the stack limit--not that we'll need it again
    current_thread->stack_bottom = stack_bottom;
    current_thread->stack_fingerprint = stack_fingerprint;

    // give control back to the scheduler
#ifdef NO_SCHEDULER_THREAD
    do_scheduler(NULL);
#else
    co_call(scheduler_thread->coro, NULL);
#endif
}
static void del_helper(void **args)
{
    for (;;) {
        if (args != helper_args)
            fatal("resume to deleted coroutine");
        co_delete(co_current->caller);
        args = co_call(args[0], args[1]);
    }
}
void gsoc_encounter_taskwait_directive()
{
    if (_workers[_thread_id].current_task->num_children == 0
        || _workers[_thread_id].current_task->cutoff)
        return;

    /* This task sleeps until the last child wakes it up */
    _workers[_thread_id].current_task->waiting = true;
    co_call(_workers[_thread_id].scheduler_task);
}
static enum CoopthRet do_call(struct coopth_per_thread_t *pth)
{
    enum CoopthRet ret;

    co_call(pth->thread);
    ret = pth->data.ret;
    if (ret == COOPTH_DONE && !pth->data.attached) {
        /* delete detached thread ASAP or leavedos() will complain */
        return COOPTH_DELETE;
    }
    return ret;
}
static void co_del_helper(void *data)
{
    coroutine *cdh;

    for (;;) {
        cdh = co_dhelper;
        co_dhelper = NULL;
        co_delete(co_curr->caller);
        co_call((coroutine_t) cdh);
        if (!co_dhelper) {
            fprintf(stderr,
                    "[PCL] Resume to delete helper coroutine: curr=%p\n",
                    co_curr);
            exit(1);
        }
    }
}
void co_exit_to(struct coroutine *new_co, void *data)
{
    static struct coroutine *helper = 0;
    static char stk[256];

    helper_args[0] = new_co;
    helper_args[1] = data;
    if (helper == 0)
        helper = co_create(del_helper, stk, sizeof(stk));
    /* We must leave this coroutine, so call the helper. */
    co_call(helper, helper_args);
    fatal("stale coroutine called");
}
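/*
 * Why the helper: a coroutine cannot free the stack it is currently running
 * on, so co_exit_to() switches into del_helper, which deletes its caller (the
 * exiting coroutine) from a safe stack and only then transfers to the
 * requested target. A hypothetical usage sketch; task_a, task_b and the
 * chaining are illustrative:
 */
static struct coroutine *task_b;

static void task_a(void *data)
{
    /* ... do work ... */
    co_exit_to(task_b, data);   /* never returns: this coroutine's stack is
                                   freed by del_helper before task_b runs */
}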
/* Called by pthread_create() */
void *start_thread(int *rank)
{
    _thread_id = *rank;
    gsoc_setaffinity(*rank);
    co_vp_init();  /* Necessary to set the initial value of "co_curr__" in
                      pcl.c.  Without this, a SEGV would happen because
                      swapcontext(co_curr__->context, co_next->context) is
                      called internally in pcl.c. */

    if (*rank == 0)
        fprintf(stderr, "Starting Master Thread on CPU%d. Scheduler is %p\n",
                sched_getcpu(), _workers[_thread_id].scheduler_task);
    else
        fprintf(stderr, "Starting Slave Thread on CPU%d. Scheduler is %p\n",
                sched_getcpu(), _workers[_thread_id].scheduler_task);

    co_call(_workers[_thread_id].scheduler_task);
    return NULL;
}
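/*
 * For completeness, a sketch of how such an entry point is typically
 * launched, one pthread per rank; NUM_WORKERS, ranks[] and launch_workers()
 * are assumptions for illustration, not part of the original code:
 */
#include <pthread.h>

#define NUM_WORKERS 4               /* hypothetical worker count */

static pthread_t tids[NUM_WORKERS];
static int ranks[NUM_WORKERS];

void launch_workers(void)
{
    /* each worker needs a stable pointer to its own rank */
    for (int i = 1; i < NUM_WORKERS; i++) {
        ranks[i] = i;
        pthread_create(&tids[i], NULL,
                       (void *(*)(void *)) start_thread, &ranks[i]);
    }
    ranks[0] = 0;
    start_thread(&ranks[0]);        /* rank 0 becomes the master thread */
}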
static int pam_auth_msg(void *ctx, void *pool, passwd_msg_st *pst)
{
    struct pam_ctx_st *pctx = ctx;
    size_t prompt_hash = 0;

    if (pctx->state != PAM_S_INIT && pctx->state != PAM_S_WAIT_FOR_PASS) {
        return 0;
    }

    if (pctx->state == PAM_S_INIT) {
        /* get the prompt */
        pctx->cr_ret = PAM_CONV_ERR;
        co_call(pctx->cr);

        if (pctx->cr_ret != PAM_SUCCESS) {
            syslog(LOG_AUTH, "PAM-auth pam_auth_msg: %s",
                   pam_strerror(pctx->ph, pctx->cr_ret));
            return ERR_AUTH_FAIL;
        }
    }

    if (pctx->msg.length == 0) {
        if (pctx->changing)
            pst->msg_str = talloc_strdup(pool, "Please enter the new password.");
        /* else use the default prompt */
    } else {
        if (str_append_data(&pctx->msg, "\0", 1) < 0)
            return -1;

        prompt_hash = hash_any(pctx->msg.data, pctx->msg.length, 0);

        pst->msg_str = talloc_strdup(pool, (char *) pctx->msg.data);
    }

    pst->counter = pctx->passwd_counter;

    /* differentiate password prompts, if the hash of the prompt
     * is different */
    if (pctx->prev_prompt_hash != prompt_hash)
        pctx->passwd_counter++;
    pctx->prev_prompt_hash = prompt_hash;

    return 0;
}
static void co_del_helper(void *data)
{
    cothread_ctx *tctx;
    coroutine *cdh;

    for (;;) {
        tctx = co_get_thread_ctx();
        cdh = tctx->co_dhelper;
        tctx->co_dhelper = NULL;
        co_delete(tctx->co_curr->caller);
        co_call((coroutine_t) cdh);
        if (tctx->co_dhelper == NULL) {
            fprintf(stderr,
                    "[PCL] Resume to delete helper coroutine: curr=%p caller=%p\n",
                    tctx->co_curr, tctx->co_curr->caller);
            exit(1);
        }
    }
}
/* puts the Ruby coroutine in control */
static void relay_from_main_to_ruby()
{
    printf("Relay: main => ruby\n");

#ifdef DEMONSTRATE_PCL
    co_call(ruby_coroutine);
#endif

#ifdef DEMONSTRATE_PTHREAD
    pthread_mutex_unlock(&ruby_coroutine_lock);
    pthread_mutex_lock(&main_coroutine_lock);
#endif

#ifdef DEMONSTRATE_UCONTEXT
    swapcontext(&main_coroutine, &ruby_coroutine);
#endif

    printf("Relay: main <= ruby\n");
}
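/*
 * The three #ifdef branches implement the same hand-off with different
 * primitives. As a self-contained illustration of the ucontext branch, a
 * minimal relay in the same shape; all names here are illustrative:
 */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, other_ctx;
static char other_stack[64 * 1024];

static void other_side(void)
{
    printf("other: hello\n");
    swapcontext(&other_ctx, &main_ctx);  /* relay: other => main */
    printf("other: goodbye\n");          /* resumes here on the second relay */
}

int main(void)
{
    getcontext(&other_ctx);
    other_ctx.uc_stack.ss_sp = other_stack;
    other_ctx.uc_stack.ss_size = sizeof(other_stack);
    other_ctx.uc_link = &main_ctx;       /* fall back here when other_side() returns */
    makecontext(&other_ctx, other_side, 0);

    swapcontext(&main_ctx, &other_ctx);  /* relay: main => other */
    printf("main: back in control\n");
    swapcontext(&main_ctx, &other_ctx);  /* let the other side finish */
    return 0;
}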
void co_exit_to(coroutine_t coro)
{
    cothread_ctx *tctx = co_get_thread_ctx();
    coroutine *co = (coroutine *) coro;

    if (tctx->dchelper == NULL &&
        (tctx->dchelper = co_create(co_del_helper, NULL, tctx->stk,
                                    sizeof(tctx->stk))) == NULL) {
        fprintf(stderr,
                "[PCL] Unable to create delete helper coroutine: curr=%p\n",
                tctx->co_curr);
        exit(1);
    }

    tctx->co_dhelper = co;

    co_call((coroutine_t) tctx->dchelper);

    fprintf(stderr,
            "[PCL] Stale coroutine called: curr=%p exitto=%p caller=%p\n",
            tctx->co_curr, co, tctx->co_curr->caller);
    exit(1);
}
void co_exit_to(coroutine_t coro)
{
    coroutine *co = (coroutine *) coro;
    static coroutine *dchelper = NULL;
    static char stk[CO_MIN_SIZE];

    if (!dchelper &&
        !(dchelper = co_create(co_del_helper, NULL, stk, sizeof(stk)))) {
        fprintf(stderr,
                "[PCL] Unable to create delete helper coroutine: curr=%p\n",
                co_curr);
        exit(1);
    }

    co_dhelper = co;

    co_call((coroutine_t) dchelper);

    fprintf(stderr, "[PCL] Stale coroutine called: curr=%p\n", co_curr);
    exit(1);
}
void *worker(void *arg)
{
    int term = 0;
    int id = *(int *) arg;

    /*
    if (co_thread_init() < 0) {
        perror("co_thread_init failed in worker\n");
        exit(-1);
    }
    */

    printf("Created worker %d\n", id);

    do {
        // mutual exclusion: pull a task from the queue
        if (pthread_mutex_lock(&lock) != 0) {
            perror("pthread_mutex_lock error");
            exit(-1);
        }

        // BEGIN CRITICAL SECTION
        printf("Worker %d calling task\n", id);
        co_call(task);
        printf("Worker %d reads counter %d\n", id, counter);
        if (counter >= COUNTER_MAX) {
            term = 1;
        }
        // END CRITICAL SECTION

        if (pthread_mutex_unlock(&lock) != 0) {
            perror("pthread_mutex_unlock error");
            exit(-1);
        }

        printf("Worker %d unlock and sleep\n", id);
        sleep(1);
    } while (term == 0);

    //co_thread_cleanup();

    return 0;
}
static int eph_new_conn(int sfd, void *func)
{
    struct eph_conn *conn = (struct eph_conn *) malloc(sizeof(struct eph_conn));
    struct epoll_event ev;

    if (!conn)
        return -1;

    memset(conn, 0, sizeof(*conn));
    DBL_INIT_LIST_HEAD(&conn->lnk);

    conn->sfd = sfd;
    conn->events = 0;
    conn->revents = 0;
    conn->nbytes = conn->rindex = 0;

    if (!(conn->co = co_create(func, conn, NULL, stksize))) {
        free(conn);
        return -1;
    }

    DBL_LIST_ADDT(&conn->lnk, &chash[sfd % chash_size]);

    ev.events = 0;
    ev.data.ptr = conn;
    if (epoll_ctl(kdpfd, EPOLL_CTL_ADD, sfd, &ev) < 0) {
        fprintf(stderr, "epoll set insertion error: fd=%d\n", sfd);
        DBL_LIST_DEL(&conn->lnk);
        co_delete(conn->co);
        free(conn);
        return -1;
    }

    ++numfds;

    co_call(conn->co);

    return 0;
}
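/*
 * Each connection gets its own coroutine: co_call(conn->co) runs the handler
 * until it would block, at which point the handler parks itself and the epoll
 * loop regains control. A sketch of what such a handler might look like; the
 * scheduler-side plumbing that re-calls it on readiness is not shown, and
 * conn_handler is an assumed name:
 */
static void conn_handler(void *data)
{
    struct eph_conn *conn = data;
    char buf[4096];
    ssize_t n;

    for (;;) {
        n = recv(conn->sfd, buf, sizeof(buf), 0);
        if (n > 0) {
            /* ... process n bytes ... */
        } else if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
            conn->events = EPOLLIN;   /* tell the loop what we wait for */
            co_resume();              /* park until the fd is readable again */
        } else {
            break;                    /* EOF or hard error */
        }
    }
    /* returning ends the coroutine; the loop reaps the connection */
}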
void co_resume(void)
{
    co_call(co_curr->restarget);
    co_curr->restarget = co_curr->caller;
}
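/*
 * Taken together with co_call(), this gives the usual call/yield pairing:
 * co_call() transfers into a coroutine and records the caller, co_resume()
 * transfers back to it. A minimal round trip against the PCL-style API used
 * in these snippets, assuming a NULL stack lets the library allocate one and
 * that a coroutine whose function returns is exited and freed automatically:
 */
#include <stdio.h>
#include <pcl.h>

static void ping(void *data)
{
    printf("coroutine: first entry (%s)\n", (char *) data);
    co_resume();                    /* yield back to main */
    printf("coroutine: second entry\n");
}

int main(void)
{
    coroutine_t co = co_create(ping, "hello", NULL, 8192);

    co_call(co);                    /* runs until the co_resume() */
    printf("main: between the calls\n");
    co_call(co);                    /* runs ping() to completion */
    return 0;
}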
/**
 * Main scheduling loop
 **/
static void *do_scheduler(void *arg)
{
    static cpu_tick_t next_poll = 0, next_overload_check = 0,
                      next_info_dump = 0, next_graph_stats = 0, now = 0;
    static int pollcount = 1000;
    static int init_done = 0;

    (void) arg;  // suppress GCC "unused parameter" warning

    in_scheduler = 1;

    // make sure we start out by saving edge stats for a while
    if( !init_done ) {
        init_done = 1;
        if (conf_no_statcollect)
            bg_save_stats = 0;
        else
            bg_save_stats = 1;

        GET_REAL_CPU_TICKS( now );
        next_graph_stats = now + 1 * ticks_per_second;

        start_timer(&scheduler_timer);
    }

    while( 1 ) {
        //current_thread = scheduler_thread;
        sanity_check_threadcounts();
        sanity_check_io_stats();

        // wake up threads that have timeouts
        sleepq_check(0);
        sanity_check_threadcounts();

        // break out if there are only daemon threads
        if(unlikely (num_suspended_threads == 0 &&
                     num_runnable_threads == num_daemon_threads)) {
            // dump the blocking graph
            if( exit_func_done && conf_dump_blocking_graph ) {
                tdebug("dumping blocking graph from do_scheduler()\n");
                dump_blocking_graph();
            }

            // go back to mainthread, which should now be in exit_func()
            current_thread = main_thread;
            in_scheduler = 0;
            co_call(main_thread->coro, NULL);
            in_scheduler = 1;

            if( unlikely(current_thread_exited) ) {  // free memory from deleted threads
                current_thread_exited = 0;
                if (current_thread != main_thread)  // main_thread is needed for whole program exit
                    free_thread( current_thread );
            }

            return NULL;
        }

        // cheesy way of handling things with timing requirements
        {
            GET_REAL_CPU_TICKS( now );

            // toggle stats collection
            if( conf_no_statcollect == 0 && next_graph_stats < now ) {
                bg_save_stats = 1 - bg_save_stats;

                if( bg_save_stats ) {
                    // record stats for 100 ms
                    next_graph_stats = now + 100 * ticks_per_millisecond;

                    // update the stats epoch, to allow proper handling of the first data items
                    bg_stats_epoch++;
                } else {
                    // avoid stats for 2000 ms
                    next_graph_stats = now + 2000 * ticks_per_millisecond;
                }
                //output(" *********************** graph stats %s\n", bg_save_stats ? "ON" : "OFF" );
            }

            // resource utilization
            if( unlikely (next_overload_check < now) ) {
                check_overload( now );
                next_overload_check = now + OVERLOAD_CHECK_INTERVAL;
            }

            // poll
            if( likely( (int) io_polling_func) ) {
                if( num_runnable_threads == 0 || --pollcount <= 0 || next_poll < now ) {
                //if( num_runnable_threads==0 ) {
                    // poll
                    long long timeout = 0;

                    if( num_runnable_threads == 0 ) {
                        if (first_wake_usecs == 0) {
                            timeout = -1;
                        } else {
                            // there are threads in the sleep queue,
                            // so poll for i/o till at most that time
                            unsigned long long now;
                            now = current_usecs();
                            tdebug("first_wake: %lld, now: %lld\n", first_wake_usecs, now);
                            if (first_wake_usecs > now)
                                timeout = first_wake_usecs - now;
                        }
                    }

                    stop_timer(&scheduler_timer);
                    //if( timeout != -1 ) output("timeout is not zero\n");
                    io_polling_func( timeout );  // allow blocking
                    start_timer(&scheduler_timer);

                    sanity_check_threadcounts();

#ifndef USE_NIO
                    // sleep for a bit, if there was nothing to do
                    // FIXME: let the IO functions block instead??
                    if( num_runnable_threads == 0 ) {
                        syscall(SYS_sched_yield);
                    }
#endif

                    // vary the poll rate depending on the workload
#if 0
                    if( num_runnable_threads < 5 ) {
                        next_poll = now + (10 * ticks_per_millisecond);
                        pollcount = 1000;
                    } else if( num_runnable_threads < 10 ) {
                        next_poll = now + (50 * ticks_per_millisecond);
                        pollcount = 2000;
                    } else {
                        next_poll = now + (100 * ticks_per_millisecond);
                        pollcount = 3000;
                    }
#else
                    next_poll = now + (ticks_per_millisecond << 13);
                    pollcount = 10000;
#endif
                }
            }

            // debug stats
            if( 0 && next_info_dump < now ) {
                dump_debug_info();
                next_info_dump = now + 5 * ticks_per_second;
            }
        }

        // get the head of the run list
        current_thread = sched_next_thread();

        // The scheduler gave an invalid thread even though there are runnable
        // threads.  This indicates that every runnable thread is likely to
        // require use of an overloaded resource.
        if( !valid_thread(current_thread) ) {
            pollcount = 0;
            continue;
        }

        // barf, if the returned thread is still on the sleep queue
        assert( current_thread->sleep == -1 );

        tdebug("running TID %d (%s)\n", current_thread->tid,
               current_thread->name ? current_thread->name : "no name");

        sanity_check_threadcounts();

        // call thread
        stop_timer(&scheduler_timer);
        start_timer(&app_timer);
        in_scheduler = 0;
        co_call(current_thread->coro, NULL);
        in_scheduler = 1;
        stop_timer(&app_timer);
        start_timer(&scheduler_timer);

        if( unlikely(current_thread_exited) ) {  // free memory from deleted threads
            current_thread_exited = 0;
            if (current_thread != main_thread)  // main_thread is needed for whole program exit
                free_thread( current_thread );
        }

#ifdef NO_SCHEDULER_THREAD
        return NULL;
#endif
    }

    return NULL;
}
/**
 * Perform the necessary management to yield the current thread.
 * If suspended == TRUE && timeout != 0, the thread is added to the
 * sleep queue and later woken up when the clock times out.
 * Returns FALSE if the timeout actually happens, TRUE if woken up
 * by other threads, INTERRUPTED if interrupted by a signal.
 **/
static int thread_yield_internal(int suspended, unsigned long long timeout)
{
    // now we use a per-thread errno stored in thread_t
    int savederrno;
    int rv = OK;

    tdebug("current_thread=%p\n", current_thread);

    savederrno = errno;

    // decide what to do with the thread
    if( !suspended )  // just add it to the runlist
        sched_add_thread( current_thread );
    else if( timeout )  // add to the sleep list
        sleepq_add_thread( current_thread, timeout );

    {
#ifdef SHOW_EDGE_TIMES
        cpu_tick_t start, end, rstart, rend;
        GET_CPU_TICKS(start);
        GET_REAL_CPU_TICKS(rstart);
#endif

        // figure out the current node in the graph
        if( !conf_no_stacktrace )
            bg_backtrace_set_node();

        // FIXME: fake out what cil would do...
        current_thread->curr_stats.node = bg_dummy_node;

        // we should already have been told the node by CIL or directly by the programmer
        assert( current_thread->curr_stats.node != NULL );

        // update node counts
        current_thread->prev_stats.node->num_here--;
        current_thread->curr_stats.node->num_here++;

        // update the blocking graph info
        if( bg_save_stats )
            bg_update_stats();

#ifdef SHOW_EDGE_TIMES
        GET_CPU_TICKS(end);
        GET_REAL_CPU_TICKS(rend);
        {
            thread_stats_t *curr = &current_thread->curr_stats;
            thread_stats_t *prev = &current_thread->prev_stats;
            output(" %3d -> %-3d %7lld ticks (%lld ms) %7lld rticks (%lld ms) ",
                   prev->node->node_num, curr->node->node_num,
                   curr->cpu_ticks - prev->cpu_ticks,
                   (curr->cpu_ticks - prev->cpu_ticks) / ticks_per_millisecond,
# ifdef USE_PERFCTR
                   curr->real_ticks - prev->real_ticks,
                   (curr->real_ticks - prev->real_ticks) / ticks_per_millisecond
# else
                   curr->cpu_ticks - prev->cpu_ticks,
                   (curr->cpu_ticks - prev->cpu_ticks) / ticks_per_millisecond
# endif
                   );
            output("update bg node %d: %lld (%lld ms) real: %lld (%lld ms)\n",
                   current_thread->curr_stats.node->node_num,
                   (end - start), (end - start) / ticks_per_millisecond,
                   (rend - rstart), (rend - rstart) / ticks_per_millisecond);
        }
#endif
    }

    // squirrel away the stack limit for next time
    current_thread->stack_bottom = stack_bottom;
    current_thread->stack_fingerprint = stack_fingerprint;

    // switch to the scheduler thread
#ifdef NO_SCHEDULER_THREAD
    do_scheduler(NULL);
#else
    co_call(scheduler_thread->coro, NULL);
#endif

    // set up the stack limit for the new thread
    stack_bottom = current_thread->stack_bottom;
    stack_fingerprint = current_thread->stack_fingerprint;

    // rotate the stats
    if( bg_save_stats ) {
        current_thread->prev_stats = current_thread->curr_stats;

        // update thread time, to skip time asleep
        GET_CPU_TICKS( current_thread->prev_stats.cpu_ticks );
        current_thread->prev_stats.cpu_ticks -= ticks_diff;  // FIXME: subtract out time to do debug output
#ifdef USE_PERFCTR
        GET_REAL_CPU_TICKS( current_thread->prev_stats.real_ticks );
        current_thread->prev_stats.real_ticks -= ticks_rdiff;  // FIXME: subtract out time to do debug output
#endif
    } else {
        current_thread->prev_stats.node = current_thread->curr_stats.node;
    }

    // check whether a timeout happened
    if (suspended && timeout && current_thread->timeout) {
        rv = TIMEDOUT;
        current_thread->timeout = 0;
    }

    // check for and process pending signals
    if ( likely(!current_thread->sig_waiting) ) {
        if (sig_process_pending())
            rv = INTERRUPTED;
    } else {
        // if sig_waiting is 1, sigwait() itself will handle the rest
        rv = INTERRUPTED;
    }

    errno = savederrno;
    return rv;
}