/* trampoline function for pth_uctx_make() */ static void pth_uctx_trampoline(void) { volatile pth_uctx_trampoline_t ctx; /* move context information from global to local storage */ ctx.mctx_parent = pth_gctx_get()->pth_uctx_trampoline_ctx.mctx_parent; ctx.uctx_this = pth_gctx_get()->pth_uctx_trampoline_ctx.uctx_this; ctx.uctx_after = pth_gctx_get()->pth_uctx_trampoline_ctx.uctx_after; ctx.start_func = pth_gctx_get()->pth_uctx_trampoline_ctx.start_func; ctx.start_arg = pth_gctx_get()->pth_uctx_trampoline_ctx.start_arg; /* switch back to parent */ pth_mctx_switch(&(ctx.uctx_this->uc_mctx), ctx.mctx_parent); /* enter start function */ (*ctx.start_func)(ctx.start_arg); /* switch to successor user-space context */ if (ctx.uctx_after != NULL) pth_mctx_restore(&(ctx.uctx_after->uc_mctx)); /* terminate process (the only reasonable thing to do here) */ exit(0); /* NOTREACHED */ return; }
/* make setup of user-space context structure */ int pth_uctx_make( pth_uctx_t uctx, char *sk_addr, size_t sk_size, const sigset_t *sigmask, void (*start_func)(void *), void *start_arg, pth_uctx_t uctx_after) { pth_mctx_t mctx_parent; sigset_t ss; /* argument sanity checking */ if (uctx == NULL || start_func == NULL || sk_size < 16*1024) return pth_error(FALSE, EINVAL); /* configure run-time stack */ if (sk_addr == NULL) { if ((sk_addr = (char *)malloc(sk_size)) == NULL) return pth_error(FALSE, errno); uctx->uc_stack_own = TRUE; } else uctx->uc_stack_own = FALSE; uctx->uc_stack_ptr = sk_addr; uctx->uc_stack_len = sk_size; /* configure the underlying machine context */ if (!pth_mctx_set(&uctx->uc_mctx, pth_uctx_trampoline, uctx->uc_stack_ptr, uctx->uc_stack_ptr+uctx->uc_stack_len)) return pth_error(FALSE, errno); /* move context information into global storage for the trampoline jump */ pth_gctx_get()->pth_uctx_trampoline_ctx.mctx_parent = &mctx_parent; pth_gctx_get()->pth_uctx_trampoline_ctx.uctx_this = uctx; pth_gctx_get()->pth_uctx_trampoline_ctx.uctx_after = uctx_after; pth_gctx_get()->pth_uctx_trampoline_ctx.start_func = start_func; pth_gctx_get()->pth_uctx_trampoline_ctx.start_arg = start_arg; /* optionally establish temporary signal mask */ if (sigmask != NULL) sigprocmask(SIG_SETMASK, sigmask, &ss); /* perform the trampoline step */ pth_mctx_switch(&mctx_parent, &(uctx->uc_mctx)); /* optionally restore original signal mask */ if (sigmask != NULL) sigprocmask(SIG_SETMASK, &ss, NULL); /* finally flag that the context is now configured */ uctx->uc_mctx_set = TRUE; return TRUE; }
/* switch from current to other user-space context */ int pth_uctx_switch( pth_uctx_t uctx_from, pth_uctx_t uctx_to) { /* argument sanity checking */ if (uctx_from == NULL || uctx_to == NULL) return pth_error(FALSE, EINVAL); if (!(uctx_to->uc_mctx_set)) return pth_error(FALSE, EPERM); /* switch underlying machine context */ uctx_from->uc_mctx_set = TRUE; pth_mctx_switch(&(uctx_from->uc_mctx), &(uctx_to->uc_mctx)); return TRUE; }
/* the heart of this library: the thread scheduler */ intern void *pth_scheduler(void *dummy) { sigset_t sigs; pth_time_t running; pth_time_t snapshot; struct sigaction sa; sigset_t ss; int sig; pth_t t; /* * bootstrapping */ pth_debug1("pth_scheduler: bootstrapping"); /* mark this thread as the special scheduler thread */ pth_sched->state = PTH_STATE_SCHEDULER; /* block all signals in the scheduler thread */ sigfillset(&sigs); pth_sc(sigprocmask)(SIG_SETMASK, &sigs, NULL); /* initialize the snapshot time for bootstrapping the loop */ pth_time_set(&snapshot, PTH_TIME_NOW); /* * endless scheduler loop */ for (;;) { /* * Move threads from new queue to ready queue and optionally * give them maximum priority so they start immediately. */ while ((t = pth_pqueue_tail(&pth_NQ)) != NULL) { pth_pqueue_delete(&pth_NQ, t); t->state = PTH_STATE_READY; if (pth_favournew) pth_pqueue_insert(&pth_RQ, pth_pqueue_favorite_prio(&pth_RQ), t); else pth_pqueue_insert(&pth_RQ, PTH_PRIO_STD, t); pth_debug2("pth_scheduler: new thread \"%s\" moved to top of ready queue", t->name); } /* Calculate thread target runtime for ready queue */ pth_pqueue_calc_target(&pth_RQ); /* Assign tikets to threads in ready queue */ pth_pqueue_issue_tk(&pth_RQ); /* * Update average scheduler load */ pth_scheduler_load(&snapshot); /* * Find next thread in ready queue */ /* generate lottery number randomly */ /* pth_current = pth_pqueue_delmax(&pth_RQ); */ int ltr_num = rand() % pth_RQ.total_tk; pth_current = pth_pqueue_deltk(&pth_RQ, ltr_num); if (pth_current == NULL) { fprintf(stderr, "**Pth** SCHEDULER INTERNAL ERROR: " "no more thread(s) available to schedule!?!?\n"); abort(); } pth_debug4("pth_scheduler: thread \"%s\" selected (prio=%d, qprio=%d)", pth_current->name, pth_current->prio, pth_current->q_prio); /* * Raise additionally thread-specific signals * (they are delivered when we switch the context) * * Situation is ('#' = signal pending): * process pending (pth_sigpending): ----#### * thread pending (pth_current->sigpending): --##--## * Result has to be: * process new pending: --###### */ if (pth_current->sigpendcnt > 0) { sigpending(&pth_sigpending); for (sig = 1; sig < PTH_NSIG; sig++) if (sigismember(&pth_current->sigpending, sig)) if (!sigismember(&pth_sigpending, sig)) kill(getpid(), sig); } /* * Set running start time for new thread * and perform a context switch to it */ pth_debug3("pth_scheduler: switching to thread 0x%lx (\"%s\")", (unsigned long)pth_current, pth_current->name); /* update thread times */ pth_time_set(&pth_current->lastran, PTH_TIME_NOW); /* update scheduler times */ pth_time_set(&running, &pth_current->lastran); pth_time_sub(&running, &snapshot); pth_time_add(&pth_sched->running, &running); /* ** ENTERING THREAD ** - by switching the machine context */ pth_current->dispatches++; pth_mctx_switch(&pth_sched->mctx, &pth_current->mctx); /* update scheduler times */ pth_time_set(&snapshot, PTH_TIME_NOW); pth_debug3("pth_scheduler: cameback from thread 0x%lx (\"%s\")", (unsigned long)pth_current, pth_current->name); /* * Calculate and update the time the previous thread was running */ pth_time_set(&running, &snapshot); pth_time_sub(&running, &pth_current->lastran); pth_time_add(&pth_current->running, &running); /* Update actual time of all threads in ready queue */ pth_pqueue_update_a_rt(&pth_RQ); pth_debug3("pth_scheduler: thread \"%s\" ran %.6f", pth_current->name, pth_time_t2d(&running)); /* Additional debugging code */ pth_debug2("pth_scheduler: lottery number %d", ltr_num); pth_debug2("pth_scheduler: total ticket %d", pth_RQ.total_tk); pth_debug3("pth_scheduler: thread has %d offset and %d tickets", (pth_current->tk).offset, (pth_current->tk).tk_num); pth_debug3("pth_scheduler: thread has %.6f running time and %.6f lifetime", pth_time_t2d(&pth_current->running), pth_time_t2d(&lifetime)); pth_debug2("pth_scheduler: thread has %.6f target runtime", (pth_current->cpu_rt).target); pth_debug2("pth_scheduler: thread has %.6f actual runtime", (pth_current->cpu_rt).actual); /* * Remove still pending thread-specific signals * (they are re-delivered next time) * * Situation is ('#' = signal pending): * thread old pending (pth_current->sigpending): --##--## * process old pending (pth_sigpending): ----#### * process still pending (sigstillpending): ---#-#-# * Result has to be: * process new pending: -----#-# * thread new pending (pth_current->sigpending): ---#---# */ if (pth_current->sigpendcnt > 0) { sigset_t sigstillpending; sigpending(&sigstillpending); for (sig = 1; sig < PTH_NSIG; sig++) { if (sigismember(&pth_current->sigpending, sig)) { if (!sigismember(&sigstillpending, sig)) { /* thread (and perhaps also process) signal delivered */ sigdelset(&pth_current->sigpending, sig); pth_current->sigpendcnt--; } else if (!sigismember(&pth_sigpending, sig)) { /* thread signal not delivered */ pth_util_sigdelete(sig); } } } } /* * Check for stack overflow */ if (pth_current->stackguard != NULL) { if (*pth_current->stackguard != 0xDEAD) { pth_debug3("pth_scheduler: stack overflow detected for thread 0x%lx (\"%s\")", (unsigned long)pth_current, pth_current->name); /* * if the application doesn't catch SIGSEGVs, we terminate * manually with a SIGSEGV now, but output a reasonable message. */ if (sigaction(SIGSEGV, NULL, &sa) == 0) { if (sa.sa_handler == SIG_DFL) { fprintf(stderr, "**Pth** STACK OVERFLOW: thread pid_t=0x%lx, name=\"%s\"\n", (unsigned long)pth_current, pth_current->name); kill(getpid(), SIGSEGV); sigfillset(&ss); sigdelset(&ss, SIGSEGV); sigsuspend(&ss); abort(); } } /* * else we terminate the thread only and send us a SIGSEGV * which allows the application to handle the situation... */ pth_current->join_arg = (void *)0xDEAD; pth_current->state = PTH_STATE_DEAD; kill(getpid(), SIGSEGV); } } /* * If previous thread is now marked as dead, kick it out */ if (pth_current->state == PTH_STATE_DEAD) { pth_debug2("pth_scheduler: marking thread \"%s\" as dead", pth_current->name); if (!pth_current->joinable) pth_tcb_free(pth_current); else pth_pqueue_insert(&pth_DQ, PTH_PRIO_STD, pth_current); pth_current = NULL; } /* * If thread wants to wait for an event * move it to waiting queue now */ if (pth_current != NULL && pth_current->state == PTH_STATE_WAITING) { pth_debug2("pth_scheduler: moving thread \"%s\" to waiting queue", pth_current->name); pth_pqueue_insert(&pth_WQ, pth_current->prio, pth_current); pth_current = NULL; } /* * migrate old treads in ready queue into higher * priorities to avoid starvation and insert last running * thread back into this queue, too. */ /* Disable this auto-increment mechanism */ /* pth_pqueue_increase(&pth_RQ); */ if (pth_current != NULL) pth_pqueue_insert(&pth_RQ, pth_current->prio, pth_current); /* * Manage the events in the waiting queue, i.e. decide whether their * events occurred and move them to the ready queue. But wait only if * we have already no new or ready threads. */ if ( pth_pqueue_elements(&pth_RQ) == 0 && pth_pqueue_elements(&pth_NQ) == 0) /* still no NEW or READY threads, so we have to wait for new work */ pth_sched_eventmanager(&snapshot, FALSE /* wait */); else /* already NEW or READY threads exists, so just poll for even more work */ pth_sched_eventmanager(&snapshot, TRUE /* poll */); } /* NOTREACHED */ return NULL; }