static int
impl_tasklet_setup(PyTaskletObject *task, PyObject *args, PyObject *kwds)
{
    PyThreadState *ts = PyThreadState_GET();
    PyFrameObject *frame;
    PyObject *func;

    assert(PyTasklet_Check(task));
    if (ts->st.main == NULL)
        return PyTasklet_Setup_M(task, args, kwds);

    /* a bound tasklet keeps its callable in tempval until setup time */
    func = task->tempval;
    if (func == NULL)
        RUNTIME_ERROR("the tasklet was not bound to a function", -1);
    if ((frame = (PyFrameObject *)
                 slp_cframe_newfunc(func, args, kwds, 0)) == NULL) {
        return -1;
    }
    if (bind_tasklet_to_frame(task, frame)) {
        Py_DECREF(frame);
        return -1;
    }
    /* the callable now lives in the frame; reset tempval and make
       the tasklet runnable */
    TASKLET_SETVAL(task, Py_None);
    Py_INCREF(task);
    slp_current_insert(task);
    return 0;
}
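/* A minimal usage sketch (not part of the original source): creating a
 * tasklet and setting it up through the public C API, which ends up in
 * impl_tasklet_setup() above.  Assumes the declarations from
 * stackless_api.h; `callable` is any Python callable, and error
 * handling is abbreviated. */
static int
spawn_tasklet_sketch(PyObject *callable, PyObject *args)
{
    PyTaskletObject *task;

    /* bind the callable; the frame is only created during setup */
    task = PyTasklet_New(NULL, callable); /* NULL selects the default type */
    if (task == NULL)
        return -1;
    /* create the frame and insert the tasklet into the runnables queue */
    if (PyTasklet_Setup(task, args, NULL)) {
        Py_DECREF(task);
        return -1;
    }
    Py_DECREF(task); /* the runnables chain holds its own reference */
    return 0;
}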
static void
tasklet_clear(PyTaskletObject *t)
{
    tasklet_clear_frames(t);
    TASKLET_SETVAL(t, Py_None); /* always non-zero */

    /* unlink task from cstate */
    if (t->cstate != NULL && t->cstate->task == t)
        t->cstate->task = NULL;
    Py_CLEAR(t->cstate);
}
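/* Orientation sketch (an assumption, not from the original file):
 * tasklet_clear() has the shape of a tp_clear handler for CPython's
 * cyclic GC.  If it were wired into a type object it would look roughly
 * like this; the wrapper adapts the signature to the `inquiry` slot
 * type, and the surrounding slots are elided. */
static int
tasklet_clear_slot(PyObject *self)
{
    tasklet_clear((PyTaskletObject *)self);
    return 0; /* tp_clear conventionally returns 0 */
}

static PyTypeObject TaskletSketch_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "sketch.tasklet",
    .tp_basicsize = sizeof(PyTaskletObject),
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    .tp_clear = tasklet_clear_slot,
};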
/* Bind a callable to a tasklet without creating a frame; the frame is
   built later by setup().  Returns a new reference to the tasklet. */
PyTaskletObject *
PyTasklet_Bind(PyTaskletObject *task, PyObject *func)
{
    if (func == NULL || !PyCallable_Check(func))
        TYPE_ERROR("tasklet function must be a callable", NULL);
    if (task->f.frame != NULL)
        RUNTIME_ERROR("tasklet is already bound to a frame", NULL);
    TASKLET_SETVAL(task, func);
    Py_INCREF(task);
    return task;
}
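/* A hedged usage sketch (not part of the original source): binding a
 * callable to an existing tasklet from C, then launching it.  Assumes a
 * valid `task`; PyTasklet_Setup() creates the frame that
 * PyTasklet_Bind() deliberately does not. */
static int
bind_and_launch_sketch(PyTaskletObject *task, PyObject *func, PyObject *args)
{
    if (PyTasklet_Bind(task, func) == NULL)
        return -1;       /* not callable, or already bound to a frame */
    Py_DECREF(task);     /* drop the extra reference Bind returned */
    return PyTasklet_Setup(task, args, NULL);
}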
PyObject *
slp_bomb_explode(PyTaskletObject *task)
{
    PyBombObject *bomb = (PyBombObject *) task->tempval;

    assert(PyBomb_Check(bomb));
    Py_XINCREF(bomb->curexc_type);
    Py_XINCREF(bomb->curexc_value);
    Py_XINCREF(bomb->curexc_traceback);
    PyErr_Restore(bomb->curexc_type, bomb->curexc_value,
                  bomb->curexc_traceback);
    /* avoid periodical re-bombing; replacing tempval also releases the
     * reference that kept the bomb alive, so no extra DECREF here
     * (an explicit Py_DECREF(bomb) would over-release it) */
    TASKLET_SETVAL(task, Py_None);
    return NULL;
}
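/* Illustration (an assumption, not from the original source): a bomb is
 * a tempval that carries an exception instead of a value.  Code that
 * pulls a result out of a tasklet's tempval typically guards against
 * bombs like this, so a stored exception is re-raised at the consumer. */
static PyObject *
fetch_tempval_sketch(PyTaskletObject *task)
{
    if (PyBomb_Check(task->tempval))
        return slp_bomb_explode(task); /* restores the exception, returns NULL */
    Py_INCREF(task->tempval);
    return task->tempval;
}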
PyObject *
PyStackless_Schedule(PyObject *retval, int remove)
{
    STACKLESS_GETARG();
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *prev = ts->st.current, *next = prev->next;
    PyObject *ret = NULL;

    if (ts->st.main == NULL)
        return PyStackless_Schedule_M(retval, remove);

    Py_INCREF(prev);
    TASKLET_SETVAL(prev, retval);
    if (remove) {
        slp_current_remove();
        Py_DECREF(prev);
    }
    ret = slp_schedule_task(prev, next, stackless);
    Py_DECREF(prev);
    return ret;
}
static PyObject *
generic_channel_action(PyChannelObject *self, PyObject *arg,
                       int dir, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *source = ts->st.current;
    PyTaskletObject *target = self->head;
    int cando = dir > 0 ? self->balance < 0 : self->balance > 0;
    int interthread = cando ? target->cstate->tstate != ts : 0;
    PyObject *tmpval, *retval;
    int fail;

    assert(abs(dir) == 1);

    /* set the channel tmpval here, for the callback */
    TASKLET_CLAIMVAL(source, &tmpval);
    TASKLET_SETVAL(source, arg);

    /* note that notify might release the GIL. */
    /* XXX for the moment, we notify late on interthread */
    if (!interthread)
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);

    if (cando)
        /* communication 1): there is somebody waiting */
        fail = generic_channel_cando(ts, &retval, self, dir, stackless);
    else
        fail = generic_channel_block(ts, &retval, self, dir, stackless);
    if (fail) {
        /* restore the original tempval on failure */
        TASKLET_SETVAL_OWN(source, tmpval);
        return NULL;
    }
    Py_DECREF(tmpval);
    if (interthread)
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);
    return retval;
}
PyObject *
PyStackless_Schedule(PyObject *retval, int remove)
{
    STACKLESS_GETARG();
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *prev = ts->st.current, *next = prev->next;
    PyObject *ret = NULL;
    int switched;

    if (ts->st.main == NULL)
        return PyStackless_Schedule_M(retval, remove);

    /* make sure we hold a reference to the previous tasklet */
    Py_INCREF(prev);
    TASKLET_SETVAL(prev, retval);
    if (remove) {
        slp_current_remove();
        Py_DECREF(prev);
        if (next == prev)
            next = NULL; /* we were the last runnable tasklet */
    }

    /* We must not DECREF prev here (after slp_schedule_task()).
     * Ours could be the last reference, and dropping it would prompt
     * an emergency reactivation of the tasklet, although soft
     * switching isn't really done until we have unwound.
     * Use the delayed release mechanism instead.
     */
    assert(ts->st.del_post_switch == NULL);
    ts->st.del_post_switch = (PyObject *)prev;
    ret = slp_schedule_task(prev, next, stackless, &switched);
    /* However, if this was a no-op (e.g. prev == next, or an error
     * occurred) we need to DECREF prev ourselves. */
    if (!switched)
        Py_CLEAR(ts->st.del_post_switch);
    return ret;
}
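/* Usage sketch (not from the original source): a C extension can yield
 * control to the next runnable tasklet with PyStackless_Schedule().
 * Passing remove=0 keeps the caller in the runnables queue; the value
 * passed in comes back to us when we are scheduled again. */
static PyObject *
yield_sketch(void)
{
    PyObject *ret = PyStackless_Schedule(Py_None, 0);
    if (ret == NULL)
        return NULL; /* an exception (e.g. a bomb) arrived while away */
    return ret;      /* usually the retval we passed in */
}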
static PyObject *
tasklet_setstate(PyObject *self, PyObject *args)
{
    PyTaskletObject *t = (PyTaskletObject *) self;
    PyObject *tempval, *lis;
    int flags, nesting_level;
    PyFrameObject *f;
    Py_ssize_t i, nframes;
    int j;

    if (!PyArg_ParseTuple(args, "iOiO!:tasklet",
                          &flags,
                          &tempval,
                          &nesting_level,
                          &PyList_Type, &lis))
        return NULL;

    nframes = PyList_GET_SIZE(lis);
    TASKLET_SETVAL(t, tempval);

    /* There is an unpickling race condition.  While it is rare,
     * sometimes tasklets get their setstate call after the
     * channel they are blocked on.  If this happens and we
     * do not account for it, they will be left in a broken
     * state where they are on the channel's chain but have
     * cleared their blocked flag.
     *
     * We assume that the presence of a chain can only mean that
     * the chain is that of a channel, rather than that of the
     * main tasklet/scheduler.  Therefore such tasklets can leave
     * their blocked flag in place, because the channel would
     * have set it.
     */
    j = t->flags.blocked;
    *(int *)&t->flags = flags;
    if (t->next == NULL)
        t->flags.blocked = 0;
    else
        t->flags.blocked = j;

    /* t->nesting_level = nesting_level;
       XXX how do we handle this?
       XXX to be done: pickle the cstate without a ref to the task.
       XXX This should make it not runnable in the future.
     */
    if (nframes > 0) {
        PyFrameObject *back;

        /* rebuild the frame chain, linking each frame to its caller */
        f = (PyFrameObject *) PyList_GET_ITEM(lis, 0);
        if ((f = slp_ensure_new_frame(f)) == NULL)
            return NULL;
        back = f;
        for (i = 1; i < nframes; ++i) {
            f = (PyFrameObject *) PyList_GET_ITEM(lis, i);
            if ((f = slp_ensure_new_frame(f)) == NULL)
                return NULL;
            Py_INCREF(back);
            f->f_back = back;
            back = f;
        }
        t->f.frame = f;
    }
    /* walk frames again and calculate recursion_depth */
    for (f = t->f.frame; f != NULL; f = f->f_back) {
        if (PyFrame_Check(f) && f->f_execute != PyEval_EvalFrameEx_slp) {
            /* we count running frames which *have* added
             * to recursion_depth */
            ++t->recursion_depth;
        }
    }
    Py_INCREF(self);
    return self;
}
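/* Illustration (an assumption, not from the original source): the state
 * tuple consumed by tasklet_setstate() has the layout declared by the
 * "iOiO!" format above: (flags, tempval, nesting_level, [frames...]).
 * Building one by hand might look like this; `frames` must be a list. */
static PyObject *
build_tasklet_state_sketch(int flags, PyObject *tempval,
                           int nesting_level, PyObject *frames)
{
    /* "O" keeps the caller's references; "N" would steal them */
    return Py_BuildValue("(iOiO)", flags, tempval, nesting_level, frames);
}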
static PyObject *
schedule_task_block(PyTaskletObject *prev, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyObject *retval;
    PyTaskletObject *next = NULL;
    PyObject *unlocker_lock;

    if (check_for_deadlock()) {
        /* revive real main if floating */
        if (ts == slp_initial_tstate && ts->st.main->next == NULL) {
            /* emulate old revive_main behavior:
             * pass a value only if it is an exception */
            if (PyBomb_Check(prev->tempval))
                TASKLET_SETVAL(ts->st.main, prev->tempval);
            return slp_schedule_task(prev, ts->st.main, stackless);
        }
        if (!(retval = make_deadlock_bomb()))
            return NULL;
        TASKLET_SETVAL_OWN(prev, retval);
        return slp_schedule_task(prev, prev, stackless);
    }
#ifdef WITH_THREAD
    if (ts->st.thread.self_lock == NULL) {
        if (!(ts->st.thread.self_lock = new_lock()))
            return NULL;
        acquire_lock(ts->st.thread.self_lock, 1);
        if (!(ts->st.thread.unlock_lock = new_lock()))
            return NULL;
    }

    /* let somebody reactivate us */
    ts->st.thread.is_locked = 1; /* flag as blocked and wait */
    PyEval_SaveThread();
    PR("locker waiting for my lock");
    acquire_lock(ts->st.thread.self_lock, 1);
    PR("HAVE my lock");
    PyEval_RestoreThread(ts);

    if (temp.unlock_target != NULL) {
        next = temp.unlock_target;
        temp.unlock_target = NULL;
    }
    else
        next = prev;

    /* Get in shape.  We can't do this with schedule here, because
     * hard switching might not get us back soon enough. */
    if (next->flags.blocked) {
        /* unblock from channel */
        slp_channel_remove_slow(next);
        slp_current_insert(next);
    }
    else if (next->next == NULL) {
        /* reactivate floating task */
        Py_INCREF(next);
        slp_current_insert(next);
    }
    if (temp.other_lock != NULL) {
        PR("releasing unlocker");
        unlocker_lock = temp.other_lock;
        temp.other_lock = NULL;
        release_lock(unlocker_lock);
        Py_DECREF(unlocker_lock);
    }
    ts->st.thread.is_locked = 0;
#else
    (void)unlocker_lock;
    next = prev;
#endif
    /* this must happen after releasing the locks, because of hard switching */
    retval = slp_schedule_task(prev, next, stackless);
    PR("schedule() is done");
    return retval;
}
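/* Scenario sketch (an assumption, not from the original source): the
 * deadlock bomb above fires when the last runnable tasklet blocks.
 * From the C API, receiving on a channel that can never be fed
 * reproduces it; PyChannel_Receive() then returns NULL with the
 * deadlock exception set. */
static PyObject *
deadlock_sketch(void)
{
    PyObject *result;
    PyChannelObject *ch = PyChannel_New(NULL); /* NULL: default channel type */
    if (ch == NULL)
        return NULL;
    result = PyChannel_Receive(ch); /* no sender anywhere -> deadlock bomb */
    Py_DECREF(ch);
    return result;
}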
static PyObject *
tasklet_end(PyObject *retval)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *task = ts->st.current;
    PyTaskletObject *next;
    int ismain = task == ts->st.main;

    /* See whether we have a SystemExit, which is not an error.
     * Note that TaskletExit is a subclass.
     * Otherwise, turn the exception into a bomb.
     */
    if (retval == NULL) {
        if (PyErr_ExceptionMatches(PyExc_SystemExit)) {
            /* but if it is truly a SystemExit on the main thread,
               we want the exit! */
            if (ts == slp_initial_tstate &&
                !PyErr_ExceptionMatches(PyExc_TaskletExit))
                PyStackless_HandleSystemExit();
            PyErr_Clear();
            Py_INCREF(Py_None);
            retval = Py_None;
        }
        else {
            retval = slp_curexc_to_bomb();
            if (retval == NULL)
                return NULL;
        }
    }

    /* put the result back into the dead tasklet, to give
     * possible referrers access to the return value
     */
    TASKLET_SETVAL_OWN(task, retval);

    if (ismain) {
        /* see whether we need to adjust main's context before returning */
        if (ts->st.serial_last_jump != ts->st.serial)
            slp_transfer_return(task->cstate);
    }

    /* remove from runnables */
    slp_current_remove();

    /* Clean up any current exception - this tasklet is dead.
     * This only happens if we are killing tasklets in the middle
     * of their execution.
     */
    if (ts->exc_type != NULL && ts->exc_type != Py_None) {
        Py_DECREF(ts->exc_type);
        Py_XDECREF(ts->exc_value);
        Py_XDECREF(ts->exc_traceback);
        ts->exc_type = ts->exc_value = ts->exc_traceback = NULL;
    }

    /* capture all exceptions */
    if (ismain) {
        /* Main wants to exit.  We clean up, but leave the
         * runnables chain intact.
         */
        ts->st.main = NULL;
        Py_DECREF(task);
        return schedule_task_destruct(task, task);
    }

    next = ts->st.current;
    if (next == NULL) {
        int blocked = ts->st.main->flags.blocked;

        if (blocked) {
            char *txt;

            /* main was blocked and nobody can send */
            if (blocked < 0)
                txt = "the main tasklet is receiving"
                      " without a sender available.";
            else
                txt = "the main tasklet is sending"
                      " without a receiver available.";
            PyErr_SetString(PyExc_StopIteration, txt);
            /* fall through to error handling */
            retval = slp_curexc_to_bomb();
            if (retval == NULL)
                return NULL;
        }
        next = ts->st.main;
    }

    if (PyBomb_Check(retval)) {
        /* error handling: continue in the context of the main tasklet */
        next = ts->st.main;
        TASKLET_SETVAL(next, retval);
    }
    return schedule_task_destruct(task, next);
}
static PyObject *
generic_channel_action(PyChannelObject *self, PyObject *arg,
                       int dir, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *source = ts->st.current;
    PyTaskletObject *target = self->head;
    int cando = dir > 0 ? self->balance < 0 : self->balance > 0;
    int interthread = cando ? target->cstate->tstate != ts : 0;
    PyObject *retval;
    int runflags = 0;

    assert(abs(dir) == 1);

    TASKLET_SETVAL(source, arg);

    /* note that notify might release the GIL. */
    /* XXX for the moment, we notify late on interthread */
    if (!interthread)
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);

    if (cando) {
        /* communication 1): there is somebody waiting */
        target = slp_channel_remove(self, -dir);
        /* exchange data */
        TASKLET_SWAPVAL(source, target);
        if (interthread) {
            ; /* interthread, always keep target!
                 slp_current_insert(target); */
        }
        else {
            if (self->flags.schedule_all) {
                /* target goes last */
                slp_current_insert(target);
                /* always schedule away from source */
                target = source->next;
            }
            else if (self->flags.preference == -dir) {
                /* move target after source */
                ts->st.current = source->next;
                slp_current_insert(target);
                ts->st.current = source;
                /* don't mess with this scheduling behaviour: */
                runflags = PY_WATCHDOG_NO_SOFT_IRQ;
            }
            else {
                /* otherwise we return to the caller */
                slp_current_insert(target);
                target = source;
                /* don't mess with this scheduling behaviour: */
                runflags = PY_WATCHDOG_NO_SOFT_IRQ;
            }
        }
    }
    else {
        /* communication 2): there is nobody waiting, so we must switch */
        if (source->flags.block_trap)
            RUNTIME_ERROR("this tasklet does not like to be"
                          " blocked.", NULL);
        if (self->flags.closing) {
            PyErr_SetNone(PyExc_StopIteration);
            return NULL;
        }
        slp_current_remove();
        slp_channel_insert(self, source, dir);
        target = ts->st.current;

        /* Make sure that the channel will exist past the actual switch, if
         * we are soft switching.  A temporary channel might disappear.
         */
        if (Py_REFCNT(self)) {
            assert(ts->st.del_post_switch == NULL);
            ts->st.del_post_switch = (PyObject *)self;
            Py_INCREF(self);
        }
    }

    ts->st.runflags |= runflags; /* extra info for slp_schedule_task */
    retval = slp_schedule_task(source, target, stackless, 0);

    if (interthread) {
        if (cando)
            Py_DECREF(target);
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);
    }
    return retval;
}
static PyObject *
generic_channel_action(PyChannelObject *self, PyObject *arg,
                       int dir, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *source = ts->st.current;
    PyTaskletObject *target = self->head;
    int cando = dir > 0 ? self->balance < 0 : self->balance > 0;
    int interthread = cando ? target->cstate->tstate != ts : 0;
    PyObject *retval;

    assert(abs(dir) == 1);

    TASKLET_SETVAL(source, arg);

    /* note that notify might release the GIL. */
    /* XXX for the moment, we notify late on interthread */
    if (!interthread)
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);

    if (cando) {
        /* communication 1): there is somebody waiting */
        target = slp_channel_remove(self, -dir);
        /* exchange data */
        TASKLET_SWAPVAL(source, target);
        if (interthread) {
            /* interthread, always keep target! */
            slp_current_insert(target);
        }
        else {
            if (self->flags.schedule_all) {
                /* target goes last */
                slp_current_insert(target);
                /* always schedule away from source */
                target = source->next;
            }
            else if (self->flags.preference == -dir) {
                /* move target after source */
                ts->st.current = source->next;
                slp_current_insert(target);
                ts->st.current = source;
            }
            else {
                /* otherwise we return to the caller */
                slp_current_insert(target);
                target = source;
            }
        }
    }
    else {
        /* communication 2): there is nobody waiting */
        if (source->flags.block_trap)
            RUNTIME_ERROR("this tasklet does not like to be"
                          " blocked.", NULL);
        if (self->flags.closing) {
            PyErr_SetNone(PyExc_StopIteration);
            return NULL;
        }
        slp_current_remove();
        slp_channel_insert(self, source, dir);
        target = ts->st.current;
    }
    retval = slp_schedule_task(source, target, stackless);
    if (interthread)
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);
    return retval;
}
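/* Usage sketch (not from the original source): both PyChannel_Send()
 * and PyChannel_Receive() funnel into generic_channel_action(), with
 * dir = +1 and dir = -1 respectively.  A minimal producer, assuming a
 * receiver eventually appears on the other side of `ch`: */
static int
send_sketch(PyChannelObject *ch, PyObject *value)
{
    /* stores `value` in the sender's tempval (TASKLET_SETVAL above),
     * then either hands it to a waiting receiver or blocks */
    return PyChannel_Send(ch, value); /* 0 on success, -1 on error */
}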