/*
 * Bind a tasklet to its callable and make it runnable.
 *
 * The callable is expected in task->tempval (set by tasklet bind/init).
 * A cframe is built around (func, args, kwds), bound to the tasklet, and
 * the tasklet is inserted into the runnables chain of the current thread.
 *
 * Returns 0 on success, -1 on error (with a Python exception set).
 */
static int impl_tasklet_setup(PyTaskletObject *task, PyObject *args, PyObject *kwds)
{
    PyThreadState *ts = PyThreadState_GET();
    PyFrameObject *frame;
    PyObject *func;

    assert(PyTasklet_Check(task));
    /* Stackless not yet initialized for this thread: defer to the
     * bootstrapping variant. */
    if (ts->st.main == NULL)
        return PyTasklet_Setup_M(task, args, kwds);
    func = task->tempval;
    if (func == NULL)
        RUNTIME_ERROR("the tasklet was not bound to a function", -1);
    if ((frame = (PyFrameObject *) slp_cframe_newfunc(func, args, kwds, 0)) == NULL) {
        return -1;
    }
    if (bind_tasklet_to_frame(task, frame)) {
        /* binding failed: drop the reference slp_cframe_newfunc gave us */
        Py_DECREF(frame);
        return -1;
    }
    /* the callable has been consumed into the frame; reset tempval */
    TASKLET_SETVAL(task, Py_None);
    /* the runnables chain owns one reference to the tasklet */
    Py_INCREF(task);
    slp_current_insert(task);
    return 0;
}
/*
 * just in case a transfer didn't work, we pack the bad
 * tasklet into the exception and remove it from the runnables.
 *
 * After this call, 'prev' is the current tasklet again and the pending
 * exception value is a 2-tuple (original value, bad_guy).
 */
static void kill_wrap_bad_guy(PyTaskletObject *prev, PyTaskletObject *bad_guy)
{
    PyThreadState *ts = PyThreadState_GET();
    PyObject *newval = PyTuple_New(2);

    if (bad_guy->next != NULL) {
        /* bad_guy is still chained; slp_current_remove() operates on
         * ts->st.current, so point it there first */
        ts->st.current = bad_guy;
        slp_current_remove();
    }
    /* restore last tasklet */
    if (prev->next == NULL)
        slp_current_insert(prev);
    ts->frame = prev->f.frame;
    ts->st.current = prev;
    if (newval != NULL) {
        /* merge bad guy into exception */
        PyObject *exc, *val, *tb;
        PyErr_Fetch(&exc, &val, &tb);
        /* NOTE(review): PyErr_Fetch may produce val == NULL, leaving a NULL
         * slot in the tuple — presumably tolerated downstream; confirm. */
        PyTuple_SET_ITEM(newval, 0, val);               /* steals val */
        PyTuple_SET_ITEM(newval, 1, (PyObject*)bad_guy); /* steals a ref... */
        Py_INCREF(bad_guy);                              /* ...taken here */
        PyErr_Restore(exc, newval, tb);
    }
    /* if the tuple allocation failed, the original exception stays as-is */
}
/*
 * Insert a tasklet into the runnables chain (signature is supplied by the
 * TASKLET_INSERT_HEAD macro; the tasklet parameter is 'task').
 *
 * Returns 0 on success, -1 with an exception set on error.
 */
static TASKLET_INSERT_HEAD(impl_tasklet_insert)
{
    PyThreadState *ts = PyThreadState_GET();

    assert(PyTasklet_Check(task));
    /* Stackless not initialized in this thread yet: queue via wrapper */
    if (ts->st.main == NULL)
        return slp_current_wrapper(PyTasklet_Insert, task);
    if (task->flags.blocked)
        RUNTIME_ERROR("You cannot run a blocked tasklet", -1);
    if (task->f.frame == NULL && task != ts->st.current)
        RUNTIME_ERROR("You cannot run an unbound(dead) tasklet", -1);
    /* next == NULL means the tasklet is not in any chain yet */
    if (task->next == NULL) {
        Py_INCREF(task);  /* chain takes a reference */
        slp_current_insert(task);
        /* The tasklet may belong to a different thread, and that thread may
         * be blocked, waiting for something to do! */
        slp_thread_unblock(task->cstate->tstate);
    }
    return 0;
}
int initialize_main_and_current(void) { PyThreadState *ts = PyThreadState_GET(); PyTaskletObject *task; PyObject *noargs; /* refuse executing main in an unhandled error context */ if (! (PyErr_Occurred() == NULL || PyErr_Occurred() == Py_None) ) { #ifdef _DEBUG PyObject *type, *value, *traceback; PyErr_Fetch(&type, &value, &traceback); Py_XINCREF(type); Py_XINCREF(value); Py_XINCREF(traceback); PyErr_Restore(type, value, traceback); printf("Pending error while entering Stackless subsystem:\n"); PyErr_Print(); printf("Above exception is re-raised to the caller.\n"); PyErr_Restore(type, value, traceback); #endif return 1; } noargs = PyTuple_New(0); task = (PyTaskletObject *) PyTasklet_Type.tp_new( &PyTasklet_Type, noargs, NULL); Py_DECREF(noargs); if (task == NULL) return -1; assert(task->cstate != NULL); ts->st.main = task; Py_INCREF(task); slp_current_insert(task); ts->st.current = task; NOTIFY_SCHEDULE(NULL, task, -1); return 0; }
/*
 * Core scheduler: switch from tasklet 'prev' to tasklet 'next'.
 *
 * next == NULL means "nothing runnable": block the thread.  A tasklet that
 * belongs to another thread is handed off to the inter-thread path.  With
 * stackless (soft) switching available and no C-stack nesting, only frames
 * are exchanged; otherwise a hard C-stack transfer is performed.
 *
 * Returns prev's tempval (new reference), a STACKLESS_PACKed value on the
 * soft path, or NULL on error.
 */
PyObject *
slp_schedule_task(PyTaskletObject *prev, PyTaskletObject *next, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyCStackObject **cstprev;
    PyObject *retval;
    int (*transfer)(PyCStackObject **, PyCStackObject *, PyTaskletObject *);

    if (next == NULL) {
        return schedule_task_block(prev, stackless);
    }
#ifdef WITH_THREAD
    /* note that next->cstate is undefined if it is ourself */
    if (next->cstate != NULL && next->cstate->tstate != ts) {
        return schedule_task_unblock(prev, next, stackless);
    }
#endif
    if (next->flags.blocked) {
        /* unblock from channel */
        slp_channel_remove_slow(next);
        slp_current_insert(next);
    }
    else if (next->next == NULL) {
        /* reactivate floating task */
        Py_INCREF(next);
        slp_current_insert(next);
    }
    if (prev == next) {
        /* switching to ourself: just deliver (or explode) the tempval */
        retval = prev->tempval;
        Py_INCREF(retval);
        if (PyBomb_Check(retval))
            retval = slp_bomb_explode(prev);
        return retval;
    }
    NOTIFY_SCHEDULE(prev, next, NULL);

    /* reset the watchdog ticker and park prev's execution state */
    ts->st.ticker = ts->st.interval;
    prev->recursion_depth = ts->recursion_depth;
    prev->f.frame = ts->frame;

    if (!stackless || ts->st.nesting_level != 0)
        goto hard_switching;

    /* start of soft switching code */

    /* soft-switched tasklets all share the thread's initial stub cstack */
    if (prev->cstate != ts->st.initial_stub) {
        Py_DECREF(prev->cstate);
        prev->cstate = ts->st.initial_stub;
        Py_INCREF(prev->cstate);
    }
    if (ts != slp_initial_tstate) {
        /* ensure to get all tasklets into the other thread's chain */
        if (slp_ensure_linkage(prev) || slp_ensure_linkage(next))
            return NULL;
    }

    /* handle exception: normalize a Py_None placeholder to "no exception" */
    if (ts->exc_type == Py_None) {
        Py_XDECREF(ts->exc_type);
        ts->exc_type = NULL;
    }
    if (ts->exc_type != NULL) {
        /* build a shadow frame that restores the exception state when
         * prev is resumed */
        PyCFrameObject *f = slp_cframe_new(restore_exception, 1);
        if (f == NULL)
            return NULL;
        f->ob1 = ts->exc_type;
        f->ob2 = ts->exc_value;
        f->ob3 = ts->exc_traceback;
        prev->f.frame = (PyFrameObject *) f;
        ts->exc_type = ts->exc_value = ts->exc_traceback = NULL;
    }
    if (ts->use_tracing || ts->tracing) {
        /* build a shadow frame that restores tracing/profiling state */
        PyCFrameObject *f = slp_cframe_new(restore_tracing, 1);
        if (f == NULL)
            return NULL;
        f->any1 = ts->c_tracefunc;
        f->any2 = ts->c_profilefunc;
        ts->c_tracefunc = ts->c_profilefunc = NULL;
        f->ob1 = ts->c_traceobj;
        f->ob2 = ts->c_profileobj;
        /* trace/profile does not add references */
        Py_XINCREF(f->ob1);
        Py_XINCREF(f->ob2);
        ts->c_traceobj = ts->c_profileobj = NULL;
        f->i = ts->tracing;
        f->n = ts->use_tracing;
        ts->tracing = ts->use_tracing = 0;
        prev->f.frame = (PyFrameObject *) f;
    }

    /* install next's frame/recursion state and make it current */
    ts->frame = next->f.frame;
    next->f.frame = NULL;
    ts->recursion_depth = next->recursion_depth;
    ts->st.current = next;
    retval = next->tempval;
    assert(next->cstate != NULL);
    if (next->cstate->nesting_level != 0) {
        /* create a helper frame to restore the target stack */
        ts->frame = (PyFrameObject *) slp_cframe_new(jump_soft_to_hard, 1);
        if (ts->frame == NULL) {
            ts->frame = prev->f.frame;
            return NULL;
        }
        /* note that we don't explode the bomb now and don't incref! */
        return STACKLESS_PACK(retval);
    }
    Py_INCREF(retval);
    if (PyBomb_Check(retval))
        retval = slp_bomb_explode(next);
    return STACKLESS_PACK(retval);

hard_switching:
    /* since we change the stack we must assure that the protocol was met */
    STACKLESS_ASSERT();

    /* note: nesting_level is handled in cstack_new */
    cstprev = &prev->cstate;
    ts->st.current = next;
    if (ts->exc_type == Py_None) {
        Py_XDECREF(ts->exc_type);
        ts->exc_type = NULL;
    }
    ts->recursion_depth = next->recursion_depth;
    ts->frame = next->f.frame;
    next->f.frame = NULL;
    ++ts->st.nesting_level;
    /* an active exception or tracing must survive the C-stack transfer */
    if (ts->exc_type != NULL || ts->use_tracing || ts->tracing)
        transfer = transfer_with_exc;
    else
        transfer = slp_transfer;
    if (transfer(cstprev, next->cstate, prev) == 0) {
        /* we are back: deliver (or explode) prev's tempval */
        --ts->st.nesting_level;
        retval = prev->tempval;
        Py_INCREF(retval);
        if (PyBomb_Check(retval))
            retval = slp_bomb_explode(prev);
        return retval;
    }
    else {
        /* transfer failed: undo and attach the culprit to the exception */
        --ts->st.nesting_level;
        kill_wrap_bad_guy(prev, next);
        return NULL;
    }
}
/*
 * Hand a tasklet 'next' that lives on another thread back to that thread.
 *
 * Protocol: take the target thread's unlock_lock (fifo per-thread queueing),
 * then the global interthread_lock (one transaction at a time), then either
 * wake the blocked thread via the 'temp' mailbox and wait on our own
 * self_lock until it acknowledges, or — if the other thread is dead or not
 * blocked — just make 'next' runnable here.
 *
 * NOTE(review): 'temp' appears to be a file-scope one-slot mailbox shared
 * under interthread_lock — confirm against the rest of the file.
 */
static PyObject *schedule_task_unblock(PyTaskletObject *prev, PyTaskletObject *next, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyThreadState *nts = next->cstate->tstate;
    PyObject *retval;
    long thread_id = nts->thread_id;
    PyObject *unlock_lock;

    /* lazily create this thread's locks; self_lock starts acquired */
    if (ts->st.thread.self_lock == NULL) {
        if (!(ts->st.thread.self_lock = new_lock()))
            return NULL;
        acquire_lock(ts->st.thread.self_lock, 1);
        if (!(ts->st.thread.unlock_lock = new_lock()))
            return NULL;
        if (interthread_lock == NULL) {
            if (!(interthread_lock = PyThread_allocate_lock()))
                return NULL;
        }
    }
    /*
     * make sure nobody else tries a transaction at the same time
     * on this tasklet's thread state, because two tasklets of the
     * same thread could be talking to different threads. They must
     * be serviced in fifo order.
     */
    if (nts->st.thread.unlock_lock == NULL) {
        if (!(nts->st.thread.unlock_lock = new_lock()))
            return NULL;
    }
    unlock_lock = nts->st.thread.unlock_lock;
    Py_INCREF(unlock_lock);  /* keep it alive even if nts goes away */

    PyEval_SaveThread();     /* release the GIL while waiting on locks */
    PR("unblocker waiting for unlocker lock");
    acquire_lock(unlock_lock, 1);
    PR("unblocker HAS unlocker lock");

    /*
     * also make sure that only one single interthread transaction
     * is performed at any time.
     */
    PR("unblocker waiting for interthread lock");
    PyThread_acquire_lock(interthread_lock, 1);
    PR("unblocker HAS interthread lock");

    PyEval_RestoreThread(ts);

    /* get myself ready (schedule to self before handing off) */
    retval = slp_schedule_task(prev, prev, stackless);

    /* see whether the other thread still exists and is really blocked */
    if (is_thread_alive(thread_id) && nts->st.thread.is_locked) {
        /* tell the blocker what comes next */
        temp.unlock_target = next;
        temp.other_lock = ts->st.thread.self_lock;
        /* give it an extra ref, in case I would die early */
        Py_INCREF(temp.other_lock);

        /* unblock it */
        release_lock(nts->st.thread.self_lock);

        /* wait for the transaction to finish */
        PyEval_SaveThread();
        PR("unblocker waiting for own lock");
        acquire_lock(ts->st.thread.self_lock, 1);
        PR("unblocker HAS own lock");
        PyEval_RestoreThread(ts);
    }
    else {
        PR("unlocker: other is NOT LOCKED or dead");
        if (next->flags.blocked) {
            /* unblock from channel */
            slp_channel_remove_slow(next);
            slp_current_insert(next);
        }
        else if (next->next == NULL) {
            /* reactivate floating task */
            Py_INCREF(next);
            slp_current_insert(next);
        }
    }

    PR("unblocker releasing interthread lock");
    PyThread_release_lock(interthread_lock);
    PR("unblocker RELEASED interthread lock");
    PR("unblocker releasing unlocker lock");
    release_lock(unlock_lock);
    Py_DECREF(unlock_lock);
    PR("unblocker RELEASED unlocker lock");
    return retval;
}
/*
 * Nothing is runnable: either resolve a deadlock (revive main or raise a
 * deadlock bomb) or, with threads enabled, flag this thread as blocked and
 * sleep on its self_lock until another thread's unblocker wakes it with a
 * target tasklet in the 'temp' mailbox.
 */
static PyObject *
schedule_task_block(PyTaskletObject *prev, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyObject *retval;
    PyTaskletObject *next = NULL;
    PyObject *unlocker_lock;

    if (check_for_deadlock()) {
        /* revive real main if floating */
        if (ts == slp_initial_tstate && ts->st.main->next == NULL) {
            /* emulate old revive_main behavior:
             * passing a value only if it is an exception */
            if (PyBomb_Check(prev->tempval))
                TASKLET_SETVAL(ts->st.main, prev->tempval);
            return slp_schedule_task(prev, ts->st.main, stackless);
        }
        if (!(retval = make_deadlock_bomb()))
            return NULL;
        /* deliver the bomb to ourselves */
        TASKLET_SETVAL_OWN(prev, retval);
        return slp_schedule_task(prev, prev, stackless);
    }
#ifdef WITH_THREAD
    /* lazily create this thread's locks; self_lock starts acquired */
    if (ts->st.thread.self_lock == NULL) {
        if (!(ts->st.thread.self_lock = new_lock()))
            return NULL;
        acquire_lock(ts->st.thread.self_lock, 1);
        if (!(ts->st.thread.unlock_lock = new_lock()))
            return NULL;
    }

    /* let somebody reactivate us */

    ts->st.thread.is_locked = 1; /* flag as blocked and wait */
    PyEval_SaveThread();
    PR("locker waiting for my lock");
    acquire_lock(ts->st.thread.self_lock, 1);
    PR("HAVE my lock");
    PyEval_RestoreThread(ts);

    /* the unblocker left the tasklet to run in the mailbox (if any) */
    if (temp.unlock_target != NULL) {
        next = temp.unlock_target;
        temp.unlock_target = NULL;
    }
    else
        next = prev;

    /*
     * get in shape. can't do this with schedule here because
     * hard switching might not get us back, soon enough.
     */
    if (next->flags.blocked) {
        /* unblock from channel */
        slp_channel_remove_slow(next);
        slp_current_insert(next);
    }
    else if (next->next == NULL) {
        /* reactivate floating task */
        Py_INCREF(next);
        slp_current_insert(next);
    }

    /* acknowledge the unblocker so it can continue */
    if (temp.other_lock != NULL) {
        PR("releasing unlocker");
        unlocker_lock = temp.other_lock;
        temp.other_lock = NULL;
        release_lock(unlocker_lock);
        Py_DECREF(unlocker_lock);
    }
    ts->st.thread.is_locked = 0;
#else
    (void)unlocker_lock;
    next = prev;
#endif
    /* this must be after releasing the locks because of hard switching */
    retval = slp_schedule_task(prev, next, stackless);
    PR("schedule() is done");
    return retval;
}
/*
 * Perform a channel send (dir == 1) or receive (dir == -1) on 'self' for the
 * current tasklet.
 *
 * If a partner is waiting (cando), values are swapped and scheduling order
 * is chosen by the channel's preference/schedule_all flags; otherwise the
 * current tasklet blocks on the channel and we switch away.  Inter-thread
 * partners are only made runnable; notification is deferred until after the
 * switch in that case.
 */
static PyObject *
generic_channel_action(PyChannelObject *self, PyObject *arg, int dir, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *source = ts->st.current;
    PyTaskletObject *target = self->head;
    /* cando: a partner of the opposite direction is queued on the channel */
    int cando = dir > 0 ? self->balance < 0 : self->balance > 0;
    int interthread = cando ? target->cstate->tstate != ts : 0;
    PyObject *retval;
    int runflags = 0;

    assert(abs(dir) == 1);
    TASKLET_SETVAL(source, arg);

    /* note that notify might release the GIL. */
    /* XXX for the moment, we notify late on interthread */
    if (!interthread)
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);

    if (cando) {
        /* communication 1): there is somebody waiting */
        target = slp_channel_remove(self, -dir);
        /* exchange data */
        TASKLET_SWAPVAL(source, target);
        if (interthread) {
            ; /* interthread, always keep target! slp_current_insert(target);*/
        }
        else {
            if (self->flags.schedule_all) {
                /* target goes last */
                slp_current_insert(target);
                /* always schedule away from source */
                target = source->next;
            }
            else if (self->flags.preference == -dir) {
                /* move target after source */
                ts->st.current = source->next;
                slp_current_insert(target);
                ts->st.current = source;
                /* don't mess with this scheduling behaviour: */
                runflags = PY_WATCHDOG_NO_SOFT_IRQ;
            }
            else {
                /* otherwise we return to the caller */
                slp_current_insert(target);
                target = source;
                /* don't mess with this scheduling behaviour: */
                runflags = PY_WATCHDOG_NO_SOFT_IRQ;
            }
        }
    }
    else {
        /* communication 2): there is nobody waiting, so we must switch */
        if (source->flags.block_trap)
            RUNTIME_ERROR("this tasklet does not like to be" " blocked.", NULL);
        if (self->flags.closing) {
            PyErr_SetNone(PyExc_StopIteration);
            return NULL;
        }
        slp_current_remove();
        slp_channel_insert(self, source, dir);
        target = ts->st.current;

        /* Make sure that the channel will exist past the actual switch, if
         * we are softswitching. A temporary channel might disappear.
         */
        if (Py_REFCNT(self)) {
            assert(ts->st.del_post_switch == NULL);
            ts->st.del_post_switch = (PyObject*)self;
            Py_INCREF(self);
        }
    }
    ts->st.runflags |= runflags; /* extra info for slp_schedule_task */
    retval = slp_schedule_task(source, target, stackless, 0);
    if (interthread) {
        if (cando) {
            /* drop the channel's reference to the removed partner */
            Py_DECREF(target);
        }
        /* deferred notification (see XXX above) */
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);
    }
    return retval;
}
/*
 * Complete a channel operation for which a partner is already waiting.
 *
 * Removes the partner from the channel, swaps tempvals, picks the tasklet to
 * switch to according to the channel flags, and schedules.  On scheduling
 * failure every step is rolled back (re-insert into channel, swap back,
 * restore runflags) so the operation can be retried or reported cleanly.
 *
 * Returns 0 on success, nonzero on failure; *result receives the value.
 */
static int
generic_channel_cando(PyThreadState *ts, PyObject **result, PyChannelObject *self, int dir, int stackless)
{
    PyTaskletObject *source = ts->st.current;
    PyTaskletObject *switchto, *target, *next;
    int interthread;
    int oldflags, runflags = 0;
    int switched, fail;

    /* swap data and perform necessary scheduling */
    switchto = target = slp_channel_remove(self, NULL, NULL, &next);
    interthread = target->cstate->tstate != ts;
    /* exchange data */
    TASKLET_SWAPVAL(source, target);

    if (interthread) {
        ; /* nothing happens, the target is merely made runnable */
    }
    else {
        if (self->flags.schedule_all) {
            /* target goes last */
            slp_current_insert(target);
            /* always schedule away from source */
            switchto = source->next;
        }
        else if (self->flags.preference == -dir) {
            /* move target after source */
            ts->st.current = source->next;
            slp_current_insert(target);
            ts->st.current = source;
            /* don't mess with this scheduling behaviour: */
            runflags = PY_WATCHDOG_NO_SOFT_IRQ;
        }
        else {
            /* otherwise we return to the caller */
            slp_current_insert(target);
            switchto = source;
            /* don't mess with this scheduling behaviour: */
            runflags = PY_WATCHDOG_NO_SOFT_IRQ;
        }
    }

    /* Make sure that the channel will exist past the actual switch, if
     * we are softswitching. A temporary channel might disappear.
     */
    assert(ts->st.del_post_switch == NULL);
    if (source != target && Py_REFCNT(self)) {
        ts->st.del_post_switch = (PyObject*)self;
        Py_INCREF(self);
    }

    oldflags = ts->st.runflags;
    ts->st.runflags |= runflags; /* extra info for slp_schedule_task */
    fail = slp_schedule_task(result, source, switchto, stackless, &switched);

    if (fail || !switched)
        Py_CLEAR(ts->st.del_post_switch);
    if (fail) {
        /* roll back everything done above */
        ts->st.runflags = oldflags;
        if (!interthread) {
            slp_current_uninsert(target);
            ts->st.current = source;
        }
        slp_channel_insert(self, target, -dir, next);
        TASKLET_SWAPVAL(source, target);
    }
    else {
        if (interthread)
            /* drop the channel's reference to the removed partner */
            Py_DECREF(target);
    }
    return fail;
}
/*
 * Perform a channel send (dir == 1) or receive (dir == -1) on 'self' for the
 * current tasklet (older/simpler variant without watchdog runflags).
 *
 * If a partner is waiting, values are swapped and scheduling order is chosen
 * by the channel flags; otherwise the current tasklet blocks on the channel.
 */
static PyObject *
generic_channel_action(PyChannelObject *self, PyObject *arg, int dir, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *source = ts->st.current;
    PyTaskletObject *target = self->head;
    /* cando: a partner of the opposite direction is queued on the channel */
    int cando = dir > 0 ? self->balance < 0 : self->balance > 0;
    int interthread = cando ? target->cstate->tstate != ts : 0;
    PyObject *retval;

    assert(abs(dir) == 1);
    TASKLET_SETVAL(source, arg);

    /* note that notify might release the GIL. */
    /* XXX for the moment, we notify late on interthread */
    if (!interthread)
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);

    if (cando) {
        /* communication 1): there is somebody waiting */
        target = slp_channel_remove(self, -dir);
        /* exchange data */
        TASKLET_SWAPVAL(source, target);
        if (interthread) {
            /* interthread, always keep target! */
            /* NOTE(review): this inserts a foreign-thread tasklet into the
             * current thread's chain; the other variant in this file leaves
             * it out — confirm which behavior is intended. */
            slp_current_insert(target);
        }
        else {
            if (self->flags.schedule_all) {
                /* target goes last */
                slp_current_insert(target);
                /* always schedule away from source */
                target = source->next;
            }
            else if (self->flags.preference == -dir) {
                /* move target after source */
                ts->st.current = source->next;
                slp_current_insert(target);
                ts->st.current = source;
            }
            else {
                /* otherwise we return to the caller */
                slp_current_insert(target);
                target = source;
            }
        }
    }
    else {
        /* communication 2): there is nobody waiting */
        if (source->flags.block_trap)
            RUNTIME_ERROR("this tasklet does not like to be" " blocked.", NULL);
        if (self->flags.closing) {
            PyErr_SetNone(PyExc_StopIteration);
            return NULL;
        }
        slp_current_remove();
        slp_channel_insert(self, source, dir);
        target = ts->st.current;
    }
    retval = slp_schedule_task(source, target, stackless);
    if (interthread)
        /* deferred notification (see XXX above) */
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);
    return retval;
}