Example no. 1
void slp_kill_tasks_with_stacks(PyThreadState *ts)
{
	int count = 0;

	while (1) {
		PyCStackObject *csfirst = slp_cstack_chain, *cs;
		PyTaskletObject *t, *task;
		PyTaskletObject **chain;

		if (csfirst == NULL)
			break;
		for (cs = csfirst; ; cs = cs->next) {
			if (count && cs == csfirst) {
				/* nothing found */
				return;
			}
			++count;
			if (cs->task == NULL)
				continue;
			if (ts != NULL && cs->tstate != ts)
				continue;
			break;
		} 
		count = 0;
		t = cs->task;
		Py_INCREF(t);

		/* We need to ensure that the tasklet 't' is in the scheduler
		 * tasklet chain before this one (our main).  This ensures
		 * that this one is switched back to directly after 't' is
		 * killed.  The reason we do this is that if another tasklet
		 * were switched to instead, it would in effect be scheduled
		 * and run.  We do not need to do this for tasklets blocked
		 * on channels, because when they are scheduled to be run and
		 * killed they are implicitly placed before this one,
		 * leaving it to run next.
		 */
		if (!t->flags.blocked && t != cs->tstate->st.main) {
			if (t->next && t->prev) { /* it may already have been removed via remove() */
				chain = &t; /* aim the chain at 't' so the macro unlinks 't' itself */
				SLP_CHAIN_REMOVE(PyTaskletObject, chain, task, next, prev)
			}
			}
			chain = &cs->tstate->st.main;
			task = cs->task;
			SLP_CHAIN_INSERT(PyTaskletObject, chain, task, next, prev);
			cs->tstate->st.current = cs->tstate->st.main;
			t = cs->task;
		}

		Py_INCREF(t); /* because the following steals a reference */
		PyTasklet_Kill(t);
		PyErr_Clear();

		if (t->f.frame == NULL) {
			/* ensure a valid tstate */
			t->cstate->tstate = slp_initial_tstate;
		}
		Py_DECREF(t);
	}
}
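
The SLP_CHAIN_REMOVE / SLP_CHAIN_INSERT macros used above maintain a circular doubly linked list threaded through the given next/prev fields, which is why Example 1 can unlink 't' itself simply by pointing 'chain' at it first. Below is a minimal sketch of that list discipline written as plain C functions; it illustrates the assumed shape of the chain, not the actual Stackless macro definitions.

/* Sketch only: a circular doubly linked chain of the shape assumed
 * by SLP_CHAIN_INSERT / SLP_CHAIN_REMOVE. */
typedef struct node {
	struct node *next, *prev;
} node;

static void
chain_insert(node **chain, node *task)
{
	if (*chain == NULL) {
		task->next = task->prev = task;	/* sole element links to itself */
		*chain = task;
	}
	else {
		node *head = *chain;
		task->prev = head->prev;	/* link in before the head... */
		task->next = head;
		head->prev->next = task;
		head->prev = task;
		*chain = task;			/* ...and make it the new head */
	}
}

static node *
chain_remove(node **chain)
{
	node *task = *chain;		/* unlinks whatever *chain points at */
	if (task->next == task)
		*chain = NULL;		/* chain becomes empty */
	else {
		task->prev->next = task->next;
		task->next->prev = task->prev;
		*chain = task->next;
	}
	task->next = task->prev = NULL;	/* mark as unlinked, as Example 1 tests */
	return task;
}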
Example no. 2
void
slp_current_insert(PyTaskletObject *task)
{
    PyThreadState *ts = task->cstate->tstate;
    PyTaskletObject **chain = &ts->st.current;

    /* link the tasklet into this thread's chain of runnable tasklets */
    SLP_CHAIN_INSERT(PyTaskletObject, chain, task, next, prev);
    ++ts->st.runcount;  /* keep the runnable count in step with the chain */
}
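
slp_current_insert links a tasklet into its thread's chain of runnable tasklets and keeps st.runcount in step with the chain length. For symmetry, the matching removal would decrement the counter around the unlink; a hedged sketch, assuming SLP_CHAIN_REMOVE stores the unlinked element through 'task' as in Example 1 (Stackless has its own removal routine; this body is an illustration, not a copy of it):

/* Sketch of the complementary removal, under the assumption that
 * SLP_CHAIN_REMOVE unlinks the element at *chain into 'task'. */
static PyTaskletObject *
current_remove_sketch(PyThreadState *ts)
{
    PyTaskletObject **chain = &ts->st.current, *task;

    --ts->st.runcount;  /* mirror the ++ in slp_current_insert */
    SLP_CHAIN_REMOVE(PyTaskletObject, chain, task, next, prev)
    return task;
}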
Example no. 3
PyCStackObject *
slp_cstack_new(PyCStackObject **cst, intptr_t *stackref, PyTaskletObject *task)
{
	PyThreadState *ts = PyThreadState_GET();
	intptr_t *stackbase = ts->st.cstack_base;
	ptrdiff_t size = stackbase - stackref;	/* words to save; the C stack grows downward */

	assert(size >= 0);

	if (*cst != NULL) {
		if ((*cst)->task == task)
			(*cst)->task = NULL;
		Py_DECREF(*cst);
	}
	if (size < CSTACK_SLOTS && ((*cst) = cstack_cache[size])) {
		/* take stack from cache */
		cstack_cache[size] = (PyCStackObject *) (*cst)->startaddr;
		--cstack_cachecount;
		_Py_NewReference((PyObject *)(*cst));
	}
	else
		*cst = PyObject_NewVar(PyCStackObject, &PyCStack_Type, size);
	if (*cst == NULL) return NULL;

	(*cst)->startaddr = stackbase;
	(*cst)->next = (*cst)->prev = NULL;
	SLP_CHAIN_INSERT(PyCStackObject, &slp_cstack_chain, *cst, next, prev);
	(*cst)->serial = ts->st.serial;
	(*cst)->task = task;
	(*cst)->tstate = ts;
	(*cst)->nesting_level = ts->st.nesting_level;
#ifdef _SEH32
	/* save the SEH handler */
	(*cst)->exception_list = (DWORD)
                    __readfsdword(FIELD_OFFSET(NT_TIB, ExceptionList));
#endif
	return *cst;
}
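
The 'size' computed at the top of slp_cstack_new is the count of machine words between the thread's recorded stack base and the caller's reference pointer. On the platforms assumed here the C stack grows downward, so stackbase sits at the higher address and the difference is non-negative, which the assert checks; a cached stack is reused only when a cache slot exists for exactly that word count. A standalone illustration of the arithmetic with hypothetical values (not Stackless code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	intptr_t stack[16];
	intptr_t *stackbase = &stack[16];	/* one past the highest slot */
	intptr_t *stackref = &stack[4];		/* a deeper, lower address */
	ptrdiff_t size = stackbase - stackref;	/* words that would be saved */

	assert(size == 12);
	return 0;
}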