Code example #1
0
File: stacklessmodule.c  Project: d11/rts
/*
 * Run the scheduler from the main tasklet until either the runnable
 * queue empties or, for timeout > 0, the watchdog interrupt fires.
 * flags control interrupt behavior (see PY_WATCHDOG_SOFT below).
 * Returns None, the interrupted tasklet (hard-interrupt mode), or
 * NULL on error.
 */
PyObject *
PyStackless_RunWatchdogEx(long timeout, int flags)
{
	PyThreadState *ts = PyThreadState_GET();
	PyTaskletObject *victim;
	PyObject *retval;

	/* interpreter not fully initialized: delegate to bootstrap variant */
	if (ts->st.main == NULL)
		return PyStackless_RunWatchdog_M(timeout, flags);
	if (ts->st.current != ts->st.main)
		RUNTIME_ERROR(
		    "run() must be run from the main tasklet.",
		    NULL);

	/* arm the watchdog interrupt only for a positive timeout */
	if (timeout <= 0)
		ts->st.interrupt = NULL;
	else
		ts->st.interrupt = interrupt_timeout_return;

	ts->st.interval = timeout;
	ts->st.tick_watermark = ts->st.tick_counter + timeout;

	/* remove main. Will get back at the end. */
	slp_current_remove();
	Py_DECREF(ts->st.main);

    /* now let them run until the end. */
    ts->st.runflags = flags;
    retval = slp_schedule_task(ts->st.main, ts->st.current, 0, 0);
    ts->st.runflags = 0;
    ts->st.interrupt = NULL;

	/* retval really should be PyNone here (or NULL).  Technically, it is the
	 * tempval of some tasklet that has quit.  Even so, it is quite
	 * useless to use.  run() must return None, or a tasklet
	 */
	Py_XDECREF(retval);
	if (retval == NULL) /* an exception has occurred */
		return NULL;

	/*
	 * back in main.
	 * We were either revived by slp_tasklet_end or the interrupt.
	 * If we were using hard interrupts (bit 1 in flags not set)
	 * we need to return the interrupted tasklet)
	 */
	if (ts->st.runcount > 1 && !(flags & PY_WATCHDOG_SOFT)) {
		/* remove victim. It is sitting next to us. */
		ts->st.current = (PyTaskletObject*)ts->st.main->next;
		victim = slp_current_remove();
		ts->st.current = (PyTaskletObject*)ts->st.main;
		/* ownership of the removed victim's reference passes to the caller */
		return (PyObject*) victim;
	} else
		Py_RETURN_NONE;
}
Code example #2
0
/*
 * tasklet.run(): make the tasklet runnable and ask the scheduler to
 * switch toward it.  Returns the scheduler's result, or NULL on error.
 */
static TASKLET_RUN_HEAD(impl_tasklet_run)
{
    STACKLESS_GETARG();
    PyThreadState *ts = PyThreadState_GET();

    assert(PyTasklet_Check(task));

    /* interpreter not fully initialized: delegate to bootstrap variant */
    if (ts->st.main == NULL)
        return PyTasklet_Run_M(task);

    /* insert into the runnable queue first; bail out if that fails */
    if (PyTasklet_Insert(task) != 0)
        return NULL;

    /* then let the scheduler move from the current tasklet to it */
    return slp_schedule_task(ts->st.current, task, stackless, 0);
}
Code example #3
0
File: stacklessmodule.c  Project: develersrl/dspython
/*
 * Run the scheduler from the main tasklet until either the runnable
 * queue empties or, for timeout > 0, the watchdog interrupt fires.
 * Returns the scheduler's result, the interrupted tasklet (when a
 * victim is still runnable), or NULL on error.
 */
PyObject *
PyStackless_RunWatchdog(long timeout)
{
	PyThreadState *ts = PyThreadState_GET();
	PyTaskletObject *victim;
	PyObject *retval;

	/* interpreter not fully initialized: delegate to bootstrap variant */
	if (ts->st.main == NULL)
		return PyStackless_RunWatchdog_M(timeout);
	if (ts->st.current != ts->st.main)
		RUNTIME_ERROR(
		    "run() must be run from the main tasklet.",
		    NULL);

	/* arm the watchdog interrupt only for a positive timeout */
	if (timeout <= 0)
		ts->st.interrupt = NULL;
	else
		ts->st.interrupt = interrupt_timeout_return;

	ts->st.interval = timeout;

	/* remove main. Will get back at the end. */
	slp_current_remove();
	Py_DECREF(ts->st.main);

	/* now let them run until the end. */
	retval = slp_schedule_task(ts->st.main, ts->st.current, 0);

	ts->st.interrupt = NULL;

	if (retval == NULL) /* an exception has occurred */
		return NULL;

	/*
	 * back in main.
	 * We were either revived by slp_tasklet_end or the interrupt.
	 */
	if (ts->st.runcount > 1) {
		/* remove victim. It is sitting next to us. */
		ts->st.current = (PyTaskletObject*)ts->st.main->next;
		victim = slp_current_remove();
		ts->st.current = (PyTaskletObject*)ts->st.main;
		/* the scheduler's result is discarded in favor of the victim */
		Py_DECREF(retval);
		return (PyObject*) victim;
	}
	return retval;
}
Code example #4
0
/*
 * tasklet.raise_exception(klass, args): arm the tasklet with a "bomb"
 * (a pending exception object) and schedule toward it so the exception
 * is raised in its context.  A dead tasklet explodes immediately here.
 */
static TASKLET_RAISE_EXCEPTION_HEAD(impl_tasklet_raise_exception)
{
    STACKLESS_GETARG();
    PyThreadState *ts = PyThreadState_GET();
    PyObject *bomb;

    /* interpreter not fully initialized: delegate to bootstrap variant */
    if (ts->st.main == NULL)
        return PyTasklet_RaiseException_M(self, klass, args);
    bomb = slp_make_bomb(klass, args, "tasklet.raise_exception");
    if (bomb == NULL)
        return NULL;
    /* ownership of the bomb reference transfers to the tasklet's tempval */
    TASKLET_SETVAL_OWN(self, bomb);
    /* if the tasklet is dead, do not run it (no frame) but explode */
    if (slp_get_frame(self) == NULL) {
        /* take the bomb back out of tempval and detonate it right here */
        TASKLET_CLAIMVAL(self, &bomb);
        return slp_bomb_explode(bomb);
    }
    return slp_schedule_task(ts->st.current, self, stackless, 0);
}
Code example #5
0
static PyObject *
schedule_task_destruct(PyTaskletObject *prev, PyTaskletObject *next)
{
	/*
	 * The problem is to leave the dying tasklet alive
	 * until we have done the switch.
	 * This cannot easily be done by hard switching, since
	 * the C stack we are leaving is never returned to,
	 * and who should do the dereferencing?
	 * Therefore, we enforce a soft-switch.
	 */
	PyThreadState *ts = PyThreadState_GET();
	PyObject *retval;

	/* we should have no nesting level */
	assert(ts->st.nesting_level == 0);
	/* even there is a (buggy) nesting, ensure soft switch */
	if (ts->st.nesting_level != 0) {
		printf("XXX error, nesting_level = %d\n", ts->st.nesting_level);
		ts->st.nesting_level = 0;
	}

	/* update what's not yet updated */
	assert(ts->recursion_depth == 0);
	prev->recursion_depth = 0;
	assert(ts->frame == NULL);
	prev->f.frame = NULL;

	/* do a soft switch */
	if (prev != next)
		retval = slp_schedule_task(prev, next, 1);
	else {
		/* nothing to switch to: return the dying tasklet's tempval */
		retval = prev->tempval;
		Py_INCREF(retval);
		/* NOTE(review): if tempval is a bomb, the INCREF'd retval is
		 * overwritten without a DECREF, and slp_bomb_explode() is
		 * passed the tasklet rather than the bomb — presumably this
		 * variant of slp_bomb_explode claims the tasklet's tempval;
		 * verify against this fork's API, otherwise this leaks. */
		if (PyBomb_Check(retval))
			retval = slp_bomb_explode(prev);
	}

	/* break reference cycles before the final release of prev */
	prev->ob_type->tp_clear((PyObject *)prev);
	/* now it is safe to derefence prev */
	Py_DECREF(prev);
	return retval;
}
Code example #6
0
File: stacklessmodule.c  Project: develersrl/dspython
/*
 * stackless.schedule(retval, remove): store retval as the current
 * tasklet's tempval, optionally take it off the runnable queue, and
 * switch to the next tasklet in the chain.
 */
PyObject *
PyStackless_Schedule(PyObject *retval, int remove)
{
	STACKLESS_GETARG();
	PyThreadState *ts = PyThreadState_GET();
	PyTaskletObject *prev = ts->st.current;
	PyTaskletObject *next = prev->next;
	PyObject *result = NULL;

	/* interpreter not fully initialized: delegate to bootstrap variant */
	if (ts->st.main == NULL)
		return PyStackless_Schedule_M(retval, remove);

	/* hold a reference to prev across the switch */
	Py_INCREF(prev);
	TASKLET_SETVAL(prev, retval);
	if (remove) {
		/* drop the queue's reference along with the queue entry */
		slp_current_remove();
		Py_DECREF(prev);
	}
	result = slp_schedule_task(prev, next, stackless);
	Py_DECREF(prev);
	return result;
}
Code example #7
0
File: stacklessmodule.c  Project: develersrl/dspython
static PyObject *
interrupt_timeout_return(void)
{
	PyThreadState *ts = PyThreadState_GET();
	PyTaskletObject *current = ts->st.current;
	
	/*
	 * Tasklet has to be prevented from returning if atomic or
	 * if nesting_level is relevant 
	 */
	if (current->flags.atomic || ts->st.schedlock ||
	    ( ts->st.nesting_level && !current->flags.ignore_nesting ) ) {
		ts->st.ticker = ts->st.interval;
		current->flags.pending_irq = 1;
		Py_INCREF(Py_None);
		return Py_None;
	}
	else
		current->flags.pending_irq = 0;

	return slp_schedule_task(ts->st.current, ts->st.main, 1);
}
Code example #8
0
/*
 * Block the current tasklet on a channel: remove it from the runnable
 * queue, enqueue it on the channel in direction dir, and switch away.
 * On failure every mutation is undone.  Returns 0 on success, -1 on
 * error (with *result set by slp_schedule_task).
 */
static int
generic_channel_block(PyThreadState *ts, PyObject **result, PyChannelObject *self, int dir, int stackless)
{
    PyTaskletObject *target, *source = ts->st.current;
    int fail, switched;

    /* communication 2): there is nobody waiting, so we must switch */
    if (source->flags.block_trap)
        RUNTIME_ERROR("this tasklet does not like to be"
                        " blocked.", -1);
    if (self->flags.closing) {
        PyErr_SetString(PyExc_ValueError, "Send/receive operation on a closed channel");
        return -1;
    }

    /* take the current tasklet off the runnable queue and park it */
    slp_current_remove();
    slp_channel_insert(self, source, dir, NULL);
    /* whoever is current now is the tasklet we switch to */
    target = ts->st.current;

    /* Make sure that the channel will exist past the actual switch, if
    * we are softswitching.  A temporary channel might disappear.
    */
    assert(ts->st.del_post_switch == NULL);
    if (Py_REFCNT(self)) {
        ts->st.del_post_switch = (PyObject*)self;
        Py_INCREF(self);
    }

    fail =  slp_schedule_task(result, source, target, stackless, &switched);
    /* if we never actually switched, the delayed release is not needed */
    if (fail || !switched)
        Py_CLEAR(ts->st.del_post_switch);
    if (fail) {
        /* undo our tasklet shuffling */
        slp_channel_remove(self, source, NULL, NULL);
        slp_current_unremove(source);
    }
    return fail;
}
Code example #9
0
File: stacklessmodule.c  Project: d11/rts
/*
 * stackless.schedule(retval, remove): store retval as the current
 * tasklet's tempval, optionally take it off the runnable queue, and
 * switch to the next tasklet.  Release of prev is delayed via
 * del_post_switch so soft switching completes before a possible
 * emergency reactivation.
 */
PyObject *
PyStackless_Schedule(PyObject *retval, int remove)
{
    STACKLESS_GETARG();
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *prev = ts->st.current, *next = prev->next;
    PyObject *ret = NULL;
    int switched;

    /* interpreter not fully initialized: delegate to bootstrap variant */
    if (ts->st.main == NULL) return PyStackless_Schedule_M(retval, remove);
    /* make sure we hold a reference to the previous tasklet */
    Py_INCREF(prev);
    TASKLET_SETVAL(prev, retval);
    if (remove) {
        slp_current_remove();
        Py_DECREF(prev);
        if (next == prev)
            next = NULL; /* we were the last runnable tasklet */
    }
    /* we mustn't DECREF prev here (after the slp_schedule_task().
     * This could be the last reference, thus
     * prompting emergency reactivation of the tasklet,
     * and soft switching isn't really done until we have unwound.
     * Use the delayed release mechanism instead.
     */
    assert(ts->st.del_post_switch == NULL);
    ts->st.del_post_switch = (PyObject*)prev;

    ret = slp_schedule_task(prev, next, stackless, &switched);

    /* however, if this was a no-op (e.g. prev==next, or an error occurred)
     * we need to decref prev ourselves
     */
    if (!switched)
        Py_CLEAR(ts->st.del_post_switch);
    return ret;
}
Code example #10
0
/*
 * Wake a tasklet that belongs to another thread (next) from this
 * thread (prev's thread).  Performs a serialized interthread
 * transaction: take the target thread's unlock_lock, take the global
 * interthread_lock, hand the target over via the shared `temp`
 * structure, then wait on our own self_lock until the other side has
 * completed the hand-off.  The GIL is released around each blocking
 * lock acquisition.
 */
static PyObject *schedule_task_unblock(PyTaskletObject *prev,
				       PyTaskletObject *next,
				       int stackless)
{
	PyThreadState *ts = PyThreadState_GET();
	PyThreadState *nts = next->cstate->tstate;
	PyObject *retval;
	long thread_id = nts->thread_id;
	PyObject *unlock_lock;

	/* lazily create this thread's communication locks */
	if (ts->st.thread.self_lock == NULL) {
		if (!(ts->st.thread.self_lock = new_lock()))
			return NULL;
		/* self_lock starts acquired: we block on it until released */
		acquire_lock(ts->st.thread.self_lock, 1);
		if (!(ts->st.thread.unlock_lock = new_lock()))
			return NULL;
		if (interthread_lock == NULL) {
			if (!(interthread_lock = PyThread_allocate_lock()))
				return NULL;
		}
	}

	/*
	 * make sure nobody else tries a transaction at the same time
	 * on this tasklet's thread state, because two tasklets of the
	 * same thread could be talking to different threads. They must
	 * be serviced in fifo order.
	 */

	if (nts->st.thread.unlock_lock == NULL) {
		if (!(nts->st.thread.unlock_lock = new_lock()))
			return NULL;
	}
	unlock_lock = nts->st.thread.unlock_lock;
	/* keep the lock object alive even if nts goes away */
	Py_INCREF(unlock_lock);

	PyEval_SaveThread();
	PR("unblocker waiting for unlocker lock");
	acquire_lock(unlock_lock, 1);
	PR("unblocker HAS unlocker lock");

	/*
	 * also make sure that only one single interthread transaction
	 * is performed at any time.
	 */

	PR("unblocker waiting for interthread lock");
	PyThread_acquire_lock(interthread_lock, 1);
	PR("unblocker HAS interthread lock");
	PyEval_RestoreThread(ts);

	/* get myself ready */
	retval = slp_schedule_task(prev, prev, stackless);

	/* see whether the other thread still exists and is really blocked */
	if (is_thread_alive(thread_id) && nts->st.thread.is_locked) {
		/* tell the blocker what comes next */
		temp.unlock_target = next;
		temp.other_lock = ts->st.thread.self_lock;
		/* give it an extra ref, in case I would die early */
		Py_INCREF(temp.other_lock);

		/* unblock it */
		release_lock(nts->st.thread.self_lock);

		/* wait for the transaction to finish */

		PyEval_SaveThread();
		PR("unblocker waiting for own lock");
		acquire_lock(ts->st.thread.self_lock, 1);
		PR("unblocker HAS own lock");
		PyEval_RestoreThread(ts);
	}
	else {
		PR("unlocker: other is NOT LOCKED or dead");
		if (next->flags.blocked) {
			/* unblock from channel */
			slp_channel_remove_slow(next);
			slp_current_insert(next);
		}
		else if (next->next == NULL) {
			/* reactivate floating task */
			Py_INCREF(next);
			slp_current_insert(next);
		}
	}
	PR("unblocker releasing interthread lock");
	PyThread_release_lock(interthread_lock);
	PR("unblocker RELEASED interthread lock");
	PR("unblocker releasing unlocker lock");
	release_lock(unlock_lock);
	Py_DECREF(unlock_lock);
	PR("unblocker RELEASED unlocker lock");

	return retval;
}
Code example #11
0
/*
 * Nothing is runnable: either resolve/raise a deadlock, or (with
 * threads) park this thread on its self_lock until another thread's
 * schedule_task_unblock() hands us a tasklet to run via `temp`.
 */
static PyObject *
schedule_task_block(PyTaskletObject *prev, int stackless)
{
	PyThreadState *ts = PyThreadState_GET();
	PyObject *retval;
	PyTaskletObject *next = NULL;
	PyObject *unlocker_lock;

	if (check_for_deadlock()) {
		/* revive real main if floating */
		if (ts == slp_initial_tstate && ts->st.main->next == NULL) {
			/* emulate old revive_main behavior:
			 * passing a value only if it is an exception
			 */
			if (PyBomb_Check(prev->tempval))
				TASKLET_SETVAL(ts->st.main, prev->tempval);
			return slp_schedule_task(prev, ts->st.main, stackless);
		}
		/* raise the deadlock in the current tasklet itself */
		if (!(retval = make_deadlock_bomb()))
			return NULL;
		TASKLET_SETVAL_OWN(prev, retval);
		return slp_schedule_task(prev, prev, stackless);
	}
#ifdef WITH_THREAD
	/* lazily create this thread's communication locks */
	if (ts->st.thread.self_lock == NULL) {
		if (!(ts->st.thread.self_lock = new_lock()))
			return NULL;
		/* self_lock starts acquired: we block on it below */
		acquire_lock(ts->st.thread.self_lock, 1);
		if (!(ts->st.thread.unlock_lock = new_lock()))
			return NULL;
	}

	/* let somebody reactivate us */

	ts->st.thread.is_locked = 1; /* flag as blocked and wait */

	PyEval_SaveThread();
	PR("locker waiting for my lock");
	acquire_lock(ts->st.thread.self_lock, 1);
	PR("HAVE my lock");
	PyEval_RestoreThread(ts);

	/* the unblocking thread told us what to run next (if anything) */
	if (temp.unlock_target != NULL) {
		next = temp.unlock_target;
		temp.unlock_target = NULL;
	}
	else
		next = prev;

	/*
	 * get in shape. can't do this with schedule here because
	 * hard switching might not get us back, soon enough.
	 */
	if (next->flags.blocked) {
		/* unblock from channel */
		slp_channel_remove_slow(next);
		slp_current_insert(next);
	}
	else if (next->next == NULL) {
		/* reactivate floating task */
		Py_INCREF(next);
		slp_current_insert(next);
	}

	/* release the peer thread that is waiting for us to finish */
	if (temp.other_lock != NULL) {
		PR("releasing unlocker");
		unlocker_lock = temp.other_lock;
		temp.other_lock = NULL;
		release_lock(unlocker_lock);
		Py_DECREF(unlocker_lock);
	}

	ts->st.thread.is_locked = 0;
#else
	(void)unlocker_lock;
	next = prev;
#endif
	/* this must be after releasing the locks because of hard switching */
	retval = slp_schedule_task(prev, next, stackless);
	PR("schedule() is done");
	return retval;
}
Code example #12
0
File: channelobject.c  Project: d11/rts
/*
 * Common implementation of channel send (dir > 0) and receive
 * (dir < 0).  Either exchanges data with a waiting partner and
 * schedules according to the channel's preference, or blocks the
 * current tasklet on the channel.
 */
static PyObject *
generic_channel_action(PyChannelObject *self, PyObject *arg, int dir, int stackless)
{
    PyThreadState *ts = PyThreadState_GET();
    PyTaskletObject *source = ts->st.current;
    PyTaskletObject *target = self->head;
    /* cando: a partner is waiting on the opposite direction */
    int cando = dir > 0 ? self->balance < 0 : self->balance > 0;
    int interthread = cando ? target->cstate->tstate != ts : 0;
    PyObject *retval;
    int runflags = 0;

    assert(abs(dir) == 1);

    TASKLET_SETVAL(source, arg);

    /* note that notify might release the GIL. */
    /* XXX for the moment, we notify late on interthread */
    if (!interthread)
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);

    if (cando) {
        /* communication 1): there is somebody waiting */
        target = slp_channel_remove(self, -dir);
        /* exchange data */
        TASKLET_SWAPVAL(source, target);

        if (interthread) {
            ;
            /* interthread, always keep target!
            slp_current_insert(target);*/
        }
        else {
            if (self->flags.schedule_all) {
                /* target goes last */
                slp_current_insert(target);
                /* always schedule away from source */
                target = source->next;
            }
            else if (self->flags.preference == -dir) {
                /* move target after source */
                ts->st.current = source->next;
                slp_current_insert(target);
                ts->st.current = source;
                /* don't mess with this scheduling behaviour: */
                runflags = PY_WATCHDOG_NO_SOFT_IRQ;
            }
            else {
                /* otherwise we return to the caller */
                slp_current_insert(target);
                target = source;
                /* don't mess with this scheduling behaviour: */
                runflags = PY_WATCHDOG_NO_SOFT_IRQ;
            }
        }
    }
    else {
        /* communication 2): there is nobody waiting, so we must switch */
        if (source->flags.block_trap)
            RUNTIME_ERROR("this tasklet does not like to be"
                          " blocked.", NULL);
        if (self->flags.closing) {
            PyErr_SetNone(PyExc_StopIteration);
            return NULL;
        }
        slp_current_remove();
        slp_channel_insert(self, source, dir);
        target = ts->st.current;

        /* Make sure that the channel will exist past the actual switch, if
         * we are softswitching.  A temporary channel might disappear.
         */
        if (Py_REFCNT(self)) {
            assert(ts->st.del_post_switch == NULL);
            ts->st.del_post_switch = (PyObject*)self;
            Py_INCREF(self);
        }
    }
    ts->st.runflags |= runflags; /* extra info for slp_schedule_task */
    retval = slp_schedule_task(source, target, stackless, 0);

    if (interthread) {
        if (cando) {
            /* drop the reference we kept on the other thread's tasklet */
            Py_DECREF(target);
        }
        NOTIFY_CHANNEL(self, source, dir, cando, NULL);
    }
    return retval;
}
Code example #13
0
/*
 * Channel action when a partner is already waiting: pop the waiting
 * tasklet, swap payloads, make it runnable according to the channel's
 * scheduling preference, and switch.  On failure, every mutation is
 * undone in reverse order.  Returns 0 on success, nonzero on failure.
 */
static int
generic_channel_cando(PyThreadState *ts, PyObject **result, PyChannelObject *self, int dir, int stackless)
{
    PyTaskletObject *source = ts->st.current;
    PyTaskletObject *switchto, *target, *next;
    int interthread;
    int oldflags, runflags = 0;
    int switched, fail;

    /* swap data and perform necessary scheduling */

    switchto = target = slp_channel_remove(self, NULL, NULL, &next);
    interthread = target->cstate->tstate != ts;
    /* exchange data */
    TASKLET_SWAPVAL(source, target);

    if (interthread) {
        ; /* nothing happens, the target is merely made runnable */
    } else {
        if (self->flags.schedule_all) {
            /* target goes last */
            slp_current_insert(target);
            /* always schedule away from source */
            switchto = source->next;
        }
        else if (self->flags.preference == -dir) {
            /* move target after source */
            ts->st.current = source->next;
            slp_current_insert(target);
            ts->st.current = source;
            /* don't mess with this scheduling behaviour: */
            runflags = PY_WATCHDOG_NO_SOFT_IRQ;
        }
        else {
            /* otherwise we return to the caller */
            slp_current_insert(target);
            switchto = source;
            /* don't mess with this scheduling behaviour: */
            runflags = PY_WATCHDOG_NO_SOFT_IRQ;
        }
    }

    /* Make sure that the channel will exist past the actual switch, if
    * we are softswitching.  A temporary channel might disappear.
    */
    assert(ts->st.del_post_switch == NULL);
    if (source != target && Py_REFCNT(self)) {
        ts->st.del_post_switch = (PyObject*)self;
        Py_INCREF(self);
    }

    oldflags = ts->st.runflags;
    ts->st.runflags |= runflags; /* extra info for slp_schedule_task */
    fail = slp_schedule_task(result, source, switchto, stackless, &switched);

    /* no actual switch happened: the delayed release is not needed */
    if (fail || !switched)
        Py_CLEAR(ts->st.del_post_switch);
    if (fail) {
        /* undo everything in reverse order of the mutations above */
        ts->st.runflags = oldflags;
        if (!interthread) {
            slp_current_uninsert(target);
            ts->st.current = source;
        }
        slp_channel_insert(self, target, -dir, next);
        TASKLET_SWAPVAL(source, target);
    } else {
        if (interthread)
            /* drop the reference held on the other thread's tasklet */
            Py_DECREF(target);
    }
    return fail;
}
Code example #14
0
/*
 * Common implementation of channel send (dir > 0) and receive
 * (dir < 0) — older fork of this function.  Either exchanges data
 * with a waiting partner, or blocks the current tasklet on the
 * channel and switches away.
 */
static PyObject *
generic_channel_action(PyChannelObject *self, PyObject *arg, int dir, int stackless)
{
	PyThreadState *ts = PyThreadState_GET();
	PyTaskletObject *source = ts->st.current;
	PyTaskletObject *target = self->head;
	/* cando: a partner is waiting on the opposite direction */
	int cando = dir > 0 ? self->balance < 0 : self->balance > 0;
	int interthread = cando ? target->cstate->tstate != ts : 0;
	PyObject *retval;

	assert(abs(dir) == 1);

	TASKLET_SETVAL(source, arg);

	/* note that notify might release the GIL. */
	/* XXX for the moment, we notify late on interthread */
	if (!interthread)
		NOTIFY_CHANNEL(self, source, dir, cando, NULL);

	if (cando) {
		/* communication 1): there is somebody waiting */
		target = slp_channel_remove(self, -dir);
		/* exchange data */
		TASKLET_SWAPVAL(source, target);

		if (interthread) {
			/* interthread, always keep target! */
			/* NOTE(review): a newer fork of this function does NOT
			 * insert the target here in the interthread case —
			 * verify which behavior this fork's scheduler expects. */
			slp_current_insert(target);
		}
		else {
			if (self->flags.schedule_all) {
				/* target goes last */
				slp_current_insert(target);
				/* always schedule away from source */
				target = source->next;
			}
			else if (self->flags.preference == -dir) {
				/* move target after source */
				ts->st.current = source->next;
				slp_current_insert(target);
				ts->st.current = source;
			}
			else {
				/* otherwise we return to the caller */
				slp_current_insert(target);
				target = source;
			}
		}
	}
	else {
		/* communication 2): there is nobody waiting */
		if (source->flags.block_trap)
			RUNTIME_ERROR("this tasklet does not like to be"
				      " blocked.", NULL);
		if (self->flags.closing) {
			PyErr_SetNone(PyExc_StopIteration);
			return NULL;
		}
		/* park the current tasklet on the channel */
		slp_current_remove();
		slp_channel_insert(self, source, dir);
		target = ts->st.current;
	}
	retval = slp_schedule_task(source, target, stackless);
	/* interthread notification was deferred until after the switch */
	if (interthread)
		NOTIFY_CHANNEL(self, source, dir, cando, NULL);
	return retval;
}