Example No. 1
static struct xnthread *
xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif
	lastownerh = xnthread_handle(lastowner);

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
Example No. 2
void xnsynch_renice_sleeper(xnthread_t *thread)
{
	xnsynch_t *synch = thread->wchan;
	xnthread_t *owner;

	if (!testbits(synch->status, XNSYNCH_PRIO))
		return;

	removepq(&synch->pendq, &thread->plink);
	insertpqf(&synch->pendq, &thread->plink, thread->cprio);
	owner = synch->owner;

	if (owner != NULL && thread->cprio > owner->cprio) {
		/* The new priority of the sleeping thread is higher
		 * than the priority of the current owner of the
		 * resource: we need to update the PI state. */
		if (testbits(synch->status, XNSYNCH_CLAIMED)) {
			/* The resource is already claimed, just
			   reorder the claim queue. */
			removepq(&owner->claimq, &synch->link);
			insertpqf(&owner->claimq, &synch->link, thread->cprio);
		} else {
			/* The resource was NOT claimed, claim it now
			 * and boost the owner. */
			__setbits(synch->status, XNSYNCH_CLAIMED);
			insertpqf(&owner->claimq, &synch->link, thread->cprio);
			owner->bprio = owner->cprio;
			xnthread_set_state(owner, XNBOOST);
		}
		/* Renice the owner thread, progressing in the PI
		   chain as needed. */
		xnsynch_renice_thread(owner, thread->cprio);
	}
}
Example No. 3
xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
			   xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread();
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else /* i.e. priority-sorted */
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
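
A typical wait-side caller initializes the synchronization object once, then inspects the information bits returned by xnsynch_sleep_on() to find out how the wait ended. The sketch below is illustrative only: the my_event wrapper and its helpers are hypothetical, and only nucleus calls already shown in these examples are assumed.

struct my_event {			/* hypothetical wrapper object */
	struct xnsynch synch;
};

static void my_event_init(struct my_event *ev)
{
	/* Priority-ordered wait queue, no ownership, no fast lock. */
	xnsynch_init(&ev->synch, XNSYNCH_PRIO, NULL);
}

static int my_event_wait(struct my_event *ev, xnticks_t timeout)
{
	xnflags_t info;

	/* Block the current thread until wakeup, deletion or timeout. */
	info = xnsynch_sleep_on(&ev->synch, timeout, XN_RELATIVE);
	if (info & XNRMID)
		return -EIDRM;		/* object deleted while pending */
	if (info & XNTIMEO)
		return -ETIMEDOUT;	/* timeout elapsed */
	if (info & XNBREAK)
		return -EINTR;		/* wait forcibly unblocked */

	return 0;
}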
Example No. 4
ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
{
	struct xnpipe_state *state;
	int need_sched = 0;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (size <= sizeof(*mh))
		return -EINVAL;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = size - sizeof(*mh);
	xnpipe_m_rdoff(mh) = 0;
	state->ionrd += xnpipe_m_size(mh);

	if (flags & XNPIPE_URGENT)
		prependq(&state->outq, xnpipe_m_link(mh));
	else
		appendq(&state->outq, xnpipe_m_link(mh));

	if (!testbits(state->status, XNPIPE_USER_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return (ssize_t) size;
	}

	if (testbits(state->status, XNPIPE_USER_WREAD)) {
		/*
		 * Wake up the regular Linux task waiting for input
		 * from the Xenomai side.
		 */
		__setbits(state->status, XNPIPE_USER_WREAD_READY);
		need_sched = 1;
	}

	if (state->asyncq) {	/* Schedule asynch sig. */
		__setbits(state->status, XNPIPE_USER_SIGIO);
		need_sched = 1;
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t) size;
}
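
A message handed to xnpipe_send() starts with a struct xnpipe_mh header immediately followed by the payload, and the size argument covers both, which is what the size <= sizeof(*mh) check above enforces. The following sender is a hedged sketch: it assumes the minor comes from a prior xnpipe_connect(), and that the free_obuf handler registered with that connection pairs with plain xnfree(), so that xnmalloc() is an acceptable source for output buffers.

static int send_string(int minor, const char *text)
{
	size_t len = strlen(text);
	struct xnpipe_mh *mh;
	ssize_t ret;

	/* Header and payload live in one contiguous buffer. */
	mh = xnmalloc(sizeof(*mh) + len);
	if (mh == NULL)
		return -ENOMEM;

	memcpy((char *)(mh + 1), text, len);

	/* Flags 0: regular (non-urgent) message, appended to the output queue. */
	ret = xnpipe_send(minor, mh, sizeof(*mh) + len, 0);
	if (ret < 0) {
		xnfree(mh);	/* buffer ownership stays with us on error */
		return (int)ret;
	}

	return 0;
}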
Example No. 5
static int xnpipe_open(struct inode *inode, struct file *file)
{
	int minor, err = 0, sigpending;
	struct xnpipe_state *state;
	spl_t s;

	minor = MINOR(inode->i_rdev);

	if (minor >= XNPIPE_NDEVS)
		return -ENXIO;	/* TssTss... stop playing with mknod() ;o) */

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	/* Enforce exclusive open for the message queues. */
	if (testbits(state->status, XNPIPE_USER_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBUSY;
	}

	__setbits(state->status, XNPIPE_USER_CONN);

	file->private_data = state;
	init_waitqueue_head(&state->readq);
	init_waitqueue_head(&state->syncq);
	state->wcount = 0;

	__clrbits(state->status,
		  XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY |
		  XNPIPE_USER_SIGIO);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		if (testbits(file->f_flags, O_NONBLOCK)) {
			xnpipe_cleanup_user_conn(state, s);
			xnlock_put_irqrestore(&nklock, s);
			return -EWOULDBLOCK;
		}

		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
					 testbits(state->status,
						  XNPIPE_KERN_CONN));
		if (sigpending) {
			xnpipe_cleanup_user_conn(state, s);
			xnlock_put_irqrestore(&nklock, s);
			return -ERESTARTSYS;
		}
	}

	if (err)
		xnpipe_cleanup_user_conn(state, s);

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 6
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
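
A mutex-style caller only has to hand the resource over and reschedule when a waiter became the new owner. A minimal sketch, assuming the calling thread currently owns the synchronization object:

static void my_mutex_unlock(struct xnsynch *synch)
{
	struct xnthread *waiter;

	/* Transfer ownership to the next sleeper, if any. */
	waiter = xnsynch_release(synch);
	if (waiter)
		xnpod_schedule();	/* let the new owner preempt us if needed */
}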
Example No. 7
static void
xntimer_adjust_periodic(xntimer_t *timer, xnsticks_t delta)
{
	xnticks_t now = timer->base->jiffies;
	xnsticks_t diff;
	xntlholder_date(&timer->plink) -= delta;
	diff = now - xntlholder_date(&timer->plink);

	if (testbits(timer->status, XNTIMER_PERIODIC)) {
		xnticks_t period = xntimer_interval(timer);
		xnticks_t mod;

		timer->pexpect -= delta;

		if ((xnsticks_t) (diff - period) >= 0) {
			/* The timer should have ticked several times before
			   now. Instead of calling timer->handler several
			   times, we change the timer date without changing
			   its pexpect, so that timer->handler is called only
			   once and the lost ticks are counted as overruns. */
			mod = xnarch_mod64(diff, period);
			xntlholder_date(&timer->plink) += diff - mod;
		} else if (delta < 0
			   && testbits(timer->status, XNTIMER_FIRED)
			   && (xnsticks_t) (diff + period) <= 0) {
			/* The timer is periodic and NOT waiting for its first
			   shot, so we make it tick sooner than its original
			   date, to avoid the case where, after adjusting time
			   to an earlier date, real-time periodic timers would
			   not tick until the original date has passed. */
			mod = xnarch_mod64(-diff, period);
			xntlholder_date(&timer->plink) += diff + mod;
			timer->pexpect += diff + mod;
		}
	}

	if (diff >= 0) {
		xnstat_counter_inc(&timer->fired);
		timer->handler(timer);

		if (!xntimer_reload_p(timer))
			return;

		xntlholder_date(&timer->plink) += timer->interval;
	}

	xntimer_enqueue_periodic(timer);
}
Example No. 8
int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
{
	struct xnpipe_state *state;
	int need_sched = 0, ret;
	spl_t s;

	minor = xnpipe_minor_alloc(minor);
	if (minor < 0)
		return minor;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	ret = xnpipe_set_ops(state, ops);
	if (ret) {
		xnlock_put_irqrestore(&nklock, s);
		return ret;
	}

	__setbits(state->status, XNPIPE_KERN_CONN);
	xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
	state->xstate = xstate;
	state->ionrd = 0;

	if (testbits(state->status, XNPIPE_USER_CONN)) {
		if (testbits(state->status, XNPIPE_USER_WREAD)) {
			/*
			 * Wake up the regular Linux task waiting for
			 * the kernel side to connect (xnpipe_open).
			 */
			__setbits(state->status, XNPIPE_USER_WREAD_READY);
			need_sched = 1;
		}

		if (state->asyncq) {	/* Schedule asynch sig. */
			__setbits(state->status, XNPIPE_USER_SIGIO);
			need_sched = 1;
		}
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return minor;
}
Example No. 9
ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size)
{
	struct xnpipe_state *state;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (size < 0)
		return -EINVAL;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	xnpipe_m_size(mh) += size;
	state->ionrd += size;

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t) size;
}
Example No. 10
int xnsynch_flush(struct xnsynch *synch, xnflags_t reason)
{
	struct xnpholder *holder;
	int status;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_flush, "synch %p reason %lu",
		   synch, reason);

	status = emptypq_p(&synch->pendq) ? XNSYNCH_DONE : XNSYNCH_RESCHED;

	while ((holder = getpq(&synch->pendq)) != NULL) {
		struct xnthread *sleeper = link2thread(holder, plink);
		xnthread_set_info(sleeper, reason);
		sleeper->wchan = NULL;
		xnpod_resume_thread(sleeper, XNPEND);
	}

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		xnsynch_clear_boost(synch, synch->owner);
		status = XNSYNCH_RESCHED;
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return status;
}
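
xnsynch_flush() is the usual way to kick every waiter out when the protected object is being destroyed; each sleeper then returns from xnsynch_sleep_on() with the given reason bit set. A short deletion-path sketch, reusing the hypothetical my_event wrapper introduced after Example No. 3:

static void my_event_destroy(struct my_event *ev)
{
	/* Wake every sleeper with XNRMID, so the wait side returns -EIDRM. */
	if (xnsynch_flush(&ev->synch, XNRMID) == XNSYNCH_RESCHED)
		xnpod_schedule();
}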
Example No. 11
xnpholder_t *xnsynch_wakeup_this_sleeper(xnsynch_t *synch, xnpholder_t *holder)
{
	xnthread_t *thread, *lastowner;
	xnpholder_t *nholder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	lastowner = synch->owner;
	nholder = poppq(&synch->pendq, holder);

	thread = link2thread(holder, plink);
	thread->wchan = NULL;
	thread->wwake = synch;
	synch->owner = thread;
	xnthread_set_info(thread, XNWAKEN);
	trace_mark(xn_nucleus_synch_wakeup_all,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);
	xnpod_resume_thread(thread, XNPEND);

	if (testbits(synch->status, XNSYNCH_CLAIMED))
		xnsynch_clear_boost(synch, lastowner);

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return nholder;
}
Example No. 12
struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
{
	struct xnthread *thread = NULL;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	holder = getpq(&synch->pendq);
	if (holder) {
		thread = link2thread(holder, plink);
		thread->wchan = NULL;
		trace_mark(xn_nucleus, synch_wakeup_one,
			   "thread %p thread_name %s synch %p",
			   thread, xnthread_name(thread), synch);
		xnpod_resume_thread(thread, XNPEND);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return thread;
}
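
The signal side of a counting-semaphore style object wakes a single sleeper when one is pending and otherwise records the unit in a counter. A sketch under the same caveats as the earlier ones; the my_sem structure and the exact locking placement are illustrative:

struct my_sem {				/* hypothetical counting semaphore */
	struct xnsynch synch;
	int count;
};

static void my_sem_post(struct my_sem *sem)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Hand the unit directly to a sleeper, or bank it in the count. */
	if (xnsynch_wakeup_one_sleeper(&sem->synch))
		xnpod_schedule();
	else
		sem->count++;

	xnlock_put_irqrestore(&nklock, s);
}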
Example No. 13
void xntslave_adjust(xntslave_t *slave, xnsticks_t delta)
{
	int nr_cpus, cpu, n;
	xnqueue_t adjq;

	initq(&adjq);
	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {
		struct percpu_cascade *pc = &slave->cascade[cpu];
		xnholder_t *adjholder;

		for (n = 0; n < XNTIMER_WHEELSIZE; n++) {
			xnqueue_t *q = &pc->wheel[n];
			xntlholder_t *holder;

			for (holder = xntlist_head(q); holder;
			     holder = xntlist_next(q, holder)) {
				xntimer_t *timer = plink2timer(holder);
				if (testbits(timer->status, XNTIMER_REALTIME)) {
					inith(&timer->adjlink);
					appendq(&adjq, &timer->adjlink);
				}
			}
		}

		while ((adjholder = getq(&adjq))) {
			xntimer_t *timer = adjlink2timer(adjholder);
			xntimer_dequeue_periodic(timer);
			xntimer_adjust_periodic(timer, delta);
		}
	}
}
Example No. 14
void xntimer_adjust_all_aperiodic(xnsticks_t delta)
{
	unsigned cpu, nr_cpus;
	xnqueue_t adjq;

	initq(&adjq);
	delta = xnarch_ns_to_tsc(delta);
	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {
		xnsched_t *sched = xnpod_sched_slot(cpu);
		xntimerq_t *q = &sched->timerqueue;
		xnholder_t *adjholder;
		xntimerh_t *holder;
		xntimerq_it_t it;

		for (holder = xntimerq_it_begin(q, &it); holder;
		     holder = xntimerq_it_next(q, &it, holder)) {
			xntimer_t *timer = aplink2timer(holder);
			if (testbits(timer->status, XNTIMER_REALTIME)) {
				inith(&timer->adjlink);
				appendq(&adjq, &timer->adjlink);
			}
		}

		while ((adjholder = getq(&adjq))) {
			xntimer_t *timer = adjlink2timer(adjholder);
			xntimer_dequeue_aperiodic(timer);
			xntimer_adjust_aperiodic(timer, delta);
		}

		if (sched != xnpod_current_sched())
			xntimer_next_remote_shot(sched);
		else
			xntimer_next_local_shot(sched);
	}
}
Example No. 15
/***
 *  rt_packet_recvmsg
 */
ssize_t rt_packet_recvmsg(struct rtdm_dev_context *sockctx,
                          rtdm_user_info_t *user_info, struct msghdr *msg,
                          int msg_flags)
{
    struct rtsocket     *sock = (struct rtsocket *)&sockctx->dev_private;
    size_t              len   = rt_iovec_len(msg->msg_iov, msg->msg_iovlen);
    size_t              copy_len;
    size_t              real_len;
    struct rtskb        *skb;
    struct ethhdr       *eth;
    struct sockaddr_ll  *sll;
    int                 ret;
    nanosecs_t          timeout = sock->timeout;


    /* non-blocking receive? */
    if (testbits(msg_flags, MSG_DONTWAIT))
        timeout = -1;

    ret = rtdm_sem_timeddown(&sock->pending_sem, timeout, NULL);
    if (unlikely(ret < 0)) {
        if ((ret != -EWOULDBLOCK) && (ret != -ETIMEDOUT))
            ret = -EBADF;   /* socket has been closed */
        return ret;
    }

    skb = rtskb_dequeue_chain(&sock->incoming);
    RTNET_ASSERT(skb != NULL, return -EFAULT;);
Example No. 16
void xnsynch_requeue_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner;

	if (!testbits(synch->status, XNSYNCH_PRIO))
		return;

	removepq(&synch->pendq, &thread->plink);
	insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));
	owner = synch->owner;

	if (owner != NULL && w_cprio(thread) > w_cprio(owner)) {
		/*
		 * The new (weighted) priority of the sleeping thread
		 * is higher than the priority of the current owner of
		 * the resource: we need to update the PI state.
		 */
		if (testbits(synch->status, XNSYNCH_CLAIMED)) {
			/*
			 * The resource is already claimed, just
			 * reorder the claim queue.
			 */
			removepq(&owner->claimq, &synch->link);
			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
		} else {
			/*
			 * The resource was NOT claimed, claim it now
			 * and boost the owner.
			 */
			__setbits(synch->status, XNSYNCH_CLAIMED);
			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}
		}
		/*
		 * Renice the owner thread, progressing in the PI
		 * chain as needed.
		 */
		xnsynch_renice_thread(owner, thread);
	}
}
Example No. 17
static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
{
	if (testbits(state->status, mask))
		if (--state->wcount == 0) {
			removeq(&xnpipe_sleepq, &state->slink);
			__clrbits(state->status, mask);
		}
}
Example No. 18
static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask)
{
	if (testbits(state->status, mask)) {
		if (state->wcount) {
			state->wcount = 0;
			removeq(&xnpipe_sleepq, &state->slink);
			__clrbits(state->status, mask);
		}
	}
}
Example No. 19
int xntimer_start_aperiodic(xntimer_t *timer,
			    xnticks_t value, xnticks_t interval,
			    xntmode_t mode)
{
	xnticks_t date, now;

	trace_mark(xn_nucleus, timer_start,
		   "timer %p base %s value %Lu interval %Lu mode %u",
		   timer, xntimer_base(timer)->name, value, interval, mode);

	if (!testbits(timer->status, XNTIMER_DEQUEUED))
		xntimer_dequeue_aperiodic(timer);

	now = xnarch_get_cpu_tsc();

	__clrbits(timer->status,
		  XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
	switch (mode) {
	case XN_RELATIVE:
		if ((xnsticks_t)value < 0)
			return -ETIMEDOUT;
		date = xnarch_ns_to_tsc(value) + now;
		break;
	case XN_REALTIME:
		__setbits(timer->status, XNTIMER_REALTIME);
		value -= nktbase.wallclock_offset;
		/* fall through */
	default: /* XN_ABSOLUTE || XN_REALTIME */
		date = xnarch_ns_to_tsc(value);
		if ((xnsticks_t)(date - now) <= 0)
			return -ETIMEDOUT;
		break;
	}

	xntimerh_date(&timer->aplink) = date;

	timer->interval = XN_INFINITE;
	if (interval != XN_INFINITE) {
		timer->interval = xnarch_ns_to_tsc(interval);
		timer->pexpect = date;
		__setbits(timer->status, XNTIMER_PERIODIC);
	}

	xntimer_enqueue_aperiodic(timer);
	if (xntimer_heading_p(timer)) {
		if (xntimer_sched(timer) != xnpod_current_sched())
			xntimer_next_remote_shot(xntimer_sched(timer));
		else
			xntimer_next_local_shot(xntimer_sched(timer));
	}

	return 0;
}
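
A hedged sketch of the usual front-end sequence for arming a periodic timer. It assumes the generic xntimer_init()/xntimer_start() entry points, which dispatch to the aperiodic or periodic back-ends shown in these examples depending on the time base; the handler and the 1 ms period are illustrative, and the values are in nanoseconds for the master (aperiodic) base.

static xntimer_t my_timer;		/* hypothetical timer */

static void my_timer_handler(xntimer_t *timer)
{
	/* Runs in timer interrupt context on each expiry. */
}

static void my_timer_setup(void)
{
	spl_t s;

	xntimer_init(&my_timer, &nktbase, my_timer_handler);

	xnlock_get_irqsave(&nklock, s);
	/* First shot in 1 ms, then every 1 ms. */
	xntimer_start(&my_timer, 1000000, 1000000, XN_RELATIVE);
	xnlock_put_irqrestore(&nklock, s);
}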
Example No. 20
/**
 * Migrate a timer.
 *
 * This call migrates a timer to another cpu. In order to avoid pathological
 * cases, it must be called from the CPU to which @a timer is currently
 * attached.
 *
 * @param timer The address of the timer object to be migrated.
 *
 * @param sched The address of the destination CPU xnsched_t structure.
 *
 * @retval -EINVAL if @a timer is queued on a CPU other than the current one;
 * @retval 0 otherwise.
 *
 */
int xntimer_migrate(xntimer_t *timer, xnsched_t *sched)
{
	int err = 0;
	int queued;
	spl_t s;

	trace_mark(xn_nucleus, timer_migrate, "timer %p cpu %d",
		   timer, (int)xnsched_cpu(sched));

	xnlock_get_irqsave(&nklock, s);

	if (sched == timer->sched)
		goto unlock_and_exit;

	queued = !testbits(timer->status, XNTIMER_DEQUEUED);

	/* Avoid the pathological case where the timer interrupt did not occur yet
	   for the current date on the timer source CPU, whereas we are trying to
	   migrate it to a CPU where the timer interrupt already occurred. This would
	   not be a problem in aperiodic mode. */

	if (queued) {

		if (timer->sched != xnpod_current_sched()) {
			err = -EINVAL;
			goto unlock_and_exit;
		}

#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		timer->base->ops->stop_timer(timer);
#else /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
		xntimer_stop_aperiodic(timer);
#endif /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
	}

	timer->sched = sched;

	if (queued)
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		timer->base->ops->move_timer(timer);
#else /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
		xntimer_move_aperiodic(timer);
#endif /* !CONFIG_XENO_OPT_TIMING_PERIODIC */

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
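
Since a queued timer may only be migrated from the CPU it is currently attached to, a caller typically runs this on the source CPU and names the destination by its scheduler slot. A minimal, illustrative wrapper:

static int move_timer_to_cpu(xntimer_t *timer, int cpu)
{
	/*
	 * For a queued timer this must run on the timer's current CPU,
	 * otherwise -EINVAL is returned.
	 */
	return xntimer_migrate(timer, xnpod_sched_slot(cpu));
}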
Example No. 21
void xnintr_clock_handler(void)
{
	xnstat_exectime_t *prev;
	struct xnsched *sched;
	unsigned cpu;

	cpu = xnarch_current_cpu();

	if (!cpumask_test_cpu(cpu, &xnarch_supported_cpus)) {
		xnarch_relay_tick();
		return;
	}

	sched = xnpod_sched_slot(cpu);

	prev = xnstat_exectime_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account);
	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
		sched = xnpod_current_sched();
	}
	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
}
Example No. 22
int xnpipe_flush(int minor, int mode)
{
	struct xnpipe_state *state;
	int msgcount;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	msgcount = countq(&state->outq) + countq(&state->inq);

	if (mode & XNPIPE_OFLUSH)
		state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);

	if (mode & XNPIPE_IFLUSH)
		xnpipe_flushq(state, inq, free_ibuf, s);

	if (testbits(state->status, XNPIPE_USER_WSYNC) &&
	    msgcount > countq(&state->outq) + countq(&state->inq)) {
		__setbits(state->status, XNPIPE_USER_WSYNC_READY);
		xnpipe_schedule_request();
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
Example No. 23
static inline int xntimer_heading_p(struct xntimer *timer)
{
	struct xnsched *sched = timer->sched;
	xntimerq_it_t it;
	xntimerh_t *h;

	h = xntimerq_it_begin(&sched->timerqueue, &it);
	if (h == &timer->aplink)
		return 1;

	if (testbits(sched->status, XNHDEFER)) {
		h = xntimerq_it_next(&sched->timerqueue, &it, h);
		if (h == &timer->aplink)
			return 1;
	}

	return 0;
}
Example No. 24
void xnsynch_forget_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner, *target;
	struct xnpholder *h;

	trace_mark(xn_nucleus, synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		owner = synch->owner;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else {
			target = link2thread(getheadpq(&synch->pendq), plink);
			h = getheadpq(&owner->claimq);
			if (w_cprio(target) != h->prio) {
				/*
				 * Reorder the claim queue, and lower
				 * the priority to the required
				 * minimum needed to prevent priority
				 * inversion.
				 */
				removepq(&owner->claimq, &synch->link);
				insertpqf(&owner->claimq, &synch->link,
					  w_cprio(target));

				h = getheadpq(&owner->claimq);
				if (h->prio < w_cprio(owner))
					xnsynch_renice_thread(owner, target);
			}
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
Example No. 25
static int timer_vfile_show(struct xnvfile_regular_iterator *it, void *data)
{
	const char *tm_status, *wd_status = "";

	if (xnpod_active_p() && xntbase_enabled_p(&nktbase)) {
		tm_status = testbits(nktbase.status, XNTBLCK) ? "locked" : "on";
#ifdef CONFIG_XENO_OPT_WATCHDOG
		wd_status = "+watchdog";
#endif /* CONFIG_XENO_OPT_WATCHDOG */
	} else
		tm_status = "off";

	xnvfile_printf(it,
		       "status=%s%s:setup=%Lu:clock=%Lu:timerdev=%s:clockdev=%s\n",
		       tm_status, wd_status, xnarch_tsc_to_ns(nktimerlat),
		       xntbase_get_rawclock(&nktbase),
		       XNARCH_TIMER_DEVICE, XNARCH_CLOCK_DEVICE);
	return 0;
}
Example No. 26
static int xnpipe_release(struct inode *inode, struct file *file)
{
	struct xnpipe_state *state = file->private_data;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
	xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);

	if (testbits(state->status, XNPIPE_KERN_CONN)) {
		/* Unblock waiters. */
		if (xnsynch_nsleepers(&state->synchbase) > 0) {
			xnsynch_flush(&state->synchbase, XNRMID);
			xnpod_schedule();
		}
	}

	if (state->ops.input)
		state->ops.input(NULL, -EPIPE, state->xstate);

	if (state->asyncq) {	/* Clear the async queue */
		removeq(&xnpipe_asyncq, &state->alink);
		__clrbits(state->status, XNPIPE_USER_SIGIO);
		xnlock_put_irqrestore(&nklock, s);
		fasync_helper(-1, file, 0, &state->asyncq);
		xnlock_get_irqsave(&nklock, s);
	}

	xnpipe_cleanup_user_conn(state, s);
	/*
	 * The extra state may not be available from now on, if
	 * xnpipe_disconnect() entered lingering close before we got
	 * there; so calling xnpipe_cleanup_user_conn() should be the
	 * last thing we do.
	 */
	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
Example No. 27
static int xntimer_start_periodic(xntimer_t *timer,
				  xnticks_t value, xnticks_t interval,
				  xntmode_t mode)
{
	trace_mark(xn_nucleus, timer_start,
		   "timer %p base %s value %Lu interval %Lu mode %u", timer,
		   xntimer_base(timer)->name, value, interval, mode);

	if (!testbits(timer->status, XNTIMER_DEQUEUED))
		xntimer_dequeue_periodic(timer);

	__clrbits(timer->status,
		  XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
	switch (mode) {
	case XN_RELATIVE:
		if ((xnsticks_t)value < 0)
			return -ETIMEDOUT;
		value += timer->base->jiffies;
		break;
	case XN_REALTIME:
		__setbits(timer->status, XNTIMER_REALTIME);
		value -= timer->base->wallclock_offset;
		/* fall through */
	default: /* XN_ABSOLUTE || XN_REALTIME */
		if ((xnsticks_t)(value - timer->base->jiffies) <= 0)
			return -ETIMEDOUT;
		break;
	}

	xntlholder_date(&timer->plink) = value;
	timer->interval = interval;
	if (interval != XN_INFINITE) {
		__setbits(timer->status, XNTIMER_PERIODIC);
		timer->pexpect = value;
	}

	xntimer_enqueue_periodic(timer);

	return 0;
}
Example No. 28
void xnsynch_forget_sleeper(xnthread_t *thread)
{
	xnsynch_t *synch = thread->wchan;

	trace_mark(xn_nucleus_synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		xnthread_t *owner = synch->owner;
		int rprio;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else if (getheadpq(&synch->pendq)->prio !=
			 getheadpq(&owner->claimq)->prio) {
			/* Reorder the claim queue, and lower the priority to the
			   required minimum needed to prevent priority
			   inversion. */
			removepq(&owner->claimq, &synch->link);

			insertpqf(&owner->claimq,
				  &synch->link, getheadpq(&synch->pendq)->prio);

			rprio = getheadpq(&owner->claimq)->prio;

			if (rprio < owner->cprio)
				xnsynch_renice_thread(owner, rprio);
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
Example No. 29
static unsigned xnpipe_poll(struct file *file, poll_table *pt)
{
	struct xnpipe_state *state = file->private_data;
	unsigned r_mask = 0, w_mask = 0;
	spl_t s;

	poll_wait(file, &state->readq, pt);

	xnlock_get_irqsave(&nklock, s);

	if (testbits(state->status, XNPIPE_KERN_CONN))
		w_mask |= (POLLOUT | POLLWRNORM);
	else
		r_mask |= POLLHUP;

	if (!emptyq_p(&state->outq))
		r_mask |= (POLLIN | POLLRDNORM);
	else
		/*
		 * Processes which have issued a poll request that timed
		 * out will remain linked to the sleepers queue, and will
		 * be silently unlinked the next time the Xenomai side
		 * kicks xnpipe_wakeup_proc.
		 */
		xnpipe_enqueue_wait(state, XNPIPE_USER_WREAD);

	xnlock_put_irqrestore(&nklock, s);

	/*
	 * A descriptor is always ready for writing with the current
	 * implementation, so there is no need to have/handle the
	 * writeq queue so far.
	 */

	return r_mask | w_mask;
}
Example No. 30
static inline int xnpipe_minor_alloc(int minor)
{
	spl_t s;

	if ((minor < 0 && minor != XNPIPE_MINOR_AUTO) || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	xnlock_get_irqsave(&nklock, s);

	if (minor == XNPIPE_MINOR_AUTO)
		minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS);

	if (minor == XNPIPE_NDEVS ||
	    testbits(xnpipe_bitmap[minor / BITS_PER_LONG],
		     1UL << (minor % BITS_PER_LONG)))
		minor = -EBUSY;
	else
		__setbits(xnpipe_bitmap[minor / BITS_PER_LONG],
			  1UL << (minor % BITS_PER_LONG));

	xnlock_put_irqrestore(&nklock, s);

	return minor;
}