Example No. 1
void xntbase_start(xntbase_t *base)
{
	xnticks_t start_date;
	spl_t s;

	if (base == &nktbase || xntbase_enabled_p(base))
		return;

	trace_mark(xn_nucleus, tbase_start, "base %s", base->name);

	xnlock_get_irqsave(&nklock, s);

	start_date = xnarch_get_cpu_time();

	/* Only synchronise non-isolated time bases on the master base. */
	if (!xntbase_isolated_p(base)) {
		base->wallclock_offset = xntbase_ns2ticks(base,
			start_date + nktbase.wallclock_offset);
		__setbits(base->status, XNTBSET);
	}

	start_date += base->tickvalue;
	__setbits(base->status, XNTBRUN);

	xnlock_put_irqrestore(&nklock, s);

	xntslave_start(base2slave(base), start_date, base->tickvalue);
}
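Every example in this listing drives a status word through the __setbits/__clrbits/testbits helpers, normally with the relevant lock held (nklock in most of the nucleus code below). The following self-contained sketch shows what such helpers amount to; the macro bodies and the flag values are illustrative assumptions, not the actual Xenomai definitions.

/*
 * Illustrative stand-ins for the nucleus bit helpers used throughout
 * these examples: plain read-modify-write on a flags word, meant to be
 * called with the relevant lock held. These macro bodies and flag
 * values are assumptions for the demo, not the actual Xenomai
 * definitions.
 */
#include <stdio.h>

typedef unsigned long xnflags_t;

#define __setbits(flags, mask)	((flags) |= (mask))
#define __clrbits(flags, mask)	((flags) &= ~(mask))
#define testbits(flags, mask)	((flags) & (mask))

#define XNTBRUN  0x1	/* sample flag: time base is running */
#define XNTBSET  0x2	/* sample flag: wallclock offset is set */

int main(void)
{
	xnflags_t status = 0;

	__setbits(status, XNTBRUN | XNTBSET);
	__clrbits(status, XNTBSET);
	printf("running=%d set=%d\n",
	       testbits(status, XNTBRUN) != 0, testbits(status, XNTBSET) != 0);
	return 0;
}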
Example No. 2
ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
{
	struct xnpipe_state *state;
	int need_sched = 0;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (size <= sizeof(*mh))
		return -EINVAL;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = size - sizeof(*mh);
	xnpipe_m_rdoff(mh) = 0;
	state->ionrd += xnpipe_m_size(mh);

	if (flags & XNPIPE_URGENT)
		prependq(&state->outq, xnpipe_m_link(mh));
	else
		appendq(&state->outq, xnpipe_m_link(mh));

	if (!testbits(state->status, XNPIPE_USER_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return (ssize_t) size;
	}

	if (testbits(state->status, XNPIPE_USER_WREAD)) {
		/*
		 * Wake up the regular Linux task waiting for input
		 * from the Xenomai side.
		 */
		__setbits(state->status, XNPIPE_USER_WREAD_READY);
		need_sched = 1;
	}

	if (state->asyncq) {	/* Schedule asynch sig. */
		__setbits(state->status, XNPIPE_USER_SIGIO);
		need_sched = 1;
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t) size;
}
Example No. 3
int xntimer_start_aperiodic(xntimer_t *timer,
			    xnticks_t value, xnticks_t interval,
			    xntmode_t mode)
{
	xnticks_t date, now;

	trace_mark(xn_nucleus, timer_start,
		   "timer %p base %s value %Lu interval %Lu mode %u",
		   timer, xntimer_base(timer)->name, value, interval, mode);

	if (!testbits(timer->status, XNTIMER_DEQUEUED))
		xntimer_dequeue_aperiodic(timer);

	now = xnarch_get_cpu_tsc();

	__clrbits(timer->status,
		  XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
	switch (mode) {
	case XN_RELATIVE:
		if ((xnsticks_t)value < 0)
			return -ETIMEDOUT;
		date = xnarch_ns_to_tsc(value) + now;
		break;
	case XN_REALTIME:
		__setbits(timer->status, XNTIMER_REALTIME);
		value -= nktbase.wallclock_offset;
		/* fall through */
	default: /* XN_ABSOLUTE || XN_REALTIME */
		date = xnarch_ns_to_tsc(value);
		if ((xnsticks_t)(date - now) <= 0)
			return -ETIMEDOUT;
		break;
	}

	xntimerh_date(&timer->aplink) = date;

	timer->interval = XN_INFINITE;
	if (interval != XN_INFINITE) {
		timer->interval = xnarch_ns_to_tsc(interval);
		timer->pexpect = date;
		__setbits(timer->status, XNTIMER_PERIODIC);
	}

	xntimer_enqueue_aperiodic(timer);
	if (xntimer_heading_p(timer)) {
		if (xntimer_sched(timer) != xnpod_current_sched())
			xntimer_next_remote_shot(xntimer_sched(timer));
		else
			xntimer_next_local_shot(xntimer_sched(timer));
	}

	return 0;
}
Example No. 4
int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
{
	struct xnpipe_state *state;
	int need_sched = 0, ret;
	spl_t s;

	minor = xnpipe_minor_alloc(minor);
	if (minor < 0)
		return minor;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	ret = xnpipe_set_ops(state, ops);
	if (ret) {
		xnlock_put_irqrestore(&nklock, s);
		return ret;
	}

	__setbits(state->status, XNPIPE_KERN_CONN);
	xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
	state->xstate = xstate;
	state->ionrd = 0;

	if (testbits(state->status, XNPIPE_USER_CONN)) {
		if (testbits(state->status, XNPIPE_USER_WREAD)) {
			/*
			 * Wake up the regular Linux task waiting for
			 * the kernel side to connect (xnpipe_open).
			 */
			__setbits(state->status, XNPIPE_USER_WREAD_READY);
			need_sched = 1;
		}

		if (state->asyncq) {	/* Schedule asynch sig. */
			__setbits(state->status, XNPIPE_USER_SIGIO);
			need_sched = 1;
		}
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return minor;
}
Example No. 5
int xnintr_attach(xnintr_t *intr, void *cookie)
{
	int ret;

	trace_mark(xn_nucleus, irq_attach, "irq %u name %s",
		   intr->irq, intr->name);

	intr->cookie = cookie;
	memset(&intr->stat, 0, sizeof(intr->stat));

#ifdef CONFIG_SMP
	xnarch_set_irq_affinity(intr->irq, nkaffinity);
#endif /* CONFIG_SMP */

	down(&intrlock);

	if (__testbits(intr->flags, XN_ISR_ATTACHED)) {
		ret = -EBUSY;
		goto out;
	}

	ret = xnintr_irq_attach(intr);
	if (ret)
		goto out;

	__setbits(intr->flags, XN_ISR_ATTACHED);
	xnintr_stat_counter_inc();
out:
	up(&intrlock);

	return ret;
}
Example No. 6
_WCRTLINK CHAR_TYPE *__F_NAME(strpbrk,wcspbrk) ( const CHAR_TYPE *str, const CHAR_TYPE *charset )
{
#if defined(__WIDECHAR__)

    const CHAR_TYPE     *p1;
    const CHAR_TYPE     *p2;
    CHAR_TYPE           tc1;
    CHAR_TYPE           tc2;
    size_t              len;

    len = 0;
    for( p1 = str; tc1 = *p1; p1++, len++ ) {
        for( p2 = charset; tc2 = *p2; p2++ ) {
            if( tc1 == tc2 )
                break;
        }
        if( tc2 != NULLCHAR )
            return( (CHAR_TYPE *)p1 );
    }
    return( NULL );

#else
    char            tc;
    unsigned char   vector[ CHARVECTOR_SIZE ];

    __setbits( vector, charset );
    for( ; tc = *str; ++str ) {
        /* quit when we find any char in charset */
        if( GETCHARBIT( vector, tc ) != 0 )
             return( (char *)str );
    }
    return( NULL );
#endif
}
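The narrow-character branch of strpbrk above builds a 256-bit membership table with __setbits(vector, charset) and then tests each input character with GETCHARBIT, one shift and mask per character. Here is a self-contained sketch of that bit-vector technique; the helper names (build_vector, char_in_set) are hypothetical stand-ins for the library's macros.

/*
 * Illustrative sketch of the bit-vector technique used by the
 * narrow-char branch above: build a 256-bit membership table from the
 * charset, then test each character with one shift and mask. The
 * helper names are hypothetical; the Watcom library's __setbits and
 * GETCHARBIT serve the same purpose.
 */
#include <stdio.h>
#include <string.h>

#define VECTOR_SIZE 32	/* 256 bits / 8 */

static void build_vector(unsigned char *vector, const char *charset)
{
	memset(vector, 0, VECTOR_SIZE);
	for (; *charset != '\0'; ++charset) {
		unsigned char c = (unsigned char)*charset;
		vector[c >> 3] |= (unsigned char)(1u << (c & 0x07));
	}
}

static int char_in_set(const unsigned char *vector, unsigned char c)
{
	return (vector[c >> 3] >> (c & 0x07)) & 1;
}

int main(void)
{
	unsigned char vector[VECTOR_SIZE];
	const char *s = "hello, world";

	build_vector(vector, ",!");
	for (; *s != '\0'; ++s)
		if (char_in_set(vector, (unsigned char)*s))
			break;
	printf("first char from set: %s\n", *s ? s : "(none)");
	return 0;
}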
Example No. 7
static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask)
{
	if (state->wcount != 0x7fffffff && state->wcount++ == 0)
		appendq(&xnpipe_sleepq, &state->slink);

	__setbits(state->status, mask);
}
Example No. 8
_WCRTLINK size_t __F_NAME(strcspn,wcscspn) ( const CHAR_TYPE *str, const CHAR_TYPE *charset )
{
#if defined(__WIDECHAR__)

    const CHAR_TYPE             *p1;
    const CHAR_TYPE             *p2;
    CHAR_TYPE           tc1;
    CHAR_TYPE           tc2;
    size_t              len;

    len = 0;
    for( p1 = str; tc1 = *p1; p1++, len++ ) {
        for( p2 = charset; tc2 = *p2; p2++ ) {
            if( tc1 == tc2 ) break;
        }
        if( tc2 != NULLCHAR ) break;
    }
    return( len );

#else
    unsigned /*char*/ tc;
    unsigned char vector[32];
    size_t len;

    __setbits( vector, charset );
    len = 0;
    for( ; (tc = (unsigned char) *str); ++len, ++str ) {
        /* quit if we find any char in charset */
        if( ( vector[ tc >> 3 ] & _Bits[ tc & 0x07 ] ) != 0 )  break;
    }
    return( len );
#endif
}
Example No. 9
void xnsynch_renice_sleeper(xnthread_t *thread)
{
	xnsynch_t *synch = thread->wchan;
	xnthread_t *owner;

	if (!testbits(synch->status, XNSYNCH_PRIO))
		return;

	removepq(&synch->pendq, &thread->plink);
	insertpqf(&synch->pendq, &thread->plink, thread->cprio);
	owner = synch->owner;

	if (owner != NULL && thread->cprio > owner->cprio) {
		/* The new priority of the sleeping thread is higher
		 * than the priority of the current owner of the
		 * resource: we need to update the PI state. */
		if (testbits(synch->status, XNSYNCH_CLAIMED)) {
			/* The resource is already claimed, just
			   reorder the claim queue. */
			removepq(&owner->claimq, &synch->link);
			insertpqf(&owner->claimq, &synch->link, thread->cprio);
		} else {
			/* The resource was NOT claimed, claim it now
			 * and boost the owner. */
			__setbits(synch->status, XNSYNCH_CLAIMED);
			insertpqf(&owner->claimq, &synch->link, thread->cprio);
			owner->bprio = owner->cprio;
			xnthread_set_state(owner, XNBOOST);
		}
		/* Renice the owner thread, progressing in the PI
		   chain as needed. */
		xnsynch_renice_thread(owner, thread->cprio);
	}
}
Example No. 10
void xntslave_destroy(xntslave_t *slave)
{
	int nr_cpus, cpu, n;
	spl_t s;

	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {

		struct percpu_cascade *pc = &slave->cascade[cpu];

		xnlock_get_irqsave(&nklock, s);
		xntimer_destroy(&pc->timer);
		xnlock_put_irqrestore(&nklock, s);

		for (n = 0; n < XNTIMER_WHEELSIZE; n++) {

			xnqueue_t *timerq = &pc->wheel[n];
			xntlholder_t *holder;

			while ((holder = xntlist_head(timerq)) != NULL) {
				__setbits(plink2timer(holder)->status, XNTIMER_DEQUEUED);
				xntlist_remove(timerq, holder);
			}
		}
	}
}
Example No. 11
void xntimer_freeze(void)
{
	int nr_cpus, cpu;
	spl_t s;

	trace_mark(xn_nucleus, timer_freeze, MARK_NOARGS);

	xnlock_get_irqsave(&nklock, s);

	nr_cpus = xnarch_num_online_cpus();

	for (cpu = 0; cpu < nr_cpus; cpu++) {

		xntimerq_t *timerq = &xnpod_sched_slot(cpu)->timerqueue;
		xntimerh_t *holder;

		while ((holder = xntimerq_head(timerq)) != NULL) {
			__setbits(aplink2timer(holder)->status, XNTIMER_DEQUEUED);
			xntimerq_remove(timerq, holder);
		}

		/* Dequeuing all timers from the master time base
		 * freezes all slave time bases the same way, so there
		 * is no need to handle anything more here. */
	}

	xnlock_put_irqrestore(&nklock, s);
}
Example No. 12
static inline void xntimer_dequeue_periodic(xntimer_t *timer)
{
	unsigned slot = (xntlholder_date(&timer->plink) & XNTIMER_WHEELMASK);
	unsigned cpu = xnsched_cpu(timer->sched);
	struct percpu_cascade *pc = &base2slave(timer->base)->cascade[cpu];
	xntlist_remove(&pc->wheel[slot], &timer->plink);
	__setbits(timer->status, XNTIMER_DEQUEUED);
}
Example No. 13
void addmlq(struct xnsched_mlq *q,
	    struct xnpholder *h, int idx, int lifo)
{
	struct xnqueue *queue = &q->queue[idx];
	int hi = idx / BITS_PER_LONG;
	int lo = idx % BITS_PER_LONG;

	if (lifo)
		prependq(queue, &h->plink);
	else
		appendq(queue, &h->plink);

	h->prio = idx;
	q->elems++;
	__setbits(q->himap, 1UL << hi);
	__setbits(q->lomap[hi], 1UL << lo);
}
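addmlq maintains a two-level occupancy bitmap next to the per-index queues: the bit set in lomap[hi] marks a non-empty queue, and the bit set in himap marks a lomap word that has at least one bit set, so the first occupied index can be found with two bit scans. The sketch below illustrates that lookup; the helper names and the scan direction are assumptions for illustration, not the nucleus' actual dequeue-side code.

/*
 * Sketch of a two-level occupancy bitmap like the himap/lomap pair that
 * addmlq() updates above: each set bit in lomap[hi] marks a non-empty
 * queue, and each set bit in himap marks a lomap word with at least one
 * bit set, so the first occupied index is found with two bit scans.
 * Helper names and scan direction are illustrative assumptions.
 */
#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define NR_INDEXES	256

static unsigned long himap;
static unsigned long lomap[NR_INDEXES / BITS_PER_LONG];

/* Index of the least significant set bit; word must be non-zero. */
static int first_set(unsigned long word)
{
	int bit = 0;

	while ((word & 1UL) == 0) {
		word >>= 1;
		bit++;
	}
	return bit;
}

static void mark_nonempty(int idx)
{
	int hi = idx / BITS_PER_LONG, lo = idx % BITS_PER_LONG;

	himap |= 1UL << hi;
	lomap[hi] |= 1UL << lo;
}

static int find_first_nonempty(void)
{
	int hi;

	if (himap == 0)
		return -1;	/* every queue is empty */

	hi = first_set(himap);
	return hi * BITS_PER_LONG + first_set(lomap[hi]);
}

int main(void)
{
	mark_nonempty(70);
	mark_nonempty(3);
	printf("first occupied index: %d\n", find_first_nonempty());
	return 0;
}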
Example No. 14
static int xnpipe_open(struct inode *inode, struct file *file)
{
	int minor, err = 0, sigpending;
	struct xnpipe_state *state;
	spl_t s;

	minor = MINOR(inode->i_rdev);

	if (minor >= XNPIPE_NDEVS)
		return -ENXIO;	/* TssTss... stop playing with mknod() ;o) */

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	/* Enforce exclusive open for the message queues. */
	if (testbits(state->status, XNPIPE_USER_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBUSY;
	}

	__setbits(state->status, XNPIPE_USER_CONN);

	file->private_data = state;
	init_waitqueue_head(&state->readq);
	init_waitqueue_head(&state->syncq);
	state->wcount = 0;

	__clrbits(state->status,
		  XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY |
		  XNPIPE_USER_SIGIO);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		if (testbits(file->f_flags, O_NONBLOCK)) {
			xnpipe_cleanup_user_conn(state, s);
			xnlock_put_irqrestore(&nklock, s);
			return -EWOULDBLOCK;
		}

		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
					 testbits(state->status,
						  XNPIPE_KERN_CONN));
		if (sigpending) {
			xnpipe_cleanup_user_conn(state, s);
			xnlock_put_irqrestore(&nklock, s);
			return -ERESTARTSYS;
		}
	}

	if (err)
		xnpipe_cleanup_user_conn(state, s);

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example No. 15
int xnmap_remove(xnmap_t *map, int key)
{
	int ofkey = key - map->offset, hi, lo;
	spl_t s;

	if (ofkey < 0 || ofkey >= map->nkeys)
		return -ESRCH;

	hi = ofkey / BITS_PER_LONG;
	lo = ofkey % BITS_PER_LONG;
	xnlock_get_irqsave(&nklock, s);
	map->objarray[ofkey] = NULL;
	__setbits(map->himap, 1UL << hi);
	__setbits(map->lomap[hi], 1UL << lo);
	--map->ukeys;
	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
Example No. 16
static inline int xnintr_irq_attach(xnintr_t *intr)
{
	xnintr_irq_t *shirq = &xnirqs[intr->irq];
	xnintr_t *prev, **p = &shirq->handlers;
	int err;

	if (intr->irq >= RTHAL_NR_IRQS)
		return -EINVAL;

	if (__testbits(intr->flags, XN_ISR_ATTACHED))
		return -EPERM;

	if ((prev = *p) != NULL) {
		/* Check whether shared mode is allowed. */
		if (!(prev->flags & intr->flags & XN_ISR_SHARED) ||
		    (prev->iack != intr->iack)
		    || ((prev->flags & XN_ISR_EDGE) !=
			(intr->flags & XN_ISR_EDGE)))
			return -EBUSY;

		/* Get a position at the end of the list to insert the new element. */
		while (prev) {
			p = &prev->next;
			prev = *p;
		}
	} else {
		/* Initialize the corresponding interrupt channel */
		void (*handler) (unsigned, void *) = &xnintr_irq_handler;

		if (intr->flags & XN_ISR_SHARED) {
			if (intr->flags & XN_ISR_EDGE)
				handler = &xnintr_edge_shirq_handler;
			else
				handler = &xnintr_shirq_handler;

		}
		shirq->unhandled = 0;

		err = xnarch_hook_irq(intr->irq, handler, intr->iack, intr);
		if (err)
			return err;
	}

	__setbits(intr->flags, XN_ISR_ATTACHED);

	intr->next = NULL;

	/* Add the given interrupt object. No need to synchronise with the IRQ
	   handler, we are only extending the chain. */
	*p = intr;

	return 0;
}
Example No. 17
static int xntimer_start_periodic(xntimer_t *timer,
				  xnticks_t value, xnticks_t interval,
				  xntmode_t mode)
{
	trace_mark(xn_nucleus, timer_start,
		   "timer %p base %s value %Lu interval %Lu mode %u", timer,
		   xntimer_base(timer)->name, value, interval, mode);

	if (!testbits(timer->status, XNTIMER_DEQUEUED))
		xntimer_dequeue_periodic(timer);

	__clrbits(timer->status,
		  XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
	switch (mode) {
	case XN_RELATIVE:
		if ((xnsticks_t)value < 0)
			return -ETIMEDOUT;
		value += timer->base->jiffies;
		break;
	case XN_REALTIME:
		__setbits(timer->status, XNTIMER_REALTIME);
		value -= timer->base->wallclock_offset;
		/* fall through */
	default: /* XN_ABSOLUTE || XN_REALTIME */
		if ((xnsticks_t)(value - timer->base->jiffies) <= 0)
			return -ETIMEDOUT;
		break;
	}

	xntlholder_date(&timer->plink) = value;
	timer->interval = interval;
	if (interval != XN_INFINITE) {
		__setbits(timer->status, XNTIMER_PERIODIC);
		timer->pexpect = value;
	}

	xntimer_enqueue_periodic(timer);

	return 0;
}
Example No. 18
void xntimer_destroy(xntimer_t *timer)
{
	spl_t s;
	
	xnlock_get_irqsave(&nklock, s);
	xntimer_stop(timer);
	__setbits(timer->status, XNTIMER_KILLED);
	timer->sched = NULL;
#ifdef CONFIG_XENO_OPT_STATS
	removeq(&xntimer_base(timer)->timerq, &timer->tblink);
	xntimer_base(timer)->timerq_rev++;
#endif /* CONFIG_XENO_OPT_STATS */
	xnlock_put_irqrestore(&nklock, s);
}
Example No. 19
void xnintr_clock_handler(void)
{
	xnstat_exectime_t *prev;
	struct xnsched *sched;
	unsigned cpu;

	cpu = xnarch_current_cpu();

	if (!cpumask_test_cpu(cpu, &xnarch_supported_cpus)) {
		xnarch_relay_tick();
		return;
	}

	sched = xnpod_sched_slot(cpu);

	prev = xnstat_exectime_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account);
	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
		sched = xnpod_current_sched();
	}
	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
}
Example No. 20
void xnsynch_requeue_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner;

	if (!testbits(synch->status, XNSYNCH_PRIO))
		return;

	removepq(&synch->pendq, &thread->plink);
	insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));
	owner = synch->owner;

	if (owner != NULL && w_cprio(thread) > w_cprio(owner)) {
		/*
		 * The new (weighted) priority of the sleeping thread
		 * is higher than the priority of the current owner of
		 * the resource: we need to update the PI state.
		 */
		if (testbits(synch->status, XNSYNCH_CLAIMED)) {
			/*
			 * The resource is already claimed, just
			 * reorder the claim queue.
			 */
			removepq(&owner->claimq, &synch->link);
			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
		} else {
			/*
			 * The resource was NOT claimed, claim it now
			 * and boost the owner.
			 */
			__setbits(synch->status, XNSYNCH_CLAIMED);
			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}
		}
		/*
		 * Renice the owner thread, progressing in the PI
		 * chain as needed.
		 */
		xnsynch_renice_thread(owner, thread);
	}
}
Example No. 21
void xntimer_tick_periodic_inner(xntslave_t *slave)
{
	xnsched_t *sched = xnpod_current_sched();
	xntbase_t *base = &slave->base;
	xntlholder_t *holder;
	xnqueue_t *timerq;
	xntimer_t *timer;

	/*
	 * Update the periodic clocks, keeping them strictly
	 * monotonic (this routine runs on every CPU, but only CPU
	 * XNTIMER_KEEPER_ID should do the update).
	 */
	if (sched == xnpod_sched_slot(XNTIMER_KEEPER_ID))
		++base->jiffies;

	timerq = &slave->cascade[xnsched_cpu(sched)].wheel[base->jiffies & XNTIMER_WHEELMASK];

	while ((holder = xntlist_head(timerq)) != NULL) {
		timer = plink2timer(holder);

		if ((xnsticks_t) (xntlholder_date(&timer->plink)
				  - base->jiffies) > 0)
			break;

		trace_mark(xn_nucleus, timer_expire, "timer %p", timer);

		xntimer_dequeue_periodic(timer);
		xnstat_counter_inc(&timer->fired);
		timer->handler(timer);

		if (!xntimer_reload_p(timer))
			continue;

		__setbits(timer->status, XNTIMER_FIRED);
		xntlholder_date(&timer->plink) = base->jiffies + timer->interval;
		xntimer_enqueue_periodic(timer);
	}

	xnsched_tick(sched->curr, base); /* Do time-slicing if required. */
}
Example No. 22
int xnintr_attach(xnintr_t *intr, void *cookie)
{
	int ret;
	spl_t s;

	trace_mark(xn_nucleus, irq_attach, "irq %u name %s",
		   intr->irq, intr->name);

	intr->cookie = cookie;
	memset(&intr->stat, 0, sizeof(intr->stat));

#ifdef CONFIG_SMP
	xnarch_set_irq_affinity(intr->irq, nkaffinity);
#endif /* CONFIG_SMP */

	xnlock_get_irqsave(&intrlock, s);

	if (intr->irq >= XNARCH_NR_IRQS) {
		ret = -EINVAL;
		goto out;
	}

	if (__testbits(intr->flags, XN_ISR_ATTACHED)) {
		ret = -EBUSY;
		goto out;
	}

	ret = xnintr_irq_attach(intr);
	if (ret)
		goto out;

	__setbits(intr->flags, XN_ISR_ATTACHED);
	xnintr_stat_counter_inc();
out:
	xnlock_put_irqrestore(&intrlock, s);

	return ret;
}
Example No. 23
void xntbase_adjust_time(xntbase_t *base, xnsticks_t delta)
{
	xnticks_t now;

#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
	if (xntbase_isolated_p(base)) {
		/* Only update the specified isolated base. */
		base->wallclock_offset += delta;
		__setbits(base->status, XNTBSET);
		xntslave_adjust(base2slave(base), delta);

	} else {
		xnholder_t *holder;
#endif /* CONFIG_XENO_OPT_TIMING_PERIODIC */
		/* Update all non-isolated bases in the system. */
		nktbase.wallclock_offset += xntbase_ticks2ns(base, delta);
		now = xnarch_get_cpu_time() + nktbase.wallclock_offset;
		xntimer_adjust_all_aperiodic(xntbase_ticks2ns(base, delta));

#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		for (holder = getheadq(&nktimebaseq);
		     holder != NULL; holder = nextq(&nktimebaseq, holder)) {
			xntbase_t *tbase = link2tbase(holder);
			if (tbase == &nktbase || xntbase_isolated_p(tbase))
				continue;

			tbase->wallclock_offset =
				xntbase_ns2ticks(tbase, now) -
				xntbase_get_jiffies(tbase);
			xntslave_adjust(base2slave(tbase), delta);
		}
	}
#endif /* CONFIG_XENO_OPT_TIMING_PERIODIC */

	trace_mark(xn_nucleus, tbase_adjust, "base %s delta %Lu",
		   base->name, delta);
}
Example No. 24
static inline int xnpipe_minor_alloc(int minor)
{
	spl_t s;

	if ((minor < 0 && minor != XNPIPE_MINOR_AUTO) || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	xnlock_get_irqsave(&nklock, s);

	if (minor == XNPIPE_MINOR_AUTO)
		minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS);

	if (minor == XNPIPE_NDEVS ||
	    testbits(xnpipe_bitmap[minor / BITS_PER_LONG],
		     1UL << (minor % BITS_PER_LONG)))
		minor = -EBUSY;
	else
		__setbits(xnpipe_bitmap[minor / BITS_PER_LONG],
			  1UL << (minor % BITS_PER_LONG));

	xnlock_put_irqrestore(&nklock, s);

	return minor;
}
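xnpipe_minor_alloc hands out device minors from a bitmap: an explicit request fails if its bit is already set, while XNPIPE_MINOR_AUTO takes the first clear bit. The standalone sketch below mirrors that allocation pattern with plain loops in place of find_first_zero_bit and testbits; the sizes, names, and return codes are made up for the demo.

/*
 * Standalone sketch of bitmap-based minor allocation, mirroring the
 * logic of xnpipe_minor_alloc() above: either claim a specific slot or
 * scan for the first free one. Sizes and names are illustrative.
 */
#include <stdio.h>

#define NDEVS		8
#define MINOR_AUTO	(-1)
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

static unsigned long bitmap[(NDEVS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int minor_alloc(int minor)
{
	if ((minor < 0 && minor != MINOR_AUTO) || minor >= NDEVS)
		return -1;	/* no such device */

	if (minor == MINOR_AUTO) {
		/* Scan for the first clear bit. */
		for (minor = 0; minor < NDEVS; minor++)
			if (!(bitmap[minor / BITS_PER_LONG] &
			      (1UL << (minor % BITS_PER_LONG))))
				break;
	}

	if (minor == NDEVS ||
	    (bitmap[minor / BITS_PER_LONG] & (1UL << (minor % BITS_PER_LONG))))
		return -2;	/* busy */

	bitmap[minor / BITS_PER_LONG] |= 1UL << (minor % BITS_PER_LONG);
	return minor;
}

int main(void)
{
	printf("explicit 2 -> %d\n", minor_alloc(2));		/* 2 */
	printf("auto       -> %d\n", minor_alloc(MINOR_AUTO));	/* 0 */
	printf("explicit 2 -> %d\n", minor_alloc(2));		/* -2, taken */
	return 0;
}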
Example No. 25
int xnpipe_flush(int minor, int mode)
{
	struct xnpipe_state *state;
	int msgcount;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	msgcount = countq(&state->outq) + countq(&state->inq);

	if (mode & XNPIPE_OFLUSH)
		state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);

	if (mode & XNPIPE_IFLUSH)
		xnpipe_flushq(state, inq, free_ibuf, s);

	if (testbits(state->status, XNPIPE_USER_WSYNC) &&
	    msgcount > countq(&state->outq) + countq(&state->inq)) {
		__setbits(state->status, XNPIPE_USER_WSYNC_READY);
		xnpipe_schedule_request();
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
Example No. 26
xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
			  xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread(), *owner;
	xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
	const int use_fastlock = xnsynch_fastlock_p(synch);
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);

      redo:

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

		fastlock = xnarch_atomic_cmpxchg(lockp,
						 XN_NO_HANDLE, threadh);

		if (likely(fastlock == XN_NO_HANDLE)) {
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			return 0;
		}

		xnlock_get_irqsave(&nklock, s);

		/* Set the claimed bit.
		   If it already appears to be set, re-read the lock state
		   under nklock so that we don't miss any change between the
		   lock-less read and here, while still avoiding cmpxchg
		   where possible. Only when it appears not to be set do we
		   start with cmpxchg directly. */
		if (xnsynch_fast_is_claimed(fastlock)) {
			old = xnarch_atomic_get(lockp);
			goto test_no_owner;
		}
		do {
			old = xnarch_atomic_cmpxchg(lockp, fastlock,
					xnsynch_fast_set_claimed(fastlock, 1));
			if (likely(old == fastlock))
				break;

		  test_no_owner:
			if (old == XN_NO_HANDLE) {
				/* Owner called xnsynch_release
				   (on another cpu) */
				xnlock_put_irqrestore(&nklock, s);
				goto redo;
			}
			fastlock = old;
		} while (!xnsynch_fast_is_claimed(fastlock));

		owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));

		if (!owner) {
			/* The handle is broken, therefore pretend that the synch
			   object was deleted to signal an error. */
			xnthread_set_info(thread, XNRMID);
			goto unlock_and_exit;
		}

		xnsynch_set_owner(synch, owner);
	} else {
		xnlock_get_irqsave(&nklock, s);

		owner = synch->owner;

		if (!owner) {
			synch->owner = thread;
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			goto unlock_and_exit;
		}
	}

	xnsynch_detect_relaxed_owner(synch, thread);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else if (w_cprio(thread) > w_cprio(owner)) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto grab_and_exit;
		}

		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

		if (testbits(synch->status, XNSYNCH_PIP)) {
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}

			if (testbits(synch->status, XNSYNCH_CLAIMED))
				removepq(&owner->claimq, &synch->link);
			else
				__setbits(synch->status, XNSYNCH_CLAIMED);

			insertpqf(&owner->claimq, &synch->link, w_cprio(thread));
			xnsynch_renice_thread(owner, thread);
		}
	} else
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole the ownership from us while we were ready
		   to run, waiting for the CPU: we need to wait again
		   for the resource. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) { /* Otherwise, it's too late. */
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		xnthread_set_info(thread, XNTIMEO);
	} else {

	      grab_and_exit:

		if (xnthread_test_state(thread, XNOTHER))
			xnthread_inc_rescnt(thread);

		if (use_fastlock) {
			xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
			/* We are the new owner, update the fastlock
			   accordingly. */
			if (xnsynch_pended_p(synch))
				threadh =
				    xnsynch_fast_set_claimed(threadh, 1);
			xnarch_atomic_set(lockp, threadh);
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
Example No. 27
void xntimer_next_local_shot(xnsched_t *sched)
{
	struct xntimer *timer;
	xnsticks_t delay;
	xntimerq_it_t it;
	xntimerh_t *h;

	/*
	 * Do not reprogram locally when inside the tick handler;
	 * it will be done on exit anyway. Also exit if there is no
	 * pending timer.
	 */
	if (testbits(sched->status, XNINTCK))
		return;

	h = xntimerq_it_begin(&sched->timerqueue, &it);
	if (h == NULL)
		return;

	/*
	 * Here we try to defer the host tick heading the timer queue,
	 * so that it does not preempt a real-time activity uselessly,
	 * in two cases:
	 *
	 * 1) a rescheduling is pending for the current CPU. We may
	 * assume that a real-time thread is about to resume, so we
	 * want to move the host tick out of the way until the host
	 * kernel resumes, unless there are no other outstanding
	 * timers.
	 *
	 * 2) the current thread is running in primary mode, in which
	 * case we may also defer the host tick until the host kernel
	 * resumes.
	 *
	 * The host tick deferral is cleared whenever Xenomai is about
	 * to yield control to the host kernel (see
	 * __xnpod_schedule()), or a timer with an earlier timeout
	 * date is scheduled, whichever comes first.
	 */
	__clrbits(sched->status, XNHDEFER);
	timer = aplink2timer(h);
	if (unlikely(timer == &sched->htimer)) {
		if (xnsched_self_resched_p(sched) ||
		    !xnthread_test_state(sched->curr, XNROOT)) {
			h = xntimerq_it_next(&sched->timerqueue, &it, h);
			if (h) {
				__setbits(sched->status, XNHDEFER);
				timer = aplink2timer(h);
			}
		}
	}

	delay = xntimerh_date(&timer->aplink) -
		(xnarch_get_cpu_tsc() + nklatency);

	if (delay < 0)
		delay = 0;
	else if (delay > ULONG_MAX)
		delay = ULONG_MAX;

	xnarch_trace_tick((unsigned)delay);

	xnarch_program_timer_shot(delay);
}
Example No. 28
static inline void xntimer_dequeue_aperiodic(xntimer_t *timer)
{
	xntimerq_remove(&timer->sched->timerqueue, &timer->aplink);
	__setbits(timer->status, XNTIMER_DEQUEUED);
}
Example No. 29
void xntimer_tick_aperiodic(void)
{
	xnsched_t *sched = xnpod_current_sched();
	xntimerq_t *timerq = &sched->timerqueue;
	xntimerh_t *holder;
	xntimer_t *timer;
	xnsticks_t delta;
	xnticks_t now;

	/*
	 * Optimisation: any local timer reprogramming triggered by
	 * invoked timer handlers can wait until we leave the tick
	 * handler. Use this status flag as a hint to
	 * xntimer_start_aperiodic.
	 */
	__setbits(sched->status, XNINTCK);

	now = xnarch_get_cpu_tsc();
	while ((holder = xntimerq_head(timerq)) != NULL) {
		timer = aplink2timer(holder);
		/*
		 * If the delay to the next shot is greater than the
		 * intrinsic latency value, we may stop scanning the
		 * timer queue there, since timeout dates are ordered
		 * by increasing values.
		 */
		delta = (xnsticks_t)(xntimerh_date(&timer->aplink) - now);
		if (delta > (xnsticks_t)nklatency)
			break;

		trace_mark(xn_nucleus, timer_expire, "timer %p", timer);

		xntimer_dequeue_aperiodic(timer);
		xnstat_counter_inc(&timer->fired);

		if (likely(timer != &sched->htimer)) {
			if (likely(!testbits(nktbase.status, XNTBLCK)
				   || testbits(timer->status, XNTIMER_NOBLCK))) {
				timer->handler(timer);
				now = xnarch_get_cpu_tsc();
				/*
				 * If the elapsed timer has no reload
				 * value, or was re-enqueued or killed
				 * by the timeout handler: do not
				 * re-enqueue it for the next shot.
				 */
				if (!xntimer_reload_p(timer))
					continue;
				__setbits(timer->status, XNTIMER_FIRED);
			} else if (likely(!testbits(timer->status, XNTIMER_PERIODIC))) {
				/*
				 * Postpone the next tick to a
				 * reasonable date in the future,
				 * waiting for the timebase to be
				 * unlocked at some point.
				 */
				xntimerh_date(&timer->aplink) = xntimerh_date(&sched->htimer.aplink);
				continue;
			}
		} else {
			/*
			 * By postponing the propagation of the
			 * low-priority host tick to the interrupt
			 * epilogue (see xnintr_irq_handler()), we
			 * save some I-cache, which translates into
			 * precious microsecs on low-end hw.
			 */
			__setbits(sched->status, XNHTICK);
			__clrbits(sched->status, XNHDEFER);
			if (!testbits(timer->status, XNTIMER_PERIODIC))
				continue;
		}

		do {
			xntimerh_date(&timer->aplink) += timer->interval;
		} while (xntimerh_date(&timer->aplink) < now + nklatency);
		xntimer_enqueue_aperiodic(timer);
	}

	__clrbits(sched->status, XNINTCK);

	xntimer_next_local_shot(sched);
}
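The do/while loop near the end of xntimer_tick_aperiodic re-arms a periodic timer by adding whole intervals until the expiry date lands beyond now + nklatency, so a handler that overran its period skips the missed shots instead of firing them back to back. The following standalone sketch reproduces just that catch-up computation with made-up tick values.

/*
 * Standalone sketch of the periodic catch-up re-arm done at the end of
 * xntimer_tick_aperiodic(): keep adding the interval until the expiry
 * date is safely in the future. Tick values are made up for the demo.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long date = 1000;	    /* expiry that just fired */
	unsigned long long interval = 100;  /* timer period */
	unsigned long long now = 1350;	    /* the handler ran for a while */
	unsigned long long latency = 10;    /* never program a closer shot */

	do {
		date += interval;
	} while (date < now + latency);

	printf("next shot programmed at %llu\n", date);	/* 1400 */
	return 0;
}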
Example No. 30
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called with
 * interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnstat_exectime_t *prev;
	struct xnintr *intr;
	xnticks_t start;
	int s;

	prev  = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In SMP case, we have to reload the cookie under the per-IRQ
	 * lock to avoid racing with xnintr_detach.  However, we
	 * assume that no CPU migration will occur while running the
	 * interrupt service routine, so the scheduler pointer will
	 * remain valid throughout this function.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);
	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		xnstat_exectime_lazy_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account,
			start);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
 unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}