Example #1
File: select.c Project: ArcEye/RTAI
/**
 * Bind a file descriptor (represented by its @a xnselect structure) to a
 * selector block.
 *
 * @param select_block pointer to the @a struct @a xnselect to be bound;
 *
 * @param binding pointer to a newly allocated (using xnmalloc) @a struct
 * @a xnselect_binding;
 *
 * @param selector pointer to the selector structure;
 *
 * @param type type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
 * XNSELECT_EXCEPT);
 *
 * @param index index of the file descriptor (represented by @a select_block)
 * in the bit fields used by the @a selector structure;
 *
 * @param state current state of the file descriptor.
 *
 * @a select_block must have been initialized with xnselect_init(),
 * the @a xnselector structure must have been initialized with
 * xnselector_init(), @a binding may be uninitialized.
 *
 * This service must be called with nklock locked, irqs off. For this reason,
 * the @a binding parameter must have been allocated by the caller outside the
 * locking section.
 *
 * @retval -EINVAL if @a type or @a index is invalid;
 * @retval 0 otherwise.
 */
int xnselect_bind(struct xnselect *select_block,
		  struct xnselect_binding *binding,
		  struct xnselector *selector,
		  unsigned type,
		  unsigned index,
		  unsigned state)
{
	if (type >= XNSELECT_MAX_TYPES || index > __FD_SETSIZE)
		return -EINVAL;

	binding->selector = selector;
	binding->fd = select_block;
	binding->type = type;
	binding->bit_index = index;
	inith(&binding->link);
	inith(&binding->slink);

	appendq(&selector->bindings, &binding->slink);
	appendq(&select_block->bindings, &binding->link);
	__FD_SET__(index, &selector->fds[type].expected);
	if (state) {
		__FD_SET__(index, &selector->fds[type].pending);
		if (xnselect_wakeup(selector))
			xnpod_schedule();
	} else
		__FD_CLR__(index, &selector->fds[type].pending);

	return 0;
}
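
A hedged caller sketch based on the constraints documented above: the binding is allocated with xnmalloc() outside the locked section, then xnselect_bind() is called with nklock held and interrupts off. The helper name and the read-readiness scenario are assumptions for illustration only.

static int bind_fd_for_read(struct xnselect *block,
			    struct xnselector *selector,
			    unsigned index, unsigned readable)
{
	struct xnselect_binding *binding;
	spl_t s;
	int err;

	/* Allocate outside the nklock-protected section, as required. */
	binding = xnmalloc(sizeof(*binding));
	if (binding == NULL)
		return -ENOMEM;

	xnlock_get_irqsave(&nklock, s);
	err = xnselect_bind(block, binding, selector,
			    XNSELECT_READ, index, readable);
	xnlock_put_irqrestore(&nklock, s);

	if (err)
		/* Nothing was queued on failure, so the binding can be freed. */
		xnfree(binding);

	return err;
}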
Example #2
int xnpipe_mount(void)
{
	struct xnpipe_state *state;
	int i;

	for (state = &xnpipe_states[0];
	     state < &xnpipe_states[XNPIPE_NDEVS]; state++) {
		inith(&state->slink);
		inith(&state->alink);
		state->status = 0;
		state->asyncq = NULL;
		initq(&state->inq);
		initq(&state->outq);
	}

	initq(&xnpipe_sleepq);
	initq(&xnpipe_asyncq);

	xnpipe_class = class_create(THIS_MODULE, "rtpipe");
	if (IS_ERR(xnpipe_class)) {
		xnlogerr("error creating rtpipe class, err=%ld.\n",
			 PTR_ERR(xnpipe_class));
		return -EBUSY;
	}

	for (i = 0; i < XNPIPE_NDEVS; i++) {
		DECLARE_DEVHANDLE(cldev);
		cldev = wrap_device_create(xnpipe_class, NULL,
					   MKDEV(XNPIPE_DEV_MAJOR, i),
					   NULL, "rtp%d", i);
		if (IS_ERR(cldev)) {
			xnlogerr
			    ("can't add device class, major=%d, minor=%d, err=%ld\n",
			     XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev));
			class_destroy(xnpipe_class);
			return -EBUSY;
		}
	}

	if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) {
		xnlogerr
		    ("unable to reserve major #%d for message pipe support.\n",
		     XNPIPE_DEV_MAJOR);
		return -EBUSY;
	}

	xnpipe_wakeup_apc =
	    rthal_apc_alloc("pipe_wakeup", &xnpipe_wakeup_proc, NULL);

	return 0;
}
Example #3
void xntslave_adjust(xntslave_t *slave, xnsticks_t delta)
{
	int nr_cpus, cpu, n;
	xnqueue_t adjq;

	initq(&adjq);
	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {
		struct percpu_cascade *pc = &slave->cascade[cpu];
		xnholder_t *adjholder;

		for (n = 0; n < XNTIMER_WHEELSIZE; n++) {
			xnqueue_t *q = &pc->wheel[n];
			xntlholder_t *holder;

			for (holder = xntlist_head(q); holder;
			     holder = xntlist_next(q, holder)) {
				xntimer_t *timer = plink2timer(holder);
				if (testbits(timer->status, XNTIMER_REALTIME)) {
					inith(&timer->adjlink);
					appendq(&adjq, &timer->adjlink);
				}
			}
		}

		while ((adjholder = getq(&adjq))) {
			xntimer_t *timer = adjlink2timer(adjholder);
			xntimer_dequeue_periodic(timer);
			xntimer_adjust_periodic(timer, delta);
		}
	}
}
Example #4
void xntimer_adjust_all_aperiodic(xnsticks_t delta)
{
	unsigned cpu, nr_cpus;
	xnqueue_t adjq;

	initq(&adjq);
	delta = xnarch_ns_to_tsc(delta);
	for (cpu = 0, nr_cpus = xnarch_num_online_cpus(); cpu < nr_cpus; cpu++) {
		xnsched_t *sched = xnpod_sched_slot(cpu);
		xntimerq_t *q = &sched->timerqueue;
		xnholder_t *adjholder;
		xntimerh_t *holder;
		xntimerq_it_t it;

		for (holder = xntimerq_it_begin(q, &it); holder;
		     holder = xntimerq_it_next(q, &it, holder)) {
			xntimer_t *timer = aplink2timer(holder);
			if (testbits(timer->status, XNTIMER_REALTIME)) {
				inith(&timer->adjlink);
				appendq(&adjq, &timer->adjlink);
			}
		}

		while ((adjholder = getq(&adjq))) {
			xntimer_t *timer = adjlink2timer(adjholder);
			xntimer_dequeue_aperiodic(timer);
			xntimer_adjust_aperiodic(timer, delta);
		}

		if (sched != xnpod_current_sched())
			xntimer_next_remote_shot(sched);
		else
			xntimer_next_local_shot(sched);
	}
}
Example #5
void xnmod_alloc_glinks(xnqueue_t *freehq)
{
	xngholder_t *sholder, *eholder;

	sholder = xnheap_alloc(&kheap,
			       sizeof(xngholder_t) * XNMOD_GHOLDER_REALLOC);

	if (!sholder) {
		/* If we are running out of memory but still have some free
		   holders, just return silently, hoping that the contention
		   will disappear before we have no other choice than
		   allocating memory eventually. Otherwise, we have to raise a
		   fatal error right now. */

		if (emptyq_p(freehq))
			xnpod_fatal("cannot allocate generic holders");

		return;
	}

	for (eholder = sholder + XNMOD_GHOLDER_REALLOC;
	     sholder < eholder; sholder++) {
		inith(&sholder->glink.plink);
		appendq(freehq, &sholder->glink.plink);
	}
}
Example #6
MSG_Q_ID msgQCreate(int nb_msgs, int length, int flags)
{
	static unsigned long msgq_ids;
	wind_msgq_t *queue;
	xnflags_t bflags = 0;
	int i, msg_size;
	char *msgs_mem;
	spl_t s;

	check_NOT_ISR_CALLABLE(return 0);

	error_check(nb_msgs <= 0, S_msgQLib_INVALID_QUEUE_TYPE, return 0);

	error_check(flags & ~WIND_MSG_Q_OPTION_MASK,
		    S_msgQLib_INVALID_QUEUE_TYPE, return 0);

	error_check(length < 0, S_msgQLib_INVALID_MSG_LENGTH, return 0);

	msgs_mem = xnmalloc(sizeof(wind_msgq_t) +
			    nb_msgs * (sizeof(wind_msg_t) + length));

	error_check(msgs_mem == NULL, S_memLib_NOT_ENOUGH_MEMORY, return 0);

	queue = (wind_msgq_t *)msgs_mem;
	msgs_mem += sizeof(wind_msgq_t);

	queue->magic = WIND_MSGQ_MAGIC;
	queue->msg_length = length;
	queue->free_list = NULL;
	initq(&queue->msgq);
	inith(&queue->rlink);
	queue->rqueue = &wind_get_rholder()->msgQq;

	/* init of the synch object : */
	if (flags & MSG_Q_PRIORITY)
		bflags |= XNSYNCH_PRIO;

	xnsynch_init(&queue->synchbase, bflags, NULL);

	msg_size = sizeof(wind_msg_t) + length;

	for (i = 0; i < nb_msgs; ++i, msgs_mem += msg_size)
		free_msg(queue, (wind_msg_t *)msgs_mem);

	xnlock_get_irqsave(&nklock, s);
	appendq(queue->rqueue, &queue->rlink);
	xnlock_put_irqrestore(&nklock, s);

	sprintf(queue->name, "mq%lu", msgq_ids++);

	if (xnregistry_enter(queue->name, queue,
			     &queue->handle, &msgq_pnode)) {
		wind_errnoset(S_objLib_OBJ_ID_ERROR);
		msgQDelete((MSG_Q_ID)queue);
		return 0;
	}

	return (MSG_Q_ID)queue;
}
Example #7
static int __wind_wd_wait(struct task_struct *curr, struct pt_regs *regs)
{
	xnholder_t *holder;
	wind_rholder_t *rh;
	WIND_TCB *pTcb;
	wind_wd_t *wd;
	int err = 0;
	spl_t s;

	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(wd->wdt)))
		return -EFAULT;

	rh = wind_get_rholder();

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_task_current(curr);

	if (xnthread_base_priority(&pTcb->threadbase) != XNCORE_IRQ_PRIO)
		/* Renice the waiter above all regular tasks if needed. */
		xnpod_renice_thread(&pTcb->threadbase, XNCORE_IRQ_PRIO);

	if (!emptyq_p(&rh->wdpending))
		goto pull_event;

	xnsynch_sleep_on(&rh->wdsynch, XN_INFINITE, XN_RELATIVE);

	if (xnthread_test_info(&pTcb->threadbase, XNBREAK)) {
		err = -EINTR;	/* Unblocked. */
		goto unlock_and_exit;
	}
	
	if (xnthread_test_info(&pTcb->threadbase, XNRMID)) {
		err = -EIDRM;	/* Watchdog deleted while pending. */
		goto unlock_and_exit;
	}

 pull_event:

	holder = getq(&rh->wdpending);

	if (holder) {
		wd = link2wind_wd(holder);
		/* We need the following to mark the watchdog as unqueued. */
		inith(holder);
		xnlock_put_irqrestore(&nklock, s);
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
				  &wd->wdt, sizeof(wd->wdt));
		return 0;
	}

 unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #8
ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
{
	struct xnpipe_state *state;
	int need_sched = 0;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (size <= sizeof(*mh))
		return -EINVAL;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = size - sizeof(*mh);
	xnpipe_m_rdoff(mh) = 0;
	state->ionrd += xnpipe_m_size(mh);

	if (flags & XNPIPE_URGENT)
		prependq(&state->outq, xnpipe_m_link(mh));
	else
		appendq(&state->outq, xnpipe_m_link(mh));

	if (!testbits(state->status, XNPIPE_USER_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return (ssize_t) size;
	}

	if (testbits(state->status, XNPIPE_USER_WREAD)) {
		/*
		 * Wake up the regular Linux task waiting for input
		 * from the Xenomai side.
		 */
		__setbits(state->status, XNPIPE_USER_WREAD_READY);
		need_sched = 1;
	}

	if (state->asyncq) {	/* Schedule asynch sig. */
		__setbits(state->status, XNPIPE_USER_SIGIO);
		need_sched = 1;
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t) size;
}
Example #9
static int __wind_wd_wait(struct pt_regs *regs)
{
	union xnsched_policy_param param;
	xnholder_t *holder;
	wind_rholder_t *rh;
	WIND_TCB *pTcb;
	wind_wd_t *wd;
	int err = 0;
	spl_t s;

	rh = wind_get_rholder();

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_task_current(current);

	if (xnthread_base_priority(&pTcb->threadbase) != XNSCHED_IRQ_PRIO) {
		/* Boost the waiter above all regular tasks if needed. */
		param.rt.prio = XNSCHED_IRQ_PRIO;
		xnpod_set_thread_schedparam(&pTcb->threadbase,
					    &xnsched_class_rt, &param);
	}

	if (!emptyq_p(&rh->wdpending))
		goto pull_event;

	xnsynch_sleep_on(&rh->wdsynch, XN_INFINITE, XN_RELATIVE);

	if (xnthread_test_info(&pTcb->threadbase, XNBREAK)) {
		err = -EINTR;	/* Unblocked. */
		goto unlock_and_exit;
	}

	if (xnthread_test_info(&pTcb->threadbase, XNRMID)) {
		err = -EIDRM;	/* Watchdog deleted while pending. */
		goto unlock_and_exit;
	}

      pull_event:

	holder = getq(&rh->wdpending);

	if (holder) {
		wd = link2wind_wd(holder);
		/* We need the following to mark the watchdog as unqueued. */
		inith(holder);
		xnlock_put_irqrestore(&nklock, s);
		return __xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
					      &wd->wdt, sizeof(wd->wdt));
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #10
File: select.c Project: ArcEye/RTAI
/**
 * Destroy a selector block.
 *
 * All bindings with file descriptors are destroyed.
 *
 * @param selector the selector block to be destroyed
 */
void xnselector_destroy(struct xnselector *selector)
{
	spl_t s;

	inith(&selector->destroy_link);
	xnlock_get_irqsave(&nklock, s);
	appendq(&xnselectors, &selector->destroy_link);
	xnlock_put_irqrestore(&nklock, s);
	rthal_apc_schedule(xnselect_apc);
}
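
A minimal lifetime sketch to pair with the binding example above, assuming the xnselector_init()/xnselector_destroy() pairing mentioned in example #1; the helper names are hypothetical and xnselector_init() is assumed to return 0 on success.

static struct xnselector sample_selector;

static int sample_selector_setup(void)
{
	/* Must be done before any xnselect_bind() against this selector. */
	return xnselector_init(&sample_selector);
}

static void sample_selector_teardown(void)
{
	/* Queues the selector for deferred destruction; its bindings are dropped. */
	xnselector_destroy(&sample_selector);
}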
Example #11
static void pse51_shm_init(pse51_shm_t * shm)
{
	shm->addr = NULL;
	shm->size = 0;
	sema_init(&shm->maplock, 1);
	initq(&shm->mappings);

	inith(&shm->link);
	appendq(&pse51_shmq, &shm->link);
}
Example #12
int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
{
	int ret = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	bf->bufmem = xnarch_alloc_host_mem(bufsz);
	if (bf->bufmem == NULL)
		return -ENOMEM;

	xnsynch_init(&bf->isynch_base, mode & B_PRIO, NULL);
	xnsynch_init(&bf->osynch_base, mode & B_PRIO, NULL);

	bf->handle = 0;	/* i.e. (still) unregistered buffer. */
	xnobject_copy_name(bf->name, name);
	inith(&bf->rlink);
	bf->rqueue = &xeno_get_rholder()->bufferq;
	xnlock_get_irqsave(&nklock, s);
	appendq(bf->rqueue, &bf->rlink);
	xnlock_put_irqrestore(&nklock, s);

	bf->mode = mode;
	bf->bufsz = bufsz;
	bf->rdoff = 0;
	bf->wroff = 0;
	bf->fillsz = 0;
	bf->rdtoken = 0;
	bf->wrtoken = 0;

#ifndef __XENO_SIM__
	bf->cpid = 0;
#endif
	bf->magic = XENO_BUFFER_MAGIC;

	/*
	 * <!> Since xnregister_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		ret = xnregistry_enter(bf->name, bf, &bf->handle,
				       &__buffer_pnode.node);

		if (ret)
			rt_buffer_delete(bf);
	}

	return ret;
}
Example #13
/* get a message from the free list */
static inline wind_msg_t *get_free_msg(wind_msgq_t *queue)
{
	wind_msg_t *msg;

	if (queue->free_list == NULL)
		return NULL;

	msg = link2wind_msg(queue->free_list);
	queue->free_list = queue->free_list->next;
	inith(&msg->link);

	return msg;
}
Example #14
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregister_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__event_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on entry)
			   from user-space, it gets registered under a unique
			   internal name but is not exported through /proc. */
			xnobject_create_name(event->name, sizeof(event->name),
					     (void *)event);
			pnode = NULL;
		}

		err =
		    xnregistry_enter(event->name, event, &event->handle, pnode);

		if (err)
			rt_event_delete(event);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}
Example #15
int sc_screate(unsigned initval, int opt, int *errp)
{
	int bflags = 0, semid;
	vrtxsem_t *sem;
	spl_t s;

	if (opt & ~1) {
		*errp = ER_IIP;
		return -1;
	}

	sem = (vrtxsem_t *)xnmalloc(sizeof(*sem));

	if (!sem) {
		*errp = ER_NOCB;
		return -1;
	}

	semid = xnmap_enter(vrtx_sem_idmap, -1, sem);

	if (semid < 0) {
		*errp = ER_NOCB;
		xnfree(sem);
		return -1;
	}

	if (opt == 0)
		bflags = XNSYNCH_PRIO;
	else
		bflags = XNSYNCH_FIFO;

	xnsynch_init(&sem->synchbase, bflags | XNSYNCH_DREORD);
	inith(&sem->link);
	sem->semid = semid;
	sem->magic = VRTX_SEM_MAGIC;
	sem->count = initval;

	xnlock_get_irqsave(&nklock, s);
	appendq(&vrtx_sem_q, &sem->link);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	sprintf(sem->name, "sem%d", semid);
	xnregistry_enter(sem->name, sem, &sem->handle, &__sem_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	*errp = RET_OK;

	return semid;
}
Example #16
int xntbase_alloc(const char *name, u_long period, u_long flags,
		  xntbase_t **basep)
{
	xntslave_t *slave;
	xntbase_t *base;
	spl_t s;

	if (flags & ~XNTBISO)
		return -EINVAL;

	if (period == XN_APERIODIC_TICK) {
		*basep = &nktbase;
		xnarch_declare_tbase(&nktbase);
		return 0;
	}

	slave = (xntslave_t *)xnarch_alloc_host_mem(sizeof(*slave));

	if (!slave)
		return -ENOMEM;

	base = &slave->base;
	base->tickvalue = period;
	base->ticks2sec = 1000000000UL / period;
	base->wallclock_offset = 0;
	base->jiffies = 0;
	base->hook = NULL;
	base->ops = &nktimer_ops_periodic;
	base->name = name;
	inith(&base->link);
	xntslave_init(slave);

	/* Set initial status:
	   Not running, no time set, unlocked, isolated if requested. */
	base->status = flags;

	*basep = base;
#ifdef CONFIG_XENO_OPT_STATS
	initq(&base->timerq);
#endif /* CONFIG_XENO_OPT_STATS */
	xntbase_declare_proc(base);
	xnlock_get_irqsave(&nklock, s);
	appendq(&nktimebaseq, &base->link);
	xnlock_put_irqrestore(&nklock, s);

	xnarch_declare_tbase(base);

	return 0;
}
Example #17
int sc_mcreate(unsigned int opt, int *errp)
{
	int bflags, mid;
	vrtxmx_t *mx;
	spl_t s;

	switch (opt) {
	case 0:
		bflags = XNSYNCH_PRIO;
		break;
	case 1:
		bflags = XNSYNCH_FIFO;
		break;
	case 2:
		bflags = XNSYNCH_PRIO | XNSYNCH_PIP;
		break;
	default:
		*errp = ER_IIP;
		return 0;
	}

	mx = xnmalloc(sizeof(*mx));
	if (mx == NULL) {
		*errp = ER_NOCB;
		return -1;
	}

	mid = xnmap_enter(vrtx_mx_idmap, -1, mx);
	if (mid < 0) {
		xnfree(mx);
		return -1;
	}

	inith(&mx->link);
	mx->mid = mid;
	xnsynch_init(&mx->synchbase, bflags | XNSYNCH_DREORD | XNSYNCH_OWNER,
		     NULL);

	xnlock_get_irqsave(&nklock, s);
	appendq(&vrtx_mx_q, &mx->link);
	xnlock_put_irqrestore(&nklock, s);

	sprintf(mx->name, "mx%d", mid);
	xnregistry_enter(mx->name, mx, &mx->handle, &__mutex_pnode.node);

	*errp = RET_OK;

	return mid;
}
Example #18
/* Called with nklock locked, irq off. */
static int pse51_sem_init_inner(pse51_sem_t * sem, int pshared, unsigned value)
{
	if (value > (unsigned)SEM_VALUE_MAX)
		return EINVAL;

	sem->magic = PSE51_SEM_MAGIC;
	inith(&sem->link);
	appendq(&pse51_kqueues(pshared)->semq, &sem->link);
	xnsynch_init(&sem->synchbase, XNSYNCH_PRIO, NULL);
	sem->value = value;
	sem->pshared = pshared;
	sem->is_named = 0;
	sem->owningq = pse51_kqueues(pshared);

	return 0;
}
Example #19
int rt_intr_create(RT_INTR *intr,
		   const char *name,
		   unsigned irq, rt_isr_t isr, rt_iack_t iack, int mode)
{
	int err;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (name)
		xnobject_copy_name(intr->name, name);
	else
		/* Kernel-side "anonymous" objects (name == NULL) get unique names.
		 * Nevertheless, they will not be exported via the registry. */
		xnobject_create_name(intr->name, sizeof(intr->name), isr);

	xnintr_init(&intr->intr_base, intr->name, irq, isr, iack, mode);
#ifdef CONFIG_XENO_OPT_PERVASIVE
	xnsynch_init(&intr->synch_base, XNSYNCH_PRIO, NULL);
	intr->pending = 0;
	intr->cpid = 0;
	intr->mode = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	intr->magic = XENO_INTR_MAGIC;
	intr->handle = 0;	/* i.e. (still) unregistered interrupt. */
	inith(&intr->rlink);
	intr->rqueue = &xeno_get_rholder()->intrq;
	xnlock_get_irqsave(&nklock, s);
	appendq(intr->rqueue, &intr->rlink);
	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_attach(&intr->intr_base, intr);

	/*
	 * <!> Since xnregister_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (!err && name)
		err = xnregistry_enter(intr->name, intr, &intr->handle,
				       &__intr_pnode);
	if (err)
		rt_intr_delete(intr);

	return err;
}
Example #20
void __xntimer_init(xntimer_t *timer, xntbase_t *base,
		    void (*handler) (xntimer_t *timer))
{
	/* CAUTION: Setup from xntimer_init() must not depend on the
	   periodic/aperiodic timing mode. */

	xntimerh_init(&timer->aplink);
	xntimerh_date(&timer->aplink) = XN_INFINITE;
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
	timer->base = base;
	xntlholder_init(&timer->plink);
	xntlholder_date(&timer->plink) = XN_INFINITE;
#endif /* CONFIG_XENO_OPT_TIMING_PERIODIC */
	xntimer_set_priority(timer, XNTIMER_STDPRIO);
	timer->status = XNTIMER_DEQUEUED;
	timer->handler = handler;
	timer->interval = 0;
	timer->sched = xnpod_current_sched();

#ifdef CONFIG_XENO_OPT_STATS
	{
		spl_t s;

		if (!xnpod_current_thread() || xnpod_shadow_p())
			snprintf(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
				 current->pid, current->comm);
		else
			xnobject_copy_name(timer->name,
					   xnpod_current_thread()->name);

		inith(&timer->tblink);
		xnstat_counter_set(&timer->scheduled, 0);
		xnstat_counter_set(&timer->fired, 0);

		xnlock_get_irqsave(&nklock, s);
		appendq(&base->timerq, &timer->tblink);
		base->timerq_rev++;
		xnlock_put_irqrestore(&nklock, s);
	}
#endif /* CONFIG_XENO_OPT_STATS */

	xnarch_init_display_context(timer);
}
Example #21
int rt_sem_create(RT_SEM *sem, const char *name, unsigned long icount, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if ((mode & S_PULSE) && icount > 0)
		return -EINVAL;

	xnsynch_init(&sem->synch_base, mode & S_PRIO, NULL);
	sem->count = icount;
	sem->mode = mode;
	sem->handle = 0;	/* i.e. (still) unregistered semaphore. */
	sem->magic = XENO_SEM_MAGIC;
	xnobject_copy_name(sem->name, name);
	inith(&sem->rlink);
	sem->rqueue = &xeno_get_rholder()->semq;
	xnlock_get_irqsave(&nklock, s);
	appendq(sem->rqueue, &sem->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	sem->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregister_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(sem->name, sem, &sem->handle,
				       &__sem_pnode);
		if (err)
			rt_sem_delete(sem);
	}

	return err;
}
Example #22
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO, NULL);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregister_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(event->name, event, &event->handle,
				       &__event_pnode);

		if (err)
			rt_event_delete(event);
	}

	return err;
}
Example #23
static xnshm_a_t *kalloc_new_shm(unsigned long name, int size)
{
	xnshm_a_t *p;

	p = xnheap_alloc(&kheap, sizeof(xnshm_a_t));
	if (!p)
		return NULL;

	p->chunk = xnheap_alloc(&kheap, size);
	if (!p->chunk) {
		xnheap_free(&kheap, p);
		return NULL;
	}
	memset(p->chunk, 0, size);

	inith(&p->link);
	p->ref = 1;
	p->name = name;
	p->size = size;
	p->heap = &kheap;

	return p;
}
Example #24
int rt_cond_create(RT_COND *cond, const char *name)
{
    int err = 0;
    spl_t s;

    if (xnpod_asynch_p())
        return -EPERM;

    xnsynch_init(&cond->synch_base, XNSYNCH_PRIO, NULL);
    cond->handle = 0;	/* i.e. (still) unregistered cond. */
    cond->magic = XENO_COND_MAGIC;
    xnobject_copy_name(cond->name, name);
    inith(&cond->rlink);
    cond->rqueue = &xeno_get_rholder()->condq;
    xnlock_get_irqsave(&nklock, s);
    appendq(cond->rqueue, &cond->rlink);
    xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
    cond->cpid = 0;
#endif

    /*
     * <!> Since xnregister_enter() may reschedule, only register
     * complete objects, so that the registry cannot return
     * handles to half-baked objects...
     */
    if (name) {
        err = xnregistry_enter(cond->name, cond, &cond->handle,
                               &__cond_pnode.node);

        if (err)
            rt_cond_delete(cond);
    }

    return err;
}
Example #25
u_long t_create(const char *name,
		u_long prio,
		u_long sstack, u_long ustack, u_long flags, u_long *tid_r)
{
	union xnsched_policy_param param;
	struct xnthread_init_attr attr;
	xnflags_t bflags = 0;
	psostask_t *task;
	u_long err;
	spl_t s;
	int n;

	/* Xenomai extension: we accept priority level #0 for creating
	   non-RT tasks (i.e. underlaid by SCHED_NORMAL pthreads),
	   which are allowed to call into the pSOS emulator, usually
	   for synchronization services. */

	if (prio > 255)
		return ERR_PRIOR;

	task = (psostask_t *)xnmalloc(sizeof(*task));

	if (!task)
		return ERR_NOTCB;

	if (flags & T_FPU)
		bflags |= XNFPU;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (flags & T_SHADOW)
		bflags |= XNSHADOW;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	ustack += sstack;

	if (!(flags & T_SHADOW) && ustack < 1024) {
		xnfree(task);
		return ERR_TINYSTK;
	}

	if (name && *name)
		xnobject_copy_name(task->name, name);
	else
		/* i.e. Anonymous object which must be accessible from
		   user-space. */
		sprintf(task->name, "anon_task%lu", psos_task_ids++);

	attr.tbase = psos_tbase;
	attr.name = task->name;
	attr.flags = bflags;
	attr.ops = &psos_task_ops;
	attr.stacksize = ustack;
	param.rt.prio = prio;

	if (xnpod_init_thread(&task->threadbase,
			      &attr, &xnsched_class_rt, &param) != 0) {
		xnfree(task);
		return ERR_NOSTK;	/* Assume this is the only possible failure */
	}

	xnthread_time_slice(&task->threadbase) = psos_time_slice;

	taskev_init(&task->evgroup);
	inith(&task->link);

	for (n = 0; n < PSOSTASK_NOTEPAD_REGS; n++)
		task->notepad[n] = 0;

	initgq(&task->alarmq,
	       &xnmod_glink_queue,
	       xnmod_alloc_glinks,
	       XNMOD_GHOLDER_THRESHOLD);

	task->magic = PSOS_TASK_MAGIC;

	xnlock_get_irqsave(&nklock, s);
	appendq(&psostaskq, &task->link);
	*tid_r = (u_long)task;
	xnlock_put_irqrestore(&nklock, s);

	err = xnthread_register(&task->threadbase, task->name);
	if (err) {
		t_delete((u_long)task);
		return err;
	}

	xnarch_create_display(&task->threadbase, task->name, psostask);

	return SUCCESS;
}
Example #26
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_DMA32) ? GFP_DMA32 : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNHEAP_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNHEAP_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}
	xnheap_set_label(&heap->heap_base, "rt_heap: %s", name);

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO), NULL);
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

	/*
	 * <!> Since xnregister_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(heap->name, heap, &heap->handle,
				       &__heap_pnode.node);

		if (err)
			rt_heap_delete(heap);
	}

	return err;
}
Example #27
static xnshm_a_t *create_new_heap(unsigned long name, int heapsize, int suprt)
{
	xnshm_a_t *p;
	int err;

	p = xnheap_alloc(&kheap, sizeof(xnshm_a_t));
	if (!p)
		return NULL;

	p->heap = xnheap_alloc(&kheap, sizeof(xnheap_t));
	if (!p->heap) {
		xnheap_free(&kheap, p);
		return NULL;
	}

	/*
	 * Account for the minimum heap size and overhead so that the
	 * actual free space is large enough to match the requested
	 * size.
	 */

#ifdef CONFIG_XENO_OPT_PERVASIVE
	heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

	err = xnheap_init_mapped(p->heap,
				 heapsize,
				 suprt == USE_GFP_KERNEL ? GFP_KERNEL : 0);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem) {
			err = -ENOMEM;
		} else {

			err = xnheap_init(p->heap, heapmem, heapsize, XNCORE_PAGE_SIZE);
			if (err) {
				xnarch_free_host_mem(heapmem, heapsize);
			}
		}
	}
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
	if (err) {
		xnheap_free(&kheap, p->heap);
		xnheap_free(&kheap, p);
		return NULL;
	}

	p->chunk = xnheap_mapped_address(p->heap, 0);

	memset(p->chunk, 0, heapsize);

	inith(&p->link);
	p->ref = 1;
	p->name = name;
	p->size = heapsize;

	return p;
}
Example #28
static ssize_t xnpipe_write(struct file *file,
			    const char *buf, size_t count, loff_t *ppos)
{
	struct xnpipe_state *state = file->private_data;
	struct xnpipe_mh *mh;
	int pollnum, ret;
	spl_t s;

	if (count == 0)
		return 0;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	xnlock_get_irqsave(&nklock, s);

      retry:

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EPIPE;
	}

	pollnum = countq(&state->inq) + countq(&state->outq);
	xnlock_put_irqrestore(&nklock, s);

	mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate);
	if (mh == (struct xnpipe_mh *)-1)
		return -ENOMEM;

	if (mh == NULL) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		xnlock_get_irqsave(&nklock, s);
		if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
				pollnum >
				countq(&state->inq) + countq(&state->outq))) {
			xnlock_put_irqrestore(&nklock, s);
			return -ERESTARTSYS;
		}
		goto retry;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = count;
	xnpipe_m_rdoff(mh) = 0;

	if (copy_from_user(xnpipe_m_data(mh), buf, count)) {
		state->ops.free_ibuf(mh, state->xstate);
		return -EFAULT;
	}

	xnlock_get_irqsave(&nklock, s);

	appendq(&state->inq, &mh->link);

	/* Wake up a Xenomai sleeper if any. */
	if (xnsynch_wakeup_one_sleeper(&state->synchbase))
		xnpod_schedule();

	if (state->ops.input) {
		ret = state->ops.input(mh, 0, state->xstate);
		if (ret)
			count = (size_t)ret;
	}

	if (file->f_flags & O_SYNC) {
		if (!emptyq_p(&state->inq)) {
			if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
					emptyq_p(&state->inq)))
				count = -ERESTARTSYS;
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t)count;
}
Example #29
int rt_task_init(RT_TASK *task,
		 void (*body) (int),
		 int cookie,
		 int stack_size,
		 int priority, int uses_fpu, void (*sigfn) (void))
{
	union xnsched_policy_param param;
	struct xnthread_start_attr sattr;
	struct xnthread_init_attr iattr;
	xnflags_t bflags = 0;
	int ret;
	spl_t s;

	if (priority < XNSCHED_LOW_PRIO ||
	    priority > XNSCHED_HIGH_PRIO || task->magic == RTAI_TASK_MAGIC)
		return -EINVAL;

	priority = XNSCHED_HIGH_PRIO - priority + 1;	/* Normalize. */

	if (uses_fpu)
#ifdef CONFIG_XENO_HW_FPU
		bflags |= XNFPU;
#else /* !CONFIG_XENO_HW_FPU */
		return -EINVAL;
#endif /* CONFIG_XENO_HW_FPU */

	iattr.tbase = rtai_tbase;
	iattr.name = NULL;
	iattr.flags = bflags;
	iattr.ops = &__rtai_task_ops;
	iattr.stacksize = stack_size;
	param.rt.prio = priority;

	if (xnpod_init_thread(&task->thread_base,
			      &iattr, &xnsched_class_rt, &param) != 0)
		/* Assume this is the only possible failure. */
		return -ENOMEM;

	xnarch_cpus_clear(task->affinity);
	inith(&task->link);
	task->suspend_depth = 1;
	task->cookie = cookie;
	task->body = body;
	task->sigfn = sigfn;

	if (xnarch_cpus_empty(task->affinity))
		task->affinity = XNPOD_ALL_CPUS;

	xnlock_get_irqsave(&nklock, s);

	sattr.mode = XNSUSP;	/* Suspend on startup. */
	sattr.imask = 0;
	sattr.affinity = task->affinity;
	sattr.entry = rt_task_trampoline;
	sattr.cookie = task;
	ret = xnpod_start_thread(&task->thread_base, &sattr);
	if (ret)
		goto unlock_and_exit;

	task->magic = RTAI_TASK_MAGIC;
	appendq(&__rtai_task_q, &task->link);

#ifdef CONFIG_XENO_FASTSYNCH
	/* We need an anonymous registry entry to obtain a handle for fast
	   mutex locking. */
	ret = xnthread_register(&task->thread_base, "");
	if (ret) {
		xnpod_abort_thread(&task->thread_base);
		goto unlock_and_exit;
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Add a switch hook only if a signal function has been declared
	   at least once for some created task. */

	if (sigfn != NULL && __rtai_task_sig++ == 0)
		xnpod_add_hook(XNHOOK_THREAD_SWITCH, &__task_switch_hook);

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret ? -EINVAL : 0;
}
Example #30
/**
 * Map pages of memory.
 *
 * This service allows shared memory regions to be accessed by the caller.
 *
 * When used in kernel-space, this service returns the address of the offset @a
 * off of the shared memory object underlying @a fd. The protection flags @a
 * prot are only checked for consistency with the @a fd open flags, but memory
 * protection is unsupported. Since the shared memory region exists before it
 * is mapped, this service only increments a reference counter.
 *
 * The only supported value for @a flags is @a MAP_SHARED.
 *
 * When used in user-space, this service maps the specified shared memory region
 * into the caller address-space. If @a fd is not a shared memory object
 * descriptor (i.e. not obtained with shm_open()), this service falls back to
 * the regular Linux mmap service.
 *
 * @param addr ignored.
 *
 * @param len size of the shared memory region to be mapped.
 *
 * @param prot protection bits, checked in kernel-space, but only useful in
 * user-space, are a bitwise or of the following values:
 * - PROT_NONE, meaning that the mapped region can not be accessed;
 * - PROT_READ, meaning that the mapped region can be read;
 * - PROT_WRITE, meaning that the mapped region can be written;
 * - PROT_EXEC, meaning that the mapped region can be executed.
 *
 * @param flags only MAP_SHARED is accepted, meaning that the mapped memory
 * region is shared.
 *
 * @param fd file descriptor, obtained with shm_open().
 *
 * @param off offset in the shared memory region.
 *
 * @retval 0 on success;
 * @retval MAP_FAILED with @a errno set if:
 * - EINVAL, @a len is null or @a addr is not a multiple of @a PAGE_SIZE;
 * - EBADF, @a fd is not a shared memory object descriptor (obtained with
 *   shm_open());
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, @a flags is not @a MAP_SHARED;
 * - EACCES, @a fd is not opened for reading or is not opened for writing and
 *   PROT_WRITE is set in @a prot;
 * - EINTR, this service was interrupted by a signal;
 * - ENXIO, the range [off;off+len) is invalid for the shared memory region
 *   specified by @a fd;
 * - EAGAIN, insufficient memory exists in the system heap to create the
 *   mapping, increase CONFIG_XENO_OPT_SYS_HEAPSZ.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mmap.html">
 * Specification.</a>
 * 
 */
void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off)
{
	pse51_shm_map_t *map;
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	void *result;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);

	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		xnlock_put_irqrestore(&nklock, s);
		err = -PTR_ERR(shm);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (flags != MAP_SHARED) {
		err = ENOTSUP;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc) & PSE51_PERMS_MASK;
	xnlock_put_irqrestore(&nklock, s);

	if ((desc_flags != O_RDWR && desc_flags != O_RDONLY) ||
	    ((prot & PROT_WRITE) && desc_flags == O_RDONLY)) {
		err = EACCES;
		goto err_shm_put;
	}

	map = (pse51_shm_map_t *) xnmalloc(sizeof(*map));
	if (!map) {
		err = EAGAIN;
		goto err_shm_put;
	}

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_free_map;
	}

	if (!shm->addr || off + len > shm->size) {
		err = ENXIO;
		up(&shm->maplock);
		goto err_free_map;
	}

	/* Align the heap address on a page boundary. */
	result = (void *)PAGE_ALIGN((u_long)shm->addr);
	map->addr = result = (void *)((char *)result + off);
	map->size = len;
	inith(&map->link);
	prependq(&shm->mappings, &map->link);
	up(&shm->maplock);

	return result;

  err_free_map:
	xnfree(map);
  err_shm_put:
	pse51_shm_put(shm, 1);
  error:
	thread_set_errno(err);
	return MAP_FAILED;
}
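
A user-space usage sketch matching the description above, with hypothetical names and error handling trimmed: the shared memory object is created with shm_open(), sized with ftruncate(), then mapped with MAP_SHARED, the only @a flags value this service accepts.

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void *map_shared_region(const char *name, size_t len)
{
	void *p;
	int fd;

	fd = shm_open(name, O_RDWR | O_CREAT, 0666);
	if (fd < 0)
		return MAP_FAILED;

	if (ftruncate(fd, len)) {
		close(fd);
		return MAP_FAILED;
	}

	/* Only MAP_SHARED is supported by this service. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* Closing the descriptor once the region is mapped follows the usual
	   POSIX convention; the mapping itself remains valid. */
	close(fd);

	return p;
}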