Example #1
ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
{
	struct xnpipe_state *state;
	int need_sched = 0;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (size <= sizeof(*mh))
		return -EINVAL;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = size - sizeof(*mh);
	xnpipe_m_rdoff(mh) = 0;
	state->ionrd += xnpipe_m_size(mh);

	if (flags & XNPIPE_URGENT)
		prependq(&state->outq, xnpipe_m_link(mh));
	else
		appendq(&state->outq, xnpipe_m_link(mh));

	if (!testbits(state->status, XNPIPE_USER_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return (ssize_t) size;
	}

	if (testbits(state->status, XNPIPE_USER_WREAD)) {
		/*
		 * Wake up the regular Linux task waiting for input
		 * from the Xenomai side.
		 */
		__setbits(state->status, XNPIPE_USER_WREAD_READY);
		need_sched = 1;
	}

	if (state->asyncq) {	/* Schedule an async SIGIO notification. */
		__setbits(state->status, XNPIPE_USER_SIGIO);
		need_sched = 1;
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t) size;
}
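A minimal caller sketch for context. xnpipe_send() expects one contiguous buffer that starts with the struct xnpipe_mh header and carries the payload right after it, with size covering both (hence the size <= sizeof(*mh) check above). The send_string() helper and the use of xnmalloc()/xnfree() below are illustrative assumptions, not part of the example; on success, ownership of the buffer passes to the output queue, which releases it later through the pipe's free_obuf handler.

/* Hypothetical helper: allocate header + payload in one buffer and
 * queue it. Real code would typically allocate from the pipe's own
 * heap rather than with xnmalloc(). */
static ssize_t send_string(int minor, const char *text)
{
	size_t len = strlen(text);
	struct xnpipe_mh *mh;
	ssize_t ret;

	mh = xnmalloc(sizeof(*mh) + len);
	if (mh == NULL)
		return -ENOMEM;

	memcpy(xnpipe_m_data(mh), text, len);

	ret = xnpipe_send(minor, mh, sizeof(*mh) + len, 0);
	if (ret < 0)
		xnfree(mh);	/* not queued; we still own the buffer */

	return ret;
}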
Example #2
void addmlq(struct xnsched_mlq *q,
	    struct xnpholder *h, int idx, int lifo)
{
	struct xnqueue *queue = &q->queue[idx];
	int hi = idx / BITS_PER_LONG;
	int lo = idx % BITS_PER_LONG;

	if (lifo)
		prependq(queue, &h->plink);
	else
		appendq(queue, &h->plink);

	h->prio = idx;
	q->elems++;
	__setbits(q->himap, 1UL << hi);
	__setbits(q->lomap[hi], 1UL << lo);
}
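addmlq() keeps a two-level bitmap in sync with the queue array: himap holds one bit per word of lomap, and lomap[hi] holds one bit per queue index within that word, so empty levels can be skipped without scanning them. A sketch of the complementary lookup follows; it assumes an ffnz()-style helper returning the position of the least significant set bit, and the ffmlq() name itself is hypothetical.

/* Lookup sketch: find the lowest-numbered non-empty queue level by
 * walking the two-level bitmap maintained by addmlq(). */
static int ffmlq(struct xnsched_mlq *q)
{
	int hi, lo;

	if (q->elems == 0)
		return -1;	/* every level is empty */

	hi = ffnz(q->himap);		/* first word of lomap with a set bit */
	lo = ffnz(q->lomap[hi]);	/* first set bit within that word */

	return hi * BITS_PER_LONG + lo;	/* invert the (hi, lo) split above */
}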
Example #3
static ssize_t xnpipe_read(struct file *file,
			   char *buf, size_t count, loff_t *ppos)
{
	struct xnpipe_state *state = file->private_data;
	int sigpending, err = 0;
	size_t nbytes, inbytes;
	struct xnpipe_mh *mh;
	struct xnholder *h;
	ssize_t ret;
	spl_t s;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EPIPE;
	}
	/*
	 * Queue probe and proc enqueuing must be seen atomically,
	 * including from the Xenomai side.
	 */
	h = getq(&state->outq);
	mh = link2mh(h);

	if (mh == NULL) {
		if (file->f_flags & O_NONBLOCK) {
			xnlock_put_irqrestore(&nklock, s);
			return -EWOULDBLOCK;
		}

		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
					 !emptyq_p(&state->outq));
		h = getq(&state->outq);
		mh = link2mh(h);

		if (mh == NULL) {
			xnlock_put_irqrestore(&nklock, s);
			return sigpending ? -ERESTARTSYS : 0;
		}
	}

	/*
	 * We allow more data to be appended to the current message
	 * bucket while its contents are being copied to the user
	 * buffer; therefore, we need to loop until either 1) all the
	 * data has been copied, or 2) we have consumed the user
	 * buffer space entirely.
	 */

	inbytes = 0;

	for (;;) {
		nbytes = xnpipe_m_size(mh) - xnpipe_m_rdoff(mh);

		if (nbytes + inbytes > count)
			nbytes = count - inbytes;

		if (nbytes == 0)
			break;

		xnlock_put_irqrestore(&nklock, s);
		/* More data could be appended while doing this: */
		err = __copy_to_user(buf + inbytes,
				     xnpipe_m_data(mh) + xnpipe_m_rdoff(mh),
				     nbytes);
		xnlock_get_irqsave(&nklock, s);

		if (err) {
			err = -EFAULT;
			break;
		}

		inbytes += nbytes;
		xnpipe_m_rdoff(mh) += nbytes;
	}

	state->ionrd -= inbytes;
	ret = inbytes;

	if (xnpipe_m_size(mh) > xnpipe_m_rdoff(mh))
		prependq(&state->outq, &mh->link);
	else {
		/*
		 * We always want to fire the output handler because
		 * whatever the error state is for userland (e.g
		 * -EFAULT), we did pull a message from our output
		 * queue.
		 */
		if (state->ops.output)
			state->ops.output(mh, state->xstate);
		xnlock_put_irqrestore(&nklock, s);
		state->ops.free_obuf(mh, state->xstate);
		xnlock_get_irqsave(&nklock, s);
		if (testbits(state->status, XNPIPE_USER_WSYNC)) {
			__setbits(state->status, XNPIPE_USER_WSYNC_READY);
			xnpipe_schedule_request();
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	return err ? : ret;
}
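xnpipe_read() is an ordinary character-device read handler on the Linux side of the pipe. The table below sketches how such a handler would be wired up; it is illustrative and elides the remaining handlers.

/* Illustrative hookup; the other handlers are elided. */
static const struct file_operations xnpipe_fops = {
	.owner = THIS_MODULE,
	.read = xnpipe_read,
	/* .write, .poll, .open, .release, ... */
};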
Example #4
/**
 * Map pages of memory.
 *
 * This service allows shared memory regions to be accessed by the caller.
 *
 * When used in kernel-space, this service returns the address of offset @a
 * off within the shared memory object underlying @a fd. The protection flags
 * @a prot are only checked for consistency with the open flags of @a fd;
 * memory protection is unsupported. The shared memory region must already
 * exist before it is mapped: this service only increments its reference
 * counter.
 *
 * The only supported value for @a flags is @a MAP_SHARED.
 *
 * When used in user-space, this service maps the specified shared memory region
 * into the caller address-space. If @a fd is not a shared memory object
 * descriptor (i.e. not obtained with shm_open()), this service falls back to
 * the regular Linux mmap service.
 *
 * @param addr ignored.
 *
 * @param len size of the shared memory region to be mapped.
 *
 * @param prot protection bits, checked in kernel-space but only effective in
 * user-space; a bitwise OR of the following values:
 * - PROT_NONE, meaning that the mapped region cannot be accessed;
 * - PROT_READ, meaning that the mapped region can be read;
 * - PROT_WRITE, meaning that the mapped region can be written;
 * - PROT_EXEC, meaning that the mapped region can be executed.
 *
 * @param flags only MAP_SHARED is accepted, meaning that the mapped memory
 * region is shared.
 *
 * @param fd file descriptor, obtained with shm_open().
 *
 * @param off offset in the shared memory region.
 *
 * @retval the address of the mapped region on success;
 * @retval MAP_FAILED with @a errno set if:
 * - EINVAL, @a len is 0 or @a addr is not a multiple of @a PAGE_SIZE;
 * - EBADF, @a fd is not a shared memory object descriptor (obtained with
 *   shm_open());
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, @a flags is not @a MAP_SHARED;
 * - EACCES, @a fd is not opened for reading, or PROT_WRITE is set in @a prot
 *   while @a fd is not opened for writing;
 * - EINTR, this service was interrupted by a signal;
 * - ENXIO, the range [off;off+len) is invalid for the shared memory region
 *   specified by @a fd;
 * - EAGAIN, insufficient memory exists in the system heap to create the
 *   mapping, increase CONFIG_XENO_OPT_SYS_HEAPSZ.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mmap.html">
 * Specification.</a>
 * 
 */
void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off)
{
	pse51_shm_map_t *map;
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	void *result;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);

	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		xnlock_put_irqrestore(&nklock, s);
		err = -PTR_ERR(shm);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (flags != MAP_SHARED) {
		err = ENOTSUP;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc) & PSE51_PERMS_MASK;
	xnlock_put_irqrestore(&nklock, s);

	if ((desc_flags != O_RDWR && desc_flags != O_RDONLY) ||
	    ((prot & PROT_WRITE) && desc_flags == O_RDONLY)) {
		err = EACCES;
		goto err_shm_put;
	}

	map = (pse51_shm_map_t *) xnmalloc(sizeof(*map));
	if (!map) {
		err = EAGAIN;
		goto err_shm_put;
	}

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_free_map;
	}

	if (!shm->addr || off + len > shm->size) {
		err = ENXIO;
		up(&shm->maplock);
		goto err_free_map;
	}

	/* Align the heap address on a page boundary. */
	result = (void *)PAGE_ALIGN((u_long)shm->addr);
	map->addr = result = (void *)((char *)result + off);
	map->size = len;
	inith(&map->link);
	prependq(&shm->mappings, &map->link);
	up(&shm->maplock);

	return result;

  err_free_map:
	xnfree(map);
  err_shm_put:
	pse51_shm_put(shm, 1);
  error:
	thread_set_errno(err);
	return MAP_FAILED;
}
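For completeness, a user-space usage sketch following the standard POSIX pattern the documentation above refers to: create the object with shm_open(), size it with ftruncate(), then map it. The object name and error handling are illustrative.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int map_region(void **out, size_t len)
{
	int fd = shm_open("/example_shm", O_RDWR | O_CREAT, 0666);
	void *p;

	if (fd < 0)
		return -1;

	/* Size the object first; mapping an unsized object fails
	 * with ENXIO (see the retval list above). */
	if (ftruncate(fd, len) < 0) {
		close(fd);
		return -1;
	}

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping remains valid after close */

	if (p == MAP_FAILED)
		return -1;

	*out = p;
	return 0;
}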