Example 1
void xnmod_alloc_glinks(xnqueue_t *freehq)
{
	xngholder_t *sholder, *eholder;

	sholder = xnheap_alloc(&kheap,
			       sizeof(xngholder_t) * XNMOD_GHOLDER_REALLOC);

	if (!sholder) {
		/* If we are running out of memory but still have some free
		   holders, just return silently, hoping that the contention
		   will disappear before we have no other choice than
		   allocating memory eventually. Otherwise, we have to raise a
		   fatal error right now. */

		if (emptyq_p(freehq))
			xnpod_fatal("cannot allocate generic holders");

		return;
	}

	for (eholder = sholder + XNMOD_GHOLDER_REALLOC;
	     sholder < eholder; sholder++) {
		inith(&sholder->glink.plink);
		appendq(freehq, &sholder->glink.plink);
	}
}
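
For context, here is a minimal consumer sketch for the free-holder queue filled above, assuming the Xenomai 2.x queue API (countq(), getq()) already used in these examples; the xnmod_get_gholder() name, the refill threshold, and the direct cast are illustrative, not part of the original code.

xngholder_t *xnmod_get_gholder(xnqueue_t *freehq)
{
	xnholder_t *holder;

	/* Refill before the free queue drains completely, so that
	   xnmod_alloc_glinks() may still bail out silently under
	   memory pressure. */
	if (countq(freehq) < XNMOD_GHOLDER_REALLOC / 2)
		xnmod_alloc_glinks(freehq);

	holder = getq(freehq);
	if (holder == NULL)
		return NULL;

	/* Assumes glink.plink sits at offset zero of xngholder_t,
	   which is what makes the plain cast back legal. */
	return (xngholder_t *)holder;
}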
Example 2
static int __wind_wd_wait(struct task_struct *curr, struct pt_regs *regs)
{
	xnholder_t *holder;
	wind_rholder_t *rh;
	WIND_TCB *pTcb;
	wind_wd_t *wd;
	int err = 0;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg1(regs),
			    sizeof(wd->wdt)))
		return -EFAULT;

	rh = wind_get_rholder();

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_task_current(curr);

	if (xnthread_base_priority(&pTcb->threadbase) != XNCORE_IRQ_PRIO)
		/* Renice the waiter above all regular tasks if needed. */
		xnpod_renice_thread(&pTcb->threadbase, XNCORE_IRQ_PRIO);

	if (!emptyq_p(&rh->wdpending))
		goto pull_event;

	xnsynch_sleep_on(&rh->wdsynch, XN_INFINITE, XN_RELATIVE);

	if (xnthread_test_info(&pTcb->threadbase, XNBREAK)) {
		err = -EINTR;	/* Unblocked. */
		goto unlock_and_exit;
	}
	
	if (xnthread_test_info(&pTcb->threadbase, XNRMID)) {
		err = -EIDRM;	/* Watchdog deleted while pending. */
		goto unlock_and_exit;
	}

 pull_event:

	holder = getq(&rh->wdpending);

	if (holder) {
		wd = link2wind_wd(holder);
		/* We need the following to mark the watchdog as unqueued. */
		inith(holder);
		xnlock_put_irqrestore(&nklock, s);
		if (__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
				      &wd->wdt, sizeof(wd->wdt)))
			return -EFAULT;
		return 0;
	}

 unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example 3
static int __wind_wd_wait(struct pt_regs *regs)
{
	union xnsched_policy_param param;
	xnholder_t *holder;
	wind_rholder_t *rh;
	WIND_TCB *pTcb;
	wind_wd_t *wd;
	int err = 0;
	spl_t s;

	rh = wind_get_rholder();

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_task_current(current);

	if (xnthread_base_priority(&pTcb->threadbase) != XNSCHED_IRQ_PRIO) {
		/* Boost the waiter above all regular tasks if needed. */
		param.rt.prio = XNSCHED_IRQ_PRIO;
		xnpod_set_thread_schedparam(&pTcb->threadbase,
					    &xnsched_class_rt, &param);
	}

	if (!emptyq_p(&rh->wdpending))
		goto pull_event;

	xnsynch_sleep_on(&rh->wdsynch, XN_INFINITE, XN_RELATIVE);

	if (xnthread_test_info(&pTcb->threadbase, XNBREAK)) {
		err = -EINTR;	/* Unblocked. */
		goto unlock_and_exit;
	}

	if (xnthread_test_info(&pTcb->threadbase, XNRMID)) {
		err = -EIDRM;	/* Watchdog deleted while pending. */
		goto unlock_and_exit;
	}

 pull_event:

	holder = getq(&rh->wdpending);

	if (holder) {
		wd = link2wind_wd(holder);
		/* We need the following to mark the watchdog as unqueued. */
		inith(holder);
		xnlock_put_irqrestore(&nklock, s);
		return __xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
					      &wd->wdt, sizeof(wd->wdt));
	}

 unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
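
Both versions above implement the blocking side of the VxWorks skin watchdog support. A minimal user-side sketch of what they serve, assuming the Xenomai VxWorks skin header and the classic wdLib interface; the 100-tick delay and the handler are arbitrary.

#include <vxworks/vxworks.h>

static void watchdog_handler(long arg)
{
	/* Invoked once the delay elapses; until then the skin's
	   watchdog server task sleeps in __wind_wd_wait(). */
}

void arm_watchdog(void)
{
	WDOG_ID wd = wdCreate();

	if (wd != NULL)
		wdStart(wd, 100, (FUNCPTR)watchdog_handler, 0);
}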
Example 4
void removemlq(struct xnsched_mlq *q, struct xnpholder *h)
{
	int idx = h->prio;
	struct xnqueue *queue = &q->queue[idx];

	q->elems--;

	removeq(queue, &h->plink);

	if (emptyq_p(queue)) {
		int hi = idx / BITS_PER_LONG;
		int lo = idx % BITS_PER_LONG;
		__clrbits(q->lomap[hi], 1UL << lo);
		if (q->lomap[hi] == 0)
			__clrbits(q->himap, 1UL << hi);
	}
}
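
The himap/lomap pair maintained above forms a two-level bitmap over the priority queues. Here is a sketch of the matching lookup direction, assuming ffnz() returns the index of the least significant set bit as elsewhere in the nucleus; the helper name is illustrative.

static inline struct xnqueue *mlq_head_queue(struct xnsched_mlq *q)
{
	int hi, lo;

	if (q->elems == 0)
		return NULL;	/* Every priority queue is empty. */

	hi = ffnz(q->himap);		/* First lomap word with a bit set. */
	lo = ffnz(q->lomap[hi]);	/* First non-empty queue in that word. */

	return &q->queue[hi * BITS_PER_LONG + lo];
}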
Example 5
static unsigned xnpipe_poll(struct file *file, poll_table *pt)
{
	struct xnpipe_state *state = file->private_data;
	unsigned r_mask = 0, w_mask = 0;
	spl_t s;

	poll_wait(file, &state->readq, pt);

	xnlock_get_irqsave(&nklock, s);

	if (testbits(state->status, XNPIPE_KERN_CONN))
		w_mask |= (POLLOUT | POLLWRNORM);
	else
		r_mask |= POLLHUP;

	if (!emptyq_p(&state->outq))
		r_mask |= (POLLIN | POLLRDNORM);
	else
		/*
		 * Processes which have issued a timed-out poll request
		 * remain linked to the sleepers queue, and will be
		 * silently unlinked the next time the Xenomai side
		 * kicks xnpipe_wakeup_proc().
		 */
		xnpipe_enqueue_wait(state, XNPIPE_USER_WREAD);

	xnlock_put_irqrestore(&nklock, s);

	/*
	 * A descriptor is always ready for writing with the current
	 * implementation, so there is no need to have/handle the
	 * writeq queue so far.
	 */

	return r_mask | w_mask;
}
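
From user-space, the handler above is reached through poll(2) on the pipe device node. A minimal sketch, assuming the message pipe is exposed as /dev/rtp0.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	char buf[64];
	ssize_t n;

	pfd.fd = open("/dev/rtp0", O_RDWR);
	if (pfd.fd < 0)
		return 1;

	pfd.events = POLLIN;

	/* Blocks until the kernel side queues a message (POLLIN)
	   or drops the connection (POLLHUP). */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n > 0)
			printf("received %zd bytes\n", n);
	}

	close(pfd.fd);
	return 0;
}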
Example 6
static ssize_t xnpipe_write(struct file *file,
			    const char *buf, size_t count, loff_t *ppos)
{
	struct xnpipe_state *state = file->private_data;
	struct xnpipe_mh *mh;
	int pollnum, ret;
	spl_t s;

	if (count == 0)
		return 0;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	xnlock_get_irqsave(&nklock, s);

 retry:

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EPIPE;
	}

	pollnum = countq(&state->inq) + countq(&state->outq);
	xnlock_put_irqrestore(&nklock, s);

	mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate);
	if (mh == (struct xnpipe_mh *)-1)
		return -ENOMEM;

	if (mh == NULL) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		xnlock_get_irqsave(&nklock, s);
		if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
				pollnum >
				countq(&state->inq) + countq(&state->outq))) {
			xnlock_put_irqrestore(&nklock, s);
			return -ERESTARTSYS;
		}
		goto retry;
	}

	inith(xnpipe_m_link(mh));
	xnpipe_m_size(mh) = count;
	xnpipe_m_rdoff(mh) = 0;

	if (copy_from_user(xnpipe_m_data(mh), buf, count)) {
		state->ops.free_ibuf(mh, state->xstate);
		return -EFAULT;
	}

	xnlock_get_irqsave(&nklock, s);

	appendq(&state->inq, &mh->link);

	/* Wake up a Xenomai sleeper if any. */
	if (xnsynch_wakeup_one_sleeper(&state->synchbase))
		xnpod_schedule();

	if (state->ops.input) {
		ret = state->ops.input(mh, 0, state->xstate);
		if (ret)
			count = (size_t)ret;
	}

	if (file->f_flags & O_SYNC) {
		if (!emptyq_p(&state->inq)) {
			if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
					emptyq_p(&state->inq)))
				count = -ERESTARTSYS;
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	return (ssize_t)count;
}
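
The writer relies on a return convention from alloc_ibuf: NULL denotes a transient shortage worth blocking on, while (struct xnpipe_mh *)-1 denotes a hard failure. A minimal handler sketch, assuming allocation from the nucleus kheap; the handler name is illustrative.

static struct xnpipe_mh *example_alloc_ibuf(size_t size, void *xstate)
{
	if (size > xnheap_max_contiguous(&kheap))
		/* Can never succeed: xnpipe_write() fails with -ENOMEM. */
		return (struct xnpipe_mh *)-1;

	/* A NULL result makes xnpipe_write() block on XNPIPE_USER_WSYNC
	   until a reader frees buffer space, then retry. */
	return xnheap_alloc(&kheap, size);
}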
Example 7
static ssize_t xnpipe_read(struct file *file,
			   char *buf, size_t count, loff_t *ppos)
{
	struct xnpipe_state *state = file->private_data;
	int sigpending, err = 0;
	size_t nbytes, inbytes;
	struct xnpipe_mh *mh;
	struct xnholder *h;
	ssize_t ret;
	spl_t s;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EPIPE;
	}
	/*
	 * Queue probe and proc enqueuing must be seen atomically,
	 * including from the Xenomai side.
	 */
	h = getq(&state->outq);
	mh = link2mh(h);

	if (mh == NULL) {
		if (file->f_flags & O_NONBLOCK) {
			xnlock_put_irqrestore(&nklock, s);
			return -EWOULDBLOCK;
		}

		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
					 !emptyq_p(&state->outq));
		h = getq(&state->outq);
		mh = link2mh(h);

		if (mh == NULL) {
			xnlock_put_irqrestore(&nklock, s);
			return sigpending ? -ERESTARTSYS : 0;
		}
	}

	/*
	 * We allow more data to be appended to the current message
	 * bucket while its contents are being copied to the user
	 * buffer, therefore we need to loop until either: 1) all the
	 * data has been copied, or 2) the user buffer space is
	 * entirely consumed.
	 */

	inbytes = 0;

	for (;;) {
		nbytes = xnpipe_m_size(mh) - xnpipe_m_rdoff(mh);

		if (nbytes + inbytes > count)
			nbytes = count - inbytes;

		if (nbytes == 0)
			break;

		xnlock_put_irqrestore(&nklock, s);
		/* More data could be appended while doing this: */
		err = __copy_to_user(buf + inbytes,
				     xnpipe_m_data(mh) + xnpipe_m_rdoff(mh),
				     nbytes);
		xnlock_get_irqsave(&nklock, s);

		if (err) {
			err = -EFAULT;
			break;
		}

		inbytes += nbytes;
		xnpipe_m_rdoff(mh) += nbytes;
	}

	state->ionrd -= inbytes;
	ret = inbytes;

	if (xnpipe_m_size(mh) > xnpipe_m_rdoff(mh))
		prependq(&state->outq, &mh->link);
	else {
		/*
		 * We always want to fire the output handler because
		 * whatever the error state is for userland (e.g
		 * -EFAULT), we did pull a message from our output
		 * queue.
		 */
		if (state->ops.output)
			state->ops.output(mh, state->xstate);
		xnlock_put_irqrestore(&nklock, s);
		state->ops.free_obuf(mh, state->xstate);
		xnlock_get_irqsave(&nklock, s);
		if (testbits(state->status, XNPIPE_USER_WSYNC)) {
			__setbits(state->status, XNPIPE_USER_WSYNC_READY);
			xnpipe_schedule_request();
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	return err ? : ret;
}
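
For the empty-queue checks above to work, link2mh() must map a NULL holder to a NULL message. A minimal sketch of that helper, assuming the usual container_of() pattern over the link member embedded in struct xnpipe_mh.

static inline struct xnpipe_mh *link2mh(struct xnholder *h)
{
	return h ? container_of(h, struct xnpipe_mh, link) : NULL;
}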
Example 8
/**
 * Truncate a file or shared memory object to a specified length.
 *
 * When used in kernel-space, this service sets the size of a shared memory
 * object opened with the shm_open() service to @a len. In user-space, this
 * service falls back to the regular Linux ftruncate() service for file
 * descriptors not obtained with shm_open(). When this service is used to
 * increase the size of a shared memory object, the added space is zero-filled.
 *
 * Shared memory objects are suitable for direct memory access (i.e. allocated
 * in physically contiguous memory) if O_DIRECT was passed to shm_open().
 *
 * Shared memory objects may only be resized if they are not currently mapped.
 *
 * @param fd file descriptor;
 *
 * @param len new length of the underlying file or shared memory object.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the specified length is invalid;
 * - EINVAL, the architecture can not honour the O_DIRECT flag;
 * - EINTR, this service was interrupted by a signal;
 * - EBUSY, @a fd is a shared memory object descriptor and the underlying shared
 *   memory is currently mapped;
 * - EFBIG, allocation of system memory failed.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/ftruncate.html">
 * Specification.</a>
 * 
 */
int ftruncate(int fd, off_t len)
{
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		err = -PTR_ERR(shm);
		xnlock_put_irqrestore(&nklock, s);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (len < 0) {
		err = EINVAL;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc);
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	/* Allocate one more page for alignment (the address returned by mmap
	   must be aligned on a page boundary). */
	if (len)
#ifdef CONFIG_XENO_OPT_PERVASIVE
		len = xnheap_rounded_size(len + PAGE_SIZE, PAGE_SIZE);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		len = xnheap_rounded_size(len + PAGE_SIZE, XNHEAP_PAGE_SIZE);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

	err = 0;
	if (emptyq_p(&shm->mappings)) {
		/* Temporary storage, in order to preserve the memory contents upon
		   resizing, if possible. */
		void *addr = NULL;
		size_t size = 0;

		if (shm->addr) {
			if (len == xnheap_extentsize(&shm->heapbase)) {
				/* Size unchanged, skip copy and reinit. */
				err = 0;
				goto err_up;
			}

			size = xnheap_max_contiguous(&shm->heapbase);
			addr = xnarch_alloc_host_mem(size);
			if (!addr) {
				err = ENOMEM;
				goto err_up;
			}

			memcpy(addr, shm->addr, size);

			xnheap_free(&shm->heapbase, shm->addr);
			xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);

			shm->addr = NULL;
			shm->size = 0;
		}

		if (len) {
			int flags = XNARCH_SHARED_HEAP_FLAGS |
				((desc_flags & O_DIRECT) ? GFP_DMA : 0);

			err = -xnheap_init_mapped(&shm->heapbase, len, flags);
			if (err)
				goto err_up;

			xnheap_set_label(&shm->heapbase,
					 "posix shm: %s", shm->nodebase.name);

			shm->size = xnheap_max_contiguous(&shm->heapbase);
			shm->addr = xnheap_alloc(&shm->heapbase, shm->size);
			/* Required. */
			memset(shm->addr, '\0', shm->size);

			/* Copy the previous contents. */
			if (addr)
				memcpy(shm->addr, addr,
				       shm->size < size ? shm->size : size);

			shm->size -= PAGE_SIZE;
		}

		if (addr)
			xnarch_free_host_mem(addr, size);
	} else if (len != xnheap_extentsize(&shm->heapbase))
		err = EBUSY;

 err_up:
	up(&shm->maplock);

 err_shm_put:
	pse51_shm_put(shm, 1);

	if (!err)
		return 0;

 error:
	thread_set_errno(err == ENOMEM ? EFBIG : err);
	return -1;
}
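
A minimal usage sketch matching the documented sequence: create the object, size it before mapping, then mmap() it. The object name "/xeno-shm" is arbitrary.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

void *create_shared_area(size_t len)
{
	void *p = NULL;
	int fd;

	fd = shm_open("/xeno-shm", O_RDWR | O_CREAT, 0666);
	if (fd < 0)
		return NULL;

	/* Size the object before mapping it: resizing is refused
	   (EBUSY) while mappings exist. */
	if (ftruncate(fd, len) == 0)
		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);

	close(fd);	/* The mapping remains valid after close(). */
	return p == MAP_FAILED ? NULL : p;
}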