Example no. 1
int rt_buffer_inquire(RT_BUFFER *bf, RT_BUFFER_INFO *info)
{
	int ret = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	strcpy(info->name, bf->name);
	info->iwaiters = xnsynch_nsleepers(&bf->isynch_base);
	info->owaiters = xnsynch_nsleepers(&bf->osynch_base);
	info->totalmem = bf->bufsz;
	info->availmem = bf->bufsz - bf->fillsz;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
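Every routine in this collection follows the same descriptor-validation skeleton: take the nucleus superlock, revalidate the user-supplied handle under that lock, and funnel every exit through a single unlock point. Stripped of the object-specific work, the shared pattern looks like the sketch below (a schematic only; RT_OBJECT and XENO_OBJECT_MAGIC are hypothetical placeholders, not real Xenomai symbols).

int rt_object_op(RT_OBJECT *obj)
{
	int err = 0;
	spl_t s;

	/* Enter the nucleus superlock, masking interrupts. */
	xnlock_get_irqsave(&nklock, s);

	/* Revalidate under the lock: the object may have been deleted
	   between the caller's last access and this point. */
	obj = xeno_h2obj_validate(obj, XENO_OBJECT_MAGIC, RT_OBJECT);
	if (obj == NULL) {
		err = xeno_handle_error(obj, XENO_OBJECT_MAGIC, RT_OBJECT);
		goto unlock_and_exit;
	}

	/* ... object-specific work, possibly ending in xnpod_schedule() ... */

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}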
Example no. 2
int rt_cond_signal(RT_COND *cond)
{
    int err = 0;
    spl_t s;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    if (thread2rtask(xnsynch_wakeup_one_sleeper(&cond->synch_base)) != NULL) {
        xnsynch_set_owner(&cond->synch_base, NULL);	/* No ownership to track. */
        xnpod_schedule();
    }

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
Example no. 3
int rt_buffer_clear(RT_BUFFER *bf)
{
	int ret = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	bf->wroff = 0;
	bf->rdoff = 0;
	bf->fillsz = 0;

	if (xnsynch_flush(&bf->osynch_base, 0) == XNSYNCH_RESCHED)
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example no. 4
int rt_event_clear(RT_EVENT *event, unsigned long mask, unsigned long *mask_r)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	if (mask_r)
		*mask_r = event->value;

	/* Clear the flags. */

	event->value &= ~mask;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 5
int rt_heap_inquire(RT_HEAP *heap, RT_HEAP_INFO *info)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		goto unlock_and_exit;
	}

	strcpy(info->name, heap->name);
	info->nwaiters = xnsynch_nsleepers(&heap->synch_base);
	info->heapsize = heap->csize;
	info->usablemem = xnheap_usable_mem(&heap->heap_base);
	info->usedmem = xnheap_used_mem(&heap->heap_base);
	info->mode = heap->mode;
#ifdef __XENO_SIM__
	info->phys_addr = 0;
#else
	info->phys_addr = (heap->mode & H_SINGLE) ? virt_to_phys(heap->sba) : 0;
#endif

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 6
int rt_cond_wait_epilogue(RT_MUTEX *mutex, unsigned lockcnt)
{
    int err;
    spl_t s;

    xnlock_get_irqsave(&nklock, s);

    mutex = xeno_h2obj_validate(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);

    if (!mutex) {
        err = xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
        goto unlock_and_exit;
    }

    err = rt_mutex_acquire(mutex, TM_INFINITE);

    if (!err)
        mutex->lockcnt = lockcnt; /* Restore the saved nesting count. */

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
Example no. 7
int rt_sem_v(RT_SEM *sem)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	if (xnsynch_wakeup_one_sleeper(&sem->synch_base) != NULL)
		xnpod_schedule();
	else if (!(sem->mode & S_PULSE))
		sem->count++;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 8
int rt_sem_broadcast(RT_SEM *sem)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	if (xnsynch_flush(&sem->synch_base, 0) == XNSYNCH_RESCHED)
		xnpod_schedule();

	sem->count = 0;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 9
int rt_heap_free(RT_HEAP *heap, void *block)
{
	int err, nwake;
	spl_t s;

	if (block == NULL)
		return -EINVAL;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		goto unlock_and_exit;
	}

	if (heap->mode & H_SINGLE) {	/* No-op in single-block mode. */
		err = 0;
		goto unlock_and_exit;
	}

	err = xnheap_free(&heap->heap_base, block);

	if (!err && xnsynch_nsleepers(&heap->synch_base) > 0) {
		xnpholder_t *holder, *nholder;

		nholder = getheadpq(xnsynch_wait_queue(&heap->synch_base));
		nwake = 0;

		while ((holder = nholder) != NULL) {
			RT_TASK *sleeper =
			    thread2rtask(link2thread(holder, plink));
			void *block;

			block = xnheap_alloc(&heap->heap_base,
					     sleeper->wait_args.heap.size);
			if (block) {
				nholder = xnsynch_wakeup_this_sleeper(
					&heap->synch_base, holder);
				sleeper->wait_args.heap.block = block;
				nwake++;
			} else
				nholder = nextpq(
					xnsynch_wait_queue(&heap->synch_base),
					holder);
		}

		if (nwake > 0)
			xnpod_schedule();
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
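Besides releasing the block, rt_heap_free() above retries the pending allocation of every sleeper blocked in rt_heap_alloc() and reschedules if any could be satisfied. For context, here is a minimal user-space sketch of the alloc/free round trip, assuming the Xenomai 2.x native skin (<native/heap.h>) and its default nanosecond time base; the heap name and sizes are illustrative.

#include <native/heap.h>

static RT_HEAP heap;

int heap_demo(void)
{
	void *block;
	int err;

	err = rt_heap_create(&heap, "DemoHeap", 8192, H_PRIO);
	if (err)
		return err;

	/* May block up to 1 ms for memory; a concurrent rt_heap_free()
	   can satisfy the request, as implemented above. */
	err = rt_heap_alloc(&heap, 256, 1000000, &block);
	if (!err)
		err = rt_heap_free(&heap, block);

	rt_heap_delete(&heap);
	return err;
}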
Example no. 10
int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
{
	int err = 0;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	xeno_mark_deleted(heap);

	/* Get out of the nklocked section before releasing the heap
	   memory, since we are about to invoke Linux kernel
	   services. */

	xnlock_put_irqrestore(&nklock, s);

	/*
	 * The heap descriptor has been marked as deleted before we
	 * released the superlock, thus preventing any subsequent
	 * call to rt_heap_delete() from succeeding, so now we can
	 * actually destroy it safely.
	 */

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (heap->mode & H_MAPPABLE)
		err = xnheap_destroy_mapped(&heap->heap_base,
					    __heap_post_release, mapaddr);
	else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
		err = xnheap_destroy(&heap->heap_base, &__heap_flush_private, NULL);

	xnlock_get_irqsave(&nklock, s);

	if (err)
		heap->magic = XENO_HEAP_MAGIC;
	else if (!(heap->mode & H_MAPPABLE))
		__heap_post_release(&heap->heap_base);

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 11
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
    struct vfile_priv *priv = xnvfile_iterator_priv(it);
    RT_COND *cond = xnvfile_priv(it->vfile);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);
    if (cond == NULL)
        return -EIDRM;

    priv->curr = getheadpq(xnsynch_wait_queue(&cond->synch_base));

    return xnsynch_nsleepers(&cond->synch_base);
}
Example no. 12
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_EVENT *event = xnvfile_priv(it->vfile);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);
	if (event == NULL)
		return -EIDRM;

	priv->curr = getheadpq(xnsynch_wait_queue(&event->synch_base));
	priv->value = event->value;

	return xnsynch_nsleepers(&event->synch_base);
}
Example no. 13
int rt_event_signal(RT_EVENT *event, unsigned long mask)
{
	xnpholder_t *holder, *nholder;
	int err = 0, resched = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	/* Post the flags. */

	event->value |= mask;

	/* And wakeup any sleeper having its request fulfilled. */

	nholder = getheadpq(xnsynch_wait_queue(&event->synch_base));

	while ((holder = nholder) != NULL) {
		RT_TASK *sleeper = thread2rtask(link2thread(holder, plink));
		int mode = sleeper->wait_args.event.mode;
		unsigned long bits = sleeper->wait_args.event.mask;

		if (((mode & EV_ANY) && (bits & event->value) != 0) ||
		    (!(mode & EV_ANY) && ((bits & event->value) == bits))) {
			sleeper->wait_args.event.mask = (bits & event->value);
			nholder =
			    xnsynch_wakeup_this_sleeper(&event->synch_base,
							holder);
			resched = 1;
		} else
			nholder =
			    nextpq(xnsynch_wait_queue(&event->synch_base),
				   holder);
	}

	if (resched)
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 14
int rt_sem_p_inner(RT_SEM *sem, xntmode_t timeout_mode, RTIME timeout)
{
	xnflags_t info;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	if (timeout == TM_NONBLOCK) {
		if (sem->count > 0)
			sem->count--;
		else
			err = -EWOULDBLOCK;

		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		err = -EPERM;
		goto unlock_and_exit;
	}

	if (sem->count > 0)
		--sem->count;
	else {
		info = xnsynch_sleep_on(&sem->synch_base,
					timeout, timeout_mode);
		if (info & XNRMID)
			err = -EIDRM;	/* Semaphore deleted while pending. */
		else if (info & XNTIMEO)
			err = -ETIMEDOUT;	/* Timeout. */
		else if (info & XNBREAK)
			err = -EINTR;	/* Unblocked. */
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
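rt_sem_p_inner() implements both the TM_NONBLOCK fast path and the blocking path behind the user-visible rt_sem_p()/rt_sem_p_until() calls; rt_sem_v() (Example no. 7) is its wake-up counterpart. A minimal user-space sketch, assuming the Xenomai 2.x native skin (<native/sem.h>); the semaphore name and flags are illustrative.

#include <native/sem.h>

static RT_SEM sem;

int sem_demo(void)
{
	int err;

	/* Initial count 0, FIFO ordering of waiters. */
	err = rt_sem_create(&sem, "DemoSem", 0, S_FIFO);
	if (err)
		return err;

	/* No sleeper yet, so this bumps the count (non-pulse mode). */
	rt_sem_v(&sem);

	/* Fast path: count > 0, so this returns at once; otherwise
	   TM_INFINITE would block in xnsynch_sleep_on(). */
	err = rt_sem_p(&sem, TM_INFINITE);

	rt_sem_delete(&sem);
	return err;
}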
Example no. 15
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_HEAP *heap = xnvfile_priv(it->vfile);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);
	if (heap == NULL)
		return -EIDRM;

	priv->curr = getheadpq(xnsynch_wait_queue(&heap->synch_base));
	priv->mode = heap->mode;
	priv->usable_mem = xnheap_usable_mem(&heap->heap_base);
	priv->used_mem = xnheap_used_mem(&heap->heap_base);
	priv->nrmaps = heap->heap_base.archdep.numaps;

	return xnsynch_nsleepers(&heap->synch_base);
}
Example no. 16
int rt_heap_delete_inner(RT_HEAP *heap, void __user *mapaddr)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	xeno_mark_deleted(heap);

	/* Get out of the nklocked section before releasing the heap
	   memory, since we are about to invoke Linux kernel
	   services. */

	xnlock_put_irqrestore(&nklock, s);

	/*
	 * The heap descriptor has been marked as deleted before we
	 * released the superlock, thus preventing any subsequent call
	 * to rt_heap_delete() from succeeding, so now we can actually
	 * destroy it safely.
	 */

#ifndef __XENO_SIM__
	if (heap->mode & H_MAPPABLE)
		xnheap_destroy_mapped(&heap->heap_base,
				      __heap_post_release, mapaddr);
	else
#endif
	{
		xnheap_destroy(&heap->heap_base, &__heap_flush_private, NULL);
		__heap_post_release(&heap->heap_base);
	}

	return 0;
}
Example no. 17
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_BUFFER *bf = xnvfile_priv(it->vfile);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL)
		return -EIDRM;

	/* Start collecting records from the input wait side. */
	priv->curr = getheadpq(xnsynch_wait_queue(&bf->isynch_base));
	priv->mode = bf->mode;
	priv->bufsz = bf->bufsz;
	priv->fillsz = bf->fillsz;
	priv->input = 1;

	return xnsynch_nsleepers(&bf->isynch_base) +
		xnsynch_nsleepers(&bf->osynch_base);
}
Example no. 18
int rt_intr_delete(RT_INTR *intr)
{
	int err = 0, rc = XNSYNCH_DONE;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	removeq(intr->rqueue, &intr->rlink);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	rc = xnsynch_destroy(&intr->synch_base);
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	if (intr->handle)
		xnregistry_remove(intr->handle);

	xeno_mark_deleted(intr);

	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_destroy(&intr->intr_base);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

	return err;
}
Example no. 19
int rt_intr_disable(RT_INTR *intr)
{
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		goto unlock_and_exit;
	}

	err = xnintr_disable(&intr->intr_base);

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 20
int rt_event_delete(RT_EVENT *event)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	removeq(event->rqueue, &event->rlink);

	rc = xnsynch_destroy(&event->synch_base);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (event->handle)
		xnregistry_remove(event->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	xeno_mark_deleted(event);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 21
int rt_buffer_delete(RT_BUFFER *bf)
{
	int ret = 0, resched;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	xnarch_free_host_mem(bf->bufmem, bf->bufsz);
	removeq(bf->rqueue, &bf->rlink);
	resched = xnsynch_destroy(&bf->isynch_base) == XNSYNCH_RESCHED;
	resched += xnsynch_destroy(&bf->osynch_base) == XNSYNCH_RESCHED;

	if (bf->handle)
		xnregistry_remove(bf->handle);

	xeno_mark_deleted(bf);

	if (resched)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example no. 22
int rt_cond_inquire(RT_COND *cond, RT_COND_INFO *info)
{
    int err = 0;
    spl_t s;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    strcpy(info->name, cond->name);
    info->nwaiters = xnsynch_nsleepers(&cond->synch_base);

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
Example no. 23
int rt_cond_broadcast(RT_COND *cond)
{
    int err = 0;
    spl_t s;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    if (xnsynch_flush(&cond->synch_base, 0) == XNSYNCH_RESCHED)
        xnpod_schedule();

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
Example no. 24
int rt_sem_delete(RT_SEM *sem)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	removeq(sem->rqueue, &sem->rlink);

	rc = xnsynch_destroy(&sem->synch_base);

	if (sem->handle)
		xnregistry_remove(sem->handle);

	xeno_mark_deleted(sem);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 25
int rt_intr_inquire(RT_INTR *intr, RT_INTR_INFO *info)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		goto unlock_and_exit;
	}

	strcpy(info->name, intr->name);
	info->hits = __intr_get_hits(intr);
	info->irq = intr->intr_base.irq;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 26
int rt_event_inquire(RT_EVENT *event, RT_EVENT_INFO *info)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	strcpy(info->name, event->name);
	info->value = event->value;
	info->nwaiters = xnsynch_nsleepers(&event->synch_base);

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 27
int rt_sem_inquire(RT_SEM *sem, RT_SEM_INFO *info)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	strcpy(info->name, sem->name);
	info->count = sem->count;
	info->nwaiters = xnsynch_nsleepers(&sem->synch_base);

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 28
int rt_event_wait_inner(RT_EVENT *event,
			unsigned long mask,
			unsigned long *mask_r,
			int mode, xntmode_t timeout_mode, RTIME timeout)
{
	RT_TASK *task;
	xnflags_t info;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	if (!mask) {
		*mask_r = event->value;
		goto unlock_and_exit;
	}

	if (timeout == TM_NONBLOCK) {
		unsigned long bits = (event->value & mask);
		*mask_r = bits;

		if (mode & EV_ANY) {
			if (!bits)
				err = -EWOULDBLOCK;
		} else if (bits != mask)
			err = -EWOULDBLOCK;

		goto unlock_and_exit;
	}

	if (((mode & EV_ANY) && (mask & event->value) != 0) ||
	    (!(mode & EV_ANY) && ((mask & event->value) == mask))) {
		*mask_r = (event->value & mask);
		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		err = -EPERM;
		goto unlock_and_exit;
	}

	task = xeno_current_task();
	task->wait_args.event.mode = mode;
	task->wait_args.event.mask = mask;
	info = xnsynch_sleep_on(&event->synch_base,
				timeout, timeout_mode);
	if (info & XNRMID)
		err = -EIDRM;	/* Event group deleted while pending. */
	else if (info & XNTIMEO)
		err = -ETIMEDOUT;	/* Timeout. */
	else if (info & XNBREAK)
		err = -EINTR;	/* Unblocked. */
	/*
	 * The returned mask is only significant if the operation has
	 * succeeded, but we always write it back anyway.
	 */
	*mask_r = task->wait_args.event.mask;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
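rt_event_wait_inner() backs the user-visible rt_event_wait()/rt_event_wait_until() pair; together with rt_event_signal() (Example no. 13) it provides the usual AND/OR flag-group semantics. A minimal user-space sketch, assuming the Xenomai 2.x native skin (<native/event.h>); the bit names are hypothetical.

#include <native/event.h>

#define EVT_RX 0x1
#define EVT_TX 0x2

static RT_EVENT group;

int event_demo(void)
{
	unsigned long mask_r;
	int err;

	err = rt_event_create(&group, "DemoEvent", 0, EV_PRIO);
	if (err)
		return err;

	rt_event_signal(&group, EVT_RX | EVT_TX);

	/* EV_ALL: both bits must be set (the !EV_ANY branch above);
	   the satisfied bits are returned through mask_r. */
	err = rt_event_wait(&group, EVT_RX | EVT_TX, &mask_r,
			    EV_ALL, TM_NONBLOCK);

	rt_event_delete(&group);
	return err;
}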
Example no. 29
int rt_cond_wait_prologue(RT_COND *cond, RT_MUTEX *mutex, unsigned *plockcnt,
                          xntmode_t timeout_mode, RTIME timeout)
{
    xnthread_t *thread;
    xnflags_t info;
    spl_t s;
    int err;

    if (timeout == TM_NONBLOCK)
        return -EWOULDBLOCK;

    if (xnpod_unblockable_p())
        return -EPERM;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    mutex = xeno_h2obj_validate(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);

    if (!mutex) {
        err = xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
        goto unlock_and_exit;
    }

    thread = xnpod_current_thread();

    err = xnsynch_owner_check(&mutex->synch_base, thread);

    if (err)
        goto unlock_and_exit;

    /*
     * We can't use rt_mutex_release() here, since that might
     * reschedule before we enter xnsynch_sleep_on().
     */
    *plockcnt = mutex->lockcnt; /* Save the count; the mutex is released even if nested. */

    mutex->lockcnt = 0;

    xnsynch_release(&mutex->synch_base);
    /* Scheduling deferred */

    info = xnsynch_sleep_on(&cond->synch_base,
                            timeout, timeout_mode);
    if (info & XNRMID)
        err = -EIDRM;	/* Condvar deleted while pending. */
    else if (info & XNTIMEO)
        err = -ETIMEDOUT;	/* Timeout. */
    else if (info & XNBREAK) {
        err = -EINTR;	/* Unblocked. */
    }

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
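The prologue/epilogue pair above splits rt_cond_wait() into an atomic release-and-sleep step and a reacquire step that restores the saved nesting count. From user space this split is invisible and the classic predicate loop applies, as in this sketch (assuming the Xenomai 2.x native skin, <native/cond.h> and <native/mutex.h>; data_available is a hypothetical predicate).

#include <native/mutex.h>
#include <native/cond.h>

static RT_MUTEX lock;
static RT_COND ready;
static int data_available;

int wait_for_data(void)
{
	int err = rt_mutex_acquire(&lock, TM_INFINITE);
	if (err)
		return err;

	/* Re-check the predicate after every wakeup: rt_cond_wait()
	   releases and re-acquires the mutex around the sleep. */
	while (!data_available) {
		err = rt_cond_wait(&ready, &lock, TM_INFINITE);
		if (err)
			break;
	}

	rt_mutex_release(&lock);
	return err;
}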
Example no. 30
ssize_t rt_buffer_read_inner(RT_BUFFER *bf,
			     struct xnbufd *bufd,
			     xntmode_t timeout_mode, RTIME timeout)
{
	xnthread_t *thread, *waiter;
	size_t len, rbytes, n;
	xnflags_t info;
	u_long rdtoken;
	off_t rdoff;
	ssize_t ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * We may only return complete messages to readers, so there
	 * is no point in waiting for messages which are larger than
	 * what the buffer can hold.
	 */
	len = bufd->b_len;
	if (len > bf->bufsz) {
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	if (len == 0) {
		ret = 0;
		goto unlock_and_exit;
	}

	if (timeout_mode == XN_RELATIVE &&
	    timeout != TM_NONBLOCK && timeout != TM_INFINITE) {
		/*
		 * We may sleep several times before receiving the
		 * data, so let's always use an absolute time spec.
		 */
		timeout_mode = XN_REALTIME;
		timeout += xntbase_get_time(__native_tbase);
	}

redo:
	for (;;) {
		/*
		 * We should be able to read a complete message of the
		 * requested length, or block.
		 */
		if (bf->fillsz < len)
			goto wait;

		/*
		 * Draw the next read token so that we can later
		 * detect preemption.
		 */
		rdtoken = ++bf->rdtoken;

		/* Read from the buffer in a circular way. */
		rdoff = bf->rdoff;
		rbytes = len;

		do {
			if (rdoff + rbytes > bf->bufsz)
				n = bf->bufsz - rdoff;
			else
				n = rbytes;
			/*
			 * Release the nklock while retrieving the
			 * data to keep latency low.
			 */

			xnlock_put_irqrestore(&nklock, s);

			ret = xnbufd_copy_from_kmem(bufd, bf->bufmem + rdoff, n);
			if (ret < 0)
				return ret;

			xnlock_get_irqsave(&nklock, s);
			/*
			 * In case we were preempted while retrieving
			 * the message, we have to re-read the whole
			 * thing.
			 */
			if (bf->rdtoken != rdtoken) {
				xnbufd_reset(bufd);
				goto redo;
			}

			rdoff = (rdoff + n) % bf->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bf->fillsz -= len;
		bf->rdoff = rdoff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads pending on the output wait
		 * queue, if we freed enough room for the leading one
		 * to post its message.
		 */
		waiter = xnsynch_peek_pendq(&bf->osynch_base);
		if (waiter && waiter->wait_u.size + bf->fillsz <= bf->bufsz) {
			if (xnsynch_flush(&bf->osynch_base, 0) == XNSYNCH_RESCHED)
				xnpod_schedule();
		}

		/*
		 * We cannot fail anymore once some data has been
		 * copied via the buffer descriptor, so no need to
		 * check for any reason to invalidate the latter.
		 */
		goto unlock_and_exit;

	wait:
		if (timeout_mode == XN_RELATIVE && timeout == TM_NONBLOCK) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (xnpod_unblockable_p()) {
			ret = -EPERM;
			break;
		}

		/*
		 * Check whether writers are already waiting for
		 * sending data, while we are about to wait for
		 * receiving some. In such a case, we have a
		 * pathological use of the buffer. We must allow for a
		 * short read to prevent a deadlock.
		 */
		if (bf->fillsz > 0 &&
		    xnsynch_nsleepers(&bf->osynch_base) > 0) {
			len = bf->fillsz;
			goto redo;
		}

		thread = xnpod_current_thread();
		thread->wait_u.bufd = bufd;
		info = xnsynch_sleep_on(&bf->isynch_base,
					timeout, timeout_mode);
		if (info & XNRMID) {
			ret = -EIDRM;	/* Buffer deleted while pending. */
			break;
		} else if (info & XNTIMEO) {
			ret = -ETIMEDOUT;	/* Timeout. */
			break;
		} else if (info & XNBREAK) {
			ret = -EINTR;	/* Unblocked. */
			break;
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
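rt_buffer_read_inner() is the engine behind rt_buffer_read()/rt_buffer_read_until(): it returns complete messages only, copies data with the superlock temporarily dropped, and uses the read token to detect preemption. A minimal user-space round trip, assuming the Xenomai 2.x native skin (<native/buffer.h>); name and sizes are illustrative.

#include <native/buffer.h>

static RT_BUFFER bf;

int buffer_demo(void)
{
	char out[] = "hello", in[sizeof(out)];
	ssize_t n;
	int err;

	err = rt_buffer_create(&bf, "DemoBuffer", 64, B_FIFO);
	if (err)
		return err;

	/* Blocks until enough free space is available. */
	n = rt_buffer_write(&bf, out, sizeof(out), TM_INFINITE);
	if (n < 0) {
		err = (int)n;
	} else {
		/* Blocks until a complete sizeof(in)-byte message can be
		   read; a request larger than bufsz yields -EINVAL. */
		n = rt_buffer_read(&bf, in, sizeof(in), TM_INFINITE);
		err = n < 0 ? (int)n : 0;
	}

	rt_buffer_delete(&bf);
	return err;
}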