Example #1
void pse51_semq_cleanup(pse51_kqueues_t *q)
{
	xnholder_t *holder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	while ((holder = getheadq(&q->semq)) != NULL) {
		pse51_sem_t *sem = link2sem(holder);
		pse51_node_t *node;
		xnlock_put_irqrestore(&nklock, s);
#if XENO_DEBUG(POSIX)
		if (sem->is_named)
			xnprintf("Posix: unlinking semaphore \"%s\".\n",
				 sem2named_sem(sem)->nodebase.name);
		else
			xnprintf("Posix: destroying semaphore %p.\n", sem);
#endif /* XENO_DEBUG(POSIX) */
		xnlock_get_irqsave(&nklock, s);
		if (sem->is_named)
			pse51_node_remove(&node,
					  sem2named_sem(sem)->nodebase.name,
					  PSE51_NAMED_SEM_MAGIC);
		xnlock_put_irqrestore(&nklock, s);
		sem_destroy_inner(sem, q);
		xnlock_get_irqsave(&nklock, s);
	}

	xnlock_put_irqrestore(&nklock, s);
}
Example #2
static pse51_shm_t *pse51_shm_lookup(void *addr)
{
	xnholder_t *holder;
	pse51_shm_t *shm = NULL;
	off_t off;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	for (holder = getheadq(&pse51_shmq);
	     holder; holder = nextq(&pse51_shmq, holder)) {
		shm = link2shm(holder);

		if (!shm->addr)
			continue;

		off = (off_t) (addr - shm->addr);
		if (off >= 0 && off < shm->size)
			break;
	}

	xnlock_put_irqrestore(&nklock, s);

	return holder ? shm : NULL;
}
Example #3
File: select.c Project: ArcEye/RTAI
/* Must be called with nklock locked irqs off */
int __xnselect_signal(struct xnselect *select_block, unsigned state)
{
	xnholder_t *holder;
	int resched;

	for(resched = 0, holder = getheadq(&select_block->bindings);
	    holder; holder = nextq(&select_block->bindings, holder)) {
		struct xnselect_binding *binding;
		struct xnselector *selector;

		binding = link2binding(holder, link);

		selector = binding->selector;
		if (state) {
			if (!__FD_ISSET__(binding->bit_index,
					&selector->fds[binding->type].pending)) {
				__FD_SET__(binding->bit_index,
					 &selector->fds[binding->type].pending);
				if (xnselect_wakeup(selector))
					resched = 1;
			}
		} else
			__FD_CLR__(binding->bit_index,
				 &selector->fds[binding->type].pending);
	}

	return resched;
}
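
The comment above states the locking contract: callers must hold nklock with IRQs off. A minimal sketch of the locked wrapper one would expect around this routine, using only the nklock helpers already shown throughout these examples (the wrapper name xnselect_signal is illustrative, not confirmed API):

/* Sketch of a caller honoring the "nklock held, irqs off" contract. */
static inline int xnselect_signal(struct xnselect *select_block, unsigned state)
{
	spl_t s;
	int resched;

	xnlock_get_irqsave(&nklock, s);
	resched = __xnselect_signal(select_block, state);
	xnlock_put_irqrestore(&nklock, s);

	return resched;
}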
Example #4
void vrtxmx_cleanup(void)
{
	xnholder_t *holder;

	while ((holder = getheadq(&vrtx_mx_q)) != NULL)
		mx_destroy_internal(link2vrtxmx(holder));

	xnmap_delete(vrtx_mx_idmap);
}
Example #5
/*
 * _shm_alloc allocates a chunk from the Fusion kheap, or allocates a new heap.
 */
void *_shm_alloc(unsigned long name, int size, int suprt, int in_kheap,
		 unsigned long *opaque)
{
	void *ret = NULL;
	xnholder_t *holder;
	xnshm_a_t *p;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p->name == name) {
			/* assert(size==p->size); */

			p->ref++;
			ret = p->chunk;
			*opaque = (unsigned long)p->heap;
			goto unlock_and_exit;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

	if (in_kheap) {
		p = kalloc_new_shm(name, size);
	} else {
		/* creating a new heap can suspend */
		xnlock_put_irqrestore(&nklock, s);
		p = create_new_heap(name, size, suprt);
		xnlock_get_irqsave(&nklock, s);
	}
	if (!p)
		goto unlock_and_exit;

	*opaque = (unsigned long)p->heap;
	appendq(&xnshm_allocq, &p->link);

#ifdef CONFIG_XENO_OPT_REGISTRY
	{
		p->handle = 0;
		num2nam(p->name, p->szName);
		xnregistry_enter(p->szName, p, &p->handle, &__shm_pnode);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	ret = p->chunk;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
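
A hypothetical kernel-side caller, to show the intended pairing with _shm_free(); nam2num() and the USE_VMALLOC support flag are assumptions taken from the usual RTAI shm interface, not confirmed by the snippet above:

	/* Hypothetical usage sketch: look up or create a 4 KiB named region,
	 * backed by its own heap (in_kheap == 0). */
	unsigned long opaque;
	void *mem = _shm_alloc(nam2num("MYSHM"), 4096, USE_VMALLOC,
			       0 /* allocate a dedicated heap */, &opaque);
	if (mem) {
		/* ... use the shared chunk ... */
		_shm_free(nam2num("MYSHM"));	/* drops the reference taken above */
	}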
Example #6
static int _shm_free(unsigned long name)
{
	int ret = 0;
	xnholder_t *holder;
	xnshm_a_t *p;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p->name == name && --p->ref == 0) {
#ifdef CONFIG_XENO_OPT_REGISTRY
			if (p->handle)
				xnregistry_remove(p->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
				/* Should we release the lock here?
				 * Can destroy_mapped suspend? [YES!]
				 */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				ret = xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				ret =
				    xnheap_destroy(p->heap,
						   &__heap_flush_private, NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				if (ret)
					goto unlock_and_exit;
				xnheap_free(&kheap, p->heap);
			}
			removeq(&xnshm_allocq, &p->link);
			ret = p->size;
			xnheap_free(&kheap, p);
			break;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example #7
/* try to unqueue message for reading */
static inline wind_msg_t *unqueue_msg(wind_msgq_t *queue)
{
	xnholder_t *holder;
	wind_msg_t *msg;

	holder = getheadq(&queue->msgq);
	if (holder == NULL)
		return NULL;

	msg = link2wind_msg(holder);
	removeq(&queue->msgq, holder);

	return msg;
}
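
Since the head is removed immediately after being fetched, the same function collapses to a single call, assuming the nucleus' getq() helper, which dequeues and returns the queue head in one step (or NULL when the queue is empty):

/* Equivalent one-step variant under that assumption. */
static inline wind_msg_t *unqueue_msg(wind_msgq_t *queue)
{
	xnholder_t *holder = getq(&queue->msgq);

	return holder ? link2wind_msg(holder) : NULL;
}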
Example #8
/**
 * Initialize an unnamed semaphore.
 *
 * This service initializes the semaphore @a sm, with the value @a value.
 *
 * This service fails if @a sm is already initialized or is a named semaphore.
 *
 * @param sm the semaphore to be initialized;
 *
 * @param pshared if zero, the new semaphore may only be used by threads in
 * the same process as the thread calling sem_init(); if non-zero, the new
 * semaphore may be used by any thread that has access to the memory where
 * the semaphore is allocated.
 *
 * @param value the semaphore initial value.
 *
 * @retval 0 on success,
 * @retval -1 with @a errno set if:
 * - EBUSY, the semaphore @a sm was already initialized;
 * - ENOSPC, insufficient memory exists in the system heap to initialize the
 *   semaphore, increase CONFIG_XENO_OPT_SYS_HEAPSZ;
 * - EINVAL, the @a value argument exceeds @a SEM_VALUE_MAX.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_init.html">
 * Specification.</a>
 *
 */
int sem_init(sem_t * sm, int pshared, unsigned value)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	pse51_sem_t *sem;
	xnqueue_t *semq;
	int err;
	spl_t s;

	sem = (pse51_sem_t *) xnmalloc(sizeof(pse51_sem_t));
	if (!sem) {
		err = ENOSPC;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);

	semq = &pse51_kqueues(pshared)->semq;

	if (shadow->magic == PSE51_SEM_MAGIC
	    || shadow->magic == PSE51_NAMED_SEM_MAGIC
	    || shadow->magic == ~PSE51_NAMED_SEM_MAGIC) {
		xnholder_t *holder;

		for (holder = getheadq(semq); holder;
		     holder = nextq(semq, holder))
			if (holder == &shadow->sem->link) {
				err = EBUSY;
				goto err_lock_put;
			}
	}

	err = pse51_sem_init_inner(sem, pshared, value);
	if (err)
		goto err_lock_put;

	shadow->magic = PSE51_SEM_MAGIC;
	shadow->sem = sem;
	xnlock_put_irqrestore(&nklock, s);

	return 0;

  err_lock_put:
	xnlock_put_irqrestore(&nklock, s);
	xnfree(sem);
  error:
	thread_set_errno(err);

	return -1;
}
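
A minimal usage sketch of the documented contract above, relying only on standard POSIX semantics:

#include <semaphore.h>
#include <stdio.h>

static sem_t pool_sem;

/* Initialize a process-private counting semaphore with two tokens;
 * sem_init() returns -1 and sets errno on failure, as documented. */
int pool_setup(void)
{
	if (sem_init(&pool_sem, 0 /* pshared */, 2 /* value */) == -1) {
		perror("sem_init");
		return -1;
	}
	return 0;
}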
Example #9
struct xnpholder *getheadmlq(struct xnsched_mlq *q)
{
	struct xnqueue *queue;
	struct xnpholder *h;

	if (emptymlq_p(q))
		return NULL;

	queue = &q->queue[ffsmlq(q)];
	h = (struct xnpholder *)getheadq(queue);

	XENO_ASSERT(QUEUES, h,
		    xnpod_fatal
		    ("corrupted multi-level queue, qslot=%p at %s:%d", q,
		     __FILE__, __LINE__);
		);

	return h;
}
Example #10
static int _shm_free(unsigned long name)
{
	xnholder_t *holder;
	xnshm_a_t *p;
	int ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p->name == name && --p->ref == 0) {
			removeq(&xnshm_allocq, &p->link);
			if (p->handle)
				xnregistry_remove(p->handle);

			xnlock_put_irqrestore(&nklock, s);

			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
#ifdef CONFIG_XENO_OPT_PERVASIVE
				xnheap_destroy_mapped(p->heap,
						      __heap_flush_shared,
						      NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_destroy(p->heap,
					       &__heap_flush_private, NULL);
				xnheap_free(&kheap, p->heap);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
			}
			ret = p->size;
			xnheap_free(&kheap, p);

			return ret;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
Example #11
void wind_task_cleanup(void)
{
	xnholder_t *holder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	while ((holder = getheadq(&wind_tasks_q)) != NULL) {
		WIND_TCB *pTcb = link2wind_task(holder);
		xnpod_abort_thread(&pTcb->threadbase);
		xnlock_sync_irq(&nklock, s);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnpod_remove_hook(XNHOOK_THREAD_DELETE, wind_task_delete_hook);
}
Example #12
void psostask_cleanup(void)
{
	xnholder_t *holder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	while ((holder = getheadq(&psostaskq)) != NULL) {
		psostask_t *task = link2psostask(holder);
		xnpod_abort_thread(&task->threadbase);
		xnlock_sync_irq(&nklock, s);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnpod_remove_hook(XNHOOK_THREAD_DELETE, psostask_delete_hook);
}
Example #13
void __rtai_task_pkg_cleanup(void)
{
	xnholder_t *holder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	while ((holder = getheadq(&__rtai_task_q)) != NULL) {
		RT_TASK *task = link2rtask(holder);
		xnpod_abort_thread(&task->thread_base);
		xnlock_sync_irq(&nklock, s);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnpod_remove_hook(XNHOOK_THREAD_DELETE, &__task_delete_hook);

	if (__rtai_task_sig)
		xnpod_remove_hook(XNHOOK_THREAD_SWITCH, &__task_switch_hook);
}
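
Examples #11 through #13 share one shape, sketched below. The loop terminates, presumably, because each skin's delete hook (registered earlier and removed at the end of the routine) dequeues the aborted task, so getheadq() eventually returns NULL; xnlock_sync_irq() gives the deletion a chance to run under the lock. thread_of() is a hypothetical accessor standing in for link2wind_task() and friends:

	/* Skeleton of the shared cleanup idiom (a sketch, not verbatim API use). */
	while ((holder = getheadq(&task_q)) != NULL) {
		xnpod_abort_thread(thread_of(holder));	/* hypothetical accessor */
		xnlock_sync_irq(&nklock, s);	/* let the deletion complete */
	}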
Example #14
void __rtai_shm_pkg_cleanup(void)
{
#if 0
	xnholder_t *holder;
	xnshm_a_t *p;
	char szName[6];

	/* Garbage collector: still to be added (locking problem). */
	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p) {
			num2nam(p->name, szName);
			printk
			    ("[RTAI -SHM] Cleanup of unfreed memory %s( %d ref.)\n",
			     szName, p->ref);
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
				/* FIXME: MUST release lock here.
				 */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_destroy(p->heap, &__heap_flush_private,
					       NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_free(&kheap, p->heap);
			}
			removeq(&xnshm_allocq, &p->link);
			xnheap_free(&kheap, p);
		}

		holder = nextq(&xnshm_allocq, holder);
	}
#endif
}
Example #15
void pse51_shm_pkg_cleanup(void)
{
	xnholder_t *holder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	while ((holder = getheadq(&pse51_shmq))) {
		pse51_shm_t *shm = link2shm(holder);
		pse51_node_t *node;

		pse51_node_remove(&node, shm->nodebase.name, PSE51_SHM_MAGIC);
		xnlock_put_irqrestore(&nklock, s);
#if XENO_DEBUG(POSIX)
		xnprintf("Posix: unlinking shared memory \"%s\".\n",
			 shm->nodebase.name);
#endif /* XENO_DEBUG(POSIX) */
		xnlock_get_irqsave(&nklock, s);
		pse51_shm_destroy(shm, 1);
	}

	xnlock_put_irqrestore(&nklock, s);
}
Example #16
void xntbase_adjust_time(xntbase_t *base, xnsticks_t delta)
{
	xnticks_t now;

#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
	if (xntbase_isolated_p(base)) {
		/* Only update the specified isolated base. */
		base->wallclock_offset += delta;
		__setbits(base->status, XNTBSET);
		xntslave_adjust(base2slave(base), delta);

	} else {
		xnholder_t *holder;
#endif /* CONFIG_XENO_OPT_TIMING_PERIODIC */
		/* Update all non-isolated bases in the system. */
		nktbase.wallclock_offset += xntbase_ticks2ns(base, delta);
		now = xnarch_get_cpu_time() + nktbase.wallclock_offset;
		xntimer_adjust_all_aperiodic(xntbase_ticks2ns(base, delta));

#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		for (holder = getheadq(&nktimebaseq);
		     holder != NULL; holder = nextq(&nktimebaseq, holder)) {
			xntbase_t *tbase = link2tbase(holder);
			if (tbase == &nktbase || xntbase_isolated_p(tbase))
				continue;

			tbase->wallclock_offset =
				xntbase_ns2ticks(tbase, now) -
				xntbase_get_jiffies(tbase);
			xntslave_adjust(base2slave(tbase), delta);
		}
	}
#endif /* CONFIG_XENO_OPT_TIMING_PERIODIC */

	trace_mark(xn_nucleus, tbase_adjust, "base %s delta %Lu",
		   base->name, delta);
}
Example #17
u_long t_ident(const char *name, u_long node, u_long *tid_r)
{
	u_long err = SUCCESS;
	xnholder_t *holder;
	psostask_t *task;
	spl_t s;

	if (node > 1)
		return ERR_NODENO;

	if (!name) {
		if (xnpod_unblockable_p())
			return ERR_OBJID;
		*tid_r = (u_long)psos_current_task();
		return SUCCESS;
	}

	xnlock_get_irqsave(&nklock, s);

	for (holder = getheadq(&psostaskq);
	     holder; holder = nextq(&psostaskq, holder)) {
		task = link2psostask(holder);

		if (!strcmp(task->name, name)) {
			*tid_r = (u_long)task;
			goto unlock_and_exit;
		}
	}

	err = ERR_OBJNF;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #18
struct xnpholder *findmlqh(struct xnsched_mlq *q, int prio)
{
	struct xnqueue *queue = &q->queue[indexmlq(q, prio)];
	return (struct xnpholder *)getheadq(queue);
}
Example #19
static void xnpipe_wakeup_proc(void *cookie)
{
	struct xnpipe_state *state;
	struct xnholder *h, *nh;
	u_long rbits;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	nh = getheadq(&xnpipe_sleepq);
	while ((h = nh) != NULL) {
		nh = nextq(&xnpipe_sleepq, h);
		state = link2xnpipe(h, slink);
		rbits = testbits(state->status, XNPIPE_USER_ALL_READY);
		if (rbits) {
			__clrbits(state->status, rbits);
			/*
			 * We could be switched out as a result of
			 * waking up a waiter, so do the housekeeping
			 * first and release nklock before calling
			 * wake_up_interruptible().
			 */
			if ((rbits & XNPIPE_USER_WREAD_READY) != 0) {
				if (waitqueue_active(&state->readq)) {
					xnlock_put_irqrestore(&nklock, s);
					wake_up_interruptible(&state->readq);
					xnlock_get_irqsave(&nklock, s);
				}
			}
			if ((rbits & XNPIPE_USER_WSYNC_READY) != 0) {
				if (waitqueue_active(&state->syncq)) {
					xnlock_put_irqrestore(&nklock, s);
					wake_up_interruptible(&state->syncq);
					xnlock_get_irqsave(&nklock, s);
				}
			}
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
			/*
			 * Assume a waiter might have entered/left the
			 * queue, so we need to refetch the sleep
			 * queue head to be safe.
			 */
			nh = getheadq(&xnpipe_sleepq);
#endif
		}
	}

	/*
	 * Scan the async queue, sending the proper signal to
	 * subscribers.
	 */
	nh = getheadq(&xnpipe_asyncq);
	while ((h = nh) != NULL) {
		nh = nextq(&xnpipe_asyncq, h);
		state = link2xnpipe(h, alink);

		if (testbits(state->status, XNPIPE_USER_SIGIO)) {
			__clrbits(state->status, XNPIPE_USER_SIGIO);
			xnlock_put_irqrestore(&nklock, s);
			kill_fasync(&state->asyncq, xnpipe_asyncsig, POLL_IN);
			xnlock_get_irqsave(&nklock, s);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
			nh = getheadq(&xnpipe_asyncq);
#endif
		}
	}

	xnlock_put_irqrestore(&nklock, s);
}
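
The drop-and-revalidate idiom used twice above distills to the following skeleton, a sketch built from the queue helpers these examples already use; must_wake_up() and wq_of() are hypothetical stand-ins for the status test and waitqueue accessor:

	/* Drop nklock around any Linux service that may sleep, then refetch
	 * the queue head, since the list may have changed while unlocked. */
	nh = getheadq(&someq);
	while ((h = nh) != NULL) {
		nh = nextq(&someq, h);
		if (must_wake_up(h)) {			/* hypothetical predicate */
			xnlock_put_irqrestore(&nklock, s);
			wake_up_interruptible(wq_of(h));	/* hypothetical accessor */
			xnlock_get_irqsave(&nklock, s);
			nh = getheadq(&someq);		/* revalidate after relock */
		}
	}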
Example #20
/**
 * Unmap pages of memory.
 *
 * This service unmaps the shared memory region [addr;addr+len) from the caller
 * address-space.
 *
 * When called from kernel-space, the memory region remains accessible as long
 * as it exists, and this service only decrements a reference counter.
 *
 * When called from user-space, if the region is not a shared memory region,
 * this service falls back to the regular Linux munmap() service.
 *
 * @param addr start address of shared memory area;
 *
 * @param len length of the shared memory area.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a len is null, @a addr is not a multiple of the page size, or
 *   the range [addr;addr+len) is not a mapped region;
 * - ENXIO, @a addr is not the address of a shared memory area;
 * - EPERM, the caller context is invalid;
 * - EINTR, this service was interrupted by a signal.
 * 
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - kernel-space cancellation cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode);
 * - user-space cancellation cleanup routine.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/munmap.html">
 * Specification.</a>
 * 
 */
int munmap(void *addr, size_t len)
{
	pse51_shm_map_t *mapping = NULL;
	xnholder_t *holder;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_lookup(addr);

	if (!shm) {
		xnlock_put_irqrestore(&nklock, s);
		err = ENXIO;
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		xnlock_put_irqrestore(&nklock, s);
		err = EPERM;
		goto error;
	}

	++shm->nodebase.refcount;
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	for (holder = getheadq(&shm->mappings);
	     holder; holder = nextq(&shm->mappings, holder)) {
		mapping = link2map(holder);

		if (mapping->addr == addr && mapping->size == len)
			break;
	}

	if (!holder) {
		/* nklock was already dropped above, so only the mapping
		 * semaphore needs releasing on this error path. */
		err = EINVAL;
		goto err_up;
	}

	removeq(&shm->mappings, holder);
	up(&shm->maplock);

	xnfree(mapping);
	pse51_shm_put(shm, 2);
	return 0;

      err_up:
	up(&shm->maplock);
      err_shm_put:
	pse51_shm_put(shm, 1);
      error:
	thread_set_errno(err);
	return -1;
}
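
A minimal user-space round trip exercising the documented behavior, using only standard POSIX calls; on the POSIX skin, the munmap() below takes the shm lookup path shown above:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map and unmap a one-page shared memory object. */
int shm_roundtrip(void)
{
	int ret = -1;
	int fd = shm_open("/demo_shm", O_CREAT | O_RDWR, 0600);

	if (fd == -1)
		return -1;

	if (ftruncate(fd, 4096) == 0) {
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p != MAP_FAILED)
			ret = munmap(p, 4096);
	}

	close(fd);
	shm_unlink("/demo_shm");
	return ret;
}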