Example no. 1
/**
 * @brief Unregister an RTDM device
 *
 * Removes the device from the RTDM namespace. This routine waits until
 * all open connections to @a dev have been closed before unregistering it.
 *
 * @param[in] dev Device descriptor.
 *
 * @coretags{secondary-only}
 */
void rtdm_dev_unregister(struct rtdm_device *dev)
{
	struct rtdm_driver *drv = dev->driver;

	secondary_mode_only();

	trace_cobalt_device_unregister(dev);

	/* Lock out any further connection. */
	dev->magic = ~RTDM_DEVICE_MAGIC;

	/* Then wait for the ongoing connections to finish. */
	wait_event(dev->putwq,
		   atomic_read(&dev->refcount) == 0);

	mutex_lock(&register_lock);

	if (drv->device_flags & RTDM_NAMED_DEVICE)
		xnregistry_remove(dev->named.handle);
	else
		xnid_remove(&protocol_devices, &dev->proto.id);

	device_destroy(rtdm_class, dev->rdev);

	unregister_driver(drv);

	mutex_unlock(&register_lock);

	kfree(dev->name);
}
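For context, rtdm_dev_unregister() is normally the counterpart of rtdm_dev_register() in a driver's module exit path. Below is a minimal usage sketch, assuming a hypothetical driver whose single device descriptor my_device was registered at init time; the my_* names are illustrative placeholders, not part of the RTDM API.

#include <linux/module.h>
#include <rtdm/driver.h>

/* Hypothetical device descriptor, registered by the init path. */
static struct rtdm_device my_device;

static void __exit my_driver_exit(void)
{
	/*
	 * Runs in secondary mode; blocks until every open connection
	 * to my_device has been closed, then drops the device from
	 * the RTDM namespace.
	 */
	rtdm_dev_unregister(&my_device);
}
module_exit(my_driver_exit);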
Example no. 2
static void __heap_post_release(struct xnheap *h)
{
	RT_HEAP *heap = container_of(h, RT_HEAP, heap_base);
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	removeq(heap->rqueue, &heap->rlink);

	if (heap->handle)
		xnregistry_remove(heap->handle);

	if (xnsynch_destroy(&heap->synch_base) == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
	if (heap->cpid)
		xnfree(heap);
#endif
}
Example no. 3
ER del_flg(ID flgid)
{
	uiflag_t *flag;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		xnlock_put_irqrestore(&nklock, s);
		return E_NOEXS;
	}

	xnmap_remove(ui_flag_idmap, flag->id);
	ui_mark_deleted(flag);

	xnregistry_remove(flag->handle);

	/* Destroy the synch object before freeing the flag it lives in. */
	if (xnsynch_destroy(&flag->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnfree(flag);

	xnlock_put_irqrestore(&nklock, s);

	return E_OK;
}
Example no. 4
ER del_mbx(ID mbxid)
{
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		xnlock_put_irqrestore(&nklock, s);
		return E_NOEXS;
	}

	xnmap_remove(ui_mbx_idmap, mbx->id);
	ui_mark_deleted(mbx);
#ifdef CONFIG_XENO_OPT_REGISTRY
	xnregistry_remove(mbx->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	/* Destroy the synch object before freeing the mailbox it lives in. */
	if (xnsynch_destroy(&mbx->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnfree(mbx->ring);
	xnfree(mbx);

	xnlock_put_irqrestore(&nklock, s);

	return E_OK;
}
Example no. 5
static void psostask_delete_hook(xnthread_t *thread)
{
	/* The scheduler is locked while hooks are running */
	psostask_t *task;
	psostm_t *tm;

	if (xnthread_get_magic(thread) != PSOS_SKIN_MAGIC)
		return;

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (xnthread_handle(thread) != XN_NO_HANDLE)
		xnregistry_remove(xnthread_handle(thread));
#endif /* CONFIG_XENO_OPT_REGISTRY */

	task = thread2psostask(thread);

	removeq(&psostaskq, &task->link);

	while ((tm = (psostm_t *)getgq(&task->alarmq)) != NULL)
		tm_destroy_internal(tm);

	taskev_destroy(&task->evgroup);
	xnarch_delete_display(&task->threadbase);
	psos_mark_deleted(task);

	xnheap_schedule_free(&kheap, task, &task->link);
}
Example no. 6
int mx_destroy_internal(vrtxmx_t *mx)
{
	int s = xnsynch_destroy(&mx->synchbase);

	xnmap_remove(vrtx_mx_idmap, mx->mid);
	removeq(&vrtx_mx_q, &mx->link);
	xnregistry_remove(mx->handle);
	xnfree(mx);

	return s;
}
Example no. 7
void xnthread_cleanup_tcb(xnthread_t *thread)
{
	/* Does not wreck the TCB, only releases the held resources. */

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
	xnarch_free_stack(xnthread_archtcb(thread));
#endif
	if (thread->registry.handle != XN_NO_HANDLE)
		xnregistry_remove(thread->registry.handle);

	thread->registry.handle = XN_NO_HANDLE;
}
Example no. 8
static int _shm_free(unsigned long name)
{
	int ret = 0;
	xnholder_t *holder;
	xnshm_a_t *p;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p->name == name && --p->ref == 0) {
#ifdef CONFIG_XENO_OPT_REGISTRY
			if (p->handle)
				xnregistry_remove(p->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
				/* Should release lock here? 
				 * Can destroy_mapped suspend ?
				 * [YES!]
				 */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				ret = xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				ret =
				    xnheap_destroy(p->heap,
						   &__heap_flush_private, NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				if (ret)
					goto unlock_and_exit;
				xnheap_free(&kheap, p->heap);
			}
			removeq(&xnshm_allocq, &p->link);
			ret = p->size;
			xnheap_free(&kheap, p);
			break;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example no. 9
static int sem_destroy_internal(vrtxsem_t *sem)
{
	int s;

	removeq(&vrtx_sem_q, &sem->link);
	xnmap_remove(vrtx_sem_idmap, sem->semid);
	s = xnsynch_destroy(&sem->synchbase);
	xnregistry_remove(sem->handle);
	vrtx_mark_deleted(sem);
	xnfree(sem);

	return s;
}
Example no. 10
static int sem_destroy_internal(vrtxsem_t *sem)
{
	int s;

	removeq(&vrtx_sem_q, &sem->link);
	xnmap_remove(vrtx_sem_idmap, sem->semid);
	s = xnsynch_destroy(&sem->synchbase);
#ifdef CONFIG_XENO_OPT_REGISTRY
	xnregistry_remove(sem->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */
	vrtx_mark_deleted(sem);
	xnfree(sem);

	return s;
}
Example no. 11
static int _shm_free(unsigned long name)
{
	xnholder_t *holder;
	xnshm_a_t *p;
	int ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p->name == name && --p->ref == 0) {
			removeq(&xnshm_allocq, &p->link);
			if (p->handle)
				xnregistry_remove(p->handle);

			xnlock_put_irqrestore(&nklock, s);

			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
#ifdef CONFIG_XENO_OPT_PERVASIVE
				xnheap_destroy_mapped(p->heap,
						      __heap_flush_shared,
						      NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_destroy(p->heap,
					       &__heap_flush_private, NULL);
				xnheap_free(&kheap, p->heap);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
			}
			ret = p->size;
			xnheap_free(&kheap, p);

			return ret;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
Example no. 12
static void __heap_post_release(struct xnheap *h) /* nklock held, IRQs off */
{
	RT_HEAP *heap = container_of(h, RT_HEAP, heap_base);

	removeq(heap->rqueue, &heap->rlink);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (heap->handle)
		xnregistry_remove(heap->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	if (xnsynch_destroy(&heap->synch_base) == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();
}
Example no. 13
int rt_intr_delete(RT_INTR *intr)
{
	int err = 0, rc = XNSYNCH_DONE;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	removeq(intr->rqueue, &intr->rlink);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	rc = xnsynch_destroy(&intr->synch_base);
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	if (intr->handle)
		xnregistry_remove(intr->handle);

	xeno_mark_deleted(intr);

	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_destroy(&intr->intr_base);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

	return err;
}
Example no. 14
int rt_buffer_delete(RT_BUFFER *bf)
{
	int ret = 0, resched;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	xnarch_free_host_mem(bf->bufmem, bf->bufsz);
	removeq(bf->rqueue, &bf->rlink);
	resched = xnsynch_destroy(&bf->isynch_base) == XNSYNCH_RESCHED;
	resched += xnsynch_destroy(&bf->osynch_base) == XNSYNCH_RESCHED;

	if (bf->handle)
		xnregistry_remove(bf->handle);

	xeno_mark_deleted(bf);

	if (resched)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example no. 15
int rt_event_delete(RT_EVENT *event)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	removeq(event->rqueue, &event->rlink);

	rc = xnsynch_destroy(&event->synch_base);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (event->handle)
		xnregistry_remove(event->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	xeno_mark_deleted(event);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 16
int rt_sem_delete(RT_SEM *sem)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	removeq(sem->rqueue, &sem->rlink);

	rc = xnsynch_destroy(&sem->synch_base);

	if (sem->handle)
		xnregistry_remove(sem->handle);

	xeno_mark_deleted(sem);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example no. 17
/**
 * @brief Register an RTDM device
 *
 * Registers a device in the RTDM namespace.
 *
 * @param[in] dev Device descriptor.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if the descriptor contains invalid
 * entries. RTDM_PROFILE_INFO() must appear in the list of
 * initializers for the driver properties.
 *
 * - -EEXIST is returned if the specified device name or protocol ID is
 * already in use.
 *
 * - -ENOMEM is returned if a memory allocation failed in the process
 * of registering the device.
 *
 * @coretags{secondary-only}
 */
int rtdm_dev_register(struct rtdm_device *dev)
{
	int ret, pos, major, minor;
	struct device *kdev = NULL;
	struct rtdm_driver *drv;
	xnkey_t id;
	dev_t rdev;

	secondary_mode_only();

	if (!realtime_core_enabled())
		return -ENOSYS;

	mutex_lock(&register_lock);

	dev->name = NULL;
	drv = dev->driver;
	pos = atomic_read(&drv->refcount);
	ret = register_driver(drv);
	if (ret) {
		mutex_unlock(&register_lock);
		return ret;
	}

	dev->ops = drv->ops;
	if (drv->device_flags & RTDM_NAMED_DEVICE)
		dev->ops.socket = (typeof(dev->ops.socket))enosys;
	else
		dev->ops.open = (typeof(dev->ops.open))enosys;

	init_waitqueue_head(&dev->putwq);
	dev->ops.close = __rtdm_dev_close; /* Interpose on driver's handler. */
	atomic_set(&dev->refcount, 0);

	if (drv->device_flags & RTDM_FIXED_MINOR) {
		minor = dev->minor;
		if (minor < 0 || minor >= drv->device_count) {
			ret = -EINVAL;
			goto fail;
		}
	} else
		dev->minor = minor = pos;

	if (drv->device_flags & RTDM_NAMED_DEVICE) {
		major = drv->named.major;
		dev->name = kasformat(dev->label, minor);
		if (dev->name == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		ret = xnregistry_enter(dev->name, dev,
				       &dev->named.handle, NULL);
		if (ret)
			goto fail;

		rdev = MKDEV(major, minor);
		kdev = device_create(rtdm_class, NULL, rdev,
				     dev, dev->label, minor);
		if (IS_ERR(kdev)) {
			xnregistry_remove(dev->named.handle);
			ret = PTR_ERR(kdev);
			goto fail;
		}
	} else {
		dev->name = kstrdup(dev->label, GFP_KERNEL);
		if (dev->name == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		rdev = MKDEV(0, 0);
		kdev = device_create(rtdm_class, NULL, rdev,
				     dev, dev->name);
		if (IS_ERR(kdev)) {
			ret = PTR_ERR(kdev);
			goto fail;
		}

		id = get_proto_id(drv->protocol_family, drv->socket_type);
		ret = xnid_enter(&protocol_devices, &dev->proto.id, id);
		if (ret < 0)
			goto fail;
	}

	dev->rdev = rdev;
	dev->kdev = kdev;
	dev->magic = RTDM_DEVICE_MAGIC;

	mutex_unlock(&register_lock);

	trace_cobalt_device_register(dev);

	return 0;
fail:
	if (kdev)
		device_destroy(rtdm_class, rdev);

	unregister_driver(drv);

	mutex_unlock(&register_lock);

	if (dev->name)
		kfree(dev->name);

	return ret;
}
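A minimal registration sketch for the named-device case follows, assuming a hypothetical driver: my_driver, my_device, my_open() and the "mydev%d" label are illustrative placeholders, not part of the RTDM API. The minor number is substituted into the label, as the kasformat() call above shows.

#include <linux/module.h>
#include <rtdm/driver.h>

/* Illustrative open handler: accept the connection, no private state. */
static int my_open(struct rtdm_fd *fd, int oflags)
{
	return 0;
}

static struct rtdm_driver my_driver = {
	.profile_info	= RTDM_PROFILE_INFO(my_profile,
					    RTDM_CLASS_EXPERIMENTAL,
					    RTDM_SUBCLASS_GENERIC, 1),
	.device_flags	= RTDM_NAMED_DEVICE,
	.device_count	= 1,
	.context_size	= 0,
	.ops = {
		.open	= my_open,
	},
};

static struct rtdm_device my_device = {
	.driver = &my_driver,
	.label	= "mydev%d",	/* minor is substituted into the label */
};

static int __init my_driver_init(void)
{
	/* Must run from secondary mode, typically module init. */
	return rtdm_dev_register(&my_device);
}
module_init(my_driver_init);
MODULE_LICENSE("GPL");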