Example #1
ER cre_flg(ID flgid, T_CFLG *pk_cflg)
{
	uiflag_t *flag;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	flag = xnmalloc(sizeof(*flag));

	if (!flag)
		return E_NOMEM;

	flgid = xnmap_enter(ui_flag_idmap, flgid, flag);

	if (flgid <= 0) {
		xnfree(flag);
		return E_OBJ;
	}

	xnsynch_init(&flag->synchbase, XNSYNCH_FIFO, NULL);
	flag->id = flgid;
	flag->exinf = pk_cflg->exinf;
	flag->flgatr = pk_cflg->flgatr;
	flag->flgvalue = pk_cflg->iflgptn;
	sprintf(flag->name, "flg%d", flgid);
	xnregistry_enter(flag->name, flag, &flag->handle, &__flag_pnode.node);
	xnarch_memory_barrier();
	flag->magic = uITRON_FLAG_MAGIC;

	return E_OK;
}
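
A minimal caller sketch for the service above. The header name and the TA_WSGL attribute are assumptions; the field names follow the accesses made by cre_flg():

#include <itron.h>	/* header name is an assumption */

static ER create_startup_flag(void)
{
	T_CFLG cflg;

	cflg.exinf = NULL;	/* no extended information */
	cflg.flgatr = TA_WSGL;	/* single-waiter attribute (assumed) */
	cflg.iflgptn = 0;	/* all event bits initially clear */

	return cre_flg(1, &cflg);	/* static ID #1 must be free */
}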
Example #2
MSG_Q_ID msgQCreate(int nb_msgs, int length, int flags)
{
	static unsigned long msgq_ids;
	wind_msgq_t *queue;
	xnflags_t bflags = 0;
	int i, msg_size;
	char *msgs_mem;
	spl_t s;

	check_NOT_ISR_CALLABLE(return 0);

	error_check(nb_msgs <= 0, S_msgQLib_INVALID_QUEUE_TYPE, return 0);

	error_check(flags & ~WIND_MSG_Q_OPTION_MASK,
		    S_msgQLib_INVALID_QUEUE_TYPE, return 0);

	error_check(length < 0, S_msgQLib_INVALID_MSG_LENGTH, return 0);

	msgs_mem = xnmalloc(sizeof(wind_msgq_t) +
			    nb_msgs * (sizeof(wind_msg_t) + length));

	error_check(msgs_mem == NULL, S_memLib_NOT_ENOUGH_MEMORY, return 0);

	queue = (wind_msgq_t *)msgs_mem;
	msgs_mem += sizeof(wind_msgq_t);

	queue->magic = WIND_MSGQ_MAGIC;
	queue->msg_length = length;
	queue->free_list = NULL;
	initq(&queue->msgq);
	inith(&queue->rlink);
	queue->rqueue = &wind_get_rholder()->msgQq;

	/* Initialize the synchronization object. */
	if (flags & MSG_Q_PRIORITY)
		bflags |= XNSYNCH_PRIO;

	xnsynch_init(&queue->synchbase, bflags, NULL);

	msg_size = sizeof(wind_msg_t) + length;

	for (i = 0; i < nb_msgs; ++i, msgs_mem += msg_size)
		free_msg(queue, (wind_msg_t *)msgs_mem);

	xnlock_get_irqsave(&nklock, s);
	appendq(queue->rqueue, &queue->rlink);
	xnlock_put_irqrestore(&nklock, s);

	sprintf(queue->name, "mq%lu", msgq_ids++);

	if (xnregistry_enter(queue->name, queue,
			     &queue->handle, &msgq_pnode)) {
		wind_errnoset(S_objLib_OBJ_ID_ERROR);
		msgQDelete((MSG_Q_ID)queue);
		return 0;
	}

	return (MSG_Q_ID)queue;
}
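
A minimal caller sketch, assuming the VxWorks skin exports the usual msgQLib interface; the header name is an assumption:

#include <vxworks.h>	/* header name is an assumption */

static MSG_Q_ID init_cmd_queue(void)
{
	/* Room for 16 messages of up to 64 bytes each, with
	   priority-ordered waiters; a zero return means failure. */
	return msgQCreate(16, 64, MSG_Q_PRIORITY);
}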
Example #3
/*
 * _shm_alloc allocates a chunk from the Fusion kernel heap, or
 * creates a new heap for it.
 */
void *_shm_alloc(unsigned long name, int size, int suprt, int in_kheap,
		 unsigned long *opaque)
{
	void *ret = NULL;
	xnholder_t *holder;
	xnshm_a_t *p;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p->name == name) {
			/* assert(size==p->size); */

			p->ref++;
			ret = p->chunk;
			*opaque = (unsigned long)p->heap;
			goto unlock_and_exit;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

	if (in_kheap) {
		p = kalloc_new_shm(name, size);
	} else {
		/* Creating a new heap may suspend, so release the lock first. */
		xnlock_put_irqrestore(&nklock, s);
		p = create_new_heap(name, size, suprt);
		xnlock_get_irqsave(&nklock, s);
	}
	if (!p)
		goto unlock_and_exit;

	*opaque = (unsigned long)p->heap;
	appendq(&xnshm_allocq, &p->link);

#ifdef CONFIG_XENO_OPT_REGISTRY
	{
		p->handle = 0;
		num2nam(p->name, p->szName);
		xnregistry_enter(p->szName, p, &p->handle, &__shm_pnode);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	ret = p->chunk;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example #4
ER cre_mbx(ID mbxid, T_CMBX *pk_cmbx)
{
	uimbx_t *mbx;
	T_MSG **ring;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	if (pk_cmbx->bufcnt <= 0)
		return E_PAR;

	if (pk_cmbx->mbxatr & TA_MPRI)
		return E_RSATR;

	mbx = xnmalloc(sizeof(*mbx));

	if (!mbx)
		return E_NOMEM;

	ring = xnmalloc(sizeof(T_MSG *) * pk_cmbx->bufcnt);

	if (!ring) {
		xnfree(mbx);
		return E_NOMEM;
	}

	mbxid = xnmap_enter(ui_mbx_idmap, mbxid, mbx);

	if (mbxid <= 0) {
		xnfree(ring);
		xnfree(mbx);
		return E_OBJ;
	}

	xnsynch_init(&mbx->synchbase,
		     (pk_cmbx->mbxatr & TA_TPRI) ? XNSYNCH_PRIO : XNSYNCH_FIFO);

	mbx->id = mbxid;
	mbx->exinf = pk_cmbx->exinf;
	mbx->mbxatr = pk_cmbx->mbxatr;
	mbx->bufcnt = pk_cmbx->bufcnt;
	mbx->rdptr = 0;
	mbx->wrptr = 0;
	mbx->mcount = 0;
	mbx->ring = ring;
#ifdef CONFIG_XENO_OPT_REGISTRY
	sprintf(mbx->name, "mbx%d", mbxid);
	xnregistry_enter(mbx->name, mbx, &mbx->handle, &__mbx_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */
	xnarch_memory_barrier();
	mbx->magic = uITRON_MBX_MAGIC;

	return E_OK;
}
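
A minimal caller sketch mirroring the checks above (TA_MPRI would be rejected, bufcnt must be positive); the header name is an assumption:

#include <itron.h>	/* header name is an assumption */

static ER create_rx_mailbox(void)
{
	T_CMBX cmbx;

	cmbx.exinf = NULL;	/* no extended information */
	cmbx.mbxatr = TA_TFIFO;	/* FIFO task queuing */
	cmbx.bufcnt = 8;	/* ring holds up to 8 message pointers */

	return cre_mbx(2, &cmbx);	/* static ID #2 must be free */
}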
Example #5
int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
{
	int ret = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	bf->bufmem = xnarch_alloc_host_mem(bufsz);
	if (bf->bufmem == NULL)
		return -ENOMEM;

	xnsynch_init(&bf->isynch_base, mode & B_PRIO, NULL);
	xnsynch_init(&bf->osynch_base, mode & B_PRIO, NULL);

	bf->handle = 0;	/* i.e. (still) unregistered buffer. */
	xnobject_copy_name(bf->name, name);
	inith(&bf->rlink);
	bf->rqueue = &xeno_get_rholder()->bufferq;
	xnlock_get_irqsave(&nklock, s);
	appendq(bf->rqueue, &bf->rlink);
	xnlock_put_irqrestore(&nklock, s);

	bf->mode = mode;
	bf->bufsz = bufsz;
	bf->rdoff = 0;
	bf->wroff = 0;
	bf->fillsz = 0;
	bf->rdtoken = 0;
	bf->wrtoken = 0;

#ifndef __XENO_SIM__
	bf->cpid = 0;
#endif
	bf->magic = XENO_BUFFER_MAGIC;

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		ret = xnregistry_enter(bf->name, bf, &bf->handle,
				       &__buffer_pnode.node);

		if (ret)
			rt_buffer_delete(bf);
	}

	return ret;
}
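
A minimal usage sketch for the native-skin service above; a non-NULL name triggers the registry export discussed in the comment:

#include <native/buffer.h>

static RT_BUFFER bf;

static int init_io_buffer(void)
{
	/* 1 KB buffer, FIFO ordering for both readers and writers. */
	return rt_buffer_create(&bf, "iobuf", 1024, B_FIFO);
}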
Example #6
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregistry_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__event_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on
			   entry) from user-space, it gets registered under a
			   unique internal name but is not exported through
			   /proc. */
			xnobject_create_name(event->name, sizeof(event->name),
					     (void *)event);
			pnode = NULL;
		}

		err = xnregistry_enter(event->name, event,
				       &event->handle, pnode);

		if (err)
			rt_event_delete(event);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}
Example #7
int sc_screate(unsigned initval, int opt, int *errp)
{
	int bflags = 0, semid;
	vrtxsem_t *sem;
	spl_t s;

	if (opt & ~1) {
		*errp = ER_IIP;
		return -1;
	}

	sem = (vrtxsem_t *)xnmalloc(sizeof(*sem));

	if (!sem) {
		*errp = ER_NOCB;
		return -1;
	}

	semid = xnmap_enter(vrtx_sem_idmap, -1, sem);

	if (semid < 0) {
		*errp = ER_NOCB;
		xnfree(sem);
		return -1;
	}

	if (opt == 0)
		bflags = XNSYNCH_PRIO;
	else
		bflags = XNSYNCH_FIFO;

	xnsynch_init(&sem->synchbase, bflags | XNSYNCH_DREORD);
	inith(&sem->link);
	sem->semid = semid;
	sem->magic = VRTX_SEM_MAGIC;
	sem->count = initval;

	xnlock_get_irqsave(&nklock, s);
	appendq(&vrtx_sem_q, &sem->link);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	sprintf(sem->name, "sem%d", semid);
	xnregistry_enter(sem->name, sem, &sem->handle, &__sem_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	*errp = RET_OK;

	return semid;
}
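
A minimal caller sketch; opt 0 selects priority-ordered waiters per the mapping above:

static int create_counting_sem(void)
{
	int err;
	int semid = sc_screate(0, 0, &err);	/* initial count 0 */

	if (semid < 0)
		return -1;	/* err holds ER_IIP or ER_NOCB */

	return semid;	/* err holds RET_OK */
}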
Example #8
int sc_mcreate(unsigned int opt, int *errp)
{
	int bflags, mid;
	vrtxmx_t *mx;
	spl_t s;

	switch (opt) {
	case 0:
		bflags = XNSYNCH_PRIO;
		break;
	case 1:
		bflags = XNSYNCH_FIFO;
		break;
	case 2:
		bflags = XNSYNCH_PRIO | XNSYNCH_PIP;
		break;
	default:
		*errp = ER_IIP;
		return -1;
	}

	mx = xnmalloc(sizeof(*mx));
	if (mx == NULL) {
		*errp = ER_NOCB;
		return -1;
	}

	mid = xnmap_enter(vrtx_mx_idmap, -1, mx);
	if (mid < 0) {
		*errp = ER_NOCB;
		xnfree(mx);
		return -1;
	}

	inith(&mx->link);
	mx->mid = mid;
	xnsynch_init(&mx->synchbase, bflags | XNSYNCH_DREORD | XNSYNCH_OWNER,
		     NULL);

	xnlock_get_irqsave(&nklock, s);
	appendq(&vrtx_mx_q, &mx->link);
	xnlock_put_irqrestore(&nklock, s);

	sprintf(mx->name, "mx%d", mid);
	xnregistry_enter(mx->name, mx, &mx->handle, &__mutex_pnode.node);

	*errp = RET_OK;

	return mid;
}
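
A minimal caller sketch; opt 2 selects priority ordering with priority inheritance per the switch above:

static int create_pi_mutex(void)
{
	int err;
	int mid = sc_mcreate(2, &err);

	if (mid < 0)
		return -1;	/* err holds ER_IIP or ER_NOCB */

	return mid;
}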
Example #9
int rt_intr_create(RT_INTR *intr,
		   const char *name,
		   unsigned irq, rt_isr_t isr, rt_iack_t iack, int mode)
{
	int err;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (name)
		xnobject_copy_name(intr->name, name);
	else
		/* Kernel-side "anonymous" objects (name == NULL) get unique names.
		 * Nevertheless, they will not be exported via the registry. */
		xnobject_create_name(intr->name, sizeof(intr->name), isr);

	xnintr_init(&intr->intr_base, intr->name, irq, isr, iack, mode);
#ifdef CONFIG_XENO_OPT_PERVASIVE
	xnsynch_init(&intr->synch_base, XNSYNCH_PRIO, NULL);
	intr->pending = 0;
	intr->cpid = 0;
	intr->mode = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	intr->magic = XENO_INTR_MAGIC;
	intr->handle = 0;	/* i.e. (still) unregistered interrupt. */
	inith(&intr->rlink);
	intr->rqueue = &xeno_get_rholder()->intrq;
	xnlock_get_irqsave(&nklock, s);
	appendq(intr->rqueue, &intr->rlink);
	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_attach(&intr->intr_base, intr);

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (!err && name)
		err = xnregistry_enter(intr->name, intr, &intr->handle,
				       &__intr_pnode);
	if (err)
		rt_intr_delete(intr);

	return err;
}
Example #10
int rt_sem_create(RT_SEM *sem, const char *name, unsigned long icount, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if ((mode & S_PULSE) && icount > 0)
		return -EINVAL;

	xnsynch_init(&sem->synch_base, mode & S_PRIO, NULL);
	sem->count = icount;
	sem->mode = mode;
	sem->handle = 0;	/* i.e. (still) unregistered semaphore. */
	sem->magic = XENO_SEM_MAGIC;
	xnobject_copy_name(sem->name, name);
	inith(&sem->rlink);
	sem->rqueue = &xeno_get_rholder()->semq;
	xnlock_get_irqsave(&nklock, s);
	appendq(sem->rqueue, &sem->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	sem->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(sem->name, sem, &sem->handle,
				       &__sem_pnode);
		if (err)
			rt_sem_delete(sem);
	}

	return err;
}
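
A minimal usage sketch for the native-skin service above; S_PULSE would require a zero initial count, as checked on entry:

#include <native/sem.h>

static RT_SEM sem;

static int init_tx_sem(void)
{
	/* Counting semaphore, initially empty, priority-ordered waiters. */
	return rt_sem_create(&sem, "txsem", 0, S_PRIO);
}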
Example #11
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO, NULL);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(event->name, event, &event->handle,
				       &__event_pnode);

		if (err)
			rt_event_delete(event);
	}

	return err;
}
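
A minimal usage sketch for the service above (and for the older variant in Example #6):

#include <native/event.h>

static RT_EVENT evt;

static int init_event_group(void)
{
	/* All event bits clear initially, priority-ordered waiters. */
	return rt_event_create(&evt, "evgrp", 0, EV_PRIO);
}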
Example #12
int rt_cond_create(RT_COND *cond, const char *name)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&cond->synch_base, XNSYNCH_PRIO, NULL);
	cond->handle = 0;	/* i.e. (still) unregistered cond. */
	cond->magic = XENO_COND_MAGIC;
	xnobject_copy_name(cond->name, name);
	inith(&cond->rlink);
	cond->rqueue = &xeno_get_rholder()->condq;
	xnlock_get_irqsave(&nklock, s);
	appendq(cond->rqueue, &cond->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
	cond->cpid = 0;
#endif

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(cond->name, cond, &cond->handle,
				       &__cond_pnode.node);

		if (err)
			rt_cond_delete(cond);
	}

	return err;
}
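
A minimal usage sketch; passing NULL as the name would skip registry export:

#include <native/cond.h>

static RT_COND cond;

static int init_state_cond(void)
{
	return rt_cond_create(&cond, "state_change");
}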
Example #13
/**
 * @brief Register a RTDM device
 *
 * Registers a device in the RTDM namespace.
 *
 * @param[in] dev Device descriptor.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if the descriptor contains invalid
 * entries. RTDM_PROFILE_INFO() must appear in the list of
 * initializers for the driver properties.
 *
 * - -EEXIST is returned if the specified device name or protocol ID is
 * already in use.
 *
 * - -ENOMEM is returned if a memory allocation failed in the process
 * of registering the device.
 *
 * @coretags{secondary-only}
 */
int rtdm_dev_register(struct rtdm_device *dev)
{
	int ret, pos, major, minor;
	struct device *kdev = NULL;
	struct rtdm_driver *drv;
	xnkey_t id;
	dev_t rdev;

	secondary_mode_only();

	if (!realtime_core_enabled())
		return -ENOSYS;

	mutex_lock(&register_lock);

	dev->name = NULL;
	drv = dev->driver;
	pos = atomic_read(&drv->refcount);
	ret = register_driver(drv);
	if (ret) {
		mutex_unlock(&register_lock);
		return ret;
	}

	dev->ops = drv->ops;
	if (drv->device_flags & RTDM_NAMED_DEVICE)
		dev->ops.socket = (typeof(dev->ops.socket))enosys;
	else
		dev->ops.open = (typeof(dev->ops.open))enosys;

	init_waitqueue_head(&dev->putwq);
	dev->ops.close = __rtdm_dev_close; /* Interpose on driver's handler. */
	atomic_set(&dev->refcount, 0);

	if (drv->device_flags & RTDM_FIXED_MINOR) {
		minor = dev->minor;
		if (minor < 0 || minor >= drv->device_count) {
			ret = -EINVAL;
			goto fail;
		}
	} else
		dev->minor = minor = pos;

	if (drv->device_flags & RTDM_NAMED_DEVICE) {
		major = drv->named.major;
		dev->name = kasformat(dev->label, minor);
		if (dev->name == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		ret = xnregistry_enter(dev->name, dev,
				       &dev->named.handle, NULL);
		if (ret)
			goto fail;

		rdev = MKDEV(major, minor);
		kdev = device_create(rtdm_class, NULL, rdev,
				     dev, dev->label, minor);
		if (IS_ERR(kdev)) {
			xnregistry_remove(dev->named.handle);
			ret = PTR_ERR(kdev);
			goto fail;
		}
	} else {
		dev->name = kstrdup(dev->label, GFP_KERNEL);
		if (dev->name == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		rdev = MKDEV(0, 0);
		kdev = device_create(rtdm_class, NULL, rdev,
				     dev, dev->name);
		if (IS_ERR(kdev)) {
			ret = PTR_ERR(kdev);
			goto fail;
		}

		id = get_proto_id(drv->protocol_family, drv->socket_type);
		ret = xnid_enter(&protocol_devices, &dev->proto.id, id);
		if (ret < 0)
			goto fail;
	}

	dev->rdev = rdev;
	dev->kdev = kdev;
	dev->magic = RTDM_DEVICE_MAGIC;

	mutex_unlock(&register_lock);

	trace_cobalt_device_register(dev);

	return 0;
fail:
	if (kdev)
		device_destroy(rtdm_class, rdev);

	unregister_driver(drv);

	mutex_unlock(&register_lock);

	if (dev->name)
		kfree(dev->name);

	return ret;
}
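
A registration sketch for the named-device path above. The callback set is trimmed to the minimum, all identifiers prefixed foo_ are placeholders, and the class/subclass/version values are arbitrary:

#include <rtdm/driver.h>

static int foo_open(struct rtdm_fd *fd, int oflags)
{
	return 0;
}

static void foo_close(struct rtdm_fd *fd)
{
}

static struct rtdm_driver foo_driver = {
	.profile_info = RTDM_PROFILE_INFO(foo, RTDM_CLASS_EXPERIMENTAL,
					  RTDM_SUBCLASS_GENERIC, 1),
	.device_flags = RTDM_NAMED_DEVICE,
	.device_count = 1,
	.ops = {
		.open = foo_open,
		.close = foo_close,
	},
};

static struct rtdm_device foo_device = {
	.driver = &foo_driver,
	.label = "foo%d",	/* minor substituted by kasformat() above */
};

/* Typically called from a module init routine, i.e. secondary mode. */
static int foo_init(void)
{
	return rtdm_dev_register(&foo_device);
}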
Example #14
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_DMA32) ? GFP_DMA32 : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNHEAP_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNHEAP_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}
	xnheap_set_label(&heap->heap_base, "rt_heap: %s", name);

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO), NULL);
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(heap->name, heap, &heap->handle,
				       &__heap_pnode.node);

		if (err)
			rt_heap_delete(heap);
	}

	return err;
}
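
A minimal usage sketch for the variant above (an older revision of the same service appears in Example #15):

#include <native/heap.h>

static RT_HEAP heap;

static int init_local_heap(void)
{
	/* 8 KB private heap, priority-ordered waiters; blocks are
	   then carved out with rt_heap_alloc(). */
	return rt_heap_create(&heap, "localheap", 8192, H_PRIO);
}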
Example #15
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

#ifdef CONFIG_XENO_OPT_PERVASIVE
		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		return -ENOSYS;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNCORE_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO));
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregistry_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__heap_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on
			   entry) from user-space, it gets registered under a
			   unique internal name but is not exported through
			   /proc. */
			xnobject_create_name(heap->name, sizeof(heap->name),
					     (void *)heap);
			pnode = NULL;
		}

		err = xnregistry_enter(heap->name, heap, &heap->handle, pnode);

		if (err)
			rt_heap_delete(heap);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}
Example #16
u_long t_create(const char *name,
		u_long prio,
		u_long sstack, u_long ustack, u_long flags, u_long *tid_r)
{
	xnflags_t bflags = 0;
	psostask_t *task;
	spl_t s;
	int n;

	/* Xenomai extension: we accept priority level #0 for creating
	   non-RT tasks (i.e. underlaid by SCHED_NORMAL pthreads),
	   which are allowed to call into the pSOS emulator, usually
	   for synchronization services. */

	if (prio > 255)
		return ERR_PRIOR;

	task = (psostask_t *)xnmalloc(sizeof(*task));

	if (!task)
		return ERR_NOTCB;

	if (flags & T_FPU)
		bflags |= XNFPU;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (flags & T_SHADOW)
		bflags |= XNSHADOW;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	ustack += sstack;

	if (!(flags & T_SHADOW) && ustack < 1024) {
		xnfree(task);
		return ERR_TINYSTK;
	}

	if (name && *name)
		xnobject_copy_name(task->name, name);
	else
		/* i.e. Anonymous object which must be accessible from
		   user-space. */
		sprintf(task->name, "anon_task%lu", psos_task_ids++);

	if (xnpod_init_thread(&task->threadbase, psos_tbase,
			      task->name, prio, bflags, ustack, &psos_task_ops) != 0) {
		xnfree(task);
		return ERR_NOSTK;	/* Assume this is the only possible failure */
	}

	xnthread_time_slice(&task->threadbase) = psos_time_slice;

	taskev_init(&task->evgroup);
	inith(&task->link);

	for (n = 0; n < PSOSTASK_NOTEPAD_REGS; n++)
		task->notepad[n] = 0;

	initgq(&task->alarmq,
	       &xnmod_glink_queue,
	       xnmod_alloc_glinks,
	       XNMOD_GHOLDER_THRESHOLD);

	task->magic = PSOS_TASK_MAGIC;

	xnlock_get_irqsave(&nklock, s);
	appendq(&psostaskq, &task->link);
	*tid_r = (u_long)task;
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	{
		u_long err = xnregistry_enter(task->name,
					      task, &xnthread_handle(&task->threadbase), NULL);
		if (err) {
			t_delete((u_long)task);
			return err;
		}
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	xnarch_create_display(&task->threadbase, task->name, psostask);

	return SUCCESS;
}
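
A spawn sketch, assuming the classic pSOS t_start() convention of a four-argument entry routine; the 1 KB stacks satisfy the tiny-stack check above:

static u_long spawn_echo_task(void (*entry)(u_long, u_long, u_long, u_long))
{
	u_long tid, targs[4] = { 0, 0, 0, 0 };
	u_long err = t_create("ECHO", 10, 1024, 1024, T_FPU, &tid);

	if (err != SUCCESS)
		return err;

	return t_start(tid, T_PREEMPT, entry, targs);
}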