Example #1
int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
{
	int ret = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	bf->bufmem = xnarch_alloc_host_mem(bufsz);
	if (bf->bufmem == NULL)
		return -ENOMEM;

	xnsynch_init(&bf->isynch_base, mode & B_PRIO, NULL);
	xnsynch_init(&bf->osynch_base, mode & B_PRIO, NULL);

	bf->handle = 0;	/* i.e. (still) unregistered buffer. */
	xnobject_copy_name(bf->name, name);
	inith(&bf->rlink);
	bf->rqueue = &xeno_get_rholder()->bufferq;
	xnlock_get_irqsave(&nklock, s);
	appendq(bf->rqueue, &bf->rlink);
	xnlock_put_irqrestore(&nklock, s);

	bf->mode = mode;
	bf->bufsz = bufsz;
	bf->rdoff = 0;
	bf->wroff = 0;
	bf->fillsz = 0;
	bf->rdtoken = 0;
	bf->wrtoken = 0;

#ifndef __XENO_SIM__
	bf->cpid = 0;
#endif
	bf->magic = XENO_BUFFER_MAGIC;

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		ret = xnregistry_enter(bf->name, bf, &bf->handle,
				       &__buffer_pnode.node);

		if (ret)
			rt_buffer_delete(bf);
	}

	return ret;
}
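
For context, a minimal caller sketch (hypothetical names, assuming the native skin's RT_BUFFER API as used above):

static RT_BUFFER bf;

void setup_buffer(void)
{
	/* Create a 4 KB buffer with a FIFO-ordered wait queue; a non-NULL
	 * name also enters the buffer into the registry. */
	int ret = rt_buffer_create(&bf, "MyBuffer", 4096, B_FIFO);

	if (ret)
		handle_error(ret);	/* hypothetical error handler */
}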
Example #2
ER cre_flg(ID flgid, T_CFLG *pk_cflg)
{
	uiflag_t *flag;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	flag = xnmalloc(sizeof(*flag));

	if (!flag)
		return E_NOMEM;

	flgid = xnmap_enter(ui_flag_idmap, flgid, flag);

	if (flgid <= 0) {
		xnfree(flag);
		return E_OBJ;
	}

	xnsynch_init(&flag->synchbase, XNSYNCH_FIFO, NULL);
	flag->id = flgid;
	flag->exinf = pk_cflg->exinf;
	flag->flgatr = pk_cflg->flgatr;
	flag->flgvalue = pk_cflg->iflgptn;
	sprintf(flag->name, "flg%d", flgid);
	xnregistry_enter(flag->name, flag, &flag->handle, &__flag_pnode.node);
	xnarch_memory_barrier();
	flag->magic = uITRON_FLAG_MAGIC;

	return E_OK;
}
Example #3
MSG_Q_ID msgQCreate(int nb_msgs, int length, int flags)
{
	static unsigned long msgq_ids;
	wind_msgq_t *queue;
	xnflags_t bflags = 0;
	int i, msg_size;
	char *msgs_mem;
	spl_t s;

	check_NOT_ISR_CALLABLE(return 0);

	error_check(nb_msgs <= 0, S_msgQLib_INVALID_QUEUE_TYPE, return 0);

	error_check(flags & ~WIND_MSG_Q_OPTION_MASK,
		    S_msgQLib_INVALID_QUEUE_TYPE, return 0);

	error_check(length < 0, S_msgQLib_INVALID_MSG_LENGTH, return 0);

	msgs_mem = xnmalloc(sizeof(wind_msgq_t) +
			    nb_msgs * (sizeof(wind_msg_t) + length));

	error_check(msgs_mem == NULL, S_memLib_NOT_ENOUGH_MEMORY, return 0);

	queue = (wind_msgq_t *)msgs_mem;
	msgs_mem += sizeof(wind_msgq_t);

	queue->magic = WIND_MSGQ_MAGIC;
	queue->msg_length = length;
	queue->free_list = NULL;
	initq(&queue->msgq);
	inith(&queue->rlink);
	queue->rqueue = &wind_get_rholder()->msgQq;

	/* Initialize the synch object: */
	if (flags & MSG_Q_PRIORITY)
		bflags |= XNSYNCH_PRIO;

	xnsynch_init(&queue->synchbase, bflags, NULL);

	msg_size = sizeof(wind_msg_t) + length;

	for (i = 0; i < nb_msgs; ++i, msgs_mem += msg_size)
		free_msg(queue, (wind_msg_t *)msgs_mem);

	xnlock_get_irqsave(&nklock, s);
	appendq(queue->rqueue, &queue->rlink);
	xnlock_put_irqrestore(&nklock, s);

	sprintf(queue->name, "mq%lu", msgq_ids++);

	if (xnregistry_enter(queue->name, queue,
			     &queue->handle, &msgq_pnode)) {
		wind_errnoset(S_objLib_OBJ_ID_ERROR);
		msgQDelete((MSG_Q_ID)queue);
		return 0;
	}

	return (MSG_Q_ID)queue;
}
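
A hypothetical caller sketch, following the msgQLib conventions visible above (a return of 0 signals failure, with the status set via wind_errnoset()):

/* Create a priority-ordered queue of 16 messages, 64 bytes each. */
MSG_Q_ID qid = msgQCreate(16, 64, MSG_Q_PRIORITY);

if (qid == 0)
	handle_error();	/* hypothetical error handler */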
Example #4
ER cre_mbx(ID mbxid, T_CMBX *pk_cmbx)
{
	uimbx_t *mbx;
	T_MSG **ring;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	if (pk_cmbx->bufcnt <= 0)
		return E_PAR;

	if (pk_cmbx->mbxatr & TA_MPRI)
		return E_RSATR;

	mbx = xnmalloc(sizeof(*mbx));

	if (!mbx)
		return E_NOMEM;

	ring = xnmalloc(sizeof(T_MSG *) * pk_cmbx->bufcnt);

	if (!ring) {
		xnfree(mbx);
		return E_NOMEM;
	}

	mbxid = xnmap_enter(ui_mbx_idmap, mbxid, mbx);

	if (mbxid <= 0) {
		xnfree(ring);	/* Also release the ring, or it would leak. */
		xnfree(mbx);
		return E_OBJ;
	}

	xnsynch_init(&mbx->synchbase,
		     (pk_cmbx->mbxatr & TA_TPRI) ? XNSYNCH_PRIO : XNSYNCH_FIFO);

	mbx->id = mbxid;
	mbx->exinf = pk_cmbx->exinf;
	mbx->mbxatr = pk_cmbx->mbxatr;
	mbx->bufcnt = pk_cmbx->bufcnt;
	mbx->rdptr = 0;
	mbx->wrptr = 0;
	mbx->mcount = 0;
	mbx->ring = ring;
#ifdef CONFIG_XENO_OPT_REGISTRY
	sprintf(mbx->name, "mbx%d", mbxid);
	xnregistry_enter(mbx->name, mbx, &mbx->handle, &__mbx_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */
	xnarch_memory_barrier();
	mbx->magic = uITRON_MBX_MAGIC;

	return E_OK;
}
Example #5
/**
 * Initialize a selector structure.
 *
 * @param selector The selector structure to be initialized.
 *
 * @retval 0 (this service always succeeds).
 */
int xnselector_init(struct xnselector *selector)
{
	unsigned i;

	xnsynch_init(&selector->synchbase, XNSYNCH_FIFO, NULL);
	for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
		__FD_ZERO__(&selector->fds[i].expected);
		__FD_ZERO__(&selector->fds[i].pending);
	}
	initq(&selector->bindings);
	return 0;
}
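
A minimal usage sketch, assuming the selector is allocated from the nucleus heap by the caller:

struct xnselector *selector = xnmalloc(sizeof(*selector));

if (selector)
	xnselector_init(selector);	/* always succeeds, returning 0 */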
Example #6
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregistry_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__event_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on entry)
			   from user-space, it gets registered under a unique
			   internal name but is not exported through /proc. */
			xnobject_create_name(event->name, sizeof(event->name),
					     (void *)event);
			pnode = NULL;
		}

		err =
		    xnregistry_enter(event->name, event, &event->handle, pnode);

		if (err)
			rt_event_delete(event);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}
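
The empty-name branch above implies that a user-space caller may create an anonymous event; a hypothetical sketch:

static RT_EVENT ev;

/* An empty name ("") yields an anonymous object: it is registered under
 * a unique internal name but not exported through /proc. */
int ret = rt_event_create(&ev, "", 0, EV_PRIO);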
Example #7
int sc_screate(unsigned initval, int opt, int *errp)
{
	int bflags = 0, semid;
	vrtxsem_t *sem;
	spl_t s;

	if (opt & ~1) {
		*errp = ER_IIP;
		return -1;
	}

	sem = (vrtxsem_t *)xnmalloc(sizeof(*sem));

	if (!sem) {
		*errp = ER_NOCB;
		return -1;
	}

	semid = xnmap_enter(vrtx_sem_idmap, -1, sem);

	if (semid < 0) {
		*errp = ER_NOCB;
		xnfree(sem);
		return -1;
	}

	if (opt == 0)
		bflags = XNSYNCH_PRIO;
	else
		bflags = XNSYNCH_FIFO;

	xnsynch_init(&sem->synchbase, bflags | XNSYNCH_DREORD);
	inith(&sem->link);
	sem->semid = semid;
	sem->magic = VRTX_SEM_MAGIC;
	sem->count = initval;

	xnlock_get_irqsave(&nklock, s);
	appendq(&vrtx_sem_q, &sem->link);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	sprintf(sem->name, "sem%d", semid);
	xnregistry_enter(sem->name, sem, &sem->handle, &__sem_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	*errp = RET_OK;

	return semid;
}
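
A hypothetical caller sketch illustrating the VRTX convention of returning the object id while reporting status through the error pointer:

int err;
int semid = sc_screate(1, 0, &err);	/* initial count 1, priority-ordered */

if (semid < 0 || err != RET_OK)
	handle_error(err);	/* hypothetical error handler */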
Example #8
int sc_mcreate(unsigned int opt, int *errp)
{
	int bflags, mid;
	vrtxmx_t *mx;
	spl_t s;

	switch (opt) {
	case 0:
		bflags = XNSYNCH_PRIO;
		break;
	case 1:
		bflags = XNSYNCH_FIFO;
		break;
	case 2:
		bflags = XNSYNCH_PRIO | XNSYNCH_PIP;
		break;
	default:
		*errp = ER_IIP;
		return 0;
	}

	mx = xnmalloc(sizeof(*mx));
	if (mx == NULL) {
		*errp = ER_NOCB;
		return -1;
	}

	mid = xnmap_enter(vrtx_mx_idmap, -1, mx);
	if (mid < 0) {
		*errp = ER_NOCB;	/* Report the failure, as sc_screate() does. */
		xnfree(mx);
		return -1;
	}

	inith(&mx->link);
	mx->mid = mid;
	xnsynch_init(&mx->synchbase, bflags | XNSYNCH_DREORD | XNSYNCH_OWNER,
		     NULL);

	xnlock_get_irqsave(&nklock, s);
	appendq(&vrtx_mx_q, &mx->link);
	xnlock_put_irqrestore(&nklock, s);

	sprintf(mx->name, "mx%d", mid);
	xnregistry_enter(mx->name, mx, &mx->handle, &__mutex_pnode.node);

	*errp = RET_OK;

	return mid;
}
Example #9
int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
{
	struct xnpipe_state *state;
	int need_sched = 0, ret;
	spl_t s;

	minor = xnpipe_minor_alloc(minor);
	if (minor < 0)
		return minor;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	ret = xnpipe_set_ops(state, ops);
	if (ret) {
		xnlock_put_irqrestore(&nklock, s);
		return ret;
	}

	__setbits(state->status, XNPIPE_KERN_CONN);
	xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
	state->xstate = xstate;
	state->ionrd = 0;

	if (testbits(state->status, XNPIPE_USER_CONN)) {
		if (testbits(state->status, XNPIPE_USER_WREAD)) {
			/*
			 * Wake up the regular Linux task waiting for
			 * the kernel side to connect (xnpipe_open).
			 */
			__setbits(state->status, XNPIPE_USER_WREAD_READY);
			need_sched = 1;
		}

		if (state->asyncq) {	/* Schedule asynch sig. */
			__setbits(state->status, XNPIPE_USER_SIGIO);
			need_sched = 1;
		}
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return minor;
}
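
A hypothetical kernel-side sketch, assuming my_ops is a fully populated xnpipe_operations table:

static struct xnpipe_operations my_ops = {
	/* output/input/buffer-management callbacks (assumed) */
};

int minor = xnpipe_connect(0, &my_ops, NULL);

if (minor < 0)
	return minor;	/* minor allocation or xnpipe_set_ops() failed */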
Example #10
/* Called with nklock locked, irq off. */
static int pse51_sem_init_inner(pse51_sem_t * sem, int pshared, unsigned value)
{
	if (value > (unsigned)SEM_VALUE_MAX)
		return EINVAL;

	sem->magic = PSE51_SEM_MAGIC;
	inith(&sem->link);
	appendq(&pse51_kqueues(pshared)->semq, &sem->link);
	xnsynch_init(&sem->synchbase, XNSYNCH_PRIO, NULL);
	sem->value = value;
	sem->pshared = pshared;
	sem->is_named = 0;
	sem->owningq = pse51_kqueues(pshared);

	return 0;
}
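
Given the locking contract stated above, a hypothetical caller would wrap the inner init like this:

spl_t s;
int err;

xnlock_get_irqsave(&nklock, s);
err = pse51_sem_init_inner(sem, pshared, value);	/* nklock held, irqs off */
xnlock_put_irqrestore(&nklock, s);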
Example #11
int rt_intr_create(RT_INTR *intr,
		   const char *name,
		   unsigned irq, rt_isr_t isr, rt_iack_t iack, int mode)
{
	int err;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (name)
		xnobject_copy_name(intr->name, name);
	else
		/* Kernel-side "anonymous" objects (name == NULL) get unique names.
		 * Nevertheless, they will not be exported via the registry. */
		xnobject_create_name(intr->name, sizeof(intr->name), isr);

	xnintr_init(&intr->intr_base, intr->name, irq, isr, iack, mode);
#ifdef CONFIG_XENO_OPT_PERVASIVE
	xnsynch_init(&intr->synch_base, XNSYNCH_PRIO, NULL);
	intr->pending = 0;
	intr->cpid = 0;
	intr->mode = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	intr->magic = XENO_INTR_MAGIC;
	intr->handle = 0;	/* i.e. (still) unregistered interrupt. */
	inith(&intr->rlink);
	intr->rqueue = &xeno_get_rholder()->intrq;
	xnlock_get_irqsave(&nklock, s);
	appendq(intr->rqueue, &intr->rlink);
	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_attach(&intr->intr_base, intr);

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (!err && name)
		err = xnregistry_enter(intr->name, intr, &intr->handle,
				       &__intr_pnode);
	if (err)
		rt_intr_delete(intr);

	return err;
}
Example #12
int SKIN_INIT(vxworks)
{
	int err;

	initq(&__wind_global_rholder.wdq);
	initq(&__wind_global_rholder.msgQq);
	initq(&__wind_global_rholder.semq);

	/* The following fields are unused in the global holder;
	   still, we initialize them so as not to leave such data in
	   an invalid state. */
	xnsynch_init(&__wind_global_rholder.wdsynch, XNSYNCH_FIFO, NULL);
	initq(&__wind_global_rholder.wdpending);
	__wind_global_rholder.wdcount = 0;

	err = xnpod_init();

	if (err != 0)
		goto fail_core;

	err = wind_sysclk_init(tick_arg * 1000);

	if (err != 0) {
		xnpod_shutdown(err);

	fail_core:
		xnlogerr("VxWorks skin init failed, code %d.\n", err);
		return err;
	}

	wind_wd_init();
	wind_task_hooks_init();
	wind_sem_init();
	wind_msgq_init();
	wind_task_init();
#ifdef CONFIG_XENO_OPT_PERVASIVE
	wind_syscall_init();
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	xnprintf("starting VxWorks services.\n");

	return 0;
}
Example #13
int rt_sem_create(RT_SEM *sem, const char *name, unsigned long icount, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if ((mode & S_PULSE) && icount > 0)
		return -EINVAL;

	xnsynch_init(&sem->synch_base, mode & S_PRIO, NULL);
	sem->count = icount;
	sem->mode = mode;
	sem->handle = 0;	/* i.e. (still) unregistered semaphore. */
	sem->magic = XENO_SEM_MAGIC;
	xnobject_copy_name(sem->name, name);
	inith(&sem->rlink);
	sem->rqueue = &xeno_get_rholder()->semq;
	xnlock_get_irqsave(&nklock, s);
	appendq(sem->rqueue, &sem->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	sem->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(sem->name, sem, &sem->handle,
				       &__sem_pnode);
		if (err)
			rt_sem_delete(sem);
	}

	return err;
}
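
A hypothetical caller sketch; note the restriction checked above, namely that a pulse semaphore (S_PULSE) must start with a zero count:

static RT_SEM sem;

/* A counting semaphore with two initial units and a priority-ordered
 * wait queue. */
int ret = rt_sem_create(&sem, "MySem", 2, S_PRIO);

if (ret)
	handle_error(ret);	/* hypothetical error handler */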
Example #14
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO, NULL);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(event->name, event, &event->handle,
				       &__event_pnode);

		if (err)
			rt_event_delete(event);
	}

	return err;
}
Example #15
static void *__wind_shadow_eventcb(int event, void *data)
{
	struct wind_resource_holder *rh;
	switch (event) {

	case XNSHADOW_CLIENT_ATTACH:

		rh = (struct wind_resource_holder *)
		    xnarch_alloc_host_mem(sizeof(*rh));
		if (!rh)
			return ERR_PTR(-ENOMEM);

		initq(&rh->wdq);
		/* A single server thread pends on this. */
		xnsynch_init(&rh->wdsynch, XNSYNCH_FIFO, NULL);
		initq(&rh->wdpending);
		rh->wdcount = 0;
		initq(&rh->msgQq);
		initq(&rh->semq);

		return &rh->ppd;

	case XNSHADOW_CLIENT_DETACH:

		rh = ppd2rholder((xnshadow_ppd_t *) data);
		wind_wd_flush_rq(&rh->wdq);
		xnsynch_destroy(&rh->wdsynch);
		/* No need to reschedule: all our threads have been zapped. */
		wind_msgq_flush_rq(&rh->msgQq);
		wind_sem_flush_rq(&rh->semq);

		xnarch_free_host_mem(rh, sizeof(*rh));

		return NULL;
	}

	return ERR_PTR(-EINVAL);
}
Example #16
int rt_cond_create(RT_COND *cond, const char *name)
{
    int err = 0;
    spl_t s;

    if (xnpod_asynch_p())
        return -EPERM;

    xnsynch_init(&cond->synch_base, XNSYNCH_PRIO, NULL);
    cond->handle = 0;	/* i.e. (still) unregistered cond. */
    cond->magic = XENO_COND_MAGIC;
    xnobject_copy_name(cond->name, name);
    inith(&cond->rlink);
    cond->rqueue = &xeno_get_rholder()->condq;
    xnlock_get_irqsave(&nklock, s);
    appendq(cond->rqueue, &cond->rlink);
    xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
    cond->cpid = 0;
#endif

    /*
     * <!> Since xnregistry_enter() may reschedule, only register
     * complete objects, so that the registry cannot return
     * handles to half-baked objects...
     */
    if (name) {
        err = xnregistry_enter(cond->name, cond, &cond->handle,
                               &__cond_pnode.node);

        if (err)
            rt_cond_delete(cond);
    }

    return err;
}
Example #17
void taskev_init(psosevent_t *evgroup)
{
	xnsynch_init(&evgroup->synchbase, XNSYNCH_FIFO, NULL);
	evgroup->events = 0;
}
Example #18
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_DMA32) ? GFP_DMA32 : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNHEAP_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNHEAP_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}
	xnheap_set_label(&heap->heap_base, "rt_heap: %s", name);

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO), NULL);
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(heap->name, heap, &heap->handle,
				       &__heap_pnode.node);

		if (err)
			rt_heap_delete(heap);
	}

	return err;
}
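
A hypothetical kernel-side caller sketch; H_MAPPABLE would additionally require a non-empty name, as checked above:

static RT_HEAP heap;

/* A 256 KB heap with a priority-ordered wait queue. */
int ret = rt_heap_create(&heap, "MyHeap", 256 * 1024, H_PRIO);

if (ret)
	handle_error(ret);	/* hypothetical error handler */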
Example #19
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	int err;
	spl_t s;

	if (!xnpod_root_p())
		return -EPERM;

	if (heapsize == 0)
		return -EINVAL;

	/* Make sure we won't hit trivial argument errors when calling
	   xnheap_init(). */

	heap->csize = heapsize;	/* Record this for SBA management and inquiry. */

#ifdef __KERNEL__
	if (mode & H_MAPPABLE) {
		if (!name || !*name)
			return -EINVAL;

#ifdef CONFIG_XENO_OPT_PERVASIVE
		heapsize = xnheap_rounded_size(heapsize, PAGE_SIZE);

		err = xnheap_init_mapped(&heap->heap_base,
					 heapsize,
					 ((mode & H_DMA) ? GFP_DMA : 0)
					 | ((mode & H_NONCACHED) ?
					    XNHEAP_GFP_NONCACHED : 0));
		if (err)
			return err;

		heap->cpid = 0;
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		return -ENOSYS;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	} else
#endif /* __KERNEL__ */
	{
		void *heapmem;

		heapsize = xnheap_rounded_size(heapsize, XNCORE_PAGE_SIZE);

		heapmem = xnarch_alloc_host_mem(heapsize);

		if (!heapmem)
			return -ENOMEM;

		err = xnheap_init(&heap->heap_base, heapmem, heapsize, XNCORE_PAGE_SIZE);
		if (err) {
			xnarch_free_host_mem(heapmem, heapsize);
			return err;
		}
	}

	xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO));
	heap->handle = 0;	/* i.e. (still) unregistered heap. */
	heap->magic = XENO_HEAP_MAGIC;
	heap->mode = mode;
	heap->sba = NULL;
	xnobject_copy_name(heap->name, name);
	inith(&heap->rlink);
	heap->rqueue = &xeno_get_rholder()->heapq;
	xnlock_get_irqsave(&nklock, s);
	appendq(heap->rqueue, &heap->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregistry_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__heap_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on entry)
			   from user-space, it gets registered under a unique
			   internal name but is not exported through /proc. */
			xnobject_create_name(heap->name, sizeof(heap->name),
					     (void *)heap);
			pnode = NULL;
		}

		err = xnregistry_enter(heap->name, heap, &heap->handle, pnode);

		if (err)
			rt_heap_delete(heap);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}