Code example #1
File: flag.c Project: JackieXie168/xenomai
ER del_flg(ID flgid)
{
	uiflag_t *flag;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		xnlock_put_irqrestore(&nklock, s);
		return E_NOEXS;
	}

	xnmap_remove(ui_flag_idmap, flag->id);
	ui_mark_deleted(flag);

	xnregistry_remove(flag->handle);

	/* Destroy the synch object before freeing the flag; waking up
	   any waiters may require a reschedule. */
	if (xnsynch_destroy(&flag->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnfree(flag);

	xnlock_put_irqrestore(&nklock, s);

	return E_OK;
}
Code example #2
ER del_mbx(ID mbxid)
{
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		xnlock_put_irqrestore(&nklock, s);
		return E_NOEXS;
	}

	xnmap_remove(ui_mbx_idmap, mbx->id);
	ui_mark_deleted(mbx);
#ifdef CONFIG_XENO_OPT_REGISTRY
	xnregistry_remove(mbx->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	/* As in del_flg(), destroy the synch object before freeing the
	   mailbox and its message ring. */
	if (xnsynch_destroy(&mbx->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnfree(mbx->ring);
	xnfree(mbx);

	xnlock_put_irqrestore(&nklock, s);

	return E_OK;
}
Code example #3
File: flag.c Project: JackieXie168/xenomai
ER cre_flg(ID flgid, T_CFLG *pk_cflg)
{
	uiflag_t *flag;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	flag = xnmalloc(sizeof(*flag));

	if (!flag)
		return E_NOMEM;

	flgid = xnmap_enter(ui_flag_idmap, flgid, flag);

	if (flgid <= 0) {
		xnfree(flag);
		return E_OBJ;
	}

	xnsynch_init(&flag->synchbase, XNSYNCH_FIFO, NULL);
	flag->id = flgid;
	flag->exinf = pk_cflg->exinf;
	flag->flgatr = pk_cflg->flgatr;
	flag->flgvalue = pk_cflg->iflgptn;
	sprintf(flag->name, "flg%d", flgid);
	xnregistry_enter(flag->name, flag, &flag->handle, &__flag_pnode.node);
	xnarch_memory_barrier();
	flag->magic = uITRON_FLAG_MAGIC;

	return E_OK;
}
Code example #4
File: flag.c Project: JackieXie168/xenomai
ER clr_flg(ID flgid, UINT clrptn)
{
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	flag->flgvalue &= clrptn;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Code example #5
File: flag.c Project: JackieXie168/xenomai
ER set_flg(ID flgid, UINT setptn)
{
	xnpholder_t *holder, *nholder;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (setptn == 0)
		goto unlock_and_exit;

	flag->flgvalue |= setptn;

	if (!xnsynch_pended_p(&flag->synchbase))
		goto unlock_and_exit;

	nholder = getheadpq(xnsynch_wait_queue(&flag->synchbase));

	while ((holder = nholder) != NULL) {
		uitask_t *sleeper = thread2uitask(link2thread(holder, plink));
		UINT wfmode = sleeper->wargs.flag.wfmode;
		UINT waiptn = sleeper->wargs.flag.waiptn;

		if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0)
		    || (!(wfmode & TWF_ORW) && ((waiptn & flag->flgvalue) == waiptn))) {
			nholder = xnsynch_wakeup_this_sleeper(&flag->synchbase, holder);
			sleeper->wargs.flag.waiptn = flag->flgvalue;

			if (wfmode & TWF_CLR)
				flag->flgvalue = 0;
		} else
			nholder = nextpq(xnsynch_wait_queue(&flag->synchbase), holder);
	}

	xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
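The set_flg() wakeup loop above pairs with the skin's wai_flg() service, which parks the caller on the flag's synch object with its waiptn/wfmode recorded in wargs.flag. A minimal usage sketch follows; the <itron.h> header name and the wai_flg() entry point are assumptions, as neither appears in the excerpts above.

#include <itron.h>	/* uITRON skin API; header name assumed */

#define MY_FLGID  1
#define EVT_READY 0x1

static ER flag_demo(void)
{
	T_CFLG pk_cflg = { .exinf = NULL, .flgatr = TA_WMUL, .iflgptn = 0 };
	UINT flgptn;
	ER err;

	err = cre_flg(MY_FLGID, &pk_cflg);
	if (err != E_OK)
		return err;

	/* A producer posts the event: */
	set_flg(MY_FLGID, EVT_READY);

	/* Wait for any bit of EVT_READY; TWF_CLR zeroes the whole
	   pattern on wakeup, as seen in set_flg() above. */
	err = wai_flg(&flgptn, MY_FLGID, EVT_READY, TWF_ORW | TWF_CLR);

	del_flg(MY_FLGID);
	return err;
}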
Code example #6
ER cre_mbx(ID mbxid, T_CMBX *pk_cmbx)
{
	uimbx_t *mbx;
	T_MSG **ring;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	if (pk_cmbx->bufcnt <= 0)
		return E_PAR;

	if (pk_cmbx->mbxatr & TA_MPRI)
		return E_RSATR;

	mbx = xnmalloc(sizeof(*mbx));

	if (!mbx)
		return E_NOMEM;

	ring = xnmalloc(sizeof(T_MSG *) * pk_cmbx->bufcnt);

	if (!ring) {
		xnfree(mbx);
		return E_NOMEM;
	}

	mbxid = xnmap_enter(ui_mbx_idmap, mbxid, mbx);

	if (mbxid <= 0) {
		xnfree(ring);	/* don't leak the message ring */
		xnfree(mbx);
		return E_OBJ;
	}

	xnsynch_init(&mbx->synchbase,
		     (pk_cmbx->mbxatr & TA_TPRI) ? XNSYNCH_PRIO : XNSYNCH_FIFO);

	mbx->id = mbxid;
	mbx->exinf = pk_cmbx->exinf;
	mbx->mbxatr = pk_cmbx->mbxatr;
	mbx->bufcnt = pk_cmbx->bufcnt;
	mbx->rdptr = 0;
	mbx->wrptr = 0;
	mbx->mcount = 0;
	mbx->ring = ring;
#ifdef CONFIG_XENO_OPT_REGISTRY
	sprintf(mbx->name, "mbx%d", mbxid);
	xnregistry_enter(mbx->name, mbx, &mbx->handle, &__mbx_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */
	xnarch_memory_barrier();
	mbx->magic = uITRON_MBX_MAGIC;

	return E_OK;
}
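cre_mbx() dimensions a ring of T_MSG pointers, so mailbox messages travel by reference and must start with a T_MSG header. A hedged send/receive sketch, assuming the skin's snd_msg()/rcv_msg() services and the same <itron.h> header as above:

#include <itron.h>	/* uITRON skin API; header name assumed */

#define MY_MBXID 1

struct my_msg {
	T_MSG header;		/* must be first: the ring stores T_MSG pointers */
	int payload;
};

static ER mbx_demo(void)
{
	T_CMBX pk_cmbx = { .exinf = NULL, .mbxatr = TA_TFIFO, .bufcnt = 8 };
	static struct my_msg out = { .payload = 42 };
	T_MSG *in;
	ER err;

	err = cre_mbx(MY_MBXID, &pk_cmbx);
	if (err != E_OK)
		return err;

	snd_msg(MY_MBXID, &out.header);	/* enqueue by reference */
	err = rcv_msg(&in, MY_MBXID);	/* blocks until a message arrives */

	del_mbx(MY_MBXID);
	return err;
}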
Code example #7
File: buffer.c Project: BhargavKola/xenomai-forge
int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
{
	int ret = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	bf->bufmem = xnarch_alloc_host_mem(bufsz);
	if (bf->bufmem == NULL)
		return -ENOMEM;

	xnsynch_init(&bf->isynch_base, mode & B_PRIO, NULL);
	xnsynch_init(&bf->osynch_base, mode & B_PRIO, NULL);

	bf->handle = 0;	/* i.e. (still) unregistered buffer. */
	xnobject_copy_name(bf->name, name);
	inith(&bf->rlink);
	bf->rqueue = &xeno_get_rholder()->bufferq;
	xnlock_get_irqsave(&nklock, s);
	appendq(bf->rqueue, &bf->rlink);
	xnlock_put_irqrestore(&nklock, s);

	bf->mode = mode;
	bf->bufsz = bufsz;
	bf->rdoff = 0;
	bf->wroff = 0;
	bf->fillsz = 0;
	bf->rdtoken = 0;
	bf->wrtoken = 0;

#ifndef __XENO_SIM__
	bf->cpid = 0;
#endif
	bf->magic = XENO_BUFFER_MAGIC;

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		ret = xnregistry_enter(bf->name, bf, &bf->handle,
				       &__buffer_pnode.node);

		if (ret)
			rt_buffer_delete(bf);
	}

	return ret;
}
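rt_buffer_create() only sets up the object; data then moves through the isynch_base/osynch_base pair via the read/write services. A minimal lifecycle sketch, assuming the native skin's rt_buffer_write()/rt_buffer_read() calls:

#include <native/buffer.h>

static int buffer_demo(void)
{
	RT_BUFFER bf;
	char in[8];
	int ret;

	ret = rt_buffer_create(&bf, "demo", 4096, B_FIFO);
	if (ret)
		return ret;

	/* Producer side: blocks if the 4k buffer is full. */
	rt_buffer_write(&bf, "hello", 6, TM_INFINITE);

	/* Consumer side: blocks until 6 bytes are available. */
	rt_buffer_read(&bf, in, 6, TM_INFINITE);

	return rt_buffer_delete(&bf);
}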
Code example #8
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregistry_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */

	if (name) {
		xnpnode_t *pnode = &__event_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on entry)
			   from user-space, it gets registered under an unique
			   internal name but is not exported through /proc. */
			xnobject_create_name(event->name, sizeof(event->name),
					     (void *)event);
			pnode = NULL;
		}

		err =
		    xnregistry_enter(event->name, event, &event->handle, pnode);

		if (err)
			rt_event_delete(event);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}
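A usage sketch for the event object, assuming the native skin's rt_event_signal()/rt_event_wait() services:

#include <native/event.h>

#define EVT_DATA 0x1

static int event_demo(void)
{
	RT_EVENT ev;
	unsigned long mask;
	int ret;

	ret = rt_event_create(&ev, "demo", 0, EV_PRIO);
	if (ret)
		return ret;

	rt_event_signal(&ev, EVT_DATA);	/* OR bits into the event mask */

	/* EV_ANY satisfies the wait if any requested bit is set;
	   EV_ALL would require all of them. */
	ret = rt_event_wait(&ev, EVT_DATA, &mask, EV_ANY, TM_INFINITE);

	rt_event_delete(&ev);
	return ret;
}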
Code example #9
int rt_intr_create(RT_INTR *intr,
		   const char *name,
		   unsigned irq, rt_isr_t isr, rt_iack_t iack, int mode)
{
	int err;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if (name)
		xnobject_copy_name(intr->name, name);
	else
		/* Kernel-side "anonymous" objects (name == NULL) get unique names.
		 * Nevertheless, they will not be exported via the registry. */
		xnobject_create_name(intr->name, sizeof(intr->name), isr);

	xnintr_init(&intr->intr_base, intr->name, irq, isr, iack, mode);
#ifdef CONFIG_XENO_OPT_PERVASIVE
	xnsynch_init(&intr->synch_base, XNSYNCH_PRIO, NULL);
	intr->pending = 0;
	intr->cpid = 0;
	intr->mode = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	intr->magic = XENO_INTR_MAGIC;
	intr->handle = 0;	/* i.e. (still) unregistered interrupt. */
	inith(&intr->rlink);
	intr->rqueue = &xeno_get_rholder()->intrq;
	xnlock_get_irqsave(&nklock, s);
	appendq(intr->rqueue, &intr->rlink);
	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_attach(&intr->intr_base, intr);

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (!err && name)
		err = xnregistry_enter(intr->name, intr, &intr->handle,
				       &__intr_pnode);
	if (err)
		rt_intr_delete(intr);

	return err;
}
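The isr argument runs in primary interrupt context once rt_intr_create() has attached the line. A kernel-side sketch, assuming the conventional rt_isr_t prototype and RT_INTR_HANDLED return code; the IRQ number is an arbitrary example:

#include <native/intr.h>

static RT_INTR demo_intr;

static int demo_isr(xnintr_t *cookie)
{
	/* Acknowledge the device here; returning RT_INTR_HANDLED
	   lets the nucleus end the interrupt normally. */
	return RT_INTR_HANDLED;
}

static int intr_demo(void)
{
	/* IRQ 7 is arbitrary; no iack handler, default mode. */
	return rt_intr_create(&demo_intr, "demo-irq", 7, demo_isr, NULL, 0);
}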
Code example #10
int rt_sem_create(RT_SEM *sem, const char *name, unsigned long icount, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	if ((mode & S_PULSE) && icount > 0)
		return -EINVAL;

	xnsynch_init(&sem->synch_base, mode & S_PRIO, NULL);
	sem->count = icount;
	sem->mode = mode;
	sem->handle = 0;	/* i.e. (still) unregistered semaphore. */
	sem->magic = XENO_SEM_MAGIC;
	xnobject_copy_name(sem->name, name);
	inith(&sem->rlink);
	sem->rqueue = &xeno_get_rholder()->semq;
	xnlock_get_irqsave(&nklock, s);
	appendq(sem->rqueue, &sem->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	sem->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(sem->name, sem, &sem->handle,
				       &__sem_pnode);
		if (err)
			rt_sem_delete(sem);
	}

	return err;
}
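Note the guard above: a pulse-mode semaphore must start empty. A minimal post/pend sketch using the native skin's rt_sem_p()/rt_sem_v() services:

#include <native/sem.h>

static int sem_demo(void)
{
	RT_SEM sem;
	int ret;

	/* Counting semaphore, initially empty, priority-ordered waiters. */
	ret = rt_sem_create(&sem, "demo", 0, S_PRIO);
	if (ret)
		return ret;

	rt_sem_v(&sem);				/* post one unit */
	ret = rt_sem_p(&sem, TM_INFINITE);	/* pend, blocking if empty */

	rt_sem_delete(&sem);
	return ret;
}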
Code example #11
ER ref_mbx(T_RMBX *pk_rmbx, ID mbxid)
{
	uitask_t *sleeper;
	ER err = E_OK;
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&mbx->synchbase)) {
		sleeper =
			thread2uitask(link2thread
				      (getheadpq(xnsynch_wait_queue(&mbx->synchbase)),
				       plink));
		pk_rmbx->wtsk = sleeper->id;
	} else
		pk_rmbx->wtsk = FALSE;

	pk_rmbx->exinf = mbx->exinf;
	pk_rmbx->pk_msg =
	    mbx->mcount > 0 ? mbx->ring[mbx->rdptr] : (T_MSG *) NADR;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Code example #12
int rt_intr_delete(RT_INTR *intr)
{
	int err = 0, rc = XNSYNCH_DONE;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	removeq(intr->rqueue, &intr->rlink);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	rc = xnsynch_destroy(&intr->synch_base);
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	if (intr->handle)
		xnregistry_remove(intr->handle);

	xeno_mark_deleted(intr);

	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_destroy(&intr->intr_base);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

	return err;
}
Code example #13
File: buffer.c Project: BhargavKola/xenomai-forge
int rt_buffer_delete(RT_BUFFER *bf)
{
	int ret = 0, resched;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	xnarch_free_host_mem(bf->bufmem, bf->bufsz);
	removeq(bf->rqueue, &bf->rlink);
	resched = xnsynch_destroy(&bf->isynch_base) == XNSYNCH_RESCHED;
	resched += xnsynch_destroy(&bf->osynch_base) == XNSYNCH_RESCHED;

	if (bf->handle)
		xnregistry_remove(bf->handle);

	xeno_mark_deleted(bf);

	if (resched)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Code example #14
File: flag.c Project: JackieXie168/xenomai
ER ref_flg(T_RFLG *pk_rflg, ID flgid)
{
	uitask_t *sleeper;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&flag->synchbase)) {
		xnpholder_t *holder = getheadpq(xnsynch_wait_queue(&flag->synchbase));
		xnthread_t *thread = link2thread(holder, plink);
		sleeper = thread2uitask(thread);
		pk_rflg->wtsk = sleeper->id;
	} else
		pk_rflg->wtsk = FALSE;

	pk_rflg->exinf = flag->exinf;
	pk_rflg->flgptn = flag->flgvalue;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Code example #15
File: event.c Project: chrmorais/miniemc2
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO, NULL);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(event->name, event, &event->handle,
				       &__event_pnode);

		if (err)
			rt_event_delete(event);
	}

	return err;
}
Code example #16
int rt_event_delete(RT_EVENT *event)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	removeq(event->rqueue, &event->rlink);

	rc = xnsynch_destroy(&event->synch_base);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (event->handle)
		xnregistry_remove(event->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	xeno_mark_deleted(event);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Code example #17
int rt_sem_delete(RT_SEM *sem)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	removeq(sem->rqueue, &sem->rlink);

	rc = xnsynch_destroy(&sem->synch_base);

	if (sem->handle)
		xnregistry_remove(sem->handle);

	xeno_mark_deleted(sem);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Code example #18
File: cond.c Project: meeusr/xenomai-forge
int rt_cond_create(RT_COND *cond, const char *name)
{
    int err = 0;
    spl_t s;

    if (xnpod_asynch_p())
        return -EPERM;

    xnsynch_init(&cond->synch_base, XNSYNCH_PRIO, NULL);
    cond->handle = 0;	/* i.e. (still) unregistered cond. */
    cond->magic = XENO_COND_MAGIC;
    xnobject_copy_name(cond->name, name);
    inith(&cond->rlink);
    cond->rqueue = &xeno_get_rholder()->condq;
    xnlock_get_irqsave(&nklock, s);
    appendq(cond->rqueue, &cond->rlink);
    xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
    cond->cpid = 0;
#endif

    /*
     * <!> Since xnregistry_enter() may reschedule, only register
     * complete objects, so that the registry cannot return
     * handles to half-baked objects...
     */
    if (name) {
        err = xnregistry_enter(cond->name, cond, &cond->handle,
                               &__cond_pnode.node);

        if (err)
            rt_cond_delete(cond);
    }

    return err;
}
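rt_cond_create() only initializes the synch object; waiting follows the usual predicate-loop discipline with a companion mutex. A sketch assuming the native skin's rt_mutex_acquire()/rt_cond_wait() services, with both objects created elsewhere:

#include <native/mutex.h>
#include <native/cond.h>

static RT_MUTEX lock;	/* assumed created with rt_mutex_create() */
static RT_COND cond;	/* assumed created with rt_cond_create() above */
static int ready;

static int wait_ready(void)
{
	int ret = 0;

	rt_mutex_acquire(&lock, TM_INFINITE);
	/* Re-check the predicate after every wakeup, exactly as with
	   POSIX condition variables. */
	while (!ready && ret == 0)
		ret = rt_cond_wait(&cond, &lock, TM_INFINITE);
	rt_mutex_release(&lock);

	return ret;
}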
Code example #19
File: pipe.c Project: JackieXie168/xenomai
ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
{
	struct xnpipe_state *state;
	struct xnholder *h;
	xnthread_t *thread;
	ssize_t ret;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (xnpod_asynch_p())
		return -EPERM;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		ret = -EBADF;
		goto unlock_and_exit;
	}

	thread = xnpod_current_thread();

	while ((h = getq(&state->inq)) == NULL) {
		if (timeout == XN_NONBLOCK) {
			ret = -EWOULDBLOCK;
			goto unlock_and_exit;
		}

		xnsynch_sleep_on(&state->synchbase, timeout, XN_RELATIVE);

		if (xnthread_test_info(thread, XNTIMEO)) {
			ret = -ETIMEDOUT;
			goto unlock_and_exit;
		}
		if (xnthread_test_info(thread, XNBREAK)) {
			ret = -EINTR;
			goto unlock_and_exit;
		}
		if (xnthread_test_info(thread, XNRMID)) {
			ret = -EIDRM;
			goto unlock_and_exit;
		}

		/* remaining timeout */
		timeout = xnthread_timeout(thread);
	}

	*pmh = link2mh(h);

	ret = (ssize_t) xnpipe_m_size(*pmh);

	if (testbits(state->status, XNPIPE_USER_WSYNC)) {
		__setbits(state->status, XNPIPE_USER_WSYNC_READY);
		xnpipe_schedule_request();
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Code example #20
/**
 * Open a shared memory object.
 *
 * This service establishes a connection between a shared memory object and a
 * file descriptor. Further use of this descriptor allows the caller to size
 * and map the shared memory into the calling context's address space.
 *
 * One of the following access modes should be set in @a oflags:
 * - O_RDONLY, meaning that the shared memory object may only be mapped with the
 *   PROT_READ flag;
 * - O_WRONLY, meaning that the shared memory object may only be mapped with the
 *   PROT_WRITE flag;
 * - O_RDWR, meaning that the shared memory object may be mapped with the
 *   PROT_READ | PROT_WRITE flag.
 *
 * If no shared memory object  named @a name exists, and @a oflags has the @a
 * O_CREAT bit set, the shared memory object is created by this function.
 *
 * If @a oflags has the two bits @a O_CREAT and @a O_EXCL set and the shared
 * memory object already exists, this service fails.
 *
 * If @a oflags has the bit @a O_TRUNC set, and the shared memory object exists
 * and is not currently mapped, its size is truncated to 0.
 *
 * If @a oflags has the bit @a O_DIRECT set, the shared memory will be suitable
 * for direct memory access (allocated in physically contiguous memory).
 *
 * @a name may be any arbitrary string, in which slashes have no particular
 * meaning. However, for portability, using a name which starts with a slash and
 * contains no other slash is recommended.
 *
 * @param name name of the shared memory object to open;
 *
 * @param oflags flags.
 *
 * @param mode ignored.
 *
 * @return a file descriptor on success;
 * @return -1 with @a errno set if:
 * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
 * - EEXIST, the bits @a O_CREAT and @a O_EXCL were set in @a oflags and the
 *   shared memory object already exists;
 * - ENOENT, the bit @a O_CREAT is not set in @a oflags and the shared memory
 *   object does not exist;
 * - ENOSPC, insufficient memory exists in the system heap to create the shared
 *   memory object, increase CONFIG_XENO_OPT_SYS_HEAPSZ;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the O_TRUNC flag was specified and the shared memory object is
 *   currently mapped;
 * - EMFILE, too many descriptors are currently open.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/shm_open.html">
 * Specification.</a>
 * 
 */
int shm_open(const char *name, int oflags, mode_t mode)
{
	pse51_node_t *node;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err, fd;
	spl_t s;

	/* From root context only. */
	if (xnpod_asynch_p() || !xnpod_root_p()) {
		thread_set_errno(EPERM);
		return -1;
	}

	xnlock_get_irqsave(&nklock, s);
	err = pse51_node_get(&node, name, PSE51_SHM_MAGIC, oflags);
	xnlock_put_irqrestore(&nklock, s);
	if (err)
		goto error;

	if (node) {
		shm = node2shm(node);
		goto got_shm;
	}

	/* We must create the shared memory object, not yet allocated. */
	shm = (pse51_shm_t *) xnmalloc(sizeof(*shm));
	if (!shm) {
		err = ENOSPC;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	err = pse51_node_add(&shm->nodebase, name, PSE51_SHM_MAGIC);
	if (err && err != EEXIST)
		goto err_unlock;

	if (err == EEXIST) {
		/* The same shm was created in the meantime; roll back. */
		err = pse51_node_get(&node, name, PSE51_SHM_MAGIC, oflags);
	  err_unlock:
		xnlock_put_irqrestore(&nklock, s);
		xnfree(shm);
		if (err)
			goto error;

		shm = node2shm(node);
		goto got_shm;
	}

	pse51_shm_init(shm);
	xnlock_put_irqrestore(&nklock, s);

  got_shm:
	err = pse51_desc_create(&desc, &shm->nodebase,
				oflags & (PSE51_PERMS_MASK | O_DIRECT));
	if (err)
		goto err_shm_put;

	fd = pse51_desc_fd(desc);

	if ((oflags & O_TRUNC) && ftruncate(fd, 0)) {
		close(fd);
		return -1;
	}

	return fd;

  err_shm_put:
	pse51_shm_put(shm, 1);
  error:
	thread_set_errno(err);
	return -1;
}
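The documented flow chains shm_open() with the ftruncate() and mmap() services shown next; a hedged end-to-end sketch (the object name and sizes are arbitrary):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static int shm_demo(void)
{
	void *p;
	int fd;

	fd = shm_open("/demo", O_CREAT | O_RDWR, 0);	/* mode is ignored */
	if (fd < 0)
		return -1;

	/* The object is created empty: size it before mapping. */
	if (ftruncate(fd, 4096) < 0)
		goto fail;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		goto fail;

	/* ... use the region ... */

	munmap(p, 4096);
	close(fd);
	return 0;

fail:
	close(fd);
	return -1;
}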
Code example #21
/**
 * Truncate a file or shared memory object to a specified length.
 *
 * When used in kernel-space, this service sets the size of a shared memory
 * object opened with the shm_open() service to @a len. In user-space, this
 * service falls back to the regular Linux ftruncate() service for file
 * descriptors not obtained with shm_open(). When this service is used to
 * increase the size of a shared memory object, the added space is zero-filled.
 *
 * Shared memory objects are suitable for direct memory access (allocated in
 * physically contiguous memory) if O_DIRECT was passed to shm_open().
 *
 * Shared memory objects may only be resized if they are not currently mapped.
 *
 * @param fd file descriptor;
 *
 * @param len new length of the underlying file or shared memory object.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the specified length is invalid;
 * - EINVAL, the architecture cannot honour the O_DIRECT flag;
 * - EINTR, this service was interrupted by a signal;
 * - EBUSY, @a fd is a shared memory object descriptor and the underlying shared
 *   memory is currently mapped;
 * - EFBIG, allocation of system memory failed.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/ftruncate.html">
 * Specification.</a>
 * 
 */
int ftruncate(int fd, off_t len)
{
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		err = -PTR_ERR(shm);
		xnlock_put_irqrestore(&nklock, s);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (len < 0) {
		err = EINVAL;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc);
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	/* Allocate one more page for alignment (the address returned by mmap
	   must be aligned on a page boundary). */
	if (len)
#ifdef CONFIG_XENO_OPT_PERVASIVE
		len = xnheap_rounded_size(len + PAGE_SIZE, PAGE_SIZE);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		len = xnheap_rounded_size(len + PAGE_SIZE, XNHEAP_PAGE_SIZE);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

	err = 0;
	if (emptyq_p(&shm->mappings)) {
		/* Temporary storage, in order to preserve the memory contents upon
		   resizing, if possible. */
		void *addr = NULL;
		size_t size = 0;

		if (shm->addr) {
			if (len == xnheap_extentsize(&shm->heapbase)) {
				/* Size unchanged, skip copy and reinit. */
				err = 0;
				goto err_up;
			}

			size = xnheap_max_contiguous(&shm->heapbase);
			addr = xnarch_alloc_host_mem(size);
			if (!addr) {
				err = ENOMEM;
				goto err_up;
			}

			memcpy(addr, shm->addr, size);

			xnheap_free(&shm->heapbase, shm->addr);
			xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);

			shm->addr = NULL;
			shm->size = 0;
		}

		if (len) {
			int flags = XNARCH_SHARED_HEAP_FLAGS |
				((desc_flags & O_DIRECT) ? GFP_DMA : 0);

			err = -xnheap_init_mapped(&shm->heapbase, len, flags);
			if (err)
				goto err_up;

			xnheap_set_label(&shm->heapbase,
					 "posix shm: %s", shm->nodebase.name);

			shm->size = xnheap_max_contiguous(&shm->heapbase);
			shm->addr = xnheap_alloc(&shm->heapbase, shm->size);
			/* Required. */
			memset(shm->addr, '\0', shm->size);

			/* Copy the previous contents. */
			if (addr)
				memcpy(shm->addr, addr,
				       shm->size < size ? shm->size : size);

			shm->size -= PAGE_SIZE;
		}

		if (addr)
			xnarch_free_host_mem(addr, size);
	} else if (len != xnheap_extentsize(&shm->heapbase))
		err = EBUSY;

      err_up:
	up(&shm->maplock);

      err_shm_put:
	pse51_shm_put(shm, 1);

	if (!err)
		return 0;

      error:
	thread_set_errno(err == ENOMEM ? EFBIG : err);
	return -1;
}
Code example #22
/**
 * Map pages of memory.
 *
 * This service allows shared memory regions to be accessed by the caller.
 *
 * When used in kernel-space, this service returns the address of offset @a off
 * within the shared memory object underlying @a fd. The protection flags @a
 * prot are only checked for consistency with the open flags of @a fd, but
 * memory protection is unsupported. Since the shared memory region exists
 * before it is mapped, this service only increments a reference counter.
 *
 * The only supported value for @a flags is @a MAP_SHARED.
 *
 * When used in user-space, this service maps the specified shared memory region
 * into the caller address-space. If @a fd is not a shared memory object
 * descriptor (i.e. not obtained with shm_open()), this service falls back to
 * the regular Linux mmap service.
 *
 * @param addr ignored.
 *
 * @param len size of the shared memory region to be mapped.
 *
 * @param prot protection bits, checked in kernel-space, but only useful in
 * user-space, are a bitwise or of the following values:
 * - PROT_NONE, meaning that the mapped region can not be accessed;
 * - PROT_READ, meaning that the mapped region can be read;
 * - PROT_WRITE, meaning that the mapped region can be written;
 * - PROT_EXEC, meaning that the mapped region can be executed.
 *
 * @param flags only MAP_SHARED is accepted, meaning that the mapped memory
 * region is shared.
 *
 * @param fd file descriptor, obtained with shm_open().
 *
 * @param off offset in the shared memory region.
 *
 * @retval the address of the mapped region on success;
 * @retval MAP_FAILED with @a errno set if:
 * - EINVAL, @a len is null or @a addr is not a multiple of @a PAGE_SIZE;
 * - EBADF, @a fd is not a shared memory object descriptor (obtained with
 *   shm_open());
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, @a flags is not @a MAP_SHARED;
 * - EACCES, @a fd is not opened for reading, or is not opened for writing and
 *   PROT_WRITE is set in @a prot;
 * - EINTR, this service was interrupted by a signal;
 * - ENXIO, the range [off;off+len) is invalid for the shared memory region
 *   specified by @a fd;
 * - EAGAIN, insufficient memory exists in the system heap to create the
 *   mapping, increase CONFIG_XENO_OPT_SYS_HEAPSZ.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mmap.html">
 * Specification.</a>
 * 
 */
void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off)
{
	pse51_shm_map_t *map;
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	void *result;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);

	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		xnlock_put_irqrestore(&nklock, s);
		err = -PTR_ERR(shm);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (flags != MAP_SHARED) {
		err = ENOTSUP;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc) & PSE51_PERMS_MASK;
	xnlock_put_irqrestore(&nklock, s);

	if ((desc_flags != O_RDWR && desc_flags != O_RDONLY) ||
	    ((prot & PROT_WRITE) && desc_flags == O_RDONLY)) {
		err = EACCES;
		goto err_shm_put;
	}

	map = (pse51_shm_map_t *) xnmalloc(sizeof(*map));
	if (!map) {
		err = EAGAIN;
		goto err_shm_put;
	}

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_free_map;
	}

	if (!shm->addr || off + len > shm->size) {
		err = ENXIO;
		up(&shm->maplock);
		goto err_free_map;
	}

	/* Align the heap address on a page boundary. */
	result = (void *)PAGE_ALIGN((u_long)shm->addr);
	map->addr = result = (void *)((char *)result + off);
	map->size = len;
	inith(&map->link);
	prependq(&shm->mappings, &map->link);
	up(&shm->maplock);

	return result;

  err_free_map:
	xnfree(map);
  err_shm_put:
	pse51_shm_put(shm, 1);
  error:
	thread_set_errno(err);
	return MAP_FAILED;
}
Code example #23
/**
 * Unmap pages of memory.
 *
 * This service unmaps the shared memory region [addr;addr+len) from the caller
 * address-space.
 *
 * When called from kernel-space the memory region remains accessible as long as
 * it exists, and this service only decrements a reference counter.
 *
 * When called from user-space, if the region is not a shared memory region,
 * this service falls back to the regular Linux munmap() service.
 *
 * @param addr start address of shared memory area;
 *
 * @param len length of the shared memory area.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a len is null, @a addr is not a multiple of the page size or the
 *   range [addr;addr+len) is not a mapped region;
 * - ENXIO, @a addr is not the address of a shared memory area;
 * - EPERM, the caller context is invalid;
 * - EINTR, this service was interrupted by a signal.
 * 
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - kernel-space cancellation cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode);
 * - user-space cancellation cleanup routine.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/munmap.html">
 * Specification.</a>
 * 
 */
int munmap(void *addr, size_t len)
{
	pse51_shm_map_t *mapping = NULL;
	xnholder_t *holder;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_lookup(addr);

	if (!shm) {
		xnlock_put_irqrestore(&nklock, s);
		err = ENXIO;
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		xnlock_put_irqrestore(&nklock, s);
		err = EPERM;
		goto error;
	}

	++shm->nodebase.refcount;
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	for (holder = getheadq(&shm->mappings);
	     holder; holder = nextq(&shm->mappings, holder)) {
		mapping = link2map(holder);

		if (mapping->addr == addr && mapping->size == len)
			break;
	}

	if (!holder) {
		/* nklock was already released above; only the mapping
		   lock is held here. */
		err = EINVAL;
		goto err_up;
	}

	removeq(&shm->mappings, holder);
	up(&shm->maplock);

	xnfree(mapping);
	pse51_shm_put(shm, 2);
	return 0;

      err_up:
	up(&shm->maplock);
      err_shm_put:
	pse51_shm_put(shm, 1);
      error:
	thread_set_errno(err);
	return -1;
}