Example #1
int rt_intr_delete(RT_INTR *intr)
{
	int err = 0, rc = XNSYNCH_DONE;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	removeq(intr->rqueue, &intr->rlink);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	rc = xnsynch_destroy(&intr->synch_base);
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	if (intr->handle)
		xnregistry_remove(intr->handle);

	xeno_mark_deleted(intr);

	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_destroy(&intr->intr_base);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

	return err;
}
Example #2
static int format_irq_proc(unsigned int irq, char *str)
{
	xnintr_t *intr;
	char *p = str;
	spl_t s;

	if (rthal_virtual_irq_p(irq)) {
		p += sprintf(p, "         [virtual]");
		return p - str;
	} else if (irq == XNARCH_TIMER_IRQ) {
		p += sprintf(p, "         [timer]");
		return p - str;
#ifdef CONFIG_SMP
	} else if (irq == RTHAL_SERVICE_IPI0) {
		p += sprintf(p, "         [IPI]");
		return p - str;
	} else if (irq == RTHAL_CRITICAL_IPI) {
		p += sprintf(p, "         [critical sync]");
		return p - str;
#endif /* CONFIG_SMP */
	}

	xnlock_get_irqsave(&intrlock, s);

	intr = xnintr_shirq_first(irq);
	if (intr) {
		strcpy(p, "        "); p += 8;

		do {
			*p = ' '; p += 1;
			strcpy(p, intr->name); p += strlen(intr->name);

			intr = xnintr_shirq_next(intr);
		} while (intr);
	}

	xnlock_put_irqrestore(&intrlock, s);

	return p - str;
}
Example #3
int xnmap_enter(xnmap_t *map, int key, void *objaddr)
{
	int hi, lo, ofkey = key - map->offset;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (ofkey >= 0 && ofkey < map->nkeys) {
		if (map->objarray[ofkey] != NULL) {
			key = -EEXIST;
			goto unlock_and_exit;
		}
	} else if (map->ukeys >= map->nkeys) {
		key = -ENOSPC;
		goto unlock_and_exit;
	} else {
		/* The himask implements a namespace reservation of
		   half of the bitmap space which cannot be used to
		   draw keys. */

		hi = ffnz(map->himap & ~map->himask);
		lo = ffnz(map->lomap[hi]);
		ofkey = hi * BITS_PER_LONG + lo;
		++map->ukeys;

		__clrbits(map->lomap[hi], 1UL << lo);

		if (map->lomap[hi] == 0)
			__clrbits(map->himap, 1UL << hi);
	}

	map->objarray[ofkey] = objaddr;
	key = ofkey + map->offset;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return key;
}
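The himask comment above is the crux of the allocator: map->himap keeps one bit per lomap word that still has free bits, so drawing the lowest free key costs two find-first-bit scans. A minimal stand-alone sketch of that step, assuming ffnz() behaves like GCC's __builtin_ffsl(x) - 1 and that the caller has already verified a free key exists (hypothetical helper, not part of the source above):

#define MAP_BITS (8 * sizeof(unsigned long))

/* Pick the first lomap word with a free bit, skipping the
 * himask-reserved range, then the first free bit inside that word. */
static int draw_key(unsigned long himap, unsigned long himask,
		    const unsigned long *lomap)
{
	int hi = __builtin_ffsl(himap & ~himask) - 1;
	int lo = __builtin_ffsl(lomap[hi]) - 1;

	return hi * (int)MAP_BITS + lo;
}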
Example #4
int sc_sinquiry(int semid, int *errp)
{
	vrtxsem_t *sem;
	int count;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);

	if (sem == NULL) {
		*errp = ER_ID;
		count = 0;
	} else {
		*errp = RET_OK;
		count = sem->count;
	}

	xnlock_put_irqrestore(&nklock, s);

	return count;
}
Example #5
ER ref_flg(T_RFLG *pk_rflg, ID flgid)
{
	uitask_t *sleeper;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&flag->synchbase)) {
		xnpholder_t *holder = getheadpq(xnsynch_wait_queue(&flag->synchbase));
		xnthread_t *thread = link2thread(holder, plink);
		sleeper = thread2uitask(thread);
		pk_rflg->wtsk = sleeper->id;
	} else
		pk_rflg->wtsk = FALSE;

	pk_rflg->exinf = flag->exinf;
	pk_rflg->flgptn = flag->flgvalue;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #6
int rt_intr_disable(RT_INTR *intr)
{
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		goto unlock_and_exit;
	}

	err = xnintr_disable(&intr->intr_base);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #7
int rt_event_create(RT_EVENT *event,
		    const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO, NULL);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	/*
	 * <!> Since xnregistry_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		err = xnregistry_enter(event->name, event, &event->handle,
				       &__event_pnode);

		if (err)
			rt_event_delete(event);
	}

	return err;
}
Example #8
int rt_event_delete(RT_EVENT *event)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	removeq(event->rqueue, &event->rlink);

	rc = xnsynch_destroy(&event->synch_base);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (event->handle)
		xnregistry_remove(event->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	xeno_mark_deleted(event);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #9
int rt_buffer_delete(RT_BUFFER *bf)
{
	int ret = 0, resched;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	xnarch_free_host_mem(bf->bufmem, bf->bufsz);
	removeq(bf->rqueue, &bf->rlink);
	resched = xnsynch_destroy(&bf->isynch_base) == XNSYNCH_RESCHED;
	resched += xnsynch_destroy(&bf->osynch_base) == XNSYNCH_RESCHED;

	if (bf->handle)
		xnregistry_remove(bf->handle);

	xeno_mark_deleted(bf);

	if (resched)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example #10
/**
 * Execute an initialization routine.
 *
 * This service may be used by libraries which need an initialization function
 * to be called only once.
 *
 * The function @a init_routine is called, with no argument, only the first
 * time this service is called with the address @a once.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EINVAL, the object pointed to by @a once is invalid (it must have been
 *   initialized with PTHREAD_ONCE_INIT).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_once.html">
 * Specification.</a>
 *
 */
int pthread_once(pthread_once_t * once, void (*init_routine) (void))
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (!pse51_obj_active(once, PSE51_ONCE_MAGIC, pthread_once_t)) {
		xnlock_put_irqrestore(&nklock, s);
		return EINVAL;
	}

	if (!once->routine_called) {
		init_routine();
		/* If the calling thread is canceled while executing init_routine,
		   routine_called will not be set to 1. */
		once->routine_called = 1;
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
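A minimal caller-side sketch of the once-only pattern described above, assuming the skin's pthread.h and a control block statically initialized with PTHREAD_ONCE_INIT (hypothetical names, not part of the source above):

#include <pthread.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;

static void init_library(void)
{
	/* Runs at most once, on the first pthread_once() call below. */
}

void library_entry(void)
{
	/* Safe from any thread; calls after the first are no-ops. */
	pthread_once(&init_once, init_library);
}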
Example #11
u_long t_restart(u_long tid, u_long targs[])
{
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;
	int n;

	if (xnpod_unblockable_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	if (tid == 0)
		task = psos_current_task();
	else {
		task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

		if (!task) {
			err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
			goto unlock_and_exit;
		}

		if (xnthread_test_state(&task->threadbase, XNDORMANT)) {
			err = ERR_NACTIVE;
			goto unlock_and_exit;
		}
	}

	for (n = 0; n < 4; n++)
		task->args[n] = targs ? targs[n] : 0;

	xnpod_restart_thread(&task->threadbase);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #12
static int __sc_rblock(struct task_struct *curr, struct pt_regs *regs)
{
	char __user *buf;
	vrtxpt_t *pt;
	int pid, err;
	spl_t s;

	pid = __xn_reg_arg1(regs);
	buf = (char __user *)__xn_reg_arg2(regs);

	xnlock_get_irqsave(&nklock, s);

	pt = xnmap_fetch(vrtx_pt_idmap, pid);

	if (!pt || pt->mm != curr->mm) {
		/* Deallocation requests must be issued from the same
		 * process which created the partition. */
		err = ER_ID;
		goto unlock_and_exit;
	}

	/* Convert the caller-based address of buf to its equivalent
	   mapping in the kernel address space. */

	if (buf) {
		buf = xnheap_mapped_address(pt->sysheap,
					    (caddr_t)buf - pt->mapbase);
		sc_rblock(pid, buf, &err);
	} else
		err = ER_NMB;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #13
int xnintr_attach(xnintr_t *intr, void *cookie)
{
	int ret;
	spl_t s;

	trace_mark(xn_nucleus, irq_attach, "irq %u name %s",
		   intr->irq, intr->name);

	intr->cookie = cookie;
	memset(&intr->stat, 0, sizeof(intr->stat));

#ifdef CONFIG_SMP
	xnarch_set_irq_affinity(intr->irq, nkaffinity);
#endif /* CONFIG_SMP */

	xnlock_get_irqsave(&intrlock, s);

	if (intr->irq >= XNARCH_NR_IRQS) {
		ret = -EINVAL;
		goto out;
	}

	if (__testbits(intr->flags, XN_ISR_ATTACHED)) {
		ret = -EBUSY;
		goto out;
	}

	ret = xnintr_irq_attach(intr);
	if (ret)
		goto out;

	__setbits(intr->flags, XN_ISR_ATTACHED);
	xnintr_stat_counter_inc();
out:
	xnlock_put_irqrestore(&intrlock, s);

	return ret;
}
Example #14
static pse51_shm_t *pse51_shm_get(pse51_desc_t ** pdesc, int fd, unsigned inc)
{
	pse51_shm_t *shm;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	shm = (pse51_shm_t *)
		ERR_PTR(-pse51_desc_get(pdesc, fd, PSE51_SHM_MAGIC));

	if (IS_ERR(shm))
		goto out;

	shm = node2shm(pse51_desc_node(*pdesc));

	shm->nodebase.refcount += inc;

out:
	xnlock_put_irqrestore(&nklock, s);

	return shm;
}
Example #15
int sc_minquiry(int mid, int *errp)
{
	vrtxmx_t *mx;
	spl_t s;
	int rc;

	xnlock_get_irqsave(&nklock, s);

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		rc = 0;
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	rc = xnsynch_owner(&mx->synchbase) == NULL;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return rc;
}
Example #16
/**
 * Attempt, during a bounded time, to lock a semaphore.
 *
 * This service is equivalent to sem_wait(), except that the caller is only
 * blocked until the timeout @a abs_timeout expires.
 *
 * @param sm the semaphore to be locked;
 *
 * @param abs_timeout the timeout, expressed as an absolute value of the
 * CLOCK_REALTIME clock.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EPERM, the caller context is invalid;
 * - EINVAL, the semaphore is invalid or uninitialized;
 * - EINVAL, the specified timeout is invalid;
 * - EPERM, the semaphore @a sm is not process-shared and does not belong to the
 *   current process;
 * - EINTR, the caller was interrupted by a signal while blocked in this
 *   service;
 * - ETIMEDOUT, the semaphore could not be locked and the specified timeout
 *   expired.
 *
 * @par Valid contexts:
 * - Xenomai kernel-space thread,
 * - Xenomai user-space thread (switches to primary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_timedwait.html">
 * Specification.</a>
 *
 */
int sem_timedwait(sem_t * sm, const struct timespec *abs_timeout)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	spl_t s;
	int err;

	if (abs_timeout->tv_nsec > ONE_BILLION) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	err = sem_timedwait_internal(shadow, 1, ts2ticks_ceil(abs_timeout) + 1);
	xnlock_put_irqrestore(&nklock, s);

error:
	if (err) {
		thread_set_errno(err);
		return -1;
	}

	return 0;
}
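Since @a abs_timeout is an absolute CLOCK_REALTIME value, a caller derives it from the current clock reading. A minimal sketch under standard POSIX headers (hypothetical helper, not part of the source above):

#include <time.h>
#include <errno.h>
#include <semaphore.h>

/* Wait at most 100 ms for sem; returns 0 on success, or an errno
 * value such as ETIMEDOUT. */
int wait_100ms(sem_t *sem)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_nsec += 100000000;
	if (ts.tv_nsec >= 1000000000) {	/* keep tv_nsec in [0, 1e9) */
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000;
	}

	return sem_timedwait(sem, &ts) ? errno : 0;
}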
Example #17
struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	struct xnthread *last;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_SMP
	/* If current thread migrated while suspended */
	sched = xnsched_current();
#endif /* CONFIG_SMP */

	last = sched->last;
	sched->status &= ~XNINSW;

	/* Detect a thread which called xnthread_migrate() */
	if (last->sched != sched) {
		xnsched_putback(last);
		xnthread_clear_state(last, XNMIGRATE);
	}

	return sched;
}
Example #18
static int __wind_taskinfo_status(struct pt_regs *regs)
{
	xnhandle_t handle = __xn_reg_arg1(regs);
	unsigned long status;
	WIND_TCB *pTcb;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_lookup_task(handle);

	if (!pTcb || pTcb->magic != WIND_TASK_MAGIC) {
		xnlock_put_irqrestore(&nklock, s);
		return S_objLib_OBJ_ID_ERROR;
	}

	status = xnthread_state_flags(&pTcb->threadbase);

	xnlock_put_irqrestore(&nklock, s);

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs), &status,
				      sizeof(status));
}
Example #19
int rt_cond_inquire(RT_COND *cond, RT_COND_INFO *info)
{
    int err = 0;
    spl_t s;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    strcpy(info->name, cond->name);
    info->nwaiters = xnsynch_nsleepers(&cond->synch_base);

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
Example #20
static ssize_t lock_vfile_store(struct xnvfile_input *input)
{
	ssize_t ret;
	spl_t s;
	int cpu;
	long val;

	ret = xnvfile_get_integer(input, &val);
	if (ret < 0)
		return ret;

	if (val != 0)
		return -EINVAL;

	for_each_realtime_cpu(cpu) {
		xnlock_get_irqsave(&nklock, s);
		memset(&per_cpu(xnlock_stats, cpu), '\0', sizeof(struct xnlockinfo));
		xnlock_put_irqrestore(&nklock, s);
	}

	return ret;
}
Example #21
int rt_cond_broadcast(RT_COND *cond)
{
    int err = 0;
    spl_t s;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    if (xnsynch_flush(&cond->synch_base, 0) == XNSYNCH_RESCHED)
        xnpod_schedule();

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
Example #22
int rt_sem_delete(RT_SEM *sem)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	removeq(sem->rqueue, &sem->rlink);

	rc = xnsynch_destroy(&sem->synch_base);

	if (sem->handle)
		xnregistry_remove(sem->handle);

	xeno_mark_deleted(sem);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #23
u_long t_ident(const char *name, u_long node, u_long *tid_r)
{
	u_long err = SUCCESS;
	xnholder_t *holder;
	psostask_t *task;
	spl_t s;

	if (node > 1)
		return ERR_NODENO;

	if (!name) {
		if (xnpod_unblockable_p())
			return ERR_OBJID;
		*tid_r = (u_long)psos_current_task();
		return SUCCESS;
	}

	xnlock_get_irqsave(&nklock, s);

	for (holder = getheadq(&psostaskq);
	     holder; holder = nextq(&psostaskq, holder)) {
		task = link2psostask(holder);

		if (!strcmp(task->name, name)) {
			*tid_r = (u_long)task;
			goto unlock_and_exit;
		}
	}

	err = ERR_OBJNF;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #24
int rt_cond_create(RT_COND *cond, const char *name)
{
    int err = 0;
    spl_t s;

    if (xnpod_asynch_p())
        return -EPERM;

    xnsynch_init(&cond->synch_base, XNSYNCH_PRIO, NULL);
    cond->handle = 0;	/* i.e. (still) unregistered cond. */
    cond->magic = XENO_COND_MAGIC;
    xnobject_copy_name(cond->name, name);
    inith(&cond->rlink);
    cond->rqueue = &xeno_get_rholder()->condq;
    xnlock_get_irqsave(&nklock, s);
    appendq(cond->rqueue, &cond->rlink);
    xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
    cond->cpid = 0;
#endif

    /*
     * <!> Since xnregistry_enter() may reschedule, only register
     * complete objects, so that the registry cannot return
     * handles to half-baked objects...
     */
    if (name) {
        err = xnregistry_enter(cond->name, cond, &cond->handle,
                               &__cond_pnode.node);

        if (err)
            rt_cond_delete(cond);
    }

    return err;
}
Example #25
struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	struct xnthread *last;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_SMP
	/* If current thread migrated while suspended */
	sched = xnpod_current_sched();
#endif /* CONFIG_SMP */

	last = sched->last;
	__clrbits(sched->status, XNSWLOCK);

	/* Detect a thread which called xnpod_migrate_thread */
	if (last->sched != sched) {
		xnsched_putback(last);
		xnthread_clear_state(last, XNMIGRATE);
	}

	if (xnthread_test_state(last, XNZOMBIE)) {
		/*
		 * There are two cases where sched->last has the zombie
		 * bit:
		 * - either it had it before the context switch, the hooks
		 * have been executed and sched->zombie is last;
		 * - or it has been killed while the nklock was unlocked
		 * during the context switch, in which case we must run the
		 * hooks, and we do it now.
		 */
		if (sched->zombie != last)
			xnsched_zombie_hooks(last);
	}

	return sched;
}
Example #26
/**
 * Set the protocol attribute of a mutex attributes object.
 *
 * This service sets the @a protocol attribute of the mutex attributes object
 * @a attr.
 *
 * @param attr an initialized mutex attributes object,
 *
 * @param proto value of the @a protocol attribute, may be one of:
 * - PTHREAD_PRIO_NONE, meaning that a mutex created with the attributes object
 *   @a attr will not follow any priority protocol;
 * - PTHREAD_PRIO_INHERIT, meaning that a mutex created with the attributes
 *   object @a attr, will follow the priority inheritance protocol.
 *
 * The value PTHREAD_PRIO_PROTECT (priority ceiling protocol) is unsupported.
 *
 * @return 0 on success,
 * @return an error number if:
 * - EINVAL, the mutex attributes object @a attr is invalid;
 * - EOPNOTSUPP, the value of @a proto is unsupported;
 * - EINVAL, the value of @a proto is invalid.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_setprotocol.html">
 * Specification.</a>
 *
 */
static inline int
pthread_mutexattr_setprotocol(pthread_mutexattr_t * attr, int proto)
{
	spl_t s;

	if (!attr)
		return EINVAL;

	xnlock_get_irqsave(&nklock, s);

	if (!cobalt_obj_active(attr,COBALT_MUTEX_ATTR_MAGIC,pthread_mutexattr_t)) {
		xnlock_put_irqrestore(&nklock, s);
		return EINVAL;
	}

	switch (proto) {
	default:

		xnlock_put_irqrestore(&nklock, s);
		return EINVAL;

	case PTHREAD_PRIO_PROTECT:

		xnlock_put_irqrestore(&nklock, s);
		return EOPNOTSUPP;

	case PTHREAD_PRIO_NONE:
	case PTHREAD_PRIO_INHERIT:
		break;
	}

	attr->protocol = proto;

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
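A minimal sketch of creating a priority-inheritance mutex with this attribute, using only standard pthread calls (hypothetical helper, not part of the source above):

#include <pthread.h>

int make_pi_mutex(pthread_mutex_t *mutex)
{
	pthread_mutexattr_t attr;
	int err;

	pthread_mutexattr_init(&attr);
	err = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	if (err == 0)
		err = pthread_mutex_init(mutex, &attr);

	pthread_mutexattr_destroy(&attr);
	return err;	/* 0, or EINVAL/EOPNOTSUPP from setprotocol() */
}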
Example #27
int rt_sem_inquire(RT_SEM *sem, RT_SEM_INFO *info)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);

	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	strcpy(info->name, sem->name);
	info->count = sem->count;
	info->nwaiters = xnsynch_nsleepers(&sem->synch_base);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #28
struct xnpholder *xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnpholder *holder)
{
	struct xnthread *thread;
	struct xnpholder *nholder;
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	nholder = poppq(&synch->pendq, holder);
	thread = link2thread(holder, plink);
	thread->wchan = NULL;
	trace_mark(xn_nucleus, synch_wakeup_this,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);
	xnpod_resume_thread(thread, XNPEND);

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return nholder;
}
Example #29
int rt_event_inquire(RT_EVENT *event, RT_EVENT_INFO *info)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	strcpy(info->name, event->name);
	info->value = event->value;
	info->nwaiters = xnsynch_nsleepers(&event->synch_base);

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #30
int xnpipe_flush(int minor, int mode)
{
	struct xnpipe_state *state;
	int msgcount;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	msgcount = countq(&state->outq) + countq(&state->inq);

	if (mode & XNPIPE_OFLUSH)
		state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);

	if (mode & XNPIPE_IFLUSH)
		xnpipe_flushq(state, inq, free_ibuf, s);

	if (testbits(state->status, XNPIPE_USER_WSYNC) &&
	    msgcount > countq(&state->outq) + countq(&state->inq)) {
		__setbits(state->status, XNPIPE_USER_WSYNC_READY);
		xnpipe_schedule_request();
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}