/**
 * Destroy a selector block.
 *
 * All bindings with file descriptor are destroyed.
 *
 * @param selector the selector block to be destroyed
 */
void xnselector_destroy(struct xnselector *selector)
{
	spl_t s;

	/* Prepare the holder used to link this selector on the global
	   destruction queue. */
	inith(&selector->destroy_link);

	/* Queue the selector for deletion under nklock; the actual
	   teardown is deferred to APC context rather than done here. */
	xnlock_get_irqsave(&nklock, s);
	appendq(&xnselectors, &selector->destroy_link);
	xnlock_put_irqrestore(&nklock, s);

	/* Kick the APC which drains the xnselectors queue and releases
	   the selector resources from a safe context. */
	rthal_apc_schedule(xnselect_apc);
}
/**
 * Set stacksize attribute.
 *
 * This service sets to @a stacksize the value of the @a stacksize attribute
 * in the attribute object @a attr.
 *
 * The @a stacksize attribute is used as the stack size of the threads created
 * using the attribute object @a attr.
 *
 * The minimum value for this attribute is PTHREAD_STACK_MIN.
 *
 * @param attr attribute object;
 *
 * @param stacksize value of the @a stacksize attribute.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EINVAL, @a attr or @a stacksize is invalid.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_attr_setstacksize.html">
 * Specification.</a>
 *
 */
int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stacksize)
{
	int ret = EINVAL;
	spl_t spl;

	/* Reject stacks below the POSIX-mandated minimum. */
	if (stacksize < PTHREAD_STACK_MIN)
		return EINVAL;

	xnlock_get_irqsave(&nklock, spl);

	/* Only update the attribute if the object is a live, initialized
	   thread attributes object. */
	if (pse51_obj_active(attr, PSE51_THREAD_ATTR_MAGIC, pthread_attr_t)) {
		attr->stacksize = stacksize;
		ret = 0;
	}

	xnlock_put_irqrestore(&nklock, spl);

	return ret;
}
/** * Get the process-shared attribute of a mutex attributes object. * * This service stores, at the address @a pshared, the value of the @a pshared * attribute in the mutex attributes object @a attr. * * The @a pashared attribute may only be one of @a PTHREAD_PROCESS_PRIVATE or * @a PTHREAD_PROCESS_SHARED. See pthread_mutexattr_setpshared() for the meaning * of these two constants. * * @param attr an initialized mutex attributes object; * * @param pshared address where the value of the @a pshared attribute will be * stored on success. * * @return 0 on success; * @return an error number if: * - EINVAL, the @a pshared address is invalid; * - EINVAL, the mutex attributes object @a attr is invalid. * * @see * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_getpshared.html"> * Specification.</a> * */ int pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared) { spl_t s; if (!pshared || !attr) return EINVAL; xnlock_get_irqsave(&nklock, s); if (!pse51_obj_active(attr,PSE51_MUTEX_ATTR_MAGIC,pthread_mutexattr_t)) { xnlock_put_irqrestore(&nklock, s); return EINVAL; } *pshared = attr->pshared; xnlock_put_irqrestore(&nklock, s); return 0; }
/** * Get the mutex type attribute from a mutex attributes object. * * This service stores, at the address @a type, the value of the @a type * attribute in the mutex attributes object @a attr. * * See pthread_mutex_lock() and pthread_mutex_unlock() documentations for a * description of the values of the @a type attribute and their effect on a * mutex. * * @param attr an initialized mutex attributes object, * * @param type address where the @a type attribute value will be stored on * success. * * @return 0 on sucess, * @return an error number if: * - EINVAL, the @a type address is invalid; * - EINVAL, the mutex attributes object @a attr is invalid. * * @see * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_gettype.html"> * Specification.</a> * */ int pthread_mutexattr_gettype(const pthread_mutexattr_t * attr, int *type) { spl_t s; if (!type || !attr) return EINVAL; xnlock_get_irqsave(&nklock, s); if (!pse51_obj_active(attr,PSE51_MUTEX_ATTR_MAGIC,pthread_mutexattr_t)) { xnlock_put_irqrestore(&nklock, s); return EINVAL; } *type = attr->type; xnlock_put_irqrestore(&nklock, s); return 0; }
/**
 * Set detachstate attribute.
 *
 * This service sets to @a detachstate the value of the @a detachstate
 * attribute in the attribute object @a attr.
 *
 * Valid values of this attribute are PTHREAD_CREATE_JOINABLE and
 * PTHREAD_CREATE_DETACHED. A detached thread is a thread which control block
 * is automatically reclaimed when it terminates. The control block of a
 * joinable thread, on the other hand, is only reclaimed when joined with the
 * service pthread_join().
 *
 * A thread that was created joinable may be detached after creation by using
 * the pthread_detach() service.
 *
 * @param attr attribute object;
 *
 * @param detachstate value of the detachstate attribute.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EINVAL, the attribute object @a attr is invalid
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_attr_setdetachstate.html">
 * Specification.</a>
 *
 */
int pthread_attr_setdetachstate(pthread_attr_t * attr, int detachstate)
{
	int ret = EINVAL;
	spl_t spl;

	/* Only the two standard detach states are accepted. */
	switch (detachstate) {
	case PTHREAD_CREATE_JOINABLE:
	case PTHREAD_CREATE_DETACHED:
		break;
	default:
		return EINVAL;
	}

	xnlock_get_irqsave(&nklock, spl);

	/* Update only a live thread attributes object. */
	if (pse51_obj_active(attr, PSE51_THREAD_ATTR_MAGIC, pthread_attr_t)) {
		attr->detachstate = detachstate;
		ret = 0;
	}

	xnlock_put_irqrestore(&nklock, spl);

	return ret;
}
/**
 * Create a buffer object.
 *
 * Allocates @a bufsz bytes of backing storage, initializes the input/output
 * synchronization objects and bookkeeping fields of @a bf, links the buffer
 * on the owner's resource queue, then optionally registers it by @a name.
 *
 * @param bf the buffer descriptor to initialize.
 * @param name symbolic name for registry lookup; may be NULL to skip
 * registration.
 * @param bufsz size of the backing storage in bytes; must be non-zero.
 * @param mode creation mode bits; B_PRIO selects priority-ordered wait
 * queues.
 *
 * @return 0 on success; -EPERM if called from an asynchronous context,
 * -EINVAL if @a bufsz is zero, -ENOMEM if the storage cannot be allocated,
 * or the error returned by xnregistry_enter().
 */
int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
{
	int ret = 0;
	spl_t s;

	/* Creation may block/reschedule; forbid asynchronous callers. */
	if (xnpod_asynch_p())
		return -EPERM;

	/* A zero-sized buffer would be unusable. */
	if (bufsz == 0)
		return -EINVAL;

	bf->bufmem = xnarch_alloc_host_mem(bufsz);
	if (bf->bufmem == NULL)
		return -ENOMEM;

	xnsynch_init(&bf->isynch_base, mode & B_PRIO, NULL);
	xnsynch_init(&bf->osynch_base, mode & B_PRIO, NULL);
	bf->handle = 0;	/* i.e. (still) unregistered buffer. */
	xnobject_copy_name(bf->name, name);
	inith(&bf->rlink);
	bf->rqueue = &xeno_get_rholder()->bufferq;

	/* Link on the owner's buffer queue under nklock. */
	xnlock_get_irqsave(&nklock, s);
	appendq(bf->rqueue, &bf->rlink);
	xnlock_put_irqrestore(&nklock, s);

	bf->mode = mode;
	bf->bufsz = bufsz;
	bf->rdoff = 0;
	bf->wroff = 0;
	bf->fillsz = 0;
	bf->rdtoken = 0;
	bf->wrtoken = 0;

#ifndef __XENO_SIM__
	bf->cpid = 0;
#endif
	bf->magic = XENO_BUFFER_MAGIC;

	/*
	 * <!> Since xnregister_enter() may reschedule, only register
	 * complete objects, so that the registry cannot return
	 * handles to half-baked objects...
	 */
	if (name) {
		ret = xnregistry_enter(bf->name, bf, &bf->handle, &__buffer_pnode.node);
		if (ret)
			/* Registration failed: undo the creation entirely. */
			rt_buffer_delete(bf);
	}

	return ret;
}
/*
 * vfile show handler dumping one open RTDM file descriptor per call.
 *
 * @param it the vfile iterator; it->pos - 1 is the fd slot to dump.
 * @param data NULL on the header pass, non-NULL for a data row.
 *
 * @return 0 on success, VFILE_SEQ_SKIP when the slot holds no context.
 */
static int openfd_show(struct xnvfile_regular_iterator *it, void *data)
{
	struct rtdm_dev_context *context;
	struct rtdm_device *device;
	struct rtdm_process owner;
	int close_lock_count, fd;
	spl_t s;

	/* First iteration (data == NULL): emit the column header only. */
	if (data == NULL) {
		xnvfile_puts(it, "Index\tLocked\tDevice\t\t\t\tOwner [PID]\n");
		return 0;
	}

	/* Position 0 was the header; fd slots start at position 1. */
	fd = (int)it->pos - 1;

	xnlock_get_irqsave(&rt_fildes_lock, s);

	context = fildes_table[fd].context;
	if (context == NULL) {
		/* Empty slot: skip without emitting a row. */
		xnlock_put_irqrestore(&rt_fildes_lock, s);
		return VFILE_SEQ_SKIP;
	}

	close_lock_count = atomic_read(&context->close_lock_count);
	device = context->device;

	/* Snapshot the owner record while holding the lock, so we can
	   safely print it after dropping the lock below. */
	if (context->reserved.owner)
		memcpy(&owner, context->reserved.owner, sizeof(owner));
	else {
		/* Kernel-owned descriptor: synthesize a placeholder owner. */
		strcpy(owner.name, "<kernel>");
		owner.pid = -1;
	}

	xnlock_put_irqrestore(&rt_fildes_lock, s);

	/* Printing is done lock-free on the local copies taken above. */
	xnvfile_printf(it, "%d\t%d\t%-31s %s [%d]\n", fd,
		       close_lock_count,
		       (device->device_flags & RTDM_NAMED_DEVICE) ?
		       device->device_name : device->proc_name,
		       owner.name, owner.pid);

	return 0;
}
/**
 * Destroy a thread attributes object.
 *
 * This service invalidates the attribute object pointed to by @a attr. The
 * object becomes invalid for all services (they all return EINVAL) except
 * pthread_attr_init().
 *
 * @param attr the thread attributes object to invalidate.
 *
 * @return 0 on success;
 * @return EINVAL if @a attr is not an initialized attributes object.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_attr_destroy.html">
 * Specification.</a>
 *
 */
int pthread_attr_destroy(pthread_attr_t * attr)
{
	char *attr_name;
	spl_t spl;

	xnlock_get_irqsave(&nklock, spl);

	if (!pse51_obj_active(attr, PSE51_THREAD_ATTR_MAGIC, pthread_attr_t)) {
		xnlock_put_irqrestore(&nklock, spl);
		return EINVAL;
	}

	/* Capture the name pointer before invalidation so it can be
	   released outside the locked section. */
	attr_name = attr->name;
	pse51_mark_deleted(attr);

	xnlock_put_irqrestore(&nklock, spl);

	/* Free the name storage lock-free. */
	if (attr_name != NULL)
		xnfree(attr_name);

	return 0;
}
/** * Get the protocol attribute from a mutex attributes object. * * This service stores, at the address @a proto, the value of the @a protocol * attribute in the mutex attributes object @a attr. * * The @a protcol attribute may only be one of @a PTHREAD_PRIO_NONE or @a * PTHREAD_PRIO_INHERIT. See pthread_mutexattr_setprotocol() for the meaning of * these two constants. * * @param attr an initialized mutex attributes object; * * @param proto address where the value of the @a protocol attribute will be * stored on success. * * @return 0 on success, * @return an error number if: * - EINVAL, the @a proto address is invalid; * - EINVAL, the mutex attributes object @a attr is invalid. * * @see * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_getprotocol.html"> * Specification.</a> * */ static inline int pthread_mutexattr_getprotocol(const pthread_mutexattr_t * attr, int *proto) { spl_t s; if (!proto || !attr) return EINVAL; xnlock_get_irqsave(&nklock, s); if (!cobalt_obj_active(attr,COBALT_MUTEX_ATTR_MAGIC,pthread_mutexattr_t)) { xnlock_put_irqrestore(&nklock, s); return EINVAL; } *proto = attr->protocol; xnlock_put_irqrestore(&nklock, s); return 0; }
/*
 * Delete a VxWorks message queue.
 *
 * Validates @a qid against the message queue magic, destroys the queue, and
 * reschedules if blocked tasks were flushed by the deletion.
 *
 * @param qid the message queue identifier to delete.
 *
 * @return OK on success, ERROR if @a qid is invalid or the service is
 * called from ISR context.
 */
STATUS msgQDelete(MSG_Q_ID qid)
{
	wind_msgq_t *queue;
	spl_t s;

	/* Deletion may reschedule; forbid ISR callers. */
	check_NOT_ISR_CALLABLE(return ERROR);

	xnlock_get_irqsave(&nklock, s);

	/* Map and validate the queue id; bails out to error on mismatch. */
	check_OBJ_ID_ERROR(qid, wind_msgq_t, queue, WIND_MSGQ_MAGIC, goto error);

	/* If waiters were readied by the teardown, reschedule now;
	   calling xnpod_schedule() with nklock held follows the
	   nucleus convention. */
	if (msgq_destroy_internal(queue) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);
	return OK;

      error:
	xnlock_put_irqrestore(&nklock, s);
	return ERROR;
}
/*
 * Return the number of messages currently pending on a VxWorks message
 * queue.
 *
 * @param qid the message queue identifier to query.
 *
 * @return the pending message count, or ERROR if @a qid is invalid.
 */
int msgQNumMsgs(MSG_Q_ID qid)
{
	wind_msgq_t *queue;
	int count = ERROR;
	spl_t spl;

	xnlock_get_irqsave(&nklock, spl);

	/* Validate the queue id; on mismatch keep the ERROR preset. */
	check_OBJ_ID_ERROR(qid, wind_msgq_t, queue, WIND_MSGQ_MAGIC, goto done);

	count = queue->msgq.elems;

      done:
	xnlock_put_irqrestore(&nklock, spl);

	return count;
}
/*
 * Delete an interrupt object.
 *
 * Validates @a intr, unlinks it from its resource queue, flushes any
 * pending waiters, removes the registry entry, then destroys the
 * underlying interrupt descriptor outside the locked section.
 *
 * @param intr the interrupt object to delete.
 *
 * @return 0 on success, -EPERM from asynchronous context, a validation
 * error code if @a intr is invalid, or the status of xnintr_destroy().
 */
int rt_intr_delete(RT_INTR *intr)
{
	int err = 0, rc = XNSYNCH_DONE;
	spl_t s;

	/* Deletion may reschedule; forbid asynchronous callers. */
	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	removeq(intr->rqueue, &intr->rlink);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	/* Flush tasks pending on the synch object; remember whether a
	   reschedule is needed once the lock is dropped. */
	rc = xnsynch_destroy(&intr->synch_base);
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	if (intr->handle)
		xnregistry_remove(intr->handle);

	xeno_mark_deleted(intr);

	xnlock_put_irqrestore(&nklock, s);

	/* Tear down the low-level interrupt descriptor lock-free. */
	err = xnintr_destroy(&intr->intr_base);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

	return err;
}
/*
 * ->release() handler for the message pipe device.
 *
 * Drains the per-state wait queues, unblocks kernel-side sleepers, detaches
 * the file from the SIGIO async queue, then performs the user-connection
 * cleanup which must come last (see comment below).
 *
 * @param inode the inode being released (unused beyond the VFS contract).
 * @param file the file whose private_data carries the pipe state.
 *
 * @return 0 unconditionally.
 */
static int xnpipe_release(struct inode *inode, struct file *file)
{
	struct xnpipe_state *state = file->private_data;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
	xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);

	if (testbits(state->status, XNPIPE_KERN_CONN)) {
		/* Unblock waiters. */
		if (xnsynch_nsleepers(&state->synchbase) > 0) {
			xnsynch_flush(&state->synchbase, XNRMID);
			xnpod_schedule();
		}
	}

	/* Notify the kernel side that the user end is gone. */
	if (state->ops.input)
		state->ops.input(NULL, -EPIPE, state->xstate);

	if (state->asyncq) {	/* Clear the async queue */
		removeq(&xnpipe_asyncq, &state->alink);
		__clrbits(state->status, XNPIPE_USER_SIGIO);
		/* fasync_helper() is a Linux service which may not be
		   called with nklock held; drop and reacquire the lock
		   around it. */
		xnlock_put_irqrestore(&nklock, s);
		fasync_helper(-1, file, 0, &state->asyncq);
		xnlock_get_irqsave(&nklock, s);
	}

	xnpipe_cleanup_user_conn(state, s);
	/*
	 * The extra state may not be available from now on, if
	 * xnpipe_disconnect() entered lingering close before we got
	 * there; so calling xnpipe_cleanup_user_conn() should be the
	 * last thing we do.
	 */
	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
/*
 * Drop one reference on a named shared memory block and free it when the
 * reference count reaches zero.
 *
 * Walks the global allocation queue looking for @a name; on the last
 * reference, removes the registry entry, releases the heap storage, and
 * unlinks the block.
 *
 * @param name the numeric name identifying the block.
 *
 * @return the size of the freed block on success, 0 if @a name was not
 * found or still referenced, or a negative error code if destroying the
 * backing heap failed.
 */
static int _shm_free(unsigned long name)
{
	int ret = 0;
	xnholder_t *holder;
	xnshm_a_t *p;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Linear scan of the allocation queue for the matching name. */
	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		/* Only free when this drop releases the last reference. */
		if (p->name == name && --p->ref == 0) {
#ifdef CONFIG_XENO_OPT_REGISTRY
			if (p->handle)
				xnregistry_remove(p->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
				/* Should release lock here?
				 * Can destroy_mapped suspend ?
				 * [YES!]
				 * NOTE(review): this runs with nklock held;
				 * if the destroy path may sleep, this needs
				 * auditing -- left as in the original. */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				ret = xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				ret = xnheap_destroy(p->heap, &__heap_flush_private, NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				if (ret)
					goto unlock_and_exit;
				xnheap_free(&kheap, p->heap);
			}

			removeq(&xnshm_allocq, &p->link);
			/* Report the size of the block just released. */
			ret = p->size;
			xnheap_free(&kheap, p);
			break;
		}

		holder = nextq(&xnshm_allocq, holder);
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
/**
 * Execute an initialization routine.
 *
 * This service may be used by libraries which need an initialization function
 * to be called only once.
 *
 * The function @a init_routine will only be called, with no argument, the
 * first time this service is called specifying the address @a once.
 *
 * @param once the once-control object, initialized with PTHREAD_ONCE_INIT.
 *
 * @param init_routine the routine to run at most one time.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EINVAL, the object pointed to by @a once is invalid (it must have been
 *   initialized with PTHREAD_ONCE_INIT).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_once.html">
 * Specification.</a>
 *
 */
int pthread_once(pthread_once_t * once, void (*init_routine) (void))
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (!pse51_obj_active(once, PSE51_ONCE_MAGIC, pthread_once_t)) {
		xnlock_put_irqrestore(&nklock, s);
		return EINVAL;
	}

	if (!once->routine_called) {
		/* NOTE(review): init_routine() runs with nklock held here,
		   so it must not block or re-enter this service --
		   confirm callers honor this constraint. */
		init_routine();
		/* If the calling thread is canceled while executing
		   init_routine, routine_called will not be set to 1. */
		once->routine_called = 1;
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
/*
 * Release the ownership of a synchronization object.
 *
 * Transfers ownership of @a synch to the highest-priority waiter, if any,
 * resuming it; clears ownership otherwise. When the object supports the
 * fast lock protocol and no thread is waiting, the release is performed
 * atomically without taking nklock.
 *
 * @param synch the owned synchronization object to release; must carry
 * XNSYNCH_OWNER.
 *
 * @return the thread which acquired ownership, or NULL if the object is
 * now unowned.
 */
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	/* Fast path: if the atomic lock word still holds our unclaimed
	   handle, release it in one atomic op -- no waiters, no nklock. */
	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	/* Pick the highest-priority waiter, if any. */
	holder = getpq(&synch->pendq);

	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		/* Drop any priority boost the previous owner inherited
		   through this object. */
		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	/* Publish the new owner handle in the fast lock word. */
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
/*
 * Create an event flag group.
 *
 * Initializes @a event with the initial flag value @a ivalue, links it on
 * the owner's resource queue, then optionally registers it by @a name.
 *
 * @param event the event descriptor to initialize.
 * @param name symbolic name; NULL skips registration, an empty string
 * requests an anonymous (non-exported) registration.
 * @param ivalue initial value of the event flag group.
 * @param mode creation mode bits; EV_PRIO selects priority-ordered waits.
 *
 * @return 0 on success, -EPERM from asynchronous context, or the error
 * returned by xnregistry_enter().
 */
int rt_event_create(RT_EVENT *event, const char *name, unsigned long ivalue, int mode)
{
	int err = 0;
	spl_t s;

	/* Creation may reschedule; forbid asynchronous callers. */
	if (xnpod_asynch_p())
		return -EPERM;

	xnsynch_init(&event->synch_base, mode & EV_PRIO);
	event->value = ivalue;
	event->handle = 0;	/* i.e. (still) unregistered event. */
	event->magic = XENO_EVENT_MAGIC;
	xnobject_copy_name(event->name, name);
	inith(&event->rlink);
	event->rqueue = &xeno_get_rholder()->eventq;

	/* Link on the owner's event queue under nklock. */
	xnlock_get_irqsave(&nklock, s);
	appendq(event->rqueue, &event->rlink);
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	event->cpid = 0;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

#ifdef CONFIG_XENO_OPT_REGISTRY
	/* <!> Since xnregister_enter() may reschedule, only register
	   complete objects, so that the registry cannot return handles to
	   half-baked objects... */
	if (name) {
		xnpnode_t *pnode = &__event_pnode;

		if (!*name) {
			/* Since this is an anonymous object (empty name on
			   entry) from user-space, it gets registered under
			   an unique internal name but is not exported
			   through /proc. */
			xnobject_create_name(event->name, sizeof(event->name), (void *)event);
			pnode = NULL;
		}

		err = xnregistry_enter(event->name, event, &event->handle, pnode);

		if (err)
			/* Registration failed: undo the creation entirely. */
			rt_event_delete(event);
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	return err;
}
/** * @internal * @fn static int program_htick_shot(unsigned long delay, struct clock_event_device *cdev) * * @brief Program next host tick as a Xenomai timer event. * * Program the next shot for the host tick on the current CPU. * Emulation is done using a nucleus timer attached to the master * timebase. * * @param delay The time delta from the current date to the next tick, * expressed as a count of nanoseconds. * * @param cdev An pointer to the clock device which notifies us. * * @coretags{unrestricted} */ static int program_htick_shot(unsigned long delay, struct clock_event_device *cdev) { struct xnsched *sched; int ret; spl_t s; xnlock_get_irqsave(&nklock, s); sched = xnsched_current(); ret = xntimer_start(&sched->htimer, delay, XN_INFINITE, XN_RELATIVE); xnlock_put_irqrestore(&nklock, s); return ret ? -ETIME : 0; }
/**
 * Migrate a timer.
 *
 * This call migrates a timer to another cpu. In order to avoid pathological
 * cases, it must be called from the CPU to which @a timer is currently
 * attached.
 *
 * @param timer The address of the timer object to be migrated.
 *
 * @param sched The address of the destination CPU xnsched_t structure.
 *
 * @retval -EINVAL if @a timer is queued on another CPU than current ;
 * @retval 0 otherwise.
 *
 */
int xntimer_migrate(xntimer_t *timer, xnsched_t *sched)
{
	int err = 0;
	int queued;
	spl_t s;

	trace_mark(xn_nucleus, timer_migrate, "timer %p cpu %d",
		   timer, (int)xnsched_cpu(sched));

	xnlock_get_irqsave(&nklock, s);

	/* Nothing to do when already attached to the target CPU. */
	if (sched == timer->sched)
		goto unlock_and_exit;

	queued = !testbits(timer->status, XNTIMER_DEQUEUED);

	/* Avoid the pathological case where the timer interrupt did not
	   occur yet for the current date on the timer source CPU, whereas
	   we are trying to migrate it to a CPU where the timer interrupt
	   already occured. This would not be a problem in aperiodic
	   mode. */

	if (queued) {
		/* An armed timer may only be migrated from its own CPU. */
		if (timer->sched != xnpod_current_sched()) {
			err = -EINVAL;
			goto unlock_and_exit;
		}

		/* Dequeue from the source CPU before rebinding. */
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		timer->base->ops->stop_timer(timer);
#else /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
		xntimer_stop_aperiodic(timer);
#endif /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
	}

	timer->sched = sched;

	/* Requeue on the destination CPU if it was armed. */
	if (queued)
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
		timer->base->ops->move_timer(timer);
#else /* !CONFIG_XENO_OPT_TIMING_PERIODIC */
		xntimer_move_aperiodic(timer);
#endif /* !CONFIG_XENO_OPT_TIMING_PERIODIC */

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Destroy a timer object.
 *
 * Stops the timer, marks it as killed so it cannot be requeued, detaches
 * it from its scheduler slot, and removes it from the per-base statistics
 * queue when statistics are enabled.
 *
 * @param timer the timer object to destroy.
 */
void xntimer_destroy(xntimer_t *timer)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	xntimer_stop(timer);
	/* Prevent any further arming of this timer. */
	__setbits(timer->status, XNTIMER_KILLED);
	timer->sched = NULL;
#ifdef CONFIG_XENO_OPT_STATS
	removeq(&xntimer_base(timer)->timerq, &timer->tblink);
	/* Bump the revision counter so /proc readers notice the change. */
	xntimer_base(timer)->timerq_rev++;
#endif /* CONFIG_XENO_OPT_STATS */
	xnlock_put_irqrestore(&nklock, s);
}
/*
 * Post a set of event flags.
 *
 * ORs @a mask into the event value, then walks the wait queue waking up
 * every sleeper whose request (ANY/ALL semantics) is now satisfied.
 *
 * @param event the event flag group to signal.
 * @param mask the set of flags to post.
 *
 * @return 0 on success, or a validation error code if @a event is invalid.
 */
int rt_event_signal(RT_EVENT *event, unsigned long mask)
{
	xnpholder_t *holder, *nholder;
	int err = 0, resched = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	/* Post the flags. */

	event->value |= mask;

	/* And wakeup any sleeper having its request fulfilled. */

	nholder = getheadpq(xnsynch_wait_queue(&event->synch_base));

	while ((holder = nholder) != NULL) {
		RT_TASK *sleeper = thread2rtask(link2thread(holder, plink));
		int mode = sleeper->wait_args.event.mode;
		unsigned long bits = sleeper->wait_args.event.mask;

		/* EV_ANY: any requested bit suffices; otherwise all
		   requested bits must be present. */
		if (((mode & EV_ANY) && (bits & event->value) != 0)
		    || (!(mode & EV_ANY) && ((bits & event->value) == bits))) {
			/* Hand the satisfied bits back to the sleeper. */
			sleeper->wait_args.event.mask = (bits & event->value);
			/* Wake this sleeper and fetch the next holder in
			   one step, since the wakeup unlinks it. */
			nholder = xnsynch_wakeup_this_sleeper(&event->synch_base, holder);
			resched = 1;
		} else
			nholder = nextpq(xnsynch_wait_queue(&event->synch_base), holder);
	}

	if (resched)
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Package cleanup: unlink and destroy every remaining POSIX shared memory
 * object.
 *
 * Drains pse51_shmq, removing each object's node and destroying it. The
 * lock is transiently dropped around the debug printout (see below).
 */
void pse51_shm_pkg_cleanup(void)
{
	xnholder_t *holder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	while ((holder = getheadq(&pse51_shmq))) {
		pse51_shm_t *shm = link2shm(holder);
		pse51_node_t *node;

		pse51_node_remove(&node, shm->nodebase.name, PSE51_SHM_MAGIC);
		/* Drop the lock around the printout -- presumably because
		   xnprintf may not be safe under nklock; TODO confirm.
		   The object was already removed from the queue/namespace
		   above, so no one else can reach it meanwhile. */
		xnlock_put_irqrestore(&nklock, s);
#if XENO_DEBUG(POSIX)
		xnprintf("Posix: unlinking shared memory \"%s\".\n", shm->nodebase.name);
#endif /* XENO_DEBUG(POSIX) */
		xnlock_get_irqsave(&nklock, s);
		pse51_shm_destroy(shm, 1);
	}

	xnlock_put_irqrestore(&nklock, s);
}
int sc_screate(unsigned initval, int opt, int *errp) { int bflags = 0, semid; vrtxsem_t *sem; spl_t s; if (opt & ~1) { *errp = ER_IIP; return -1; } sem = (vrtxsem_t *)xnmalloc(sizeof(*sem)); if (!sem) { *errp = ER_NOCB; return -1; } semid = xnmap_enter(vrtx_sem_idmap, -1, sem); if (semid < 0) { *errp = ER_NOCB; xnfree(sem); return -1; } if (opt == 0) bflags = XNSYNCH_PRIO; else bflags = XNSYNCH_FIFO; xnsynch_init(&sem->synchbase, bflags | XNSYNCH_DREORD); inith(&sem->link); sem->semid = semid; sem->magic = VRTX_SEM_MAGIC; sem->count = initval; xnlock_get_irqsave(&nklock, s); appendq(&vrtx_sem_q, &sem->link); xnlock_put_irqrestore(&nklock, s); #ifdef CONFIG_XENO_OPT_REGISTRY sprintf(sem->name, "sem%d", semid); xnregistry_enter(sem->name, sem, &sem->handle, &__sem_pnode); #endif /* CONFIG_XENO_OPT_REGISTRY */ *errp = RET_OK; return semid; }
static int xnpipe_fasync(int fd, struct file *file, int on) { struct xnpipe_state *state = file->private_data; int ret, queued; spl_t s; queued = (state->asyncq != NULL); ret = fasync_helper(fd, file, on, &state->asyncq); if (state->asyncq) { if (!queued) { xnlock_get_irqsave(&nklock, s); appendq(&xnpipe_asyncq, &state->alink); xnlock_put_irqrestore(&nklock, s); } } else if (queued) { xnlock_get_irqsave(&nklock, s); removeq(&xnpipe_asyncq, &state->alink); xnlock_put_irqrestore(&nklock, s); } return ret; }
static int __wind_taskinfo_status(struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); unsigned long status; WIND_TCB *pTcb; spl_t s; xnlock_get_irqsave(&nklock, s); pTcb = __wind_lookup_task(handle); if (!pTcb || pTcb->magic != WIND_TASK_MAGIC) { xnlock_put_irqrestore(&nklock, s); return S_objLib_OBJ_ID_ERROR; } status = xnthread_state_flags(&pTcb->threadbase); xnlock_put_irqrestore(&nklock, s); return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs), &status, sizeof(status)); }
/**
 * Set the protocol attribute of a mutex attributes object.
 *
 * This service sets the @a protocol attribute of the mutex attributes
 * object @a attr.
 *
 * @param attr an initialized mutex attributes object,
 *
 * @param proto value of the @a protocol attribute, may be one of:
 * - PTHREAD_PRIO_NONE, meaning that a mutex created with the attributes
 *   object @a attr will not follow any priority protocol;
 * - PTHREAD_PRIO_INHERIT, meaning that a mutex created with the attributes
 *   object @a attr, will follow the priority inheritance protocol.
 *
 * The value PTHREAD_PRIO_PROTECT (priority ceiling protocol) is unsupported.
 *
 * @return 0 on success,
 * @return an error number if:
 * - EINVAL, the mutex attributes object @a attr is invalid;
 * - EOPNOTSUPP, the value of @a proto is unsupported;
 * - EINVAL, the value of @a proto is invalid.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_setprotocol.html">
 * Specification.</a>
 *
 */
static inline int
pthread_mutexattr_setprotocol(pthread_mutexattr_t * attr, int proto)
{
	int ret;
	spl_t s;

	if (attr == NULL)
		return EINVAL;

	xnlock_get_irqsave(&nklock, s);

	/* The object must be validated before proto is examined, so an
	   invalid attr always yields EINVAL regardless of proto. */
	if (!cobalt_obj_active(attr, COBALT_MUTEX_ATTR_MAGIC, pthread_mutexattr_t)) {
		ret = EINVAL;
		goto out;
	}

	switch (proto) {
	case PTHREAD_PRIO_NONE:
	case PTHREAD_PRIO_INHERIT:
		attr->protocol = proto;
		ret = 0;
		break;
	case PTHREAD_PRIO_PROTECT:
		/* Priority ceiling is recognized but not implemented. */
		ret = EOPNOTSUPP;
		break;
	default:
		ret = EINVAL;
		break;
	}

      out:
	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
/*
 * Legacy /proc read handler dumping the state of a native heap.
 *
 * Emits the heap type, sizes and mapping count, followed by one line per
 * task currently pending on the heap.
 *
 * Follows the classic single-shot read_proc protocol: the whole report is
 * formatted into @a page, then clipped against @a off/@a count.
 *
 * @return the number of bytes available from @a off, clamped to @a count.
 */
static int __heap_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	RT_HEAP *heap = (RT_HEAP *)data;
	char *p = page;
	int len;
	spl_t s;

	p += sprintf(p, "type=%s:size=%lu:used=%lu:numaps=%d\n",
		     (heap->mode & H_SHARED) == H_SHARED ? "shared" :
		     (heap->mode & H_MAPPABLE) ? "mappable" : "kernel",
		     xnheap_usable_mem(&heap->heap_base),
		     xnheap_used_mem(&heap->heap_base),
		     atomic_read(&heap->heap_base.archdep.numaps));

	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_nsleepers(&heap->synch_base) > 0) {
		xnpholder_t *holder;

		/* Pended heap -- dump waiters. */

		holder = getheadpq(xnsynch_wait_queue(&heap->synch_base));

		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			RT_TASK *task = thread2rtask(sleeper);
			size_t size = task->wait_args.heap.size;
			p += sprintf(p, "+%s (size=%zd)\n", xnthread_name(sleeper), size);
			holder = nextpq(xnsynch_wait_queue(&heap->synch_base), holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	/* Standard read_proc windowing arithmetic. */
	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
/*
 * Initialize a timer object.
 *
 * Sets up the queue linkage, priority, status bits and handler of
 * @a timer, binds it to a scheduler slot, and (when statistics are
 * enabled) registers it on the clock's timer queue.
 *
 * @param timer the timer to initialize.
 * @param clock the clock the timer is tracked against (used when
 * CONFIG_XENO_OPT_EXTCLOCK / CONFIG_XENO_OPT_STATS are enabled).
 * @param handler routine fired when the timer elapses.
 * @param sched optional scheduler slot; NULL selects one from the current
 * CPU (see comment below).
 * @param flags creation bits masked by XNTIMER_INIT_MASK.
 */
void __xntimer_init(struct xntimer *timer, struct xnclock *clock,
		    void (*handler)(struct xntimer *timer),
		    struct xnsched *sched, int flags)
{
	spl_t s __maybe_unused;
	int cpu;

#ifdef CONFIG_XENO_OPT_EXTCLOCK
	timer->clock = clock;
#endif
	xntimerh_init(&timer->aplink);
	xntimerh_date(&timer->aplink) = XN_INFINITE;
	xntimer_set_priority(timer, XNTIMER_STDPRIO);
	/* Start life dequeued; keep only the caller's init bits. */
	timer->status = (XNTIMER_DEQUEUED|(flags & XNTIMER_INIT_MASK));
	timer->handler = handler;
	timer->interval_ns = 0;
	/*
	 * Timers are affine to a scheduler slot, which is in turn
	 * bound to a real-time CPU. If no scheduler affinity was
	 * given, assign the timer to the scheduler slot of the
	 * current CPU if real-time, otherwise default to the
	 * scheduler slot of the first real-time CPU.
	 */
	if (sched)
		timer->sched = sched;
	else {
		cpu = ipipe_processor_id();
		if (!xnsched_supported_cpu(cpu))
			cpu = first_cpu(xnsched_realtime_cpus);

		timer->sched = xnsched_struct(cpu);
	}

#ifdef CONFIG_XENO_OPT_STATS
#ifdef CONFIG_XENO_OPT_EXTCLOCK
	timer->tracker = clock;
#endif
	/* Tag the timer with the creating task for the stats vfile. */
	ksformat(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
		 current->pid, current->comm);
	xntimer_reset_stats(timer);
	/* Publish the timer on the clock's stats queue under nklock. */
	xnlock_get_irqsave(&nklock, s);
	list_add_tail(&timer->next_stat, &clock->timerq);
	clock->nrtimers++;
	xnvfile_touch(&clock->timer_vfile);
	xnlock_put_irqrestore(&nklock, s);
#endif /* CONFIG_XENO_OPT_STATS */
}
/*
 * Allocate a time base.
 *
 * For an aperiodic request (period == XN_APERIODIC_TICK) the master time
 * base is returned. Otherwise a slave periodic time base is allocated,
 * initialized with the given tick period, exported through /proc, and
 * linked on the global time base queue.
 *
 * @param name symbolic name of the new time base.
 * @param period tick duration; XN_APERIODIC_TICK selects the master base.
 * @param flags only XNTBISO (isolated base) is accepted.
 * @param basep address where the new time base pointer is returned.
 *
 * @return 0 on success, -EINVAL on invalid flags, -ENOMEM if the slave
 * descriptor cannot be allocated.
 */
int xntbase_alloc(const char *name, u_long period, u_long flags, xntbase_t **basep)
{
	xntslave_t *slave;
	xntbase_t *base;
	spl_t s;

	if (flags & ~XNTBISO)
		return -EINVAL;

	/* Aperiodic request: hand out the master time base. */
	if (period == XN_APERIODIC_TICK) {
		*basep = &nktbase;
		xnarch_declare_tbase(&nktbase);
		return 0;
	}

	slave = (xntslave_t *)xnarch_alloc_host_mem(sizeof(*slave));

	if (!slave)
		return -ENOMEM;

	base = &slave->base;
	base->tickvalue = period;
	/* period is in nanoseconds, hence ticks/s = 1e9 / period. */
	base->ticks2sec = 1000000000UL / period;
	base->wallclock_offset = 0;
	base->jiffies = 0;
	base->hook = NULL;
	base->ops = &nktimer_ops_periodic;
	base->name = name;
	inith(&base->link);
	xntslave_init(slave);

	/* Set initial status:
	   Not running, no time set, unlocked, isolated if requested. */
	base->status = flags;

	*basep = base;
#ifdef CONFIG_XENO_OPT_STATS
	initq(&base->timerq);
#endif /* CONFIG_XENO_OPT_STATS */
	xntbase_declare_proc(base);
	/* Link on the global time base queue under nklock. */
	xnlock_get_irqsave(&nklock, s);
	appendq(&nktimebaseq, &base->link);
	xnlock_put_irqrestore(&nklock, s);

	xnarch_declare_tbase(base);

	return 0;
}
int sc_mcreate(unsigned int opt, int *errp) { int bflags, mid; vrtxmx_t *mx; spl_t s; switch (opt) { case 0: bflags = XNSYNCH_PRIO; break; case 1: bflags = XNSYNCH_FIFO; break; case 2: bflags = XNSYNCH_PRIO | XNSYNCH_PIP; break; default: *errp = ER_IIP; return 0; } mx = xnmalloc(sizeof(*mx)); if (mx == NULL) { *errp = ER_NOCB; return -1; } mid = xnmap_enter(vrtx_mx_idmap, -1, mx); if (mid < 0) { xnfree(mx); return -1; } inith(&mx->link); mx->mid = mid; xnsynch_init(&mx->synchbase, bflags | XNSYNCH_DREORD | XNSYNCH_OWNER, NULL); xnlock_get_irqsave(&nklock, s); appendq(&vrtx_mx_q, &mx->link); xnlock_put_irqrestore(&nklock, s); sprintf(mx->name, "mx%d", mid); xnregistry_enter(mx->name, mx, &mx->handle, &__mutex_pnode.node); *errp = RET_OK; return mid; }