/**
 * @fn int rt_pipe_delete(RT_PIPE *pipe)
 * @brief Delete a message pipe.
 *
 * This routine deletes a pipe object previously created by a call to
 * rt_pipe_create(). All resources attached to that pipe are
 * automatically released, all pending data is flushed.
 *
 * @param pipe The pipe descriptor.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a pipe is not a valid pipe descriptor.
 *
 * - -EIDRM is returned if @a pipe is a closed pipe descriptor.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * @apitags{thread-unrestricted, switch-secondary}
 */
int rt_pipe_delete(RT_PIPE *pipe)
{
	struct alchemy_pipe *pcb;
	struct service svc;
	int ret = 0;

	/* Not callable from interrupt context. */
	if (threadobj_irq_p())
		return -EPERM;

	CANCEL_DEFER(svc);

	pcb = find_alchemy_pipe(pipe, &ret);
	if (pcb == NULL)
		goto done;

	/* Closing the XDDP socket releases the kernel-side resources. */
	if (__RT(close(pcb->sock))) {
		/* EBADF means the descriptor was already closed. */
		ret = errno == EBADF ? -EIDRM : -errno;
		goto done;
	}

	/* Unindex the pipe, then invalidate its magic tag. */
	syncluster_delobj(&alchemy_pipe_table, &pcb->cobj);
	pcb->magic = ~pipe_magic;
done:
	CANCEL_RESTORE(svc);

	return ret;
}
static STATUS msem_take(struct wind_sem *sem, int timeout) { struct wind_task *current; struct timespec ts; int ret; if (threadobj_irq_p()) return S_intLib_NOT_ISR_CALLABLE; /* * We allow threads from other APIs to grab a VxWorks mutex * ignoring the safe option in such a case. */ current = wind_task_current(); if (current && (sem->options & SEM_DELETE_SAFE)) __RT(pthread_mutex_lock(¤t->safelock)); if (timeout == NO_WAIT) { ret = __RT(pthread_mutex_trylock(&sem->u.msem.lock)); goto check; } if (timeout == WAIT_FOREVER) { ret = __RT(pthread_mutex_lock(&sem->u.msem.lock)); goto check; } __clockobj_ticks_to_timeout(&wind_clock, CLOCK_REALTIME, timeout, &ts); ret = __RT(pthread_mutex_timedlock(&sem->u.msem.lock, &ts)); check: switch (ret) { case 0: return OK; case EINVAL: ret = S_objLib_OBJ_ID_ERROR; break; case EBUSY: ret = S_objLib_OBJ_UNAVAILABLE; break; case ETIMEDOUT: ret = S_objLib_OBJ_TIMEOUT; break; case EOWNERDEAD: case ENOTRECOVERABLE: warning("owner of mutex-type semaphore %p died", sem); ret = S_objLib_OBJ_UNAVAILABLE; break; } if (current != NULL && (sem->options & SEM_DELETE_SAFE)) __RT(pthread_mutex_unlock(¤t->safelock)); return ret; }
/*
 * Take a counting/binary VxWorks semaphore.
 *
 * The count is decremented first; a negative result means the caller
 * must wait. Every bail-out path that did not consume a unit must
 * re-increment the count to keep it balanced.
 *
 * @param sem Semaphore control block (counting flavor).
 * @param timeout NO_WAIT, WAIT_FOREVER, or a tick count.
 *
 * @return OK on success, or an S_objLib/S_intLib error status.
 */
static STATUS xsem_take(struct wind_sem *sem, int timeout)
{
	struct timespec ts, *timespec;
	struct syncstate syns;
	struct service svc;
	STATUS ret = OK;

	if (threadobj_irq_p())
		return S_intLib_NOT_ISR_CALLABLE;

	CANCEL_DEFER(svc);

	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
		ret = S_objLib_OBJ_ID_ERROR;
		goto out;
	}

	/* Fast path: a unit was available, no need to wait. */
	if (--sem->u.xsem.value >= 0)
		goto done;

	if (timeout == NO_WAIT) {
		/* Give the unit back: we did not actually consume it. */
		sem->u.xsem.value++;
		ret = S_objLib_OBJ_UNAVAILABLE;
		goto done;
	}

	if (timeout != WAIT_FOREVER) {
		timespec = &ts;
		clockobj_ticks_to_timeout(&wind_clock, timeout, timespec);
	} else
		timespec = NULL; /* NULL timeout means wait indefinitely. */

	ret = syncobj_wait_grant(&sem->u.xsem.sobj, timespec, &syns);
	if (ret == -EIDRM) {
		/* Semaphore deleted while waiting: the sync object is
		   gone, do not touch it again. */
		ret = S_objLib_OBJ_DELETED;
		goto out;
	}
	if (ret) {
		/* Wait aborted: restore the count we speculatively took. */
		sem->u.xsem.value++;
		if (ret == -ETIMEDOUT)
			ret = S_objLib_OBJ_TIMEOUT;
		else if (ret == -EINTR)
			ret = OK;	/* Flushed. */
	}
done:
	syncobj_unlock(&sem->u.xsem.sobj, &syns);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
/*
 * Delete a mutex-type VxWorks semaphore and release its memory.
 *
 * @param sem Semaphore control block (mutex flavor).
 *
 * @return OK on success, or an S_objLib/S_semLib/S_intLib error status.
 */
static STATUS msem_delete(struct wind_sem *sem)
{
	int err;

	if (threadobj_irq_p())
		return S_intLib_NOT_ISR_CALLABLE;

	err = __RT(pthread_mutex_destroy(&sem->u.msem.lock));
	switch (err) {
	case EINVAL:
		return S_objLib_OBJ_ID_ERROR;
	case EBUSY:
		/*
		 * XXX: We depart from the spec here since we can't
		 * flush, but we tell the caller about any pending task
		 * instead.
		 */
		return S_semLib_INVALID_OPERATION;
	}

	xnfree(sem);

	return OK;
}
static STATUS msem_give(struct wind_sem *sem) { struct wind_task *current; int ret; if (threadobj_irq_p()) return S_intLib_NOT_ISR_CALLABLE; ret = __RT(pthread_mutex_unlock(&sem->u.msem.lock)); if (ret == EINVAL) return S_objLib_OBJ_ID_ERROR; if (ret == EPERM) return S_semLib_INVALID_OPERATION; if (sem->options & SEM_DELETE_SAFE) { current = wind_task_current(); if (current) __RT(pthread_mutex_unlock(¤t->safelock)); } return OK; }
static STATUS xsem_delete(struct wind_sem *sem) { struct syncstate syns; struct service svc; int ret = OK; if (threadobj_irq_p()) return S_intLib_NOT_ISR_CALLABLE; CANCEL_DEFER(svc); if (syncobj_lock(&sem->u.xsem.sobj, &syns)) { ret = S_objLib_OBJ_ID_ERROR; goto out; } sem->magic = ~sem_magic; /* Prevent further reference. */ syncobj_destroy(&sem->u.xsem.sobj, &syns); out: CANCEL_RESTORE(svc); return ret; }
/**
 * @fn int rt_buffer_delete(RT_BUFFER *bf)
 * @brief Delete an IPC buffer.
 *
 * This routine deletes a buffer object previously created by a call
 * to rt_buffer_create().
 *
 * @param bf The descriptor address of the deleted buffer.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a bf is not a valid buffer descriptor.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Valid calling context:
 *
 * - Regular POSIX threads
 * - Xenomai threads
 */
int rt_buffer_delete(RT_BUFFER *bf)
{
	struct alchemy_buffer *bcb;
	struct syncstate syns;
	struct service svc;
	int ret = 0;

	/* Not callable from interrupt context. */
	if (threadobj_irq_p())
		return -EPERM;

	CANCEL_DEFER(svc);

	bcb = get_alchemy_buffer(bf, &syns, &ret);
	if (bcb != NULL) {
		/* Unindex the buffer, then invalidate its magic tag
		   before dropping the sync object. */
		syncluster_delobj(&alchemy_buffer_table, &bcb->cobj);
		bcb->magic = ~buffer_magic;
		syncobj_destroy(&bcb->sobj, &syns);
	}

	CANCEL_RESTORE(svc);

	return ret;
}
/*
 * Create a message pipe backed by an AF_RTIPC/XDDP datagram socket.
 *
 * NOTE(review): the matching '#if' of the '#endif' below lies outside
 * this chunk — presumably an alternate prototype for another build
 * configuration; confirm against the full file.
 *
 * Returns the (possibly auto-assigned) minor number on success, or a
 * negative error code (-EPERM, -ENOMEM, -EBUSY, -EEXIST, -EINVAL, or
 * the errno from a failing socket call, negated).
 */
int rt_pipe_create(RT_PIPE *pipe, const char *name, int minor, size_t poolsize)
#endif
{
	struct rtipc_port_label plabel;
	struct sockaddr_ipc saddr;
	struct alchemy_pipe *pcb;
	struct service svc;
	size_t streambufsz;
	socklen_t addrlen;
	int ret, sock;

	/* Not callable from interrupt context. */
	if (threadobj_irq_p())
		return -EPERM;

	CANCEL_DEFER(svc);

	pcb = xnmalloc(sizeof(*pcb));
	if (pcb == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sock = __RT(socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP));
	if (sock < 0) {
		warning("RTIPC/XDDP protocol not supported by kernel");
		ret = -errno;
		xnfree(pcb);
		goto out;
	}

	/* Optionally attach a label so /proc users can identify the port. */
	if (name && *name) {
		namecpy(plabel.label, name);
		ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_LABEL, &plabel, sizeof(plabel)));
		if (ret)
			goto fail_sockopt;
	}

	/* Optionally size the kernel-side message pool. */
	if (poolsize > 0) {
		ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_POOLSZ, &poolsize, sizeof(poolsize)));
		if (ret)
			goto fail_sockopt;
	}

	streambufsz = ALCHEMY_PIPE_STREAMSZ;
	ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_BUFSZ, &streambufsz, sizeof(streambufsz)));
	if (ret)
		goto fail_sockopt;

	/* Bind to the requested minor device (or P_MINOR_AUTO). */
	memset(&saddr, 0, sizeof(saddr));
	saddr.sipc_family = AF_RTIPC;
	saddr.sipc_port = minor;
	ret = __RT(bind(sock, (struct sockaddr *)&saddr, sizeof(saddr)));
	if (ret)
		goto fail_sockopt;

	if (minor == P_MINOR_AUTO) {
		/* Fetch the assigned minor device. */
		addrlen = sizeof(saddr);
		ret = __RT(getsockname(sock, (struct sockaddr *)&saddr, &addrlen));
		if (ret)
			goto fail_sockopt;
		if (addrlen != sizeof(saddr)) {
			ret = -EINVAL;
			goto fail_register;
		}
		minor = saddr.sipc_port;
	}

	generate_name(pcb->name, name, &pipe_namegen);
	pcb->sock = sock;
	pcb->minor = minor;
	pcb->magic = pipe_magic;

	/* Index the pipe by name; a clash means a duplicate name. */
	if (syncluster_addobj(&alchemy_pipe_table, pcb->name, &pcb->cobj)) {
		ret = -EEXIST;
		goto fail_register;
	}

	pipe->handle = mainheap_ref(pcb, uintptr_t);

	CANCEL_RESTORE(svc);

	return minor;
fail_sockopt:
	/* Read errno before any further call can clobber it. */
	ret = -errno;
	if (ret == -EADDRINUSE)
		ret = -EBUSY;
fail_register:
	__RT(close(sock));
	xnfree(pcb);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
/**
 * @fn int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
 * @brief Create an IPC buffer.
 *
 * This routine creates an IPC object that allows tasks to send and
 * receive data asynchronously via a memory buffer. Data may be of an
 * arbitrary length, albeit this IPC is best suited for small to
 * medium-sized messages, since data always have to be copied to the
 * buffer during transit. Large messages may be more efficiently
 * handled by message queues (RT_QUEUE).
 *
 * @param bf The address of a buffer descriptor which can be later
 * used to identify uniquely the created object, upon success of this
 * call.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * buffer. When non-NULL and non-empty, a copy of this string is used
 * for indexing the created buffer into the object registry.
 *
 * @param bufsz The size of the buffer space available to hold
 * data. The required memory is obtained from the main heap.
 *
 * @param mode The buffer creation mode. The following flags can be
 * OR'ed into this bitmask, each of them affecting the new buffer:
 *
 * - B_FIFO makes tasks pend in FIFO order for reading data from the
 * buffer.
 *
 * - B_PRIO makes tasks pend in priority order for reading data from
 * the buffer.
 *
 * This parameter also applies to tasks blocked on the buffer's write
 * side (see rt_buffer_write()).
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a bufsz is zero.
 *
 * - -ENOMEM is returned if the system fails to get memory from the
 * main heap in order to create the buffer.
 *
 * - -EEXIST is returned if the @a name is conflicting with an already
 * registered buffer.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Valid calling context:
 *
 * - Regular POSIX threads
 * - Xenomai threads
 *
 * @note Buffers can be shared by multiple processes which belong to
 * the same Xenomai session.
 */
int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
{
	struct alchemy_buffer *bcb;
	struct service svc;
	int sobj_flags = 0;
	int ret;

	/* Not callable from interrupt context. */
	if (threadobj_irq_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	CANCEL_DEFER(svc);

	bcb = xnmalloc(sizeof(*bcb));
	if (bcb == NULL) {
		ret = __bt(-ENOMEM);
		goto fail;
	}

	bcb->buf = xnmalloc(bufsz);
	/*
	 * Fix: the original tested 'bcb == NULL' again here (copy-paste
	 * bug), so a failed buffer-space allocation went undetected and
	 * bcb->buf was later dereferenced as NULL.
	 */
	if (bcb->buf == NULL) {
		ret = __bt(-ENOMEM);
		goto fail_bufalloc;
	}

	generate_name(bcb->name, name, &buffer_namegen);
	bcb->magic = buffer_magic;
	bcb->mode = mode;
	bcb->bufsz = bufsz;
	bcb->rdoff = 0;
	bcb->wroff = 0;
	bcb->fillsz = 0;
	if (mode & B_PRIO)
		sobj_flags = SYNCOBJ_PRIO;

	syncobj_init(&bcb->sobj, CLOCK_COPPERPLATE, sobj_flags,
		     fnref_put(libalchemy, buffer_finalize));

	/* Index the buffer by name; a clash means a duplicate name. */
	if (syncluster_addobj(&alchemy_buffer_table, bcb->name, &bcb->cobj)) {
		ret = -EEXIST;
		goto fail_register;
	}

	bf->handle = mainheap_ref(bcb, uintptr_t);

	CANCEL_RESTORE(svc);

	return 0;

fail_register:
	syncobj_uninit(&bcb->sobj);
	xnfree(bcb->buf);
fail_bufalloc:
	xnfree(bcb);
fail:
	CANCEL_RESTORE(svc);

	return ret;
}