/**
 * Destroy an unnamed semaphore.
 *
 * This service destroys the semaphore @a sm. Threads currently blocked on @a sm
 * are unblocked and the service they called return -1 with @a errno set to
 * EINVAL. The semaphore is then considered invalid by all semaphore services
 * (they all fail with @a errno set to EINVAL) except sem_init().
 *
 * This service fails if @a sm is a named semaphore.
 *
 * @param sm the semaphore to be destroyed.
 *
 * @retval 0 on success,
 * @retval -1 with @a errno set if:
 * - EINVAL, the semaphore @a sm is invalid or a named semaphore;
 * - EPERM, the semaphore @a sm is not process-shared and does not belong to the
 * current process.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_destroy.html">
 * Specification.</a>
 *
 */
int sem_destroy(sem_t * sm)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	pse51_sem_t *sem;
	pse51_kqueues_t *kq;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (shadow->magic != PSE51_SEM_MAGIC
	    || shadow->sem->magic != PSE51_SEM_MAGIC) {
		thread_set_errno(EINVAL);
		goto error;
	}

	/* Capture the underlying semaphore and its queue set while still
	   holding nklock: once the shadow is marked deleted and the lock is
	   dropped, shadow->sem may be reused by a concurrent sem_init()
	   on the same sem_t, so it must not be dereferenced again. */
	sem = shadow->sem;
	kq = pse51_kqueues(sem->pshared);

	if (kq != sem->owningq) {
		thread_set_errno(EPERM);
		goto error;
	}

	pse51_mark_deleted(shadow);
	pse51_mark_deleted(sem);
	xnlock_put_irqrestore(&nklock, s);

	/* Flush waiters and free the semaphore outside the lock, using the
	   values captured above. */
	sem_destroy_inner(sem, kq);

	return 0;

      error:
	xnlock_put_irqrestore(&nklock, s);
	return -1;
}
/* Read up to maxLength bytes (0 == whole file) from the regular file at
 * path into a freshly allocated buffer, returning the open descriptor to
 * the caller through *fd.
 *
 * On success returns true with *bytes/*length set and *fd open; the caller
 * owns both the buffer (allocated from alloc) and the descriptor. On
 * failure returns false with *bytes == NULL and *fd == -1, errno preserved
 * from the failing call. */
static Boolean _CFReadBytesFromPathAndGetFD(CFAllocatorRef alloc, const char *path, void **bytes, CFIndex *length, CFIndex maxLength, int extraOpenFlags, int *fd) {
    // maxLength is the number of bytes desired, or 0 if the whole file is desired regardless of length.
    struct statinfo statBuf;

    *bytes = NULL;

    // Hold /dev/autofs_nowait open so the open() below does not block on
    // an automount trigger.
    int no_hang_fd = openAutoFSNoWait();
    *fd = open(path, O_RDONLY|extraOpenFlags|CF_OPENFLGS, 0666);

    if (*fd < 0) {
        closeAutoFSNoWait(no_hang_fd);
        return false;
    }
    if (fstat(*fd, &statBuf) < 0) {
        // Preserve errno across close(), which may clobber it.
        int saveerr = thread_errno();
        close(*fd);
        *fd = -1;
        closeAutoFSNoWait(no_hang_fd);
        thread_set_errno(saveerr);
        return false;
    }
    if ((statBuf.st_mode & S_IFMT) != S_IFREG) {
        // Only regular files are readable this way.
        close(*fd);
        *fd = -1;
        closeAutoFSNoWait(no_hang_fd);
        thread_set_errno(EACCES);
        return false;
    }
    if (statBuf.st_size == 0) {
        *bytes = CFAllocatorAllocate(alloc, 4, 0); // don't return constant string -- it's freed!
        if (__CFOASafe) __CFSetLastAllocationEventName(*bytes, "CFUtilities (file-bytes)");
        *length = 0;
    } else {
        CFIndex desiredLength;
        if ((maxLength >= statBuf.st_size) || (maxLength == 0)) {
            desiredLength = statBuf.st_size;
        } else {
            desiredLength = maxLength;
        }
        *bytes = CFAllocatorAllocate(alloc, desiredLength, 0);
        // BUG FIX: must test the allocated buffer (*bytes), not the
        // out-parameter pointer (bytes), which is never NULL here.
        if (!*bytes) {
            close(*fd);
            *fd = -1;
            closeAutoFSNoWait(no_hang_fd);
            return false;
        }
        if (__CFOASafe) __CFSetLastAllocationEventName(*bytes, "CFUtilities (file-bytes)");
        //	fcntl(fd, F_NOCACHE, 1);
        if (read(*fd, *bytes, desiredLength) < 0) {
            CFAllocatorDeallocate(alloc, *bytes);
            close(*fd);
            *fd = -1;
            closeAutoFSNoWait(no_hang_fd);
            return false;
        }
        *length = desiredLength;
    }
    closeAutoFSNoWait(no_hang_fd);
    return true;
}
/**
 * Read the specified clock.
 *
 * Stores at @a tp the current value of the clock @a clock_id:
 * - CLOCK_REALTIME: time since the Epoch, one system clock tick precision;
 * - CLOCK_MONOTONIC / CLOCK_MONOTONIC_RAW: value of an
 *   architecture-dependent high resolution counter, precision independent
 *   of the system clock tick;
 * - CLOCK_HOST_REALTIME: the clock as seen by the host (typically Linux),
 *   guaranteed consistent between host and Xenomai.
 *
 * @param clock_id clock identifier, one of CLOCK_REALTIME, CLOCK_MONOTONIC,
 * CLOCK_MONOTONIC_RAW or CLOCK_HOST_REALTIME;
 *
 * @param tp where the clock value is stored.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a clock_id is invalid.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_gettime.html">
 * Specification.</a>
 *
 */
int clock_gettime(clockid_t clock_id, struct timespec *tp)
{
	xnticks_t ns;

	if (clock_id == CLOCK_REALTIME) {
		ticks2ts(tp, xntbase_get_time(pse51_tbase));
		return 0;
	}

	if (clock_id == CLOCK_MONOTONIC || clock_id == CLOCK_MONOTONIC_RAW) {
		/* Split the nanosecond CPU time into seconds + leftover
		   nanoseconds. */
		ns = xnpod_get_cpu_time();
		tp->tv_sec = xnarch_uldivrem(ns, ONE_BILLION, &tp->tv_nsec);
		return 0;
	}

	if (clock_id == CLOCK_HOST_REALTIME && do_clock_host_realtime(tp) == 0)
		return 0;

	/* Unknown clock, or the host clock read failed. */
	thread_set_errno(EINVAL);
	return -1;
}
/**
 * Get the value of a semaphore.
 *
 * Stores at @a value the current count of the semaphore @a sm; the
 * semaphore state is unchanged. If the semaphore is currently locked, the
 * stored value is zero.
 *
 * @param sm a semaphore;
 *
 * @param value address where the count is stored on success.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, the semaphore is invalid or uninitialized;
 * - EPERM, the semaphore @a sm is not process-shared and does not belong to
 * the current process.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_getvalue.html">
 * Specification.</a>
 *
 */
int sem_getvalue(sem_t * sm, int *value)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	pse51_sem_t *sem;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Accept both anonymous and named semaphores. */
	if ((shadow->magic != PSE51_SEM_MAGIC
	     && shadow->magic != PSE51_NAMED_SEM_MAGIC)
	    || shadow->sem->magic != PSE51_SEM_MAGIC)
		err = EINVAL;
	else {
		sem = shadow->sem;
		if (sem->owningq != pse51_kqueues(sem->pshared))
			err = EPERM;
		else
			*value = sem->value;
	}

	xnlock_put_irqrestore(&nklock, s);

	if (err) {
		thread_set_errno(err);
		return -1;
	}

	return 0;
}
int sem_post_inner(struct pse51_sem *sem, pse51_kqueues_t *ownq) { if (sem->magic != PSE51_SEM_MAGIC) { thread_set_errno(EINVAL); return -1; } #if XENO_DEBUG(POSIX) if (ownq && ownq != pse51_kqueues(sem->pshared)) { thread_set_errno(EPERM); return -1; } #endif /* XENO_DEBUG(POSIX) */ if (sem->value == SEM_VALUE_MAX) { thread_set_errno(EAGAIN); return -1; } if (xnsynch_wakeup_one_sleeper(&sem->synchbase) != NULL) xnpod_schedule(); else ++sem->value; return 0; }
/* Read up to maxLength bytes (0 == whole file) from the regular file at
 * url into a freshly allocated buffer. Returns true with *bytes/*length
 * set on success (caller frees *bytes with alloc); false otherwise with
 * *bytes == NULL. */
__private_extern__ Boolean _CFReadBytesFromFile(CFAllocatorRef alloc, CFURLRef url, void **bytes, CFIndex *length, CFIndex maxLength) {
    // maxLength is the number of bytes desired, or 0 if the whole file is desired regardless of length.
    struct stat statBuf;
    int fd = -1;
    char path[CFMaxPathSize];
    if (!CFURLGetFileSystemRepresentation(url, true, path, CFMaxPathSize)) {
        return false;
    }

    *bytes = NULL;
    __CFSetNastyFile(url);

#if defined(__WIN32__)
    fd = open(path, O_RDONLY|CF_OPENFLGS, 0666|_S_IREAD);
#else
    fd = open(path, O_RDONLY|CF_OPENFLGS, 0666);
#endif
    if (fd < 0) {
        return false;
    }
    if (fstat(fd, &statBuf) < 0) {
        // Preserve errno across close(), which may clobber it.
        int saveerr = thread_errno();
        close(fd);
        thread_set_errno(saveerr);
        return false;
    }
    if ((statBuf.st_mode & S_IFMT) != S_IFREG) {
        // Only regular files are readable this way.
        close(fd);
        thread_set_errno(EACCES);
        return false;
    }
    if (statBuf.st_size == 0) {
        *bytes = CFAllocatorAllocate(alloc, 4, 0); // don't return constant string -- it's freed!
        // FIX: check the allocation before reporting success.
        if (!*bytes) {
            close(fd);
            return false;
        }
        if (__CFOASafe) __CFSetLastAllocationEventName(*bytes, "CFUtilities (file-bytes)");
        *length = 0;
    } else {
        CFIndex desiredLength;
        if ((maxLength >= statBuf.st_size) || (maxLength == 0)) {
            desiredLength = statBuf.st_size;
        } else {
            desiredLength = maxLength;
        }
        *bytes = CFAllocatorAllocate(alloc, desiredLength, 0);
        // FIX: the allocation result was previously used unchecked; a
        // NULL buffer would have been handed to read().
        if (!*bytes) {
            close(fd);
            return false;
        }
        if (__CFOASafe) __CFSetLastAllocationEventName(*bytes, "CFUtilities (file-bytes)");
        if (read(fd, *bytes, desiredLength) < 0) {
            CFAllocatorDeallocate(alloc, *bytes);
            *bytes = NULL;
            close(fd);
            return false;
        }
        *length = desiredLength;
    }
    close(fd);
    return true;
}
/* Write length bytes to the file at url, creating or truncating it, then
 * flush to stable storage. Returns true on success; on failure returns
 * false with errno preserved from the failing call. */
__private_extern__ Boolean _CFWriteBytesToFile(CFURLRef url, const void *bytes, CFIndex length) {
    struct stat statBuf;
    int fd = -1;
    int mode;
    char path[CFMaxPathSize];
    if (!CFURLGetFileSystemRepresentation(url, true, (uint8_t *)path, CFMaxPathSize)) {
        return false;
    }

#if DEPLOYMENT_TARGET_WINDOWS || 0
    mode = 0666;
    // NOTE(review): mode is captured from the existing file but never used;
    // open() below passes the literal 0666. Presumably the mode was meant
    // to be applied to the new file — confirm before changing behavior.
    if (0 == stat(path, &statBuf)) {
        mode = statBuf.st_mode;
    } else if (thread_errno() != ENOENT) {
        return false;
    }
    fd = open(path, O_WRONLY|O_CREAT|O_TRUNC|CF_OPENFLGS, 0666|_S_IWRITE);
    if (fd < 0) {
        return false;
    }
    if (length && write(fd, bytes, length) != length) {
        // Preserve errno across close(), which may clobber it.
        int saveerr = thread_errno();
        close(fd);
        thread_set_errno(saveerr);
        return false;
    }
    FlushFileBuffers((HANDLE)_get_osfhandle(fd));
    close(fd);
#else
    // Keep /dev/autofs_nowait open so open() does not block on an
    // automount trigger.
    int no_hang_fd = open("/dev/autofs_nowait", 0);
    mode = 0666;
    // NOTE(review): as above, mode is computed but unused by open().
    if (0 == stat(path, &statBuf)) {
        mode = statBuf.st_mode;
    } else if (thread_errno() != ENOENT) {
        close(no_hang_fd);
        return false;
    }
    fd = open(path, O_WRONLY|O_CREAT|O_TRUNC|CF_OPENFLGS, 0666);
    if (fd < 0) {
        close(no_hang_fd);
        return false;
    }
    if (length && write(fd, bytes, length) != length) {
        // Preserve errno across the close() calls.
        int saveerr = thread_errno();
        close(fd);
        close(no_hang_fd);
        thread_set_errno(saveerr);
        return false;
    }
    fsync(fd);
    close(fd);
    close(no_hang_fd);
#endif
    return true;
}
/* Note: _IOReadBytesFromFile is called from PowerManagament project daemon powerd
 * e.g. it has references outside of this file.
 *
 * Read up to maxLength bytes (0 == whole file) from the regular file at
 * path into a buffer allocated from alloc. Returns TRUE with
 * *bytes/*length set on success (caller frees *bytes); FALSE otherwise
 * with *bytes == NULL. */
Boolean _IOReadBytesFromFile(CFAllocatorRef alloc, const char *path, void **bytes, CFIndex *length, CFIndex maxLength)
{
    // alloc must be a valid allocator.
    // maxLength is the number of bytes desired, or 0 if the whole file is desired regardless of length.
    struct stat statBuf;
    int fd = -1;

    if (!alloc) {
        // MF:!!! This is no good.  This function needs to require a non-NULL allocator.  We should probably log or assert or something.
        return FALSE;
    }

    *bytes = NULL;

    fd = open(path, O_RDONLY|CF_OPENFLGS, 0666);
    if (fd < 0) {
        return FALSE;
    }
    if (fstat(fd, &statBuf) < 0) {
        // Preserve errno across close(), which may clobber it.
        int saveerr = thread_errno();
        close(fd);
        thread_set_errno(saveerr);
        return FALSE;
    }
    if ((statBuf.st_mode & S_IFMT) != S_IFREG) {
        // Only regular files are readable this way.
        close(fd);
        thread_set_errno(EACCES);
        return FALSE;
    }
    if (statBuf.st_size == 0) {
        *bytes = CFAllocatorAllocate(alloc, 4, 0); // don't return constant string -- it's freed!
        // FIX: check the allocation before reporting success.
        if (!*bytes) {
            close(fd);
            return FALSE;
        }
        *length = 0;
    } else {
        CFIndex desiredLength;
        if ((maxLength >= statBuf.st_size) || (maxLength == 0)) {
            desiredLength = statBuf.st_size;
        } else {
            desiredLength = maxLength;
        }
        *bytes = CFAllocatorAllocate(alloc, desiredLength, 0);
        // FIX: the allocation result was previously handed to read()
        // unchecked.
        if (!*bytes) {
            close(fd);
            return FALSE;
        }
        if (read(fd, *bytes, desiredLength) < 0) {
            CFAllocatorDeallocate(alloc, *bytes);
            *bytes = NULL;
            close(fd);
            return FALSE;
        }
        *length = desiredLength;
    }
    close(fd);
    return TRUE;
}
/* Write length bytes to the file at path, creating or truncating it, then
 * fsync. Returns TRUE on success; FALSE otherwise with errno preserved
 * from the failing call. */
Boolean _IOWriteBytesToFile(const char *path, const void *bytes, CFIndex length)
{
    struct stat sb;
    int mask, mode, fd = -1;

    /* Probe the process umask without changing it. */
    mask = umask(0);
    umask(mask);
    mode = 0666 & ~mask;
    /* NOTE(review): mode is computed but open() below uses literal 0666 —
       kept as-is to preserve behavior. */
    if (stat(path, &sb) == 0)
        mode = sb.st_mode;
    else if (thread_errno() != ENOENT)
        return FALSE;

    fd = open(path, O_WRONLY|O_CREAT|O_TRUNC|CF_OPENFLGS, 0666);
    if (fd < 0)
        return FALSE;

    if (length && write(fd, bytes, length) != length) {
        /* Preserve errno across close(), which may clobber it. */
        int err = thread_errno();
        close(fd);
        thread_set_errno(err);
        return FALSE;
    }

    fsync(fd);
    close(fd);
    return TRUE;
}
/**
 * Unlink a named semaphore.
 *
 * This service unlinks the semaphore named @a name. This semaphore is not
 * destroyed until all references obtained with sem_open() are closed by calling
 * sem_close(). However, the unlinked semaphore may no longer be reached with
 * the sem_open() service.
 *
 * When a semaphore is destroyed, the memory it used is returned to the system
 * heap, so that further references to this semaphore are not guaranteed to
 * fail, as is the case for unnamed semaphores.
 *
 * @param name the name of the semaphore to be unlinked.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
 * - ENOENT, the named semaphore does not exist.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_unlink.html">
 * Specification.</a>
 *
 */
int sem_unlink(const char *name)
{
	pse51_node_t *node;
	nsem_t *named_sem;
	spl_t s;
	int err;

	xnlock_get_irqsave(&nklock, s);

	/* Detach the node from the registry; fails with ENOENT or
	   ENAMETOOLONG (presumably — see pse51_node_remove; TODO confirm). */
	err = pse51_node_remove(&node, name, PSE51_NAMED_SEM_MAGIC);

	if (err)
		goto error;

	named_sem = node2sem(node);

	if (pse51_node_removed_p(&named_sem->nodebase)) {
		/* No sem_open() reference remains: destroy now. The lock is
		   dropped first since sem_destroy_inner frees memory. */
		xnlock_put_irqrestore(&nklock, s);

		sem_destroy_inner(&named_sem->sembase, pse51_kqueues(1));
	} else
		/* Still referenced: destruction is deferred to the last
		   sem_close(). */
		xnlock_put_irqrestore(&nklock, s);

	return 0;

      error:
	xnlock_put_irqrestore(&nklock, s);
	thread_set_errno(err);
	return -1;
}
/** * Unlink a shared memory object. * * This service unlinks the shared memory object named @a name. The shared * memory object is not destroyed until every file descriptor obtained with the * shm_open() service is closed with the close() service and all mappings done * with mmap() are unmapped with munmap(). However, after a call to this * service, the unlinked shared memory object may no longer be reached * with the shm_open() service. * * @param name name of the shared memory obect to be unlinked. * * @retval 0 on success; * @retval -1 with @a errno set if: * - EPERM, the caller context is invalid; * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters; * - ENOENT, the shared memory object does not exist. * * @par Valid contexts: * - kernel module initialization or cleanup routine; * - kernel-space cancellation cleanup routine; * - user-space thread (Xenomai threads switch to secondary mode); * - user-space cancellation cleanup routine. * * @see * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/shm_unlink.html"> * Specification.</a> * */ int shm_unlink(const char *name) { pse51_node_t *node; pse51_shm_t *shm; int err; spl_t s; if (xnpod_interrupt_p() || !xnpod_root_p()) { err = EPERM; goto error; } xnlock_get_irqsave(&nklock, s); err = pse51_node_remove(&node, name, PSE51_SHM_MAGIC); if (err) { xnlock_put_irqrestore(&nklock, s); error: thread_set_errno(err); return -1; } shm = node2shm(node); pse51_shm_put(shm, 0); xnlock_put_irqrestore(&nklock, s); return 0; }
/**
 * Unlock a semaphore.
 *
 * This service unlocks the semaphore @a sm.
 *
 * If no thread is currently blocked on this semaphore, its count is
 * incremented, otherwise the highest priority thread is unblocked.
 *
 * @param sm the semaphore to be unlocked.
 *
 * @retval 0 on success;
 * @retval -1 with errno set if:
 * - EINVAL, the specified semaphore is invalid or uninitialized;
 * - EPERM, the semaphore @a sm is not process-shared and does not belong to the
 * current process;
 * - EAGAIN, the semaphore count is @a SEM_VALUE_MAX.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_post.html">
 * Specification.</a>
 *
 */
int sem_post(sem_t * sm)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	pse51_sem_t *sem;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Accept both anonymous and named semaphores; the underlying
	   semaphore must itself still be valid. */
	if ((shadow->magic != PSE51_SEM_MAGIC
	     && shadow->magic != PSE51_NAMED_SEM_MAGIC)
	    || shadow->sem->magic != PSE51_SEM_MAGIC) {
		thread_set_errno(EINVAL);
		goto error;
	}

	sem = shadow->sem;

#if XENO_DEBUG(POSIX)
	/* Debug-only ownership check: the semaphore must belong to the
	   caller's queue set. */
	if (sem->owningq != pse51_kqueues(sem->pshared)) {
		thread_set_errno(EPERM);
		goto error;
	}
#endif /* XENO_DEBUG(POSIX) */

	if (sem->value == SEM_VALUE_MAX) {
		thread_set_errno(EAGAIN);
		goto error;
	}

	/* Hand the token to a sleeper if any, otherwise bank it. */
	if (xnsynch_wakeup_one_sleeper(&sem->synchbase) != NULL)
		xnpod_schedule();
	else
		++sem->value;

	xnlock_put_irqrestore(&nklock, s);

	return 0;

      error:
	xnlock_put_irqrestore(&nklock, s);

	return -1;
}
/**
 * Sleep some amount of time.
 *
 * Suspends the calling thread until the wakeup time specified by @a rqtp
 * (a relative interval) elapses, or a signal is delivered. If interrupted
 * by a signal and @a rmtp is non-NULL, the remaining time is stored there.
 *
 * The resolution of this service is one system clock tick.
 *
 * @param rqtp address of the wakeup time.
 *
 * @param rmtp address where the remaining time before wakeup will be stored
 * if the service is interrupted by a signal.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EPERM, the caller context is invalid;
 * - EINVAL, the specified wakeup time is invalid;
 * - EINTR, this service was interrupted by a signal.
 *
 * @par Valid contexts:
 * - Xenomai kernel-space thread,
 * - Xenomai user-space thread (switches to primary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/nanosleep.html">
 * Specification.</a>
 *
 */
int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
{
	/* Delegate to clock_nanosleep() on CLOCK_REALTIME with a relative
	   timeout; translate its error-code return into the errno idiom. */
	int ret = clock_nanosleep(CLOCK_REALTIME, 0, rqtp, rmtp);

	if (ret) {
		thread_set_errno(ret);
		return -1;
	}

	return 0;
}
/**
 * Initialize an unnamed semaphore.
 *
 * This service initializes the semaphore @a sm, with the value @a value.
 *
 * This service fails if @a sm is already initialized or is a named semaphore.
 *
 * @param sm the semaphore to be initialized;
 *
 * @param pshared if zero, means that the new semaphore may only be used by
 * threads in the same process as the thread calling sem_init(); if non zero,
 * means that the new semaphore may be used by any thread that has access to the
 * memory where the semaphore is allocated.
 *
 * @param value the semaphore initial value.
 *
 * @retval 0 on success,
 * @retval -1 with @a errno set if:
 * - EBUSY, the semaphore @a sm was already initialized;
 * - ENOSPC, insufficient memory exists in the system heap to initialize the
 *   semaphore, increase CONFIG_XENO_OPT_SYS_HEAPSZ;
 * - EINVAL, the @a value argument exceeds @a SEM_VALUE_MAX.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_init.html">
 * Specification.</a>
 *
 */
int sem_init(sem_t * sm, int pshared, unsigned value)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	pse51_sem_t *sem;
	xnqueue_t *semq;
	int err;
	spl_t s;

	/* Allocate before taking nklock, so the allocation cannot be done
	   with interrupts masked. */
	sem = (pse51_sem_t *) xnmalloc(sizeof(pse51_sem_t));
	if (!sem) {
		err = ENOSPC;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);

	semq = &pse51_kqueues(pshared)->semq;

	/* If the shadow carries a plausible magic (live, named, or
	   destroyed-named pattern), scan the queue to see whether it really
	   designates a registered semaphore; if so, refuse to re-init. */
	if (shadow->magic == PSE51_SEM_MAGIC
	    || shadow->magic == PSE51_NAMED_SEM_MAGIC
	    || shadow->magic == ~PSE51_NAMED_SEM_MAGIC) {
		xnholder_t *holder;

		for (holder = getheadq(semq); holder;
		     holder = nextq(semq, holder))
			if (holder == &shadow->sem->link) {
				err = EBUSY;
				goto err_lock_put;
			}
	}

	err = pse51_sem_init_inner(sem, pshared, value);
	if (err)
		goto err_lock_put;

	shadow->magic = PSE51_SEM_MAGIC;
	shadow->sem = sem;
	xnlock_put_irqrestore(&nklock, s);

	return 0;

  err_lock_put:
	/* Unwind: release the lock, then free the unused allocation. */
	xnlock_put_irqrestore(&nklock, s);
	xnfree(sem);
  error:
	thread_set_errno(err);
	return -1;
}
/**
 * Get the resolution of the specified clock.
 *
 * Stores at @a res, if non-NULL, the resolution of the clock @a clock_id.
 * For CLOCK_REALTIME, CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW this
 * resolution is the duration of one system clock tick. No other clock is
 * supported.
 *
 * @param clock_id clock identifier, either CLOCK_REALTIME,
 * CLOCK_MONOTONIC or CLOCK_MONOTONIC_RAW;
 *
 * @param res the address where the resolution of the specified clock will be
 * stored on success.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a clock_id is invalid;
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html">
 * Specification.</a>
 *
 */
int clock_getres(clockid_t clock_id, struct timespec *res)
{
	if (clock_id != CLOCK_REALTIME
	    && clock_id != CLOCK_MONOTONIC
	    && clock_id != CLOCK_MONOTONIC_RAW) {
		thread_set_errno(EINVAL);
		return -1;
	}

	/* Resolution is one tick for every supported clock. */
	if (res)
		ticks2ts(res, 1);

	return 0;
}
/**
 * Lock a semaphore.
 *
 * Locks the semaphore @a sm if it is currently unlocked (value > 0);
 * otherwise the calling thread is suspended until the semaphore is
 * unlocked, or a signal is delivered to it.
 *
 * This service is a cancellation point for Xenomai POSIX skin threads
 * (created with the pthread_create() service). When such a thread is
 * cancelled while blocked in a call to this service, the semaphore state
 * is left unchanged before the cancellation cleanup handlers are called.
 *
 * @param sm the semaphore to be locked.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EPERM, the caller context is invalid;
 * - EINVAL, the semaphore is invalid or uninitialized;
 * - EPERM, the semaphore @a sm is not process-shared and does not belong to
 * the current process;
 * - EINTR, the caller was interrupted by a signal while blocked in this
 * service.
 *
 * @par Valid contexts:
 * - Xenomai kernel-space thread,
 * - Xenomai user-space thread (switches to primary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_wait.html">
 * Specification.</a>
 *
 */
int sem_wait(sem_t * sm)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	int ret;
	spl_t s;

	/* An infinite, non-timed wait on the underlying semaphore. */
	xnlock_get_irqsave(&nklock, s);
	ret = sem_timedwait_internal(shadow, 0, XN_INFINITE);
	xnlock_put_irqrestore(&nklock, s);

	if (!ret)
		return 0;

	thread_set_errno(ret);
	return -1;
}
/**
 * Close a named semaphore.
 *
 * This service closes the semaphore @a sm. The semaphore is destroyed only when
 * unlinked with a call to the sem_unlink() service and when each call to
 * sem_open() matches a call to this service.
 *
 * When a semaphore is destroyed, the memory it used is returned to the system
 * heap, so that further references to this semaphore are not guaranteed to
 * fail, as is the case for unnamed semaphores.
 *
 * This service fails if @a sm is an unnamed semaphore.
 *
 * @param sm the semaphore to be closed.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, the semaphore @a sm is invalid or is an unnamed semaphore.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_close.html">
 * Specification.</a>
 *
 */
int sem_close(sem_t * sm)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	nsem_t *named_sem;
	spl_t s;
	int err;

	xnlock_get_irqsave(&nklock, s);

	/* Only named semaphores may be closed. */
	if (shadow->magic != PSE51_NAMED_SEM_MAGIC
	    || shadow->sem->magic != PSE51_SEM_MAGIC) {
		err = EINVAL;
		goto error;
	}

	named_sem = sem2named_sem(shadow->sem);

	/* Drop one sem_open() reference on the registry node. */
	err = pse51_node_put(&named_sem->nodebase);
	if (err)
		goto error;

	if (pse51_node_removed_p(&named_sem->nodebase)) {
		/* unlink was called, and this semaphore is no longer
		   referenced: destroy it (outside the lock, since the
		   memory is freed). */
		pse51_mark_deleted(shadow);
		pse51_mark_deleted(&named_sem->sembase);
		xnlock_put_irqrestore(&nklock, s);

		sem_destroy_inner(&named_sem->sembase, pse51_kqueues(1));
	} else if (!pse51_node_ref_p(&named_sem->nodebase)) {
		/* this semaphore is no longer referenced, but not unlinked:
		   invalidate only this caller's shadow. */
		pse51_mark_deleted(shadow);
		xnlock_put_irqrestore(&nklock, s);
	} else
		/* Other sem_open() references remain: nothing to tear
		   down yet. */
		xnlock_put_irqrestore(&nklock, s);

	return 0;

      error:
	xnlock_put_irqrestore(&nklock, s);
	thread_set_errno(err);
	return -1;
}
/* Write length bytes to the file at url, creating or truncating it, then
 * flush to stable storage. Returns true on success; on failure returns
 * false with errno preserved from the failing call. */
__private_extern__ Boolean _CFWriteBytesToFile(CFURLRef url, const void *bytes, CFIndex length) {
    struct stat statBuf;
    int fd = -1;
    int mode, mask;
    char path[CFMaxPathSize];
    if (!CFURLGetFileSystemRepresentation(url, true, path, CFMaxPathSize)) {
        return false;
    }

#if defined(__WIN32__)
    mask = 0;
#else
    // Probe the process umask without changing it.
    mask = umask(0);
    umask(mask);
#endif
    mode = 0666 & ~mask;
    // NOTE(review): mode is captured from the existing file but never used;
    // open() below passes a literal mode. Presumably the original intent was
    // to preserve the file's mode — confirm before changing behavior.
    if (0 == stat(path, &statBuf)) {
        mode = statBuf.st_mode;
    } else if (thread_errno() != ENOENT) {
        return false;
    }
#if defined(__WIN32__)
    fd = open(path, O_WRONLY|O_CREAT|O_TRUNC|CF_OPENFLGS, 0666|_S_IWRITE);
#else
    fd = open(path, O_WRONLY|O_CREAT|O_TRUNC|CF_OPENFLGS, 0666);
#endif
    if (fd < 0) {
        return false;
    }
    if (length && write(fd, bytes, length) != length) {
        // Preserve errno across close(), which may clobber it.
        int saveerr = thread_errno();
        close(fd);
        thread_set_errno(saveerr);
        return false;
    }
#if defined(__WIN32__)
    FlushFileBuffers((HANDLE)_get_osfhandle(fd));
#else
    fsync(fd);
#endif
    close(fd);
    return true;
}
/**
 * Set the specified clock.
 *
 * This allow setting the CLOCK_REALTIME clock.
 *
 * @param clock_id the id of the clock to be set, only CLOCK_REALTIME is
 * supported.
 *
 * @param tp the address of a struct timespec specifying the new date.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a clock_id is not CLOCK_REALTIME;
 * - EINVAL, the date specified by @a tp is invalid.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_settime.html">
 * Specification.</a>
 *
 */
int clock_settime(clockid_t clock_id, const struct timespec *tp)
{
	xnticks_t wanted;
	spl_t s;

	/* The unsigned cast also rejects negative tv_nsec values. */
	if (clock_id != CLOCK_REALTIME
	    || (unsigned long)tp->tv_nsec >= ONE_BILLION) {
		thread_set_errno(EINVAL);
		return -1;
	}

	wanted = ts2ticks_floor(tp);

	/* Shift the time base by the offset between the requested date and
	   the current one. */
	xnlock_get_irqsave(&nklock, s);
	xntbase_adjust_time(pse51_tbase,
			    (xnsticks_t)(wanted -
					 xntbase_get_time(pse51_tbase)));
	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
/**
 * Unlock a semaphore.
 *
 * Unlocks the semaphore @a sm: if no thread is currently blocked on it,
 * its count is incremented, otherwise the highest priority waiter is
 * unblocked.
 *
 * @param sm the semaphore to be unlocked.
 *
 * @retval 0 on success;
 * @retval -1 with errno set if:
 * - EINVAL, the specified semaphore is invalid or uninitialized;
 * - EPERM, the semaphore @a sm is not process-shared and does not belong to
 * the current process;
 * - EAGAIN, the semaphore count is @a SEM_VALUE_MAX.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_post.html">
 * Specification.</a>
 *
 */
int sem_post(sem_t * sm)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	int ret = -1;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Both anonymous and named semaphores are accepted; everything
	   else is delegated to sem_post_inner(). */
	if (shadow->magic == PSE51_SEM_MAGIC
	    || shadow->magic == PSE51_NAMED_SEM_MAGIC)
		ret = sem_post_inner(shadow->sem, shadow->sem->owningq);
	else
		thread_set_errno(EINVAL);

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
/**
 * Attempt, during a bounded time, to lock a semaphore.
 *
 * This serivce is equivalent to sem_wait(), except that the caller is only
 * blocked until the timeout @a abs_timeout expires.
 *
 * @param sm the semaphore to be locked;
 *
 * @param abs_timeout the timeout, expressed as an absolute value of the
 * CLOCK_REALTIME clock.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EPERM, the caller context is invalid;
 * - EINVAL, the semaphore is invalid or uninitialized;
 * - EINVAL, the specified timeout is invalid;
 * - EPERM, the semaphore @a sm is not process-shared and does not belong to the
 * current process;
 * - EINTR, the caller was interrupted by a signal while blocked in this
 * service;
 * - ETIMEDOUT, the semaphore could not be locked and the specified timeout
 * expired.
 *
 * @par Valid contexts:
 * - Xenomai kernel-space thread,
 * - Xenomai user-space thread (switches to primary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_timedwait.html">
 * Specification.</a>
 *
 */
int sem_timedwait(sem_t * sm, const struct timespec *abs_timeout)
{
	struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
	spl_t s;
	int err;

	/* FIX: POSIX requires EINVAL when tv_nsec is negative or >= 1e9;
	   the previous test (tv_nsec > ONE_BILLION) let both tv_nsec ==
	   ONE_BILLION and negative values through. The unsigned cast
	   matches the validation used by clock_settime(). */
	if ((unsigned long)abs_timeout->tv_nsec >= ONE_BILLION) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	/* +1 tick so the wait never expires before the absolute date. */
	err = sem_timedwait_internal(shadow, 1,
				     ts2ticks_ceil(abs_timeout) + 1);
	xnlock_put_irqrestore(&nklock, s);

      error:
	if (err) {
		thread_set_errno(err);
		return -1;
	}

	return 0;
}
/**
 * Close a file descriptor.
 *
 * This service closes the file descriptor @a fd. In kernel-space, this service
 * only works for file descriptors opened with shm_open(), i.e. shared memory
 * objects. A shared memory object is only destroyed once all file descriptors
 * are closed with this service, it is unlinked with the shm_unlink() service,
 * and all mappings are unmapped with the munmap() service.
 *
 * @param fd file descriptor.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor (in kernel-space, it was not
 *   obtained with shm_open());
 * - EPERM, the caller context is invalid.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - kernel-space cancellation cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode);
 * - user-space cancellation cleanup routine.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/close.html">
 * Specification.</a>
 *
 */
int close(int fd)
{
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	spl_t s;
	int err;

	xnlock_get_irqsave(&nklock, s);

	/* Resolve fd to its shared memory object; pse51_shm_get returns
	   an IS_ERR pointer on failure. */
	shm = pse51_shm_get(&desc, fd, 0);

	if (IS_ERR(shm)) {
		err = -PTR_ERR(shm);
		goto err_put;
	}

	if (xnpod_interrupt_p() || !xnpod_root_p()) {
		err = EPERM;
		goto err_put;
	}

	/* Drop this descriptor's reference; the object is destroyed once
	   the last reference goes away. */
	pse51_shm_put(shm, 1);

	xnlock_put_irqrestore(&nklock, s);

	err = pse51_desc_destroy(desc);
	if (err)
		goto error;

	return 0;

  err_put:
	xnlock_put_irqrestore(&nklock, s);
  error:
	thread_set_errno(err);
	return -1;
}
/* __CFWriteBytesToFileWithAtomicity is a "safe save" facility. Write the bytes using the specified mode on the file to the provided URL. If the atomic flag is true, try to do it in a fashion that will enable a safe save.
 *
 * When atomic: the data is written and fsync'd to a mkstemp() temporary in
 * the destination directory, the requested mode is applied, and the temp
 * file is rename()d over the target so readers never observe a partial
 * file. When not atomic: the target is opened with O_TRUNC and written
 * directly. mode == -1 means "preserve the existing file's mode" (default
 * 0664 for a new file).
 */
static Boolean __CFWriteBytesToFileWithAtomicity(CFURLRef url, const void *bytes, int length, SInt32 mode, Boolean atomic) {
    int fd = -1;
    char auxPath[CFMaxPathSize + 16];
    char cpath[CFMaxPathSize];
    uid_t owner = getuid();
    gid_t group = getgid();
    // Running setuid-root on behalf of another user: ownership of the
    // result must be restored after the rename.
    Boolean writingFileAsRoot = ((getuid() != geteuid()) && (geteuid() == 0));

    if (!CFURLGetFileSystemRepresentation(url, true, (uint8_t *)cpath, CFMaxPathSize)) {
        return false;
    }

    if (-1 == mode || writingFileAsRoot) {
        struct stat statBuf;
        if (0 == stat(cpath, &statBuf)) {
            // Preserve the existing file's mode and ownership.
            mode = statBuf.st_mode;
            owner = statBuf.st_uid;
            group = statBuf.st_gid;
        } else {
            mode = 0664;
            // Special-case /Library/Preferences: root-written prefs are
            // owned by the effective uid and group "admin" (gid 80).
            if (writingFileAsRoot && (0 == strncmp(cpath, "/Library/Preferences", 20))) {
                owner = geteuid();
                group = 80;
            }
        }
    }
    if (atomic) {
        // Create the temporary in the same directory as the target so the
        // final rename() stays within one filesystem.
        CFURLRef dir = CFURLCreateCopyDeletingLastPathComponent(kCFAllocatorSystemDefault, url);
        CFURLRef tempFile = CFURLCreateCopyAppendingPathComponent(kCFAllocatorSystemDefault, dir, CFSTR("cf#XXXXX"), false);
        CFRelease(dir);
        if (!CFURLGetFileSystemRepresentation(tempFile, true, (uint8_t *)auxPath, CFMaxPathSize)) {
            CFRelease(tempFile);
            return false;
        }
        CFRelease(tempFile);
        fd = mkstemp(auxPath);
    } else {
        fd = open(cpath, O_WRONLY|O_CREAT|O_TRUNC, mode);
    }
    if (fd < 0) return false;
    if (length && (write(fd, bytes, length) != length || fsync(fd) < 0)) {
        // Preserve errno across close()/unlink(), which may clobber it.
        int saveerr = thread_errno();
        close(fd);
        if (atomic)
            unlink(auxPath);
        thread_set_errno(saveerr);
        return false;
    }
    close(fd);
    if (atomic) {
        // preserve the mode as passed in originally
        chmod(auxPath, mode);
        if (0 != rename(auxPath, cpath)) {
            unlink(auxPath);
            return false;
        }
        // If the file was renamed successfully and we wrote it as root we need to reset the owner & group as they were.
        if (writingFileAsRoot) {
            chown(cpath, owner, group);
        }
    }
    return true;
}
/**
 * Unmap pages of memory.
 *
 * This service unmaps the shared memory region [addr;addr+len) from the caller
 * address-space.
 *
 * When called from kernel-space the memory region remain accessible as long as
 * it exists, and this service only decrements a reference counter.
 *
 * When called from user-space, if the region is not a shared memory region,
 * this service falls back to the regular Linux munmap() service.
 *
 * @param addr start address of shared memory area;
 *
 * @param len length of the shared memory area.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EINVAL, @a len is null, @a addr is not a multiple of the page size or the
 * range [addr;addr+len) is not a mapped region;
 * - ENXIO, @a addr is not the address of a shared memory area;
 * - EPERM, the caller context is invalid;
 * - EINTR, this service was interrupted by a signal.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - kernel-space cancellation cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode);
 * - user-space cancellation cleanup routine.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/munmap.html">
 * Specification.</a>
 *
 */
int munmap(void *addr, size_t len)
{
	pse51_shm_map_t *mapping = NULL;
	xnholder_t *holder;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_lookup(addr);

	if (!shm) {
		xnlock_put_irqrestore(&nklock, s);
		err = ENXIO;
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		xnlock_put_irqrestore(&nklock, s);
		err = EPERM;
		goto error;
	}

	/* Pin the object while we walk its mapping list outside nklock. */
	++shm->nodebase.refcount;
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	for (holder = getheadq(&shm->mappings);
	     holder; holder = nextq(&shm->mappings, holder)) {
		mapping = link2map(holder);

		if (mapping->addr == addr && mapping->size == len)
			break;
	}

	if (!holder) {
		/* FIX: the original released nklock here although it had
		   already been dropped above (before down_interruptible),
		   causing an unbalanced unlock; only the mapping semaphore
		   must be released on this path. */
		err = EINVAL;
		goto err_up;
	}

	removeq(&shm->mappings, holder);
	up(&shm->maplock);

	xnfree(mapping);
	/* Drop the temporary pin plus the mapping's own reference. */
	pse51_shm_put(shm, 2);
	return 0;

  err_up:
	up(&shm->maplock);
  err_shm_put:
	/* Drop only the temporary pin taken above. */
	pse51_shm_put(shm, 1);
  error:
	thread_set_errno(err);
	return -1;
}
/**
 * Map pages of memory.
 *
 * This service allow shared memory regions to be accessed by the caller.
 *
 * When used in kernel-space, this service returns the address of the offset @a
 * off of the shared memory object underlying @a fd. The protection flags @a
 * prot, are only checked for consistency with @a fd open flags, but memory
 * protection is unsupported. An existing shared memory region exists before it
 * is mapped, this service only increments a reference counter.
 *
 * The only supported value for @a flags is @a MAP_SHARED.
 *
 * When used in user-space, this service maps the specified shared memory region
 * into the caller address-space. If @a fd is not a shared memory object
 * descriptor (i.e. not obtained with shm_open()), this service falls back to
 * the regular Linux mmap service.
 *
 * @param addr ignored.
 *
 * @param len size of the shared memory region to be mapped.
 *
 * @param prot protection bits, checked in kernel-space, but only useful in
 * user-space, are a bitwise or of the following values:
 * - PROT_NONE, meaning that the mapped region can not be accessed;
 * - PROT_READ, meaning that the mapped region can be read;
 * - PROT_WRITE, meaning that the mapped region can be written;
 * - PROT_EXEC, meaning that the mapped region can be executed.
 *
 * @param flags only MAP_SHARED is accepted, meaning that the mapped memory
 * region is shared.
 *
 * @param fd file descriptor, obtained with shm_open().
 *
 * @param off offset in the shared memory region.
 *
 * @retval 0 on success;
 * @retval MAP_FAILED with @a errno set if:
 * - EINVAL, @a len is null or @a addr is not a multiple of @a PAGE_SIZE;
 * - EBADF, @a fd is not a shared memory object descriptor (obtained with
 *   shm_open());
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, @a flags is not @a MAP_SHARED;
 * - EACCES, @a fd is not opened for reading or is not opend for writing and
 *   PROT_WRITE is set in @a prot;
 * - EINTR, this service was interrupted by a signal;
 * - ENXIO, the range [off;off+len) is invalid for the shared memory region
 *   specified by @a fd;
 * - EAGAIN, insufficient memory exists in the system heap to create the
 *   mapping, increase CONFIG_XENO_OPT_SYS_HEAPSZ.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mmap.html">
 * Specification.</a>
 *
 */
void *mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off)
{
	pse51_shm_map_t *map;
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	void *result;
	int err;
	spl_t s;

	if (!len) {
		err = EINVAL;
		goto error;
	}

	if (((unsigned long)addr) % PAGE_SIZE) {
		err = EINVAL;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		xnlock_put_irqrestore(&nklock, s);
		err = -PTR_ERR(shm);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (flags != MAP_SHARED) {
		err = ENOTSUP;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	desc_flags = pse51_desc_getflags(desc) & PSE51_PERMS_MASK;
	xnlock_put_irqrestore(&nklock, s);

	if ((desc_flags != O_RDWR && desc_flags != O_RDONLY) ||
	    ((prot & PROT_WRITE) && desc_flags == O_RDONLY)) {
		err = EACCES;
		goto err_shm_put;
	}

	map = (pse51_shm_map_t *) xnmalloc(sizeof(*map));
	if (!map) {
		err = EAGAIN;
		goto err_shm_put;
	}

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_free_map;
	}

	/* Fixed: the original test "off + len > shm->size" accepted a
	   negative off (signed off_t mixed with unsigned len) and could
	   wrap, yielding a mapping address before the shm heap. Reject
	   negative offsets and check the range without overflow. */
	if (!shm->addr || off < 0 || (size_t)off > shm->size
	    || len > shm->size - (size_t)off) {
		err = ENXIO;
		up(&shm->maplock);
		goto err_free_map;
	}

	/* Align the heap address on a page boundary. */
	result = (void *)PAGE_ALIGN((u_long)shm->addr);
	map->addr = result = (void *)((char *)result + off);
	map->size = len;
	inith(&map->link);
	prependq(&shm->mappings, &map->link);
	up(&shm->maplock);

	return result;

  err_free_map:
	xnfree(map);
  err_shm_put:
	pse51_shm_put(shm, 1);
  error:
	thread_set_errno(err);
	return MAP_FAILED;
}
/**
 * Truncate a file or shared memory object to a specified length.
 *
 * When used in kernel-space, this service set to @a len the size of a shared
 * memory object opened with the shm_open() service. In user-space this service
 * falls back to Linux regular ftruncate service for file descriptors not
 * obtained with shm_open(). When this service is used to increase the size of a
 * shared memory object, the added space is zero-filled.
 *
 * Shared memory are suitable for direct memory access (allocated in physically
 * contiguous memory) if O_DIRECT was passed to shm_open.
 *
 * Shared memory objects may only be resized if they are not currently mapped.
 *
 * @param fd file descriptor;
 *
 * @param len new length of the underlying file or shared memory object.
 *
 * @retval 0 on success;
 * @retval -1 with @a errno set if:
 * - EBADF, @a fd is not a valid file descriptor;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the specified length is invalid;
 * - EINVAL, the architecture can not honour the O_DIRECT flag;
 * - EINTR, this service was interrupted by a signal;
 * - EBUSY, @a fd is a shared memory object descriptor and the underlying shared
 *   memory is currently mapped;
 * - EFBIG, allocation of system memory failed.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/ftruncate.html">
 * Specification.</a>
 *
 */
int ftruncate(int fd, off_t len)
{
	unsigned desc_flags;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err;
	spl_t s;

	/* Resolve the descriptor and take a reference on the shm (last
	   argument 1) under nklock. */
	xnlock_get_irqsave(&nklock, s);
	shm = pse51_shm_get(&desc, fd, 1);

	if (IS_ERR(shm)) {
		err = -PTR_ERR(shm);
		xnlock_put_irqrestore(&nklock, s);
		goto error;
	}

	if (xnpod_asynch_p() || !xnpod_root_p()) {
		err = EPERM;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	if (len < 0) {
		err = EINVAL;
		xnlock_put_irqrestore(&nklock, s);
		goto err_shm_put;
	}

	/* Snapshot the open flags (O_DIRECT is checked below) before
	   dropping nklock; the resize itself is serialized by maplock. */
	desc_flags = pse51_desc_getflags(desc);
	xnlock_put_irqrestore(&nklock, s);

	if (down_interruptible(&shm->maplock)) {
		err = EINTR;
		goto err_shm_put;
	}

	/* Allocate one more page for alignment (the address returned by
	   mmap must be aligned on a page boundary). */
	if (len)
#ifdef CONFIG_XENO_OPT_PERVASIVE
		len = xnheap_rounded_size(len + PAGE_SIZE, PAGE_SIZE);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
		len = xnheap_rounded_size(len + PAGE_SIZE, XNHEAP_PAGE_SIZE);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

	err = 0;
	/* Resizing is only allowed while no mapping exists. */
	if (emptyq_p(&shm->mappings)) {
		/* Temporary storage, in order to preserve the memory
		   contents upon resizing, if possible. */
		void *addr = NULL;
		size_t size = 0;

		if (shm->addr) {
			if (len == xnheap_extentsize(&shm->heapbase)) {
				/* Size unchanged, skip copy and reinit. */
				err = 0;
				goto err_up;
			}

			/* Save the old contents into host memory before
			   tearing the backing heap down. */
			size = xnheap_max_contiguous(&shm->heapbase);
			addr = xnarch_alloc_host_mem(size);
			if (!addr) {
				err = ENOMEM;
				goto err_up;
			}

			memcpy(addr, shm->addr, size);
			xnheap_free(&shm->heapbase, shm->addr);
			xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);

			shm->addr = NULL;
			shm->size = 0;
		}

		if (len) {
			/* O_DIRECT shm is carved out of DMA-able memory. */
			int flags = XNARCH_SHARED_HEAP_FLAGS |
			    ((desc_flags & O_DIRECT) ? GFP_DMA : 0);

			/* xnheap_init_mapped returns a negative error code;
			   local convention is positive errno values. */
			err = -xnheap_init_mapped(&shm->heapbase, len, flags);
			if (err)
				goto err_up;

			xnheap_set_label(&shm->heapbase,
					 "posix shm: %s", shm->nodebase.name);

			shm->size = xnheap_max_contiguous(&shm->heapbase);
			shm->addr = xnheap_alloc(&shm->heapbase, shm->size);
			/* Required: extended space must read as zeroes.
			   NOTE(review): the xnheap_alloc() result is not
			   checked for NULL before this memset — presumably
			   allocating max_contiguous from a fresh heap cannot
			   fail; confirm against xnheap semantics. */
			memset(shm->addr, '\0', shm->size);

			/* Copy the previous contents. */
			if (addr)
				memcpy(shm->addr, addr,
				       shm->size < size ? shm->size : size);

			/* Give back the page reserved above for alignment. */
			shm->size -= PAGE_SIZE;
		}

		if (addr)
			xnarch_free_host_mem(addr, size);
	} else if (len != xnheap_extentsize(&shm->heapbase))
		err = EBUSY;

  err_up:
	up(&shm->maplock);
  err_shm_put:
	pse51_shm_put(shm, 1);

	if (!err)
		return 0;

  error:
	/* ENOMEM from the copy buffer maps to EFBIG per the contract. */
	thread_set_errno(err == ENOMEM ? EFBIG : err);
	return -1;
}
/**
 * Open a named semaphore.
 *
 * This service establishes a connection between the semaphore named @a name and
 * the calling context (kernel-space as a whole, or user-space process).
 *
 * If no semaphore named @a name exists and @a oflags has the @a O_CREAT bit
 * set, the semaphore is created by this function, using two more arguments:
 * - a @a mode argument, of type @b mode_t, currently ignored;
 * - a @a value argument, of type @b unsigned, specifying the initial value of
 *   the created semaphore.
 *
 * If @a oflags has the two bits @a O_CREAT and @a O_EXCL set and the semaphore
 * already exists, this service fails.
 *
 * @a name may be any arbitrary string, in which slashes have no particular
 * meaning. However, for portability, using a name which starts with a slash and
 * contains no other slash is recommended.
 *
 * If sem_open() is called from the same context (kernel-space as a whole, or
 * user-space process) several times with the same value of @a name, the same
 * address is returned.
 *
 * @param name the name of the semaphore to be created;
 *
 * @param oflags flags.
 *
 * @return the address of the named semaphore on success;
 * @return SEM_FAILED with @a errno set if:
 * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
 * - EEXIST, the bits @a O_CREAT and @a O_EXCL were set in @a oflags and the
 *   named semaphore already exists;
 * - ENOENT, the bit @a O_CREAT is not set in @a oflags and the named semaphore
 *   does not exist;
 * - ENOSPC, insufficient memory exists in the system heap to create the
 *   semaphore, increase CONFIG_XENO_OPT_SYS_HEAPSZ;
 * - EINVAL, the @a value argument exceeds @a SEM_VALUE_MAX.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_open.html">
 * Specification.</a>
 *
 */
sem_t *sem_open(const char *name, int oflags, ...)
{
	pse51_node_t *node;
	nsem_t *named_sem;
	unsigned value;
	mode_t mode;
	va_list ap;
	spl_t s;
	int err;

	/* Fast path: look the name up; a non-NULL node means the semaphore
	   already exists in this context and is simply re-opened. */
	xnlock_get_irqsave(&nklock, s);
	err = pse51_node_get(&node, name, PSE51_NAMED_SEM_MAGIC, oflags);
	xnlock_put_irqrestore(&nklock, s);
	if (err)
		goto error;

	if (node) {
		named_sem = node2sem(node);
		goto got_sem;
	}

	/* Not found and O_CREAT set: build a new named semaphore outside
	   of nklock, then try to register it. */
	named_sem = (nsem_t *) xnmalloc(sizeof(*named_sem));
	if (!named_sem) {
		err = ENOSPC;
		goto error;
	}
	named_sem->sembase.is_named = 1;
	named_sem->descriptor.shadow_sem.sem = &named_sem->sembase;

	/* O_CREAT carries two extra arguments: mode (ignored) and the
	   initial count. mode_t promotes to int through varargs. */
	va_start(ap, oflags);
	mode = va_arg(ap, int);
	(void)mode;		/* unused */
	value = va_arg(ap, unsigned);
	va_end(ap);

	xnlock_get_irqsave(&nklock, s);
	err = pse51_sem_init_inner(&named_sem->sembase, 1, value);
	if (err) {
		xnlock_put_irqrestore(&nklock, s);
		xnfree(named_sem);
		goto error;
	}

	err = pse51_node_add(&named_sem->nodebase, name, PSE51_NAMED_SEM_MAGIC);
	if (err && err != EEXIST)
		goto err_put_lock;

	if (err == EEXIST) {
		/* Somebody registered the same name while we were building
		   ours: roll back, and use the winner's semaphore. */
		err = pse51_node_get(&node, name, PSE51_NAMED_SEM_MAGIC, oflags);
		if (err)
			goto err_put_lock;

		xnlock_put_irqrestore(&nklock, s);
		sem_destroy_inner(&named_sem->sembase,
				  pse51_kqueues(named_sem->sembase.pshared));
		named_sem = node2sem(node);
		goto got_sem;
	}
	xnlock_put_irqrestore(&nklock, s);

  got_sem:
	/* Set the magic, needed both at creation and when re-opening a
	   semaphore that was closed but not unlinked. */
	named_sem->descriptor.shadow_sem.magic = PSE51_NAMED_SEM_MAGIC;

	return &named_sem->descriptor.native_sem;

  err_put_lock:
	xnlock_put_irqrestore(&nklock, s);
	sem_destroy_inner(&named_sem->sembase,
			  pse51_kqueues(named_sem->sembase.pshared));
  error:
	thread_set_errno(err);
	return SEM_FAILED;
}
/**
 * Open a shared memory object.
 *
 * This service establishes a connection between a shared memory object and a
 * file descriptor. Further use of this descriptor will allow to dimension and
 * map the shared memory into the calling context address space.
 *
 * One of the following access mode should be set in @a oflags:
 * - O_RDONLY, meaning that the shared memory object may only be mapped with the
 *   PROT_READ flag;
 * - O_WRONLY, meaning that the shared memory object may only be mapped with the
 *   PROT_WRITE flag;
 * - O_RDWR, meaning that the shared memory object may be mapped with the
 *   PROT_READ | PROT_WRITE flag.
 *
 * If no shared memory object named @a name exists, and @a oflags has the @a
 * O_CREAT bit set, the shared memory object is created by this function.
 *
 * If @a oflags has the two bits @a O_CREAT and @a O_EXCL set and the shared
 * memory object alread exists, this service fails.
 *
 * If @a oflags has the bit @a O_TRUNC set, the shared memory exists and is not
 * currently mapped, its size is truncated to 0.
 *
 * If @a oflags has the bit @a O_DIRECT set, the shared memory will be suitable
 * for direct memory access (allocated in physically contiguous memory).
 *
 * @a name may be any arbitrary string, in which slashes have no particular
 * meaning. However, for portability, using a name which starts with a slash and
 * contains no other slash is recommended.
 *
 * @param name name of the shared memory object to open;
 *
 * @param oflags flags.
 *
 * @param mode ignored.
 *
 * @return a file descriptor on success;
 * @return -1 with @a errno set if:
 * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
 * - EEXIST, the bits @a O_CREAT and @a O_EXCL were set in @a oflags and the
 *   shared memory object already exists;
 * - ENOENT, the bit @a O_CREAT is not set in @a oflags and the shared memory
 *   object does not exist;
 * - ENOSPC, insufficient memory exists in the system heap to create the shared
 *   memory object, increase CONFIG_XENO_OPT_SYS_HEAPSZ;
 * - EPERM, the caller context is invalid;
 * - EINVAL, the O_TRUNC flag was specified and the shared memory object is
 *   currently mapped;
 * - EMFILE, too many descriptors are currently open.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - user-space thread (Xenomai threads switch to secondary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/shm_open.html">
 * Specification.</a>
 *
 */
int shm_open(const char *name, int oflags, mode_t mode)
{
	pse51_node_t *node;
	pse51_desc_t *desc;
	pse51_shm_t *shm;
	int err, fd;
	spl_t s;

	/* From root context only. */
	if (xnpod_asynch_p() || !xnpod_root_p()) {
		thread_set_errno(EPERM);
		return -1;
	}

	xnlock_get_irqsave(&nklock, s);
	err = pse51_node_get(&node, name, PSE51_SHM_MAGIC, oflags);
	xnlock_put_irqrestore(&nklock, s);
	if (err)
		goto error;

	if (node) {
		shm = node2shm(node);
		goto got_shm;
	}

	/* We must create the shared memory object, not yet allocated. */
	shm = (pse51_shm_t *) xnmalloc(sizeof(*shm));
	if (!shm) {
		err = ENOSPC;
		goto error;
	}

	xnlock_get_irqsave(&nklock, s);
	err = pse51_node_add(&shm->nodebase, name, PSE51_SHM_MAGIC);
	if (err && err != EEXIST)
		goto err_unlock;

	if (err == EEXIST) {
		/* Same shm was created in the mean time, rollback and use
		   the object registered by the other caller. */
		err = pse51_node_get(&node, name, PSE51_SHM_MAGIC, oflags);
	  err_unlock:
		xnlock_put_irqrestore(&nklock, s);
		xnfree(shm);
		if (err)
			goto error;

		shm = node2shm(node);
		goto got_shm;
	}

	pse51_shm_init(shm);
	xnlock_put_irqrestore(&nklock, s);

  got_shm:
	err = pse51_desc_create(&desc, &shm->nodebase,
				oflags & (PSE51_PERMS_MASK | O_DIRECT));
	if (err)
		goto err_shm_put;

	fd = pse51_desc_fd(desc);

	if ((oflags & O_TRUNC) && ftruncate(fd, 0)) {
		/* Fixed: preserve ftruncate()'s errno across close(), which
		   may otherwise overwrite it before we return. */
		int saveerr = thread_errno();
		close(fd);
		thread_set_errno(saveerr);
		return -1;
	}

	return fd;

  err_shm_put:
	pse51_shm_put(shm, 1);

  error:
	thread_set_errno(err);
	return -1;
}