sem_t * sem_open(const char *name, int oflag, ...) { char sempath[SEM_PATH_SIZE]; struct stat sb; sem_t sem, *semp; unsigned int value = 0; int created = 0, fd; if (!_threads_ready) _rthread_init(); if (oflag & ~(O_CREAT | O_EXCL)) { errno = EINVAL; return (SEM_FAILED); } if (oflag & O_CREAT) { va_list ap; va_start(ap, oflag); /* 3rd parameter mode is not used */ va_arg(ap, mode_t); value = va_arg(ap, unsigned); va_end(ap); if (value > SEM_VALUE_MAX) { errno = EINVAL; return (SEM_FAILED); } }
int sem_timedwait(sem_t *semp, const struct timespec *abstime) { struct tib *tib = TIB_GET(); pthread_t self; sem_t sem; int r; PREP_CANCEL_POINT(tib); if (!_threads_ready) _rthread_init(); self = tib->tib_thread; if (!semp || !(sem = *semp)) { errno = EINVAL; return (-1); } ENTER_DELAYED_CANCEL_POINT(tib, self); r = _sem_wait(sem, 0, abstime, &self->delayed_cancel); LEAVE_CANCEL_POINT_INNER(tib, r); if (r) { errno = r == EWOULDBLOCK ? ETIMEDOUT : r; return (-1); } return (0); }
int sem_wait(sem_t *semp) { struct tib *tib = TIB_GET(); pthread_t self; sem_t sem; int r; PREP_CANCEL_POINT(tib); if (!_threads_ready) _rthread_init(); self = tib->tib_thread; if (!semp || !(sem = *semp)) { errno = EINVAL; return (-1); } ENTER_DELAYED_CANCEL_POINT(tib, self); r = _sem_wait(sem, 0, NULL, &self->delayed_cancel); LEAVE_CANCEL_POINT_INNER(tib, r); if (r) { errno = r; return (-1); } return (0); }
/*
 * real pthread functions
 */

/*
 * Return the handle of the calling thread.
 * The lookup walks the global thread list under _thread_lock;
 * NULL is returned only when lazy library initialization fails.
 */
pthread_t
pthread_self(void)
{
	pthread_t self;

	if (!_threads_ready && _rthread_init() != 0)
		return (NULL);

	_spinlock(&_thread_lock);
	self = _rthread_findself();
	_spinunlock(&_thread_lock);

	return (self);
}
int pthread_attr_init(pthread_attr_t *attrp) { pthread_attr_t attr; int error; /* make sure _rthread_attr_default has been initialized */ if (!_threads_ready) if ((error = _rthread_init())) return (error); attr = calloc(1, sizeof(*attr)); if (!attr) return (errno); *attr = _rthread_attr_default; *attrp = attr; return (0); }
/*
 * Create a new thread running start_routine(arg) and store its handle
 * in *threadp.  Attributes come from "attr" when given, otherwise
 * built-in defaults are used.  Returns 0 on success or an errno value.
 */
int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	pthread_t thread;
	pid_t tid;
	int rc = 0;

	/* lazily initialize the thread library on first use */
	if (!_threads_ready)
		if ((rc = _rthread_init()))
			return (rc);

	thread = malloc(sizeof(*thread));
	if (!thread)
		return (errno);
	memset(thread, 0, sizeof(*thread));
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;

	/* copy caller-supplied attributes, or fill in the defaults */
	if (attr)
		thread->attr = *(*attr);
	else {
		thread->attr.stack_size = RTHREAD_STACK_SIZE_DEF;
		thread->attr.guard_size = sysconf(_SC_PAGESIZE);
		/* the guard page is carved out of the requested stack size */
		thread->attr.stack_size -= thread->attr.guard_size;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	/* the lock covers stack allocation, list insertion, and rfork */
	_spinlock(&_thread_lock);

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}
	LIST_INSERT_HEAD(&_thread_list, thread, threads);

	tid = rfork_thread(RFPROC | RFTHREAD | RFMEM | RFNOWAIT,
	    thread->stack->sp, _rthread_start, thread);
	if (tid == -1) {
		rc = errno;
		goto fail2;
	}
	/* new thread will appear in _rthread_start */
	thread->tid = tid;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	*threadp = thread;

	/*
	 * Since _rthread_start() acquires the thread lock and due to the way
	 * signal delivery is implemented, this is not a race.
	 */
	if (thread->attr.create_suspended)
		kill(thread->tid, SIGSTOP);

	_spinunlock(&_thread_lock);

	return (0);

	/* unwind in reverse order of acquisition, lock still held */
fail2:
	_rthread_free_stack(thread->stack);
	LIST_REMOVE(thread, threads);
fail1:
	_spinunlock(&_thread_lock);
	_rthread_free(thread);

	return (rc);
}