/*
 * sem_timedwait() shim for macOS built on Mach semaphores.
 *
 * Returns 0 on success; -1 with errno set to ETIMEDOUT, EINTR or EINVAL
 * otherwise.
 *
 * NOTE(review): Mach semaphore_timedwait() takes a RELATIVE duration while
 * POSIX sem_timedwait() specifies an ABSOLUTE deadline; this shim passes the
 * caller's timespec through unchanged -- confirm callers pass relative times.
 */
int sem_timedwait(sem_t* sem, const struct timespec *abs_timeout) {
    int retval = 0;
    mach_timespec_t mts;

    /*
     * BUG FIX: use the caller's timeout only when BOTH fields are
     * non-negative (was '||', which let e.g. {5, -1} through; the negative
     * value would be copied into mach_timespec_t, whose fields are
     * narrower/unsigned, producing a bogus wait).
     */
    if (abs_timeout->tv_sec >= 0 && abs_timeout->tv_nsec >= 0) {
        mts.tv_sec = abs_timeout->tv_sec;
        mts.tv_nsec = abs_timeout->tv_nsec;
    } else {
        // FIX: If we really wait forever, we cannot shut down VERMONT
        // this is mac os x specific and does not happen on linux
        // hence, we just add a small timeout instead of blocking
        // indefinately
        mts.tv_sec = 1;
        mts.tv_nsec = 0;
    }

    retval = semaphore_timedwait(*sem, mts);

    /* Map Mach status codes onto the POSIX 0 / -1+errno convention. */
    switch (retval) {
    case KERN_SUCCESS:
        return 0;
    case KERN_OPERATION_TIMED_OUT:
        errno = ETIMEDOUT;
        break;
    case KERN_ABORTED:
        errno = EINTR; /* interrupted by a signal, like POSIX EINTR */
        break;
    default:
        errno = EINVAL;
        break;
    }
    return -1;
}
bool uHTTP::Semaphore::wait(time_t timeoutSec) { if (!this->isInitialized) return false; bool isSuccess = true; #if defined(__APPLE__) if (0 < timeoutSec) { mach_timespec_t machTimeout; machTimeout.tv_sec = (unsigned int)timeoutSec; machTimeout.tv_nsec = 0; isSuccess = (semaphore_timedwait(semId, machTimeout) == KERN_SUCCESS) ? true : false; } else { isSuccess = (semaphore_wait(semId) == KERN_SUCCESS) ? true : false; } #else if (0 < timeoutSec) { timespec absTimeout; absTimeout.tv_sec = timeoutSec; absTimeout.tv_nsec = 0; isSuccess = (sem_timedwait(&semId, &absTimeout) == 0) ? true : false; } else { isSuccess = (sem_wait(&semId) == 0) ? true : false; } #endif return isSuccess; }
/*
 * Thin wrapper mapping a POSIX-style timespec onto Mach's
 * semaphore_timedwait().  Returns the raw kern_return_t value
 * (KERN_SUCCESS, KERN_OPERATION_TIMED_OUT, KERN_ABORTED, ...), not a
 * POSIX 0/-1+errno result.
 *
 * NOTE(review): the parameter is named abs_timeout, but Mach's
 * semaphore_timedwait() interprets its argument as a RELATIVE duration.
 * If callers really pass an absolute deadline, this will wait far too
 * long -- confirm against the call sites.
 */
static int sem_timedwait_osx(sem_ref sem, const struct timespec *abs_timeout) {
    mach_timespec_t wait_time;
    wait_time.tv_sec = abs_timeout->tv_sec;   /* may narrow: mach tv_sec is unsigned int */
    wait_time.tv_nsec = abs_timeout->tv_nsec;
    return semaphore_timedwait(*sem, wait_time);
}
inline bool Semaphore::timed_wait(unsigned ms) { const unsigned s = ms / 1000; const int nsec = ((int)ms - (s * 1000)) * 1000000; const mach_timespec_t t = { s, nsec }; return semaphore_timedwait(_sem, t) == KERN_SUCCESS; }
/*
 * Perform a timed down (P) on a native Mach semaphore, waiting at most
 * msecWait milliseconds.  *downOccurred reports whether the semaphore was
 * actually acquired.  Returns a kern_return_t-style code; note that a
 * plain timeout is reported as KERN_SUCCESS with *downOccurred == FALSE.
 */
static int
MXUserTimedDown(NativeSemaphore *sema,  // IN:
                uint32 msecWait,        // IN:
                Bool *downOccurred)     // OUT:
{
   uint64 nsecWait;
   VmTimeType before;
   kern_return_t err;

   /* The retry/return logic below relies on "no error" being zero. */
   ASSERT_ON_COMPILE(KERN_SUCCESS == 0);

   /*
    * Work in nanoseconds. Time the semaphore_timedwait operation in case
    * it is interrupted (KERN_ABORT). If it is, determine how much time is
    * necessary to fulfill the specified wait time and retry with a new
    * and appropriate timeout.
    */

   nsecWait = 1000000ULL * (uint64) msecWait;

   before = Hostinfo_SystemTimerNS();

   do {
      VmTimeType after;
      mach_timespec_t ts;

      ts.tv_sec = nsecWait / MXUSER_A_BILLION;
      ts.tv_nsec = nsecWait % MXUSER_A_BILLION;

      err = semaphore_timedwait(*sema, ts);

      after = Hostinfo_SystemTimerNS();

      if (err == KERN_SUCCESS) {
         *downOccurred = TRUE;
      } else {
         *downOccurred = FALSE;

         if (err == KERN_OPERATION_TIMED_OUT) {
            /* Really timed out; no down occurred, no error */
            err = KERN_SUCCESS;
         } else {
            if (err == KERN_ABORTED) {
               /* Interrupted: deduct the elapsed time and retry the wait. */
               VmTimeType duration = after - before;

               if (duration < nsecWait) {
                  nsecWait -= duration;
                  before = after;
               } else {
                  err = KERN_SUCCESS;  // "timed out" anyway... no error
               }
            }
         }
      }
   } while (nsecWait && (err == KERN_ABORTED));

   return err;
}
/* Non-blocking down: poll the Mach semaphore with a zero-length timeout.
 * Returns 0 when the semaphore was acquired, -1 otherwise (busy or error). */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t no_wait;

  no_wait.tv_sec = 0;
  no_wait.tv_nsec = 0;

  return semaphore_timedwait(*sem, no_wait) == KERN_SUCCESS ? 0 : -1;
}
// Wait on the semaphore for at most `duration` (a relative Mach timespec).
//
// NOTE(review): KERN_OPERATION_TIMED_OUT falls through to `return true`,
// i.e. a plain timeout is reported as success and only other Mach errors
// return false.  That is surprising for a "TimedWait" -- confirm callers
// depend on this before changing it.
bool Semaphore::TimedWait(mach_timespec_t duration) {
    kern_return_t result = semaphore_timedwait(mSemaphore, duration);

    // Log and fail only on genuine errors (KERN_ABORTED, KERN_FAILURE, ...).
    if(KERN_SUCCESS != result && KERN_OPERATION_TIMED_OUT != result) {
        LOGGER_WARNING("org.sbooth.AudioEngine.Semaphore", "Semaphore couldn't timedwait: " << mach_error_string(result));
        return false;
    }
    return true;
}
bool JSemaphore::Wait(unsigned int time_ms) { #if defined(__MACH__) && defined(__APPLE__) mach_timespec_t waittime; waittime.tv_sec = time_ms / 1000; waittime.tv_nsec = 1000000 * (time_ms % 1000); #else struct timespec waittime; #endif struct timeval now; if (gettimeofday(&now, NULL) == -1) { assert("Unable to get time by clock_gettime!" == 0); return false; } #if !(defined(__MACH__) && defined(__APPLE__)) waittime.tv_nsec = ((time_ms % 1000) * 1000 * 1000) + (now.tv_usec * 1000); waittime.tv_sec = (time_ms / 1000) + (waittime.tv_nsec / (1000*1000*1000)) + now.tv_sec; waittime.tv_nsec %= 1000*1000*1000; #endif errno = 0; #if defined(__MACH__) && defined(__APPLE__) int sem_wait_retval = semaphore_timedwait(m_semaphore, waittime); if (sem_wait_retval == KERN_OPERATION_TIMED_OUT) { errno = ETIMEDOUT; } else if (sem_wait_retval == KERN_ABORTED) { errno = EINTR; } else if (sem_wait_retval != 0) { errno = EINVAL; } #else int sem_wait_retval = sem_timedwait(&m_semaphore, &waittime); #endif if (sem_wait_retval == 0) { #if defined(__MACH__) && defined(__APPLE__) pthread_mutex_lock(&semcount_mutex); semcount--; pthread_mutex_unlock(&semcount_mutex); #endif return true; } else { assert((errno == ETIMEDOUT) || (errno == EINTR)); return false; } return sem_wait_retval == 0 ? true : false; }
// Wait on the semaphore for at most time_ms milliseconds.
// Returns true when the semaphore was acquired, false on timeout or
// interruption (asserts on any other failure).
bool Semaphore::wait(unsigned int time_ms) {
#ifdef _WIN32
    unsigned int ret = WaitForSingleObject(semaphore, time_ms);
    if (ret == WAIT_OBJECT_0) {
        return true;
    } else {
        // The only expected failure mode on Windows is a timeout.
        assert(ret == WAIT_TIMEOUT);
        return false;
    }
#else
# if defined(__MACH__) && defined(__APPLE__)
    // Mach semaphore_timedwait() takes a RELATIVE timeout.
    mach_timespec_t wait_time;
    wait_time.tv_sec = time_ms / 1000;
    wait_time.tv_nsec = 1000000 * (time_ms % 1000);
    errno = 0;
    int ret = semaphore_timedwait(semaphore, wait_time);
    // Translate Mach status codes to errno so the shared assert below works.
    switch (ret) {
    case KERN_OPERATION_TIMED_OUT:
        errno = ETIMEDOUT;
        break;
    case KERN_ABORTED:
        errno = EINTR;
        break;
    default:
        if (ret)
            errno = EINVAL;
    }
# else
    // POSIX sem_timedwait() takes an ABSOLUTE deadline: now + time_ms.
    struct timespec wait_time;
    struct timeval now;
    if (gettimeofday(&now, NULL) == -1) {
        std::cerr << "Semaphore::wait(ms): Unable to get time with gettimeofday!" << std::endl;
        abort();
    }
    wait_time.tv_nsec = ((time_ms % 1000) * 1000 * 1000) + (now.tv_usec * 1000);
    // Carry nanosecond overflow into the seconds field.
    wait_time.tv_sec = (time_ms / 1000) + (wait_time.tv_nsec / (1000 * 1000 * 1000)) + now.tv_sec;
    wait_time.tv_nsec %= 1000 * 1000 * 1000;
    int ret = sem_timedwait(&semaphore, &wait_time);
# endif
    // Only a timeout or a signal interruption are acceptable failures.
    assert(!ret || (errno == ETIMEDOUT || errno == EINTR));
    return !ret;
#endif
}
// Wait on the semaphore without yielding to the message pump, honoring
// `timeout`.  Returns true when the semaphore was acquired, false on
// timeout.  Retries semaphore_timedwait() after KERN_ABORTED (signal
// wakeups) with the time remaining until the absolute deadline.
bool Threading::Semaphore::WaitWithoutYield(const wxTimeSpan& timeout)
{
    // This method is the reason why there has to be a special Darwin
    // implementation of Semaphore. Note that semaphore_timedwait() is prone
    // to returning with KERN_ABORTED, which basically signifies that some
    // signal has worken it up. The best official "documentation" for
    // semaphore_timedwait() is the way it's used in Grand Central Dispatch,
    // which is open-source.

    // on x86 platforms, mach_absolute_time() returns nanoseconds
    // TODO(aktau): on iOS a scale value from mach_timebase_info will be necessary
    u64 const kOneThousand = 1000;
    u64 const kOneBillion = kOneThousand * kOneThousand * kOneThousand;
    // Convert the millisecond timeout to nanoseconds.
    u64 const delta = timeout.GetMilliseconds().GetValue() * (kOneThousand * kOneThousand);
    mach_timespec_t ts;
    kern_return_t kr = KERN_ABORTED;
    for (u64 now = mach_absolute_time(), deadline = now + delta;
         kr == KERN_ABORTED; now = mach_absolute_time()) {
        if (now > deadline) {
            // timed out by definition
            return false;
        }

        // Wait only for the time still remaining until the deadline.
        u64 timeleft = deadline - now;
        ts.tv_sec = timeleft / kOneBillion;
        ts.tv_nsec = timeleft % kOneBillion;

        // possible return values of semaphore_timedwait() (from XNU sources):
        // internal kernel val -> return value
        // THREAD_INTERRUPTED  -> KERN_ABORTED
        // THREAD_TIMED_OUT    -> KERN_OPERATION_TIMED_OUT
        // THREAD_AWAKENED     -> KERN_SUCCESS
        // THREAD_RESTART      -> KERN_TERMINATED
        // default             -> KERN_FAILURE
        kr = semaphore_timedwait(m_sema, ts);
    }

    if (kr == KERN_OPERATION_TIMED_OUT) {
        return false;
    }

    // while it's entirely possible to have KERN_FAILURE here, we should
    // probably assert so we can study and correct the actual error here
    // (the thread dying while someone is wainting for it).
    MACH_CHECK(kr);

    // Keep the userspace counter in sync with the successful down.
    __atomic_sub_fetch(&m_counter, 1, __ATOMIC_SEQ_CST);
    return true;
}
/* Non-blocking down on a Mach semaphore.
 * Returns 0 on success, -EAGAIN when the semaphore is currently
 * unavailable; any other Mach status is a logic error and aborts. */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t zero_wait;
  kern_return_t status;

  zero_wait.tv_sec = 0;
  zero_wait.tv_nsec = 0;

  status = semaphore_timedwait(*sem, zero_wait);
  switch (status) {
  case KERN_SUCCESS:
    return 0;
  case KERN_OPERATION_TIMED_OUT:
    return -EAGAIN;
  default:
    abort();
    return -EINVAL;  /* Satisfy the compiler. */
  }
}
// Block the current thread on the mutex's Mach semaphore.
// timeout < 0 means wait forever; otherwise timeout is in milliseconds.
// Returns true when the semaphore was acquired.
bool QMutexPrivate::wait(int timeout)
{
    kern_return_t r;
    if (timeout < 0) {
        do {
            // semaphore_wait() can be interrupted (KERN_ABORTED) by signals;
            // retry until a real wakeup.
            r = semaphore_wait(mach_semaphore);
        } while (r == KERN_ABORTED);
        Q_ASSERT(r == KERN_SUCCESS);
    } else {
        mach_timespec_t ts;
        ts.tv_nsec = ((timeout % 1000) * 1000) * 1000;  // ms remainder -> ns
        ts.tv_sec = (timeout / 1000);
        // NOTE(review): unlike the infinite-wait path, KERN_ABORTED is NOT
        // retried here, so a signal makes this return false as if it timed
        // out early -- confirm that callers tolerate the spurious wakeup.
        r = semaphore_timedwait(mach_semaphore, ts);
    }
    return (r == KERN_SUCCESS);
}
/* Put the event listener to sleep until it is notified or `timeout`
 * (microseconds) expires.  Three back-ends are selected at compile time:
 * Linux futex, Mach semaphore, or a generic monitor.
 * NOTE(review): the return type is not visible in this chunk (likely
 * declared on the preceding line) -- confirm it is void. */
mm_event_listener_timedwait(struct mm_event_listener *listener, mm_ring_seqno_t stamp,
			    mm_timeout_t timeout)
{
	ENTER();

#if ENABLE_LINUX_FUTEX
	/* Split the microsecond timeout into seconds + nanoseconds. */
	struct timespec ts;
	ts.tv_sec = (timeout / 1000000);
	ts.tv_nsec = (timeout % 1000000) * 1000;

	// Publish the log before a sleep.
	mm_log_relay();

	/* Sleep only while the futex word still equals the expected stamp. */
	int rc = mm_syscall_4(SYS_futex, mm_event_listener_futex(listener),
			      FUTEX_WAIT_PRIVATE, stamp, (uintptr_t) &ts);
	if (rc != 0 && errno != EWOULDBLOCK && errno != ETIMEDOUT)
		mm_fatal(errno, "futex");
#elif ENABLE_MACH_SEMAPHORE
	(void) stamp;

	/* Mach timed wait takes a relative mach_timespec_t. */
	mach_timespec_t ts;
	ts.tv_sec = (timeout / 1000000);
	ts.tv_nsec = (timeout % 1000000) * 1000;

	// Publish the log before a sleep.
	mm_log_relay();

	/* A plain timeout is expected; any other failure is fatal. */
	kern_return_t r = semaphore_timedwait(listener->semaphore, ts);
	if (r != KERN_SUCCESS && unlikely(r != KERN_OPERATION_TIMED_OUT))
		mm_fatal(0, "semaphore_timedwait");
#else
	/* Generic fallback: monitor wait against an absolute deadline. */
	mm_timeval_t time = mm_clock_gettime_realtime() + timeout;

	mm_thread_monitor_lock(&listener->monitor);
#if ENABLE_NOTIFY_STAMP
	/* Sleep only if no notification arrived since `stamp` was captured. */
	if (stamp == mm_event_listener_enqueue_stamp(listener))
		mm_thread_monitor_timedwait(&listener->monitor, time);
#else
	if (listener->notify_stamp == stamp)
		mm_thread_monitor_timedwait(&listener->monitor, time);
#endif
	mm_thread_monitor_unlock(&listener->monitor);
#endif

	LEAVE();
}
bool JackMachSemaphore::TimedWait(long usec) { if (!fSemaphore) { jack_error("JackMachSemaphore::TimedWait name = %s already deallocated!!", fName); return false; } kern_return_t res; mach_timespec time; time.tv_sec = usec / 1000000; time.tv_nsec = (usec % 1000000) * 1000; if ((res = semaphore_timedwait(fSemaphore, time)) != KERN_SUCCESS) { jack_error("JackMachSemaphore::TimedWait name = %s usec = %ld err = %s", fName, usec, mach_error_string(res)); } return (res == KERN_SUCCESS); }
// Timed down on a Mach semaphore.  NOTE the inverted-looking convention:
// returns true when the wait TIMED OUT, false when the semaphore was
// actually acquired.  Any other Mach status is fatal (verified below).
bool _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
{
	mach_timespec_t _timeout;
	kern_return_t kr;

	do {
		// Recompute the remaining relative timeout on every retry so a
		// signal wakeup (KERN_ABORTED) does not extend the deadline.
		uint64_t nsec = _dispatch_timeout(timeout);
		_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
		_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
		kr = slowpath(semaphore_timedwait(*sema, _timeout));
	} while (kr == KERN_ABORTED);

	if (kr == KERN_OPERATION_TIMED_OUT) {
		return true;
	}
	// Any status other than success or timeout is a logic error.
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	return false;
}
void com_ximeta_driver_NDASDiskArrayController::workerThread(void* argument) { DbgIOLog(DEBUG_MASK_NDAS_TRACE, ("workerThread: Entered.\n")); do { // com_ximeta_driver_NDASCommand *command; kern_return_t sema_result; mach_timespec_t wait_time; UInt32 currentTime; // Set timeout and wait. wait_time.tv_sec = NDAS_MAX_WAIT_TIME_FOR_PNP_MESSAGE; sema_result = semaphore_timedwait(fCommandSema, wait_time); switch(sema_result) { case KERN_SUCCESS: { // Check Command Queue. if(0 == fCommandArray->getCount()) { // Maybe Terminate signal. DbgIOLog(DEBUG_MASK_NDAS_TRACE, ("workerThread: No Command.\n")); continue; } com_ximeta_driver_NDASCommand *command = (com_ximeta_driver_NDASCommand *)fCommandArray->getObject(0); if( NULL == command ) { DbgIOLog(DEBUG_MASK_NDAS_ERROR, ("workerThread: getObject return NULL.\n")); continue; } switch(command->command()) { case kNDASCommandChangedSetting: { // Update Last BroadCast Time. NDAS_clock_get_system_value(&lastBroadcastTime); processSettingCommand(command); } break; case kNDASCommandSRB: { processSrbCommand(command); } break; default: DbgIOLog(DEBUG_MASK_NDAS_ERROR, ("workerThread: Unknowen Command. %d\n", command->command())); } fCommandGate->runCommand((void *)kNDASCommandQueueCompleteCommand); // Check Broadcast Time. NDAS_clock_get_system_value(¤tTime); if(currentTime - lastBroadcastTime > NDAS_MAX_WAIT_TIME_FOR_PNP_MESSAGE) { DbgIOLog(DEBUG_MASK_NDAS_INFO, ("workerThread: Timed Out with SRB commands.\n")); if ( fWakeup ) { // Timer value is not correct. fWakeup = false; NDAS_clock_get_system_value(&lastBroadcastTime); } else { if(!fInSleepMode) { // No PnP Message and User command. processNoPnPMessageFromNDAS(); } } } } break; case KERN_OPERATION_TIMED_OUT: { DbgIOLog(DEBUG_MASK_NDAS_INFO, ("workerThread: Timed Out.\n")); if ( fWakeup ) { // Timer value is not correct. fWakeup = false; } else { if(!fInSleepMode) { // No PnP Message and User command. 
processNoPnPMessageFromNDAS(); } } } break; default: DbgIOLog(DEBUG_MASK_NDAS_ERROR, ("workerThread: Invalid return value. %d\n", sema_result)); break; } } while(fThreadTerminate == false); for(int count = 0; count < MAX_NR_OF_TARGETS_PER_DEVICE; count++) { if ( fTargetDevices[count] ) { deleteLogicalDevice(count); fProvider->unmount(count); } } semaphore_signal(fExitSema); IOExitThread(); // Never return. }
// Queue a task and block the calling thread until the worker thread has
// processed it.  Returns the task's return value.
UInt64 BGM_TaskQueue::QueueSync(BGM_TaskID inTaskID, bool inRunOnRealtimeThread, UInt64 inTaskArg1, UInt64 inTaskArg2)
{
    DebugMsg("BGM_TaskQueue::QueueSync: Queueing task synchronously to be processed on the %s thread. inTaskID=%d inTaskArg1=%llu inTaskArg2=%llu",
             (inRunOnRealtimeThread ? "realtime" : "non-realtime"), inTaskID, inTaskArg1, inTaskArg2);

    // Create the task
    BGM_Task theTask(inTaskID, /* inIsSync = */ true, inTaskArg1, inTaskArg2);

    // Add the task to the queue
    TAtomicStack<BGM_Task>& theTasks =
        (inRunOnRealtimeThread ? mRealTimeThreadTasks : mNonRealTimeThreadTasks);
    theTasks.push_atomic(&theTask);

    // Wake the worker thread so it'll process the task. (Note that semaphore_signal has an implicit barrier.)
    kern_return_t theError = semaphore_signal(inRunOnRealtimeThread ? mRealTimeThreadWorkQueuedSemaphore : mNonRealTimeThreadWorkQueuedSemaphore);
    BGM_Utils::ThrowIfMachError("BGM_TaskQueue::QueueSync", "semaphore_signal", theError);

    // Wait until the task has been processed.
    //
    // The worker thread signals all threads waiting on this semaphore when it finishes a task. The comments in WorkerThreadProc
    // explain why we have to check the condition in a loop here.
    bool didLogTimeoutMessage = false;
    while(!theTask.IsComplete())
    {
        semaphore_t theTaskCompletedSemaphore =
            inRunOnRealtimeThread ? mRealTimeThreadSyncTaskCompletedSemaphore : mNonRealTimeThreadSyncTaskCompletedSemaphore;
        // TODO: Because the worker threads use semaphore_signal_all instead of semaphore_signal, a thread can miss the signal if
        //       it isn't waiting at the right time. Using a timeout for now as a temporary fix so threads don't get stuck here.
        //
        // NOTE(review): this assumes kRealTimeThreadMaximumComputationNs * 4
        // stays below one second (mach_timespec_t expects tv_nsec < 1e9) --
        // confirm against the constant's definition.
        theError = semaphore_timedwait(theTaskCompletedSemaphore,
                                       (mach_timespec_t){ 0, kRealTimeThreadMaximumComputationNs * 4 });

        if(theError == KERN_OPERATION_TIMED_OUT)
        {
            if(!didLogTimeoutMessage && inRunOnRealtimeThread)
            {
                DebugMsg("BGM_TaskQueue::QueueSync: Task %d taking longer than expected.", theTask.GetTaskID());
                didLogTimeoutMessage = true;
            }
        }
        else
        {
            BGM_Utils::ThrowIfMachError("BGM_TaskQueue::QueueSync", "semaphore_timedwait", theError);
        }

        // Ensure the loop re-reads the task's completion state.
        CAMemoryBarrier();
    }

    if(didLogTimeoutMessage)
    {
        DebugMsg("BGM_TaskQueue::QueueSync: Late task %d finished.", theTask.GetTaskID());
    }

    if(theTask.GetReturnValue() != INT64_MAX)
    {
        DebugMsg("BGM_TaskQueue::QueueSync: Task %d returned %llu.", theTask.GetTaskID(), theTask.GetReturnValue());
    }

    return theTask.GetReturnValue();
}
bool Semaphore::trywait() { mach_timespec_t timeout_m_ts = { 0, 0 }; return semaphore_timedwait(sem, timeout_m_ts) == KERN_SUCCESS; }
bool Semaphore::timedwait(const Time& timeout) { mach_timespec_t timeout_ts; timeout_ts.tv_sec = timeout.ns() / Time::NS_IN_S; timeout_ts.tv_nsec = timeout.ns() % Time::NS_IN_S; return semaphore_timedwait(sem, timeout_ts) == KERN_SUCCESS; }
inline bool Semaphore::try_wait() { const mach_timespec_t zero = { 0, 0 }; return semaphore_timedwait(_sem, zero) == KERN_SUCCESS; }
// Slow path for dispatch_group_wait(): parks the calling thread on the
// group's waiter semaphore until the group empties or `timeout` expires.
// Returns the value of _dispatch_group_wake() when woken by the group, or
// KERN_OPERATION_TIMED_OUT / -1+ETIMEDOUT (per back-end) on timeout.
DISPATCH_NOINLINE
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long orig;

again:
	// check before we cause another signal to be sent by incrementing
	// dsema->dsema_group_waiters
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		do {
			// Recompute the relative timeout each retry after KERN_ABORTED.
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port, _timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_waiter_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		do {
			_timeout = _dispatch_timeout_ts(timeout);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif

	// A wakeup may be spurious; loop and re-check the group state.
	goto again;
}
/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 *
 * Returns ESUCCESS on wakeup, ETIMEDOUT when abstime passed, EINVAL on
 * bad arguments or unexpected Mach status, or the mutex lock/unlock error.
 */
static int
_pthread_cond_wait(pthread_cond_t *cond,
		   pthread_mutex_t *mutex,
		   const struct timespec *abstime)
{
	int res;
	kern_return_t kern_res;
	pthread_mutex_t *busy;
	tvalspec_t then;

	/* Lazily initialize a statically-initialized condvar on first use. */
	if (cond->sig == _PTHREAD_COND_SIG_init) {
		if (res = pthread_cond_init(cond, NULL))  /* assignment intentional */
			return (res);
	}
	if (cond->sig != _PTHREAD_COND_SIG)
		return (EINVAL);	/* Not a condition variable */
	LOCK(cond->lock);
	busy = cond->busy;
	if ((busy != (pthread_mutex_t *)NULL) && (busy != mutex)) {
		/* Must always specify the same mutex! */
		UNLOCK(cond->lock);
		return (EINVAL);
	}
	cond->waiters++;
	if (cond->waiters == 1) {
		/* First waiter: record the mutex association for destroy checks. */
		_pthread_cond_add(cond, mutex);
		cond->busy = mutex;
	}
	if ((res = pthread_mutex_unlock(mutex)) != ESUCCESS) {
		/* Could not release the mutex: undo the waiter bookkeeping. */
		cond->waiters--;
		if (cond->waiters == 0) {
			_pthread_cond_remove(cond, mutex);
			cond->busy = (pthread_mutex_t *)NULL;
		}
		UNLOCK(cond->lock);
		return (res);
	}
	UNLOCK(cond->lock);
	if (abstime) {
		struct timespec now;
		getclock(TIMEOFDAY, &now);
		/* Compute relative time to sleep */
		then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		then.tv_sec = abstime->tv_sec - now.tv_sec;
		if (then.tv_nsec < 0) {
			/* Borrow one second from tv_sec. */
			then.tv_nsec += 1000000000;	/* nsec/sec */
			then.tv_sec--;
		}
		if (((int)then.tv_sec < 0) ||
		    ((then.tv_sec == 0) && (then.tv_nsec == 0))) {
			/* Deadline already passed: report timeout without sleeping. */
			kern_res = KERN_OPERATION_TIMED_OUT;
		} else {
			MACH_CALL(semaphore_timedwait(cond->sem, then), kern_res);
		}
	} else {
		MACH_CALL(semaphore_wait(cond->sem), kern_res);
	}
	LOCK(cond->lock);
	cond->waiters--;
	if (cond->waiters == 0) {
		/* Last waiter out: drop the mutex association. */
		_pthread_cond_remove(cond, mutex);
		cond->busy = (pthread_mutex_t *)NULL;
	}
	UNLOCK(cond->lock);
	if ((res = pthread_mutex_lock(mutex)) != ESUCCESS) {
		return (res);
	}
	/* Map the Mach wait status to POSIX return codes. */
	if (kern_res == KERN_SUCCESS) {
		return (ESUCCESS);
	} else if (kern_res == KERN_OPERATION_TIMED_OUT) {
		return (ETIMEDOUT);
	} else {
		return (EINVAL);
	}
}