/**
 * \brief Wait on condition variable \a cnd with mutex \a mtx, for at most
 *        the relative time \a time.
 *
 * Converts the c_time into an os_time and delegates to os_condTimedWait.
 * In debug builds the mutex owner administration is cleared around the
 * wait (the wait releases the mutex) and re-claimed afterwards.
 *
 * \param cnd  The condition variable to wait on.
 * \param mtx  The mutex associated with \a cnd; must be held by the caller.
 * \param time The maximum (relative) time to wait.
 *
 * \return os_resultSuccess when signalled, os_resultTimeout on timeout;
 *         any other value indicates failure (also reported and asserted).
 */
c_syncResult
c_condTimedWait (
    c_cond *cnd,
    c_mutex *mtx,
    const c_time time)
{
    os_result result;
    os_time t;

    t.tv_sec = time.seconds;
    t.tv_nsec = time.nanoseconds;
#ifdef NDEBUG
    /* Release build: c_mutex has no owner bookkeeping, pass it through. */
    result = os_condTimedWait(cnd,mtx,&t);
#else
    /* Debug build: the wait atomically releases the mutex, so clear the
     * owner administration first and restore it once the wait returns
     * holding the mutex again. */
    mtx->owner = OS_THREAD_ID_NONE;
    result = os_condTimedWait(cnd,&mtx->mtx,&t);
    mtx->owner = os_threadIdSelf();
#endif
#if 1
    /* TODO: Remove temporary workaround to prevent spinning
     * applications and come up with an actual fix. */
    wait_on_error(result);
#endif
    if((result != os_resultSuccess) && (result != os_resultTimeout)){
        /* Fixed: report previously claimed "c_condWait"/"os_condWait",
         * which misattributed failures to the untimed wait variant. */
        OS_REPORT_1(OS_ERROR, "c_condTimedWait", 0,
                    "os_condTimedWait failed; os_result = %d.", result);
        assert((result == os_resultSuccess) || (result == os_resultTimeout));
    }
    return result;
}
/**
 * \brief Report whether the shared memory is clean.
 *
 * Blocks while the shared memory monitor is still determining whether
 * terminated processes left resources behind and whether those leaked
 * resources can be removed. If they cannot be removed, the shared memory
 * is considered unclean.
 *
 * \param _this The shmMonitor instance.
 *
 * \return TRUE if the shared memory is clean, FALSE otherwise.
 */
c_bool
s_shmMonitorIsClean(
    s_shmMonitor _this)
{
    c_bool clean;
    os_duration pollDelay = 100*OS_DURATION_MILLISECOND;

    os_mutexLock(&_this->mutex);
    /* Poll until either termination is requested or the monitor has
     * decided on the shared memory state. The wait result is deliberately
     * ignored: a timeout simply re-evaluates the loop conditions. */
    for (;;) {
        if (_this->terminate != OS_FALSE) {
            break;
        }
        if (_this->shmState != SHM_STATE_UNKNOWN) {
            break;
        }
        (void) os_condTimedWait(&_this->cleanCondition, &_this->mutex, pollDelay);
    }
    clean = (_this->shmState == SHM_STATE_CLEAN) ? TRUE : FALSE;
    os_mutexUnlock(&_this->mutex);

    return clean;
}
/**
 * \brief Perform a timed wait on condition variable \a cv using the mutex
 *        of the gapi handle embedded in \a object.
 *
 * The caller must hold the handle's mutex; the wait releases it for the
 * duration of the wait and re-acquires it before returning.
 *
 * \param object  The object whose handle supplies the mutex (must be valid).
 * \param cv      The condition variable to wait on.
 * \param timeout Maximum time to wait.
 *
 * \return The result of os_condTimedWait.
 */
os_result
_ObjectTimedWait(
    _Object object,
    os_cond *cv,
    const os_time *timeout)
{
    gapi_handle handle;

    assert(object);
    assert(object->handle);
    assert(cv != NULL);

    handle = (gapi_handle) object->handle;
    assert(handle->magic == MAGIC);

    /* Thin wrapper: delegate directly to the OS abstraction layer. */
    return os_condTimedWait(cv, &handle->mutex, timeout);
}
/**
 * \brief Wait until the kernel manager reports itself active, with a
 *        bounded overall wait of roughly 20 seconds.
 *
 * Polls km->active under the manager's mutex, waking up at least once per
 * second via a timed condition wait. The loop exits when km->active
 * reaches 2 or when about 20 seconds (whole-second granularity) have
 * elapsed since the first check.
 *
 * \param km The kernel manager instance.
 *
 * \return The final value of km->active (callers can compare against 2
 *         to determine whether activation actually completed).
 */
int
s_kernelManagerWaitForActive(
    s_kernelManager km)
{
    int activeCount;
    os_time pollPeriod = {1, 0}; /* wake up once per second */
    os_time now;
    os_time begin;
    os_result osr;

    os_mutexLock(&km->mtx);
    osr = os_resultSuccess;
    begin = os_timeGet();
    now = begin;
    /* Timeout comparison uses seconds only, as in the original contract. */
    while ((km->active < 2) && (now.tv_sec - begin.tv_sec < 20)) {
        osr = os_condTimedWait(&km->cv, &km->mtx, &pollPeriod);
        now = os_timeGet();
    }
    activeCount = km->active;
    os_mutexUnlock(&km->mtx);

    return activeCount;
}
/**
 * \brief Main loop of the networking service.
 *
 * Creates the service and its controller, then periodically renews the
 * service lease and updates heartbeats until termination is requested
 * (by the splice daemon listener, an exit handler, or a fatal error from
 * the controller). On clean shutdown it stops and frees the controller
 * and finalizes the configuration; on a fatal error cleanup is skipped.
 *
 * NOTE(review): `service`, `controller` and `f_exit` appear to be
 * file-scope globals shared with the exit/listener callbacks — confirm
 * against the rest of the file.
 *
 * \param serviceName Name under which the service registers.
 * \param URI         Configuration URI for the service.
 */
static void
nw_serviceMain(
    const char *serviceName,
    const char *URI)
{
    u_serviceManager serviceManager;
    os_time sleepTime;
    os_result waitResult;
    nw_termination terminate;
    os_mutexAttr termMtxAttr;
    os_condAttr termCvAttr;
    c_bool fatal = FALSE;
    v_duration leasePeriod;

    /* Process-private mutex/condition pair used only to sleep between
     * lease renewals and to be woken early on termination. */
    terminate.terminate = FALSE;
    os_mutexAttrInit( & termMtxAttr );
    os_condAttrInit( & termCvAttr );
    termMtxAttr.scopeAttr = OS_SCOPE_PRIVATE;
    termCvAttr.scopeAttr = OS_SCOPE_PRIVATE;
    os_mutexInit( &terminate.mtx, &termMtxAttr );
    os_condInit( &terminate.cv, &terminate.mtx, &termCvAttr );

    /* Create networking service with kernel */
    service = u_serviceNew(URI, NW_ATTACH_TIMEOUT, serviceName, NULL,
                           U_SERVICE_NETWORKING, NULL);

    /* Initialize configuration */
    nw_configurationInitialize(service, serviceName, URI);

    /* Ask service manager for splicedaemon state */
    serviceManager = u_serviceManagerNew(u_participant(service));

    /* Create the controller which starts the updating */
    /* and calls the listener on a fatal error */
    controller = nw_controllerNew(service,controller_onfatal,&fatal);

    if (controller) {
        os_procAtExit(on_exit_handler);

        /* Start the actual engine */
        NW_REPORT_INFO(1, "Networking started");
        NW_TRACE(Mainloop, 1, "Networking started");

        nw_controllerStart(controller);

        /* Change state for spliced */
        u_serviceChangeState(service, STATE_INITIALISING);
        u_serviceChangeState(service, STATE_OPERATIONAL);

        /* Get sleeptime from configuration */
        nw_retrieveLeaseSettings(&leasePeriod, &sleepTime);
        /*sleepTime.tv_sec = 1; */

        /* Loop until termination is requested */
        u_serviceWatchSpliceDaemon(service, nw_splicedaemonListener,
                                   &terminate);
        os_mutexLock( &terminate.mtx );
        while ((!(int)terminate.terminate) &&
               (!(int)fatal) && (!(int)f_exit)) {
            /* Assert my liveliness and the Splicedaemon's liveliness*/
            u_serviceRenewLease(service, leasePeriod);

            /* Check if anybody is still remotely interested */
            nw_controllerUpdateHeartbeats(controller);

            /* Wait before renewing again; the cv is signalled on
             * termination so shutdown is not delayed by a full period. */
            waitResult = os_condTimedWait( &terminate.cv, &terminate.mtx,
                                           &sleepTime );
            if (waitResult == os_resultFail) {
                OS_REPORT(OS_CRITICAL, "nw_serviceMain", 0,
                          "os_condTimedWait failed - thread will terminate");
                fatal = TRUE;
            }
        /* QAC EXPECT 2467; Control variable, terminate, not modified inside loop. That is correct, it is modified by another thread */
        }
        os_mutexUnlock( &terminate.mtx );

        /* keep process here waiting for the exit processing */
        while ((int)f_exit){os_nanoSleep(sleepTime);}

        if (!(int)fatal ) {
            /* Grant a generous final lease so spliced does not declare the
             * service dead while it is shutting down in an orderly way. */
            leasePeriod.seconds = 20;
            leasePeriod.nanoseconds = 0;
            u_serviceRenewLease(service, leasePeriod);
            u_serviceChangeState(service, STATE_TERMINATING);
            nw_controllerStop(controller);
            nw_controllerFree(controller);
            controller = NULL;
            NW_REPORT_INFO(1, "Networking stopped");
            NW_TRACE(Mainloop, 1, "Networking stopped");
        }
    }
    if (!(int)fatal ) {
        nw_configurationFinalize();

        /* Clean up */
        u_serviceChangeState(service, STATE_TERMINATED);
        u_serviceManagerFree(serviceManager);
        u_serviceFree(service);
    }
}
/**
 * \brief Service-lease renewal thread: periodically checks that all known
 *        threads are making progress and, only if they all are, invokes
 *        the renew callback.
 *
 * Progress is judged per thread from its vtime/watchdog counters; a thread
 * that is asleep or whose watchdog advanced since the last check counts as
 * alive. Transitions between alive and stuck are logged, and stack traces
 * are dumped on the first transition to "not all alive". Runs until
 * sl->keepgoing is cleared (under sl->lock).
 *
 * \param sl The servicelease administration; sl->lock guards keepgoing and
 *           the condition variable used for the periodic sleep.
 *
 * \return Always NULL (thread entry point signature).
 */
static void *lease_renewal_thread (struct nn_servicelease *sl)
{
  /* Do not check more often than once every 100ms (no particular
     reason why it has to be 100ms), regardless of the lease settings.
     Note: can't trust sl->self, may have been scheduled before the
     assignment. */
  const os_int64 min_progress_check_intv = 100 * T_MILLISECOND;
  struct thread_state1 *self = lookup_thread_state ();
  nn_mtime_t next_thread_cputime = { 0 };
  nn_mtime_t tlast = { 0 };
  int was_alive = 1;
  unsigned i;

  /* Seed the per-thread administration: mark everyone alive and make the
     stored watchdog differ from the current one so the first real check
     sees progress. */
  for (i = 0; i < thread_states.nthreads; i++)
  {
    sl->av_ary[i].alive = 1;
    sl->av_ary[i].wd = thread_states.ts[i].watchdog - 1;
  }
  os_mutexLock (&sl->lock);
  while (sl->keepgoing)
  {
    unsigned n_alive = 0;
    nn_mtime_t tnow = now_mt ();

    LOG_THREAD_CPUTIME (next_thread_cputime);

    TRACE (("servicelease: tnow %"PA_PRId64":", tnow.v));

    /* Check progress only if enough time has passed: there is no
       guarantee that os_cond_timedwait wont ever return early, and we
       do want to avoid spurious warnings. */
    if (tnow.v < tlast.v + min_progress_check_intv)
    {
      n_alive = thread_states.nthreads;
    }
    else
    {
      tlast = tnow;
      for (i = 0; i < thread_states.nthreads; i++)
      {
        if (thread_states.ts[i].state != THREAD_STATE_ALIVE)
          /* Threads that are not in the ALIVE state cannot be stuck;
             count them as making progress. */
          n_alive++;
        else
        {
          vtime_t vt = thread_states.ts[i].vtime;
          vtime_t wd = thread_states.ts[i].watchdog;
          /* Alive when asleep (vtime or watchdog) or when the watchdog
             advanced since the previous check. */
          int alive = vtime_asleep_p (vt) || vtime_asleep_p (wd) ||
            vtime_gt (wd, sl->av_ary[i].wd);
          n_alive += (unsigned) alive;
          TRACE ((" %d(%s):%c:%u:%u->%u:", i, thread_states.ts[i].name,
                  alive ? 'a' : 'd', vt, sl->av_ary[i].wd, wd));
          sl->av_ary[i].wd = wd;
          /* Log only transitions between alive and stuck, not every
             check. */
          if (sl->av_ary[i].alive != alive)
          {
            const char *name = thread_states.ts[i].name;
            const char *msg;
            if (!alive)
              msg = "failed to make progress";
            else
              msg = "once again made progress";
            NN_WARNING2 ("thread %s %s\n", name ? name : "(anon)", msg);
            sl->av_ary[i].alive = (char) alive;
          }
        }
      }
    }

    /* Only renew the lease if all threads are alive, so that one
       thread blocking for a while but not too extremely long will
       cause warnings for that thread in the log file, but won't cause
       the DDSI2 service to be marked as dead. */
    if (n_alive == thread_states.nthreads)
    {
      TRACE ((": [%d] renewing\n", n_alive));
      /* FIXME: perhaps it would be nice to control automatic
         liveliness updates from here.
         FIXME: should terminate failure of renew_cb() */
      sl->renew_cb (sl->renew_arg);
      was_alive = 1;
    }
    else
    {
      TRACE ((": [%d] NOT renewing\n", n_alive));
      /* Dump stack traces once per alive->stuck transition, not on
         every failed check. */
      if (was_alive)
        log_stack_traces ();
      was_alive = 0;
    }

#if SYSDEPS_HAVE_GETRUSAGE
    /* If getrusage() is available, use it to log CPU and memory
       statistics to the trace.  Getrusage() can't fail if the
       parameters are valid, and these are by the book.  Still we
       check. */
    if (config.enabled_logcats & LC_TIMING)
    {
      struct rusage u;
      if (getrusage (RUSAGE_SELF, &u) == 0)
      {
        nn_log (LC_TIMING,
                "rusage: utime %d.%06d stime %d.%06d maxrss %ld data %ld vcsw %ld ivcsw %ld\n",
                (int) u.ru_utime.tv_sec, (int) u.ru_utime.tv_usec,
                (int) u.ru_stime.tv_sec, (int) u.ru_stime.tv_usec,
                u.ru_maxrss, u.ru_idrss, u.ru_nvcsw, u.ru_nivcsw);
      }
    }
#endif

    /* Sleep until the next check, or until signalled (e.g. shutdown).
       Result deliberately ignored: the loop re-checks keepgoing. */
    os_condTimedWait (&sl->cond, &sl->lock, sl->sleepTime);

    /* We are never active in a way that matters for the garbage
       collection of old writers, &c. */
    thread_state_asleep (self);
  }
  os_mutexUnlock (&sl->lock);
  return NULL;
}
/**
 * \brief Block on the waitset until it is triggered, the timeout expires,
 *        or the waitset is deleted, invoking \a action as appropriate.
 *
 * With exactly one attached domain entry the wait is delegated to
 * u_waitsetEntryWait (outside the waitset mutex); otherwise the call
 * blocks on the waitset's own condition variable. The waitBusy flag
 * ensures only one wait call is active at a time, and the call first
 * drains any in-progress detach (detachCnt) before marking itself busy.
 *
 * \param _this   The waitset (must not be NULL).
 * \param action  Callback applied while waiting (must not be NULL).
 * \param arg     Opaque argument passed to \a action.
 * \param timeout Maximum wait duration; may be infinite.
 *
 * \return U_RESULT_OK, U_RESULT_TIMEOUT, U_RESULT_ALREADY_DELETED,
 *         U_RESULT_PRECONDITION_NOT_MET (another wait is busy) or
 *         U_RESULT_INTERNAL_ERROR.
 */
u_result
u_waitsetWaitAction (
    const u_waitset _this,
    u_waitsetAction action,
    void *arg,
    const os_duration timeout)
{
    u_result result = U_RESULT_OK;
    os_result osr;
    c_ulong length;

    assert(_this != NULL);
    assert(action != NULL);
    assert(OS_DURATION_ISPOSITIVE(timeout));

    osr = os_mutexLock_s(&_this->mutex);
    if (osr == os_resultSuccess) {
        if (!_this->alive) {
            result = U_RESULT_ALREADY_DELETED;
        }
        if (result == U_RESULT_OK) {
            if (!_this->waitBusy) {
                /* Wait for possible detach to complete.
                 * If you don't do that, it's possible that this wait call sets
                 * the waitBusy flag before the detach can wake up of its waitBusy
                 * loop, meaning that the detach will block at least until the
                 * waitset is triggered again. */
                while (_this->detachCnt > 0) {
                    os_condWait(&_this->waitCv, &_this->mutex);
                }
                _this->waitBusy = TRUE;
                length = c_iterLength(_this->entries);
                if (length == 1) {
                    /* Single Domain Mode. */
                    u_waitsetEntry entry = c_iterObject(_this->entries,0);
                    /* Drop the waitset mutex while blocking in the entry;
                     * waitBusy keeps other wait calls out in the meantime. */
                    os_mutexUnlock(&_this->mutex);
                    result = u_waitsetEntryWait(entry, action, arg, timeout);
                    os_mutexLock(&_this->mutex);
                    _this->waitBusy = FALSE;
                    /* Wake anyone (e.g. a detach) blocked on waitBusy. */
                    os_condBroadcast(&_this->waitCv);
                    os_mutexUnlock(&_this->mutex);
                    /* NOTE(review): alive is read here after the unlock —
                     * presumably a benign race; confirm intended. */
                    if ((result == U_RESULT_OK) && (_this->alive == FALSE)) {
                        result = U_RESULT_ALREADY_DELETED;
                    }
                } else {
                    /* Multi Domain Mode (or no Domain). */
                    if (OS_DURATION_ISINFINITE(timeout)) {
                        os_condWait(&_this->cv, &_this->mutex);
                        osr = os_resultSuccess;
                    } else {
                        osr = os_condTimedWait(&_this->cv, &_this->mutex, timeout);
                    }
                    _this->waitBusy = FALSE;
                    os_condBroadcast(&_this->waitCv);
                    /* Map the OS wait result onto a u_result. */
                    switch (osr) {
                    case os_resultSuccess:
                        if (_this->alive == TRUE) {
                            result = U_RESULT_OK;
                        } else {
                            result = U_RESULT_ALREADY_DELETED;
                        }
                        break;
                    case os_resultTimeout:
                        result = U_RESULT_TIMEOUT;
                        break;
                    default:
                        result = U_RESULT_INTERNAL_ERROR;
                        OS_REPORT(OS_ERROR,
                                  "u_waitsetWaitAction", result,
                                  "os_condWait failed for waitset 0x" PA_ADDRFMT,
                                  (PA_ADDRCAST)_this);
                        break;
                    }
                    os_mutexUnlock(&_this->mutex);
                }
            } else {
                /* Another wait call is already active on this waitset. */
                os_mutexUnlock(&_this->mutex);
                result = U_RESULT_PRECONDITION_NOT_MET;
            }
        } else {
            os_mutexUnlock(&_this->mutex);
        }
    } else {
        result = U_RESULT_INTERNAL_ERROR;
        OS_REPORT(OS_ERROR,
                  "u_waitsetWaitAction", result,
                  "os_mutexLock failed for waitset 0x" PA_ADDRFMT,
                  (PA_ADDRCAST)_this);
    }
    return result;
}
/**
 * \brief Variant of u_waitsetWaitAction that first evaluates conditions
 *        and only blocks when none are true.
 *
 * In multi-domain mode the attached entries' conditions and the guard
 * conditions (action(NULL,arg)) are tested first; the call blocks on the
 * waitset's condition variable only when nothing is already triggered.
 * In single-domain mode the wait is delegated to u_waitsetEntryWait2.
 * A detach occurring during the wait is reported via U_RESULT_DETACHING
 * (notifyDetached handshake).
 *
 * \param _this   The waitset (must not be NULL).
 * \param action  Condition-evaluation callback; called with NULL entry to
 *                test guard conditions.
 * \param arg     Opaque argument passed to \a action.
 * \param timeout Maximum wait duration; may be infinite.
 *
 * \return U_RESULT_OK, U_RESULT_TIMEOUT, U_RESULT_DETACHING,
 *         U_RESULT_ALREADY_DELETED, U_RESULT_PRECONDITION_NOT_MET or
 *         U_RESULT_INTERNAL_ERROR.
 */
u_result
u_waitsetWaitAction2 (
    const u_waitset _this,
    u_waitsetAction2 action,
    void *arg,
    const os_duration timeout)
{
    u_result result = U_RESULT_OK;
    os_result osr;
    c_ulong length;
    struct checkArg a;
    a.action = action;
    a.arg = arg;
    a.count = 0;

    assert(_this != NULL);
    assert(OS_DURATION_ISPOSITIVE(timeout));

    osr = os_mutexLock_s(&_this->mutex);
    if (osr == os_resultSuccess) {
        if (!_this->alive) {
            result = U_RESULT_ALREADY_DELETED;
            OS_REPORT(OS_ERROR,
                      "u_waitsetWaitAction2", result,
                      "Precondition not met: Waitset is already deleted");
        }
        if (_this->waitBusy) {
            result = U_RESULT_PRECONDITION_NOT_MET;
            OS_REPORT(OS_ERROR,
                      "u_waitsetWaitAction2", result,
                      "Precondition not met: A Wait call is already active on this Waitset");
        }
        if (result == U_RESULT_OK) {
            /* Wait for possible detach to complete.
             * If you don't do that, it's possible that this wait call sets
             * the waitBusy flag before the detach can wake up of its waitBusy
             * loop, meaning that the detach will block at least until the
             * waitset is triggered again. */
            while (_this->detachCnt > 0) {
                os_condWait(&_this->waitCv, &_this->mutex);
            }
            length = c_iterLength(_this->entries);
            if (length == 1) {
                /* Single Domain Mode. */
                u_waitsetEntry entry = c_iterObject(_this->entries,0);
                _this->waitBusy = TRUE;
                /* Drop the waitset mutex while blocking in the entry;
                 * waitBusy keeps other wait calls out in the meantime. */
                os_mutexUnlock(&_this->mutex);
                result = u_waitsetEntryWait2(entry, action, arg, timeout);
                os_mutexLock(&_this->mutex);
                _this->waitBusy = FALSE;
                /* A detach during the wait overrides the wait result. */
                if (_this->notifyDetached) {
                    result = U_RESULT_DETACHING;
                    _this->notifyDetached = OS_FALSE;
                }
                os_condBroadcast(&_this->waitCv);
                os_mutexUnlock(&_this->mutex);
                /* NOTE(review): alive is read here after the unlock —
                 * presumably a benign race; confirm intended. */
                if ((result == U_RESULT_OK) && (_this->alive == FALSE)) {
                    result = U_RESULT_ALREADY_DELETED;
                    OS_REPORT(OS_ERROR,
                              "u_waitsetWaitAction2", result,
                              "Precondition not met: Waitset is already deleted");
                }
            } else {
                /* Multi Domain Mode (or no Domain). */
                a.count = 0;
                /* For each Domain test Conditions. */
                (void)c_iterWalkUntil(_this->entries, check_entry_conditions, &a);
                /* Test Guard Conditions */
                if ((a.count == 0) && (!action(NULL,arg))) {
                    a.count++;
                }
                /* If No Conditions are true then wait. */
                if (a.count == 0) {
                    _this->waitBusy = TRUE;
                    if (OS_DURATION_ISINFINITE(timeout)) {
                        os_condWait(&_this->cv, &_this->mutex);
                        osr = os_resultSuccess;
                    } else {
                        osr = os_condTimedWait(&_this->cv, &_this->mutex, timeout);
                    }
                    _this->waitBusy = FALSE;
                    /* Wake anyone (e.g. a detach) blocked on waitBusy. */
                    os_condBroadcast(&_this->waitCv);
                    /* Map the OS wait result onto a u_result. */
                    switch (osr) {
                    case os_resultSuccess:
                        if (_this->alive == TRUE) {
                            if (_this->notifyDetached) {
                                result = U_RESULT_DETACHING;
                                _this->notifyDetached = OS_FALSE;
                            } else {
                                result = U_RESULT_OK;
                            }
                        } else {
                            result = U_RESULT_ALREADY_DELETED;
                            OS_REPORT(OS_ERROR,
                                      "u_waitsetWaitAction2", result,
                                      "Precondition not met: Waitset is already deleted");
                        }
                        break;
                    case os_resultTimeout:
                        result = U_RESULT_TIMEOUT;
                        break;
                    default:
                        result = U_RESULT_INTERNAL_ERROR;
                        OS_REPORT(OS_ERROR,
                                  "u_waitsetWaitAction2", result,
                                  "os_condWait failed for waitset 0x" PA_ADDRFMT,
                                  (PA_ADDRCAST)_this);
                        break;
                    }
                }
                os_mutexUnlock(&_this->mutex);
            }
        } else {
            os_mutexUnlock(&_this->mutex);
        }
    } else {
        result = U_RESULT_INTERNAL_ERROR;
        OS_REPORT(OS_ERROR,
                  "u_waitsetWaitAction2", result,
                  "os_mutexLock failed for waitset 0x" PA_ADDRFMT,
                  (PA_ADDRCAST)_this);
    }
    return result;
}