u_result
u_waitsetTrigger(
    const u_waitset _this)
{
    assert(_this != NULL);

    os_condSignal(&_this->cv);

    return U_RESULT_OK;
}
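Note that u_waitsetTrigger signals without visibly taking the waitset mutex; whether that is safe depends on what the caller holds, which this excerpt does not show. For reference, a minimal sketch of the canonical pairing these os_condSignal() calls rely on — a predicate updated and signalled under a mutex, with the waiter re-checking it in a loop. All names here (guard, cv, triggered) are hypothetical, not actual u_waitset fields:

/* Sketch only, not OpenSplice source. */
#include "os_mutex.h"
#include "os_cond.h"

static os_mutex guard;
static os_cond  cv;
static int      triggered = 0;     /* predicate guarded by 'guard' */

static void
wait_for_trigger(void)
{
    os_mutexLock(&guard);
    /* Loop: os_condWait() may wake spuriously, so re-check the predicate. */
    while (!triggered) {
        os_condWait(&cv, &guard);  /* atomically releases 'guard' and blocks */
    }
    triggered = 0;                 /* consume the wakeup */
    os_mutexUnlock(&guard);
}

static void
trigger(void)
{
    os_mutexLock(&guard);
    triggered = 1;                 /* update predicate before signalling */
    os_condSignal(&cv);
    os_mutexUnlock(&guard);
}

Signalling while holding the mutex guarantees the waiter cannot miss the wakeup between testing the predicate and blocking.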
void
cms_clientFree(
    cms_client client)
{
    struct soap* soap;
    cms_soapThread soapThread;

    /* Mark the client thread as terminating and wake it up. */
    cms_thread(client)->terminate = TRUE;
    os_mutexLock(&client->conditionMutex);
    os_condSignal(&client->condition);
    os_mutexUnlock(&client->conditionMutex);
    cms_threadDeinit(cms_thread(client));

    /* Fault and release any SOAP requests that are still queued. */
    if(client->soapEnvs){
        os_mutexLock(&client->soapMutex);
        soap = (struct soap*)(c_iterTakeFirst(client->soapEnvs));

        while(soap){
            soap->error = soap_receiver_fault(soap, "Service is terminating.", NULL);
            soap_send_fault(soap);
            soap_destroy(soap);
            soap_end(soap);
            soap_done(soap);
            os_free(soap);
            soap = (struct soap*)(c_iterTakeFirst(client->soapEnvs));
        }
        c_iterFree(client->soapEnvs);
        client->soapEnvs = NULL;
        os_mutexUnlock(&client->soapMutex);
    }

    /* Stop and free all worker threads owned by this client. */
    if(client->threads){
        soapThread = cms_soapThread(c_iterTakeFirst(client->threads));

        while(soapThread){
            cms_soapThreadFree(soapThread);
            (void)u_observableAction(u_observable(client->service->uservice),
                                     cms_clientStatisticsThreadRemove,
                                     client->service);
            soapThread = cms_soapThread(c_iterTakeFirst(client->threads));
        }
        c_iterFree(client->threads);
        client->threads = NULL;
    }
    os_mutexDestroy(&client->soapMutex);
    os_mutexDestroy(&client->threadMutex);
    os_mutexDestroy(&client->conditionMutex);
    os_condDestroy(&client->condition);
    client->initCount = 0;

    if(client->service->configuration->verbosity >= 5){
        OS_REPORT(OS_INFO, CMS_CONTEXT, 0,
                  "Client thread stopped for IP: %d.%d.%d.%d",
                  (int)(client->ip>>24)&0xFF,
                  (int)(client->ip>>16)&0xFF,
                  (int)(client->ip>>8)&0xFF,
                  (int)(client->ip&0xFF));
    }
    os_free(client); /* assumption: the excerpt was truncated here; a Free
                      * routine is expected to release the object itself. */
}
c_syncResult
c_condSignal (
    c_cond *cnd)
{
    os_result result;

    result = os_condSignal(cnd);

    if(result != os_resultSuccess){
        OS_REPORT_1(OS_ERROR, "c_condSignal", 0,
                    "os_condSignal failed; os_result = %d.", result);
        assert(result == os_resultSuccess);
    }
    return result;
}
void nn_servicelease_free (struct nn_servicelease *sl)
{
  if (sl->keepgoing != -1)
  {
    /* Tell the lease thread to stop, wake it, and wait for it to exit. */
    os_mutexLock (&sl->lock);
    sl->keepgoing = 0;
    os_condSignal (&sl->cond);
    os_mutexUnlock (&sl->lock);
    join_thread (sl->ts, (void **) 0);
  }
  os_condDestroy (&sl->cond);
  os_mutexDestroy (&sl->lock);
  os_free (sl->av_ary);
  os_free (sl);
}
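nn_servicelease_free is one half of a stop handshake: clear keepgoing under the lock, signal, join. A sketch of the renewal-thread side this presumably pairs with; renew_interval and renew_lease() are hypothetical, and this is not the actual lease_renewal_thread from the ddsi service-lease code:

/* Sketch only. */
static void renew_lease (struct nn_servicelease *sl); /* hypothetical */

static void *lease_renewal_thread (void *varg)
{
  struct nn_servicelease *sl = varg;
  const os_duration renew_interval = 100 * OS_DURATION_MILLISECOND;
  os_mutexLock (&sl->lock);
  while (sl->keepgoing)
  {
    renew_lease (sl);
    /* Sleep until the next renewal or until _free() signals sl->cond;
       a timed wait keeps the thread responsive to both. */
    (void) os_condTimedWait (&sl->cond, &sl->lock, renew_interval);
  }
  os_mutexUnlock (&sl->lock);
  return NULL;
}

The initial keepgoing != -1 test in _free suggests -1 marks a lease that was never started, so the signal/join pair is skipped when there is no thread to stop.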
c_bool
cms_soapThreadHandleRequest(
    cms_soapThread thread,
    struct soap* soap)
{
    c_bool result;

    if(cms_thread(thread)->terminate == FALSE){
        /* Hand the request to the worker thread and wake it up. */
        os_mutexLock(&thread->soapMutex);
        cms_thread(thread)->ready = FALSE;
        thread->soap = soap;
        os_condSignal(&thread->condition);
        os_mutexUnlock(&thread->soapMutex);
        result = TRUE;
    } else {
        result = FALSE;
    }
    return result;
}
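cms_soapThreadHandleRequest is the producing half of the handshake: it parks the request in thread->soap and signals thread->condition. A sketch of what the consuming worker loop could look like under those assumptions — this is not the actual cms_soapThread main function; soap_serve() is the standard gSOAP request dispatcher:

/* Sketch only. */
static void *
soap_worker_main(
    void *arg)
{
    cms_soapThread thread = cms_soapThread(arg);
    struct soap *soap;

    os_mutexLock(&thread->soapMutex);
    while(cms_thread(thread)->terminate == FALSE){
        /* Block until a request arrives or termination is signalled. */
        while((thread->soap == NULL) && (cms_thread(thread)->terminate == FALSE)){
            os_condWait(&thread->condition, &thread->soapMutex);
        }
        if(thread->soap != NULL){
            soap = thread->soap;
            thread->soap = NULL;
            os_mutexUnlock(&thread->soapMutex);
            soap_serve(soap);          /* gSOAP: dispatch the pending request */
            os_mutexLock(&thread->soapMutex);
            cms_thread(thread)->ready = TRUE;
        }
    }
    os_mutexUnlock(&thread->soapMutex);
    return NULL;
}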
static u_result
waitset_notify (
    const u_waitset _this,
    void *eventArg)
{
    u_result result = U_RESULT_OK;
    c_ulong length;

    assert(_this != NULL);

    length = c_iterLength(_this->entries);
    if (length == 1) {
        /* Single Domain Mode. */
        result = u_waitsetEntryTrigger(c_iterObject(_this->entries, 0), eventArg);
    } else {
        /* Multi Domain Mode (or no Domain). */
        os_condSignal(&_this->cv);
        result = U_RESULT_OK;
    }
    return result;
}
void
cms_soapThreadFree(
    cms_soapThread thread)
{
    if(thread->client->service->configuration->verbosity >= 6){
        OS_REPORT_1(OS_INFO, CMS_CONTEXT, 0,
                    "Stopping soapThread '%s'...", cms_thread(thread)->name);
    }
    /* Signal the worker to terminate; cms_threadDeinit waits for it. */
    os_mutexLock(&thread->soapMutex);
    cms_thread(thread)->terminate = TRUE;
    os_condSignal(&thread->condition);
    os_mutexUnlock(&thread->soapMutex);
    cms_threadDeinit(cms_thread(thread));

    if(thread->client->service->configuration->verbosity >= 6){
        OS_REPORT_1(OS_INFO, CMS_CONTEXT, 0,
                    "soapThread '%s' stopped.", cms_thread(thread)->name);
    }
    os_condDestroy(&thread->condition);
    os_mutexDestroy(&thread->soapMutex);
    os_free(cms_thread(thread)->uri);
    os_free(thread);
}
static void*
shmMonitorMain(
    void* arg)
{
    os_sharedHandle shmHandle;
    u_result cleanupResult;
    os_result result;
    os_duration blockingTime = 10*OS_DURATION_MILLISECOND;
    os_shmClient clients, client;
    s_shmMonitor _this = (s_shmMonitor)arg;
    os_procId ownPID;

    ownPID = os_procIdSelf();
    shmHandle = u_domainSharedMemoryHandle(
                    u_participantDomain(
                        u_participant(
                            splicedGetService(_this->spliceDaemon))));

    os_mutexLock(&_this->mutex);
    while(_this->terminate == OS_FALSE){
        clients = NULL;
        os_mutexUnlock(&_this->mutex);
        ut_threadAsleep(_this->thr, 1);
        result = os_sharedMemoryWaitForClientChanges(shmHandle, blockingTime, &clients);
        os_mutexLock(&_this->mutex);
        if(result == os_resultSuccess){
            client = clients;
            _this->shmState = SHM_STATE_UNKNOWN;
            while(client){
                if(client->state == OS_SHM_PROC_TERMINATED){
                    if(client->procId != ownPID){
                        OS_REPORT(OS_WARNING, OSRPT_CNTXT_SPLICED, 0,
                                  "Detected termination of process %d, that failed "
                                  "to clean up its resources before terminating. "
                                  "Attempting to clean up its resources now...",
                                  client->procId);
                        os_mutexUnlock(&_this->mutex);
                        /* Allow the u_splicedCleanupProcessInfo() to take as
                         * long as MAX(leasePeriod, serviceTerminatePeriod).
                         * This is set in the threadsMonitor as the threads
                         * interval.
                         * By indicating that it'll sleep for 1 second, it
                         * is allowed to stay dormant for that 1 second plus
                         * the threads interval.
                         */
                        ut_threadAsleep(_this->thr, 1);
                        cleanupResult = u_splicedCleanupProcessInfo(
                                            splicedGetService(_this->spliceDaemon),
                                            client->procId);
                        os_mutexLock(&_this->mutex);
                        if(cleanupResult != U_RESULT_OK){
                            OS_REPORT(OS_FATAL, OSRPT_CNTXT_SPLICED, 0,
                                      "Cleaning up resources of terminated process "
                                      "%d failed, because process was modifying "
                                      "shared resources when it terminated, "
                                      "stopping domain now...", client->procId);
                            _this->shmState = SHM_STATE_UNCLEAN;
                            os_condSignal(&_this->cleanCondition);
                            splicedSignalTerminate(_this->spliceDaemon,
                                                   SPLICED_EXIT_CODE_RECOVERABLE_ERROR,
                                                   SPLICED_SHM_NOK);
                        } else {
                            OS_REPORT(OS_INFO, OSRPT_CNTXT_SPLICED, 0,
                                      "Successfully cleaned up resources of "
                                      "terminated process %d.", client->procId);
                        }
                    } else {
                        OS_REPORT(OS_FATAL, OSRPT_CNTXT_SPLICED, 0,
                                  "Detected unexpected detach of kernel by my own "
                                  "process, stopping domain now...");
                        _this->shmState = SHM_STATE_UNCLEAN;
                        os_condSignal(&_this->cleanCondition);
                        splicedSignalTerminate(_this->spliceDaemon,
                                               SPLICED_EXIT_CODE_RECOVERABLE_ERROR,
                                               SPLICED_SHM_NOK);
                    }
                    ut_threadAwake(_this->thr);
                }
                client = client->next;
            }
            os_shmClientFree(clients);

            if (_this->shmState == SHM_STATE_UNKNOWN) {
                _this->shmState = SHM_STATE_CLEAN;
                os_condSignal(&_this->cleanCondition);
            }
        } else if (result == os_resultUnavailable) {
            /* The client list is empty, so give up some CPU time so that it
             * can be initialised on non-timesliced systems, e.g. VxWorks
             * kernel builds.
             */
            ut_sleep(_this->thr, 100*OS_DURATION_MICROSECOND);
        }
    }
    os_mutexUnlock(&_this->mutex);

    return NULL;
}
u_result
u_waitsetAttach(
    const u_waitset _this,
    const u_observable observable,
    void *context)
{
    u_waitsetEntry entry;
    u_domain domain;
    u_result result;
    c_ulong length;
    u_bool changed = FALSE;
    os_result osr;

    assert(_this != NULL);
    assert(observable != NULL);

    osr = os_mutexLock_s(&_this->mutex);
    if (osr == os_resultSuccess) {
        length = c_iterLength(_this->entries);
        domain = u_observableDomain(observable);
        if (domain != NULL) {
            entry = c_iterResolve(_this->entries, compare_domain, domain);
        } else {
            entry = NULL;
        }
        /* No entry for this domain yet: register the waitset with the
         * domain and create a new entry for it. */
        if ((entry == NULL) && (domain != NULL)) {
            result = u_domainAddWaitset(domain, _this);
            if (result == U_RESULT_OK) {
                entry = u_waitsetEntryNew(_this, domain, _this->eventMask);
                if (entry != NULL) {
                    _this->entries = c_iterInsert(_this->entries, entry);
                    changed = TRUE;
                }
            } else {
                result = U_RESULT_INTERNAL_ERROR;
                OS_REPORT(OS_ERROR, "u_waitSetAttach", result,
                          "Failed to add waitset to domain.");
            }
        }
        if (entry != NULL) {
            result = u_waitsetEntryAttach(entry, observable, context);
        } else {
            result = U_RESULT_INTERNAL_ERROR;
            OS_REPORT(OS_ERROR, "u_waitSetAttach", result,
                      "Failed to connect to domain.");
        }
        if (changed == TRUE) {
            if (length == 0) {
                /* Wake up the waitset because it's no longer in zero-domain mode. */
                _this->multi_mode = OS_FALSE;
                os_condSignal(&_this->cv);
                result = U_RESULT_OK;
            } else if (length == 1) {
                _this->multi_mode = OS_TRUE;
                c_iterWalk(_this->entries, set_multi_mode, (c_voidp)&_this->multi_mode);
            }
        }
        os_mutexUnlock(&_this->mutex);
    } else {
        result = U_RESULT_INTERNAL_ERROR;
        OS_REPORT(OS_ERROR, "u_waitSetAttach", result,
                  "Could not lock the waitset.");
    }
    return result;
}