int WaitServerState(char *waiter, PRInt32 state) { PRInt32 rv; PR_Lock(ServerStateCVLock); if (debug_mode) DPRINTF("\t%s waiting for state %d\n", waiter, state); while(!(ServerState & state)) PR_WaitCondVar(ServerStateCV, PR_INTERVAL_NO_TIMEOUT); rv = ServerState; if (debug_mode) DPRINTF("\t%s resuming from wait for state %d; state now %d\n", waiter, state, ServerState); PR_Unlock(ServerStateCVLock); return rv; }
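/*
 * WaitServerState above only returns once some other thread publishes the
 * awaited state bit under the same lock and signals the condition variable.
 * A minimal sketch of that notifying side, assuming the same ServerState,
 * ServerStateCVLock and ServerStateCV globals; the helper name and the
 * OR-in semantics are assumptions, not part of the original source.
 */
void SetServerState(PRInt32 state)
{
    PR_Lock(ServerStateCVLock);
    ServerState |= state;                /* publish the new state bit(s) */
    PR_NotifyAllCondVar(ServerStateCV);  /* wake all waiters; each re-tests its mask */
    PR_Unlock(ServerStateCVLock);
}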
int main(int argc, char **argv)
{
    PRThread *thread;
    PRIntervalTime start, end;
    PRUint32 elapsed_ms;

    lock1 = PR_NewLock();
    PR_ASSERT(NULL != lock1);
    cv1 = PR_NewCondVar(lock1);
    PR_ASSERT(NULL != cv1);
    lock2 = PR_NewLock();
    PR_ASSERT(NULL != lock2);
    cv2 = PR_NewCondVar(lock2);
    PR_ASSERT(NULL != cv2);

    start = PR_IntervalNow();
    thread = PR_CreateThread(
        PR_USER_THREAD, ThreadFunc, NULL,
        PR_PRIORITY_NORMAL, PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0);
    PR_ASSERT(NULL != thread);

    PR_Lock(lock2);
    PR_WaitCondVar(cv2, PR_MillisecondsToInterval(LONG_TIMEOUT));
    PR_Unlock(lock2);

    PR_JoinThread(thread);
    end = PR_IntervalNow();
    elapsed_ms = PR_IntervalToMilliseconds((PRIntervalTime)(end - start));
    /* Allow 100ms imprecision */
    if (elapsed_ms < LONG_TIMEOUT - 100 || elapsed_ms > LONG_TIMEOUT + 100) {
        printf("Elapsed time should be %u ms but is %u ms\n",
               LONG_TIMEOUT, elapsed_ms);
        printf("FAIL\n");
        exit(1);
    }
    printf("Elapsed time: %u ms, expected time: %u ms\n",
           elapsed_ms, LONG_TIMEOUT);
    printf("PASS\n");
    return 0;
}
/** * * CCApp Provider main routine. * * @param arg - CCApp msg queue * * @return void * * @pre None */ void CCApp_task(void * arg) { static const char fname[] = "CCApp_task"; phn_syshdr_t *syshdr = NULL; appListener *listener = NULL; void * msg; // If the "ready to start" condition variable has been created // (is non-null), we're going to wait for it to be signaled // before we start processing messages. if (ccAppReadyToStartCond) { PR_Lock(ccAppReadyToStartLock); while (!ccAppReadyToStart) { PR_WaitCondVar(ccAppReadyToStartCond, PR_INTERVAL_NO_TIMEOUT); } PR_Unlock(ccAppReadyToStartLock); } //initialize the listener list sll_lite_init(&sll_list); CCAppInit(); while (1) { msg = cprGetMessage(ccapp_msgq, TRUE, (void **) &syshdr); if ( msg) { CCAPP_DEBUG(DEB_F_PREFIX"Received Cmd[%d] for app[%d]\n", DEB_F_PREFIX_ARGS(SIP_CC_PROV, fname), syshdr->Cmd, syshdr->Usr.UsrInfo); listener = getCcappListener(syshdr->Usr.UsrInfo); if (listener != NULL) { (* ((appListener)(listener)))(msg, syshdr->Cmd); } else { CCAPP_DEBUG(DEB_F_PREFIX"Event[%d] doesn't have a dedicated listener.\n", DEB_F_PREFIX_ARGS(SIP_CC_PROV, fname), syshdr->Usr.UsrInfo); } cprReleaseSysHeader(syshdr); cpr_free(msg); } } }
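/*
 * A hedged sketch of the counterpart that releases CCApp_task's startup
 * gate, assumed from the globals used above (ccAppReadyToStart,
 * ccAppReadyToStartLock, ccAppReadyToStartCond); the helper name is
 * illustrative and not from the original source. The predicate is set
 * before the notify, under the lock, so the waiter cannot miss it.
 */
void CCApp_signal_ready_to_start(void)
{
    PR_Lock(ccAppReadyToStartLock);
    ccAppReadyToStart = 1;                    /* set the predicate first */
    PR_NotifyCondVar(ccAppReadyToStartCond);  /* then wake CCApp_task */
    PR_Unlock(ccAppReadyToStartLock);
}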
// // Debug implementation of CondVar nsresult CondVar::Wait(PRIntervalTime aInterval) { AssertCurrentThreadOwnsMutex(); // save mutex state and reset to empty CallStack savedAcquisitionContext = mLock->GetAcquisitionContext(); BlockingResourceBase* savedChainPrev = mLock->mChainPrev; mLock->SetAcquisitionContext(CallStack::kNone); mLock->mChainPrev = 0; // give up mutex until we're back from Wait() nsresult rv = PR_WaitCondVar(mCvar, aInterval) == PR_SUCCESS ? NS_OK : NS_ERROR_FAILURE; // restore saved state mLock->SetAcquisitionContext(savedAcquisitionContext); mLock->mChainPrev = savedChainPrev; return rv; }
static PRIntervalTime ConditionNotify(PRUint32 loops)
{
    PRThread *thread;
    NotifyData notifyData;
    PRIntervalTime timein, overhead;

    timein = PR_IntervalNow();
    notifyData.counter = loops;
    notifyData.ml = PR_NewLock();
    notifyData.child = PR_NewCondVar(notifyData.ml);
    notifyData.parent = PR_NewCondVar(notifyData.ml);
    thread = PR_CreateThread(
        PR_USER_THREAD, Notifier, &notifyData,
        PR_GetThreadPriority(PR_GetCurrentThread()),
        thread_scope, PR_JOINABLE_THREAD, 0);

    overhead = PR_IntervalNow() - timein;  /* elapsed so far */

    PR_Lock(notifyData.ml);
    while (notifyData.counter > 0) {
        notifyData.pending = PR_TRUE;
        PR_NotifyCondVar(notifyData.child);
        while (notifyData.pending)
            PR_WaitCondVar(notifyData.parent, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(notifyData.ml);

    timein = PR_IntervalNow();
    (void)PR_JoinThread(thread);
    PR_DestroyCondVar(notifyData.child);
    PR_DestroyCondVar(notifyData.parent);
    PR_DestroyLock(notifyData.ml);
    overhead += (PR_IntervalNow() - timein);  /* more overhead */

    return overhead;
}  /* ConditionNotify */
static PRBool CancelTimer(TimerEvent *timer) { PRBool canceled = PR_FALSE; PR_Lock(tm_vars.ml); timer->ref_count -= 1; if (timer->links.prev == &timer->links) { while (timer->ref_count == 1) { PR_WaitCondVar(tm_vars.cancel_timer, PR_INTERVAL_NO_TIMEOUT); } } else { PR_REMOVE_LINK(&timer->links); canceled = PR_TRUE; } PR_Unlock(tm_vars.ml); PR_DELETE(timer); return canceled; }
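/*
 * CancelTimer's wait loop can only terminate when the thread that fired
 * the timer drops its reference and signals tm_vars.cancel_timer. A
 * hypothetical sketch of that hand-off, based solely on the fields
 * CancelTimer uses above (the firing thread's dispatch loop is elided):
 */
static void ReleaseFiredTimer(TimerEvent *timer)
{
    PR_Lock(tm_vars.ml);
    timer->ref_count -= 1;                   /* drop the firing thread's reference */
    PR_NotifyCondVar(tm_vars.cancel_timer);  /* unblock CancelTimer's wait loop */
    PR_Unlock(tm_vars.ml);
}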
static PRIntervalTime Alarms1(PRUint32 loops) { PRAlarm *alarm; AlarmData ad; PRIntervalTime overhead, timein = PR_IntervalNow(); PRIntervalTime duration = PR_SecondsToInterval(3); PRLock *ml = PR_NewLock(); PRCondVar *cv = PR_NewCondVar(ml); ad.ml = ml; ad.cv = cv; ad.rate = 1; ad.times = loops; ad.late = ad.times = 0; ad.duration = duration; ad.timein = PR_IntervalNow(); ad.period = PR_SecondsToInterval(1); alarm = PR_CreateAlarm(); (void)PR_SetAlarm( alarm, ad.period, ad.rate, AlarmFn1, &ad); overhead = PR_IntervalNow() - timein; PR_Lock(ml); while ((PRIntervalTime)(PR_IntervalNow() - ad.timein) < duration) PR_WaitCondVar(cv, PR_INTERVAL_NO_TIMEOUT); PR_Unlock(ml); timein = PR_IntervalNow(); (void)PR_DestroyAlarm(alarm); PR_DestroyCondVar(cv); PR_DestroyLock(ml); overhead += (PR_IntervalNow() - timein); return duration + overhead; } /* Alarms1 */
/*
** Wait on a Semaphore.
**
** This routine allows a calling thread to wait or proceed depending upon the
** state of the semaphore sem. The thread can proceed only if the counter value
** of the semaphore sem is currently greater than 0. If the value of semaphore
** sem is positive, it is decremented by one and the routine returns immediately,
** allowing the calling thread to continue. If the value of semaphore sem is 0,
** the calling thread blocks, awaiting the semaphore to be released by another
** thread.
**
** This routine can return PR_PENDING_INTERRUPT if the waiting thread
** has been interrupted.
*/
PR_IMPLEMENT(PRStatus) PR_WaitSem(PRSemaphore *sem)
{
    PRStatus status = PR_SUCCESS;

#ifdef HAVE_CVAR_BUILT_ON_SEM
    return _PR_MD_WAIT_SEM(&sem->md);
#else
    PR_Lock(sem->cvar->lock);
    while (sem->count == 0) {
        sem->waiters++;
        status = PR_WaitCondVar(sem->cvar, PR_INTERVAL_NO_TIMEOUT);
        sem->waiters--;
        if (status != PR_SUCCESS)
            break;
    }
    if (status == PR_SUCCESS)
        sem->count--;
    PR_Unlock(sem->cvar->lock);
#endif

    return (status);
}
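/*
 * A minimal usage sketch for the counting-semaphore API documented above.
 * The names and the initial count of 0 are illustrative, not from the
 * original source; PR_NewSem, PR_PostSem and PR_DestroySem are the
 * standard NSPR companions to PR_WaitSem.
 */
static PRSemaphore *work_ready;   /* created once: work_ready = PR_NewSem(0); */

static void ConsumerThread(void *arg)
{
    /* Blocks until a producer has posted at least one unit of work. */
    while (PR_WaitSem(work_ready) == PR_SUCCESS) {
        /* ... consume one unit of work ... */
    }
}

/* Producer side: PR_PostSem(work_ready) increments the counter and wakes
 * one waiter; PR_DestroySem(work_ready) releases the semaphore when done. */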
SECStatus reap_threads(GlobalThreadMgr *threadMGR) { perThread * slot; int i; if (!threadMGR->threadLock) return SECSuccess; PR_Lock(threadMGR->threadLock); while (threadMGR->numRunning > 0) { PR_WaitCondVar(threadMGR->threadEndQ, PR_INTERVAL_NO_TIMEOUT); for (i = 0; i < threadMGR->numUsed; ++i) { slot = &threadMGR->threads[i]; if (slot->running == rs_zombie) { /* Handle cleanup of thread here. */ /* Now make sure the thread has ended OK. */ PR_JoinThread(slot->prThread); slot->running = rs_idle; --threadMGR->numRunning; /* notify the thread launcher. */ PR_NotifyCondVar(threadMGR->threadStartQ); } } } /* Safety Sam sez: make sure count is right. */ for (i = 0; i < threadMGR->numUsed; ++i) { slot = &threadMGR->threads[i]; if (slot->running != rs_idle) { fprintf(stderr, "Thread in slot %d is in state %d!\n", i, slot->running); } } PR_Unlock(threadMGR->threadLock); return SECSuccess; }
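/*
 * reap_threads above sleeps on threadEndQ and is responsible for joining
 * zombies. An illustrative sketch of the other half of the protocol, a
 * worker marking itself finished (assumed from the fields used above;
 * the helper name is not from the original source):
 */
static void thread_finished(GlobalThreadMgr *threadMGR, perThread *slot)
{
    PR_Lock(threadMGR->threadLock);
    slot->running = rs_zombie;                /* advertise: ready to be joined */
    PR_NotifyCondVar(threadMGR->threadEndQ);  /* wake the reaper */
    PR_Unlock(threadMGR->threadLock);
}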
PR_IMPLEMENT(PRStatus) PR_CallOnce( PRCallOnceType *once, PRCallOnceFN func) { if (!_pr_initialized) _PR_ImplicitInitialization(); if (!once->initialized) { if (PR_AtomicSet(&once->inProgress, 1) == 0) { once->status = (*func)(); PR_Lock(mod_init.ml); once->initialized = 1; PR_NotifyAllCondVar(mod_init.cv); PR_Unlock(mod_init.ml); } else { PR_Lock(mod_init.ml); while (!once->initialized) { PR_WaitCondVar(mod_init.cv, PR_INTERVAL_NO_TIMEOUT); } PR_Unlock(mod_init.ml); } } return once->status; }
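/*
 * Typical call-site pattern for PR_CallOnce (illustrative; OnceInit and
 * UseSharedState are not from the original source). The PRCallOnceType
 * must start zeroed, which a static provides. Losers of the atomic race
 * block in the PR_WaitCondVar loop above until the winner's notify.
 */
static PRCallOnceType once;   /* zero-initialized, as required */

static PRStatus OnceInit(void)
{
    /* ... create locks, tables, etc. exactly once ... */
    return PR_SUCCESS;
}

void UseSharedState(void)
{
    if (PR_CallOnce(&once, OnceInit) != PR_SUCCESS)
        return;  /* the one-time initializer reported failure */
    /* ... the initialized state is now safe to use ... */
}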
static void PR_CALLBACK Server(void *arg)
{
    PRStatus rv;
    PRNetAddr serverAddress;
    PRThread *me = PR_GetCurrentThread();
    CSServer_t *server = (CSServer_t*)arg;
    PRSocketOptionData sockOpt;

    server->listener = PR_Socket(domain, SOCK_STREAM, protocol);

    sockOpt.option = PR_SockOpt_Reuseaddr;
    sockOpt.value.reuse_addr = PR_TRUE;
    rv = PR_SetSocketOption(server->listener, &sockOpt);
    TEST_ASSERT(PR_SUCCESS == rv);

    memset(&serverAddress, 0, sizeof(serverAddress));
    if (PR_AF_INET6 != domain)
        rv = PR_InitializeNetAddr(PR_IpAddrAny, DEFAULT_PORT, &serverAddress);
    else
        rv = PR_SetNetAddr(PR_IpAddrAny, PR_AF_INET6, DEFAULT_PORT,
                           &serverAddress);
    rv = PR_Bind(server->listener, &serverAddress);
    TEST_ASSERT(PR_SUCCESS == rv);

    rv = PR_Listen(server->listener, server->backlog);
    TEST_ASSERT(PR_SUCCESS == rv);

    server->started = PR_IntervalNow();
    TimeOfDayMessage("Server started at", me);

    PR_Lock(server->ml);
    server->state = cs_run;
    PR_NotifyCondVar(server->stateChange);
    PR_Unlock(server->ml);

    /*
    ** Create the first worker (actually, a thread that accepts
    ** connections and then processes the work load as needed).
    ** From this point on, additional worker threads are created
    ** as they are needed by existing worker threads.
    */
    rv = CreateWorker(server, &server->pool);
    TEST_ASSERT(PR_SUCCESS == rv);

    /*
    ** From here on this thread is merely hanging around as the contact
    ** point for the main test driver. It's just waiting for the driver
    ** to declare the test complete.
    */
    TEST_LOG(
        cltsrv_log_file, TEST_LOG_VERBOSE,
        ("\tServer(0x%p): waiting for state change\n", me));

    PR_Lock(server->ml);
    while ((cs_run == server->state) && !Aborted(rv)) {
        rv = PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(server->ml);
    PR_ClearInterrupt();

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_INFO,
        ("\tServer(0x%p): shutting down workers\n", me));

    /*
    ** Get all the worker threads to exit. They know how to
    ** clean up after themselves, so this is just a matter of
    ** waiting for chlorine in the pool to take effect. During
    ** this stage we're ignoring interrupts.
    */
    server->workers.minimum = server->workers.maximum = 0;

    PR_Lock(server->ml);
    while (!PR_CLIST_IS_EMPTY(&server->list))
    {
        PRCList *head = PR_LIST_HEAD(&server->list);
        CSWorker_t *worker = (CSWorker_t*)head;
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("\tServer(0x%p): interrupting worker(0x%p)\n", me, worker));
        rv = PR_Interrupt(worker->thread);
        TEST_ASSERT(PR_SUCCESS == rv);
        PR_REMOVE_AND_INIT_LINK(head);
    }

    while (server->pool.workers > 0)
    {
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE,
            ("\tServer(0x%p): waiting for %u workers to exit\n",
             me, server->pool.workers));
        (void)PR_WaitCondVar(server->pool.exiting, PR_INTERVAL_NO_TIMEOUT);
    }

    server->state = cs_exit;
    PR_NotifyCondVar(server->stateChange);
    PR_Unlock(server->ml);

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS,
        ("\tServer(0x%p): stopped after %u operations and %u bytes\n",
         me, server->operations, server->bytesTransferred));

    if (NULL != server->listener)
        PR_Close(server->listener);
    server->stopped = PR_IntervalNow();
}  /* Server */
void ThreadFunc(void *arg) { PR_Lock(lock1); PR_WaitCondVar(cv1, PR_MillisecondsToInterval(SHORT_TIMEOUT)); PR_Unlock(lock1); }
XPCJSRuntime::~XPCJSRuntime() { if (mWatchdogWakeup) { // If the watchdog thread is running, tell it to terminate waking it // up if necessary and wait until it signals that it finished. As we // must release the lock before calling PR_DestroyCondVar, we use an // extra block here. { AutoLockJSGC lock(mJSRuntime); if (mWatchdogThread) { mWatchdogThread = nsnull; PR_NotifyCondVar(mWatchdogWakeup); PR_WaitCondVar(mWatchdogWakeup, PR_INTERVAL_NO_TIMEOUT); } } PR_DestroyCondVar(mWatchdogWakeup); mWatchdogWakeup = nsnull; } #ifdef XPC_DUMP_AT_SHUTDOWN { // count the total JSContexts in use JSContext* iter = nsnull; int count = 0; while(JS_ContextIterator(mJSRuntime, &iter)) count ++; if(count) printf("deleting XPCJSRuntime with %d live JSContexts\n", count); } #endif // clean up and destroy maps... if(mWrappedJSMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mWrappedJSMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live wrapped JSObject\n", (int)count); #endif mWrappedJSMap->Enumerate(WrappedJSShutdownMarker, mJSRuntime); delete mWrappedJSMap; } if(mWrappedJSClassMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mWrappedJSClassMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live nsXPCWrappedJSClass\n", (int)count); #endif delete mWrappedJSClassMap; } if(mIID2NativeInterfaceMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mIID2NativeInterfaceMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live XPCNativeInterfaces\n", (int)count); #endif delete mIID2NativeInterfaceMap; } if(mClassInfo2NativeSetMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mClassInfo2NativeSetMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live XPCNativeSets\n", (int)count); #endif delete mClassInfo2NativeSetMap; } if(mNativeSetMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mNativeSetMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live XPCNativeSets\n", (int)count); #endif delete mNativeSetMap; } if(mMapLock) XPCAutoLock::DestroyLock(mMapLock); if(mThisTranslatorMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mThisTranslatorMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live ThisTranslator\n", (int)count); #endif delete mThisTranslatorMap; } #ifdef XPC_CHECK_WRAPPERS_AT_SHUTDOWN if(DEBUG_WrappedNativeHashtable) { int LiveWrapperCount = 0; JS_DHashTableEnumerate(DEBUG_WrappedNativeHashtable, DEBUG_WrapperChecker, &LiveWrapperCount); if(LiveWrapperCount) printf("deleting XPCJSRuntime with %d live XPCWrappedNative (found in wrapper check)\n", (int)LiveWrapperCount); JS_DHashTableDestroy(DEBUG_WrappedNativeHashtable); } #endif if(mNativeScriptableSharedMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mNativeScriptableSharedMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live XPCNativeScriptableShared\n", (int)count); #endif delete mNativeScriptableSharedMap; } if(mDyingWrappedNativeProtoMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mDyingWrappedNativeProtoMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live but dying XPCWrappedNativeProto\n", (int)count); #endif delete mDyingWrappedNativeProtoMap; } if(mDetachedWrappedNativeProtoMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mDetachedWrappedNativeProtoMap->Count(); if(count) printf("deleting XPCJSRuntime with %d live detached XPCWrappedNativeProto\n", (int)count); #endif delete mDetachedWrappedNativeProtoMap; } if(mExplicitNativeWrapperMap) { #ifdef XPC_DUMP_AT_SHUTDOWN uint32 count = mExplicitNativeWrapperMap->Count(); if(count) printf("deleting 
XPCJSRuntime with %d live explicit XPCNativeWrapper\n", (int)count); #endif delete mExplicitNativeWrapperMap; } // unwire the readable/JSString sharing magic XPCStringConvert::ShutdownDOMStringFinalizer(); XPCConvert::RemoveXPCOMUCStringFinalizer(); if(mJSHolders.ops) { JS_DHashTableFinish(&mJSHolders); mJSHolders.ops = nsnull; } if(mJSRuntime) { JS_DestroyRuntime(mJSRuntime); JS_ShutDown(); #ifdef DEBUG_shaver_off fprintf(stderr, "nJRSI: destroyed runtime %p\n", (void *)mJSRuntime); #endif } XPCPerThreadData::ShutDown(); }
nsresult
MediaEngineWebRTCVideoSource::Snapshot(PRUint32 aDuration, nsIDOMFile** aFile)
{
  /**
   * To get a Snapshot we do the following:
   * - Set a condition variable (mInSnapshotMode) to true
   * - Attach the external renderer and start the camera
   * - Wait for the condition variable to change to false
   *
   * Starting the camera has the effect of invoking DeliverFrame() when
   * the first frame arrives from the camera. We only need one frame for
   * GetCaptureDeviceSnapshot to work, so we immediately set the condition
   * variable to false and notify this method.
   *
   * This causes the current thread to continue (PR_WaitCondVar will return),
   * at which point we can grab a snapshot, convert it to a file and
   * return from this function after cleaning up the temporary stream object
   * and calling Stop() on the media source.
   */
  *aFile = nsnull;
  if (!mInitDone || mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }

  mSnapshotLock = PR_NewLock();
  mSnapshotCondVar = PR_NewCondVar(mSnapshotLock);

  PR_Lock(mSnapshotLock);
  mInSnapshotMode = true;

  // Start the rendering (equivalent to calling Start(), but without a track).
  int error = 0;
  if (!mInitDone || mState != kAllocated) {
    // Release the lock and the snapshot primitives on every early exit,
    // so failure paths do not leak the lock or leave it held.
    PR_Unlock(mSnapshotLock);
    PR_DestroyCondVar(mSnapshotCondVar);
    PR_DestroyLock(mSnapshotLock);
    return NS_ERROR_FAILURE;
  }
  error = mViERender->AddRenderer(mCapIndex, webrtc::kVideoI420,
                                  (webrtc::ExternalRenderer*)this);
  if (error == -1) {
    PR_Unlock(mSnapshotLock);
    PR_DestroyCondVar(mSnapshotCondVar);
    PR_DestroyLock(mSnapshotLock);
    return NS_ERROR_FAILURE;
  }
  error = mViERender->StartRender(mCapIndex);
  if (error == -1) {
    PR_Unlock(mSnapshotLock);
    PR_DestroyCondVar(mSnapshotCondVar);
    PR_DestroyLock(mSnapshotLock);
    return NS_ERROR_FAILURE;
  }

  // Wait for the condition variable, will be set in DeliverFrame.
  // We use a while loop, because even if PR_WaitCondVar returns, it's not
  // guaranteed that the condition variable changed.
  while (mInSnapshotMode) {
    PR_WaitCondVar(mSnapshotCondVar, PR_INTERVAL_NO_TIMEOUT);
  }

  // If we get here, DeliverFrame received at least one frame.
  PR_Unlock(mSnapshotLock);
  PR_DestroyCondVar(mSnapshotCondVar);
  PR_DestroyLock(mSnapshotLock);

  webrtc::ViEFile* vieFile = webrtc::ViEFile::GetInterface(mVideoEngine);
  if (!vieFile) {
    return NS_ERROR_FAILURE;
  }

  // Create a temporary file on the main thread and put the snapshot in it.
  // See Run() in MediaEngineWebRTCVideo.h (sets mSnapshotPath).
  NS_DispatchToMainThread(this, NS_DISPATCH_SYNC);

  if (!mSnapshotPath) {
    return NS_ERROR_FAILURE;
  }

  const char* path = NS_ConvertUTF16toUTF8(*mSnapshotPath).get();
  if (vieFile->GetCaptureDeviceSnapshot(mCapIndex, path) < 0) {
    delete mSnapshotPath;
    mSnapshotPath = NULL;
    return NS_ERROR_FAILURE;
  }

  // Stop the camera.
  mViERender->StopRender(mCapIndex);
  mViERender->RemoveRenderer(mCapIndex);

  nsCOMPtr<nsIFile> file;
  nsresult rv = NS_NewLocalFile(*mSnapshotPath, false, getter_AddRefs(file));

  delete mSnapshotPath;
  mSnapshotPath = NULL;

  NS_ENSURE_SUCCESS(rv, rv);

  NS_ADDREF(*aFile = new nsDOMFileFile(file));

  return NS_OK;
}
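// The wait loop in Snapshot() above is released by the renderer callback.
// A hedged sketch of the corresponding hand-shake in DeliverFrame, assuming
// the same members; the signature follows the webrtc::ExternalRenderer
// interface as used above, and the real method also handles the frame data,
// which is elided here:
int
MediaEngineWebRTCVideoSource::DeliverFrame(unsigned char* buffer, int size,
                                           uint32_t time_stamp,
                                           int64_t render_time)
{
  if (mInSnapshotMode) {
    // Set the condition variable to false and notify Snapshot().
    PR_Lock(mSnapshotLock);
    mInSnapshotMode = false;            // flip the predicate first
    PR_NotifyCondVar(mSnapshotCondVar); // then wake the waiting thread
    PR_Unlock(mSnapshotLock);
    return 0;
  }
  // ... normal frame delivery path elided ...
  return 0;
}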
void referint_thread_func(void *arg)
{
    PRFileDesc *prfd;
    char **plugin_argv = (char **)arg;
    char *logfilename;
    char thisline[MAX_LINE];
    char delimiter[] = "\t\n";
    char *ptoken;
    char *tmprdn;
    char *iter = NULL;
    Slapi_DN *sdn = NULL;
    Slapi_DN *tmpsuperior = NULL;
    int logChanges = 0;
    int delay;
    int no_changes;

    if (plugin_argv == NULL) {
        slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
                        "referint_thread_func: could not get args\n");
        return;
    }
    delay = atoi(plugin_argv[0]);
    logfilename = plugin_argv[1];
    logChanges = atoi(plugin_argv[2]);

    /*
     * keep running this thread until plugin is signaled to close
     */
    while (1) {
        no_changes = 1;
        while (no_changes) {
            PR_Lock(keeprunning_mutex);
            if (keeprunning == 0) {
                PR_Unlock(keeprunning_mutex);
                break;
            }
            PR_Unlock(keeprunning_mutex);

            referint_lock();
            if ((prfd = PR_Open(logfilename, PR_RDONLY,
                                REFERINT_DEFAULT_FILE_MODE)) == NULL) {
                referint_unlock();
                /* go back to sleep and wait for this file */
                PR_Lock(keeprunning_mutex);
                PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay));
                PR_Unlock(keeprunning_mutex);
            } else {
                no_changes = 0;
            }
        }

        /*
         * Check keep running here, because after break out of no
         * changes loop on shutdown, also need to break out of this
         * loop before trying to do the changes. The server
         * will pick them up on next startup as file still exists
         */
        PR_Lock(keeprunning_mutex);
        if (keeprunning == 0) {
            PR_Unlock(keeprunning_mutex);
            break;
        }
        PR_Unlock(keeprunning_mutex);

        while (GetNextLine(thisline, MAX_LINE, prfd)) {
            ptoken = ldap_utf8strtok_r(thisline, delimiter, &iter);
            sdn = slapi_sdn_new_normdn_byref(ptoken);
            ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
            if (!strcasecmp(ptoken, "NULL")) {
                tmprdn = NULL;
            } else {
                tmprdn = slapi_ch_smprintf("%s", ptoken);
            }

            ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
            if (!strcasecmp(ptoken, "NULL")) {
                tmpsuperior = NULL;
            } else {
                tmpsuperior = slapi_sdn_new_normdn_byref(ptoken);
            }

            ptoken = ldap_utf8strtok_r(NULL, delimiter, &iter);
            if (strcasecmp(ptoken, "NULL") != 0) {
                /* Set the bind DN in the thread data */
                if (slapi_td_set_dn(slapi_ch_strdup(ptoken))) {
                    slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
                                    "Failed to set thread data\n");
                }
            }
            update_integrity(plugin_argv, sdn, tmprdn, tmpsuperior, logChanges);

            slapi_sdn_free(&sdn);
            slapi_ch_free_string(&tmprdn);
            slapi_sdn_free(&tmpsuperior);
        }

        PR_Close(prfd);

        /* remove the original file */
        if (PR_SUCCESS != PR_Delete(logfilename)) {
            slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
                            "referint_postop_close could not delete \"%s\"\n",
                            logfilename);
        }

        /* unlock and let other writers back at the file */
        referint_unlock();

        /* wait on condition here */
        PR_Lock(keeprunning_mutex);
        PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay));
        PR_Unlock(keeprunning_mutex);
    }

    /* cleanup resources allocated in start */
    if (NULL != keeprunning_mutex) {
        PR_DestroyLock(keeprunning_mutex);
    }
    if (NULL != referint_mutex) {
        PR_DestroyLock(referint_mutex);
    }
    if (NULL != keeprunning_cv) {
        PR_DestroyCondVar(keeprunning_cv);
    }
}
PRIntn PR_CALLBACK Switch(PRIntn argc, char **argv) { PLOptStatus os; PRStatus status; PRBool help = PR_FALSE; PRUintn concurrency = 1; Shared *shared, *link; PRIntervalTime timein, timeout; PRThreadScope thread_scope = PR_LOCAL_THREAD; PRUintn thread_count, inner_count, loop_count, average; PRUintn thread_limit = DEFAULT_THREADS, loop_limit = DEFAULT_LOOPS; PLOptState *opt = PL_CreateOptState(argc, argv, "hdvc:t:C:G"); while (PL_OPT_EOL != (os = PL_GetNextOpt(opt))) { if (PL_OPT_BAD == os) continue; switch (opt->option) { case 'v': /* verbose mode */ verbosity = PR_TRUE; case 'd': /* debug mode */ debug_mode = PR_TRUE; break; case 'c': /* loop counter */ loop_limit = atoi(opt->value); break; case 't': /* thread limit */ thread_limit = atoi(opt->value); break; case 'C': /* Concurrency limit */ concurrency = atoi(opt->value); break; case 'G': /* global threads only */ thread_scope = PR_GLOBAL_THREAD; break; case 'h': /* help message */ Help(); help = PR_TRUE; break; default: break; } } PL_DestroyOptState(opt); if (help) return -1; if (PR_TRUE == debug_mode) { debug_out = PR_STDOUT; PR_fprintf(debug_out, "Test parameters\n"); PR_fprintf(debug_out, "\tThreads involved: %d\n", thread_limit); PR_fprintf(debug_out, "\tIteration limit: %d\n", loop_limit); PR_fprintf(debug_out, "\tConcurrency: %d\n", concurrency); PR_fprintf( debug_out, "\tThread type: %s\n", (PR_GLOBAL_THREAD == thread_scope) ? "GLOBAL" : "LOCAL"); } PR_SetConcurrency(concurrency); link = &home; home.ml = PR_NewLock(); home.cv = PR_NewCondVar(home.ml); home.twiddle = PR_FALSE; home.next = NULL; timeout = 0; for (thread_count = 1; thread_count <= thread_limit; ++thread_count) { shared = PR_NEWZAP(Shared); shared->ml = home.ml; shared->cv = PR_NewCondVar(home.ml); shared->twiddle = PR_TRUE; shared->next = link; link = shared; shared->thread = PR_CreateThread( PR_USER_THREAD, Notified, shared, PR_PRIORITY_HIGH, thread_scope, PR_JOINABLE_THREAD, 0); PR_ASSERT(shared->thread != NULL); if (NULL == shared->thread) failed = PR_TRUE; } for (loop_count = 1; loop_count <= loop_limit; ++loop_count) { timein = PR_IntervalNow(); for (inner_count = 0; inner_count < INNER_LOOPS; ++inner_count) { PR_Lock(home.ml); home.twiddle = PR_TRUE; shared->twiddle = PR_FALSE; PR_NotifyCondVar(shared->cv); while (home.twiddle) { status = PR_WaitCondVar(home.cv, PR_INTERVAL_NO_TIMEOUT); if (PR_FAILURE == status) failed = PR_TRUE; } PR_Unlock(home.ml); } timeout += (PR_IntervalNow() - timein); } if (debug_mode) { average = PR_IntervalToMicroseconds(timeout) / (INNER_LOOPS * loop_limit * thread_count); PR_fprintf( debug_out, "Average switch times %d usecs for %d threads\n", average, thread_limit); } link = shared; for (thread_count = 1; thread_count <= thread_limit; ++thread_count) { if (&home == link) break; status = PR_Interrupt(link->thread); if (PR_SUCCESS != status) { failed = PR_TRUE; if (debug_mode) PL_FPrintError(debug_out, "Failed to interrupt"); } link = link->next; } for (thread_count = 1; thread_count <= thread_limit; ++thread_count) { link = shared->next; status = PR_JoinThread(shared->thread); if (PR_SUCCESS != status) { failed = PR_TRUE; if (debug_mode) PL_FPrintError(debug_out, "Failed to join"); } PR_DestroyCondVar(shared->cv); PR_DELETE(shared); if (&home == link) break; shared = link; } PR_DestroyCondVar(home.cv); PR_DestroyLock(home.ml); PR_fprintf(PR_STDOUT, ((failed) ? "FAILED\n" : "PASSED\n")); return ((failed) ? 1 : 0); } /* Switch */
PR_RecordTraceEntries( void ) { PRFileDesc *logFile; PRInt32 lostSegments; PRInt32 currentSegment = 0; void *buf; PRBool doWrite; logFile = InitializeRecording(); if ( logFile == NULL ) { PR_LOG( lm, PR_LOG_DEBUG, ("PR_RecordTraceEntries: Failed to initialize")); return; } /* Do this until told to stop */ while ( logState != LogStop ) { PR_Lock( logLock ); while ( (logCount == 0) && ( logOrder == logState ) ) PR_WaitCondVar( logCVar, PR_INTERVAL_NO_TIMEOUT ); /* Handle state transitions */ if ( logOrder != logState ) ProcessOrders(); /* recalculate local controls */ if ( logCount ) { lostSegments = logCount - logSegments; if ( lostSegments > 0 ) { logLostData += ( logCount - logSegments ); logCount = (logCount % logSegments); currentSegment = logCount; PR_LOG( lm, PR_LOG_DEBUG, ("PR_RecordTraceEntries: LostData segments: %ld", logLostData)); } else { logCount--; } buf = tBuf + ( logEntriesPerSegment * currentSegment ); if (++currentSegment >= logSegments ) currentSegment = 0; doWrite = PR_TRUE; } else doWrite = PR_FALSE; PR_Unlock( logLock ); if ( doWrite == PR_TRUE ) { if ( localState != LogSuspend ) WriteTraceSegment( logFile, buf, logSegSize ); else PR_LOG( lm, PR_LOG_DEBUG, ("RecordTraceEntries: PR_Write(): is suspended" )); } } /* end while(logState...) */ PR_Close( logFile ); PR_LOG( lm, PR_LOG_DEBUG, ("RecordTraceEntries: exiting")); return; } /* end PR_RecordTraceEntries() */
void CondVarTest(void *_arg) { PRInt32 arg = (PRInt32)_arg; PRInt32 index, loops; threadinfo *list; PRLock *sharedlock; PRCondVar *sharedcvar; PRLock *exitlock; PRCondVar *exitcvar; PRInt32 *ptcount, *saved_ptcount; exitcount=0; tcount=0; list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4)); saved_ptcount = ptcount = (PRInt32 *)PR_CALLOC(sizeof(*ptcount) * (arg * 4)); sharedlock = PR_NewLock(); sharedcvar = PR_NewCondVar(sharedlock); exitlock = PR_NewLock(); exitcvar = PR_NewCondVar(exitlock); /* Create the threads */ for(index=0; index<arg*4; ) { CreateTestThread(&list[index], index, sharedlock, sharedcvar, count, PR_INTERVAL_NO_TIMEOUT, &tcount, exitlock, exitcvar, &exitcount, PR_TRUE, PR_LOCAL_THREAD); index++; CreateTestThread(&list[index], index, sharedlock, sharedcvar, count, PR_INTERVAL_NO_TIMEOUT, &tcount, exitlock, exitcvar, &exitcount, PR_TRUE, PR_GLOBAL_THREAD); index++; list[index].lock = PR_NewLock(); list[index].cvar = PR_NewCondVar(list[index].lock); CreateTestThread(&list[index], index, list[index].lock, list[index].cvar, count, PR_INTERVAL_NO_TIMEOUT, ptcount, exitlock, exitcvar, &exitcount, PR_FALSE, PR_LOCAL_THREAD); index++; ptcount++; list[index].lock = PR_NewLock(); list[index].cvar = PR_NewCondVar(list[index].lock); CreateTestThread(&list[index], index, list[index].lock, list[index].cvar, count, PR_INTERVAL_NO_TIMEOUT, ptcount, exitlock, exitcvar, &exitcount, PR_FALSE, PR_GLOBAL_THREAD); index++; ptcount++; } for (loops = 0; loops < count; loops++) { /* Notify the threads */ for(index=0; index<(arg*4); index++) { PR_Lock(list[index].lock); (*list[index].tcount)++; PR_NotifyCondVar(list[index].cvar); PR_Unlock(list[index].lock); } #if 0 printf("wait for threads done\n"); #endif /* Wait for threads to finish */ PR_Lock(exitlock); while(exitcount < arg*4) PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60)); PR_ASSERT(exitcount >= arg*4); exitcount -= arg*4; PR_Unlock(exitlock); #if 0 printf("threads ready\n"); #endif } /* Join all the threads */ for(index=0; index<(arg*4); index++) { PR_JoinThread(list[index].thread); if (list[index].internal) { PR_Lock(list[index].lock); PR_DestroyCondVar(list[index].cvar); PR_Unlock(list[index].lock); PR_DestroyLock(list[index].lock); } } PR_DestroyCondVar(sharedcvar); PR_DestroyLock(sharedlock); PR_DestroyCondVar(exitcvar); PR_DestroyLock(exitlock); PR_DELETE(list); PR_DELETE(saved_ptcount); }
void CondVarTestSUU(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;

    exitcount = 0;
    tcount = 0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads */
    for (index = 0; index < arg; ) {
        CreateTestThread(&list[index], index, sharedlock, sharedcvar,
                         count, PR_INTERVAL_NO_TIMEOUT, &tcount,
                         exitlock, exitcvar, &exitcount,
                         PR_TRUE, PR_LOCAL_THREAD);
        /* Log the thread just created, then advance the index. */
        DPRINTF(("CondVarTestSUU: created thread 0x%lx\n", list[index].thread));
        index++;
    }

    for (loops = 0; loops < count; loops++) {
        /* Notify the threads */
        for (index = 0; index < (arg); index++) {
            PR_Lock(list[index].lock);
            (*list[index].tcount)++;
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
            DPRINTF(("PrivateCondVarThread: thread 0x%lx notified cvar = 0x%lx\n",
                     PR_GetCurrentThread(), list[index].cvar));
        }

        /* Wait for threads to finish */
        PR_Lock(exitlock);
        while (exitcount < arg)
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        PR_ASSERT(exitcount >= arg);
        exitcount -= arg;
        PR_Unlock(exitlock);
    }

    /* Join all the threads */
    for (index = 0; index < (arg); index++)
        PR_JoinThread(list[index].thread);

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);
    PR_DestroyCondVar(exitcvar);
    PR_DestroyLock(exitlock);

    PR_DELETE(list);
}
static SECStatus get_blinding_params(RSAPrivateKey *key, mp_int *n, unsigned int modLen, mp_int *f, mp_int *g) { RSABlindingParams *rsabp = NULL; blindingParams *bpUnlinked = NULL; blindingParams *bp, *prevbp = NULL; PRCList *el; SECStatus rv = SECSuccess; mp_err err = MP_OKAY; int cmp = -1; PRBool holdingLock = PR_FALSE; do { if (blindingParamsList.lock == NULL) { PORT_SetError(SEC_ERROR_LIBRARY_FAILURE); return SECFailure; } /* Acquire the list lock */ PZ_Lock(blindingParamsList.lock); holdingLock = PR_TRUE; /* Walk the list looking for the private key */ for (el = PR_NEXT_LINK(&blindingParamsList.head); el != &blindingParamsList.head; el = PR_NEXT_LINK(el)) { rsabp = (RSABlindingParams *)el; cmp = SECITEM_CompareItem(&rsabp->modulus, &key->modulus); if (cmp >= 0) { /* The key is found or not in the list. */ break; } } if (cmp) { /* At this point, the key is not in the list. el should point to ** the list element before which this key should be inserted. */ rsabp = PORT_ZNew(RSABlindingParams); if (!rsabp) { PORT_SetError(SEC_ERROR_NO_MEMORY); goto cleanup; } rv = init_blinding_params(rsabp, key, n, modLen); if (rv != SECSuccess) { PORT_ZFree(rsabp, sizeof(RSABlindingParams)); goto cleanup; } /* Insert the new element into the list ** If inserting in the middle of the list, el points to the link ** to insert before. Otherwise, the link needs to be appended to ** the end of the list, which is the same as inserting before the ** head (since el would have looped back to the head). */ PR_INSERT_BEFORE(&rsabp->link, el); } /* We've found (or created) the RSAblindingParams struct for this key. * Now, search its list of ready blinding params for a usable one. */ while (0 != (bp = rsabp->bp)) { if (--(bp->counter) > 0) { /* Found a match and there are still remaining uses left */ /* Return the parameters */ CHECK_MPI_OK( mp_copy(&bp->f, f) ); CHECK_MPI_OK( mp_copy(&bp->g, g) ); PZ_Unlock(blindingParamsList.lock); return SECSuccess; } /* exhausted this one, give its values to caller, and * then retire it. */ mp_exch(&bp->f, f); mp_exch(&bp->g, g); mp_clear( &bp->f ); mp_clear( &bp->g ); bp->counter = 0; /* Move to free list */ rsabp->bp = bp->next; bp->next = rsabp->free; rsabp->free = bp; /* In case there're threads waiting for new blinding * value - notify 1 thread the value is ready */ if (blindingParamsList.waitCount > 0) { PR_NotifyCondVar( blindingParamsList.cVar ); blindingParamsList.waitCount--; } PZ_Unlock(blindingParamsList.lock); return SECSuccess; } /* We did not find a usable set of blinding params. Can we make one? */ /* Find a free bp struct. */ prevbp = NULL; if ((bp = rsabp->free) != NULL) { /* unlink this bp */ rsabp->free = bp->next; bp->next = NULL; bpUnlinked = bp; /* In case we fail */ PZ_Unlock(blindingParamsList.lock); holdingLock = PR_FALSE; /* generate blinding parameter values for the current thread */ CHECK_SEC_OK( generate_blinding_params(key, f, g, n, modLen ) ); /* put the blinding parameter values into cache */ CHECK_MPI_OK( mp_init( &bp->f) ); CHECK_MPI_OK( mp_init( &bp->g) ); CHECK_MPI_OK( mp_copy( f, &bp->f) ); CHECK_MPI_OK( mp_copy( g, &bp->g) ); /* Put this at head of queue of usable params. 
*/ PZ_Lock(blindingParamsList.lock); holdingLock = PR_TRUE; /* initialize RSABlindingParamsStr */ bp->counter = RSA_BLINDING_PARAMS_MAX_REUSE; bp->next = rsabp->bp; rsabp->bp = bp; bpUnlinked = NULL; /* In case there're threads waiting for new blinding value * just notify them the value is ready */ if (blindingParamsList.waitCount > 0) { PR_NotifyAllCondVar( blindingParamsList.cVar ); blindingParamsList.waitCount = 0; } PZ_Unlock(blindingParamsList.lock); return SECSuccess; } /* Here, there are no usable blinding parameters available, * and no free bp blocks, presumably because they're all * actively having parameters generated for them. * So, we need to wait here and not eat up CPU until some * change happens. */ blindingParamsList.waitCount++; PR_WaitCondVar( blindingParamsList.cVar, PR_INTERVAL_NO_TIMEOUT ); PZ_Unlock(blindingParamsList.lock); holdingLock = PR_FALSE; } while (1); cleanup: /* It is possible to reach this after the lock is already released. */ if (bpUnlinked) { if (!holdingLock) { PZ_Lock(blindingParamsList.lock); holdingLock = PR_TRUE; } bp = bpUnlinked; mp_clear( &bp->f ); mp_clear( &bp->g ); bp->counter = 0; /* Must put the unlinked bp back on the free list */ bp->next = rsabp->free; rsabp->free = bp; } if (holdingLock) { PZ_Unlock(blindingParamsList.lock); holdingLock = PR_FALSE; } if (err) { MP_TO_SEC_ERROR(err); } return SECFailure; }
static void PR_CALLBACK Worker(void *arg)
{
    PRStatus rv;
    PRNetAddr from;
    PRFileDesc *fd = NULL;
    PRThread *me = PR_GetCurrentThread();
    CSWorker_t *worker = (CSWorker_t*)arg;
    CSServer_t *server = worker->server;
    CSPool_t *pool = &server->pool;

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_NOTICE,
        ("\t\tWorker(0x%p): started [%u]\n", me, pool->workers + 1));

    PR_Lock(server->ml);
    PR_APPEND_LINK(&worker->element, &server->list);
    pool->workers += 1;  /* define our existence */

    while (cs_run == server->state)
    {
        while (pool->accepting >= server->workers.accepting)
        {
            TEST_LOG(
                cltsrv_log_file, TEST_LOG_VERBOSE,
                ("\t\tWorker(0x%p): waiting for accept slot[%d]\n",
                 me, pool->accepting));
            rv = PR_WaitCondVar(pool->acceptComplete, PR_INTERVAL_NO_TIMEOUT);
            if (Aborted(rv) || (cs_run != server->state))
            {
                TEST_LOG(
                    cltsrv_log_file, TEST_LOG_NOTICE,
                    ("\tWorker(0x%p): has been %s\n",
                     me, (Aborted(rv) ? "interrupted" : "stopped")));
                goto exit;
            }
        }
        pool->accepting += 1;  /* how many are really in accept */
        PR_Unlock(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("\t\tWorker(0x%p): calling accept\n", me));
        fd = PR_Accept(server->listener, &from, PR_INTERVAL_NO_TIMEOUT);

        PR_Lock(server->ml);
        pool->accepting -= 1;
        PR_NotifyCondVar(pool->acceptComplete);

        if ((NULL == fd) && Aborted(PR_FAILURE))
        {
            if (NULL != server->listener)
            {
                PR_Close(server->listener);
                server->listener = NULL;
            }
            goto exit;
        }

        if (NULL != fd)
        {
            /*
            ** Create another worker if the total number of workers is
            ** less than the minimum specified or we have none left in
            ** accept() AND we're not over the maximum.
            ** This sort of presumes that the number allowed in accept
            ** is at least as many as the minimum. Otherwise we'll keep
            ** creating new threads and deleting them soon after.
            */
            PRBool another =
                ((pool->workers < server->workers.minimum) ||
                ((0 == pool->accepting) &&
                 (pool->workers < server->workers.maximum))) ?
                PR_TRUE : PR_FALSE;
            pool->active += 1;
            PR_Unlock(server->ml);

            if (another) (void)CreateWorker(server, pool);

            rv = ProcessRequest(fd, server);
            if (PR_SUCCESS != rv)
                TEST_LOG(
                    cltsrv_log_file, TEST_LOG_ERROR,
                    ("\t\tWorker(0x%p): server process ended abnormally\n", me));
            (void)PR_Close(fd);
            fd = NULL;

            PR_Lock(server->ml);
            pool->active -= 1;
        }
    }

exit:
    PR_ClearInterrupt();
    PR_Unlock(server->ml);

    if (NULL != fd)
    {
        (void)PR_Shutdown(fd, PR_SHUTDOWN_BOTH);
        (void)PR_Close(fd);
    }

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_NOTICE,
        ("\t\tWorker(0x%p): exiting [%u]\n",
         PR_GetCurrentThread(), pool->workers));

    PR_Lock(server->ml);
    pool->workers -= 1;  /* undefine our existence */
    PR_REMOVE_AND_INIT_LINK(&worker->element);
    PR_NotifyCondVar(pool->exiting);
    PR_Unlock(server->ml);

    PR_DELETE(worker);  /* destruction of the "worker" object */
}  /* Worker */
static void PR_CALLBACK Client(void *arg)
{
    PRStatus rv;
    PRIntn index;
    char buffer[1024];
    PRFileDesc *fd = NULL;
    PRUintn clipping = DEFAULT_CLIPPING;
    PRThread *me = PR_GetCurrentThread();
    CSClient_t *client = (CSClient_t*)arg;
    CSDescriptor_t *descriptor = PR_NEW(CSDescriptor_t);
    PRIntervalTime timeout = PR_MillisecondsToInterval(DEFAULT_CLIENT_TIMEOUT);

    for (index = 0; index < sizeof(buffer); ++index)
        buffer[index] = (char)index;

    client->started = PR_IntervalNow();

    PR_Lock(client->ml);
    client->state = cs_run;
    PR_NotifyCondVar(client->stateChange);
    PR_Unlock(client->ml);

    TimeOfDayMessage("Client started at", me);

    while (cs_run == client->state)
    {
        PRInt32 bytes, descbytes, filebytes, netbytes;

        (void)PR_NetAddrToString(&client->serverAddress, buffer, sizeof(buffer));
        TEST_LOG(cltsrv_log_file, TEST_LOG_INFO,
                 ("\tClient(0x%p): connecting to server at %s\n", me, buffer));

        fd = PR_Socket(domain, SOCK_STREAM, protocol);
        TEST_ASSERT(NULL != fd);
        rv = PR_Connect(fd, &client->serverAddress, timeout);
        if (PR_FAILURE == rv)
        {
            TEST_LOG(
                cltsrv_log_file, TEST_LOG_ERROR,
                ("\tClient(0x%p): connection failed (%d, %d)\n",
                 me, PR_GetError(), PR_GetOSError()));
            goto aborted;
        }

        memset(descriptor, 0, sizeof(*descriptor));
        descriptor->size = PR_htonl(descbytes = rand() % clipping);
        PR_snprintf(
            descriptor->filename, sizeof(descriptor->filename),
            "CS%p%p-%p.dat", client->started, me, client->operations);
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("\tClient(0x%p): sending descriptor for %u bytes\n", me, descbytes));
        bytes = PR_Send(
            fd, descriptor, sizeof(*descriptor), SEND_FLAGS, timeout);
        if (sizeof(CSDescriptor_t) != bytes)
        {
            if (Aborted(PR_FAILURE)) goto aborted;
            if (PR_IO_TIMEOUT_ERROR == PR_GetError())
            {
                TEST_LOG(
                    cltsrv_log_file, TEST_LOG_ERROR,
                    ("\tClient(0x%p): send descriptor timeout\n", me));
                goto retry;
            }
        }
        TEST_ASSERT(sizeof(*descriptor) == bytes);

        netbytes = 0;
        while (netbytes < descbytes)
        {
            filebytes = sizeof(buffer);
            if ((descbytes - netbytes) < filebytes)
                filebytes = descbytes - netbytes;
            TEST_LOG(
                cltsrv_log_file, TEST_LOG_VERBOSE,
                ("\tClient(0x%p): sending %d bytes\n", me, filebytes));
            bytes = PR_Send(fd, buffer, filebytes, SEND_FLAGS, timeout);
            if (filebytes != bytes)
            {
                if (Aborted(PR_FAILURE)) goto aborted;
                if (PR_IO_TIMEOUT_ERROR == PR_GetError())
                {
                    TEST_LOG(
                        cltsrv_log_file, TEST_LOG_ERROR,
                        ("\tClient(0x%p): send data timeout\n", me));
                    goto retry;
                }
            }
            TEST_ASSERT(bytes == filebytes);
            netbytes += bytes;
        }

        filebytes = 0;
        while (filebytes < descbytes)
        {
            netbytes = sizeof(buffer);
            if ((descbytes - filebytes) < netbytes)
                netbytes = descbytes - filebytes;
            TEST_LOG(
                cltsrv_log_file, TEST_LOG_VERBOSE,
                ("\tClient(0x%p): receiving %d bytes\n", me, netbytes));
            bytes = PR_Recv(fd, buffer, netbytes, RECV_FLAGS, timeout);
            if (-1 == bytes)
            {
                if (Aborted(PR_FAILURE))
                {
                    TEST_LOG(
                        cltsrv_log_file, TEST_LOG_ERROR,
                        ("\tClient(0x%p): receive data aborted\n", me));
                    goto aborted;
                }
                else if (PR_IO_TIMEOUT_ERROR == PR_GetError())
                    TEST_LOG(
                        cltsrv_log_file, TEST_LOG_ERROR,
                        ("\tClient(0x%p): receive data timeout\n", me));
                else
                    TEST_LOG(
                        cltsrv_log_file, TEST_LOG_ERROR,
                        ("\tClient(0x%p): receive error (%d, %d)\n",
                         me, PR_GetError(), PR_GetOSError()));
                goto retry;
            }
            if (0 == bytes)
            {
                TEST_LOG(
                    cltsrv_log_file, TEST_LOG_ERROR,
                    ("\t\tClient(0x%p): unexpected end of stream\n",
                     PR_GetCurrentThread()));
                break;
            }
            filebytes += bytes;
        }

        rv = PR_Shutdown(fd, PR_SHUTDOWN_BOTH);
        if (Aborted(rv)) goto aborted;
        TEST_ASSERT(PR_SUCCESS == rv);

retry:
        (void)PR_Close(fd);
        fd = NULL;
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_INFO,
            ("\tClient(0x%p): disconnected from server\n", me));

        PR_Lock(client->ml);
        client->operations += 1;
        client->bytesTransferred += 2 * descbytes;
        rv = PR_WaitCondVar(client->stateChange, rand() % clipping);
        PR_Unlock(client->ml);
        if (Aborted(rv)) break;
    }

aborted:
    client->stopped = PR_IntervalNow();

    PR_ClearInterrupt();
    if (NULL != fd) rv = PR_Close(fd);

    PR_Lock(client->ml);
    client->state = cs_exit;
    PR_NotifyCondVar(client->stateChange);
    PR_Unlock(client->ml);
    PR_DELETE(descriptor);
    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS,
        ("\tClient(0x%p): stopped after %u operations and %u bytes\n",
         PR_GetCurrentThread(), client->operations, client->bytesTransferred));
}  /* Client */
/***********************************************************************
** PRIVATE FUNCTION:    main
** DESCRIPTION:
**   Hammer on the file I/O system
** INPUTS:      The usual argc and argv
**              argv[0] - program name (not used)
**              argv[1] - the number of times to execute the major loop
**              argv[2] - the number of threads to toss into the batch
**              argv[3] - the clipping number applied to randoms
**              default values: loops = 2, threads = 10, limit = 57
** OUTPUTS:     None
** RETURN:      None
** SIDE EFFECTS:
**      Creates, accesses and deletes lots of files
** RESTRICTIONS:
**      (Currently) must have file create permission in "/usr/tmp".
** MEMORY:      NA
** ALGORITHM:
**      1) Fork a "Thread()"
**      2) Wait for 'interleave' seconds
**      3) For [0..'threads') repeat [1..2]
**      4) Mark all objects to stop
**      5) Collect the threads, accumulating the results
**      6) For [0..'loops') repeat [1..5]
**      7) Print accumulated results and exit
**
**      Characteristic output (from IRIX)
**          Random File: Using loops = 2, threads = 10, limit = 57
**          Random File: [min [avg] max] writes/sec average
***********************************************************************/
int main(int argc, char *argv[])
{
    PRLock *ml;
    PRUint32 id = 0;
    int active, poll;
    PRIntervalTime interleave;
    PRIntervalTime duration = 0;
    int limit = 0, loops = 0, threads = 0, times;
    PRUint32 writes, writesMin = 0x7fffffff,
             writesTot = 0, durationTot = 0, writesMax = 0;

    const char *where[] = {"okay", "open", "close", "delete", "write", "seek"};

    /* The command line argument -d is used to determine if the test is
     * being run in debug mode. The regress tool requires only one line of
     * output: PASS or FAIL. All of the printfs associated with this test
     * have been handled with an if (debug_mode) test.
     * Usage: test_name -d
     */
    PLOptStatus os;
    PLOptState *opt = PL_CreateOptState(argc, argv, "Gdl:t:i:");
    while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
        if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 'G':  /* global threads */
            thread_scope = PR_GLOBAL_THREAD;
            break;
        case 'd':  /* debug mode */
            debug_mode = 1;
            break;
        case 'l':  /* limiting number */
            limit = atoi(opt->value);
            break;
        case 't':  /* number of threads */
            threads = atoi(opt->value);
            break;
        case 'i':  /* iteration counter */
            loops = atoi(opt->value);
            break;
        default:
            break;
        }
    }
    PL_DestroyOptState(opt);

    /* main test */
    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
    PR_STDIO_INIT();

    interleave = PR_SecondsToInterval(10);

#ifdef XP_MAC
    SetupMacPrintfLog("ranfile.log");
    debug_mode = 1;
#endif

    ml = PR_NewLock();
    cv = PR_NewCondVar(ml);

    if (loops == 0) loops = DEFAULT_LOOPS;
    if (limit == 0) limit = DEFAULT_LIMIT;
    if (threads == 0) threads = DEFAULT_THREADS;

    if (debug_mode) printf(
        "%s: Using loops = %d, threads = %d, limit = %d and %s threads\n",
        programName, loops, threads, limit,
        (thread_scope == PR_LOCAL_THREAD) ? "LOCAL" : "GLOBAL");

    for (times = 0; times < loops; ++times)
    {
        if (debug_mode) printf(
            "%s: Setting concurrency level to %d\n", programName, times + 1);
        PR_SetConcurrency(times + 1);
        for (active = 0; active < threads; active++)
        {
            hammer[active].ml = ml;
            hammer[active].cv = cv;
            hammer[active].id = id++;
            hammer[active].writes = 0;
            hammer[active].action = sg_go;
            hammer[active].problem = sg_okay;
            hammer[active].limit = (Random() % limit) + 1;
            hammer[active].timein = PR_IntervalNow();
            hammer[active].thread = PR_CreateThread(
                PR_USER_THREAD, Thread, &hammer[active],
                PR_GetThreadPriority(PR_GetCurrentThread()),
                thread_scope, PR_JOINABLE_THREAD, 0);

            PR_Lock(ml);
            PR_WaitCondVar(cv, interleave);  /* start new ones slowly */
            PR_Unlock(ml);
        }

        /*
         * The last thread started has had the opportunity to run for
         * 'interleave' seconds. Now gather them all back in.
         */
        PR_Lock(ml);
        for (poll = 0; poll < threads; poll++)
        {
            if (hammer[poll].action == sg_go)   /* don't overwrite done */
                hammer[poll].action = sg_stop;  /* ask him to stop */
        }
        PR_Unlock(ml);

        while (active > 0)
        {
            for (poll = 0; poll < threads; poll++)
            {
                PR_Lock(ml);
                while (hammer[poll].action < sg_done)
                    PR_WaitCondVar(cv, PR_INTERVAL_NO_TIMEOUT);
                PR_Unlock(ml);

                active -= 1;  /* this is another one down */
                (void)PR_JoinThread(hammer[poll].thread);
                hammer[poll].thread = NULL;
                if (hammer[poll].problem == sg_okay)
                {
                    duration = PR_IntervalToMilliseconds(
                        PR_IntervalNow() - hammer[poll].timein);
                    writes = hammer[poll].writes * 1000 / duration;
                    if (writes < writesMin) writesMin = writes;
                    if (writes > writesMax) writesMax = writes;
                    writesTot += hammer[poll].writes;
                    durationTot += duration;
                }
                else if (debug_mode) printf(
                    "%s: test failed %s after %ld milliseconds\n",
                    programName, where[hammer[poll].problem], duration);
                else failed_already = 1;
            }
        }
    }

    if (debug_mode) printf(
        "%s: [%ld [%ld] %ld] writes/sec average\n",
        programName, writesMin, writesTot * 1000 / durationTot, writesMax);

    PR_DestroyCondVar(cv);
    PR_DestroyLock(ml);

    if (failed_already)
    {
        printf("FAIL\n");
        return 1;
    }
    else
    {
        printf("PASS\n");
        return 0;
    }
}  /* main */
int main(int argc, char** argv)
{
    PRUintn index;
    PRBool boolean;
    CSClient_t *client;
    PRStatus rv, joinStatus;
    CSServer_t *server = NULL;

    PRUintn backlog = DEFAULT_BACKLOG;
    PRUintn clients = DEFAULT_CLIENTS;
    const char *serverName = DEFAULT_SERVER;
    PRBool serverIsLocal = PR_TRUE;
    PRUintn accepting = ALLOWED_IN_ACCEPT;
    PRUintn workersMin = DEFAULT_WORKERS_MIN;
    PRUintn workersMax = DEFAULT_WORKERS_MAX;
    PRIntn execution = DEFAULT_EXECUTION_TIME;
    PRIntn low = DEFAULT_LOW, high = DEFAULT_HIGH;

    /*
     * -G           use global threads
     * -a <n>       threads allowed in accept
     * -b <n>       backlog for listen
     * -c <threads> number of clients to create
     * -f <low>     low water mark for caching FDs
     * -F <high>    high water mark for caching FDs
     * -w <threads> minimal number of server threads
     * -W <threads> maximum number of server threads
     * -e <seconds> duration of the test in seconds
     * -s <string>  dns name of server (implies no server here)
     * -v           verbosity
     */
    PLOptStatus os;
    PLOptState *opt = PL_CreateOptState(argc, argv, "GX6b:a:c:f:F:w:W:e:s:vdhp");

    debug_out = PR_GetSpecialFD(PR_StandardError);

    while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
        if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 'G':  /* use global threads */
            thread_scope = PR_GLOBAL_THREAD;
            break;
        case 'X':  /* use XTP as transport */
            protocol = 36;
            break;
        case '6':  /* Use IPv6 */
            domain = PR_AF_INET6;
            break;
        case 'a':  /* the value for accepting */
            accepting = atoi(opt->value);
            break;
        case 'b':  /* the value for backlog */
            backlog = atoi(opt->value);
            break;
        case 'c':  /* number of client threads */
            clients = atoi(opt->value);
            break;
        case 'f':  /* low water fd cache */
            low = atoi(opt->value);
            break;
        case 'F':  /* high water fd cache */
            high = atoi(opt->value);
            break;
        case 'w':  /* minimum server worker threads */
            workersMin = atoi(opt->value);
            break;
        case 'W':  /* maximum server worker threads */
            workersMax = atoi(opt->value);
            break;
        case 'e':  /* program execution time in seconds */
            execution = atoi(opt->value);
            break;
        case 's':  /* server's address */
            serverName = opt->value;
            break;
        case 'v':  /* verbosity */
            verbosity = IncrementVerbosity();
            break;
        case 'd':  /* debug mode */
            debug_mode = PR_TRUE;
            break;
        case 'p':  /* pthread mode */
            pthread_stats = PR_TRUE;
            break;
        case 'h':
        default:
            Help();
            return 2;
        }
    }
    PL_DestroyOptState(opt);

    if (0 != PL_strcmp(serverName, DEFAULT_SERVER)) serverIsLocal = PR_FALSE;
    if (0 == execution) execution = DEFAULT_EXECUTION_TIME;
    if (0 == workersMax) workersMax = DEFAULT_WORKERS_MAX;
    if (0 == workersMin) workersMin = DEFAULT_WORKERS_MIN;
    if (0 == accepting) accepting = ALLOWED_IN_ACCEPT;
    if (0 == backlog) backlog = DEFAULT_BACKLOG;

    if (workersMin > accepting) accepting = workersMin;

    PR_STDIO_INIT();
    TimeOfDayMessage("Client/Server started at", PR_GetCurrentThread());

    cltsrv_log_file = PR_NewLogModule("cltsrv_log");
    MY_ASSERT(NULL != cltsrv_log_file);
    boolean = PR_SetLogFile("cltsrv.log");
    MY_ASSERT(boolean);

    rv = PR_SetFDCacheSize(low, high);
    PR_ASSERT(PR_SUCCESS == rv);

    if (serverIsLocal)
    {
        /* Establish the server */
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_INFO,
            ("main(0x%p): starting server\n", PR_GetCurrentThread()));

        server = PR_NEWZAP(CSServer_t);
        PR_INIT_CLIST(&server->list);
        server->state = cs_init;
        server->ml = PR_NewLock();
        server->backlog = backlog;
        server->port = DEFAULT_PORT;
        server->workers.minimum = workersMin;
        server->workers.maximum = workersMax;
        server->workers.accepting = accepting;
        server->stateChange = PR_NewCondVar(server->ml);
        server->pool.exiting = PR_NewCondVar(server->ml);
        server->pool.acceptComplete =
PR_NewCondVar(server->ml); TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("main(0x%p): creating server thread\n", PR_GetCurrentThread())); server->thread = PR_CreateThread( PR_USER_THREAD, Server, server, PR_PRIORITY_HIGH, thread_scope, PR_JOINABLE_THREAD, 0); TEST_ASSERT(NULL != server->thread); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("main(0x%p): waiting for server init\n", PR_GetCurrentThread())); PR_Lock(server->ml); while (server->state == cs_init) PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT); PR_Unlock(server->ml); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("main(0x%p): server init complete (port #%d)\n", PR_GetCurrentThread(), server->port)); } if (clients != 0) { /* Create all of the clients */ PRHostEnt host; char buffer[BUFFER_SIZE]; client = (CSClient_t*)PR_CALLOC(clients * sizeof(CSClient_t)); TEST_LOG( cltsrv_log_file, TEST_LOG_VERBOSE, ("main(0x%p): creating %d client threads\n", PR_GetCurrentThread(), clients)); if (!serverIsLocal) { rv = PR_GetHostByName(serverName, buffer, BUFFER_SIZE, &host); if (PR_SUCCESS != rv) { PL_FPrintError(PR_STDERR, "PR_GetHostByName"); return 2; } } for (index = 0; index < clients; ++index) { client[index].state = cs_init; client[index].ml = PR_NewLock(); if (serverIsLocal) { if (PR_AF_INET6 != domain) (void)PR_InitializeNetAddr( PR_IpAddrLoopback, DEFAULT_PORT, &client[index].serverAddress); else rv = PR_SetNetAddr(PR_IpAddrLoopback, PR_AF_INET6, DEFAULT_PORT, &client[index].serverAddress); } else { (void)PR_EnumerateHostEnt( 0, &host, DEFAULT_PORT, &client[index].serverAddress); } client[index].stateChange = PR_NewCondVar(client[index].ml); TEST_LOG( cltsrv_log_file, TEST_LOG_INFO, ("main(0x%p): creating client threads\n", PR_GetCurrentThread())); client[index].thread = PR_CreateThread( PR_USER_THREAD, Client, &client[index], PR_PRIORITY_NORMAL, thread_scope, PR_JOINABLE_THREAD, 0); TEST_ASSERT(NULL != client[index].thread); PR_Lock(client[index].ml); while (cs_init == client[index].state) PR_WaitCondVar(client[index].stateChange, PR_INTERVAL_NO_TIMEOUT); PR_Unlock(client[index].ml); } } /* Then just let them go at it for a bit */ TEST_LOG( cltsrv_log_file, TEST_LOG_ALWAYS, ("main(0x%p): waiting for execution interval (%d seconds)\n", PR_GetCurrentThread(), execution)); WaitForCompletion(execution); TimeOfDayMessage("Shutting down", PR_GetCurrentThread()); if (clients != 0) { for (index = 0; index < clients; ++index) { TEST_LOG(cltsrv_log_file, TEST_LOG_STATUS, ("main(0x%p): notifying client(0x%p) to stop\n", PR_GetCurrentThread(), client[index].thread)); PR_Lock(client[index].ml); if (cs_run == client[index].state) { client[index].state = cs_stop; PR_Interrupt(client[index].thread); while (cs_stop == client[index].state) PR_WaitCondVar( client[index].stateChange, PR_INTERVAL_NO_TIMEOUT); } PR_Unlock(client[index].ml); TEST_LOG(cltsrv_log_file, TEST_LOG_VERBOSE, ("main(0x%p): joining client(0x%p)\n", PR_GetCurrentThread(), client[index].thread)); joinStatus = PR_JoinThread(client[index].thread); TEST_ASSERT(PR_SUCCESS == joinStatus); PR_DestroyCondVar(client[index].stateChange); PR_DestroyLock(client[index].ml); } PR_DELETE(client); } if (NULL != server) { /* All clients joined - retrieve the server */ TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("main(0x%p): notifying server(0x%p) to stop\n", PR_GetCurrentThread(), server->thread)); PR_Lock(server->ml); server->state = cs_stop; PR_Interrupt(server->thread); while (cs_exit != server->state) PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT); 
PR_Unlock(server->ml); TEST_LOG( cltsrv_log_file, TEST_LOG_NOTICE, ("main(0x%p): joining server(0x%p)\n", PR_GetCurrentThread(), server->thread)); joinStatus = PR_JoinThread(server->thread); TEST_ASSERT(PR_SUCCESS == joinStatus); PR_DestroyCondVar(server->stateChange); PR_DestroyCondVar(server->pool.exiting); PR_DestroyCondVar(server->pool.acceptComplete); PR_DestroyLock(server->ml); PR_DELETE(server); } TEST_LOG( cltsrv_log_file, TEST_LOG_ALWAYS, ("main(0x%p): test complete\n", PR_GetCurrentThread())); PT_FPrintStats(debug_out, "\nPThread Statistics\n"); TimeOfDayMessage("Test exiting at", PR_GetCurrentThread()); PR_Cleanup(); return 0; } /* main */
void CondVarTestPUU(void *_arg) { PRInt32 arg = (PRInt32)_arg; PRInt32 index, loops; threadinfo *list; PRLock *sharedlock; PRCondVar *sharedcvar; PRLock *exitlock; PRCondVar *exitcvar; PRInt32 *tcount, *saved_tcount; exitcount=0; list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4)); saved_tcount = tcount = (PRInt32 *)PR_CALLOC(sizeof(*tcount) * (arg * 4)); sharedlock = PR_NewLock(); sharedcvar = PR_NewCondVar(sharedlock); exitlock = PR_NewLock(); exitcvar = PR_NewCondVar(exitlock); /* Create the threads */ for(index=0; index<arg; ) { list[index].lock = PR_NewLock(); list[index].cvar = PR_NewCondVar(list[index].lock); CreateTestThread(&list[index], index, list[index].lock, list[index].cvar, count, PR_INTERVAL_NO_TIMEOUT, tcount, exitlock, exitcvar, &exitcount, PR_FALSE, PR_LOCAL_THREAD); DPRINTF(("CondVarTestPUU: created thread 0x%lx\n",list[index].thread)); index++; tcount++; } for (loops = 0; loops < count; loops++) { /* Notify the threads */ for(index=0; index<(arg); index++) { PR_Lock(list[index].lock); (*list[index].tcount)++; PR_NotifyCondVar(list[index].cvar); PR_Unlock(list[index].lock); } PR_Lock(exitlock); /* Wait for threads to finish */ while(exitcount < arg) { DPRINTF(("CondVarTestPUU: thread 0x%lx waiting on exitcvar = 0x%lx cnt = %ld\n", PR_GetCurrentThread(), exitcvar, exitcount)); PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60)); } PR_ASSERT(exitcount >= arg); exitcount -= arg; PR_Unlock(exitlock); } /* Join all the threads */ for(index=0; index<(arg); index++) { DPRINTF(("CondVarTestPUU: joining thread 0x%lx\n",list[index].thread)); PR_JoinThread(list[index].thread); if (list[index].internal) { PR_Lock(list[index].lock); PR_DestroyCondVar(list[index].cvar); PR_Unlock(list[index].lock); PR_DestroyLock(list[index].lock); } } PR_DestroyCondVar(sharedcvar); PR_DestroyLock(sharedlock); PR_DestroyCondVar(exitcvar); PR_DestroyLock(exitlock); PR_DELETE(list); PR_DELETE(saved_tcount); }
PR_JoinThreadPool(PRThreadPool *tpool) { PRStatus rval = PR_SUCCESS; PRCList *head; PRStatus rval_status; PR_Lock(tpool->jobq.lock); while (!tpool->shutdown) PR_WaitCondVar(tpool->shutdown_cv, PR_INTERVAL_NO_TIMEOUT); /* * wakeup worker threads */ #ifdef OPT_WINNT /* * post shutdown notification for all threads */ { int i; for(i=0; i < tpool->current_threads; i++) { PostQueuedCompletionStatus(tpool->jobq.nt_completion_port, 0, TRUE, NULL); } } #else PR_NotifyAllCondVar(tpool->jobq.cv); #endif /* * wakeup io thread(s) */ notify_ioq(tpool); /* * wakeup timer thread(s) */ PR_Lock(tpool->timerq.lock); notify_timerq(tpool); PR_Unlock(tpool->timerq.lock); while (!PR_CLIST_IS_EMPTY(&tpool->jobq.wthreads)) { wthread *wthrp; head = PR_LIST_HEAD(&tpool->jobq.wthreads); PR_REMOVE_AND_INIT_LINK(head); PR_Unlock(tpool->jobq.lock); wthrp = WTHREAD_LINKS_PTR(head); rval_status = PR_JoinThread(wthrp->thread); PR_ASSERT(PR_SUCCESS == rval_status); PR_DELETE(wthrp); PR_Lock(tpool->jobq.lock); } PR_Unlock(tpool->jobq.lock); while (!PR_CLIST_IS_EMPTY(&tpool->ioq.wthreads)) { wthread *wthrp; head = PR_LIST_HEAD(&tpool->ioq.wthreads); PR_REMOVE_AND_INIT_LINK(head); wthrp = WTHREAD_LINKS_PTR(head); rval_status = PR_JoinThread(wthrp->thread); PR_ASSERT(PR_SUCCESS == rval_status); PR_DELETE(wthrp); } while (!PR_CLIST_IS_EMPTY(&tpool->timerq.wthreads)) { wthread *wthrp; head = PR_LIST_HEAD(&tpool->timerq.wthreads); PR_REMOVE_AND_INIT_LINK(head); wthrp = WTHREAD_LINKS_PTR(head); rval_status = PR_JoinThread(wthrp->thread); PR_ASSERT(PR_SUCCESS == rval_status); PR_DELETE(wthrp); } /* * Delete queued jobs */ while (!PR_CLIST_IS_EMPTY(&tpool->jobq.list)) { PRJob *jobp; head = PR_LIST_HEAD(&tpool->jobq.list); PR_REMOVE_AND_INIT_LINK(head); jobp = JOB_LINKS_PTR(head); tpool->jobq.cnt--; delete_job(jobp); } /* delete io jobs */ while (!PR_CLIST_IS_EMPTY(&tpool->ioq.list)) { PRJob *jobp; head = PR_LIST_HEAD(&tpool->ioq.list); PR_REMOVE_AND_INIT_LINK(head); tpool->ioq.cnt--; jobp = JOB_LINKS_PTR(head); delete_job(jobp); } /* delete timer jobs */ while (!PR_CLIST_IS_EMPTY(&tpool->timerq.list)) { PRJob *jobp; head = PR_LIST_HEAD(&tpool->timerq.list); PR_REMOVE_AND_INIT_LINK(head); tpool->timerq.cnt--; jobp = JOB_LINKS_PTR(head); delete_job(jobp); } PR_ASSERT(0 == tpool->jobq.cnt); PR_ASSERT(0 == tpool->ioq.cnt); PR_ASSERT(0 == tpool->timerq.cnt); delete_threadpool(tpool); return rval; }
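/*
 * PR_JoinThreadPool above parks on shutdown_cv until the pool is marked
 * for shutdown. A sketch of that trigger, assumed from the fields used
 * above (the real PR_ShutdownThreadPool may do more than this):
 */
static PRStatus shutdown_threadpool(PRThreadPool *tpool)
{
    PR_Lock(tpool->jobq.lock);
    tpool->shutdown = 1;                      /* predicate for all waiters */
    PR_NotifyAllCondVar(tpool->shutdown_cv);  /* release PR_JoinThreadPool */
    PR_Unlock(tpool->jobq.lock);
    return PR_SUCCESS;
}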
void CondVarMixedTest(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;
    PRInt32 *ptcount, *saved_ptcount;

    exitcount = 0;
    tcount = 0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));
    saved_ptcount = ptcount = (PRInt32 *)PR_CALLOC(sizeof(*ptcount) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads: a local and a global thread on the shared
     * lock & cvar, then a local and a global thread with their own
     * per-thread lock & cvar */
    for (index = 0; index < arg * 4; ) {
        CreateTestThread(&list[index], index, sharedlock, sharedcvar,
                         count, PR_MillisecondsToInterval(50), &tcount,
                         exitlock, exitcvar, &exitcount, PR_TRUE,
                         PR_LOCAL_THREAD);
        index++;
        CreateTestThread(&list[index], index, sharedlock, sharedcvar,
                         count, PR_MillisecondsToInterval(50), &tcount,
                         exitlock, exitcvar, &exitcount, PR_TRUE,
                         PR_GLOBAL_THREAD);
        index++;
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index], index, list[index].lock,
                         list[index].cvar, count,
                         PR_MillisecondsToInterval(50), ptcount,
                         exitlock, exitcvar, &exitcount, PR_FALSE,
                         PR_LOCAL_THREAD);
        index++;
        ptcount++;
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index], index, list[index].lock,
                         list[index].cvar, count,
                         PR_MillisecondsToInterval(50), ptcount,
                         exitlock, exitcvar, &exitcount, PR_FALSE,
                         PR_GLOBAL_THREAD);
        index++;
        ptcount++;
    }

    /* Notify every 3rd thread */
    for (loops = 0; loops < count; loops++) {

        /* Notify the threads */
        for (index = 0; index < (arg * 4); index += 3) {
            PR_Lock(list[index].lock);
            (*list[index].tcount)++;   /* was "*list[index].tcount++",
                                        * which advanced the pointer
                                        * instead of incrementing the
                                        * count it points at */
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
        }

        /* Wait for threads to finish */
        PR_Lock(exitlock);
        while (exitcount < arg * 4)
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        PR_ASSERT(exitcount >= arg * 4);
        exitcount -= arg * 4;
        PR_Unlock(exitlock);
    }

    /* Join all the threads */
    for (index = 0; index < (arg * 4); index++) {
        PR_JoinThread(list[index].thread);
        if (list[index].internal) {
            PR_Lock(list[index].lock);
            PR_DestroyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
            PR_DestroyLock(list[index].lock);
        }
    }

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);
    PR_DestroyCondVar(exitcvar);
    PR_DestroyLock(exitlock);

    PR_DELETE(list);
    PR_DELETE(saved_ptcount);
}
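/*
 * Why the notify loop above needs the parentheses: a small, self-contained
 * illustration of the precedence trap that was fixed. Postfix ++ binds
 * tighter than unary *, so "*p++" advances the pointer, while "(*p)++"
 * increments the value it points at.
 */
#include <assert.h>

int main(void)
{
    int counts[2] = { 0, 0 };
    int *p = counts;

    (*p)++;     /* increments counts[0]; p still points at counts[0] */
    assert(counts[0] == 1 && p == counts);

    *p++;       /* parses as *(p++): advances p, reads and discards counts[0] */
    assert(p == counts + 1 && counts[0] == 1);
    return 0;
}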
/*
 * worker thread function
 */
static void wstart(void *arg)
{
    PRThreadPool *tp = (PRThreadPool *) arg;
    PRCList *head;

    /*
     * execute jobs until shutdown
     */
    while (!tp->shutdown) {
        PRJob *jobp;
#ifdef OPT_WINNT
        BOOL rv;
        DWORD unused, shutdown;
        LPOVERLAPPED olp;

        PR_Lock(tp->jobq.lock);
        tp->idle_threads++;
        PR_Unlock(tp->jobq.lock);
        rv = GetQueuedCompletionStatus(tp->jobq.nt_completion_port,
                                       &unused, &shutdown, &olp, INFINITE);
        PR_ASSERT(rv);
        if (shutdown)
            break;
        jobp = ((NT_notifier *) olp)->jobp;
        PR_Lock(tp->jobq.lock);
        tp->idle_threads--;
        tp->jobq.cnt--;
        PR_Unlock(tp->jobq.lock);
#else
        PR_Lock(tp->jobq.lock);
        while (PR_CLIST_IS_EMPTY(&tp->jobq.list) && (!tp->shutdown)) {
            tp->idle_threads++;
            PR_WaitCondVar(tp->jobq.cv, PR_INTERVAL_NO_TIMEOUT);
            tp->idle_threads--;
        }
        if (tp->shutdown) {
            PR_Unlock(tp->jobq.lock);
            break;
        }
        head = PR_LIST_HEAD(&tp->jobq.list);
        /*
         * remove job from queue
         */
        PR_REMOVE_AND_INIT_LINK(head);
        tp->jobq.cnt--;
        jobp = JOB_LINKS_PTR(head);
        PR_Unlock(tp->jobq.lock);
#endif
        jobp->job_func(jobp->job_arg);
        if (!JOINABLE_JOB(jobp)) {
            delete_job(jobp);
        } else {
            JOIN_NOTIFY(jobp);
        }
    }
    PR_Lock(tp->jobq.lock);
    tp->current_threads--;
    PR_Unlock(tp->jobq.lock);
}
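/*
 * The JOINABLE_JOB / JOIN_NOTIFY branch in wstart pairs with PR_JoinJob on
 * the caller side: a joinable job is not deleted by the worker; the worker
 * notifies the job's join condition and the caller reaps it. A minimal
 * sketch of that pairing, assuming the hypothetical job function "compute".
 */
#include "nspr.h"
#include "prtpool.h"

static void PR_CALLBACK compute(void *arg)
{
    *(int *)arg = 42;    /* stand-in result */
}

int main(void)
{
    int result = 0;
    PRThreadPool *pool = PR_CreateThreadPool(2, 4, 0);
    PRJob *job;

    if (pool == NULL)
        return 1;

    job = PR_QueueJob(pool, compute, &result, PR_TRUE /* joinable */);
    if (job != NULL)
        PR_JoinJob(job);     /* blocks until compute has finished */

    PR_ShutdownThreadPool(pool);
    PR_JoinThreadPool(pool);
    return result == 42 ? 0 : 1;
}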
/* void Run(); */
NS_IMETHODIMP TimerThread::Run()
{
  nsAutoLock lock(mLock);

  while (!mShutdown) {
    PRIntervalTime waitFor;

    if (mSleeping) {
      // Sleep for 0.1 seconds while not firing timers.
      waitFor = PR_MillisecondsToInterval(100);
    } else {
      waitFor = PR_INTERVAL_NO_TIMEOUT;
      PRIntervalTime now = PR_IntervalNow();
      nsTimerImpl *timer = nsnull;

      if (mTimers.Count() > 0) {
        timer = static_cast<nsTimerImpl*>(mTimers[0]);

        if (!TIMER_LESS_THAN(now, timer->mTimeout + mTimeoutAdjustment)) {
    next:
          // NB: AddRef before the Release under RemoveTimerInternal to avoid
          // mRefCnt passing through zero, in case all other refs than the one
          // from mTimers have gone away (the last non-mTimers[i]-ref's Release
          // must be racing with us, blocked in gThread->RemoveTimer waiting
          // for TimerThread::mLock, under nsTimerImpl::Release.)
          NS_ADDREF(timer);
          RemoveTimerInternal(timer);

          // We release mLock around the Fire call to avoid deadlock.
          lock.unlock();

#ifdef DEBUG_TIMERS
          if (PR_LOG_TEST(gTimerLog, PR_LOG_DEBUG)) {
            PR_LOG(gTimerLog, PR_LOG_DEBUG,
                   ("Timer thread woke up %dms from when it was supposed to\n",
                    (now >= timer->mTimeout)
                      ? PR_IntervalToMilliseconds(now - timer->mTimeout)
                      : -(PRInt32)PR_IntervalToMilliseconds(timer->mTimeout - now)));
          }
#endif

          // We are going to let the call to PostTimerEvent here handle the
          // release of the timer so that we don't end up releasing the timer
          // on the TimerThread instead of on the thread it targets.
          if (NS_FAILED(timer->PostTimerEvent())) {
            nsrefcnt rc;
            NS_RELEASE2(timer, rc);

            // The nsITimer interface requires that its users keep a reference
            // to the timers they use while those timers are initialized but
            // have not yet fired. If this ever happens, it is a bug in the
            // code that created and used the timer.
            //
            // Further, note that this should never happen even with a
            // misbehaving user, because nsTimerImpl::Release checks for a
            // refcount of 1 with an armed timer (a timer whose only reference
            // is from the timer thread) and when it hits this will remove the
            // timer from the timer thread and thus destroy the last reference,
            // preventing this situation from occurring.
            NS_ASSERTION(rc != 0, "destroyed timer off its target thread!");
          }
          timer = nsnull;

          lock.lock();
          if (mShutdown)
            break;

          // Update now, as PostTimerEvent plus the locking may have taken a
          // tick or two, and we may goto next below.
          now = PR_IntervalNow();
        }
      }

      if (mTimers.Count() > 0) {
        timer = static_cast<nsTimerImpl *>(mTimers[0]);

        PRIntervalTime timeout = timer->mTimeout + mTimeoutAdjustment;

        // Don't wait at all (even for PR_INTERVAL_NO_WAIT) if the next timer
        // is due now or overdue.
        if (!TIMER_LESS_THAN(now, timeout))
          goto next;
        waitFor = timeout - now;
      }

#ifdef DEBUG_TIMERS
      if (PR_LOG_TEST(gTimerLog, PR_LOG_DEBUG)) {
        if (waitFor == PR_INTERVAL_NO_TIMEOUT)
          PR_LOG(gTimerLog, PR_LOG_DEBUG,
                 ("waiting for PR_INTERVAL_NO_TIMEOUT\n"));
        else
          PR_LOG(gTimerLog, PR_LOG_DEBUG,
                 ("waiting for %u\n", PR_IntervalToMilliseconds(waitFor)));
      }
#endif
    }

    mWaiting = PR_TRUE;
    PR_WaitCondVar(mCondVar, waitFor);
    mWaiting = PR_FALSE;
  }

  return NS_OK;
}
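/*
 * Why Run() orders timeouts with TIMER_LESS_THAN rather than the plain <
 * operator: PRIntervalTime is an unsigned counter that wraps around, so
 * ordering must be decided from the (wrapping) difference between the two
 * values. The macro below illustrates the usual idiom; it is NOT the exact
 * definition from TimerThread.h, only a sketch of the technique.
 */
#include "nspr.h"
#include <stdio.h>

/* a is "before" b if the wrapped difference a - b is negative as a signed
 * value; this stays correct as long as the two times are within half the
 * counter's range of each other */
#define INTERVAL_LESS_THAN(a, b) ((PRInt32)((a) - (b)) < 0)

int main(void)
{
    PRIntervalTime now = (PRIntervalTime)-10;  /* just before wraparound */
    PRIntervalTime deadline = now + 100;       /* wraps past zero */

    /* a plain "now < deadline" is false here (deadline wrapped to a small
     * number), but the idiom still orders the two correctly */
    printf("now before deadline: %d\n", INTERVAL_LESS_THAN(now, deadline));
    return 0;
}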
PR_IMPLEMENT(PRStatus)
PR_CancelJob(PRJob *jobp)
{
    PRStatus rval = PR_FAILURE;
    PRThreadPool *tp;

    if (jobp->on_timerq) {
        /*
         * now, check again while holding the timerq lock
         */
        tp = jobp->tpool;
        PR_Lock(tp->timerq.lock);
        if (jobp->on_timerq) {
            jobp->on_timerq = PR_FALSE;
            PR_REMOVE_AND_INIT_LINK(&jobp->links);
            tp->timerq.cnt--;
            PR_Unlock(tp->timerq.lock);
            if (!JOINABLE_JOB(jobp)) {
                delete_job(jobp);
            } else {
                JOIN_NOTIFY(jobp);
            }
            rval = PR_SUCCESS;
        } else
            PR_Unlock(tp->timerq.lock);
    } else if (jobp->on_ioq) {
        /*
         * now, check again while holding the ioq lock
         */
        tp = jobp->tpool;
        PR_Lock(tp->ioq.lock);
        if (jobp->on_ioq) {
            jobp->cancel_cv = PR_NewCondVar(tp->ioq.lock);
            if (NULL == jobp->cancel_cv) {
                PR_Unlock(tp->ioq.lock);
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return PR_FAILURE;
            }
            /*
             * mark job 'cancelled' and notify io thread(s)
             * XXXX:
             * this assumes there is only one io thread; when there
             * are multiple threads, the io thread processing this job
             * must be notified.
             */
            jobp->cancel_io = PR_TRUE;
            PR_Unlock(tp->ioq.lock);    /* release, reacquire ioq lock */
            notify_ioq(tp);
            PR_Lock(tp->ioq.lock);
            while (jobp->cancel_io)
                PR_WaitCondVar(jobp->cancel_cv, PR_INTERVAL_NO_TIMEOUT);
            PR_Unlock(tp->ioq.lock);
            PR_ASSERT(!jobp->on_ioq);
            if (!JOINABLE_JOB(jobp)) {
                delete_job(jobp);
            } else {
                JOIN_NOTIFY(jobp);
            }
            rval = PR_SUCCESS;
        } else
            PR_Unlock(tp->ioq.lock);
    }
    if (PR_FAILURE == rval)
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
    return rval;
}
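/*
 * Caller-side sketch of the timer-queue path of PR_CancelJob: queue a job
 * to fire after a long delay, then cancel it before it runs. This assumes
 * the PR_QueueJob_Timer entry point from prtpool.h; the job function
 * on_timeout is hypothetical. Cancellation only succeeds while the job is
 * still sitting on the timer (or io) queue, which is why PR_CancelJob
 * falls back to PR_INVALID_STATE_ERROR above.
 */
#include "nspr.h"
#include "prtpool.h"

static void PR_CALLBACK on_timeout(void *arg)
{
    /* would run only if the timer expired before cancellation */
}

int main(void)
{
    PRThreadPool *pool = PR_CreateThreadPool(1, 2, 0);
    PRJob *job;

    if (pool == NULL)
        return 1;

    job = PR_QueueJob_Timer(pool, PR_SecondsToInterval(3600),
                            on_timeout, NULL, PR_FALSE);

    /* still on the timer queue, so this should report PR_SUCCESS */
    if (job != NULL && PR_CancelJob(job) != PR_SUCCESS)
        return 1;

    PR_ShutdownThreadPool(pool);
    PR_JoinThreadPool(pool);
    return 0;
}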