Example #1
static PRIntervalTime ConditionNotify(PRUint32 loops)
{
    PRThread *thread;
    NotifyData notifyData;
    PRIntervalTime timein, overhead;
    
    timein = PR_IntervalNow();

    notifyData.counter = loops;
    notifyData.ml = PR_NewLock();
    notifyData.child = PR_NewCondVar(notifyData.ml);
    notifyData.parent = PR_NewCondVar(notifyData.ml);
    thread = PR_CreateThread(
        PR_USER_THREAD, Notifier, &notifyData,
        PR_GetThreadPriority(PR_GetCurrentThread()),
        thread_scope, PR_JOINABLE_THREAD, 0);

    overhead = PR_IntervalNow() - timein;  /* elapsed so far */

    PR_Lock(notifyData.ml);
    while (notifyData.counter > 0)
    {
        notifyData.pending = PR_TRUE;
        PR_NotifyCondVar(notifyData.child);
        while (notifyData.pending)
            PR_WaitCondVar(notifyData.parent, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(notifyData.ml);

    timein = PR_IntervalNow();

    (void)PR_JoinThread(thread);
    PR_DestroyCondVar(notifyData.child);
    PR_DestroyCondVar(notifyData.parent);
    PR_DestroyLock(notifyData.ml);
    
    overhead += (PR_IntervalNow() - timein);  /* more overhead */

    return overhead;
}  /* ConditionNotify */
Example #2
static PRIntervalTime Alarms2(PRUint32 loops)
{
    PRStatus rv;
    PRAlarm *alarm;
    PRIntervalTime overhead, timein = PR_IntervalNow();
    AlarmData ad;
    PRIntervalTime duration = PR_SecondsToInterval(30);

    PRLock *ml = PR_NewLock();
    PRCondVar *cv = PR_NewCondVar(ml);

    ad.ml = ml;
    ad.cv = cv;
    ad.rate = 1;
    ad.times = loops;
    ad.late = ad.times = 0;
    ad.duration = duration;
    ad.timein = PR_IntervalNow();
    ad.period = PR_SecondsToInterval(1);

    alarm = PR_CreateAlarm();

    (void)PR_SetAlarm(
        alarm, ad.period, ad.rate, AlarmFn2, &ad);
        
    overhead = PR_IntervalNow() - timein;

    PR_Lock(ml);
    while ((PRIntervalTime)(PR_IntervalNow() - ad.timein) < duration)
        PR_WaitCondVar(cv, PR_INTERVAL_NO_TIMEOUT);
    PR_Unlock(ml);
    
    timein = PR_IntervalNow();

    rv = PR_DestroyAlarm(alarm);
    if (rv != PR_SUCCESS)
    {
        if (!debug_mode)
            failed_already = 1;
        else
            printf("***Destroying alarm status: FAIL\n");
    }

    PR_DestroyCondVar(cv);
    PR_DestroyLock(ml);
    
    overhead += (PR_IntervalNow() - timein);
    
    return duration + overhead;
}  /* Alarms2 */
Example #3
PR_IMPLEMENT(PRStatus) PR_DestroyWaitGroup(PRWaitGroup *group)
{
    PRStatus rv = PR_SUCCESS;
    if (NULL == group) group = mw_state->group;
    PR_ASSERT(NULL != group);
    if (NULL != group)
    {
        if (_prmw_stopped != group->state)  /* quick, unsafe test */
        {
            PRMWGroupState mws;
            /* One shot to correct the situation */
            PR_Lock(group->ml);
            if (group->state < _prmw_stopped)  /* safer test */
                group->state = _prmw_stopping;
            mws = MW_TestForShutdownInternal(group);
            PR_Unlock(group->ml);
            if (_prmw_stopped != mws)  /* quick test again */
            {
                PR_SetError(PR_INVALID_STATE_ERROR, 0);
                return PR_FAILURE;
            }
        }

        PR_Lock(mw_lock);
        PR_REMOVE_LINK(&group->group_link);
        PR_Unlock(mw_lock);

        PR_DELETE(group->waiter);
        PR_DestroyCondVar(group->new_business);
        PR_DestroyCondVar(group->io_complete);
        PR_DestroyCondVar(group->io_taken);
        PR_DestroyLock(group->ml);
        if (group == mw_state->group) mw_state->group = NULL;
        PR_DELETE(group);
    }
    return rv;
}  /* PR_DestroyWaitGroup */
Example #4
TimerThread::~TimerThread()
{
  if (mCondVar)
    PR_DestroyCondVar(mCondVar);
  if (mLock)
    PR_DestroyLock(mLock);

  mThread = nsnull;

  PRInt32 n = mTimers.Count();
  while (--n >= 0) {
    nsTimerImpl *timer = NS_STATIC_CAST(nsTimerImpl *, mTimers[n]);
    NS_RELEASE(timer);
  }
}
Example #5
nsHTTPListener::~nsHTTPListener()
{
  if (mResponsibleForDoneSignal)
    send_done_signal();

  if (mCondition)
    PR_DestroyCondVar(mCondition);
  
  if (mLock)
    PR_DestroyLock(mLock);

  if (mLoader) {
    nsCOMPtr<nsIThread> mainThread(do_GetMainThread());
    NS_ProxyRelease(mainThread, mLoader);
  }
}
Example #6
PR_IMPLEMENT(PRStatus) PR_DestroyAlarm(PRAlarm *alarm)
{
    PRStatus rv;

    PR_Lock(alarm->lock);
    alarm->state = alarm_inactive;
    rv = PR_NotifyCondVar(alarm->cond);
    PR_Unlock(alarm->lock);

    if (rv == PR_SUCCESS)
        rv = PR_JoinThread(alarm->notifier);
    if (rv == PR_SUCCESS)
    {
        PR_DestroyCondVar(alarm->cond);
        PR_DestroyLock(alarm->lock);
        PR_DELETE(alarm);
    }
    return rv;
}  /* PR_DestroyAlarm */
Example #7
void _PR_CleanupIO(void)
{
    PR_FreeFileDesc(_pr_stdin);
    _pr_stdin = NULL;
    PR_FreeFileDesc(_pr_stdout);
    _pr_stdout = NULL;
    PR_FreeFileDesc(_pr_stderr);
    _pr_stderr = NULL;

    if (_pr_flock_cv) {
        PR_DestroyCondVar(_pr_flock_cv);
        _pr_flock_cv = NULL;
    }
    if (_pr_flock_lock) {
        PR_DestroyLock(_pr_flock_lock);
        _pr_flock_lock = NULL;
    }

    _PR_CleanupFdCache();
}
Example #8
DNSSession::~DNSSession()
{
    //PR_ASSERT(awake);
    PR_DestroyCondVar(cvar);
    PR_DestroyLock(lock);
    hostname = "grmblll";
    free_packet_list();

    PRInt32 host_cnt = 0;
    while (re_he.h_addr_list[host_cnt])
    {
        free(re_he.h_addr_list[host_cnt++]);
    }

    if (re_he.h_name)
    {
        free(re_he.h_name);
    }
    
    ar_free_hostent(&ar_host, 0);
}
Example #9
static PRIntervalTime Alarms1(PRUint32 loops)
{
    PRAlarm *alarm;
    AlarmData ad;
    PRIntervalTime overhead, timein = PR_IntervalNow();
    PRIntervalTime duration = PR_SecondsToInterval(3);

    PRLock *ml = PR_NewLock();
    PRCondVar *cv = PR_NewCondVar(ml);

    ad.ml = ml;
    ad.cv = cv;
    ad.rate = 1;
    ad.times = loops;
    ad.late = ad.times = 0;
    ad.duration = duration;
    ad.timein = PR_IntervalNow();
    ad.period = PR_SecondsToInterval(1);

    alarm = PR_CreateAlarm();

    (void)PR_SetAlarm(
        alarm, ad.period, ad.rate, AlarmFn1, &ad);
        
    overhead = PR_IntervalNow() - timein;

    PR_Lock(ml);
    while ((PRIntervalTime)(PR_IntervalNow() - ad.timein) < duration)
        PR_WaitCondVar(cv, PR_INTERVAL_NO_TIMEOUT);
    PR_Unlock(ml);

    timein = PR_IntervalNow();
    (void)PR_DestroyAlarm(alarm);
    PR_DestroyCondVar(cv);
    PR_DestroyLock(ml);
    overhead += (PR_IntervalNow() - timein);
    
    return duration + overhead;
}  /* Alarms1 */
Example #10
PR_IMPLEMENT(PRAlarm*) PR_CreateAlarm(void)
{
    PRAlarm *alarm = PR_NEWZAP(PRAlarm);
    if (alarm != NULL)
    {
        if ((alarm->lock = PR_NewLock()) == NULL) goto done;
        if ((alarm->cond = PR_NewCondVar(alarm->lock)) == NULL) goto done;
        alarm->state = alarm_active;
        PR_INIT_CLIST(&alarm->timers);
        alarm->notifier = PR_CreateThread(
            PR_USER_THREAD, pr_alarmNotifier, alarm,
            PR_GetThreadPriority(PR_GetCurrentThread()),
            PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0);
        if (alarm->notifier == NULL) goto done;
    }
    return alarm;

done:
    if (alarm->cond != NULL) PR_DestroyCondVar(alarm->cond);
    if (alarm->lock != NULL) PR_DestroyLock(alarm->lock);
    PR_DELETE(alarm);
    return NULL;
}  /* CreateAlarm */
Example #11
void 
CondVarTestSUU(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;
    
    exitcount=0;
    tcount=0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads */
    for(index=0; index<arg; ) {
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_LOCAL_THREAD);
        DPRINTF(("CondVarTestSUU: created thread 0x%lx\n", list[index].thread));
        index++;
    }

    for (loops = 0; loops < count; loops++) {
        /* Notify the threads */
        for(index=0; index<(arg); index++) {
            PR_Lock(list[index].lock);
            (*list[index].tcount)++;
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
	    DPRINTF(("PrivateCondVarThread: thread 0x%lx notified cvar = 0x%lx\n",
				PR_GetCurrentThread(), list[index].cvar));
        }

        /* Wait for threads to finish */
        PR_Lock(exitlock);
        while(exitcount < arg)
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        PR_ASSERT(exitcount >= arg);
        exitcount -= arg;
        PR_Unlock(exitlock);
    }

    /* Join all the threads */
    for(index=0; index<(arg); index++) 
        PR_JoinThread(list[index].thread);

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);
    PR_DestroyCondVar(exitcvar);
    PR_DestroyLock(exitlock);

    PR_DELETE(list);
}
Example #12
/***********************************************************************
** PRIVATE FUNCTION:    main
** DESCRIPTION:
**   Hammer on the file I/O system
** INPUTS:      The usual argc and argv
**              argv[0] - program name (not used)
**              argv[1] - the number of times to execute the major loop
**              argv[2] - the number of threads to toss into the batch
**              argv[3] - the clipping number applied to randoms
**              default values: loops = 2, threads = 10, limit = 57
** OUTPUTS:     None
** RETURN:      None
** SIDE EFFECTS:
**      Creates, accesses and deletes lots of files
** RESTRICTIONS:
**      (Currently) must have file create permission in "/usr/tmp".
** MEMORY:      NA
** ALGORITHM:
**      1) Fork a "Thread()"
**      2) Wait for 'interleave' seconds
**      3) For [0..'threads') repeat [1..2]
**      4) Mark all objects to stop
**      5) Collect the threads, accumulating the results
**      6) For [0..'loops') repeat [1..5]
**      7) Print accumulated results and exit
**
**      Characteristic output (from IRIX)
**          Random File: Using loops = 2, threads = 10, limit = 57
**          Random File: [min [avg] max] writes/sec average
***********************************************************************/
int main (int argc,      char   *argv[])
{
    PRLock *ml;
    PRUint32 id = 0;
    int active, poll;
    PRIntervalTime interleave;
    PRIntervalTime duration = 0;
    int limit = 0, loops = 0, threads = 0, times;
    PRUint32 writes, writesMin = 0x7fffffff, writesTot = 0, durationTot = 0, writesMax = 0;

    const char *where[] = {"okay", "open", "close", "delete", "write", "seek"};

    /*
     * The command line argument -d determines whether the test is being run
     * in debug mode. The regress tool requires only one line of output: PASS
     * or FAIL. All of the printfs associated with this test have been wrapped
     * in an if (debug_mode) test.
     * Usage: test_name -d
     */
	PLOptStatus os;
	PLOptState *opt = PL_CreateOptState(argc, argv, "Gdl:t:i:");
	while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
		if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 'G':  /* global threads */
			thread_scope = PR_GLOBAL_THREAD;
            break;
        case 'd':  /* debug mode */
			debug_mode = 1;
            break;
        case 'l':  /* limiting number */
			limit = atoi(opt->value);
            break;
        case 't':  /* number of threads */
			threads = atoi(opt->value);
            break;
        case 'i':  /* iteration counter */
			loops = atoi(opt->value);
            break;
         default:
            break;
        }
    }
	PL_DestroyOptState(opt);

 /* main test */
	
    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
    PR_STDIO_INIT();

    interleave = PR_SecondsToInterval(10);

#ifdef XP_MAC
	SetupMacPrintfLog("ranfile.log");
	debug_mode = 1;
#endif

    ml = PR_NewLock();
    cv = PR_NewCondVar(ml);

    if (loops == 0) loops = DEFAULT_LOOPS;
    if (limit == 0) limit = DEFAULT_LIMIT;
    if (threads == 0) threads = DEFAULT_THREADS;

    if (debug_mode) printf(
        "%s: Using loops = %d, threads = %d, limit = %d and %s threads\n",
        programName, loops, threads, limit,
        (thread_scope == PR_LOCAL_THREAD) ? "LOCAL" : "GLOBAL");

    for (times = 0; times < loops; ++times)
    {
        if (debug_mode) printf("%s: Setting concurrency level to %d\n", programName, times + 1);
        PR_SetConcurrency(times + 1);
        for (active = 0; active < threads; active++)
        {
            hammer[active].ml = ml;
            hammer[active].cv = cv;
            hammer[active].id = id++;
            hammer[active].writes = 0;
            hammer[active].action = sg_go;
            hammer[active].problem = sg_okay;
            hammer[active].limit = (Random() % limit) + 1;
            hammer[active].timein = PR_IntervalNow();
            hammer[active].thread = PR_CreateThread(
                PR_USER_THREAD, Thread, &hammer[active],
                PR_GetThreadPriority(PR_GetCurrentThread()),
                thread_scope, PR_JOINABLE_THREAD, 0);

            PR_Lock(ml);
            PR_WaitCondVar(cv, interleave);  /* start new ones slowly */
            PR_Unlock(ml);
        }

        /*
         * The last thread started has had the opportunity to run for
         * 'interleave' seconds. Now gather them all back in.
         */
        PR_Lock(ml);
        for (poll = 0; poll < threads; poll++)
        {
            if (hammer[poll].action == sg_go)  /* don't overwrite done */
                hammer[poll].action = sg_stop;  /* ask him to stop */
        }
        PR_Unlock(ml);

        while (active > 0)
        {
            for (poll = 0; poll < threads; poll++)
            {
                PR_Lock(ml);
                while (hammer[poll].action < sg_done)
                    PR_WaitCondVar(cv, PR_INTERVAL_NO_TIMEOUT);
                PR_Unlock(ml);

                active -= 1;  /* this is another one down */
                (void)PR_JoinThread(hammer[poll].thread);
                hammer[poll].thread = NULL;
                if (hammer[poll].problem == sg_okay)
                {
                    duration = PR_IntervalToMilliseconds(
                        PR_IntervalNow() - hammer[poll].timein);
                    writes = hammer[poll].writes * 1000 / duration;
                    if (writes < writesMin) 
                        writesMin = writes;
                    if (writes > writesMax) 
                        writesMax = writes;
                    writesTot += hammer[poll].writes;
                    durationTot += duration;
                }
                else
                {
                    if (debug_mode) printf(
                        "%s: test failed %s after %ld seconds\n",
                        programName, where[hammer[poll].problem], duration);
                    else failed_already = 1;
                }
            }
        }
    }
    if (debug_mode) printf(
        "%s: [%ld [%ld] %ld] writes/sec average\n",
        programName, writesMin, writesTot * 1000 / durationTot, writesMax);

    PR_DestroyCondVar(cv);
    PR_DestroyLock(ml);

    if (failed_already)
    {
        printf("FAIL\n");
        return 1;
    }
    else
    {
        printf("PASS\n");
        return 0;
    }
}  /* main */
Example #13
XPCJSRuntime::~XPCJSRuntime()
{
    if (mWatchdogWakeup)
    {
        // If the watchdog thread is running, tell it to terminate, waking it
        // up if necessary, and wait until it signals that it has finished. As
        // we must release the lock before calling PR_DestroyCondVar, we use an
        // extra block here.
        {
            AutoLockJSGC lock(mJSRuntime);
            if (mWatchdogThread) {
                mWatchdogThread = nsnull;
                PR_NotifyCondVar(mWatchdogWakeup);
                PR_WaitCondVar(mWatchdogWakeup, PR_INTERVAL_NO_TIMEOUT);
            }
        }
        PR_DestroyCondVar(mWatchdogWakeup);
        mWatchdogWakeup = nsnull;
    }

#ifdef XPC_DUMP_AT_SHUTDOWN
    {
    // count the total JSContexts in use
    JSContext* iter = nsnull;
    int count = 0;
    while(JS_ContextIterator(mJSRuntime, &iter))
        count ++;
    if(count)
        printf("deleting XPCJSRuntime with %d live JSContexts\n", count);
    }
#endif

    // clean up and destroy maps...
    if(mWrappedJSMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mWrappedJSMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live wrapped JSObject\n", (int)count);
#endif
        mWrappedJSMap->Enumerate(WrappedJSShutdownMarker, mJSRuntime);
        delete mWrappedJSMap;
    }

    if(mWrappedJSClassMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mWrappedJSClassMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live nsXPCWrappedJSClass\n", (int)count);
#endif
        delete mWrappedJSClassMap;
    }

    if(mIID2NativeInterfaceMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mIID2NativeInterfaceMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live XPCNativeInterfaces\n", (int)count);
#endif
        delete mIID2NativeInterfaceMap;
    }

    if(mClassInfo2NativeSetMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mClassInfo2NativeSetMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live XPCNativeSets\n", (int)count);
#endif
        delete mClassInfo2NativeSetMap;
    }

    if(mNativeSetMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mNativeSetMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live XPCNativeSets\n", (int)count);
#endif
        delete mNativeSetMap;
    }

    if(mMapLock)
        XPCAutoLock::DestroyLock(mMapLock);

    if(mThisTranslatorMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mThisTranslatorMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live ThisTranslator\n", (int)count);
#endif
        delete mThisTranslatorMap;
    }

#ifdef XPC_CHECK_WRAPPERS_AT_SHUTDOWN
    if(DEBUG_WrappedNativeHashtable)
    {
        int LiveWrapperCount = 0;
        JS_DHashTableEnumerate(DEBUG_WrappedNativeHashtable,
                               DEBUG_WrapperChecker, &LiveWrapperCount);
        if(LiveWrapperCount)
            printf("deleting XPCJSRuntime with %d live XPCWrappedNative (found in wrapper check)\n", (int)LiveWrapperCount);
        JS_DHashTableDestroy(DEBUG_WrappedNativeHashtable);
    }
#endif

    if(mNativeScriptableSharedMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mNativeScriptableSharedMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live XPCNativeScriptableShared\n", (int)count);
#endif
        delete mNativeScriptableSharedMap;
    }

    if(mDyingWrappedNativeProtoMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mDyingWrappedNativeProtoMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live but dying XPCWrappedNativeProto\n", (int)count);
#endif
        delete mDyingWrappedNativeProtoMap;
    }

    if(mDetachedWrappedNativeProtoMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mDetachedWrappedNativeProtoMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live detached XPCWrappedNativeProto\n", (int)count);
#endif
        delete mDetachedWrappedNativeProtoMap;
    }

    if(mExplicitNativeWrapperMap)
    {
#ifdef XPC_DUMP_AT_SHUTDOWN
        uint32 count = mExplicitNativeWrapperMap->Count();
        if(count)
            printf("deleting XPCJSRuntime with %d live explicit XPCNativeWrapper\n", (int)count);
#endif
        delete mExplicitNativeWrapperMap;
    }

    // unwire the readable/JSString sharing magic
    XPCStringConvert::ShutdownDOMStringFinalizer();

    XPCConvert::RemoveXPCOMUCStringFinalizer();

    if(mJSHolders.ops)
    {
        JS_DHashTableFinish(&mJSHolders);
        mJSHolders.ops = nsnull;
    }

    if(mJSRuntime)
    {
        JS_DestroyRuntime(mJSRuntime);
        JS_ShutDown();
#ifdef DEBUG_shaver_off
        fprintf(stderr, "nJRSI: destroyed runtime %p\n", (void *)mJSRuntime);
#endif
    }

    XPCPerThreadData::ShutDown();
}
Example #14
nsresult
MediaEngineWebRTCVideoSource::Snapshot(PRUint32 aDuration, nsIDOMFile** aFile)
{
  /**
   * To get a Snapshot we do the following:
   * - Set a condition variable (mInSnapshotMode) to true
   * - Attach the external renderer and start the camera
   * - Wait for the condition variable to change to false
   *
   * Starting the camera has the effect of invoking DeliverFrame() when
   * the first frame arrives from the camera. We only need one frame for
   * GetCaptureDeviceSnapshot to work, so we immediately set the condition
   * variable to false and notify this method.
   *
   * This causes the current thread to continue (PR_WaitCondVar will return),
   * at which point we can grab a snapshot, convert it to a file, and
   * return from this function after cleaning up the temporary stream object
   * and calling Stop() on the media source.
   */
  *aFile = nsnull;
  if (!mInitDone || mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }

  mSnapshotLock = PR_NewLock();
  mSnapshotCondVar = PR_NewCondVar(mSnapshotLock);

  PR_Lock(mSnapshotLock);
  mInSnapshotMode = true;

  // Start the rendering (equivalent to calling Start(), but without a track).
  int error = 0;
  if (!mInitDone || mState != kAllocated) {
    // Release and free the snapshot lock before bailing out.
    PR_Unlock(mSnapshotLock);
    PR_DestroyCondVar(mSnapshotCondVar);
    PR_DestroyLock(mSnapshotLock);
    return NS_ERROR_FAILURE;
  }
  error = mViERender->AddRenderer(mCapIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
  if (error == -1) {
    PR_Unlock(mSnapshotLock);
    PR_DestroyCondVar(mSnapshotCondVar);
    PR_DestroyLock(mSnapshotLock);
    return NS_ERROR_FAILURE;
  }
  error = mViERender->StartRender(mCapIndex);
  if (error == -1) {
    PR_Unlock(mSnapshotLock);
    PR_DestroyCondVar(mSnapshotCondVar);
    PR_DestroyLock(mSnapshotLock);
    return NS_ERROR_FAILURE;
  }

  // Wait for the condition variable, will be set in DeliverFrame.
  // We use a while loop, because even if PR_WaitCondVar returns, it's not
  // guaranteed that the condition variable changed.
  while (mInSnapshotMode) {
    PR_WaitCondVar(mSnapshotCondVar, PR_INTERVAL_NO_TIMEOUT);
  }

  // If we get here, DeliverFrame received at least one frame.
  PR_Unlock(mSnapshotLock);
  PR_DestroyCondVar(mSnapshotCondVar);
  PR_DestroyLock(mSnapshotLock);

  webrtc::ViEFile* vieFile = webrtc::ViEFile::GetInterface(mVideoEngine);
  if (!vieFile) {
    return NS_ERROR_FAILURE;
  }

  // Create a temporary file on the main thread and put the snapshot in it.
  // See Run() in MediaEngineWebRTCVideo.h (sets mSnapshotPath).
  NS_DispatchToMainThread(this, NS_DISPATCH_SYNC);

  if (!mSnapshotPath) {
    return NS_ERROR_FAILURE;
  }

  const char* path = NS_ConvertUTF16toUTF8(*mSnapshotPath).get();
  if (vieFile->GetCaptureDeviceSnapshot(mCapIndex, path) < 0) {
    delete mSnapshotPath;
    mSnapshotPath = NULL;
    return NS_ERROR_FAILURE;
  }

  // Stop the camera.
  mViERender->StopRender(mCapIndex);
  mViERender->RemoveRenderer(mCapIndex);

  nsCOMPtr<nsIFile> file;
  nsresult rv = NS_NewLocalFile(*mSnapshotPath, false, getter_AddRefs(file));

  delete mSnapshotPath;
  mSnapshotPath = NULL;

  NS_ENSURE_SUCCESS(rv, rv);

  NS_ADDREF(*aFile = new nsDOMFileFile(file));

  return NS_OK;
}
Example #15
PRIntn main(PRIntn argc, char *argv[])
{
    PRThread *tJitter;
    PRThread *tAccept;
    PRThread *tConnect;
    PRStatus rv;
    /* This test is valid for WinNT only! */

#if !defined(WINNT)
    return 0;
#endif

    {
        /*
        ** Get command line options
        */
        PLOptStatus os;
        PLOptState *opt = PL_CreateOptState(argc, argv, "hdrvj:");

	    while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
        {
		    if (PL_OPT_BAD == os) continue;
            switch (opt->option)
            {
            case 'd':  /* debug */
                debug = 1;
			    msgLevel = PR_LOG_ERROR;
                break;
            case 'v':  /* verbose mode */
                verbose = 1;
			    msgLevel = PR_LOG_DEBUG;
                break;
            case 'j':
                jitter = atoi(opt->value);
                if ( jitter == 0)
                    jitter = JITTER_DEFAULT;
                break;
            case 'r':
                resume = PR_TRUE;
                break;
            case 'h':  /* help message */
			    Help();
                break;
             default:
                break;
            }
        }
	    PL_DestroyOptState(opt);
    }

    lm = PR_NewLogModule("Test");       /* Initialize logging */

    /* set concurrency */
    PR_SetConcurrency( 4 );

    /* setup thread synchronization mechanics */
    ml = PR_NewLock();
    cv = PR_NewCondVar( ml );

    /* setup a tcp socket */
    memset(&listenAddr, 0, sizeof(listenAddr));
    rv = PR_InitializeNetAddr(PR_IpAddrAny, BASE_PORT, &listenAddr);
    PR_ASSERT( PR_SUCCESS == rv );

    listenSock = PR_NewTCPSocket();
    PR_ASSERT( listenSock );

    rv = PR_Bind( listenSock, &listenAddr);
    PR_ASSERT( PR_SUCCESS == rv );

    rv = PR_Listen( listenSock, 5 );
    PR_ASSERT( PR_SUCCESS == rv );

    /* open a file for writing, provoke bug */
    file1 = PR_Open("xxxTestFile", PR_CREATE_FILE | PR_RDWR, 0666);

    /* create Connect thread */
    tConnect = PR_CreateThread(
        PR_USER_THREAD, ConnectThread, NULL,
        PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
        PR_JOINABLE_THREAD, 0 );
    PR_ASSERT( tConnect );

    /* create jitter off thread */
    tJitter = PR_CreateThread(
        PR_USER_THREAD, JitterThread, NULL,
        PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
        PR_JOINABLE_THREAD, 0 );
    PR_ASSERT( tJitter );

    /* create acceptread thread */
    tAccept = PR_CreateThread(
        PR_USER_THREAD, AcceptThread, NULL,
        PR_PRIORITY_NORMAL, PR_LOCAL_THREAD,
        PR_JOINABLE_THREAD, 0 );
    PR_ASSERT( tAccept );

    /* wait for all threads to quit, then terminate gracefully */
    PR_JoinThread( tConnect );
    PR_JoinThread( tAccept );
    PR_JoinThread( tJitter );
    PR_Close( listenSock );
    PR_DestroyCondVar(cv);
    PR_DestroyLock(ml);
    PR_Close( file1 );
    PR_Delete( "xxxTestFile");

    /* test return and exit */
    if (debug) printf("%s\n", (failed_already)? "FAIL" : "PASS");
    return( (failed_already == PR_TRUE )? 1 : 0 );
}  /* main() */
Example #16
PRIntn PR_CALLBACK Switch(PRIntn argc, char **argv)
{
	PLOptStatus os;
    PRStatus status;
    PRBool help = PR_FALSE;
    PRUintn concurrency = 1;
    Shared *shared, *link;
    PRIntervalTime timein, timeout;
    PRThreadScope thread_scope = PR_LOCAL_THREAD;
    PRUintn thread_count, inner_count, loop_count, average;
    PRUintn thread_limit = DEFAULT_THREADS, loop_limit = DEFAULT_LOOPS;
	PLOptState *opt = PL_CreateOptState(argc, argv, "hdvc:t:C:G");
	while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
		if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 'v':  /* verbose mode */
			verbosity = PR_TRUE;
        case 'd':  /* debug mode */
			debug_mode = PR_TRUE;
            break;
        case 'c':  /* loop counter */
			loop_limit = atoi(opt->value);
            break;
        case 't':  /* thread limit */
			thread_limit = atoi(opt->value);
            break;
        case 'C':  /* Concurrency limit */
			concurrency = atoi(opt->value);
            break;
        case 'G':  /* global threads only */
			thread_scope = PR_GLOBAL_THREAD;
            break;
        case 'h':  /* help message */
			Help();
			help = PR_TRUE;
            break;
         default:
            break;
        }
    }
	PL_DestroyOptState(opt);

    if (help) return -1;

	if (PR_TRUE == debug_mode)
	{
		debug_out = PR_STDOUT;
		PR_fprintf(debug_out, "Test parameters\n");
		PR_fprintf(debug_out, "\tThreads involved: %d\n", thread_limit);
		PR_fprintf(debug_out, "\tIteration limit: %d\n", loop_limit);
		PR_fprintf(debug_out, "\tConcurrency: %d\n", concurrency);
		PR_fprintf(
			debug_out, "\tThread type: %s\n",
			(PR_GLOBAL_THREAD == thread_scope) ? "GLOBAL" : "LOCAL");
	}

    PR_SetConcurrency(concurrency);

    link = &home;
    home.ml = PR_NewLock();
    home.cv = PR_NewCondVar(home.ml);
    home.twiddle = PR_FALSE;
    home.next = NULL;

    timeout = 0;

    for (thread_count = 1; thread_count <= thread_limit; ++thread_count)
    {
        shared = PR_NEWZAP(Shared);

        shared->ml = home.ml;
        shared->cv = PR_NewCondVar(home.ml);
        shared->twiddle = PR_TRUE;
        shared->next = link;
        link = shared;

        shared->thread = PR_CreateThread(
            PR_USER_THREAD, Notified, shared,
            PR_PRIORITY_HIGH, thread_scope,
            PR_JOINABLE_THREAD, 0);
        PR_ASSERT(shared->thread != NULL);
        if (NULL == shared->thread)
            failed = PR_TRUE;
	}

    for (loop_count = 1; loop_count <= loop_limit; ++loop_count)
    {
		timein = PR_IntervalNow();
		for (inner_count = 0; inner_count < INNER_LOOPS; ++inner_count)
		{
			PR_Lock(home.ml);
			home.twiddle = PR_TRUE;
			shared->twiddle = PR_FALSE;
			PR_NotifyCondVar(shared->cv);
			while (home.twiddle)
            {
				status = PR_WaitCondVar(home.cv, PR_INTERVAL_NO_TIMEOUT);
				if (PR_FAILURE == status)
				    failed = PR_TRUE;
            }
			PR_Unlock(home.ml);
		}
		timeout += (PR_IntervalNow() - timein);
	}

	if (debug_mode)
	{
		average = PR_IntervalToMicroseconds(timeout)
			/ (INNER_LOOPS * loop_limit * thread_count);
		PR_fprintf(
			debug_out, "Average switch times %d usecs for %d threads\n",
            average, thread_limit);
	}

    link = shared;
    for (thread_count = 1; thread_count <= thread_limit; ++thread_count)
    {
        if (&home == link) break;
        status = PR_Interrupt(link->thread);
		if (PR_SUCCESS != status)
        {
            failed = PR_TRUE;
            if (debug_mode)
			    PL_FPrintError(debug_out, "Failed to interrupt");
        }
		link = link->next; 
    }

    for (thread_count = 1; thread_count <= thread_limit; ++thread_count)
    {
        link = shared->next;
        status = PR_JoinThread(shared->thread);
		if (PR_SUCCESS != status)
		{
            failed = PR_TRUE;
            if (debug_mode)
			    PL_FPrintError(debug_out, "Failed to join");
        }
        PR_DestroyCondVar(shared->cv);
        PR_DELETE(shared);
        if (&home == link) break;
        shared = link;
    }
    PR_DestroyCondVar(home.cv);
    PR_DestroyLock(home.ml);

    PR_fprintf(PR_STDOUT, ((failed) ? "FAILED\n" : "PASSED\n"));
    return ((failed) ? 1 : 0);
}  /* Switch */
Example #17
NSTP_DestroyPool(NSTPPool pool, PRBool doitnow)
{
    NSTPWorkItem *work;
	
    PR_Lock(pool->lock);
	
    /*
	* Indicate pool is being shut down, so no more requests
	* will be accepted.
	*/
    pool->shutdown = PR_TRUE;
	
    if (doitnow) {
		
		/* Complete all queued work items with NSTP_STATUS_SHUTDOWN_REJECT */
		while ((work = pool->head) != NULL) {
			
			/* Dequeue work item */
			pool->head = work->next;
			if (pool->head == NULL) {
				pool->tail = NULL;
			}
			
			PR_Unlock(pool->lock);
			
			/* Acquire the lock used by the calling, waiting thread */
			PR_Lock ( work -> waiter_mon -> lock);
			
			/* Set work completion status */
			work->work_status = NSTP_STATUS_SHUTDOWN_REJECT;
			work->work_complete = PR_TRUE;
			
			/* Wake up the calling, waiting thread */
			PR_NotifyCondVar (work -> waiter_mon -> cvar);
			
			/* Release the lock */
			PR_Unlock (work -> waiter_mon -> lock);
			
			PR_Lock (pool -> lock);
		}
    }
    else {
		/* doitnow is PR_FALSE */
		
		/* Wait for work queue to be empty */
		while (pool->head != NULL) {
			PR_WaitCondVar(pool->cvar, PR_INTERVAL_NO_TIMEOUT);
		}
    }
	
    if (pool -> stats.threadCount > 0)
	{
		NSTPThread *thread;
		NSTPThread *nxt_thread;
		
		for (thread = pool->threads; thread; thread = nxt_thread) {
			nxt_thread = thread->next;
			thread->shutdown = PR_TRUE;
		}
		
		/* Wakeup all threads to look at their shutdown flags */
		PR_NotifyAllCondVar(pool->cvar);
		
		/* Wait for threadCount to go to zero */
		while (pool -> stats.threadCount > 0) 
		{
			PR_WaitCondVar(pool->cvar, PR_INTERVAL_NO_TIMEOUT);
		}
    }
	
    PR_Unlock(pool->lock);
	
    PR_DestroyCondVar(pool->cvar);
    PR_DestroyLock(pool->lock);
    PR_DELETE(pool);
}
Example #18
/*
** Destroy a monitor. There must be no thread waiting on the monitor's
** condition variable. The caller is responsible for guaranteeing that the
** monitor is no longer in use.
*/
PR_IMPLEMENT(void) PR_DestroyMonitor(PRMonitor *mon)
{
	PR_DestroyLock(mon->cvar->lock);
    PR_DestroyCondVar(mon->cvar);
    PR_DELETE(mon);
}
Example #19
void 
CondVarMixedTest(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;
    PRInt32 *ptcount;

    exitcount=0;
    tcount=0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));
    ptcount = (PRInt32 *)PR_CALLOC(sizeof(*ptcount) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads */
    for(index=0; index<arg*4; ) {
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_MillisecondsToInterval(50),
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_LOCAL_THREAD);
        index++;
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_MillisecondsToInterval(50),
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_GLOBAL_THREAD);
        index++;
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_MillisecondsToInterval(50),
                         ptcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_LOCAL_THREAD);
        index++;
	ptcount++;

        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_MillisecondsToInterval(50),
                         ptcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_GLOBAL_THREAD);
        index++;
	ptcount++;
    }


    /* Notify every 3rd thread */
    for (loops = 0; loops < count; loops++) {

        /* Notify the threads */
        for(index=0; index<(arg*4); index+=3) {

            PR_Lock(list[index].lock);
            (*list[index].tcount)++;
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);

        }
        /* Wait for threads to finish */
        PR_Lock(exitlock);
        while(exitcount < arg*4)
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        PR_ASSERT(exitcount >= arg*4);
        exitcount -= arg*4;
        PR_Unlock(exitlock);
    }

    /* Join all the threads */
    for(index=0; index<(arg*4); index++) {
        PR_JoinThread(list[index].thread);
        if (list[index].internal) {
            PR_Lock(list[index].lock);
            PR_DestroyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
            PR_DestroyLock(list[index].lock);
        }
    }

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);

    PR_DELETE(list);
}
Example #20
RCCondition::~RCCondition()
{
    if (NULL != cv) PR_DestroyCondVar(cv);
}  /* RCCondition::~RCCondition */
Example #21
PR_IMPLEMENT(PRWaitGroup*) PR_CreateWaitGroup(PRInt32 size /* ignored */)
{
    PRWaitGroup *wg;

    if (NULL == (wg = PR_NEWZAP(PRWaitGroup)))
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        goto failed;
    }
    /* the wait group itself */
    wg->ml = PR_NewLock();
    if (NULL == wg->ml) goto failed_lock;
    wg->io_taken = PR_NewCondVar(wg->ml);
    if (NULL == wg->io_taken) goto failed_cvar0;
    wg->io_complete = PR_NewCondVar(wg->ml);
    if (NULL == wg->io_complete) goto failed_cvar1;
    wg->new_business = PR_NewCondVar(wg->ml);
    if (NULL == wg->new_business) goto failed_cvar2;
    wg->mw_manage = PR_NewCondVar(wg->ml);
    if (NULL == wg->mw_manage) goto failed_cvar3;

    PR_INIT_CLIST(&wg->group_link);
    PR_INIT_CLIST(&wg->io_ready);

    /* the waiters sequence */
    wg->waiter = (_PRWaiterHash*)PR_CALLOC(
        sizeof(_PRWaiterHash) +
        (_PR_DEFAULT_HASH_LENGTH * sizeof(PRRecvWait*)));
    if (NULL == wg->waiter)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        goto failed_waiter;
    }
    wg->waiter->count = 0;
    wg->waiter->length = _PR_DEFAULT_HASH_LENGTH;

#ifdef WINNT
    _PR_MD_NEW_LOCK(&wg->mdlock);
    PR_INIT_CLIST(&wg->wait_list);
#endif /* WINNT */

    PR_Lock(mw_lock);
    PR_APPEND_LINK(&wg->group_link, &mw_state->group_list);
    PR_Unlock(mw_lock);
    return wg;

failed_waiter:
    PR_DestroyCondVar(wg->mw_manage);
failed_cvar3:
    PR_DestroyCondVar(wg->new_business);
failed_cvar2:
    PR_DestroyCondVar(wg->io_complete);
failed_cvar1:
    PR_DestroyCondVar(wg->io_taken);
failed_cvar0:
    PR_DestroyLock(wg->ml);
failed_lock:
    PR_DELETE(wg);
    wg = NULL;

failed:
    return wg;
}  /* MW_CreateWaitGroup */
Example #22
static PRIntervalTime Alarms3(PRUint32 loops)
{
    PRIntn i;
    PRStatus rv;
    PRAlarm *alarm;
    AlarmData ad[3];
    PRIntervalTime duration = PR_SecondsToInterval(30);
    PRIntervalTime overhead, timein = PR_IntervalNow();

    PRLock *ml = PR_NewLock();
    PRCondVar *cv = PR_NewCondVar(ml);

    for (i = 0; i < 3; ++i)
    {
        ad[i].ml = ml;
        ad[i].cv = cv;
        ad[i].rate = 1;
        ad[i].times = loops;
        ad[i].duration = duration;
        ad[i].late = ad[i].times = 0;
        ad[i].timein = PR_IntervalNow();
        ad[i].period = PR_SecondsToInterval(1);

        /* more loops, faster rate => same elapsed time */
        ad[i].times = (i + 1) * loops;
        ad[i].rate = (i + 1) * 10;
    }

    alarm = PR_CreateAlarm();

    for (i = 0; i < 3; ++i)
    {
        (void)PR_SetAlarm(
            alarm, ad[i].period, ad[i].rate,
            AlarmFn2, &ad[i]);
    }
        
    overhead = PR_IntervalNow() - timein;

    PR_Lock(ml);
    for (i = 0; i < 3; ++i)
    {
        while ((PRIntervalTime)(PR_IntervalNow() - ad[i].timein) < duration)
            PR_WaitCondVar(cv, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(ml);

    timein = PR_IntervalNow();

    if (debug_mode)
        printf("Alarms3 finished at %u, %u, %u\n",
            ad[0].timein, ad[1].timein, ad[2].timein);

    rv = PR_DestroyAlarm(alarm);
    if (rv != PR_SUCCESS)
    {
        if (!debug_mode)
            failed_already = 1;
        else
            printf("***Destroying alarm status: FAIL\n");
    }
    PR_DestroyCondVar(cv);
    PR_DestroyLock(ml);
    
    overhead += (duration / 3);
    overhead += (PR_IntervalNow() - timein);

    return overhead;
}  /* Alarms3 */
Example #23
void 
CondVarTestPUU(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;
    PRInt32 *tcount, *saved_tcount;

    exitcount=0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));
    saved_tcount = tcount = (PRInt32 *)PR_CALLOC(sizeof(*tcount) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads */
    for(index=0; index<arg; ) {
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_LOCAL_THREAD);

	DPRINTF(("CondVarTestPUU: created thread 0x%lx\n",list[index].thread));
        index++;
	tcount++;
    }

    for (loops = 0; loops < count; loops++) {
        /* Notify the threads */
        for(index=0; index<(arg); index++) {

            PR_Lock(list[index].lock);
            (*list[index].tcount)++;
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
        }

        PR_Lock(exitlock);
        /* Wait for threads to finish */
        while (exitcount < arg) {
            DPRINTF(("CondVarTestPUU: thread 0x%lx waiting on exitcvar = 0x%lx cnt = %ld\n",
                    PR_GetCurrentThread(), exitcvar, exitcount));
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        }
        PR_ASSERT(exitcount >= arg);
        exitcount -= arg;
        PR_Unlock(exitlock);
    }

    /* Join all the threads */
    for(index=0; index<(arg); index++)  {
	DPRINTF(("CondVarTestPUU: joining thread 0x%lx\n",list[index].thread));
        PR_JoinThread(list[index].thread);
        if (list[index].internal) {
            PR_Lock(list[index].lock);
            PR_DestroyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
            PR_DestroyLock(list[index].lock);
        }
    }

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);
    PR_DestroyCondVar(exitcvar);
    PR_DestroyLock(exitlock);

    PR_DELETE(list);
    PR_DELETE(saved_tcount);
}
Example #24
void 
CondVarTest(void *_arg)
{
    PRInt32 arg = (PRInt32)_arg;
    PRInt32 index, loops;
    threadinfo *list;
    PRLock *sharedlock;
    PRCondVar *sharedcvar;
    PRLock *exitlock;
    PRCondVar *exitcvar;
    PRInt32 *ptcount, *saved_ptcount;

    exitcount=0;
    tcount=0;
    list = (threadinfo *)PR_MALLOC(sizeof(threadinfo) * (arg * 4));
    saved_ptcount = ptcount = (PRInt32 *)PR_CALLOC(sizeof(*ptcount) * (arg * 4));

    sharedlock = PR_NewLock();
    sharedcvar = PR_NewCondVar(sharedlock);
    exitlock = PR_NewLock();
    exitcvar = PR_NewCondVar(exitlock);

    /* Create the threads */
    for(index=0; index<arg*4; ) {
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_LOCAL_THREAD);

        index++;
        CreateTestThread(&list[index],
                         index,
                         sharedlock,
                         sharedcvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         &tcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_TRUE,
                         PR_GLOBAL_THREAD);

        index++;
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         ptcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_LOCAL_THREAD);
        index++;
	ptcount++;
        list[index].lock = PR_NewLock();
        list[index].cvar = PR_NewCondVar(list[index].lock);
        CreateTestThread(&list[index],
                         index,
                         list[index].lock,
                         list[index].cvar,
                         count,
                         PR_INTERVAL_NO_TIMEOUT,
                         ptcount,
                         exitlock,
                         exitcvar,
                         &exitcount,
                         PR_FALSE,
                         PR_GLOBAL_THREAD);

        index++;
	ptcount++;
    }

    for (loops = 0; loops < count; loops++) {

        /* Notify the threads */
        for(index=0; index<(arg*4); index++) {
            PR_Lock(list[index].lock);
            (*list[index].tcount)++;
            PR_NotifyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
        }

#if 0
        printf("wait for threads done\n");
#endif

        /* Wait for threads to finish */
        PR_Lock(exitlock);
        while(exitcount < arg*4)
            PR_WaitCondVar(exitcvar, PR_SecondsToInterval(60));
        PR_ASSERT(exitcount >= arg*4);
        exitcount -= arg*4;
        PR_Unlock(exitlock);
#if 0
        printf("threads ready\n");
#endif
    }

    /* Join all the threads */
    for(index=0; index<(arg*4); index++) {
        PR_JoinThread(list[index].thread);
        if (list[index].internal) {
            PR_Lock(list[index].lock);
            PR_DestroyCondVar(list[index].cvar);
            PR_Unlock(list[index].lock);
            PR_DestroyLock(list[index].lock);
        }
    }

    PR_DestroyCondVar(sharedcvar);
    PR_DestroyLock(sharedlock);
    PR_DestroyCondVar(exitcvar);
    PR_DestroyLock(exitlock);

    PR_DELETE(list);
    PR_DELETE(saved_ptcount);
}
Example #25
void
referint_thread_func(void *arg)
{
    PRFileDesc *prfd;
    char **plugin_argv = (char **)arg;
    char *logfilename;
    char thisline[MAX_LINE];
    char delimiter[]="\t\n";
    char *ptoken;
    char *tmprdn;
    char *iter = NULL;
    Slapi_DN *sdn = NULL;
    Slapi_DN *tmpsuperior = NULL;
    int logChanges = 0;
    int delay;
    int no_changes;

    if(plugin_argv == NULL){
        slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
            "referint_thread_func not get args \n" );
        return;
    }

    delay = atoi(plugin_argv[0]);
    logfilename = plugin_argv[1]; 
    logChanges = atoi(plugin_argv[2]);
    /*
     * keep running this thread until plugin is signaled to close
     */
    while(1){ 
        no_changes=1;
        while(no_changes){
            PR_Lock(keeprunning_mutex);
            if(keeprunning == 0){
                PR_Unlock(keeprunning_mutex);
                break;
            }
            PR_Unlock(keeprunning_mutex);

            referint_lock();
            if (( prfd = PR_Open( logfilename, PR_RDONLY, REFERINT_DEFAULT_FILE_MODE )) == NULL ){
                referint_unlock();
                /* go back to sleep and wait for this file */
                PR_Lock(keeprunning_mutex);
                PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay));
                PR_Unlock(keeprunning_mutex);
            } else {
                no_changes = 0;
            }
        }
        /*
         *  Check keeprunning here because, after breaking out of the
         *  no-changes loop on shutdown, we also need to break out of this
         *  loop before trying to apply the changes. The server will pick
         *  them up on the next startup, as the file still exists.
         */
        PR_Lock(keeprunning_mutex);
        if(keeprunning == 0){
            PR_Unlock(keeprunning_mutex);
            break;
        }
        PR_Unlock(keeprunning_mutex);  

        while( GetNextLine(thisline, MAX_LINE, prfd) ){
            ptoken = ldap_utf8strtok_r(thisline, delimiter, &iter);
            sdn = slapi_sdn_new_normdn_byref(ptoken);
            ptoken = ldap_utf8strtok_r (NULL, delimiter, &iter);

            if(!strcasecmp(ptoken, "NULL")) {
                tmprdn = NULL;
            } else {
                tmprdn = slapi_ch_smprintf("%s", ptoken);
            }

            ptoken = ldap_utf8strtok_r (NULL, delimiter, &iter);
            if (!strcasecmp(ptoken, "NULL")) {
                tmpsuperior = NULL;
            } else {
                tmpsuperior = slapi_sdn_new_normdn_byref(ptoken);
            }
            ptoken = ldap_utf8strtok_r (NULL, delimiter, &iter);
            if (strcasecmp(ptoken, "NULL") != 0) {
                /* Set the bind DN in the thread data */
                if(slapi_td_set_dn(slapi_ch_strdup(ptoken))){
                    slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,"Failed to set thread data\n");
                }
            }

            update_integrity(plugin_argv, sdn, tmprdn, tmpsuperior, logChanges);

            slapi_sdn_free(&sdn);
            slapi_ch_free_string(&tmprdn);
            slapi_sdn_free(&tmpsuperior);
        }

        PR_Close(prfd);

        /* remove the original file */
        if( PR_SUCCESS != PR_Delete(logfilename) ){
            slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
                "referint_postop_close could not delete \"%s\"\n", logfilename );
        }

        /* unlock and let other writers back at the file */
        referint_unlock();

        /* wait on condition here */
        PR_Lock(keeprunning_mutex);
        PR_WaitCondVar(keeprunning_cv, PR_SecondsToInterval(delay));
        PR_Unlock(keeprunning_mutex);
    }

    /* cleanup resources allocated in start  */
    if (NULL != keeprunning_mutex) {
        PR_DestroyLock(keeprunning_mutex);
    }
    if (NULL != referint_mutex) {
        PR_DestroyLock(referint_mutex);
    }
    if (NULL != keeprunning_cv) {
        PR_DestroyCondVar(keeprunning_cv);
    }
}
Example #26
int
test_mutex2(int argc, char *argv[])
{

    PRThread *consThread, *prodThread, *prodThread2, *prodThread3;
    int x = 10, y = 20, z = 30;
    PKIX_UInt32 actualMinorVersion;
    PKIX_UInt32 j = 0;

    PKIX_TEST_STD_VARS();

    startTests("Mutex and Threads");

    PKIX_TEST_EXPECT_NO_ERROR(
        PKIX_PL_NssContext_Create(0, PKIX_FALSE, NULL, &plContext));

    (void)printf("Attempting to create new mutex...\n");
    subTest("Mutex Creation");
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_Mutex_Create(&mutex, plContext));

    cv = PR_NewCondVar(*(PRLock **)mutex);

    subTest("Starting consumer thread");
    consThread = PR_CreateThread(PR_USER_THREAD,
                                 consumer,
                                 NULL,
                                 PR_PRIORITY_NORMAL,
                                 PR_LOCAL_THREAD,
                                 PR_JOINABLE_THREAD,
                                 0);

    subTest("Starting producer thread 1");
    prodThread = PR_CreateThread(PR_USER_THREAD,
                                 producer,
                                 &x,
                                 PR_PRIORITY_NORMAL,
                                 PR_LOCAL_THREAD,
                                 PR_JOINABLE_THREAD,
                                 0);

    subTest("Starting producer thread 2");
    prodThread2 = PR_CreateThread(PR_USER_THREAD,
                                  producer,
                                  &y,
                                  PR_PRIORITY_NORMAL,
                                  PR_LOCAL_THREAD,
                                  PR_JOINABLE_THREAD,
                                  0);

    subTest("Starting producer thread 3");
    prodThread3 = PR_CreateThread(PR_USER_THREAD,
                                  producer,
                                  &z,
                                  PR_PRIORITY_NORMAL,
                                  PR_LOCAL_THREAD,
                                  PR_JOINABLE_THREAD,
                                  0);

    PR_JoinThread(consThread);

    (void)PR_DestroyCondVar(cv);
    PKIX_TEST_DECREF_BC(mutex);

    /*
         * Note: we should also be freeing each thread's stack, but we
         * don't have access to the prodThread->stack variable (since
         * it is not exported). As a result, we have 120 bytes of memory
         * leakage.
         */

    PR_Free(prodThread);
    PR_Free(prodThread2);
    PR_Free(prodThread3);

cleanup:

    PKIX_Shutdown(plContext);

    PKIX_TEST_RETURN();

    endTests("Mutex and Threads");

    return (0);
}
Example #27
static void WaitCondVarThread(void *arg)
{
    PRIntervalTime timeout = (PRIntervalTime) arg;
    PRIntervalTime elapsed;
#if defined(XP_UNIX) || defined(WIN32)
    PRInt32 timeout_msecs = PR_IntervalToMilliseconds(timeout);
    PRInt32 elapsed_msecs;
#endif
#if defined(XP_UNIX)
    struct timeval end_time_tv;
#endif
#if defined(WIN32) && !defined(WINCE)
    struct _timeb end_time_tb;
#endif
    PRLock *ml;
    PRCondVar *cv;

    ml = PR_NewLock();
    if (ml == NULL) {
        fprintf(stderr, "PR_NewLock failed\n");
        exit(1);
    }
    cv = PR_NewCondVar(ml);
    if (cv == NULL) {
        fprintf(stderr, "PR_NewCondVar failed\n");
        exit(1);
    }
    PR_Lock(ml);
    PR_WaitCondVar(cv, timeout);
    PR_Unlock(ml);
    elapsed = (PRIntervalTime)(PR_IntervalNow() - start_time);
    if (elapsed + tolerance < timeout || elapsed > timeout + tolerance) {
        fprintf(stderr, "timeout wrong\n");
        exit(1);
    }
#if defined(XP_UNIX)
    gettimeofday(&end_time_tv, NULL);
    elapsed_msecs = 1000*(end_time_tv.tv_sec - start_time_tv.tv_sec)
            + (end_time_tv.tv_usec - start_time_tv.tv_usec)/1000;
#endif
#if defined(WIN32)
#if defined(WINCE)
    elapsed_msecs = GetTickCount() - start_time_tick;
#else
    _ftime(&end_time_tb);
    elapsed_msecs = 1000*(end_time_tb.time - start_time_tb.time)
            + (end_time_tb.millitm - start_time_tb.millitm);
#endif
#endif
#if defined(XP_UNIX) || defined(WIN32)
    if (elapsed_msecs + tolerance_msecs < timeout_msecs
            || elapsed_msecs > timeout_msecs + tolerance_msecs) {
        fprintf(stderr, "timeout wrong\n");
        exit(1);
    }
#endif
    PR_DestroyCondVar(cv);
    PR_DestroyLock(ml);
    if (debug_mode) {
        fprintf(stderr, "wait cond var thread (scope %d) done\n",
                PR_GetThreadScope(PR_GetCurrentThread()));
    }
}
Example #28
static void pt_PostNotifies(PRLock *lock, PRBool unlock)
{
    PRIntn index, rv;
    _PT_Notified post;
    _PT_Notified *notified, *prev = NULL;
    /*
     * Time to actually notify any conditions that were affected
     * while the lock was held. Get a copy of the list that's in
     * the lock structure and then zero the original. If it's
     * linked to other such structures, we own that storage.
     */
    post = lock->notified;  /* a safe copy; we own the lock */

#if defined(DEBUG)
    memset(&lock->notified, 0, sizeof(_PT_Notified));  /* reset */
#else
    lock->notified.length = 0;  /* these are really sufficient */
    lock->notified.link = NULL;
#endif

    /* should (may) we release lock before notifying? */
    if (unlock)
    {
        rv = pthread_mutex_unlock(&lock->mutex);
        PR_ASSERT(0 == rv);
    }

    notified = &post;  /* this is where we start */
    do
    {
        for (index = 0; index < notified->length; ++index)
        {
            PRCondVar *cv = notified->cv[index].cv;
            PR_ASSERT(NULL != cv);
            PR_ASSERT(0 != notified->cv[index].times);
            if (-1 == notified->cv[index].times)
            {
                rv = pthread_cond_broadcast(&cv->cv);
                PR_ASSERT(0 == rv);
            }
            else
            {
                while (notified->cv[index].times-- > 0)
                {
                    rv = pthread_cond_signal(&cv->cv);
                    PR_ASSERT(0 == rv);
                }
            }
#if defined(DEBUG)
            pt_debug.cvars_notified += 1;
            if (0 > PR_ATOMIC_DECREMENT(&cv->notify_pending))
            {
                pt_debug.delayed_cv_deletes += 1;
                PR_DestroyCondVar(cv);
            }
#else  /* defined(DEBUG) */
            if (0 > PR_ATOMIC_DECREMENT(&cv->notify_pending))
                PR_DestroyCondVar(cv);
#endif  /* defined(DEBUG) */
        }
        prev = notified;
        notified = notified->link;
        if (&post != prev) PR_DELETE(prev);
    } while (NULL != notified);
}  /* pt_PostNotifies */
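pt_PostNotifies only makes sense in the context of a caller that has accumulated deferred notifications on the lock. Here is a sketch of such an unlock path, assuming only the PRLock fields already used above (the pthread mutex and the embedded notified list); the function name and the shortcut test are illustrative, not the library's actual code.
static PRStatus pt_UnlockAndNotify(PRLock *lock)
{
    PR_ASSERT(NULL != lock);

    if (0 == lock->notified.length)  /* nothing was notified while held */
    {
        int rv = pthread_mutex_unlock(&lock->mutex);
        PR_ASSERT(0 == rv);
    }
    else
        pt_PostNotifies(lock, PR_TRUE);  /* unlock, then signal/broadcast */

    return PR_SUCCESS;
}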
int main(int argc, char** argv)
{
    PRUintn index;
    PRBool boolean;
    CSClient_t *client;
    PRStatus rv, joinStatus;
    CSServer_t *server = NULL;

    PRUintn backlog = DEFAULT_BACKLOG;
    PRUintn clients = DEFAULT_CLIENTS;
    const char *serverName = DEFAULT_SERVER;
    PRBool serverIsLocal = PR_TRUE;
    PRUintn accepting = ALLOWED_IN_ACCEPT;
    PRUintn workersMin = DEFAULT_WORKERS_MIN;
    PRUintn workersMax = DEFAULT_WORKERS_MAX;
    PRIntn execution = DEFAULT_EXECUTION_TIME;
    PRIntn low = DEFAULT_LOW, high = DEFAULT_HIGH;

    /*
     * -G           use global threads
     * -X           use XTP as the transport
     * -6           use IPv6
     * -a <n>       threads allowed in accept
     * -b <n>       backlog for listen
     * -c <threads> number of clients to create
     * -f <low>     low water mark for caching FDs
     * -F <high>    high water mark for caching FDs
     * -w <threads> minimal number of server threads
     * -W <threads> maximum number of server threads
     * -e <seconds> duration of the test in seconds
     * -s <string>  DNS name of server (implies no server here)
     * -v           verbosity
     * -d           debug mode
     * -p           print pthread statistics
     * -h           help
     */
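    /*
     * Hypothetical invocation (program name and values are illustrative
     * only):  cltsrv -G -c 10 -w 5 -W 50 -e 120 -s server.example.com -v
     */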

    PLOptStatus os;
    PLOptState *opt = PL_CreateOptState(argc, argv, "GX6b:a:c:f:F:w:W:e:s:vdhp");

    debug_out = PR_GetSpecialFD(PR_StandardError);

    while (PL_OPT_EOL != (os = PL_GetNextOpt(opt)))
    {
        if (PL_OPT_BAD == os) continue;
        switch (opt->option)
        {
        case 'G':  /* use global threads */
            thread_scope = PR_GLOBAL_THREAD;
            break;
        case 'X':  /* use XTP as transport */
            protocol = 36;
            break;
        case '6':  /* Use IPv6 */
            domain = PR_AF_INET6;
            break;
        case 'a':  /* the value for accepting */
            accepting = atoi(opt->value);
            break;
        case 'b':  /* the value for backlog */
            backlog = atoi(opt->value);
            break;
        case 'c':  /* number of client threads */
            clients = atoi(opt->value);
            break;
        case 'f':  /* low water fd cache */
            low = atoi(opt->value);
            break;
        case 'F':  /* high water fd cache */
            high = atoi(opt->value);
            break;
        case 'w':  /* minimum server worker threads */
            workersMin = atoi(opt->value);
            break;
        case 'W':  /* maximum server worker threads */
            workersMax = atoi(opt->value);
            break;
        case 'e':  /* program execution time in seconds */
            execution = atoi(opt->value);
            break;
        case 's':  /* server's address */
            serverName = opt->value;
            break;
        case 'v':  /* verbosity */
            verbosity = IncrementVerbosity();
            break;
        case 'd':  /* debug mode */
            debug_mode = PR_TRUE;
            break;
        case 'p':  /* pthread mode */
            pthread_stats = PR_TRUE;
            break;
        case 'h':
        default:
            Help();
            return 2;
        }
    }
    PL_DestroyOptState(opt);

    if (0 != PL_strcmp(serverName, DEFAULT_SERVER)) serverIsLocal = PR_FALSE;
    if (0 == execution) execution = DEFAULT_EXECUTION_TIME;
    if (0 == workersMax) workersMax = DEFAULT_WORKERS_MAX;
    if (0 == workersMin) workersMin = DEFAULT_WORKERS_MIN;
    if (0 == accepting) accepting = ALLOWED_IN_ACCEPT;
    if (0 == backlog) backlog = DEFAULT_BACKLOG;

    if (workersMin > accepting) accepting = workersMin;

    PR_STDIO_INIT();
    TimeOfDayMessage("Client/Server started at", PR_GetCurrentThread());

    cltsrv_log_file = PR_NewLogModule("cltsrv_log");
    MY_ASSERT(NULL != cltsrv_log_file);
    boolean = PR_SetLogFile("cltsrv.log");
    MY_ASSERT(boolean);

    rv = PR_SetFDCacheSize(low, high);
    PR_ASSERT(PR_SUCCESS == rv);

    if (serverIsLocal)
    {
        /* Establish the server */
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_INFO,
            ("main(0x%p): starting server\n", PR_GetCurrentThread()));

        server = PR_NEWZAP(CSServer_t);
        PR_INIT_CLIST(&server->list);
        server->state = cs_init;
        server->ml = PR_NewLock();
        server->backlog = backlog;
        server->port = DEFAULT_PORT;
        server->workers.minimum = workersMin;
        server->workers.maximum = workersMax;
        server->workers.accepting = accepting;
        server->stateChange = PR_NewCondVar(server->ml);
        server->pool.exiting = PR_NewCondVar(server->ml);
        server->pool.acceptComplete = PR_NewCondVar(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE,
            ("main(0x%p): creating server thread\n", PR_GetCurrentThread()));

        server->thread = PR_CreateThread(
            PR_USER_THREAD, Server, server, PR_PRIORITY_HIGH,
            thread_scope, PR_JOINABLE_THREAD, 0);
        TEST_ASSERT(NULL != server->thread);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): waiting for server init\n", PR_GetCurrentThread()));

        PR_Lock(server->ml);
        while (server->state == cs_init)
            PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT);
        PR_Unlock(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): server init complete (port #%d)\n",
            PR_GetCurrentThread(), server->port));
    }

    if (clients != 0)
    {
        /* Create all of the clients */
        PRHostEnt host;
        char buffer[BUFFER_SIZE];
        client = (CSClient_t*)PR_CALLOC(clients * sizeof(CSClient_t));

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_VERBOSE,
            ("main(0x%p): creating %d client threads\n",
            PR_GetCurrentThread(), clients));
        
        if (!serverIsLocal)
        {
            rv = PR_GetHostByName(serverName, buffer, BUFFER_SIZE, &host);
            if (PR_SUCCESS != rv)
            {
                PL_FPrintError(PR_STDERR, "PR_GetHostByName");
                return 2;
            }
        }

        for (index = 0; index < clients; ++index)
        {
            client[index].state = cs_init;
            client[index].ml = PR_NewLock();
            if (serverIsLocal)
            {
                if (PR_AF_INET6 != domain)
                    (void)PR_InitializeNetAddr(
                        PR_IpAddrLoopback, DEFAULT_PORT,
                        &client[index].serverAddress);
                else
                    rv = PR_SetNetAddr(PR_IpAddrLoopback, PR_AF_INET6,
                        DEFAULT_PORT, &client[index].serverAddress);
            }
            else
            {
                (void)PR_EnumerateHostEnt(
                    0, &host, DEFAULT_PORT, &client[index].serverAddress);
            }
            client[index].stateChange = PR_NewCondVar(client[index].ml);
            TEST_LOG(
                cltsrv_log_file, TEST_LOG_INFO,
                ("main(0x%p): creating client threads\n", PR_GetCurrentThread()));
            client[index].thread = PR_CreateThread(
                PR_USER_THREAD, Client, &client[index], PR_PRIORITY_NORMAL,
                thread_scope, PR_JOINABLE_THREAD, 0);
            TEST_ASSERT(NULL != client[index].thread);
            PR_Lock(client[index].ml);
            while (cs_init == client[index].state)
                PR_WaitCondVar(client[index].stateChange, PR_INTERVAL_NO_TIMEOUT);
            PR_Unlock(client[index].ml);
        }
    }

    /* Then just let them go at it for a bit */
    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS,
        ("main(0x%p): waiting for execution interval (%d seconds)\n",
        PR_GetCurrentThread(), execution));

    WaitForCompletion(execution);

    TimeOfDayMessage("Shutting down", PR_GetCurrentThread());

    if (clients != 0)
    {
        for (index = 0; index < clients; ++index)
        {
            TEST_LOG(cltsrv_log_file, TEST_LOG_STATUS, 
                ("main(0x%p): notifying client(0x%p) to stop\n",
                PR_GetCurrentThread(), client[index].thread));

            PR_Lock(client[index].ml);
            if (cs_run == client[index].state)
            {
                client[index].state = cs_stop;
                PR_Interrupt(client[index].thread);
                while (cs_stop == client[index].state)
                    PR_WaitCondVar(
                        client[index].stateChange, PR_INTERVAL_NO_TIMEOUT);
            }
            PR_Unlock(client[index].ml);

            TEST_LOG(cltsrv_log_file, TEST_LOG_VERBOSE, 
                ("main(0x%p): joining client(0x%p)\n",
                PR_GetCurrentThread(), client[index].thread));

            joinStatus = PR_JoinThread(client[index].thread);
            TEST_ASSERT(PR_SUCCESS == joinStatus);
            PR_DestroyCondVar(client[index].stateChange);
            PR_DestroyLock(client[index].ml);
        }
        PR_DELETE(client);
    }

    if (NULL != server)
    {
        /* All clients joined - retrieve the server */
        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE, 
            ("main(0x%p): notifying server(0x%p) to stop\n",
            PR_GetCurrentThread(), server->thread));

        PR_Lock(server->ml);
        server->state = cs_stop;
        PR_Interrupt(server->thread);
        while (cs_exit != server->state)
            PR_WaitCondVar(server->stateChange, PR_INTERVAL_NO_TIMEOUT);
        PR_Unlock(server->ml);

        TEST_LOG(
            cltsrv_log_file, TEST_LOG_NOTICE, 
            ("main(0x%p): joining server(0x%p)\n",
            PR_GetCurrentThread(), server->thread));
        joinStatus = PR_JoinThread(server->thread);
        TEST_ASSERT(PR_SUCCESS == joinStatus);

        PR_DestroyCondVar(server->stateChange);
        PR_DestroyCondVar(server->pool.exiting);
        PR_DestroyCondVar(server->pool.acceptComplete);
        PR_DestroyLock(server->ml);
        PR_DELETE(server);
    }

    TEST_LOG(
        cltsrv_log_file, TEST_LOG_ALWAYS, 
        ("main(0x%p): test complete\n", PR_GetCurrentThread()));

    PT_FPrintStats(debug_out, "\nPThread Statistics\n");

    TimeOfDayMessage("Test exiting at", PR_GetCurrentThread());
    PR_Cleanup();
    return 0;
}  /* main */
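main also depends on helpers that are not part of this excerpt (Help, IncrementVerbosity, TimeOfDayMessage, Server, Client, and WaitForCompletion). A plausible sketch of WaitForCompletion follows, assuming it simply sleeps out the execution interval in short slices so the main thread wakes periodically; the slice length is an assumption.
static void WaitForCompletion(PRIntn execution)
{
    /* Hypothetical sketch: burn off 'execution' seconds in slices of at
     * most 30 seconds so the main thread regains control periodically. */
    while (execution > 0)
    {
        PRIntn slice = (execution > 30) ? 30 : execution;
        PR_Sleep(PR_SecondsToInterval(slice));
        execution -= slice;
    }
}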