Example No. 1
void
nsIOThreadPool::Shutdown()
{
    LOG(("nsIOThreadPool::Shutdown\n"));

    // synchronize with background threads...
    {
        nsAutoLock lock(mLock);
        mShutdown = PR_TRUE;

        PR_NotifyAllCondVar(mIdleThreadCV);

        while (mNumThreads != 0)
            PR_WaitCondVar(mExitThreadCV, PR_INTERVAL_NO_TIMEOUT);
    }
}
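
The shutdown path above sleeps on mExitThreadCV until mNumThreads drops to zero, so every worker must notify that condition variable as it exits. A minimal sketch of the matching worker-exit step, assuming plain NSPR locking (PoolState and WorkerExit are illustrative names, not nsIOThreadPool code):

#include "prlock.h"
#include "prcvar.h"

/* Illustrative state mirroring the members used in Shutdown() above. */
typedef struct PoolState {
    PRLock    *lock;
    PRCondVar *idleThreadCV;   /* notified on shutdown to wake idle workers */
    PRCondVar *exitThreadCV;   /* notified by each worker as it exits */
    int        numThreads;
    PRBool     shutdown;
} PoolState;

/* Called by a worker thread just before it returns. */
static void WorkerExit(PoolState *ps)
{
    PR_Lock(ps->lock);
    ps->numThreads--;                    /* one fewer live worker */
    PR_NotifyCondVar(ps->exitThreadCV);  /* let Shutdown() re-check the count */
    PR_Unlock(ps->lock);
}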
Example No. 2
File: eventq.c Project: leto/389-ds
/*
 * eq_stop: shut down the event queue system.
 * Does not return until event queue is fully
 * shut down.
 */
void
eq_stop()
{
    slapi_eq_context *p, *q;

    if ( NULL == eq || NULL == eq->eq_lock ) {    /* never started */
        eq_stopped = 1;
        return;
    }

    eq_stopped = 0;
    eq_running = 0;
    /* 
     * Signal the eq thread function to stop, and wait until
     * it acknowledges by setting eq_stopped.
     */
    while (!eq_stopped) {
        PR_Lock(eq->eq_lock);
        PR_NotifyAllCondVar(eq->eq_cv);
        PR_Unlock(eq->eq_lock);
        PR_Lock(ss_lock);
        PR_WaitCondVar(ss_cv, PR_MillisecondsToInterval(100));
        PR_Unlock(ss_lock);
    }
    (void)PR_JoinThread(eq_loop_tid);
    /*
     * XXXggood we don't free the actual event queue data structures.
     * This is intentional, to allow enqueueing/cancellation of events
     * even after event queue services have shut down (these are no-ops).
     * The downside is that the event queue can't be stopped and restarted
     * easily.
     */
    PR_Lock(eq->eq_lock);
    p = eq->eq_queue;
    while (p != NULL) {
        q = p->ec_next;
        slapi_ch_free((void **)&p);
        /* Some ec_arg could get leaked here in shutdown (e.g., replica_name).
         * This can be fixed by specifying a flag when the context is queued.
         * [After 6.2]
         */
        p = q;
    }
    PR_Unlock(eq->eq_lock);
    slapi_log_error(SLAPI_LOG_HOUSE, NULL, "event queue services have shut down\n");
}
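
eq_stop() clears eq_stopped and then loops, notifying eq_cv and waiting up to 100 ms on ss_cv until the event-queue thread acknowledges. A hedged sketch of that acknowledgement, assuming the globals declared elsewhere in eventq.c (the real eq_loop body is not shown here; eq_acknowledge_stop is an illustrative name):

#include "prlock.h"
#include "prcvar.h"

extern PRLock    *ss_lock;      /* assumed: declared elsewhere in eventq.c */
extern PRCondVar *ss_cv;
extern volatile int eq_stopped;

/* Run by the event-queue thread once it observes eq_running == 0. */
static void eq_acknowledge_stop(void)
{
    PR_Lock(ss_lock);
    eq_stopped = 1;              /* terminates the while (!eq_stopped) loop */
    PR_NotifyAllCondVar(ss_cv);  /* wake eq_stop(), parked on ss_cv */
    PR_Unlock(ss_lock);
}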
Example No. 3
static void TimerManager(void *arg)
{
    PRIntervalTime now;
    PRIntervalTime timeout;
    PRCList *head;
    TimerEvent *timer;

    PR_Lock(tm_vars.ml);
    while (1)
    {
        if (PR_CLIST_IS_EMPTY(&tm_vars.timer_queue))
        {
            PR_WaitCondVar(tm_vars.new_timer, PR_INTERVAL_NO_TIMEOUT);
        }
        else
        {
            now = PR_IntervalNow();
            head = PR_LIST_HEAD(&tm_vars.timer_queue);
            timer = TIMER_EVENT_PTR(head);
            if ((PRInt32) (now - timer->absolute) >= 0)
            {
                PR_REMOVE_LINK(head);
                /*
                 * make its prev and next point to itself so that
                 * it's obvious that it's not on the timer_queue.
                 */
                PR_INIT_CLIST(head);
                PR_ASSERT(2 == timer->ref_count);
                PR_Unlock(tm_vars.ml);
                timer->func(timer->arg);
                PR_Lock(tm_vars.ml);
                timer->ref_count -= 1;
                if (0 == timer->ref_count)
                {
                    PR_NotifyAllCondVar(tm_vars.cancel_timer);
                }
            }
            else
            {
                timeout = (PRIntervalTime)(timer->absolute - now);
                PR_WaitCondVar(tm_vars.new_timer, timeout);
            } 
        }
    }
    PR_Unlock(tm_vars.ml);
}
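
TimerManager sleeps on tm_vars.new_timer indefinitely when the queue is empty, or until the earliest queued deadline otherwise, so whoever arms a timer must keep the queue sorted by deadline and wake the manager. A sketch of that arming side under assumed type shapes (MiniTimer, mini_tm, and MiniArmTimer are illustrative, not NSPR code):

#include "prclist.h"
#include "prcvar.h"
#include "prinrval.h"
#include "prlock.h"

typedef struct MiniTimer {
    PRCList        links;      /* queue linkage; first member, so a PRCList *
                                * can be cast back to the MiniTimer */
    PRIntervalTime absolute;   /* deadline in PR_IntervalNow() ticks */
    void         (*func)(void *);
    void          *arg;
} MiniTimer;

static struct {
    PRLock    *ml;             /* assumed created with PR_NewLock() */
    PRCondVar *new_timer;      /* assumed created on ml with PR_NewCondVar() */
    PRCList    timer_queue;    /* assumed set up with PR_INIT_CLIST() */
} mini_tm;

/* Arming side: insert in deadline order, then notify so the manager
 * re-reads the queue head and shortens its wait if necessary. */
static void MiniArmTimer(MiniTimer *t, PRIntervalTime delay)
{
    PRCList *cur;

    PR_Lock(mini_tm.ml);
    t->absolute = PR_IntervalNow() + delay;
    /* find the first queued timer that expires after t, using the same
     * wraparound-safe comparison as TimerManager above */
    for (cur = PR_LIST_HEAD(&mini_tm.timer_queue);
         cur != &mini_tm.timer_queue;
         cur = PR_NEXT_LINK(cur)) {
        if ((PRInt32)(t->absolute - ((MiniTimer *)cur)->absolute) < 0)
            break;
    }
    PR_INSERT_BEFORE(&t->links, cur);     /* keeps the queue sorted */
    PR_NotifyCondVar(mini_tm.new_timer);  /* wake TimerManager */
    PR_Unlock(mini_tm.ml);
}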
Example No. 4
nsresult TimerThread::Init()
{
  if (mInitialized) {
    if (!mThread)
      return NS_ERROR_FAILURE;

    return NS_OK;
  }

  if (PR_AtomicSet(&mInitInProgress, 1) == 0) {
    nsresult rv;

    mEventQueueService = do_GetService("@mozilla.org/event-queue-service;1", &rv);
    if (NS_SUCCEEDED(rv)) {
      nsCOMPtr<nsIObserverService> observerService
        (do_GetService("@mozilla.org/observer-service;1", &rv));

      if (NS_SUCCEEDED(rv)) {
        // We hold on to mThread to keep the thread alive.
        rv = NS_NewThread(getter_AddRefs(mThread),
                          NS_STATIC_CAST(nsIRunnable*, this),
                          0,
                          PR_JOINABLE_THREAD,
                          PR_PRIORITY_NORMAL,
                          PR_GLOBAL_THREAD);

        if (NS_FAILED(rv)) {
          mThread = nsnull;
        }
        else {
          // We'll be released at xpcom shutdown
          observerService->AddObserver(this, "sleep_notification", PR_FALSE);
          observerService->AddObserver(this, "wake_notification", PR_FALSE);
        }
      }
    }

    PR_Lock(mLock);
    mInitialized = PR_TRUE;
    PR_NotifyAllCondVar(mCondVar);
    PR_Unlock(mLock);
  }
  else {
    // Another thread is running the init; wait for it to finish. The
    // original continues past this excerpt; this else branch is a sketch
    // consistent with the PR_NotifyAllCondVar above.
    PR_Lock(mLock);
    while (!mInitialized)
      PR_WaitCondVar(mCondVar, PR_INTERVAL_NO_TIMEOUT);
    PR_Unlock(mLock);
  }

  return mThread ? NS_OK : NS_ERROR_FAILURE;
}
Example No. 5
static void _PR_PostNotifiesFromMonitor(PRCondVar *cv, PRIntn times)
{
    PRStatus rv;

    /*
     * Time to actually notify any waits that were affected while the monitor
     * was entered.
     */
    PR_ASSERT(cv != NULL);
    PR_ASSERT(times != 0);
    if (times == -1) {
        rv = PR_NotifyAllCondVar(cv);
        PR_ASSERT(rv == PR_SUCCESS);
    } else {
        while (times-- > 0) {
            rv = PR_NotifyCondVar(cv);
            PR_ASSERT(rv == PR_SUCCESS);
        }
    }
}
Example No. 6
PR_IMPLEMENT(PRStatus) PR_CallOnce(
    PRCallOnceType *once,
    PRCallOnceFN    func)
{
    if (!_pr_initialized) _PR_ImplicitInitialization();

    if (!once->initialized) {
        if (PR_AtomicSet(&once->inProgress, 1) == 0) {
            once->status = (*func)();
            PR_Lock(mod_init.ml);
            once->initialized = 1;
            PR_NotifyAllCondVar(mod_init.cv);
            PR_Unlock(mod_init.ml);
        } else {
            PR_Lock(mod_init.ml);
            while (!once->initialized) {
                PR_WaitCondVar(mod_init.cv, PR_INTERVAL_NO_TIMEOUT);
            }
            PR_Unlock(mod_init.ml);
        }
    }
    return once->status;
}
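
Caller-side usage of PR_CallOnce is deliberately small: a zero-initialized PRCallOnceType plus an init function. A minimal sketch (InitSharedState and EnsureInitialized are illustrative names):

#include "prinit.h"

static PRCallOnceType once;   /* must start zeroed; static storage does */

static PRStatus InitSharedState(void)
{
    /* one-time setup goes here */
    return PR_SUCCESS;
}

void EnsureInitialized(void)
{
    /* Exactly one thread runs InitSharedState(); any thread that races
     * in meanwhile blocks on mod_init.cv, as shown above. */
    (void)PR_CallOnce(&once, InitSharedState);
}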
Example No. 7
static SECStatus
get_blinding_params(RSAPrivateKey *key, mp_int *n, unsigned int modLen,
                    mp_int *f, mp_int *g)
{
    RSABlindingParams *rsabp           = NULL;
    blindingParams    *bpUnlinked      = NULL;
    blindingParams    *bp, *prevbp     = NULL;
    PRCList           *el;
    SECStatus          rv              = SECSuccess;
    mp_err             err             = MP_OKAY;
    int                cmp             = -1;
    PRBool             holdingLock     = PR_FALSE;

    do {
	if (blindingParamsList.lock == NULL) {
	    PORT_SetError(SEC_ERROR_LIBRARY_FAILURE);
	    return SECFailure;
	}
	/* Acquire the list lock */
	PZ_Lock(blindingParamsList.lock);
	holdingLock = PR_TRUE;

	/* Walk the list looking for the private key */
	for (el = PR_NEXT_LINK(&blindingParamsList.head);
	     el != &blindingParamsList.head;
	     el = PR_NEXT_LINK(el)) {
	    rsabp = (RSABlindingParams *)el;
	    cmp = SECITEM_CompareItem(&rsabp->modulus, &key->modulus);
	    if (cmp >= 0) {
		/* The key is found or not in the list. */
		break;
	    }
	}

	if (cmp) {
	    /* At this point, the key is not in the list.  el should point to 
	    ** the list element before which this key should be inserted. 
	    */
	    rsabp = PORT_ZNew(RSABlindingParams);
	    if (!rsabp) {
		PORT_SetError(SEC_ERROR_NO_MEMORY);
		goto cleanup;
	    }

	    rv = init_blinding_params(rsabp, key, n, modLen);
	    if (rv != SECSuccess) {
		PORT_ZFree(rsabp, sizeof(RSABlindingParams));
		goto cleanup;
	    }

	    /* Insert the new element into the list
	    ** If inserting in the middle of the list, el points to the link
	    ** to insert before.  Otherwise, the link needs to be appended to
	    ** the end of the list, which is the same as inserting before the
	    ** head (since el would have looped back to the head).
	    */
	    PR_INSERT_BEFORE(&rsabp->link, el);
	}

	/* We've found (or created) the RSAblindingParams struct for this key.
	 * Now, search its list of ready blinding params for a usable one.
	 */
	while (0 != (bp = rsabp->bp)) {
	    if (--(bp->counter) > 0) {
		/* Found a match and there are still remaining uses left */
		/* Return the parameters */
		CHECK_MPI_OK( mp_copy(&bp->f, f) );
		CHECK_MPI_OK( mp_copy(&bp->g, g) );

		PZ_Unlock(blindingParamsList.lock); 
		return SECSuccess;
	    }
	    /* exhausted this one, give its values to caller, and
	     * then retire it.
	     */
	    mp_exch(&bp->f, f);
	    mp_exch(&bp->g, g);
	    mp_clear( &bp->f );
	    mp_clear( &bp->g );
	    bp->counter = 0;
	    /* Move to free list */
	    rsabp->bp   = bp->next;
	    bp->next    = rsabp->free;
	    rsabp->free = bp;
	    /* In case there are threads waiting for a new blinding
	     * value, notify one thread that a value is ready.
	     */
	    if (blindingParamsList.waitCount > 0) {
		PR_NotifyCondVar( blindingParamsList.cVar );
		blindingParamsList.waitCount--;
	    }
	    PZ_Unlock(blindingParamsList.lock); 
	    return SECSuccess;
	}
	/* We did not find a usable set of blinding params.  Can we make one? */
	/* Find a free bp struct. */
	prevbp = NULL;
	if ((bp = rsabp->free) != NULL) {
	    /* unlink this bp */
	    rsabp->free  = bp->next;
	    bp->next     = NULL;
	    bpUnlinked   = bp;  /* In case we fail */

	    PZ_Unlock(blindingParamsList.lock); 
	    holdingLock = PR_FALSE;
	    /* generate blinding parameter values for the current thread */
	    CHECK_SEC_OK( generate_blinding_params(key, f, g, n, modLen ) );

	    /* put the blinding parameter values into cache */
	    CHECK_MPI_OK( mp_init( &bp->f) );
	    CHECK_MPI_OK( mp_init( &bp->g) );
	    CHECK_MPI_OK( mp_copy( f, &bp->f) );
	    CHECK_MPI_OK( mp_copy( g, &bp->g) );

	    /* Put this at head of queue of usable params. */
	    PZ_Lock(blindingParamsList.lock);
	    holdingLock = PR_TRUE;
	    /* initialize RSABlindingParamsStr */
	    bp->counter = RSA_BLINDING_PARAMS_MAX_REUSE;
	    bp->next    = rsabp->bp;
	    rsabp->bp   = bp;
	    bpUnlinked  = NULL;
	    /* In case there are threads waiting for a new blinding value,
	     * notify them all that a value is ready.
	     */
	    if (blindingParamsList.waitCount > 0) {
		PR_NotifyAllCondVar( blindingParamsList.cVar );
		blindingParamsList.waitCount = 0;
	    }
	    PZ_Unlock(blindingParamsList.lock);
	    return SECSuccess;
	}
	/* Here, there are no usable blinding parameters available,
	 * and no free bp blocks, presumably because they're all 
	 * actively having parameters generated for them.
	 * So, we need to wait here and not eat up CPU until some 
	 * change happens. 
	 */
	blindingParamsList.waitCount++;
	PR_WaitCondVar( blindingParamsList.cVar, PR_INTERVAL_NO_TIMEOUT );
	PZ_Unlock(blindingParamsList.lock); 
	holdingLock = PR_FALSE;
    } while (1);

cleanup:
    /* It is possible to reach this after the lock is already released.  */
    if (bpUnlinked) {
	if (!holdingLock) {
	    PZ_Lock(blindingParamsList.lock);
	    holdingLock = PR_TRUE;
	}
	bp = bpUnlinked;
	mp_clear( &bp->f );
	mp_clear( &bp->g );
	bp->counter = 0;
    	/* Must put the unlinked bp back on the free list */
	bp->next    = rsabp->free;
	rsabp->free = bp;
    }
    if (holdingLock) {
	PZ_Unlock(blindingParamsList.lock);
	holdingLock = PR_FALSE;
    }
    if (err) {
	MP_TO_SEC_ERROR(err);
    }
    return SECFailure;
}
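
Note the two notify choices above: PR_NotifyCondVar with waitCount-- when one exhausted parameter set is recycled, and PR_NotifyAllCondVar with waitCount = 0 when a freshly generated set is published. The counted-waiter convention reduces to this sketch (CountedWait and its functions are illustrative, not NSS code):

#include "prlock.h"
#include "prcvar.h"

typedef struct CountedWait {
    PRLock    *lock;
    PRCondVar *cv;
    int        waitCount;   /* sleepers advertise themselves here */
    int        available;   /* resources ready for consumption */
} CountedWait;

static void take(CountedWait *cw)
{
    PR_Lock(cw->lock);
    while (cw->available == 0) {
        cw->waitCount++;    /* the notifier decrements this, not us */
        PR_WaitCondVar(cw->cv, PR_INTERVAL_NO_TIMEOUT);
    }
    cw->available--;
    PR_Unlock(cw->lock);
}

static void put_one(CountedWait *cw)
{
    PR_Lock(cw->lock);
    cw->available++;
    if (cw->waitCount > 0) {              /* one item: wake one waiter */
        PR_NotifyCondVar(cw->cv);
        cw->waitCount--;
    }
    PR_Unlock(cw->lock);
}

static void put_many(CountedWait *cw, int n)
{
    PR_Lock(cw->lock);
    cw->available += n;
    if (cw->waitCount > 0) {              /* a batch: wake everyone */
        PR_NotifyAllCondVar(cw->cv);
        cw->waitCount = 0;
    }
    PR_Unlock(cw->lock);
}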
Example No. 8
PR_IMPLEMENT(PRStatus)
PR_JoinThreadPool(PRThreadPool *tpool)
{
PRStatus rval = PR_SUCCESS;
PRCList *head;
PRStatus rval_status;

	PR_Lock(tpool->jobq.lock);
	while (!tpool->shutdown)
		PR_WaitCondVar(tpool->shutdown_cv, PR_INTERVAL_NO_TIMEOUT);

	/*
	 * wakeup worker threads
	 */
#ifdef OPT_WINNT
	/*
	 * post shutdown notification for all threads
	 */
	{
		int i;
		for(i=0; i < tpool->current_threads; i++) {
			PostQueuedCompletionStatus(tpool->jobq.nt_completion_port, 0,
												TRUE, NULL);
		}
	}
#else
	PR_NotifyAllCondVar(tpool->jobq.cv);
#endif

	/*
	 * wakeup io thread(s)
	 */
	notify_ioq(tpool);

	/*
	 * wakeup timer thread(s)
	 */
	PR_Lock(tpool->timerq.lock);
	notify_timerq(tpool);
	PR_Unlock(tpool->timerq.lock);

	while (!PR_CLIST_IS_EMPTY(&tpool->jobq.wthreads)) {
		wthread *wthrp;

		head = PR_LIST_HEAD(&tpool->jobq.wthreads);
		PR_REMOVE_AND_INIT_LINK(head);
		PR_Unlock(tpool->jobq.lock);
		wthrp = WTHREAD_LINKS_PTR(head);
		rval_status = PR_JoinThread(wthrp->thread);
		PR_ASSERT(PR_SUCCESS == rval_status);
		PR_DELETE(wthrp);
		PR_Lock(tpool->jobq.lock);
	}
	PR_Unlock(tpool->jobq.lock);
	while (!PR_CLIST_IS_EMPTY(&tpool->ioq.wthreads)) {
		wthread *wthrp;

		head = PR_LIST_HEAD(&tpool->ioq.wthreads);
		PR_REMOVE_AND_INIT_LINK(head);
		wthrp = WTHREAD_LINKS_PTR(head);
		rval_status = PR_JoinThread(wthrp->thread);
		PR_ASSERT(PR_SUCCESS == rval_status);
		PR_DELETE(wthrp);
	}

	while (!PR_CLIST_IS_EMPTY(&tpool->timerq.wthreads)) {
		wthread *wthrp;

		head = PR_LIST_HEAD(&tpool->timerq.wthreads);
		PR_REMOVE_AND_INIT_LINK(head);
		wthrp = WTHREAD_LINKS_PTR(head);
		rval_status = PR_JoinThread(wthrp->thread);
		PR_ASSERT(PR_SUCCESS == rval_status);
		PR_DELETE(wthrp);
	}

	/*
	 * Delete queued jobs
	 */
	while (!PR_CLIST_IS_EMPTY(&tpool->jobq.list)) {
		PRJob *jobp;

		head = PR_LIST_HEAD(&tpool->jobq.list);
		PR_REMOVE_AND_INIT_LINK(head);
		jobp = JOB_LINKS_PTR(head);
		tpool->jobq.cnt--;
		delete_job(jobp);
	}

	/* delete io jobs */
	while (!PR_CLIST_IS_EMPTY(&tpool->ioq.list)) {
		PRJob *jobp;

		head = PR_LIST_HEAD(&tpool->ioq.list);
		PR_REMOVE_AND_INIT_LINK(head);
		tpool->ioq.cnt--;
		jobp = JOB_LINKS_PTR(head);
		delete_job(jobp);
	}

	/* delete timer jobs */
	while (!PR_CLIST_IS_EMPTY(&tpool->timerq.list)) {
		PRJob *jobp;

		head = PR_LIST_HEAD(&tpool->timerq.list);
		PR_REMOVE_AND_INIT_LINK(head);
		tpool->timerq.cnt--;
		jobp = JOB_LINKS_PTR(head);
		delete_job(jobp);
	}

	PR_ASSERT(0 == tpool->jobq.cnt);
	PR_ASSERT(0 == tpool->ioq.cnt);
	PR_ASSERT(0 == tpool->timerq.cnt);

	delete_threadpool(tpool);
	return rval;
}
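
PR_JoinThreadPool() parks on shutdown_cv first, so it only makes progress once PR_ShutdownThreadPool() has set tpool->shutdown and signalled that condition variable. A minimal usage sketch of NSPR's thread-pool API (run_pool and work are illustrative names; the pool sizes are arbitrary):

#include "prtpool.h"

static void work(void *arg)
{
    /* job body */
}

void run_pool(void)
{
    /* 2 initial threads, at most 4, default stack size */
    PRThreadPool *tp = PR_CreateThreadPool(2, 4, 0);
    if (tp == NULL)
        return;
    PR_QueueJob(tp, work, NULL, PR_FALSE /* not joinable */);
    PR_ShutdownThreadPool(tp);  /* sets shutdown, notifies shutdown_cv */
    PR_JoinThreadPool(tp);      /* the function above: joins and frees */
}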
Example No. 9
PRStatus RCCondition::Broadcast()
{
    return PR_NotifyAllCondVar(cv);
}  /* RCCondition::Broadcast */
Example No. 10
PR_IMPLEMENT(PRRecvWait*) PR_CancelWaitGroup(PRWaitGroup *group)
{
    PRRecvWait **desc;
    PRRecvWait *recv_wait = NULL;
#ifdef WINNT
    _MDOverlapped *overlapped;
    PRRecvWait **end;
    PRThread *me = _PR_MD_CURRENT_THREAD();
#endif

    if (NULL == group) group = mw_state->group;
    PR_ASSERT(NULL != group);
    if (NULL == group)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return NULL;
    }

    PR_Lock(group->ml);
    if (_prmw_stopped != group->state)
    {
        if (_prmw_running == group->state)
            group->state = _prmw_stopping;  /* so nothing new comes in */
        if (0 == group->waiting_threads)  /* is there anybody else? */
            group->state = _prmw_stopped;  /* we can stop right now */
        else
        {
            PR_NotifyAllCondVar(group->new_business);
            PR_NotifyAllCondVar(group->io_complete);
        }
        while (_prmw_stopped != group->state)
            (void)PR_WaitCondVar(group->mw_manage, PR_INTERVAL_NO_TIMEOUT);
    }

#ifdef WINNT
    _PR_MD_LOCK(&group->mdlock);
#endif
    /* make all the existing descriptors look done/interrupted */
#ifdef WINNT
    end = &group->waiter->recv_wait + group->waiter->length;
    for (desc = &group->waiter->recv_wait; desc < end; ++desc)
    {
        if (NULL != *desc)
        {
            if (InterlockedCompareExchange((LONG *)&(*desc)->outcome,
                (LONG)PR_MW_INTERRUPT, (LONG)PR_MW_PENDING)
                == (LONG)PR_MW_PENDING)
            {
                PRFileDesc *bottom = PR_GetIdentitiesLayer(
                    (*desc)->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);
                if (NULL == bottom)
                {
                    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
                    goto invalid_arg;
                }
                bottom->secret->state = _PR_FILEDESC_CLOSED;
#if 0
                fprintf(stderr, "cancel wait group: closing socket\n");
#endif
                if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR)
                {
                    fprintf(stderr, "closesocket failed: %d\n",
                        WSAGetLastError());
                    exit(1);
                }
            }
        }
    }
    while (group->waiter->count > 0)
    {
        _PR_THREAD_LOCK(me);
        me->state = _PR_IO_WAIT;
        PR_APPEND_LINK(&me->waitQLinks, &group->wait_list);
        if (!_PR_IS_NATIVE_THREAD(me))
        {
            _PR_SLEEPQ_LOCK(me->cpu);
            _PR_ADD_SLEEPQ(me, PR_INTERVAL_NO_TIMEOUT);
            _PR_SLEEPQ_UNLOCK(me->cpu);
        }
        _PR_THREAD_UNLOCK(me);
        _PR_MD_UNLOCK(&group->mdlock);
        PR_Unlock(group->ml);
        _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT);
        me->state = _PR_RUNNING;
        PR_Lock(group->ml);
        _PR_MD_LOCK(&group->mdlock);
    }
#else
    for (desc = &group->waiter->recv_wait; group->waiter->count > 0; ++desc)
    {
        PR_ASSERT(desc < &group->waiter->recv_wait + group->waiter->length);
        if (NULL != *desc)
            _MW_DoneInternal(group, desc, PR_MW_INTERRUPT);
    }
#endif

    /* take first element of finished list and return it or NULL */
    if (PR_CLIST_IS_EMPTY(&group->io_ready))
        PR_SetError(PR_GROUP_EMPTY_ERROR, 0);
    else
    {
        PRCList *head = PR_LIST_HEAD(&group->io_ready);
        PR_REMOVE_AND_INIT_LINK(head);
#ifdef WINNT
        overlapped = (_MDOverlapped *)
            ((char *)head - offsetof(_MDOverlapped, data));
        head = &overlapped->data.mw.desc->internal;
        if (NULL != overlapped->data.mw.timer)
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                != overlapped->data.mw.desc->timeout);
            CancelTimer(overlapped->data.mw.timer);
        }
        else
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                == overlapped->data.mw.desc->timeout);
        }
        PR_DELETE(overlapped);
#endif
        recv_wait = (PRRecvWait*)head;
    }
#ifdef WINNT
invalid_arg:
    _PR_MD_UNLOCK(&group->mdlock);
#endif
    PR_Unlock(group->ml);

    return recv_wait;
}  /* PR_CancelWaitGroup */
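
Stripped of the WINNT plumbing, the cancellation handshake above has a small generic shape: move the group to a "stopping" state so nothing new enters, wake every sleeper, and wait until the last waiter flips the state to "stopped". A sketch with illustrative names (MiniGroup is not NSPR code):

#include "prlock.h"
#include "prcvar.h"

typedef enum { RUNNING, STOPPING, STOPPED } GroupState;

typedef struct MiniGroup {
    PRLock    *ml;
    PRCondVar *new_business;  /* workers wait here for work */
    PRCondVar *mw_manage;     /* the canceller waits here */
    GroupState state;
    int        waiting_threads;
} MiniGroup;

static void MiniCancel(MiniGroup *g)
{
    PR_Lock(g->ml);
    if (g->state == RUNNING)
        g->state = STOPPING;        /* refuse new work */
    if (g->waiting_threads == 0)
        g->state = STOPPED;         /* nobody to wait for */
    else
        PR_NotifyAllCondVar(g->new_business);  /* kick the sleepers */
    while (g->state != STOPPED)
        PR_WaitCondVar(g->mw_manage, PR_INTERVAL_NO_TIMEOUT);
    PR_Unlock(g->ml);
}

/* Each woken worker sees state != RUNNING and exits; the last one reports. */
static void MiniWorkerExit(MiniGroup *g)
{
    PR_Lock(g->ml);
    if (--g->waiting_threads == 0 && g->state == STOPPING) {
        g->state = STOPPED;
        PR_NotifyCondVar(g->mw_manage);  /* release MiniCancel() */
    }
    PR_Unlock(g->ml);
}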
Example No. 11
PRStatus
NSTP_CreatePool(NSTPPoolConfig *pcfg, NSTPPool *pool)
{
    PRStatus rv = PR_SUCCESS;
    NSTPPool pip = NULL;
	
    /* Do default module initialization if it hasn't been done yet */
    if (!NSTPLock) {
        NSTP_Initialize(NULL, NULL, NULL);
    }
	
    /* Enter global lock */
    PR_Lock(NSTPLock);
	
    /* Pretend loop to avoid goto */
    while (1) {
		
        /* Allocate a new pool instance from the global pool */
        pip = PR_NEWZAP(struct NSTPPool_s);
        if (!pip) {
            /* Failed to allocate pool instance structure */
            rv = PR_FAILURE;
            break;
        }
		
        if (pcfg->version != NSTP_API_VERSION) {
			
            /* Unsupported API version */
            PR_DELETE(pip);
            rv = PR_FAILURE;
            break;
        }
		
        /* Copy the configuration parameters into the instance */
        pip->config = *pcfg;
		
        /* Get a new lock for this instance */
        pip->lock = PR_NewLock();
        if (!pip->lock) {
            /* Failed to create lock for new pool instance */
            PR_DELETE(pip);
            rv = PR_FAILURE;
            break;
        }
		
        /* Create a condition variable for the lock */
        pip->cvar = PR_NewCondVar(pip->lock);
        if (!pip->cvar) {
            /* Failed to create condition variable for new pool instance */
            PR_DestroyLock(pip->lock);
            PR_DELETE(pip);
            rv = PR_FAILURE;
            break;
        }
		
        /* Add this instance to the global list of instances */
        pip->next = NSTPInstanceList;
        NSTPInstanceList = pip;
        ++NSTPInstanceCount;
		break;	// ruslan: need to get out of the loop, right Howard :-)?
    }
	
    PR_Unlock(NSTPLock);
	
    /* If that went well, continue initializing the new instance */
    if (rv == PR_SUCCESS) {
		
        /* Create initial threads */
        if (pip->config.initThread > 0) {
            PRThread *thread;
            int i;
			
            for (i = 0; i < pip->config.initThread; ++i) {
				
                /*
                 * On Solaris, all threads that are going to run Java
                 * need to be bound threads so that we can reliably get
                 * the register state to do GC.
                 */
                thread = PR_CreateThread(PR_USER_THREAD,
					NSTP_ThreadMain,
					(void *)pip,
					PR_PRIORITY_NORMAL,
					PR_GLOBAL_THREAD,
					PR_UNJOINABLE_THREAD,
					pip->config.stackSize);
				
                if (!thread) {
                    /* Failed, so shutdown threads already created */
                    PR_Lock(pip->lock);
                    if (pip->stats.threadCount > 0) {
                        pip->shutdown = PR_TRUE;
                        rv = PR_NotifyAllCondVar(pip->cvar);
                    }
                    PR_Unlock(pip->lock);
                    rv = PR_FAILURE;
                    break;
                }
				
                PR_Lock(pip->lock);
                ++pip->stats.threadCount;
                ++pip->stats.freeCount;
                PR_Unlock(pip->lock);
            }
        } /* initThread > 0 */
		*pool = pip;	// ruslan: need to assign it back
    }
	
    return rv;
}
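
A hypothetical caller of NSTP_CreatePool(), setting only the configuration fields this snippet actually reads (version, initThread, stackSize); the real NSTPPoolConfig presumably carries more fields, and the NSTP header is not shown:

#include <string.h>

void start_pool(void)
{
    NSTPPool pool = NULL;
    NSTPPoolConfig cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.version    = NSTP_API_VERSION;
    cfg.initThread = 4;   /* start four bound worker threads */
    cfg.stackSize  = 0;   /* default stack size */

    if (NSTP_CreatePool(&cfg, &pool) == PR_SUCCESS) {
        /* ... queue work items ... */
        NSTP_DestroyPool(pool, PR_TRUE /* reject queued work */);
    }
}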
Example No. 12
NSTP_DestroyPool(NSTPPool pool, PRBool doitnow)
{
    NSTPWorkItem *work;
	
    PR_Lock(pool->lock);
	
    /*
	* Indicate pool is being shut down, so no more requests
	* will be accepted.
	*/
    pool->shutdown = PR_TRUE;
	
    if (doitnow) {
		
		/* Complete all queued work items with NSTP_STATUS_SHUTDOWN_REJECT */
		while ((work = pool->head) != NULL) {
			
			/* Dequeue work item */
			pool->head = work->next;
			if (pool->head == NULL) {
				pool->tail = NULL;
			}
			
			PR_Unlock(pool->lock);
			
			/* Acquire the lock used by the calling, waiting thread */
			PR_Lock(work->waiter_mon->lock);

			/* Set work completion status */
			work->work_status = NSTP_STATUS_SHUTDOWN_REJECT;
			work->work_complete = PR_TRUE;

			/* Wake up the calling, waiting thread */
			PR_NotifyCondVar(work->waiter_mon->cvar);

			/* Release the lock */
			PR_Unlock(work->waiter_mon->lock);

			PR_Lock(pool->lock);
		}
    }
    else {
		/* doitnow is PR_FALSE */
		
		/* Wait for work queue to be empty */
		while (pool->head != NULL) {
			PR_WaitCondVar(pool->cvar, PR_INTERVAL_NO_TIMEOUT);
		}
    }
	
    if (pool->stats.threadCount > 0) {
		NSTPThread *thread;
		NSTPThread *nxt_thread;
		
		for (thread = pool->threads; thread; thread = nxt_thread) {
			nxt_thread = thread->next;
			thread->shutdown = PR_TRUE;
		}
		
		/* Wakeup all threads to look at their shutdown flags */
		PR_NotifyAllCondVar(pool->cvar);
		
		/* Wait for threadCount to go to zero */
		while (pool->stats.threadCount > 0) {
			PR_WaitCondVar(pool->cvar, PR_INTERVAL_NO_TIMEOUT);
		}
    }
	
    PR_Unlock(pool->lock);
	
    PR_DestroyCondVar(pool->cvar);
    PR_DestroyLock(pool->lock);
    PR_DELETE(pool);
}
Example No. 13
void
NSTP_ThreadMain (void *arg)
{
    NSTPPool pip = (NSTPPool)arg;
    NSTPWorkItem *work = NULL;
    NSTPThread self;
	
    /* Initialize structure describing this thread */
    self.prthread = PR_GetCurrentThread ();
    self.work = NULL;
    self.shutdown = PR_FALSE;
	
    PR_Lock(pip->lock);
	
    /* Add this thread to the list of threads in the thread pool instance */
    self.next = pip->threads;
    pip->threads = &self;
	
    /*
     * The free thread count was incremented when this thread was created.
     * The thread is free, but we're going to increment the count at the
     * beginning of the loop below, so decrement it here.
     */
    --pip->stats.freeCount;
	
    /*
     * Begin main service loop.  The pool lock is held at the beginning
     * of the loop, either because it was acquired above, or because it
     * was acquired at the end of the previous iteration.
     */
    while (!self.shutdown) {

        /* Count this thread as free */
        ++pip->stats.freeCount;
		
        /* Wait for something on the work queue, or for shutdown */
        while (!pip->head && !self.shutdown) {
            PR_WaitCondVar (pip->cvar, PR_INTERVAL_NO_TIMEOUT);
        }
		
        /* Dequeue the head work item */
        work = pip->head;
        pip->head = work->next;
        pip->stats.queueCount--;    /* decrement the queue count */

        if (!pip->head) {
            pip->tail = NULL;
			
            /*
             * If the pool shutdown flag is set, wake all threads waiting
             * on the pool cvar, so that the one that is waiting for the
             * work queue to be empty will wake up and see that.  The
             * other (worker) threads will immediately go back to waiting
             * on the pool cvar when they see that the work queue is
             * empty.
             */
            if (pip->shutdown) {
                PR_NotifyAllCondVar(pip->cvar);
            }
        }
        self.work = work;
		
        /* This thread is no longer free */
        --pip->stats.freeCount;
		
        /* Release the pool instance lock */
        PR_Unlock(pip->lock);
		
        /* Call the work function */
        work->workfn(work->workarg);
		
        /* Acquire the lock used by the calling, waiting thread */
        PR_Lock(work->waiter_mon->lock);

        /* Set work completion status */
        work->work_status = NSTP_STATUS_WORK_DONE;
        work->work_complete = PR_TRUE;

        /* Wake up the calling, waiting thread */
        PR_NotifyCondVar(work->waiter_mon->cvar);

        /* Release the lock */
        PR_Unlock(work->waiter_mon->lock);
		
        /* Acquire the pool instance lock for the next iteration */
        PR_Lock(pip->lock);
    }
	
    /* Decrement the thread count before this thread terminates */
    if (--pip->stats.threadCount <= 0) {
        /* Notify the shutdown thread when this is the last thread */
        PR_NotifyCondVar(pip->cvar);
    }
	
    PR_Unlock(pip->lock);
}
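
The enqueueing side that NSTP_ThreadMain() completes is not shown in these examples. A hedged reconstruction from the fields used above (MyQueueAndWait is an illustrative name, and NSTPStatus is an assumed type name for the status values): enqueue under the pool lock, wake one free worker via the pool cvar, then sleep on the per-caller monitor until the worker marks the item complete.

/* All NSTP types come from the (not shown) NSTP header. */
static NSTPStatus MyQueueAndWait(NSTPPool pool, NSTPWorkItem *work)
{
    PR_Lock(pool->lock);
    if (pool->shutdown) {                  /* pool is going away: reject */
        PR_Unlock(pool->lock);
        return NSTP_STATUS_SHUTDOWN_REJECT;
    }
    work->work_complete = PR_FALSE;
    work->next = NULL;                     /* append at the tail */
    if (pool->tail)
        pool->tail->next = work;
    else
        pool->head = work;
    pool->tail = work;
    pool->stats.queueCount++;
    PR_NotifyCondVar(pool->cvar);          /* wake one free worker */
    PR_Unlock(pool->lock);

    /* Sleep until NSTP_ThreadMain() sets work_complete and notifies. */
    PR_Lock(work->waiter_mon->lock);
    while (!work->work_complete)
        PR_WaitCondVar(work->waiter_mon->cvar, PR_INTERVAL_NO_TIMEOUT);
    PR_Unlock(work->waiter_mon->lock);

    return work->work_status;
}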