/*
** Remove the wait descriptor registered for 'fd' from the group's
** hash table (NT-specific helper). Runs under the group's MD lock.
** Returns PR_SUCCESS if an entry was found and cleared, otherwise
** PR_FAILURE.
*/
static PRStatus NT_HashRemove(PRWaitGroup *group, PRFileDesc *fd)
{
    PRStatus rv = PR_FAILURE;
    PRRecvWait **slot;

    _PR_MD_LOCK(&group->mdlock);
    slot = _MW_LookupInternal(group, fd);
    if (NULL != slot)
    {
        /* Entry found: drop the waiter count and vacate the slot */
        group->waiter->count -= 1;
        *slot = NULL;
        rv = PR_SUCCESS;
    }
    _PR_MD_UNLOCK(&group->mdlock);

    return rv;
}  /* NT_HashRemove */
/*
** Wake every thread queued on a "naked" condition variable, i.e. one
** created with _PR_NAKED_CV_LOCK (no associated PRLock; the caller
** supplies its own mutual exclusion). Walks the condvar's condQ under
** the condvar's internal lock and notifies each queued thread.
** Always returns PR_SUCCESS.
*/
PR_IMPLEMENT(PRStatus) PRP_NakedBroadcast(PRCondVar *cvar)
{
    PRCList *q;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    /* Only legal on a condvar created for naked use */
    PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock);

    /* Local (user-level) threads must mask interrupts while holding
     * the internal lock; native threads need not. */
    if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is);
    _PR_MD_LOCK( &(cvar->ilock) );
    q = cvar->condQ.next;
    while (q != &cvar->condQ) {
        PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("PR_NotifyAll: cvar=%p", cvar));
        /* NOTE(review): q is advanced after _PR_NotifyThread; assumes
         * notification leaves the link traversable — confirm against
         * _PR_NotifyThread's queue handling. */
        _PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me);
        q = q->next;
    }
    _PR_MD_UNLOCK( &(cvar->ilock) );
    if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is);

    return PR_SUCCESS;
}  /* PRP_NakedBroadcast */
/* ** Lock the lock. */ PR_IMPLEMENT(void) PR_Lock(PRLock *lock) { PRThread *me = _PR_MD_CURRENT_THREAD(); PRIntn is; PRThread *t; PRCList *q; PR_ASSERT(me != suspendAllThread); PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); PR_ASSERT(lock != NULL); #ifdef _PR_GLOBAL_THREADS_ONLY _PR_MD_LOCK(&lock->ilock); PR_ASSERT(lock->owner == 0); lock->owner = me; return; #else /* _PR_GLOBAL_THREADS_ONLY */ if (_native_threads_only) { _PR_MD_LOCK(&lock->ilock); PR_ASSERT(lock->owner == 0); lock->owner = me; return; } if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is); PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); retry: _PR_LOCK_LOCK(lock); if (lock->owner == 0) { /* Just got the lock */ lock->owner = me; lock->priority = me->priority; /* Add the granted lock to this owning thread's lock list */ PR_APPEND_LINK(&lock->links, &me->lockList); _PR_LOCK_UNLOCK(lock); if (!_PR_IS_NATIVE_THREAD(me)) _PR_FAST_INTSON(is); return; } /* If this thread already owns this lock, then it is a deadlock */ PR_ASSERT(lock->owner != me); PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); #if 0 if (me->priority > lock->owner->priority) { /* ** Give the lock owner a priority boost until we get the ** lock. Record the priority we boosted it to. */ lock->boostPriority = me->priority; _PR_SetThreadPriority(lock->owner, me->priority); } #endif /* Add this thread to the asked for lock's list of waiting threads. We add this thread thread in the right priority order so when the unlock occurs, the thread with the higher priority will get the lock. */ q = lock->waitQ.next; if (q == &lock->waitQ || _PR_THREAD_CONDQ_PTR(q)->priority == _PR_THREAD_CONDQ_PTR(lock->waitQ.prev)->priority) { /* * If all the threads in the lock waitQ have the same priority, * then avoid scanning the list: insert the element at the end. 
*/ q = &lock->waitQ; } else { /* Sort thread into lock's waitQ at appropriate point */ /* Now scan the list for where to insert this entry */ while (q != &lock->waitQ) { t = _PR_THREAD_CONDQ_PTR(lock->waitQ.next); if (me->priority > t->priority) { /* Found a lower priority thread to insert in front of */ break; } q = q->next; } } PR_INSERT_BEFORE(&me->waitQLinks, q); /* Now grab the threadLock since we are about to change the state. We have to do this since a PR_Suspend or PR_SetThreadPriority type call that takes a PRThread* as an argument could be changing the state of this thread from a thread running on a different cpu. */ _PR_THREAD_LOCK(me); me->state = _PR_LOCK_WAIT; me->wait.lock = lock; _PR_THREAD_UNLOCK(me); _PR_LOCK_UNLOCK(lock); _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT); goto retry; #endif /* _PR_GLOBAL_THREADS_ONLY */ }
/*
** Block the calling thread until some receive descriptor in the wait
** group has completed, then return that descriptor (or NULL on error,
** with PR_SetError called). Multiple threads may wait concurrently;
** on the non-NT path, exactly one of them acts as the poller at any
** time, and the handoff logic below chooses its replacement.
*/
PR_IMPLEMENT(PRRecvWait*) PR_WaitRecvReady(PRWaitGroup *group)
{
    PRCList *io_ready = NULL;
#ifdef WINNT
    PRThread *me = _PR_MD_CURRENT_THREAD();
    _MDOverlapped *overlapped;
#endif

    if (!_pr_initialized) _PR_ImplicitInitialization();
    /* NULL group means the (lazily created) default group */
    if ((NULL == group) && (NULL == (group = MW_Init2()))) goto failed_init;

    PR_Lock(group->ml);

    if (_prmw_running != group->state)
    {
        /* Group has been cancelled or is shutting down */
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        goto invalid_state;
    }

    group->waiting_threads += 1;  /* the polling thread is counted */

#ifdef WINNT
    /* NT path: completions arrive via the I/O completion mechanism;
     * just sleep on the group's wait_list until io_ready is nonempty. */
    _PR_MD_LOCK(&group->mdlock);
    while (PR_CLIST_IS_EMPTY(&group->io_ready))
    {
        _PR_THREAD_LOCK(me);
        me->state = _PR_IO_WAIT;
        PR_APPEND_LINK(&me->waitQLinks, &group->wait_list);
        if (!_PR_IS_NATIVE_THREAD(me))
        {
            _PR_SLEEPQ_LOCK(me->cpu);
            _PR_ADD_SLEEPQ(me, PR_INTERVAL_NO_TIMEOUT);
            _PR_SLEEPQ_UNLOCK(me->cpu);
        }
        _PR_THREAD_UNLOCK(me);
        /* Drop both locks before sleeping; reacquire in the same order */
        _PR_MD_UNLOCK(&group->mdlock);
        PR_Unlock(group->ml);
        _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT);
        me->state = _PR_RUNNING;
        PR_Lock(group->ml);
        _PR_MD_LOCK(&group->mdlock);
        if (_PR_PENDING_INTERRUPT(me)) {
            PR_REMOVE_LINK(&me->waitQLinks);
            _PR_MD_UNLOCK(&group->mdlock);
            me->flags &= ~_PR_INTERRUPT;
            me->io_suspended = PR_FALSE;
            PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
            goto aborted;
        }
    }
    io_ready = PR_LIST_HEAD(&group->io_ready);
    PR_ASSERT(io_ready != NULL);
    PR_REMOVE_LINK(io_ready);
    _PR_MD_UNLOCK(&group->mdlock);
    /* The list links live inside an _MDOverlapped; recover the
     * containing structure, then point at the descriptor's link. */
    overlapped = (_MDOverlapped *)
        ((char *)io_ready - offsetof(_MDOverlapped, data));
    io_ready = &overlapped->data.mw.desc->internal;
#else
    do
    {
        /*
        ** If the I/O ready list isn't empty, have this thread
        ** return with the first receive wait object that's available.
        */
        if (PR_CLIST_IS_EMPTY(&group->io_ready))
        {
            /*
            ** Is there a polling thread yet? If not, grab this thread
            ** and use it.
            */
            if (NULL == group->poller)
            {
                /*
                ** This thread will stay and do polling until it becomes
                ** the only one left to service a completion. Then it will
                ** return and there will be none left to actually poll or
                ** to run completions.
                **
                ** The polling function should only return w/ failure or
                ** with some I/O ready.
                */
                if (PR_FAILURE == _MW_PollInternal(group)) goto failed_poll;
            }
            else
            {
                /*
                ** There are four reasons a thread can be awakened from
                ** a wait on the io_complete condition variable.
                ** 1. Some I/O has completed, i.e., the io_ready list
                **    is nonempty.
                ** 2. The wait group is canceled.
                ** 3. The thread is interrupted.
                ** 4. The current polling thread has to leave and needs
                **    a replacement.
                ** The logic to find a new polling thread is made more
                ** complicated by all the other possible events.
                ** I tried my best to write the logic clearly, but
                ** it is still full of if's with continue and goto.
                */
                PRStatus st;
                do
                {
                    st = PR_WaitCondVar(group->io_complete,
                        PR_INTERVAL_NO_TIMEOUT);
                    if (_prmw_running != group->state)
                    {
                        PR_SetError(PR_INVALID_STATE_ERROR, 0);
                        goto aborted;
                    }
                    if (_MW_ABORTED(st) || (NULL == group->poller)) break;
                } while (PR_CLIST_IS_EMPTY(&group->io_ready));

                /*
                ** The thread is interrupted and has to leave.  It might
                ** have also been awakened to process ready i/o or be the
                ** new poller.  To be safe, if either condition is true,
                ** we awaken another thread to take its place.
                */
                if (_MW_ABORTED(st))
                {
                    if ((NULL == group->poller
                        || !PR_CLIST_IS_EMPTY(&group->io_ready))
                        && group->waiting_threads > 1)
                        PR_NotifyCondVar(group->io_complete);
                    goto aborted;
                }

                /*
                ** A new poller is needed, but can I be the new poller?
                ** If there is no i/o ready, sure.  But if there is any
                ** i/o ready, it has a higher priority.  I want to
                ** process the ready i/o first and wake up another
                ** thread to be the new poller.
                */
                if (NULL == group->poller)
                {
                    if (PR_CLIST_IS_EMPTY(&group->io_ready)) continue;
                    if (group->waiting_threads > 1)
                        PR_NotifyCondVar(group->io_complete);
                }
            }
            PR_ASSERT(!PR_CLIST_IS_EMPTY(&group->io_ready));
        }
        io_ready = PR_LIST_HEAD(&group->io_ready);
        PR_NotifyCondVar(group->io_taken);
        PR_ASSERT(io_ready != NULL);
        PR_REMOVE_LINK(io_ready);
    } while (NULL == io_ready);

failed_poll:

#endif

aborted:

    group->waiting_threads -= 1;
invalid_state:
    /* If we were the last thread out of a stopping group, finish it */
    (void)MW_TestForShutdownInternal(group);
    PR_Unlock(group->ml);

failed_init:
    if (NULL != io_ready)
    {
        /* If the operation failed, record the reason why */
        switch (((PRRecvWait*)io_ready)->outcome)
        {
            case PR_MW_PENDING:
                PR_ASSERT(0);  /* must not return a still-pending desc */
                break;
            case PR_MW_SUCCESS:
#ifndef WINNT
                _MW_InitialRecv(io_ready);
#endif
                break;
#ifdef WINNT
            case PR_MW_FAILURE:
                _PR_MD_MAP_READ_ERROR(overlapped->data.mw.error);
                break;
#endif
            case PR_MW_TIMEOUT:
                PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
                break;
            case PR_MW_INTERRUPT:
                PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                break;
            default:
                break;
        }
#ifdef WINNT
        /* A timer exists iff the descriptor had a finite timeout */
        if (NULL != overlapped->data.mw.timer)
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                != overlapped->data.mw.desc->timeout);
            CancelTimer(overlapped->data.mw.timer);
        }
        else
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                == overlapped->data.mw.desc->timeout);
        }
        PR_DELETE(overlapped);
#endif
    }
    return (PRRecvWait*)io_ready;
}  /* PR_WaitRecvReady */
/*
** Register a receive-wait descriptor with the wait group. The
** descriptor is inserted into the group's hash table (expanding it if
** needed) and waiting threads are notified of new business. On the
** NT path, the read is additionally armed immediately via overlapped
** ReadFile, with an optional timer for finite timeouts; any arming
** failure rolls back the hash insertion.
*/
PR_IMPLEMENT(PRStatus) PR_AddWaitFileDesc(
    PRWaitGroup *group, PRRecvWait *desc)
{
    _PR_HashStory hrv;
    PRStatus rv = PR_FAILURE;
#ifdef WINNT
    _MDOverlapped *overlapped;
    HANDLE hFile;
    BOOL bResult;
    DWORD dwError;
    PRFileDesc *bottom;
#endif

    if (!_pr_initialized) _PR_ImplicitInitialization();
    /* NULL group means the (lazily created) default group */
    if ((NULL == group) && (NULL == (group = MW_Init2())))
    {
        return rv;
    }

    PR_ASSERT(NULL != desc->fd);

    desc->outcome = PR_MW_PENDING;  /* nice, well known value */
    desc->bytesRecv = 0;  /* likewise, though this value is ambiguious */

    PR_Lock(group->ml);

    if (_prmw_running != group->state)
    {
        /* Not allowed to add after cancelling the group */
        desc->outcome = PR_MW_INTERRUPT;
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        PR_Unlock(group->ml);
        return rv;
    }

#ifdef WINNT
    _PR_MD_LOCK(&group->mdlock);
#endif

    /*
    ** If the waiter count is zero at this point, there's no telling
    ** how long we've been idle. Therefore, initialize the beginning
    ** of the timing interval. As long as the list doesn't go empty,
    ** it will maintain itself.
    */
    if (0 == group->waiter->count)
        group->last_poll = PR_IntervalNow();

    /* Insert, growing the hash table as long as it keeps asking */
    do
    {
        hrv = MW_AddHashInternal(desc, group->waiter);
        if (_prmw_rehash != hrv) break;
        hrv = MW_ExpandHashInternal(group);  /* gruesome */
        if (_prmw_success != hrv) break;
    } while (PR_TRUE);

#ifdef WINNT
    _PR_MD_UNLOCK(&group->mdlock);
#endif

    PR_NotifyCondVar(group->new_business);  /* tell the world */
    rv = (_prmw_success == hrv) ? PR_SUCCESS : PR_FAILURE;
    PR_Unlock(group->ml);

#ifdef WINNT
    overlapped = PR_NEWZAP(_MDOverlapped);
    if (NULL == overlapped)
    {
        /* Undo the hash insertion made above */
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        NT_HashRemove(group, desc->fd);
        return rv;
    }
    overlapped->ioModel = _MD_MultiWaitIO;
    overlapped->data.mw.desc = desc;
    overlapped->data.mw.group = group;
    if (desc->timeout != PR_INTERVAL_NO_TIMEOUT)
    {
        overlapped->data.mw.timer = CreateTimer(
            desc->timeout,
            NT_TimeProc,
            overlapped);
        if (0 == overlapped->data.mw.timer)
        {
            NT_HashRemove(group, desc->fd);
            PR_DELETE(overlapped);
            /*
             * XXX It appears that a maximum of 16 timer events can
             * be outstanding. GetLastError() returns 0 when I try it.
             */
            PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, GetLastError());
            return PR_FAILURE;
        }
    }

    /* Reach to the bottom layer to get the OS fd */
    bottom = PR_GetIdentitiesLayer(desc->fd, PR_NSPR_IO_LAYER);
    PR_ASSERT(NULL != bottom);
    if (NULL == bottom)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }
    hFile = (HANDLE)bottom->secret->md.osfd;
    /* First overlapped operation on this fd: bind it to the
     * completion mechanism exactly once. */
    if (!bottom->secret->md.io_model_committed)
    {
        PRInt32 st;
        st = _md_Associate(hFile);
        PR_ASSERT(0 != st);
        bottom->secret->md.io_model_committed = PR_TRUE;
    }
    bResult = ReadFile(hFile,
        desc->buffer.start,
        (DWORD)desc->buffer.length,
        NULL,
        &overlapped->overlapped);
    if (FALSE == bResult && (dwError = GetLastError()) != ERROR_IO_PENDING)
    {
        /* Immediate failure (not merely pending): roll back */
        if (desc->timeout != PR_INTERVAL_NO_TIMEOUT)
        {
            /* Only cancel the timer if we (not the timer callback)
             * transitioned the outcome away from PENDING */
            if (InterlockedCompareExchange((LONG *)&desc->outcome,
                (LONG)PR_MW_FAILURE, (LONG)PR_MW_PENDING)
                == (LONG)PR_MW_PENDING)
            {
                CancelTimer(overlapped->data.mw.timer);
            }
            NT_HashRemove(group, desc->fd);
            PR_DELETE(overlapped);
        }
        _PR_MD_MAP_READ_ERROR(dwError);
        rv = PR_FAILURE;
    }
#endif

    return rv;
}  /* PR_AddWaitFileDesc */
/*
** Walk the wait group's table of registered receive descriptors.
** Pass previous == NULL to (re)start the enumeration; pass the last
** returned descriptor to continue it. Returns the next non-NULL
** entry, or NULL when the table is exhausted or on bad arguments.
** If the group changed since the enumeration started (p_timestamp
** mismatch), the enumeration restarts from the beginning.
*/
PR_IMPLEMENT(PRRecvWait*) PR_EnumerateWaitGroup(
    PRMWaitEnumerator *enumerator, const PRRecvWait *previous)
{
    PRRecvWait *result = NULL;

    /* entry point sanity checking */
    PR_ASSERT(NULL != enumerator);
    PR_ASSERT(_PR_ENUM_SEALED == enumerator->seal);
    if ((NULL == enumerator)
    || (_PR_ENUM_SEALED != enumerator->seal)) goto bad_argument;

    /* beginning of enumeration */
    if (NULL == previous)
    {
        if (NULL == enumerator->group)
        {
            /* No group bound yet: fall back to the default group */
            enumerator->group = mw_state->group;
            if (NULL == enumerator->group)
            {
                PR_SetError(PR_GROUP_EMPTY_ERROR, 0);
                return NULL;
            }
        }
        /* Capture cursor, version stamp, and owning thread */
        enumerator->waiter = &enumerator->group->waiter->recv_wait;
        enumerator->p_timestamp = enumerator->group->p_timestamp;
        enumerator->thread = PR_GetCurrentThread();
        enumerator->index = 0;
    }
    /* continuing an enumeration */
    else
    {
        PRThread *me = PR_GetCurrentThread();
        /* Enumerator state is single-threaded: only the thread that
         * started the enumeration may continue it */
        PR_ASSERT(me == enumerator->thread);
        if (me != enumerator->thread) goto bad_argument;

        /* need to restart the enumeration */
        if (enumerator->p_timestamp != enumerator->group->p_timestamp)
            return PR_EnumerateWaitGroup(enumerator, NULL);
    }

    /* actually progress the enumeration */
#if defined(WINNT)
    _PR_MD_LOCK(&enumerator->group->mdlock);
#else
    PR_Lock(enumerator->group->ml);
#endif

    /* Scan hash slots from the cursor, skipping empty (NULL) entries */
    while (enumerator->index++ < enumerator->group->waiter->length)
    {
        if (NULL != (result = *(enumerator->waiter)++)) break;
    }

#if defined(WINNT)
    _PR_MD_UNLOCK(&enumerator->group->mdlock);
#else
    PR_Unlock(enumerator->group->ml);
#endif

    return result;  /* what we live for */

bad_argument:
    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
    return NULL;  /* probably ambiguous */
}  /* PR_EnumerateWaitGroup */
/*
** Cancel a wait group: stop new business, wake all waiting threads,
** and mark every registered descriptor as interrupted. On the NT
** path, outstanding overlapped reads are forced to complete by
** closing the underlying sockets, and this thread sleeps until all
** completions have drained. Returns the first descriptor from the
** io_ready list (caller retrieves the rest via PR_WaitRecvReady),
** or NULL with PR_GROUP_EMPTY_ERROR when none remain.
*/
PR_IMPLEMENT(PRRecvWait*) PR_CancelWaitGroup(PRWaitGroup *group)
{
    PRRecvWait **desc;
    PRRecvWait *recv_wait = NULL;
#ifdef WINNT
    _MDOverlapped *overlapped;
    PRRecvWait **end;
    PRThread *me = _PR_MD_CURRENT_THREAD();
#endif

    if (NULL == group) group = mw_state->group;
    PR_ASSERT(NULL != group);
    if (NULL == group)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return NULL;
    }

    PR_Lock(group->ml);
    if (_prmw_stopped != group->state)
    {
        if (_prmw_running == group->state)
            group->state = _prmw_stopping;  /* so nothing new comes in */
        if (0 == group->waiting_threads)  /* is there anybody else? */
            group->state = _prmw_stopped;  /* we can stop right now */
        else
        {
            /* Rouse the waiters so they notice the state change */
            PR_NotifyAllCondVar(group->new_business);
            PR_NotifyAllCondVar(group->io_complete);
        }
        /* Wait for the last waiter to complete the shutdown */
        while (_prmw_stopped != group->state)
            (void)PR_WaitCondVar(group->mw_manage, PR_INTERVAL_NO_TIMEOUT);
    }

#ifdef WINNT
    _PR_MD_LOCK(&group->mdlock);
#endif

    /* make all the existing descriptors look done/interrupted */
#ifdef WINNT
    end = &group->waiter->recv_wait + group->waiter->length;
    for (desc = &group->waiter->recv_wait; desc < end; ++desc)
    {
        if (NULL != *desc)
        {
            /* Claim the outcome: only the winner of the PENDING ->
             * INTERRUPT exchange forces the i/o to complete */
            if (InterlockedCompareExchange((LONG *)&(*desc)->outcome,
                (LONG)PR_MW_INTERRUPT, (LONG)PR_MW_PENDING)
                == (LONG)PR_MW_PENDING)
            {
                PRFileDesc *bottom = PR_GetIdentitiesLayer(
                    (*desc)->fd, PR_NSPR_IO_LAYER);
                PR_ASSERT(NULL != bottom);
                if (NULL == bottom)
                {
                    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
                    goto invalid_arg;
                }
                bottom->secret->state = _PR_FILEDESC_CLOSED;
#if 0
                fprintf(stderr, "cancel wait group: closing socket\n");
#endif
                /* Closing the socket aborts the overlapped read */
                if (closesocket(bottom->secret->md.osfd) == SOCKET_ERROR)
                {
                    fprintf(stderr, "closesocket failed: %d\n",
                        WSAGetLastError());
                    exit(1);
                }
            }
        }
    }
    /* Sleep until every aborted i/o has been reaped from the hash */
    while (group->waiter->count > 0)
    {
        _PR_THREAD_LOCK(me);
        me->state = _PR_IO_WAIT;
        PR_APPEND_LINK(&me->waitQLinks, &group->wait_list);
        if (!_PR_IS_NATIVE_THREAD(me))
        {
            _PR_SLEEPQ_LOCK(me->cpu);
            _PR_ADD_SLEEPQ(me, PR_INTERVAL_NO_TIMEOUT);
            _PR_SLEEPQ_UNLOCK(me->cpu);
        }
        _PR_THREAD_UNLOCK(me);
        /* Drop both locks before sleeping; reacquire in the same order */
        _PR_MD_UNLOCK(&group->mdlock);
        PR_Unlock(group->ml);
        _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT);
        me->state = _PR_RUNNING;
        PR_Lock(group->ml);
        _PR_MD_LOCK(&group->mdlock);
    }
#else
    /* Non-NT path: synchronously mark each live descriptor done */
    for (desc = &group->waiter->recv_wait; group->waiter->count > 0; ++desc)
    {
        PR_ASSERT(desc < &group->waiter->recv_wait + group->waiter->length);
        if (NULL != *desc)
            _MW_DoneInternal(group, desc, PR_MW_INTERRUPT);
    }
#endif

    /* take first element of finished list and return it or NULL */
    if (PR_CLIST_IS_EMPTY(&group->io_ready))
        PR_SetError(PR_GROUP_EMPTY_ERROR, 0);
    else
    {
        PRCList *head = PR_LIST_HEAD(&group->io_ready);
        PR_REMOVE_AND_INIT_LINK(head);
#ifdef WINNT
        /* Recover the containing _MDOverlapped from the list link */
        overlapped = (_MDOverlapped *)
            ((char *)head - offsetof(_MDOverlapped, data));
        head = &overlapped->data.mw.desc->internal;
        if (NULL != overlapped->data.mw.timer)
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                != overlapped->data.mw.desc->timeout);
            CancelTimer(overlapped->data.mw.timer);
        }
        else
        {
            PR_ASSERT(PR_INTERVAL_NO_TIMEOUT
                == overlapped->data.mw.desc->timeout);
        }
        PR_DELETE(overlapped);
#endif
        recv_wait = (PRRecvWait*)head;
    }
#ifdef WINNT
invalid_arg:
    _PR_MD_UNLOCK(&group->mdlock);
#endif
    PR_Unlock(group->ml);

    return recv_wait;
}  /* PR_CancelWaitGroup */