Example 1
/*
** Notify all of the threads waiting on the condition variable. All of the
** threads are notified in turn. The highest priority thread will
** probably acquire the lock.
*/
PR_IMPLEMENT(PRStatus) PR_NotifyAllCondVar(PRCondVar *cvar)
{
    PRCList *q;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(cvar->lock->owner == me);
    if (cvar->lock->owner != me) return PR_FAILURE;

#ifdef _PR_GLOBAL_THREADS_ONLY
    _PR_MD_NOTIFYALL_CV(&cvar->md, &cvar->lock->ilock);
    return PR_SUCCESS;
#else  /* _PR_GLOBAL_THREADS_ONLY */
    if ( !_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSOFF(is);
    _PR_CVAR_LOCK(cvar);
    q = cvar->condQ.next;
    while (q != &cvar->condQ) {
		PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("PR_NotifyAll: cvar=%p", cvar));
		_PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me);
		q = q->next;
    }
    _PR_CVAR_UNLOCK(cvar);
    if (!_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSON(is);

    return PR_SUCCESS;
#endif  /* _PR_GLOBAL_THREADS_ONLY */
}
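A minimal caller-side sketch may help here (the names state_lock, state_cv, ready and announce_ready are hypothetical, not from the NSPR sources above): PR_NotifyAllCondVar must be called with the lock the condition variable was created with held, which is exactly what the ownership assertion at the top of the function checks.

#include "prtypes.h"
#include "prlock.h"
#include "prcvar.h"

static PRLock *state_lock;        /* protects `ready`; from PR_NewLock()          */
static PRCondVar *state_cv;       /* from PR_NewCondVar(state_lock)               */
static PRBool ready = PR_FALSE;   /* the condition the waiting threads care about */

static void announce_ready(void)
{
    PR_Lock(state_lock);               /* must own the cvar's lock, per the assert */
    ready = PR_TRUE;
    PR_NotifyAllCondVar(state_cv);     /* wake every thread queued on state_cv */
    PR_Unlock(state_lock);
}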
Example 2
void _PR_NotifyCondVar(PRCondVar *cvar, PRThread *me)
{
#ifdef _PR_GLOBAL_THREADS_ONLY
    _PR_MD_NOTIFY_CV(&cvar->md, &cvar->lock->ilock);
#else  /* _PR_GLOBAL_THREADS_ONLY */

    PRCList *q;
    PRIntn is;

    if ( !_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSOFF(is);
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

    _PR_CVAR_LOCK(cvar);
    q = cvar->condQ.next;
    while (q != &cvar->condQ) {
#ifndef XP_MAC
        PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("_PR_NotifyCondVar: cvar=%p", cvar));
#endif
        if (_PR_THREAD_CONDQ_PTR(q)->wait.cvar)  {
            if (_PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me) == PR_TRUE)
                break;
        }
        q = q->next;
    }
    _PR_CVAR_UNLOCK(cvar);

    if ( !_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSON(is);

#endif  /* _PR_GLOBAL_THREADS_ONLY */
}
Example 3
PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock *lockp)
{
	PRIntn _is;
	PRThread *me = _PR_MD_CURRENT_THREAD();

	if (me && !_PR_IS_NATIVE_THREAD(me))
		_PR_INTSOFF(_is); 
	pthread_mutex_destroy(&lockp->mutex);
	if (me && !_PR_IS_NATIVE_THREAD(me))
		_PR_FAST_INTSON(_is);
}
Example 4
PR_IMPLEMENT(void) PR_ShowStatus(void)
{
    PRIntn is;

    if ( _PR_MD_CURRENT_THREAD()
    && !_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) _PR_INTSOFF(is);
    _pr_dumpOut = _pr_stderr;
    _PR_DumpThreads(_pr_dumpOut);
    if ( _PR_MD_CURRENT_THREAD()
    && !_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) _PR_FAST_INTSON(is);
}
Example 5
PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock *lockp)
{
    PRStatus rv;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();	

	if (me && !_PR_IS_NATIVE_THREAD(me))
		_PR_INTSOFF(is);
	rv = pthread_mutex_init(&lockp->mutex, NULL);
	if (me && !_PR_IS_NATIVE_THREAD(me))
		_PR_FAST_INTSON(is);
	return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}
Example 6
/*
 * Notify thread waiting on cvar; called when thread is interrupted
 * 	The thread lock is held on entry and released before return
 */
void _PR_NotifyLockedThread (PRThread *thread)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRCondVar *cvar;
    PRThreadPriority pri;

    if ( !_PR_IS_NATIVE_THREAD(me))
    	PR_ASSERT(_PR_MD_GET_INTSOFF() != 0);

    cvar = thread->wait.cvar;
    thread->wait.cvar = NULL;
    _PR_THREAD_UNLOCK(thread);

    _PR_CVAR_LOCK(cvar);
    _PR_THREAD_LOCK(thread);

    if (!_PR_IS_NATIVE_THREAD(thread)) {
            _PR_SLEEPQ_LOCK(thread->cpu);
            /* The notify and timeout can collide, in which case both may
             * attempt to delete from the sleepQ; only let one do it.
             */
            if (thread->flags & (_PR_ON_SLEEPQ|_PR_ON_PAUSEQ))
                _PR_DEL_SLEEPQ(thread, PR_TRUE);
            _PR_SLEEPQ_UNLOCK(thread->cpu);

	    /* Make thread runnable */
	    pri = thread->priority;
	    thread->state = _PR_RUNNABLE;

	    PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));

            _PR_AddThreadToRunQ(me, thread);
            _PR_THREAD_UNLOCK(thread);

            _PR_MD_WAKEUP_WAITER(thread);
    } else {
	    if (thread->flags & _PR_SUSPENDING) {
		/*
		 * set thread state to SUSPENDED; a Resume operation
		 * on the thread will enable the thread to run
		 */
            	thread->state = _PR_SUSPENDED;
	     } else
            	thread->state = _PR_RUNNING;
            _PR_THREAD_UNLOCK(thread);
            _PR_MD_WAKEUP_WAITER(thread);
    }    

    _PR_CVAR_UNLOCK(cvar);
    return;
}
Example 7
void
_PR_MD_RESUME_THREAD(PRThread *thread)
{
    if (_PR_IS_NATIVE_THREAD(thread)) {
        ResumeThread(thread->md.handle);
    }
}
Example 8
static DWORD FiberSafeWaitForSingleObject(
    HANDLE hHandle,
    DWORD dwMilliseconds
)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (_PR_IS_NATIVE_THREAD(me)) {
        return WaitForSingleObject(hHandle, dwMilliseconds);
    } else {
        PRThread *waitThread;
        struct WaitSingleArg warg;
        PRStatus rv;

        warg.handle = hHandle;
        warg.timeout = dwMilliseconds;
        waitThread = PR_CreateThread(
                         PR_USER_THREAD, WaitSingleThread, &warg,
                         PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
        if (waitThread == NULL) {
            return WAIT_FAILED;
        }

        rv = PR_JoinThread(waitThread);
        PR_ASSERT(rv == PR_SUCCESS);
        if (rv == PR_FAILURE) {
            return WAIT_FAILED;
        }
        if (warg.rv == WAIT_FAILED) {
            SetLastError(warg.error);
        }
        return warg.rv;
    }
}
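The excerpt does not show WaitSingleThread or the layout of struct WaitSingleArg, so the following is only a reconstruction, guessed from how warg.rv and warg.error are consumed above; treat it as an illustrative sketch rather than the actual NSPR code.

#include <windows.h>

struct WaitSingleArg {
    HANDLE handle;    /* object to wait on (warg.handle above)          */
    DWORD timeout;    /* wait time in milliseconds (warg.timeout above) */
    DWORD rv;         /* result of WaitForSingleObject (warg.rv above)  */
    DWORD error;      /* GetLastError() value when rv == WAIT_FAILED    */
};

/* Runs on a native (global) NSPR thread, so the blocking Win32 wait
 * cannot stall the fiber scheduler used by the calling local thread. */
static void WaitSingleThread(void *arg)
{
    struct WaitSingleArg *warg = (struct WaitSingleArg *) arg;

    warg->rv = WaitForSingleObject(warg->handle, warg->timeout);
    if (warg->rv == WAIT_FAILED) {
        warg->error = GetLastError();
    }
}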
Example 9
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np) 
{
#if defined(_X86_)
    CONTEXT context;
    context.ContextFlags = CONTEXT_INTEGER;

    if (_PR_IS_NATIVE_THREAD(t)) {
        context.ContextFlags |= CONTEXT_CONTROL;
        if (GetThreadContext(t->md.handle, &context)) {
            t->md.gcContext[0] = context.Eax;
            t->md.gcContext[1] = context.Ebx;
            t->md.gcContext[2] = context.Ecx;
            t->md.gcContext[3] = context.Edx;
            t->md.gcContext[4] = context.Esi;
            t->md.gcContext[5] = context.Edi;
            t->md.gcContext[6] = context.Esp;
            t->md.gcContext[7] = context.Ebp;
            *np = PR_NUM_GCREGS;
        } else {
            PR_ASSERT(0);/* XXX */
        }
    } else {
        /* WARNING WARNING WARNING WARNING WARNING WARNING WARNING
         *
         * This code is extremely machine dependent and completely
         * undocumented by MS.  It is only known to work experimentally.
         * Ready for a walk on the wild side?
         *
         * WARNING WARNING WARNING WARNING WARNING WARNING WARNING */

#if !defined(WIN95)  /* Win95 does not have fibers */
        int *fiberData = t->md.fiber_id;

        /* I found these offsets by disassembling SwitchToFiber().
         * Are your palms sweating yet?
         */

        /* 
        ** EAX is on the stack (ESP+0)
        ** EDX is on the stack (ESP+4)
        ** ECX is on the stack (ESP+8)
        */
        t->md.gcContext[0] = 0;                /* context.Eax */
        t->md.gcContext[1] = fiberData[0x2e];  /* context.Ebx */
        t->md.gcContext[2] = 0;                /* context.Ecx */
        t->md.gcContext[3] = 0;                /* context.Edx */
        t->md.gcContext[4] = fiberData[0x2d];  /* context.Esi */
        t->md.gcContext[5] = fiberData[0x2c];  /* context.Edi */
        t->md.gcContext[6] = fiberData[0x36];  /* context.Esp */
        t->md.gcContext[7] = fiberData[0x32];  /* context.Ebp */
        *np = PR_NUM_GCREGS;
#endif
    }
    return (PRWord *)&t->md.gcContext;
#else
    PR_NOT_REACHED("not implemented");
    return NULL;
#endif /* defined(_X86_) */
}
Example 10
void
_PR_MD_SWITCH_CONTEXT(PRThread *thread)
{
    PR_ASSERT( !_PR_IS_NATIVE_THREAD(thread) );

    thread->md.fiber_last_error = GetLastError();
    _PR_Schedule();
}
/*
** Test and then lock the lock if it's not already locked by some other
** thread. Return PR_FALSE if some other thread owned the lock at the
** time of the call.
*/
PR_IMPLEMENT(PRBool) PR_TestAndLock(PRLock *lock)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRBool rv = PR_FALSE;
    PRIntn is;

#ifdef _PR_GLOBAL_THREADS_ONLY 
    is = _PR_MD_TEST_AND_LOCK(&lock->ilock);
    if (is == 0) {
        lock->owner = me;
        return PR_TRUE;
    }
    return PR_FALSE;
#else  /* _PR_GLOBAL_THREADS_ONLY */

#ifndef _PR_LOCAL_THREADS_ONLY
	if (_native_threads_only) {
		is = _PR_MD_TEST_AND_LOCK(&lock->ilock);
		if (is == 0) {
			lock->owner = me;
			return PR_TRUE;
		}
    	return PR_FALSE;
	}
#endif

    if (!_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSOFF(is);

    _PR_LOCK_LOCK(lock);
    if (lock->owner == 0) {
        /* Just got the lock */
        lock->owner = me;
        lock->priority = me->priority;
		/* Add the granted lock to this owning thread's lock list */
        PR_APPEND_LINK(&lock->links, &me->lockList);
        rv = PR_TRUE;
    }
    _PR_LOCK_UNLOCK(lock);

    if (!_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSON(is);
    return rv;
#endif  /* _PR_GLOBAL_THREADS_ONLY */
}
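As a usage note, a hypothetical caller-side sketch of the try-lock pattern (stats_lock and update_stats_if_uncontended are made-up names, assuming the exported PR_TestAndLock entry point shown above is visible to the caller): PR_TestAndLock never blocks, so the caller must handle the PR_FALSE case and must balance a successful acquisition with PR_Unlock.

#include "prlock.h"

static void update_stats_if_uncontended(PRLock *stats_lock)
{
    if (PR_TestAndLock(stats_lock)) {
        /* Lock acquired without blocking; we are now the owner. */
        /* ... touch the shared statistics here ... */
        PR_Unlock(stats_lock);
    } else {
        /* Another thread owned the lock; skip this round rather than block. */
    }
}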
Example 12
PRStatus
_PR_MD_WAKEUP_WAITER(PRThread *thread)
{
    if ( _PR_IS_NATIVE_THREAD(thread) )
    {
        if (ReleaseSemaphore(thread->md.blocked_sema, 1, NULL) == FALSE)
            return PR_FAILURE;
        else
            return PR_SUCCESS;
    }
    return PR_SUCCESS;  /* fall-through for non-native threads */
}
Example 13
PR_IMPLEMENT(PRStatus) PRP_NakedBroadcast(PRCondVar *cvar)
{
    PRCList *q;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock);

    if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is);
	_PR_MD_LOCK( &(cvar->ilock) );
    q = cvar->condQ.next;
    while (q != &cvar->condQ) {
		PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("PR_NotifyAll: cvar=%p", cvar));
		_PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me);
		q = q->next;
    }
	_PR_MD_UNLOCK( &(cvar->ilock) );
    if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is);

    return PR_SUCCESS;
}  /* PRP_NakedBroadcast */
Example 14
PRStatus
_PR_MD_WAKEUP_WAITER(PRThread *thread)
{
    if ( _PR_IS_NATIVE_THREAD(thread) )
    {
        if (DosPostEventSem(thread->md.blocked_sema) != NO_ERROR)
            return PR_FAILURE;
        else
            return PR_SUCCESS;
    }
    return PR_SUCCESS;  /* fall-through for non-native threads */
}
Example 15
PRStatus
_MD_WakeupWaiter(PRThread *thread)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 pid, rv;
    PRIntn is;

	PR_ASSERT(_pr_md_idle_cpus >= 0);
    if (thread == NULL) {
		if (_pr_md_idle_cpus)
        	_MD_Wakeup_CPUs();
    } else if (!_PR_IS_NATIVE_THREAD(thread)) {
		/*
		 * If the thread is on my cpu's runq there is no need to
		 * wakeup any cpus
		 */
		if (!_PR_IS_NATIVE_THREAD(me)) {
			if (me->cpu != thread->cpu) {
				if (_pr_md_idle_cpus)
        			_MD_Wakeup_CPUs();
			}
		} else {
			if (_pr_md_idle_cpus)
        		_MD_Wakeup_CPUs();
		}
    } else {
		PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_INTSOFF(is);

		pthread_mutex_lock(&thread->md.pthread_mutex);
		thread->md.wait++;
		rv = pthread_cond_signal(&thread->md.pthread_cond);
		PR_ASSERT(rv == 0);
		pthread_mutex_unlock(&thread->md.pthread_mutex);

		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
    } 
    return PR_SUCCESS;
}
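Only the signalling half of the pthread handshake is shown above; the sketch below reconstructs, under assumptions, what the matching wait side generally looks like in plain pthreads (the struct and function names are invented, and this is not the actual NSPR wait code): the wait counter lets a wakeup that arrives before the wait still be observed, and the while loop guards against spurious wakeups.

#include <pthread.h>

struct md_wait_state {
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
    int             wait;   /* wakeups granted but not yet consumed */
};

static void md_wait_for_wakeup(struct md_wait_state *w)
{
    pthread_mutex_lock(&w->mutex);
    while (w->wait == 0) {
        /* Releases the mutex while blocked; reacquires it before returning. */
        pthread_cond_wait(&w->cond, &w->mutex);
    }
    w->wait--;              /* consume exactly one wakeup */
    pthread_mutex_unlock(&w->mutex);
}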
Example 16
void
_PR_MD_SUSPEND_THREAD(PRThread *thread)
{
    if (_PR_IS_NATIVE_THREAD(thread)) {
       APIRET rc;

        /* XXXMB - DosSuspendThread() is not a blocking call; how do we
         * know when the thread is *REALLY* suspended?
         */
       rc = DosSuspendThread(thread->md.handle);
       PR_ASSERT(rc == NO_ERROR);
    }
}
Example 17
PRStatus
_MD_CreateThread(
    PRThread *thread,
    void (*start) (void *),
    PRThreadPriority priority,
    PRThreadScope scope,
    PRThreadState state,
    PRUint32 stackSize)
{
    PRIntn is;
    int rv;
	PRThread *me = _PR_MD_CURRENT_THREAD();	
	pthread_attr_t attr;

	if (!_PR_IS_NATIVE_THREAD(me))
		_PR_INTSOFF(is);

	if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        return PR_FAILURE;
	}

	if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
		pthread_mutex_destroy(&thread->md.pthread_mutex);
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        return PR_FAILURE;
	}
    thread->flags |= _PR_GLOBAL_SCOPE;

	pthread_attr_init(&attr); /* initialize attr with default attributes */
	if (pthread_attr_setstacksize(&attr, (size_t) stackSize) != 0) {
		pthread_mutex_destroy(&thread->md.pthread_mutex);
		pthread_cond_destroy(&thread->md.pthread_cond);
		pthread_attr_destroy(&attr);
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        return PR_FAILURE;
	}

	thread->md.wait = 0;
    rv = pthread_create(&thread->md.pthread, &attr, start, (void *)thread);
    if (0 == rv) {
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        return PR_SUCCESS;
    } else {
		pthread_mutex_destroy(&thread->md.pthread_mutex);
		pthread_cond_destroy(&thread->md.pthread_cond);
		pthread_attr_destroy(&attr);
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, rv);
        return PR_FAILURE;
    }
}
Example 18
void
_PR_MD_SUSPEND_THREAD(PRThread *thread)
{
    if (_PR_IS_NATIVE_THREAD(thread)) {
        /*
        ** There seems to be some doubt about whether or not SuspendThread
        ** is a synchronous function. The test afterwards is to help verify
        ** that it is, which is what Microsoft says it is.
        */
        PRUintn rv = SuspendThread(thread->md.handle);
        PR_ASSERT(0xffffffffUL != rv);
    }
}
Example 19
PRInt32 _MD_pr_poll(PRPollDesc *pds, PRIntn npds, PRIntervalTime timeout)
{
    PRInt32 rv = 0;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (_PR_PENDING_INTERRUPT(me))
    {
        me->flags &= ~_PR_INTERRUPT;
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        return -1;
    }
    if (0 == npds) PR_Sleep(timeout);
    else if (_PR_IS_NATIVE_THREAD(me))
        rv = NativeThreads(pds, npds, timeout);
    else rv = LocalThreads(pds, npds, timeout);

    return rv;
}  /* _MD_pr_poll */
Example 20
void
_PR_MD_RESTORE_CONTEXT(PRThread *thread)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT( !_PR_IS_NATIVE_THREAD(thread) );

    /* The user-level code for yielding will happily add ourselves to the runq
     * and then switch to ourselves; the NT fibers can't handle switching to 
     * ourselves.
     */
    if (thread != me) {
        SetLastError(thread->md.fiber_last_error);
        _MD_SET_CURRENT_THREAD(thread);
        _PR_MD_SET_LAST_THREAD(me);
        thread->no_sched = 1;
        SwitchToFiber(thread->md.fiber_id);
        POST_SWITCH_WORK();
    }
}
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np) 
{
    CONTEXTRECORD context;
    context.ContextFlags = CONTEXT_INTEGER;

    if (_PR_IS_NATIVE_THREAD(t)) {
        context.ContextFlags |= CONTEXT_CONTROL;
        if (QueryThreadContext(t->md.handle, CONTEXT_CONTROL, &context)) {
            t->md.gcContext[0] = context.ctx_RegEax;
            t->md.gcContext[1] = context.ctx_RegEbx;
            t->md.gcContext[2] = context.ctx_RegEcx;
            t->md.gcContext[3] = context.ctx_RegEdx;
            t->md.gcContext[4] = context.ctx_RegEsi;
            t->md.gcContext[5] = context.ctx_RegEdi;
            t->md.gcContext[6] = context.ctx_RegEsp;
            t->md.gcContext[7] = context.ctx_RegEbp;
            *np = PR_NUM_GCREGS;
        } else {
            PR_ASSERT(0);/* XXX */
        }
    }
    return (PRWord *)&t->md.gcContext;
}
Example 22
/*
** Make the given thread wait for the given condition variable
*/
PRStatus _PR_WaitCondVar(
    PRThread *thread, PRCondVar *cvar, PRLock *lock, PRIntervalTime timeout)
{
    PRIntn is;
    PRStatus rv = PR_SUCCESS;

    PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
    PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));

#ifdef _PR_GLOBAL_THREADS_ONLY
    if (_PR_PENDING_INTERRUPT(thread)) {
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        thread->flags &= ~_PR_INTERRUPT;
        return PR_FAILURE;
    }

    thread->wait.cvar = cvar;
    lock->owner = NULL;
    _PR_MD_WAIT_CV(&cvar->md,&lock->ilock, timeout);
    thread->wait.cvar = NULL;
    lock->owner = thread;
    if (_PR_PENDING_INTERRUPT(thread)) {
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        thread->flags &= ~_PR_INTERRUPT;
        return PR_FAILURE;
    }

    return PR_SUCCESS;
#else  /* _PR_GLOBAL_THREADS_ONLY */

    if ( !_PR_IS_NATIVE_THREAD(thread))
    	_PR_INTSOFF(is);

    _PR_CVAR_LOCK(cvar);
    _PR_THREAD_LOCK(thread);

    if (_PR_PENDING_INTERRUPT(thread)) {
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        thread->flags &= ~_PR_INTERRUPT;
    	_PR_CVAR_UNLOCK(cvar);
    	_PR_THREAD_UNLOCK(thread);
    	if ( !_PR_IS_NATIVE_THREAD(thread))
    		_PR_INTSON(is);
        return PR_FAILURE;
    }

    thread->state = _PR_COND_WAIT;
    thread->wait.cvar = cvar;

    /*
    ** Put the caller thread on the condition variable's wait Q
    */
    PR_APPEND_LINK(&thread->waitQLinks, &cvar->condQ);

    /* Note- for global scope threads, we don't put them on the
     *       global sleepQ, so each global thread must put itself
     *       to sleep only for the time it wants to.
     */
    if ( !_PR_IS_NATIVE_THREAD(thread) ) {
        _PR_SLEEPQ_LOCK(thread->cpu);
        _PR_ADD_SLEEPQ(thread, timeout);
        _PR_SLEEPQ_UNLOCK(thread->cpu);
    }
    _PR_CVAR_UNLOCK(cvar);
    _PR_THREAD_UNLOCK(thread);
   
    /* 
    ** Release the lock protecting the condition variable, giving the next
    ** thread a chance to notify on the condition variable.
    */
    PR_Unlock(lock);

    PR_LOG(_pr_cvar_lm, PR_LOG_MIN,
	   ("PR_Wait: cvar=%p waiting for %d", cvar, timeout));

    rv = _PR_MD_WAIT(thread, timeout);

    _PR_CVAR_LOCK(cvar);
    PR_REMOVE_LINK(&thread->waitQLinks);
    _PR_CVAR_UNLOCK(cvar);

    PR_LOG(_pr_cvar_lm, PR_LOG_MIN,
	   ("PR_Wait: cvar=%p done waiting", cvar));

    if ( !_PR_IS_NATIVE_THREAD(thread))
    	_PR_INTSON(is);

    /* Acquire lock again that we had just relinquished */
    PR_Lock(lock);

    if (_PR_PENDING_INTERRUPT(thread)) {
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        thread->flags &= ~_PR_INTERRUPT;
        return PR_FAILURE;
    }

    return rv;
#endif  /* _PR_GLOBAL_THREADS_ONLY */
}
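A caller-side sketch of the canonical wait loop built on the public PR_WaitCondVar (hypothetical names, reusing the same made-up state_lock/state_cv/ready shared state as the broadcast sketch after Example 1): because the lock is dropped inside the wait and the call can return on notify, timeout, or interrupt, the predicate must be re-tested after every wakeup.

#include "prtypes.h"
#include "prlock.h"
#include "prcvar.h"
#include "prinrval.h"

static PRLock *state_lock;      /* hypothetical shared state, as before */
static PRCondVar *state_cv;
static PRBool ready;

static PRStatus wait_until_ready(void)
{
    PRStatus rv = PR_SUCCESS;

    PR_Lock(state_lock);
    while (!ready && rv == PR_SUCCESS) {
        /* Returns PR_FAILURE with PR_PENDING_INTERRUPT_ERROR on interrupt. */
        rv = PR_WaitCondVar(state_cv, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(state_lock);
    return rv;
}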
Example 23
/*
** Notify one thread that it has finished waiting on a condition variable
** Caller must hold the _PR_CVAR_LOCK(cv)
*/
PRBool _PR_NotifyThread (PRThread *thread, PRThread *me)
{
    PRBool rv;

    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

    _PR_THREAD_LOCK(thread);
    PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
    if ( !_PR_IS_NATIVE_THREAD(thread) ) {
        if (thread->wait.cvar != NULL) {
            thread->wait.cvar = NULL;

            _PR_SLEEPQ_LOCK(thread->cpu);
            /* The notify and timeout can collide, in which case both may
             * attempt to delete from the sleepQ; only let one do it.
             */
            if (thread->flags & (_PR_ON_SLEEPQ|_PR_ON_PAUSEQ))
                _PR_DEL_SLEEPQ(thread, PR_TRUE);
            _PR_SLEEPQ_UNLOCK(thread->cpu);

	    if (thread->flags & _PR_SUSPENDING) {
		/*
		 * set thread state to SUSPENDED; a Resume operation
		 * on the thread will move it to the runQ
		 */
            	thread->state = _PR_SUSPENDED;
		_PR_MISCQ_LOCK(thread->cpu);
		_PR_ADD_SUSPENDQ(thread, thread->cpu);
		_PR_MISCQ_UNLOCK(thread->cpu);
            	_PR_THREAD_UNLOCK(thread);
	    } else {
            	/* Make thread runnable */
            	thread->state = _PR_RUNNABLE;
            	_PR_THREAD_UNLOCK(thread);

                _PR_AddThreadToRunQ(me, thread);
                _PR_MD_WAKEUP_WAITER(thread);
            }

            rv = PR_TRUE;
        } else {
            /* Thread has already been notified */
            _PR_THREAD_UNLOCK(thread);
            rv = PR_FALSE;
        }
    } else { /* If the thread is a native thread */
        if (thread->wait.cvar) {
            thread->wait.cvar = NULL;

	    if (thread->flags & _PR_SUSPENDING) {
		/*
		 * set thread state to SUSPENDED; a Resume operation
		 * on the thread will enable the thread to run
		 */
            	thread->state = _PR_SUSPENDED;
	     } else
            	thread->state = _PR_RUNNING;
            _PR_THREAD_UNLOCK(thread);
            _PR_MD_WAKEUP_WAITER(thread);
            rv = PR_TRUE;
        } else {
            _PR_THREAD_UNLOCK(thread);
            rv = PR_FALSE;
        }    
    }    

    return rv;
}
Example 24
int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
#endif
{
#ifdef AIX
    struct pollfd *filedes = (struct pollfd *) listptr;
#endif
    struct pollfd *pfd, *epfd;
    _PRUnixPollDesc *unixpds, *unixpd, *eunixpd;
    PRIntervalTime ticks;
    PRInt32 pdcnt;
    int ready;

    /*
     * Easy special case: zero timeout.  Simply call the native
     * poll() with no fear of blocking.
     */
    if (timeout == 0) {
#if defined(AIX)
        return _MD_POLL(listptr, nfds, timeout);
#else
        return _MD_POLL(filedes, nfds, timeout);
#endif
    }

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }

#ifndef _PR_LOCAL_THREADS_ONLY
    if (_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) {
    	return _MD_POLL(filedes, nfds, timeout);
    }
#endif

    /* We do not support the pollmsg structures on AIX */
#ifdef AIX
    PR_ASSERT((nfds & 0xff00) == 0);
#endif

    if (timeout < 0 && timeout != -1) {
        errno = EINVAL;
        return -1;
    }

    /* Convert timeout from milliseconds to ticks */
    if (timeout == -1) {
        ticks = PR_INTERVAL_NO_TIMEOUT;
    } else {
        ticks = PR_MillisecondsToInterval(timeout);
    }

    /* Check for no descriptor case (just do a timeout) */
    if (nfds == 0) {
        PR_Sleep(ticks);
        return 0;
    }

    unixpds = (_PRUnixPollDesc *)
            PR_MALLOC(nfds * sizeof(_PRUnixPollDesc));
    if (NULL == unixpds) {
        errno = EAGAIN;
        return -1;
    }

    pdcnt = 0;
    epfd = filedes + nfds;
    unixpd = unixpds;
    for (pfd = filedes; pfd < epfd; pfd++) {
        /*
         * poll() ignores negative fd's.
         */
        if (pfd->fd >= 0) {
            unixpd->osfd = pfd->fd;
#ifdef _PR_USE_POLL
            unixpd->in_flags = pfd->events;
#else
            /*
             * Map the poll events to one of the three that can be
             * represented by the select fd_sets:
             *     POLLIN, POLLRDNORM  ===> readable
             *     POLLOUT, POLLWRNORM ===> writable
             *     POLLPRI, POLLRDBAND ===> exception
             *     POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
             *     are ignored.
             *
             * The output events POLLERR and POLLHUP are never turned on.
             * POLLNVAL may be turned on.
             */
            unixpd->in_flags = 0;
            if (pfd->events & (POLLIN
#ifdef POLLRDNORM
                    | POLLRDNORM
#endif
                    )) {
                unixpd->in_flags |= _PR_UNIX_POLL_READ;
            }
            if (pfd->events & (POLLOUT
#ifdef POLLWRNORM
                    | POLLWRNORM
#endif
                    )) {
                unixpd->in_flags |= _PR_UNIX_POLL_WRITE;
            }
            if (pfd->events & (POLLPRI
#ifdef POLLRDBAND
                    | POLLRDBAND
#endif
                    )) {
                unixpd->in_flags |= _PR_UNIX_POLL_EXCEPT;
            }
#endif  /* _PR_USE_POLL */
            unixpd->out_flags = 0;
            unixpd++;
            pdcnt++;
        }
    }

    ready = _PR_WaitForMultipleFDs(unixpds, pdcnt, ticks);
    if (-1 == ready) {
        if (PR_GetError() == PR_PENDING_INTERRUPT_ERROR) {
            errno = EINTR;  /* XXX we aren't interrupted by a signal, but... */
        } else {
            errno = PR_GetOSError();
        }
    }
    if (ready <= 0) {
        goto done;
    }

    /*
     * Copy the out_flags from the _PRUnixPollDesc structures to the
     * user's pollfd structures and free the allocated memory
     */
    unixpd = unixpds;
    for (pfd = filedes; pfd < epfd; pfd++) {
        pfd->revents = 0;
        if (pfd->fd >= 0) {
#ifdef _PR_USE_POLL
            pfd->revents = unixpd->out_flags;
#else
            if (0 != unixpd->out_flags) {
                if (unixpd->out_flags & _PR_UNIX_POLL_READ) {
                    if (pfd->events & POLLIN) {
                        pfd->revents |= POLLIN;
                    }
#ifdef POLLRDNORM
                    if (pfd->events & POLLRDNORM) {
                        pfd->revents |= POLLRDNORM;
                    }
#endif
                }
                if (unixpd->out_flags & _PR_UNIX_POLL_WRITE) {
                    if (pfd->events & POLLOUT) {
                        pfd->revents |= POLLOUT;
                    }
#ifdef POLLWRNORM
                    if (pfd->events & POLLWRNORM) {
                        pfd->revents |= POLLWRNORM;
                    }
#endif
                }
                if (unixpd->out_flags & _PR_UNIX_POLL_EXCEPT) {
                    if (pfd->events & POLLPRI) {
                        pfd->revents |= POLLPRI;
                    }
#ifdef POLLRDBAND
                    if (pfd->events & POLLRDBAND) {
                        pfd->revents |= POLLRDBAND;
                    }
#endif
                }
                if (unixpd->out_flags & _PR_UNIX_POLL_ERR) {
                    pfd->revents |= POLLERR;
                }
                if (unixpd->out_flags & _PR_UNIX_POLL_NVAL) {
                    pfd->revents |= POLLNVAL;
                }
                if (unixpd->out_flags & _PR_UNIX_POLL_HUP) {
                    pfd->revents |= POLLHUP;
                }
            }
#endif  /* _PR_USE_POLL */
            unixpd++;
        }
    }

done:
    PR_DELETE(unixpds);
    return ready;
}
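For context, a hypothetical caller of the emulated poll() above (wait_readable and its parameters are made-up names): from a local NSPR thread the call ends up in _PR_WaitForMultipleFDs, so only the calling user-level thread blocks rather than the whole native thread.

#include <poll.h>

static int wait_readable(int sock_fd, int timeout_ms)
{
    struct pollfd pfd;

    pfd.fd = sock_fd;
    pfd.events = POLLIN;
    pfd.revents = 0;

    /* >0: number of ready descriptors, 0: timeout, -1: error (errno set). */
    return poll(&pfd, 1, timeout_ms);
}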
/*
** Unblock the first runnable waiting thread. Skip over
** threads that are in the process of being suspended.
** Note: Caller must hold _PR_LOCK_LOCK()
*/
void _PR_UnblockLockWaiter(PRLock *lock)
{
    PRThread *t = NULL;
    PRThread *me;
    PRCList *q;

    q = lock->waitQ.next;
    PR_ASSERT(q != &lock->waitQ);
    while (q != &lock->waitQ) {
        /* Unblock first waiter */
        t = _PR_THREAD_CONDQ_PTR(q);

		/* 
		** We are about to change the thread's state to runnable and for local
		** threads, we are going to assign a cpu to it.  So, protect thread's
		** data structure.
		*/
        _PR_THREAD_LOCK(t);

        if (t->flags & _PR_SUSPENDING) {
            q = q->next;
            _PR_THREAD_UNLOCK(t);
            continue;
        }

        /* Found a runnable thread */
	    PR_ASSERT(t->state == _PR_LOCK_WAIT);
	    PR_ASSERT(t->wait.lock == lock);
        t->wait.lock = 0;
        PR_REMOVE_LINK(&t->waitQLinks);         /* take it off lock's waitQ */

		/*
		** If this is a native thread, nothing else to do except to wake it
		** up by calling the machine dependent wakeup routine.
		**
		** If this is a local thread, we need to assign it a cpu and
		** put the thread on that cpu's run queue.  There are two cases to
		** take care of.  If the currently running thread is also a local
		** thread, we just assign our own cpu to that thread and put it on
		** the cpu's run queue.  If the currently running thread is a
		** native thread, we assign the primordial cpu to it (on NT,
		** MD_WAKEUP handles the cpu assignment).  
		*/
		
        if ( !_PR_IS_NATIVE_THREAD(t) ) {

            t->state = _PR_RUNNABLE;

            me = _PR_MD_CURRENT_THREAD();

            _PR_AddThreadToRunQ(me, t);
            _PR_THREAD_UNLOCK(t);
        } else {
            t->state = _PR_RUNNING;
            _PR_THREAD_UNLOCK(t);
        }
        _PR_MD_WAKEUP_WAITER(t);
        break;
    }
    return;
}
/*
** Unlock the lock.
*/
PR_IMPLEMENT(PRStatus) PR_Unlock(PRLock *lock)
{
    PRCList *q;
    PRThreadPriority pri, boost;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(lock != NULL);
    PR_ASSERT(lock->owner == me);
    PR_ASSERT(me != suspendAllThread); 
    PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));
    if (lock->owner != me) {
        return PR_FAILURE;
    }

#ifdef _PR_GLOBAL_THREADS_ONLY 
    lock->owner = 0;
    _PR_MD_UNLOCK(&lock->ilock);
    return PR_SUCCESS;
#else  /* _PR_GLOBAL_THREADS_ONLY */

	if (_native_threads_only) {
		lock->owner = 0;
		_PR_MD_UNLOCK(&lock->ilock);
		return PR_SUCCESS;
	}

    if (!_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSOFF(is);
    _PR_LOCK_LOCK(lock);

	/* Remove the lock from the owning thread's lock list */
    PR_REMOVE_LINK(&lock->links);
    pri = lock->priority;
    boost = lock->boostPriority;
    if (boost > pri) {
        /*
        ** We received a priority boost during the time we held the lock.
        ** We need to figure out what priority to move to by scanning
        ** down our list of lock's that we are still holding and using
        ** the highest boosted priority found.
        */
        q = me->lockList.next;
        while (q != &me->lockList) {
            PRLock *ll = _PR_LOCK_PTR(q);
            if (ll->boostPriority > pri) {
                pri = ll->boostPriority;
            }
            q = q->next;
        }
        if (pri != me->priority) {
            _PR_SetThreadPriority(me, pri);
        }
    }

    /* Unblock the first waiting thread */
    q = lock->waitQ.next;
    if (q != &lock->waitQ)
        _PR_UnblockLockWaiter(lock);
    lock->boostPriority = PR_PRIORITY_LOW;
    lock->owner = 0;
    _PR_LOCK_UNLOCK(lock);
    if (!_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSON(is);
    return PR_SUCCESS;
#endif  /* _PR_GLOBAL_THREADS_ONLY */
}
/*
** Lock the lock.
*/
PR_IMPLEMENT(void) PR_Lock(PRLock *lock)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntn is;
    PRThread *t;
    PRCList *q;

    PR_ASSERT(me != suspendAllThread); 
    PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));
    PR_ASSERT(lock != NULL);
#ifdef _PR_GLOBAL_THREADS_ONLY 
    _PR_MD_LOCK(&lock->ilock);
    PR_ASSERT(lock->owner == 0);
    lock->owner = me;
    return;
#else  /* _PR_GLOBAL_THREADS_ONLY */

	if (_native_threads_only) {
		_PR_MD_LOCK(&lock->ilock);
		PR_ASSERT(lock->owner == 0);
		lock->owner = me;
		return;
	}

    if (!_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSOFF(is);

    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

retry:
    _PR_LOCK_LOCK(lock);
    if (lock->owner == 0) {
        /* Just got the lock */
        lock->owner = me;
        lock->priority = me->priority;
		/* Add the granted lock to this owning thread's lock list */
        PR_APPEND_LINK(&lock->links, &me->lockList);
        _PR_LOCK_UNLOCK(lock);
    	if (!_PR_IS_NATIVE_THREAD(me))
        	_PR_FAST_INTSON(is);
        return;
    }

    /* If this thread already owns this lock, then it is a deadlock */
    PR_ASSERT(lock->owner != me);

    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

#if 0
    if (me->priority > lock->owner->priority) {
        /*
        ** Give the lock owner a priority boost until we get the
        ** lock. Record the priority we boosted it to.
        */
        lock->boostPriority = me->priority;
        _PR_SetThreadPriority(lock->owner, me->priority);
    }
#endif

    /* 
    Add this thread to the asked-for lock's list of waiting threads.  We
    add this thread in priority order so that, when the unlock occurs,
    the highest priority waiter will get the lock.
    */
    q = lock->waitQ.next;
    if (q == &lock->waitQ || _PR_THREAD_CONDQ_PTR(q)->priority ==
      	_PR_THREAD_CONDQ_PTR(lock->waitQ.prev)->priority) {
		/*
		 * If all the threads in the lock waitQ have the same priority,
		 * then avoid scanning the list:  insert the element at the end.
		 */
		q = &lock->waitQ;
    } else {
		/* Sort thread into lock's waitQ at appropriate point */
		/* Now scan the list for where to insert this entry */
		while (q != &lock->waitQ) {
			t = _PR_THREAD_CONDQ_PTR(q);
			if (me->priority > t->priority) {
				/* Found a lower priority thread to insert in front of */
				break;
			}
			q = q->next;
		}
	}
    PR_INSERT_BEFORE(&me->waitQLinks, q);

	/* 
	Now grab the threadLock since we are about to change the state.  We have
	to do this since a PR_Suspend or PR_SetThreadPriority type call that takes
	a PRThread* as an argument could be changing the state of this thread from
	a thread running on a different cpu.
	*/

    _PR_THREAD_LOCK(me);
    me->state = _PR_LOCK_WAIT;
    me->wait.lock = lock;
    _PR_THREAD_UNLOCK(me);

    _PR_LOCK_UNLOCK(lock);

    _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT);
	goto retry;

#endif  /* _PR_GLOBAL_THREADS_ONLY */
}
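A minimal caller-side sketch (counter, counter_lock and bump_counter are hypothetical names): PR_Lock blocks until the lock is granted, and PR_Unlock asserts that the caller is the owner, so acquisitions and releases must be strictly paired on the same thread.

#include "prlock.h"

static int counter;             /* hypothetical shared state                 */
static PRLock *counter_lock;    /* created once elsewhere with PR_NewLock()  */

static void bump_counter(void)
{
    PR_Lock(counter_lock);      /* blocks until this thread owns the lock    */
    counter++;
    PR_Unlock(counter_lock);    /* wakes the first runnable waiter, if any   */
}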
Example 28
PR_IMPLEMENT(PRStatus) PR_Cleanup()
{
    PRThread *me = PR_GetCurrentThread();
    PR_ASSERT((NULL != me) && (me->flags & _PR_PRIMORDIAL));
    if ((NULL != me) && (me->flags & _PR_PRIMORDIAL))
    {

        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("PR_Cleanup: shutting down NSPR"));

        /*
         * No more recycling of threads
         */
        _pr_recycleThreads = 0;

        /*
         * Wait for all other user (non-system/daemon) threads
         * to terminate.
         */
        PR_Lock(_pr_activeLock);
        while (_pr_userActive > _pr_primordialExitCount) {
            PR_WaitCondVar(_pr_primordialExitCVar, PR_INTERVAL_NO_TIMEOUT);
        }
        PR_Unlock(_pr_activeLock);

#if defined(WIN16)
		_PR_ShutdownLinker();
#endif
        /* Release the primordial thread's private data, etc. */
        _PR_CleanupThread(me);

        _PR_MD_STOP_INTERRUPTS();

	    PR_LOG(_pr_thread_lm, PR_LOG_MIN,
	            ("PR_Cleanup: clean up before destroying thread"));
	#ifdef PR_LOGGING
	    _PR_LogCleanup();
	#endif

        /*
         * This part should look like the end of _PR_NativeRunThread
         * and _PR_UserRunThread.
         */
        if (_PR_IS_NATIVE_THREAD(me)) {
            _PR_MD_EXIT_THREAD(me);
            _PR_NativeDestroyThread(me);
        } else {
            _PR_UserDestroyThread(me);
            PR_DELETE(me->stack);
            PR_DELETE(me);
        }

        /*
         * XXX: We are freeing the heap memory here so that Purify won't
         * complain, but we should also free other kinds of resources
         * that are allocated by the _PR_InitXXX() functions.
         * Ideally, for each _PR_InitXXX(), there should be a corresponding
         * _PR_XXXCleanup() that we can call here.
         */
        _PR_CleanupBeforeExit();
        return PR_SUCCESS;
    }
    return PR_FAILURE;
}
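An illustrative shutdown sequence for the primordial thread (the worker function and the whole main are hypothetical, not from the sources above): PR_Cleanup waits for all other user threads to terminate and then tears NSPR down, so it is normally the last NSPR call made from main().

#include "prinit.h"
#include "prthread.h"

static void worker(void *arg)
{
    /* ... user thread work ... */
}

int main(void)
{
    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);

    /* An unjoinable user thread; PR_Cleanup still waits for it to exit. */
    PR_CreateThread(PR_USER_THREAD, worker, NULL, PR_PRIORITY_NORMAL,
                    PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD, 0);

    /* Blocks until all user threads have exited, then shuts down NSPR. */
    return (PR_Cleanup() == PR_SUCCESS) ? 0 : 1;
}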
Example 29
int select(int width, fd_set *rd, fd_set *wr, fd_set *ex, struct timeval *tv)
#endif
{
    int osfd;
    _PRUnixPollDesc *unixpds, *unixpd, *eunixpd;
    PRInt32 pdcnt;
    PRIntervalTime timeout;
    int retVal;
#if defined(HPUX9) || defined(AIX_RENAME_SELECT)
    fd_set *rd = (fd_set*) rl;
    fd_set *wr = (fd_set*) wl;
    fd_set *ex = (fd_set*) el;
#endif

#if 0
    /*
     * Easy special case: zero timeout.  Simply call the native
     * select() with no fear of blocking.
     */
    if (tv != NULL && tv->tv_sec == 0 && tv->tv_usec == 0) {
#if defined(HPUX9) || defined(AIX_RENAME_SELECT)
        return _MD_SELECT(width, rl, wl, el, tv);
#else
        return _MD_SELECT(width, rd, wr, ex, tv);
#endif
    }
#endif

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
		
#ifndef _PR_LOCAL_THREADS_ONLY
    if (_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD())) {
        return _MD_SELECT(width, rd, wr, ex, tv);	
    }
#endif

    if (width < 0 || width > FD_SETSIZE) {
        errno = EINVAL;
        return -1;
    }

    /* Compute timeout */
    if (tv) {
        /*
         * These acceptable ranges for t_sec and t_usec are taken
         * from the select() man pages.
         */
        if (tv->tv_sec < 0 || tv->tv_sec > 100000000
                || tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
            errno = EINVAL;
            return -1;
        }

        /* Convert microseconds to ticks */
        timeout = PR_MicrosecondsToInterval(1000000*tv->tv_sec + tv->tv_usec);
    } else {
        /* tv being a NULL pointer means blocking indefinitely */
        timeout = PR_INTERVAL_NO_TIMEOUT;
    }

    /* Check for no descriptors case (just doing a timeout) */
    if ((!rd && !wr && !ex) || !width) {
        PR_Sleep(timeout);
        return 0;
    }

    /*
     * Set up for PR_Poll().  The PRPollDesc array is allocated
     * dynamically.  If this turns out to have high performance
     * penalty, one can change to use a large PRPollDesc array
     * on the stack, and allocate dynamically only when it turns
     * out to be not large enough.
     *
     * I allocate an array of size 'width', which is the maximum
     * number of fds we may need to poll.
     */
    unixpds = (_PRUnixPollDesc *) PR_CALLOC(width * sizeof(_PRUnixPollDesc));
    if (!unixpds) {
        errno = ENOMEM;
        return -1;
    }

    pdcnt = 0;
    unixpd = unixpds;
    for (osfd = 0; osfd < width; osfd++) {
        int in_flags = 0;
        if (rd && FD_ISSET(osfd, rd)) {
            in_flags |= _PR_UNIX_POLL_READ;
        }
        if (wr && FD_ISSET(osfd, wr)) {
            in_flags |= _PR_UNIX_POLL_WRITE;
        }
        if (ex && FD_ISSET(osfd, ex)) {
            in_flags |= _PR_UNIX_POLL_EXCEPT;
        }
        if (in_flags) {
            unixpd->osfd = osfd;
            unixpd->in_flags = in_flags;
            unixpd->out_flags = 0;
            unixpd++;
            pdcnt++;
        }
    }

    /*
     * see comments in mozilla/cmd/xfe/mozilla.c (look for
     * "PR_XGetXtHackFD")
     */
   {
     int needToLockXAgain;
 
     needToLockXAgain = 0;
     if (rd && (_pr_xt_hack_fd != -1)
             && FD_ISSET(_pr_xt_hack_fd, rd) && PR_XIsLocked()
             && (!_pr_xt_hack_okayToReleaseXLock
             || _pr_xt_hack_okayToReleaseXLock())) {
         PR_XUnlock();
         needToLockXAgain = 1;
     }

    /* This is the potentially blocking step */
    retVal = _PR_WaitForMultipleFDs(unixpds, pdcnt, timeout);

     if (needToLockXAgain) {
         PR_XLock();
     }
   }

    if (retVal > 0) {
        /* Compute select results */
        if (rd) ZAP_SET(rd, width);
        if (wr) ZAP_SET(wr, width);
        if (ex) ZAP_SET(ex, width);

        /*
         * The return value can be either the number of ready file
         * descriptors or the number of set bits in the three fd_set's.
         */
        retVal = 0;  /* we're going to recompute */
        eunixpd = unixpds + pdcnt;
        for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
            if (unixpd->out_flags) {
                int nbits = 0;  /* The number of set bits on for this fd */

                if (unixpd->out_flags & _PR_UNIX_POLL_NVAL) {
                    errno = EBADF;
                    PR_LOG(_pr_io_lm, PR_LOG_ERROR,
                            ("select returns EBADF for %d", unixpd->osfd));
                    retVal = -1;
                    break;
                }
                /*
                 * If a socket has a pending error, it is considered
                 * both readable and writable.  (See W. Richard Stevens,
                 * Unix Network Programming, Vol. 1, 2nd Ed., Section 6.3,
                 * pp. 153-154.)  We also consider a socket readable if
                 * it has a hangup condition.
                 */
                if (rd && (unixpd->in_flags & _PR_UNIX_POLL_READ)
                        && (unixpd->out_flags & (_PR_UNIX_POLL_READ
                        | _PR_UNIX_POLL_ERR | _PR_UNIX_POLL_HUP))) {
                    FD_SET(unixpd->osfd, rd);
                    nbits++;
                }
                if (wr && (unixpd->in_flags & _PR_UNIX_POLL_WRITE)
                        && (unixpd->out_flags & (_PR_UNIX_POLL_WRITE
                        | _PR_UNIX_POLL_ERR))) {
                    FD_SET(unixpd->osfd, wr);
                    nbits++;
                }
                if (ex && (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT)
                        && (unixpd->out_flags & _PR_UNIX_POLL_EXCEPT)) {
                    FD_SET(unixpd->osfd, ex);
                    nbits++;
                }
                PR_ASSERT(nbits > 0);
#if defined(HPUX) || defined(SOLARIS) || defined(SUNOS4) || defined(OSF1) || defined(AIX)
                retVal += nbits;
#else /* IRIX */
                retVal += 1;
#endif
            }
        }
    }

    PR_ASSERT(tv || retVal != 0);
    PR_LOG(_pr_io_lm, PR_LOG_MIN, ("select returns %d", retVal));
    PR_DELETE(unixpds);

    return retVal;
}
Example 30
/*
** Expire condition variable waits that are ready to expire, measured
** against the time elapsed since this CPU's last clock tick.
*/
void _PR_ClockInterrupt(void)
{
    PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
    _PRCPU *cpu = me->cpu;
    PRIntervalTime elapsed, now;
 
    PR_ASSERT(_PR_MD_GET_INTSOFF() != 0);
    /* Figure out how much time elapsed since the last clock tick */
    now = PR_IntervalNow();
    elapsed = now - cpu->last_clock;
    cpu->last_clock = now;

#ifndef XP_MAC
    PR_LOG(_pr_clock_lm, PR_LOG_MAX,
	   ("ExpireWaits: elapsed=%lld usec", elapsed));
#endif

    while(1) {
        _PR_SLEEPQ_LOCK(cpu);
        if (_PR_SLEEPQ(cpu).next == &_PR_SLEEPQ(cpu)) {
            _PR_SLEEPQ_UNLOCK(cpu);
            break;
        }

        thread = _PR_THREAD_PTR(_PR_SLEEPQ(cpu).next);
        PR_ASSERT(thread->cpu == cpu);

        if (elapsed < thread->sleep) {
            thread->sleep -= elapsed;
            _PR_SLEEPQMAX(thread->cpu) -= elapsed;
            _PR_SLEEPQ_UNLOCK(cpu);
            break;
        }
        _PR_SLEEPQ_UNLOCK(cpu);

        PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

        _PR_THREAD_LOCK(thread);

        if (thread->cpu != cpu) {
            /*
            ** The thread was switched to another CPU
            ** between the time we unlocked the sleep
            ** queue and the time we acquired the thread
            ** lock, so it is none of our business now.
            */
            _PR_THREAD_UNLOCK(thread);
            continue;
        }

        /*
        ** Consume this sleeper's amount of elapsed time from the elapsed
        ** time value. The next remaining piece of elapsed time will be
        ** available for the next sleeping thread's timer.
        */
        _PR_SLEEPQ_LOCK(cpu);
        PR_ASSERT(!(thread->flags & _PR_ON_PAUSEQ));
        if (thread->flags & _PR_ON_SLEEPQ) {
            _PR_DEL_SLEEPQ(thread, PR_FALSE);
            elapsed -= thread->sleep;
            _PR_SLEEPQ_UNLOCK(cpu);
        } else {
            /* Thread was already handled; Go get another one */
            _PR_SLEEPQ_UNLOCK(cpu);
            _PR_THREAD_UNLOCK(thread);
            continue;
        }

        /* Notify the thread waiting on the condition variable */
        if (thread->flags & _PR_SUSPENDING) {
		PR_ASSERT((thread->state == _PR_IO_WAIT) ||
				(thread->state == _PR_COND_WAIT));
            /*
            ** Thread is suspended and its condition timeout
            ** expired. Transfer thread from sleepQ to suspendQ.
            */
            thread->wait.cvar = NULL;
            _PR_MISCQ_LOCK(cpu);
            thread->state = _PR_SUSPENDED;
            _PR_ADD_SUSPENDQ(thread, cpu);
            _PR_MISCQ_UNLOCK(cpu);
        } else {
            if (thread->wait.cvar) {
                PRThreadPriority pri;

                /* Do work very similar to what _PR_NotifyThread does */
                PR_ASSERT( !_PR_IS_NATIVE_THREAD(thread) );

                /* Make thread runnable */
                pri = thread->priority;
                thread->state = _PR_RUNNABLE;
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));

                PR_ASSERT(thread->cpu == cpu);
                _PR_RUNQ_LOCK(cpu);
                _PR_ADD_RUNQ(thread, cpu, pri);
                _PR_RUNQ_UNLOCK(cpu);

                if (pri > me->priority)
                    _PR_SET_RESCHED_FLAG();

                thread->wait.cvar = NULL;

                _PR_MD_WAKEUP_WAITER(thread);

            } else if (thread->io_pending == PR_TRUE) {
                /* Need to put IO sleeper back on runq */
                int pri = thread->priority;

                thread->io_suspended = PR_TRUE;
#ifdef WINNT
				/*
				 * For NT, record the cpu on which I/O was issued
				 * I/O cancellation is done on the same cpu
				 */
                thread->md.thr_bound_cpu = cpu;
#endif

				PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                PR_ASSERT(thread->cpu == cpu);
                thread->state = _PR_RUNNABLE;
                _PR_RUNQ_LOCK(cpu);
                _PR_ADD_RUNQ(thread, cpu, pri);
                _PR_RUNQ_UNLOCK(cpu);
            }
        }
        _PR_THREAD_UNLOCK(thread);
    }
}