Example #1
PRStatus
_MD_CreateThread(
    PRThread *thread,
    void (*start) (void *),
    PRThreadPriority priority,
    PRThreadScope scope,
    PRThreadState state,
    PRUint32 stackSize)
{
    PRIntn is;
    int rv;
	PRThread *me = _PR_MD_CURRENT_THREAD();	
	pthread_attr_t attr;

	if (!_PR_IS_NATIVE_THREAD(me))
		_PR_INTSOFF(is);

	if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        return PR_FAILURE;
	}

	if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
		pthread_mutex_destroy(&thread->md.pthread_mutex);
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        return PR_FAILURE;
	}
    thread->flags |= _PR_GLOBAL_SCOPE;

	pthread_attr_init(&attr); /* initialize attr with default attributes */
	if (pthread_attr_setstacksize(&attr, (size_t) stackSize) != 0) {
		pthread_mutex_destroy(&thread->md.pthread_mutex);
		pthread_cond_destroy(&thread->md.pthread_cond);
		pthread_attr_destroy(&attr);
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        return PR_FAILURE;
	}

	thread->md.wait = 0;
    rv = pthread_create(&thread->md.pthread, &attr,
                        (void *(*)(void *))start, (void *)thread);
    if (0 == rv) {
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        return PR_SUCCESS;
    } else {
		pthread_mutex_destroy(&thread->md.pthread_mutex);
		pthread_cond_destroy(&thread->md.pthread_cond);
		pthread_attr_destroy(&attr);
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
        PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, rv);
        return PR_FAILURE;
    }
}
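
All of the examples on this page share one guard: a local (non-native) NSPR thread turns soft interrupts off before touching pthread state and turns them back on afterwards, while native threads skip the guard entirely. The helper below isolates that pattern as a sketch; with_interrupts_off and its parameters are illustrative names, not NSPR functions.

/* Sketch only: the interrupt guard shared by the examples on this page.
 * with_interrupts_off is a hypothetical helper, not NSPR API. */
static PRStatus
with_interrupts_off(int (*op)(void *), void *arg)
{
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    int rv;

    /* Only local (user-level) threads need soft interrupts disabled;
     * native threads are preempted by the kernel and are left alone. */
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);

    rv = op(arg);               /* the pthread_* call being protected */

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(is);

    return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}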
Example #2
File: prdump.c  Project: Akesure/jxcore
PR_IMPLEMENT(void) PR_ShowStatus(void)
{
    PRIntn is;

    if (_PR_MD_CURRENT_THREAD()
        && !_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD()))
        _PR_INTSOFF(is);
    _pr_dumpOut = _pr_stderr;
    _PR_DumpThreads(_pr_dumpOut);
    if (_PR_MD_CURRENT_THREAD()
        && !_PR_IS_NATIVE_THREAD(_PR_MD_CURRENT_THREAD()))
        _PR_FAST_INTSON(is);
}
Example #3
PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock *lockp)
{
	PRIntn _is;
	PRThread *me = _PR_MD_CURRENT_THREAD();

	if (me && !_PR_IS_NATIVE_THREAD(me))
		_PR_INTSOFF(_is); 
	pthread_mutex_destroy(&lockp->mutex);
	if (me && !_PR_IS_NATIVE_THREAD(me))
		_PR_FAST_INTSON(_is);
}
Example #4
PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock *lockp)
{
    int rv;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();	

	if (me && !_PR_IS_NATIVE_THREAD(me))
		_PR_INTSOFF(is);
	rv = pthread_mutex_init(&lockp->mutex, NULL);
	if (me && !_PR_IS_NATIVE_THREAD(me))
		_PR_FAST_INTSON(is);
	return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}
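
Examples #3 and #4 are the two halves of a lock's lifetime; a minimal pairing sketch, assuming the caller provides the struct _MDLock storage:

/* Sketch only: pairing _MD_NEW_LOCK with _MD_FREE_LOCK. */
struct _MDLock md_lock;

if (_MD_NEW_LOCK(&md_lock) == PR_SUCCESS) {
    /* md_lock.mutex is now an initialized pthread mutex */
    _MD_FREE_LOCK(&md_lock);
}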
Example #5
PRStatus
_MD_WakeupWaiter(PRThread *thread)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 pid, rv;
    PRIntn is;

	PR_ASSERT(_pr_md_idle_cpus >= 0);
    if (thread == NULL) {
		if (_pr_md_idle_cpus)
        	_MD_Wakeup_CPUs();
    } else if (!_PR_IS_NATIVE_THREAD(thread)) {
		/*
		 * If the thread is on my cpu's runq there is no need to
		 * wakeup any cpus
		 */
		if (!_PR_IS_NATIVE_THREAD(me)) {
			if (me->cpu != thread->cpu) {
				if (_pr_md_idle_cpus)
        			_MD_Wakeup_CPUs();
			}
		} else {
			if (_pr_md_idle_cpus)
        		_MD_Wakeup_CPUs();
		}
    } else {
		PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_INTSOFF(is);

		pthread_mutex_lock(&thread->md.pthread_mutex);
		thread->md.wait++;
		rv = pthread_cond_signal(&thread->md.pthread_cond);
		PR_ASSERT(rv == 0);
		pthread_mutex_unlock(&thread->md.pthread_mutex);

		if (!_PR_IS_NATIVE_THREAD(me))
			_PR_FAST_INTSON(is);
    } 
    return PR_SUCCESS;
}
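
Example #5 only shows the signalling side: it bumps thread->md.wait under the per-thread mutex and signals the condition variable. A sketch of the waiting side it pairs with, assuming the waiter consumes md.wait under the same mutex:

/* Sketch only: the assumed waiting side of the md.wait handshake. */
pthread_mutex_lock(&me->md.pthread_mutex);
while (me->md.wait == 0)
    pthread_cond_wait(&me->md.pthread_cond, &me->md.pthread_mutex);
me->md.wait--;
pthread_mutex_unlock(&me->md.pthread_mutex);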
Example #6
inline void DoneWaitingOnThisThread(PRThread thread)
{
    int is;
    int thread_md_asyncIOLock;
    int thread_io_pending;
    int thread_md_asyncIOCVar;

	_PR_INTSOFF(is);
	PR_Lock(thread_md_asyncIOLock);
	{
		__ESBMC_atomic_begin();
		if (__COUNT__ == 1) {
			thread_io_pending = PR_FALSE; /* check for order violation */
			__COUNT__ = __COUNT__ + 1;
		} else {
			assert(0);
		}
		__ESBMC_atomic_end();
	}
	/* let the waiting thread know that async IO completed */
	PR_NotifyCondVar(thread_md_asyncIOCVar);
	PR_Unlock(thread_md_asyncIOLock);
	_PR_FAST_INTSON(is);
}
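
Example #6 is an ESBMC verification harness rather than production code: the __COUNT__ check asserts that this function runs as the second step of an expected order, and any interleaving that reaches it with a different count fails assert(0). A stripped-down sketch of the same idiom, with illustrative step names:

#include <assert.h>

void __ESBMC_atomic_begin(void);   /* ESBMC intrinsics */
void __ESBMC_atomic_end(void);

int __COUNT__ = 0;

void step_A(void)                  /* expected to run first */
{
    __ESBMC_atomic_begin();
    if (__COUNT__ == 0) __COUNT__ = 1; else assert(0);
    __ESBMC_atomic_end();
}

void step_B(void)                  /* expected to run second */
{
    __ESBMC_atomic_begin();
    if (__COUNT__ == 1) __COUNT__ = 2; else assert(0);
    __ESBMC_atomic_end();
}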
Example #7
/*
** Lock the lock.
*/
PR_IMPLEMENT(void) PR_Lock(PRLock *lock)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntn is;
    PRThread *t;
    PRCList *q;

    PR_ASSERT(me != suspendAllThread); 
    PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));
    PR_ASSERT(lock != NULL);
#ifdef _PR_GLOBAL_THREADS_ONLY 
    _PR_MD_LOCK(&lock->ilock);
    PR_ASSERT(lock->owner == 0);
    lock->owner = me;
    return;
#else  /* _PR_GLOBAL_THREADS_ONLY */

	if (_native_threads_only) {
		_PR_MD_LOCK(&lock->ilock);
		PR_ASSERT(lock->owner == 0);
		lock->owner = me;
		return;
	}

    if (!_PR_IS_NATIVE_THREAD(me))
    	_PR_INTSOFF(is);

    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

retry:
    _PR_LOCK_LOCK(lock);
    if (lock->owner == 0) {
        /* Just got the lock */
        lock->owner = me;
        lock->priority = me->priority;
		/* Add the granted lock to this owning thread's lock list */
        PR_APPEND_LINK(&lock->links, &me->lockList);
        _PR_LOCK_UNLOCK(lock);
    	if (!_PR_IS_NATIVE_THREAD(me))
        	_PR_FAST_INTSON(is);
        return;
    }

    /* If this thread already owns this lock, then it is a deadlock */
    PR_ASSERT(lock->owner != me);

    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

#if 0
    if (me->priority > lock->owner->priority) {
        /*
        ** Give the lock owner a priority boost until we get the
        ** lock. Record the priority we boosted it to.
        */
        lock->boostPriority = me->priority;
        _PR_SetThreadPriority(lock->owner, me->priority);
    }
#endif

    /* 
    Add this thread to the asked-for lock's list of waiting threads.  We
    add this thread in the right priority order so that when the unlock
    occurs, the thread with the higher priority will get the lock.
    */
    q = lock->waitQ.next;
    if (q == &lock->waitQ || _PR_THREAD_CONDQ_PTR(q)->priority ==
      	_PR_THREAD_CONDQ_PTR(lock->waitQ.prev)->priority) {
		/*
		 * If all the threads in the lock waitQ have the same priority,
		 * then avoid scanning the list:  insert the element at the end.
		 */
		q = &lock->waitQ;
    } else {
		/* Sort thread into lock's waitQ at appropriate point */
		/* Now scan the list for where to insert this entry */
		while (q != &lock->waitQ) {
			t = _PR_THREAD_CONDQ_PTR(q);
			if (me->priority > t->priority) {
				/* Found a lower priority thread to insert in front of */
				break;
			}
			q = q->next;
		}
	}
    PR_INSERT_BEFORE(&me->waitQLinks, q);

	/* 
	Now grab the threadLock since we are about to change the state.  We have
	to do this since a PR_Suspend or PR_SetThreadPriority type call that takes
	a PRThread* as an argument could be changing the state of this thread from
	a thread running on a different cpu.
	*/

    _PR_THREAD_LOCK(me);
    me->state = _PR_LOCK_WAIT;
    me->wait.lock = lock;
    _PR_THREAD_UNLOCK(me);

    _PR_LOCK_UNLOCK(lock);

    _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT);
	goto retry;

#endif  /* _PR_GLOBAL_THREADS_ONLY */
}
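
For callers, none of the waitQ machinery in Example #7 is visible; the public NSPR surface is just create, lock, unlock, and destroy. A minimal caller-side sketch:

#include "prlock.h"

static int counter;

/* Minimal caller-side sketch of the public locking API. */
static void bump(PRLock *ml)
{
    PR_Lock(ml);        /* may queue behind higher-priority waiters */
    counter++;
    PR_Unlock(ml);
}

/* Typical lifetime: PRLock *ml = PR_NewLock(); ... PR_DestroyLock(ml); */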