/**
 * Applies the priority implied by @a enmType to the calling thread.
 *
 * @param   pThread     The IPRT thread record (not consulted here; the
 *                      priority is applied to the current kernel thread).
 * @param   enmType     The thread type to derive the Solaris priority from.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_PARAMETER for an
 *          unrecognized thread type.
 */
DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
{
    int          iPrio;
    disp_lock_t **ppLock;

    /* Translate the IPRT thread type into a Solaris scheduling priority. */
    switch (enmType)
    {
        case RTTHREADTYPE_INFREQUENT_POLLER:    iPrio = 60; break;
        case RTTHREADTYPE_EMULATION:            iPrio = 66; break;
        case RTTHREADTYPE_DEFAULT:              iPrio = 72; break;
        case RTTHREADTYPE_MSG_PUMP:             iPrio = 78; break;
        case RTTHREADTYPE_IO:                   iPrio = 84; break;
        case RTTHREADTYPE_TIMER:                iPrio = 99; break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            return VERR_INVALID_PARAMETER;
    }

    Assert(curthread);
    thread_lock(curthread);
    thread_change_pri(curthread, iPrio, 0);

    /*
     * thread_unlock() is a macro calling disp_lock_exit() with the thread's
     * dispatcher lock.  We need to dereference the offset manually here (for
     * S10, S11 compatibility) rather than using the macro.
     */
    ppLock = SOL_THREAD_LOCKP_PTR;
    disp_lock_exit(*ppLock);

    return VINF_SUCCESS;
}
/*
 * Hand out an squeue from the global set that is free for ill binding,
 * creating one if no unbound non-default squeue exists.  The returned
 * squeue is marked SQS_ILL_BOUND, and its worker and poll threads are
 * moved to priority `pri' if they are not there already.
 */
squeue_t *
ip_squeue_getfree(pri_t pri)
{
	squeue_set_t *set = sqset_global_list[0];
	squeue_t *sqp;

	mutex_enter(&sqset_lock);

	/*
	 * Walk the set looking for a non-default TCP squeue that is free,
	 * i.e. not bound to any ill.
	 */
	sqp = set->sqs_head;
	while (sqp != NULL && (sqp->sq_state & (SQS_DEFAULT | SQS_ILL_BOUND)))
		sqp = sqp->sq_next;

	if (sqp == NULL) {
		/* Nothing usable; create a new squeue and link it at the head. */
		sqp = ip_squeue_create(pri);
		sqp->sq_set = set;
		sqp->sq_next = set->sqs_head;
		set->sqs_head = sqp;
	}

	ASSERT(!(sqp->sq_state & (SQS_POLL_THR_CONTROL |
	    SQS_WORKER_THR_CONTROL | SQS_POLL_CLEANUP_DONE |
	    SQS_POLL_QUIESCE_DONE | SQS_POLL_THR_QUIESCED)));

	/* Claim it for the caller before dropping the set lock. */
	mutex_enter(&sqp->sq_lock);
	sqp->sq_state |= SQS_ILL_BOUND;
	mutex_exit(&sqp->sq_lock);
	mutex_exit(&sqset_lock);

	if (sqp->sq_priority != pri) {
		thread_lock(sqp->sq_worker);
		(void) thread_change_pri(sqp->sq_worker, pri, 0);
		thread_unlock(sqp->sq_worker);

		thread_lock(sqp->sq_poll_thr);
		(void) thread_change_pri(sqp->sq_poll_thr, pri, 0);
		thread_unlock(sqp->sq_poll_thr);

		sqp->sq_priority = pri;
	}
	return (sqp);
}