/*
 * thread_set_user_sched_mode_and_recompute_pri:
 *
 * Set the thread's requested mode and recompute priority.
 * Called with thread mutex and thread locked.
 *
 * No-op for threads with policy_reset set (their scheduling
 * parameters are pinned and must not be altered here).
 *
 * TODO: Mitigate potential problems caused by moving thread to end of runq
 * whenever its priority is recomputed
 *      Only remove when it actually changes? Attempt to re-insert at appropriate location?
 */
static void
thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode)
{
	if (thread->policy_reset)
		return;

	/* Pull the thread off its run queue (if queued) so the mode/priority
	 * change is applied atomically with respect to the scheduler. */
	boolean_t removed = thread_run_queue_remove(thread);

	/*
	 * TODO: Instead of having saved mode, have 'user mode' and 'true mode'.
	 * That way there's zero confusion over which the user wants
	 * and which the kernel wants.
	 */
	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK)
		/* Demoted: defer the requested mode until the demotion is lifted. */
		thread->saved_mode = mode;
	else
		sched_set_thread_mode(thread, mode);

	thread_recompute_priority(thread);

	/* Re-queue at the tail only if we removed it above. */
	if (removed)
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
}
/*
 * thread_set_mode_and_absolute_pri:
 *
 * Set scheduling policy & absolute priority for thread, for deprecated
 * thread_set_policy and thread_policy interfaces.
 *
 * Note that there is no implemented difference between POLICY_RR and POLICY_FIFO.
 * Both result in FIXED mode scheduling.
 *
 * Called with thread mutex locked.
 */
kern_return_t
thread_set_mode_and_absolute_pri(
	thread_t	thread,
	integer_t	policy,
	integer_t	priority)
{
	spl_t s;
	sched_mode_t mode;
	kern_return_t kr = KERN_SUCCESS;

	/* Threads with static scheduling parameters may not be changed. */
	if (thread_is_static_param(thread))
		return (KERN_POLICY_STATIC);

	if (thread->policy_reset)
		return (KERN_SUCCESS);

	/* Setting legacy policies on threads kills the current QoS */
	if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
		/*
		 * Must drop the thread mutex around thread_remove_qos_policy()
		 * (it takes other locks); re-check liveness after re-acquiring.
		 */
		thread_mtx_unlock(thread);
		kr = thread_remove_qos_policy(thread);
		thread_mtx_lock(thread);
		if (!thread->active) {
			return (KERN_TERMINATED);
		}
	}

	switch (policy) {
	case POLICY_TIMESHARE:
		mode = TH_MODE_TIMESHARE;
		break;
	case POLICY_RR:
	case POLICY_FIFO:
		mode = TH_MODE_FIXED;
		break;
	default:
		panic("unexpected sched policy: %d", policy);
		break;
	}

	s = splsched();
	thread_lock(thread);

	/* This path isn't allowed to change a thread out of realtime. */
	if ((thread->sched_mode != TH_MODE_REALTIME) &&
	    (thread->saved_mode != TH_MODE_REALTIME)) {
		/*
		 * Reverse engineer and apply the correct importance value
		 * from the requested absolute priority value.
		 *
		 * Importance is stored relative to the task's base priority,
		 * so map the absolute priority into the band it falls in,
		 * re-bias by task_priority, then clamp to [MINPRI, max_priority].
		 */
		if (priority >= thread->max_priority)
			priority = thread->max_priority - thread->task_priority;
		else if (priority >= MINPRI_KERNEL)
			priority -= MINPRI_KERNEL;
		else if (priority >= MINPRI_RESERVED)
			priority -= MINPRI_RESERVED;
		else
			priority -= BASEPRI_DEFAULT;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;
		else if (priority < MINPRI)
			priority = MINPRI;

		thread->importance = priority - thread->task_priority;

		/*
		 * NOTE(review): this manually performs the same
		 * remove/set-mode/recompute/requeue sequence that
		 * thread_set_user_sched_mode_and_recompute_pri() encapsulates,
		 * but requeues via thread_setrun() rather than
		 * thread_run_queue_reinsert() — confirm whether these paths
		 * should be unified.
		 */
		boolean_t removed = thread_run_queue_remove(thread);

		thread_set_user_sched_mode(thread, mode);
		thread_recompute_priority(thread);

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);
	}

	thread_unlock(thread);
	splx(s);

	/* Scheduling properties changed; re-evaluate SFI class membership. */
	sfi_reevaluate(thread);

	return (kr);
}
kern_return_t thread_policy_set_internal( thread_t thread, thread_policy_flavor_t flavor, thread_policy_t policy_info, mach_msg_type_number_t count) { kern_return_t result = KERN_SUCCESS; spl_t s; thread_mtx_lock(thread); if (!thread->active) { thread_mtx_unlock(thread); return (KERN_TERMINATED); } switch (flavor) { case THREAD_EXTENDED_POLICY: { boolean_t timeshare = TRUE; if (count >= THREAD_EXTENDED_POLICY_COUNT) { thread_extended_policy_t info; info = (thread_extended_policy_t)policy_info; timeshare = info->timeshare; } sched_mode_t mode = (timeshare == TRUE) ? TH_MODE_TIMESHARE : TH_MODE_FIXED; s = splsched(); thread_lock(thread); boolean_t removed = thread_run_queue_remove(thread); thread_set_user_sched_mode(thread, mode); thread_recompute_priority(thread); if (removed) thread_setrun(thread, SCHED_TAILQ); thread_unlock(thread); splx(s); sfi_reevaluate(thread); break; } case THREAD_TIME_CONSTRAINT_POLICY: { thread_time_constraint_policy_t info; if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } info = (thread_time_constraint_policy_t)policy_info; if ( info->constraint < info->computation || info->computation > max_rt_quantum || info->computation < min_rt_quantum ) { result = KERN_INVALID_ARGUMENT; break; } s = splsched(); thread_lock(thread); boolean_t removed = thread_run_queue_remove(thread); thread->realtime.period = info->period; thread->realtime.computation = info->computation; thread->realtime.constraint = info->constraint; thread->realtime.preemptible = info->preemptible; thread_set_user_sched_mode(thread, TH_MODE_REALTIME); thread_recompute_priority(thread); if (removed) thread_setrun(thread, SCHED_TAILQ); thread_unlock(thread); splx(s); sfi_reevaluate(thread); break; } case THREAD_PRECEDENCE_POLICY: { thread_precedence_policy_t info; if (count < THREAD_PRECEDENCE_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } info = (thread_precedence_policy_t)policy_info; s = splsched(); thread_lock(thread); 
thread->importance = info->importance; thread_recompute_priority(thread); thread_unlock(thread); splx(s); break; } case THREAD_AFFINITY_POLICY: { thread_affinity_policy_t info; if (!thread_affinity_is_supported()) { result = KERN_NOT_SUPPORTED; break; } if (count < THREAD_AFFINITY_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } info = (thread_affinity_policy_t) policy_info; /* * Unlock the thread mutex here and * return directly after calling thread_affinity_set(). * This is necessary for correct lock ordering because * thread_affinity_set() takes the task lock. */ thread_mtx_unlock(thread); return thread_affinity_set(thread, info->affinity_tag); } case THREAD_THROUGHPUT_QOS_POLICY: { thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info; int tqos; if (count < THREAD_LATENCY_QOS_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) != KERN_SUCCESS) { break; } tqos = qos_extract(info->thread_throughput_qos_tier); thread->effective_policy.t_through_qos = tqos; } break; case THREAD_LATENCY_QOS_POLICY: { thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info; int lqos; if (count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) != KERN_SUCCESS) { break; } lqos = qos_extract(info->thread_latency_qos_tier); /* The expected use cases (opt-in) of per-thread latency QoS would seem to * preclude any requirement at present to re-evaluate timers on a thread level * latency QoS change. 
*/ thread->effective_policy.t_latency_qos = lqos; } break; case THREAD_QOS_POLICY: case THREAD_QOS_POLICY_OVERRIDE: { thread_qos_policy_t info = (thread_qos_policy_t)policy_info; if (count < THREAD_QOS_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } if (info->qos_tier < 0 || info->qos_tier >= THREAD_QOS_LAST) { result = KERN_INVALID_ARGUMENT; break; } if (info->tier_importance > 0 || info->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) { result = KERN_INVALID_ARGUMENT; break; } if (info->qos_tier == THREAD_QOS_UNSPECIFIED && info->tier_importance != 0) { result = KERN_INVALID_ARGUMENT; break; } /* * Going into task policy requires the task mutex, * because of the way synchronization against the IO policy * subsystem works. * * We need to move thread policy to the thread mutex instead. * <rdar://problem/15831652> separate thread policy from task policy */ if (flavor == THREAD_QOS_POLICY_OVERRIDE) { int strongest_override = info->qos_tier; if (info->qos_tier != THREAD_QOS_UNSPECIFIED && thread->requested_policy.thrp_qos_override != THREAD_QOS_UNSPECIFIED) strongest_override = MAX(thread->requested_policy.thrp_qos_override, info->qos_tier); thread_mtx_unlock(thread); /* There is a race here. To be closed in <rdar://problem/15831652> separate thread policy from task policy */ proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, strongest_override); return (result); } thread_mtx_unlock(thread); proc_set_task_policy2(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, info->qos_tier, -info->tier_importance); thread_mtx_lock(thread); if (!thread->active) { thread_mtx_unlock(thread); return (KERN_TERMINATED); } break; } default: result = KERN_INVALID_ARGUMENT; break; } thread_mtx_unlock(thread); return (result); }