/*
 * thread_policy_get:
 *
 * Return scheduling-policy information for a thread.
 *
 * Parameters:
 *	thread		target thread; THREAD_NULL is rejected.
 *	flavor		which policy family to report.
 *	policy_info	caller-supplied buffer, cast per flavor.
 *	count		in: capacity of policy_info in integer_t units;
 *			NOTE(review): never written back here — presumably the
 *			MIG layer manages the out-count; confirm against callers.
 *	get_default	in/out: if TRUE on entry, report the flavor's default
 *			values; may be SET to TRUE on return when the thread is
 *			not currently using that policy (e.g. asking for
 *			time-constraint info on a non-realtime thread).
 *
 * Returns: KERN_SUCCESS, KERN_INVALID_ARGUMENT (null thread, bad flavor,
 * or buffer too small), KERN_TERMINATED (thread no longer active), or
 * KERN_PROTECTION_FAILURE (THREAD_POLICY_STATE from non-root).
 *
 * Locking: takes the thread mutex for the duration; flavors that read
 * scheduler state additionally take splsched()+thread_lock.
 */
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		/* Thread is terminating; nothing meaningful to report. */
		thread_mtx_unlock(thread);
		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			/* Scheduler state requires the scheduling lock at splsched. */
			s = splsched();
			thread_lock(thread);

			if (	 (thread->sched_mode != TH_MODE_REALTIME)	&&
					 (thread->saved_mode != TH_MODE_REALTIME)		) {
				/*
				 * If the thread has been demoted (failsafe), its true
				 * user-requested mode is parked in saved_mode.
				 */
				if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK))
					timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0;
				else
					timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0;
			}
			else
				/* Realtime thread: extended policy doesn't apply; fall back to default. */
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		/*
		 * NOTE(review): an undersized buffer is silently ignored here
		 * (no KERN_INVALID_ARGUMENT), unlike the other flavors — this
		 * appears to be long-standing behavior for this flavor.
		 */
		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			/* Report realtime parameters whether active or demoted-from. */
			if (	(thread->sched_mode == TH_MODE_REALTIME)	||
					(thread->saved_mode == TH_MODE_REALTIME)		) {
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				/* Not a realtime thread: report defaults instead. */
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*get_default) {
			info->period = 0;
			info->computation = default_timeshare_computation;
			info->constraint = default_timeshare_constraint;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	case THREAD_POLICY_STATE:
	{
		thread_policy_state_t		info;

		if (*count < THREAD_POLICY_STATE_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		/* Only root can get this info */
		if (current_task()->sec_token.val[0] != 0) {
			result = KERN_PROTECTION_FAILURE;
			break;
		}

		info = (thread_policy_state_t)policy_info;

		if (!(*get_default)) {
			info->flags = 0;

			info->flags |= (thread->static_param ? THREAD_POLICY_STATE_FLAG_STATIC_PARAM : 0);

			/*
			 * Unlock the thread mutex and directly return.
			 * This is necessary because proc_get_thread_policy()
			 * takes the task lock.
			 */
			thread_mtx_unlock(thread);
			proc_get_thread_policy(thread, info);
			return (result);
		} else {
			/* Defaults: everything zeroed. */
			info->requested = 0;
			info->effective = 0;
			info->pending = 0;
		}

		break;
	}

	case THREAD_LATENCY_QOS_POLICY:
	{
		thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
		uint32_t plqos;

		if (*count < THREAD_LATENCY_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (*get_default) {
			plqos = 0;
		} else {
			plqos = thread->effective_policy.t_latency_qos;
		}

		/* Re-encode the raw qos value into the user-visible tier constant. */
		info->thread_latency_qos_tier = qos_latency_policy_package(plqos);
	}
	break;

	case THREAD_THROUGHPUT_QOS_POLICY:
	{
		thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
		uint32_t ptqos;

		if (*count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (*get_default) {
			ptqos = 0;
		} else {
			ptqos = thread->effective_policy.t_through_qos;
		}

		info->thread_throughput_qos_tier = qos_throughput_policy_package(ptqos);
	}
	break;

	case THREAD_QOS_POLICY:
	case THREAD_QOS_POLICY_OVERRIDE:
	{
		thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

		if (*count < THREAD_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (!(*get_default)) {
			if (flavor == THREAD_QOS_POLICY_OVERRIDE) {
				info->qos_tier = thread->requested_policy.thrp_qos_override;
				/* TODO: handle importance overrides */
				info->tier_importance = 0;
			} else {
				info->qos_tier = thread->requested_policy.thrp_qos;
				info->tier_importance = thread->importance;
			}
		} else {
			info->qos_tier = THREAD_QOS_UNSPECIFIED;
			info->tier_importance = 0;
		}

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * thread_policy_set:
 *
 * Apply a scheduling policy to a thread (bit-mask sched_mode variant).
 *
 * Parameters:
 *	thread		target thread; THREAD_NULL is rejected.
 *	flavor		THREAD_EXTENDED_POLICY, THREAD_TIME_CONSTRAINT_POLICY,
 *			THREAD_PRECEDENCE_POLICY or THREAD_AFFINITY_POLICY.
 *	policy_info	caller-supplied policy data, cast per flavor.
 *	count		size of policy_info in integer_t units.
 *
 * Returns: KERN_SUCCESS, KERN_INVALID_ARGUMENT, KERN_TERMINATED,
 * KERN_NOT_SUPPORTED (affinity unavailable), or the result of
 * thread_affinity_set() for the affinity flavor.
 *
 * Threads marked static_param silently keep their existing policy
 * (returns KERN_SUCCESS without changes).
 *
 * Locking: thread mutex held across the switch; scheduler-state changes
 * additionally take splsched()+thread_lock. The affinity flavor drops the
 * mutex early because thread_affinity_set() takes the task lock.
 */
kern_return_t
thread_policy_set(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return (KERN_TERMINATED);
	}

	if (thread->static_param) {
		/* Policy is pinned; ignore the request but report success. */
		thread_mtx_unlock(thread);
		return (KERN_SUCCESS);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		/* An undersized/absent buffer defaults to timeshare. */
		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

			thread->sched_mode &= ~TH_MODE_REALTIME;

			if (timeshare && !oldmode) {
				thread->sched_mode |= TH_MODE_TIMESHARE;

				/*
				 * Keep the scheduler's count of runnable timeshare
				 * threads in sync with the mode transition.
				 */
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_incr();
			}
			else
			if (!timeshare && oldmode) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}

			thread_recompute_priority(thread);
		}
		else {
			/*
			 * Failsafe (demoted) thread: record the requested mode in
			 * safe_mode; it takes effect when the demotion is lifted.
			 */
			thread->safe_mode &= ~TH_MODE_REALTIME;

			if (timeshare)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		/* Sanity: computation must fit inside the constraint and within
		 * the scheduler's realtime quantum bounds. */
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			if (thread->sched_mode & TH_MODE_TIMESHARE) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				/* Leaving timeshare: drop the runnable-share count. */
				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}
			thread->sched_mode |= TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}
		else {
			/* Demoted: defer the realtime transition via safe_mode. */
			thread->safe_mode &= ~TH_MODE_TIMESHARE;
			thread->safe_mode |= TH_MODE_REALTIME;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);
	return (result);
}
kern_return_t thread_policy_set_internal( thread_t thread, thread_policy_flavor_t flavor, thread_policy_t policy_info, mach_msg_type_number_t count) { kern_return_t result = KERN_SUCCESS; spl_t s; thread_mtx_lock(thread); if (!thread->active) { thread_mtx_unlock(thread); return (KERN_TERMINATED); } switch (flavor) { case THREAD_EXTENDED_POLICY: { boolean_t timeshare = TRUE; if (count >= THREAD_EXTENDED_POLICY_COUNT) { thread_extended_policy_t info; info = (thread_extended_policy_t)policy_info; timeshare = info->timeshare; } sched_mode_t mode = (timeshare == TRUE) ? TH_MODE_TIMESHARE : TH_MODE_FIXED; s = splsched(); thread_lock(thread); thread_set_user_sched_mode_and_recompute_pri(thread, mode); thread_unlock(thread); splx(s); sfi_reevaluate(thread); break; } case THREAD_TIME_CONSTRAINT_POLICY: { thread_time_constraint_policy_t info; if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } info = (thread_time_constraint_policy_t)policy_info; if ( info->constraint < info->computation || info->computation > max_rt_quantum || info->computation < min_rt_quantum ) { result = KERN_INVALID_ARGUMENT; break; } s = splsched(); thread_lock(thread); thread->realtime.period = info->period; thread->realtime.computation = info->computation; thread->realtime.constraint = info->constraint; thread->realtime.preemptible = info->preemptible; thread_set_user_sched_mode_and_recompute_pri(thread, TH_MODE_REALTIME); thread_unlock(thread); splx(s); sfi_reevaluate(thread); break; } case THREAD_PRECEDENCE_POLICY: { thread_precedence_policy_t info; if (count < THREAD_PRECEDENCE_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } info = (thread_precedence_policy_t)policy_info; s = splsched(); thread_lock(thread); thread->importance = info->importance; thread_recompute_priority(thread); thread_unlock(thread); splx(s); break; } case THREAD_AFFINITY_POLICY: { thread_affinity_policy_t info; if (!thread_affinity_is_supported()) { result = KERN_NOT_SUPPORTED; 
break; } if (count < THREAD_AFFINITY_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } info = (thread_affinity_policy_t) policy_info; /* * Unlock the thread mutex here and * return directly after calling thread_affinity_set(). * This is necessary for correct lock ordering because * thread_affinity_set() takes the task lock. */ thread_mtx_unlock(thread); return thread_affinity_set(thread, info->affinity_tag); } case THREAD_THROUGHPUT_QOS_POLICY: { thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info; int tqos; if (count < THREAD_LATENCY_QOS_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) != KERN_SUCCESS) { break; } tqos = qos_extract(info->thread_throughput_qos_tier); thread->effective_policy.t_through_qos = tqos; } break; case THREAD_LATENCY_QOS_POLICY: { thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info; int lqos; if (count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) != KERN_SUCCESS) { break; } lqos = qos_extract(info->thread_latency_qos_tier); /* The expected use cases (opt-in) of per-thread latency QoS would seem to * preclude any requirement at present to re-evaluate timers on a thread level * latency QoS change. 
*/ thread->effective_policy.t_latency_qos = lqos; } break; case THREAD_QOS_POLICY: case THREAD_QOS_POLICY_OVERRIDE: { thread_qos_policy_t info = (thread_qos_policy_t)policy_info; if (count < THREAD_QOS_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; break; } if (info->qos_tier < 0 || info->qos_tier >= THREAD_QOS_LAST) { result = KERN_INVALID_ARGUMENT; break; } if (info->tier_importance > 0 || info->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) { result = KERN_INVALID_ARGUMENT; break; } if (info->qos_tier == THREAD_QOS_UNSPECIFIED && info->tier_importance != 0) { result = KERN_INVALID_ARGUMENT; break; } /* * Going into task policy requires the task mutex, * because of the way synchronization against the IO policy * subsystem works. * * We need to move thread policy to the thread mutex instead. * <rdar://problem/15831652> separate thread policy from task policy */ if (flavor == THREAD_QOS_POLICY_OVERRIDE) { int strongest_override = info->qos_tier; if (info->qos_tier != THREAD_QOS_UNSPECIFIED && thread->requested_policy.thrp_qos_override != THREAD_QOS_UNSPECIFIED) strongest_override = MAX(thread->requested_policy.thrp_qos_override, info->qos_tier); thread_mtx_unlock(thread); /* There is a race here. To be closed in <rdar://problem/15831652> separate thread policy from task policy */ proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, strongest_override); return (result); } thread_mtx_unlock(thread); proc_set_task_policy2(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, info->qos_tier, -info->tier_importance); thread_mtx_lock(thread); if (!thread->active) { thread_mtx_unlock(thread); return (KERN_TERMINATED); } break; } default: result = KERN_INVALID_ARGUMENT; break; } thread_mtx_unlock(thread); return (result); }
/*
 * thread_policy_get:
 *
 * Return scheduling-policy information for a thread (bit-mask sched_mode
 * variant; failsafe state is kept in safe_mode rather than saved_mode).
 *
 * Parameters:
 *	thread		target thread; THREAD_NULL is rejected.
 *	flavor		which policy family to report.
 *	policy_info	caller-supplied buffer, cast per flavor.
 *	count		in: capacity of policy_info in integer_t units;
 *			NOTE(review): never written back here — presumably
 *			handled by the MIG layer; confirm against callers.
 *	get_default	in/out: TRUE on entry requests defaults; may be SET to
 *			TRUE on return when the thread is not using the flavor.
 *
 * Returns: KERN_SUCCESS, KERN_INVALID_ARGUMENT, KERN_TERMINATED, or
 * KERN_NOT_SUPPORTED (affinity unavailable).
 *
 * Locking: thread mutex for the duration; flavors reading scheduler
 * state additionally take splsched()+thread_lock.
 */
kern_return_t
thread_policy_get(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	*count,
	boolean_t				*get_default)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t		timeshare = TRUE;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			if (	!(thread->sched_mode & TH_MODE_REALTIME)	&&
					!(thread->safe_mode & TH_MODE_REALTIME)			) {
				/*
				 * Under failsafe demotion the user-requested mode
				 * lives in safe_mode, not sched_mode.
				 */
				if (!(thread->sched_mode & TH_MODE_FAILSAFE))
					timeshare = (thread->sched_mode & TH_MODE_TIMESHARE) != 0;
				else
					timeshare = (thread->safe_mode & TH_MODE_TIMESHARE) != 0;
			}
			else
				/* Realtime thread: extended policy doesn't apply. */
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		/*
		 * NOTE(review): a too-small buffer is silently ignored for this
		 * flavor (no error), unlike the other flavors below.
		 */
		if (*count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			info->timeshare = timeshare;
		}

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			/* Report realtime parameters whether active or demoted. */
			if (	(thread->sched_mode & TH_MODE_REALTIME)	||
					(thread->safe_mode & TH_MODE_REALTIME)		) {
				info->period = thread->realtime.period;
				info->computation = thread->realtime.computation;
				info->constraint = thread->realtime.constraint;
				info->preemptible = thread->realtime.preemptible;
			}
			else
				*get_default = TRUE;

			thread_unlock(thread);
			splx(s);
		}

		if (*get_default) {
			/* Defaults derived from the standard timeshare quantum. */
			info->period = 0;
			info->computation = std_quantum / 2;
			info->constraint = std_quantum;
			info->preemptible = TRUE;
		}

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (*count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		if (!(*get_default)) {
			s = splsched();
			thread_lock(thread);

			info->importance = thread->importance;

			thread_unlock(thread);
			splx(s);
		}
		else
			info->importance = 0;

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t		info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (*count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t)policy_info;

		if (!(*get_default))
			info->affinity_tag = thread_affinity_get(thread);
		else
			info->affinity_tag = THREAD_AFFINITY_TAG_NULL;

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}