Example #1
/* Acquires LOCK, sleeping until it becomes available if
   necessary.  The lock must not already be held by the current
   thread.

   This function may sleep, so it must not be called within an
   interrupt handler.  This function may be called with
   interrupts disabled, but interrupts will be turned back on if
   we need to sleep. */
void
lock_acquire (struct lock *lock)
{
  enum intr_level old_level;

  ASSERT (lock != NULL);
  ASSERT (!intr_context ());
  ASSERT (!lock_held_by_current_thread (lock));

  old_level = intr_disable ();

  if (lock->holder != NULL) 
    {
      /* Donate our priority to the thread holding the lock.
         First, update the data structures. */
      struct thread *donor = thread_current ();
      donor->want_lock = lock;
      donor->donee = lock->holder;
      list_push_back (&lock->holder->donors, &donor->donor_elem);
      
      /* Now implement the priority donation itself
         by recomputing the donee's priority
         and cascading the donation as far as necessary. */
      if (donor->donee != NULL)
        thread_recompute_priority (donor->donee);
    }

  sema_down (&lock->semaphore);
  lock->holder = thread_current ();
  intr_set_level (old_level);
}
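
Example #1 defers the donation itself to thread_recompute_priority(), which is not shown. A minimal sketch of what such a routine could look like in this design, assuming each struct thread also carries a normal_priority field holding its undonated base priority (that field name is an assumption); as in the callers, interrupts are off:

/* Sets T's priority to the maximum of its base priority and the
   priorities of its donors, then cascades: if T's priority changed
   and T is itself blocked on a lock, its donee is recomputed too. */
void
thread_recompute_priority (struct thread *t)
{
  struct list_elem *e;
  int new_priority = t->normal_priority;   /* assumed base-priority field */

  for (e = list_begin (&t->donors); e != list_end (&t->donors);
       e = list_next (e))
    {
      struct thread *donor = list_entry (e, struct thread, donor_elem);
      if (donor->priority > new_priority)
        new_priority = donor->priority;
    }

  if (new_priority != t->priority)
    {
      t->priority = new_priority;
      if (t->donee != NULL)
        thread_recompute_priority (t->donee);   /* cascade the donation */
    }
}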
Example #2
/* Called with the thread mutex held */
void
thread_task_priority(
	thread_t		thread,
	integer_t		priority,
	integer_t		max_priority)
{
	spl_t s;

	assert(thread != THREAD_NULL);

	if (!thread->active || thread->policy_reset)
		return;

	s = splsched();
	thread_lock(thread);

	integer_t old_max_priority = thread->max_priority;

	thread->task_priority = priority;
	thread->max_priority = max_priority;

	/* A thread is 'throttled' when its max priority is at or below MAXPRI_THROTTLE */
	if ((max_priority > MAXPRI_THROTTLE) && (old_max_priority <= MAXPRI_THROTTLE)) {
		sched_set_thread_throttled(thread, FALSE);
	} else if ((max_priority <= MAXPRI_THROTTLE) && (old_max_priority > MAXPRI_THROTTLE)) {
		sched_set_thread_throttled(thread, TRUE);
	}

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
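
The two branches above are deliberately edge-triggered: sched_set_thread_throttled() runs only when max_priority crosses the MAXPRI_THROTTLE boundary, not on every priority update. A standalone sketch of just that predicate (MAXPRI_THROTTLE's value of 4 is an assumption taken from the usual osfmk/kern/sched.h definitions):

#include <stdbool.h>
#include <stdio.h>

#define MAXPRI_THROTTLE 4	/* assumed value; see osfmk/kern/sched.h */

/* Mirrors the checks above: report a transition only when new_max
   crosses the MAXPRI_THROTTLE boundary relative to old_max. */
static bool
throttle_transition(int old_max, int new_max, bool *now_throttled)
{
	if (new_max > MAXPRI_THROTTLE && old_max <= MAXPRI_THROTTLE) {
		*now_throttled = false;		/* leaving the throttled band */
		return true;
	}
	if (new_max <= MAXPRI_THROTTLE && old_max > MAXPRI_THROTTLE) {
		*now_throttled = true;		/* entering the throttled band */
		return true;
	}
	return false;				/* no crossing: nothing to do */
}

int
main(void)
{
	bool t;
	printf("4 -> 31: %d\n", throttle_transition(4, 31, &t));	/* 1 */
	printf("31 -> 4: %d\n", throttle_transition(31, 4, &t));	/* 1 */
	printf("31 -> 40: %d\n", throttle_transition(31, 40, &t));	/* 0 */
	return 0;
}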
Example #3
void
thread_policy_reset(
	thread_t		thread)
{
	spl_t		s;

	s = splsched();
	thread_lock(thread);

	if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
		thread->sched_mode &= ~TH_MODE_REALTIME;

		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread->sched_mode |= TH_MODE_TIMESHARE;

			if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
				sched_share_incr();
		}
	}
	else {
		thread->safe_mode = 0;
		thread->sched_mode &= ~TH_MODE_FAILSAFE;
	}

	thread->importance = 0;

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);
}
Example #4
/* Called with the task lock held */
void
thread_recompute_qos(thread_t thread)
{
	spl_t s;

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
		return;
	}

	s = splsched();
	thread_lock(thread);

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);

	thread_mtx_unlock(thread);
}
Example #5
void
thread_task_priority(
    thread_t		thread,
    integer_t		priority,
    integer_t		max_priority)
{
    spl_t				s;

    assert(thread != THREAD_NULL);

    s = splsched();
    thread_lock(thread);

    thread->task_priority = priority;
    thread->max_priority = max_priority;

    thread_recompute_priority(thread);

    thread_unlock(thread);
    splx(s);
}
Example #6
/*
 * Set the thread's requested mode and recompute priority
 * Called with thread mutex and thread locked
 *
 * TODO: Mitigate potential problems caused by moving thread to end of runq
 * whenever its priority is recomputed
 *      Only remove when it actually changes? Attempt to re-insert at appropriate location?
 */
static void
thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode)
{
	if (thread->policy_reset)
		return;

	boolean_t removed = thread_run_queue_remove(thread);

	/*
	 * TODO: Instead of having saved mode, have 'user mode' and 'true mode'.
	 * That way there's zero confusion over which the user wants
	 * and which the kernel wants.
	 */
	if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK)
		thread->saved_mode = mode;
	else
		sched_set_thread_mode(thread, mode);

	thread_recompute_priority(thread);

	if (removed)
		thread_run_queue_reinsert(thread, SCHED_TAILQ);
}
Example #7
/* Releases LOCK, which must be owned by the current thread.

   An interrupt handler cannot acquire a lock, so it does not
   make sense to try to release a lock within an interrupt
   handler. */
void
lock_release (struct lock *lock) 
{
  enum intr_level old_level;
  struct thread *t = thread_current ();
  struct list_elem *e;

  ASSERT (lock != NULL);
  ASSERT (lock_held_by_current_thread (lock));

  old_level = intr_disable ();

  /* Return donations to threads that want this lock. */
  for (e = list_begin (&t->donors); e != list_end (&t->donors); ) 
    {
      struct thread *donor = list_entry (e, struct thread, donor_elem);
      if (donor->want_lock == lock) 
        {
          donor->donee = NULL;
          e = list_remove (e);
        }
      else
        e = list_next (e);
    }

  /* Release lock. */
  lock->holder = NULL;
  sema_up (&lock->semaphore);

  /* Recompute our priority based on our remaining donations,
     then yield to a higher-priority ready thread if one now
     exists. */
  thread_recompute_priority (t);
  thread_yield_to_higher_priority ();

  intr_set_level (old_level);
}
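
lock_release() ends with thread_yield_to_higher_priority(), which is also not shown. One plausible shape for it, assuming thread.c's ready_list and a thread_lower_priority list comparator (the comparator name is an assumption):

/* Yields the CPU if a ready thread now has higher priority than the
   running thread.  Works from an interrupt handler too, by deferring
   the yield until the handler returns. */
void
thread_yield_to_higher_priority (void)
{
  enum intr_level old_level = intr_disable ();
  if (!list_empty (&ready_list))
    {
      struct thread *cur = thread_current ();
      struct thread *max = list_entry (list_max (&ready_list,
                                                 thread_lower_priority, NULL),
                                       struct thread, elem);
      if (max->priority > cur->priority)
        {
          if (intr_context ())
            intr_yield_on_return ();
          else
            thread_yield ();
        }
    }
  intr_set_level (old_level);
}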
Example #8
kern_return_t
thread_policy_set_internal(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		sched_mode_t mode = (timeshare == TRUE) ? TH_MODE_TIMESHARE : TH_MODE_FIXED;

		s = splsched();
		thread_lock(thread);

		thread_set_user_sched_mode_and_recompute_pri(thread, mode);

		thread_unlock(thread);
		splx(s);

		sfi_reevaluate(thread);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		thread_set_user_sched_mode_and_recompute_pri(thread, TH_MODE_REALTIME);

		thread_unlock(thread);
		splx(s);

		sfi_reevaluate(thread);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}
		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}

	case THREAD_THROUGHPUT_QOS_POLICY:
	{
		thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
		int tqos;
		
		if (count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) !=
		    KERN_SUCCESS) {
			break;
		}

		tqos = qos_extract(info->thread_throughput_qos_tier);
		thread->effective_policy.t_through_qos = tqos;
	}
		break;

	case THREAD_LATENCY_QOS_POLICY:
	{
		thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
		int lqos;
		
		if (count < THREAD_LATENCY_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) !=
		    KERN_SUCCESS) {
			break;
		}

		lqos = qos_extract(info->thread_latency_qos_tier);
		/* The expected use cases (opt-in) of per-thread latency QoS would
		 * seem to preclude any requirement at present to re-evaluate timers
		 * on a thread-level latency QoS change. */
		thread->effective_policy.t_latency_qos = lqos;

	}
		break;

	case THREAD_QOS_POLICY:
	case THREAD_QOS_POLICY_OVERRIDE:
	{
		thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

		if (count < THREAD_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (info->qos_tier < 0 || info->qos_tier >= THREAD_QOS_LAST) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (info->tier_importance > 0 || info->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (info->qos_tier == THREAD_QOS_UNSPECIFIED && info->tier_importance != 0) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		/*
		 * Going into task policy requires the task mutex,
		 * because of the way synchronization against the IO policy
		 * subsystem works.
		 *
		 * We need to move thread policy to the thread mutex instead.
		 * <rdar://problem/15831652> separate thread policy from task policy
		 */

		if (flavor == THREAD_QOS_POLICY_OVERRIDE) {
			int strongest_override = info->qos_tier;

			if (info->qos_tier != THREAD_QOS_UNSPECIFIED &&
			    thread->requested_policy.thrp_qos_override != THREAD_QOS_UNSPECIFIED)
				strongest_override = MAX(thread->requested_policy.thrp_qos_override, info->qos_tier);

			thread_mtx_unlock(thread);

			/* There is a race here. To be closed in <rdar://problem/15831652> separate thread policy from task policy */

			proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, strongest_override);

			return (result);
		}

		thread_mtx_unlock(thread);

		proc_set_task_policy2(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, info->qos_tier, -info->tier_importance);

		thread_mtx_lock(thread);
		if (!thread->active) {
			thread_mtx_unlock(thread);
			return (KERN_TERMINATED);
		}
		
		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);
	return (result);
}
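
From user space, this entry point is reached through the Mach thread_policy_set() call. A self-contained macOS sketch requesting THREAD_TIME_CONSTRAINT_POLICY for the calling thread; the 10 ms period, 3 ms computation, and 7 ms constraint are arbitrary illustration values, converted from nanoseconds into Mach absolute-time units:

#include <stdint.h>
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_time.h>
#include <mach/thread_policy.h>

int
main(void)
{
	mach_timebase_info_data_t tb;
	mach_timebase_info(&tb);

	/* Convert nanoseconds into Mach absolute-time units. */
	double ns_to_abs = (double)tb.denom / (double)tb.numer;

	thread_time_constraint_policy_data_t policy = {
		.period      = (uint32_t)(10 * 1000 * 1000 * ns_to_abs),	/* 10 ms */
		.computation = (uint32_t)( 3 * 1000 * 1000 * ns_to_abs),	/*  3 ms */
		.constraint  = (uint32_t)( 7 * 1000 * 1000 * ns_to_abs),	/*  7 ms */
		.preemptible = TRUE,
	};

	kern_return_t kr = thread_policy_set(mach_thread_self(),
	                                     THREAD_TIME_CONSTRAINT_POLICY,
	                                     (thread_policy_t)&policy,
	                                     THREAD_TIME_CONSTRAINT_POLICY_COUNT);
	if (kr != KERN_SUCCESS)
		fprintf(stderr, "thread_policy_set: %s\n", mach_error_string(kr));
	return 0;
}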
Example #9
kern_return_t
thread_policy_set(
    thread_act_t			act,
    thread_policy_flavor_t	flavor,
    thread_policy_t			policy_info,
    mach_msg_type_number_t	count)
{
    kern_return_t			result = KERN_SUCCESS;
    thread_t				thread;
    spl_t					s;

    if (act == THR_ACT_NULL)
        return (KERN_INVALID_ARGUMENT);

    thread = act_lock_thread(act);
    if (!act->active) {
        act_unlock_thread(act);

        return (KERN_TERMINATED);
    }

    assert(thread != THREAD_NULL);

    switch (flavor) {

    case THREAD_EXTENDED_POLICY:
    {
        boolean_t				timeshare = TRUE;

        if (count >= THREAD_EXTENDED_POLICY_COUNT) {
            thread_extended_policy_t	info;

            info = (thread_extended_policy_t)policy_info;
            timeshare = info->timeshare;
        }

        s = splsched();
        thread_lock(thread);

        if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
            integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

            thread->sched_mode &= ~TH_MODE_REALTIME;

            if (timeshare && !oldmode) {
                thread->sched_mode |= TH_MODE_TIMESHARE;

                if (thread->state & TH_RUN)
                    pset_share_incr(thread->processor_set);
            }
            else if (!timeshare && oldmode) {
                thread->sched_mode &= ~TH_MODE_TIMESHARE;

                if (thread->state & TH_RUN)
                    pset_share_decr(thread->processor_set);
            }

            thread_recompute_priority(thread);
        }
        else {
            thread->safe_mode &= ~TH_MODE_REALTIME;

            if (timeshare)
                thread->safe_mode |= TH_MODE_TIMESHARE;
            else
                thread->safe_mode &= ~TH_MODE_TIMESHARE;
        }

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_TIME_CONSTRAINT_POLICY:
    {
        thread_time_constraint_policy_t		info;

        if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_time_constraint_policy_t)policy_info;
        if (	info->constraint < info->computation	||
                info->computation > max_rt_quantum		||
                info->computation < min_rt_quantum		) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        s = splsched();
        thread_lock(thread);

        thread->realtime.period = info->period;
        thread->realtime.computation = info->computation;
        thread->realtime.constraint = info->constraint;
        thread->realtime.preemptible = info->preemptible;

        if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
            if (thread->sched_mode & TH_MODE_TIMESHARE) {
                thread->sched_mode &= ~TH_MODE_TIMESHARE;

                if (thread->state & TH_RUN)
                    pset_share_decr(thread->processor_set);
            }
            thread->sched_mode |= TH_MODE_REALTIME;
            thread_recompute_priority(thread);
        }
        else {
            thread->safe_mode &= ~TH_MODE_TIMESHARE;
            thread->safe_mode |= TH_MODE_REALTIME;
        }

        thread_unlock(thread);
        splx(s);

        break;
    }

    case THREAD_PRECEDENCE_POLICY:
    {
        thread_precedence_policy_t		info;

        if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (thread_precedence_policy_t)policy_info;

        s = splsched();
        thread_lock(thread);

        thread->importance = info->importance;

        thread_recompute_priority(thread);

        thread_unlock(thread);
        splx(s);

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    act_unlock_thread(act);

    return (result);
}
Example #10
kern_return_t
thread_policy_set(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	if (thread->static_param) {
		thread_mtx_unlock(thread);

		return (KERN_SUCCESS);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		s = splsched();
		thread_lock(thread);

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			integer_t	oldmode = (thread->sched_mode & TH_MODE_TIMESHARE);

			thread->sched_mode &= ~TH_MODE_REALTIME;

			if (timeshare && !oldmode) {
				thread->sched_mode |= TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_incr();
			}
			else if (!timeshare && oldmode) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}

			thread_recompute_priority(thread);
		}
		else {
			thread->safe_mode &= ~TH_MODE_REALTIME;

			if (timeshare)
				thread->safe_mode |= TH_MODE_TIMESHARE;
			else
				thread->safe_mode &= ~TH_MODE_TIMESHARE;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		if (!(thread->sched_mode & TH_MODE_FAILSAFE)) {
			if (thread->sched_mode & TH_MODE_TIMESHARE) {
				thread->sched_mode &= ~TH_MODE_TIMESHARE;

				if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN)
					sched_share_decr();
			}
			thread->sched_mode |= TH_MODE_REALTIME;
			thread_recompute_priority(thread);
		}
		else {
			thread->safe_mode &= ~TH_MODE_TIMESHARE;
			thread->safe_mode |= TH_MODE_REALTIME;
		}

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}
	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);

	return (result);
}
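
The THREAD_AFFINITY_POLICY branch above is the kernel side of the affinity-tag hint. A minimal user-space counterpart, tagging the calling thread so that threads sharing a nonzero tag are scheduled close together (the tag value 1 is arbitrary):

#include <stdio.h>
#include <mach/mach.h>
#include <mach/thread_policy.h>

int
main(void)
{
	/* Threads that share an equal, nonzero affinity tag are hinted
	   to run on cores that share a cache. */
	thread_affinity_policy_data_t policy = { .affinity_tag = 1 };

	kern_return_t kr = thread_policy_set(mach_thread_self(),
	                                     THREAD_AFFINITY_POLICY,
	                                     (thread_policy_t)&policy,
	                                     THREAD_AFFINITY_POLICY_COUNT);
	if (kr == KERN_NOT_SUPPORTED)
		fprintf(stderr, "affinity hints not supported on this machine\n");
	return 0;
}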
Example #11
/*
 * thread_set_mode_and_absolute_pri:
 *
 * Set scheduling policy & absolute priority for thread, for deprecated
 * thread_set_policy and thread_policy interfaces.
 *
 * Note that there is no implemented difference between POLICY_RR and POLICY_FIFO.
 * Both result in FIXED mode scheduling.
 *
 * Called with thread mutex locked.
 */
kern_return_t
thread_set_mode_and_absolute_pri(
	thread_t		thread,
	integer_t		policy,
	integer_t		priority)
{
	spl_t s;
	sched_mode_t mode;
	kern_return_t kr = KERN_SUCCESS;

	if (thread_is_static_param(thread))
		return (KERN_POLICY_STATIC);

	if (thread->policy_reset)
		return (KERN_SUCCESS);

	/* Setting legacy policies on threads kills the current QoS */
	if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
		thread_mtx_unlock(thread);

		kr = thread_remove_qos_policy(thread);

		thread_mtx_lock(thread);
		if (!thread->active) {
			return (KERN_TERMINATED);
		}
	}

	switch (policy) {
		case POLICY_TIMESHARE:
			mode = TH_MODE_TIMESHARE;
			break;
		case POLICY_RR:
		case POLICY_FIFO:
			mode = TH_MODE_FIXED;
			break;
		default:
			panic("unexpected sched policy: %d", policy);
			break;
	}

	s = splsched();
	thread_lock(thread);

	/* This path isn't allowed to change a thread out of realtime. */
	if ((thread->sched_mode != TH_MODE_REALTIME) &&
	    (thread->saved_mode != TH_MODE_REALTIME)) {

		/*
		 * Reverse engineer and apply the correct importance value
		 * from the requested absolute priority value.
		 */

		if (priority >= thread->max_priority)
			priority = thread->max_priority - thread->task_priority;
		else if (priority >= MINPRI_KERNEL)
			priority -=  MINPRI_KERNEL;
		else if (priority >= MINPRI_RESERVED)
			priority -=  MINPRI_RESERVED;
		else
			priority -= BASEPRI_DEFAULT;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;
		else if (priority < MINPRI)
			priority = MINPRI;

		thread->importance = priority - thread->task_priority;

		boolean_t removed = thread_run_queue_remove(thread);

		thread_set_user_sched_mode(thread, mode);

		thread_recompute_priority(thread);

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);
	}

	thread_unlock(thread);
	splx(s);

	sfi_reevaluate(thread);

	return (kr);
}
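
The band arithmetic above is easy to misread. A standalone sketch of just that computation, using the conventional priority-band constants from osfmk/kern/sched.h (the numeric values are assumptions, not taken from this file):

#include <stdio.h>

/* Conventional XNU priority-band constants (assumed values). */
#define MINPRI          0
#define BASEPRI_DEFAULT 31
#define MINPRI_RESERVED 64
#define MINPRI_KERNEL   80

/* Mirrors the logic above: turn a requested absolute priority into an
   importance (an offset from the task priority), clamping the result
   into [MINPRI, max_priority]. */
static int
importance_for(int priority, int task_priority, int max_priority)
{
	if (priority >= max_priority)
		priority = max_priority - task_priority;
	else if (priority >= MINPRI_KERNEL)
		priority -= MINPRI_KERNEL;
	else if (priority >= MINPRI_RESERVED)
		priority -= MINPRI_RESERVED;
	else
		priority -= BASEPRI_DEFAULT;

	priority += task_priority;

	if (priority > max_priority)
		priority = max_priority;
	else if (priority < MINPRI)
		priority = MINPRI;

	return priority - task_priority;
}

int
main(void)
{
	/* A default task (base 31, cap 63): asking for absolute priority 37
	   yields importance +6; asking for 95 (a kernel-band value) is
	   clamped to the top of the task's range, importance +32. */
	printf("%d\n", importance_for(37, 31, 63));	/* 6 */
	printf("%d\n", importance_for(95, 31, 63));	/* 32 */
	return 0;
}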