Example #1
/*
 * thread_set_mode_and_absolute_pri:
 *
 * Set scheduling policy & absolute priority for thread, for deprecated
 * thread_set_policy and thread_policy interfaces.
 *
 * Note that there is no implemented difference between POLICY_RR and POLICY_FIFO.
 * Both result in FIXED mode scheduling.
 *
 * Called with thread mutex locked.
 */
kern_return_t
thread_set_mode_and_absolute_pri(
	thread_t		thread,
	integer_t		policy,
	integer_t		priority)
{
	spl_t s;
	sched_mode_t mode;
	kern_return_t kr = KERN_SUCCESS;

	if (thread_is_static_param(thread))
		return (KERN_POLICY_STATIC);

	if (thread->policy_reset)
		return (KERN_SUCCESS);

	/* Setting legacy policies on threads kills the current QoS */
	if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) {
		thread_mtx_unlock(thread);

		kr = thread_remove_qos_policy(thread);

		thread_mtx_lock(thread);
		if (!thread->active) {
			return (KERN_TERMINATED);
		}
	}

	switch (policy) {
		case POLICY_TIMESHARE:
			mode = TH_MODE_TIMESHARE;
			break;
		case POLICY_RR:
		case POLICY_FIFO:
			mode = TH_MODE_FIXED;
			break;
		default:
			panic("unexpected sched policy: %d", policy);
			break;
	}

	s = splsched();
	thread_lock(thread);

	/* This path isn't allowed to change a thread out of realtime. */
	if ((thread->sched_mode != TH_MODE_REALTIME) &&
	    (thread->saved_mode != TH_MODE_REALTIME)) {

		/*
		 * Reverse engineer and apply the correct importance value
		 * from the requested absolute priority value.
		 */

		if (priority >= thread->max_priority)
			priority = thread->max_priority - thread->task_priority;
		else if (priority >= MINPRI_KERNEL)
			priority -= MINPRI_KERNEL;
		else if (priority >= MINPRI_RESERVED)
			priority -= MINPRI_RESERVED;
		else
			priority -= BASEPRI_DEFAULT;

		priority += thread->task_priority;

		if (priority > thread->max_priority)
			priority = thread->max_priority;
		else if (priority < MINPRI)
			priority = MINPRI;

		thread->importance = priority - thread->task_priority;

		boolean_t removed = thread_run_queue_remove(thread);

		thread_set_user_sched_mode(thread, mode);

		thread_recompute_priority(thread);

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);
	}

	thread_unlock(thread);
	splx(s);

	sfi_reevaluate(thread);

	return (kr);
}
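
The importance derivation above is easiest to verify in isolation. Below is a minimal sketch that models only the arithmetic, outside the kernel: the band constants (MINPRI, MINPRI_RESERVED, MINPRI_KERNEL, BASEPRI_DEFAULT) are filled in with the values they commonly carry in osfmk/kern/sched.h, and derive_importance() is an invented helper, not a kernel routine.

#include <stdio.h>

/* Assumed priority-band boundaries, for illustration only. */
#define MINPRI           0
#define MINPRI_RESERVED  64
#define MINPRI_KERNEL    80
#define BASEPRI_DEFAULT  31

/*
 * Standalone model of the importance computation in
 * thread_set_mode_and_absolute_pri(); task_pri and max_pri stand in for
 * thread->task_priority and thread->max_priority.
 */
static int
derive_importance(int priority, int task_pri, int max_pri)
{
	if (priority >= max_pri)
		priority = max_pri - task_pri;
	else if (priority >= MINPRI_KERNEL)
		priority -= MINPRI_KERNEL;
	else if (priority >= MINPRI_RESERVED)
		priority -= MINPRI_RESERVED;
	else
		priority -= BASEPRI_DEFAULT;

	priority += task_pri;

	if (priority > max_pri)
		priority = max_pri;
	else if (priority < MINPRI)
		priority = MINPRI;

	return (priority - task_pri);
}

int
main(void)
{
	/*
	 * A default task (task_priority 31, max_priority 63) requesting
	 * absolute priority 37 ends up with importance +6, which
	 * thread_recompute_priority() later adds back on top of the
	 * task priority.
	 */
	printf("importance = %d\n", derive_importance(37, 31, 63));
	return (0);
}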
Example #2
kern_return_t
thread_policy_set_internal(
	thread_t				thread,
	thread_policy_flavor_t	flavor,
	thread_policy_t			policy_info,
	mach_msg_type_number_t	count)
{
	kern_return_t			result = KERN_SUCCESS;
	spl_t					s;

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_TERMINATED);
	}

	switch (flavor) {

	case THREAD_EXTENDED_POLICY:
	{
		boolean_t				timeshare = TRUE;

		if (count >= THREAD_EXTENDED_POLICY_COUNT) {
			thread_extended_policy_t	info;

			info = (thread_extended_policy_t)policy_info;
			timeshare = info->timeshare;
		}

		sched_mode_t mode = (timeshare == TRUE) ? TH_MODE_TIMESHARE : TH_MODE_FIXED;

		s = splsched();
		thread_lock(thread);

		boolean_t removed = thread_run_queue_remove(thread);

		thread_set_user_sched_mode(thread, mode);
		thread_recompute_priority(thread);

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
		splx(s);

		sfi_reevaluate(thread);

		break;
	}

	case THREAD_TIME_CONSTRAINT_POLICY:
	{
		thread_time_constraint_policy_t		info;

		if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_time_constraint_policy_t)policy_info;
		if (	info->constraint < info->computation	||
				info->computation > max_rt_quantum		||
				info->computation < min_rt_quantum		) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		s = splsched();
		thread_lock(thread);

		boolean_t removed = thread_run_queue_remove(thread);

		thread->realtime.period = info->period;
		thread->realtime.computation = info->computation;
		thread->realtime.constraint = info->constraint;
		thread->realtime.preemptible = info->preemptible;

		thread_set_user_sched_mode(thread, TH_MODE_REALTIME);
		thread_recompute_priority(thread);

		if (removed)
			thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
		splx(s);

		sfi_reevaluate(thread);		

		break;
	}

	case THREAD_PRECEDENCE_POLICY:
	{
		thread_precedence_policy_t		info;

		if (count < THREAD_PRECEDENCE_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}
		info = (thread_precedence_policy_t)policy_info;

		s = splsched();
		thread_lock(thread);

		thread->importance = info->importance;

		thread_recompute_priority(thread);

		thread_unlock(thread);
		splx(s);

		break;
	}

	case THREAD_AFFINITY_POLICY:
	{
		thread_affinity_policy_t	info;

		if (!thread_affinity_is_supported()) {
			result = KERN_NOT_SUPPORTED;
			break;
		}
		if (count < THREAD_AFFINITY_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (thread_affinity_policy_t) policy_info;
		/*
		 * Unlock the thread mutex here and
		 * return directly after calling thread_affinity_set().
		 * This is necessary for correct lock ordering because
		 * thread_affinity_set() takes the task lock.
		 */
		thread_mtx_unlock(thread);
		return thread_affinity_set(thread, info->affinity_tag);
	}

	case THREAD_THROUGHPUT_QOS_POLICY:
	{
		thread_throughput_qos_policy_t info = (thread_throughput_qos_policy_t) policy_info;
		int tqos;
		
		if (count < THREAD_THROUGHPUT_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) !=
		    KERN_SUCCESS) {
			break;
		}

		tqos = qos_extract(info->thread_throughput_qos_tier);
		thread->effective_policy.t_through_qos = tqos;
	}
		break;

	case THREAD_LATENCY_QOS_POLICY:
	{
		thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info;
		int lqos;
		
		if (count < THREAD_LATENCY_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) !=
		    KERN_SUCCESS) {
			break;
		}

		lqos = qos_extract(info->thread_latency_qos_tier);
/* The expected use cases (opt-in) of per-thread latency QoS would seem to
 * preclude any requirement at present to re-evaluate timers on a thread level
 * latency QoS change.
 */
		thread->effective_policy.t_latency_qos = lqos;

	}
		break;

	case THREAD_QOS_POLICY:
	case THREAD_QOS_POLICY_OVERRIDE:
	{
		thread_qos_policy_t info = (thread_qos_policy_t)policy_info;

		if (count < THREAD_QOS_POLICY_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (info->qos_tier < 0 || info->qos_tier >= THREAD_QOS_LAST) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (info->tier_importance > 0 || info->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		if (info->qos_tier == THREAD_QOS_UNSPECIFIED && info->tier_importance != 0) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		/*
		 * Going into task policy requires the task mutex,
		 * because of the way synchronization against the IO policy
		 * subsystem works.
		 *
		 * We need to move thread policy to the thread mutex instead.
		 * <rdar://problem/15831652> separate thread policy from task policy
		 */

		if (flavor == THREAD_QOS_POLICY_OVERRIDE) {
			int strongest_override = info->qos_tier;

			if (info->qos_tier != THREAD_QOS_UNSPECIFIED &&
			    thread->requested_policy.thrp_qos_override != THREAD_QOS_UNSPECIFIED)
				strongest_override = MAX(thread->requested_policy.thrp_qos_override, info->qos_tier);

			thread_mtx_unlock(thread);

			/* There is a race here. To be closed in <rdar://problem/15831652> separate thread policy from task policy */

			proc_set_task_policy(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, strongest_override);

			return (result);
		}

		thread_mtx_unlock(thread);

		proc_set_task_policy2(thread->task, thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, info->qos_tier, -info->tier_importance);

		thread_mtx_lock(thread);
		if (!thread->active) {
			thread_mtx_unlock(thread);
			return (KERN_TERMINATED);
		}
		
		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	thread_mtx_unlock(thread);
	return (result);
}
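
For reference, the THREAD_TIME_CONSTRAINT_POLICY branch above is the kernel side of the user-visible thread_policy_set() Mach call. A minimal caller might look like the sketch below; the 5 ms period, 3 ms computation and 5 ms constraint are arbitrary illustration values, and whether a given computation fits between min_rt_quantum and max_rt_quantum is machine dependent.

#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach/thread_policy.h>
#include <stdint.h>

/* Convert nanoseconds to mach absolute-time units. */
static uint64_t
ns_to_abs(uint64_t ns)
{
	mach_timebase_info_data_t tb;

	mach_timebase_info(&tb);
	return (ns * tb.denom / tb.numer);
}

/*
 * Ask the kernel to place the calling thread in TH_MODE_REALTIME via the
 * flavor handled above.  (mach_thread_self() returns a new send right that
 * a long-lived caller should eventually deallocate.)
 */
static kern_return_t
make_self_realtime(void)
{
	thread_time_constraint_policy_data_t policy;

	policy.period      = (uint32_t)ns_to_abs(5 * 1000 * 1000); /* 5 ms */
	policy.computation = (uint32_t)ns_to_abs(3 * 1000 * 1000); /* 3 ms */
	policy.constraint  = (uint32_t)ns_to_abs(5 * 1000 * 1000); /* 5 ms */
	policy.preemptible = TRUE;

	return (thread_policy_set(mach_thread_self(),
	    THREAD_TIME_CONSTRAINT_POLICY,
	    (thread_policy_t)&policy,
	    THREAD_TIME_CONSTRAINT_POLICY_COUNT));
}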
Example #3
t_Handle
qman_portal_setup(struct qman_softc *qsc)
{
    struct dpaa_portals_softc *sc;
    t_QmPortalParam qpp;
    unsigned int cpu, p;
    t_Handle portal;

    /* Return NULL if we're not ready or a detach is in progress */
    if (qp_sc == NULL)
        return (NULL);

    sc = qp_sc;

    sched_pin();
    portal = NULL;
    cpu = PCPU_GET(cpuid);

    /* Check if portal is ready */
    while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                                0, -1) == 0) {
        p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

        /* Return if portal is already initialized */
        if (p != 0 && p != -1) {
            sched_unpin();
            return ((t_Handle)p);
        }

        /* Not initialized and "owned" by another thread */
        thread_lock(curthread);
        mi_switch(SW_VOL, NULL);
        thread_unlock(curthread);
    }

    /* Map portal registers */
    dpaa_portal_map_registers(sc);

    /* Configure and initialize portal */
    qpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
    qpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
    qpp.h_Qm = qsc->sc_qh;
    qpp.swPortalId = cpu;
    qpp.irq = (int)sc->sc_dp[cpu].dp_ires;
    qpp.fdLiodnOffset = 0;
    qpp.f_DfltFrame = qman_received_frame_callback;
    qpp.f_RejectedFrame = qman_rejected_frame_callback;
    qpp.h_App = qsc;

    portal = QM_PORTAL_Config(&qpp);
    if (portal == NULL)
        goto err;

    if (QM_PORTAL_Init(portal) != E_OK)
        goto err;

    if (QM_PORTAL_AddPoolChannel(portal, QMAN_COMMON_POOL_CHANNEL) != E_OK)
        goto err;

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
                        (uint32_t)portal);
    sched_unpin();

    return (portal);

err:
    if (portal != NULL)
        QM_PORTAL_Free(portal);

    atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
    sched_unpin();

    return (NULL);
}
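
The cmpset loop above is a small per-CPU ownership handshake: 0 means the slot is unclaimed, -1 is a sentinel meaning another thread is busy initializing it, and any other value is the published portal handle. The sketch below restates the same pattern with C11 atomics instead of the FreeBSD atomic_* primitives; slot_init() is a hypothetical stand-in for the QM_PORTAL_Config()/QM_PORTAL_Init() sequence.

#include <sched.h>	/* sched_yield() */
#include <stdatomic.h>
#include <stdint.h>

#define SLOT_BUSY ((uintptr_t)-1)

/* 0 = unclaimed, SLOT_BUSY = being initialized, else = finished handle. */
static _Atomic uintptr_t slot;

/* Hypothetical initializer standing in for the portal setup steps. */
extern void *slot_init(void);

void *
slot_get(void)
{
	uintptr_t expected;
	void *p;

	for (;;) {
		expected = 0;
		/* Try to claim the unowned slot by writing the sentinel. */
		if (atomic_compare_exchange_weak(&slot, &expected, SLOT_BUSY))
			break;

		/* Someone else already finished: reuse their handle. */
		if (expected != 0 && expected != SLOT_BUSY)
			return ((void *)expected);

		/* Someone else is still initializing: let them run. */
		sched_yield();
	}

	p = slot_init();
	if (p == NULL) {
		/* Roll back to unclaimed so another thread may retry. */
		atomic_store(&slot, 0);
		return (NULL);
	}

	/* Publish the finished handle. */
	atomic_store(&slot, (uintptr_t)p);
	return (p);
}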
Example #4
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			continue;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow, as above.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TDF_DIDRUN */
			} else if (td->td_flags & TDF_DIDRUN) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (ts->ts_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
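
The ts_pctcpu update in the loop above is plain fixed-point arithmetic: once a second the old estimate decays by ccpu and the CPU share measured over the last second is folded in, scaled by FSCALE. The standalone sketch below models only that update; FSHIFT, FSCALE and the roughly 95% decay constant are the conventional 4BSD values, used here purely for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t fixpt_t;

/* Conventional 4BSD fixed-point parameters (illustrative). */
#define FSHIFT	11
#define FSCALE	(1 << FSHIFT)

/* ccpu ~ 0.95 * FSCALE: each second the old %CPU estimate keeps ~95%. */
static const fixpt_t ccpu = (fixpt_t)(0.95 * FSCALE);

/*
 * One schedcpu() step for a single thread: decay the old estimate, then
 * fold in the CPU share measured over the last second.
 */
static fixpt_t
decay_pctcpu(fixpt_t pctcpu, int cpticks, int stathz)
{
	pctcpu = (pctcpu * ccpu) >> FSHIFT;
	if (cpticks != 0)
		pctcpu += ((FSCALE - ccpu) *
		    ((fixpt_t)cpticks * FSCALE / stathz)) >> FSHIFT;
	return (pctcpu);
}

int
main(void)
{
	fixpt_t p = 0;
	int sec;

	/*
	 * A thread using 64 of 128 stathz ticks each second (50% CPU)
	 * converges toward roughly FSCALE/2; integer truncation keeps the
	 * printed value a little under 50%.
	 */
	for (sec = 0; sec < 100; sec++)
		p = decay_pctcpu(p, 64, 128);
	printf("pctcpu ~= %.1f%%\n", 100.0 * p / FSCALE);
	return (0);
}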
Example #5
/*
 * This function is called when a thread is about to be put on run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if the new thread should be immediately preempted to.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;

	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
			("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_name);
	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}
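
The td_critnest/td_owepreempt interplay is the subtle part of maybe_preempt(): while the current thread sits in a critical section the preemption is only recorded, and it has to be paid when the outermost critical_exit() runs. Below is a toy model of that hand-off, detached from the real scheduler; every name in it (toy_thread, toy_switch, and so on) is invented for illustration.

#include <stdbool.h>

/* Toy per-thread state mirroring td_critnest / td_owepreempt. */
struct toy_thread {
	int	critnest;	/* nesting depth of critical sections */
	bool	owepreempt;	/* preemption requested while nested */
};

/* Hypothetical stand-in for mi_switch(SW_INVOL | SW_PREEMPT, ...). */
extern void toy_switch(void);

static void
toy_critical_enter(struct toy_thread *td)
{
	td->critnest++;
}

static void
toy_critical_exit(struct toy_thread *td)
{
	if (td->critnest == 1 && td->owepreempt) {
		/* Leaving the outermost section: pay the deferred switch. */
		td->owepreempt = false;
		td->critnest = 0;
		toy_switch();
		return;
	}
	td->critnest--;
}

/*
 * The choice maybe_preempt() makes when the current thread is in a
 * critical section: do not switch now, just leave a note that
 * toy_critical_exit() will honor later.  (In the real code the threshold
 * is 1, not 0, because the spin lock taken by thread_lock() already
 * counts as one level of critical nesting.)
 */
static bool
toy_maybe_preempt(struct toy_thread *curtd)
{
	if (curtd->critnest > 0) {
		curtd->owepreempt = true;
		return (false);
	}
	toy_switch();
	return (true);
}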
Example #6
/*------------------------------------------------------------------------*
 *	usb_process
 *
 * This function is the USB process dispatcher.
 *------------------------------------------------------------------------*/
static void
usb_process(void *arg)
{
	struct usb_process *up = arg;
	struct usb_proc_msg *pm;
	struct thread *td;

	/* in case of attach error, check for suspended */
	USB_THREAD_SUSPEND_CHECK();

	/* adjust priority */
	td = curthread;
	thread_lock(td);
	sched_prio(td, up->up_prio);
	thread_unlock(td);

	USB_MTX_LOCK(up->up_mtx);

	up->up_curtd = td;

	while (1) {

		if (up->up_gone)
			break;

		/*
		 * NOTE to reimplementors: dequeueing a command from the
		 * "used" queue and executing it must be atomic, with regard
		 * to the "up_mtx" mutex. That means any attempt to queue a
		 * command by another thread must be blocked until either:
		 *
		 * 1) the command sleeps
		 *
		 * 2) the command returns
		 *
		 * Here is a practical example that shows how this helps
		 * solving a problem:
		 *
		 * Assume that you want to set the baud rate on a USB serial
		 * device. During the programming of the device you don't
		 * want to receive nor transmit any data, because it will be
		 * garbage most likely anyway. The programming of our USB
		 * device takes 20 milliseconds and it needs to call
		 * functions that sleep.
		 *
		 * Non-working solution: Before we queue the programming
		 * command, we stop transmission and reception of data. Then
		 * we queue a programming command. At the end of the
		 * programming command we enable transmission and reception
		 * of data.
		 *
		 * Problem: If a second programming command is queued while the
		 * first one is sleeping, we end up enabling transmission
		 * and reception of data too early.
		 *
		 * Working solution: Before we queue the programming command,
		 * we stop transmission and reception of data. Then we queue
		 * a programming command. Then we queue a second command
		 * that only enables transmission and reception of data.
		 *
		 * Why it works: If a second programming command is queued
		 * while the first one is sleeping, then the queueing of a
		 * second command to enable the data transfers, will cause
		 * the previous one, which is still on the queue, to be
		 * removed from the queue, and re-inserted after the last
		 * baud rate programming command, which then gives the
		 * desired result.
		 */
		pm = TAILQ_FIRST(&up->up_qhead);

		if (pm) {
			DPRINTF("Message pm=%p, cb=%p (enter)\n",
			    pm, pm->pm_callback);

			(pm->pm_callback) (pm);

			if (pm == TAILQ_FIRST(&up->up_qhead)) {
				/* nothing changed */
				TAILQ_REMOVE(&up->up_qhead, pm, pm_qentry);
				pm->pm_qentry.tqe_prev = NULL;
			}
			DPRINTF("Message pm=%p (leave)\n", pm);

			continue;
		}
		/* end of messages - check if anyone is waiting for sync */
		if (up->up_dsleep) {
			up->up_dsleep = 0;
			cv_broadcast(&up->up_drain);
		}
		up->up_msleep = 1;
		cv_wait(&up->up_cv, up->up_mtx);
	}

	up->up_ptr = NULL;
	cv_signal(&up->up_cv);
	USB_MTX_UNLOCK(up->up_mtx);
#if (__FreeBSD_version >= 800000)
	/* Clear the proc pointer if this is the last thread. */
	if (--usb_pcount == 0)
		usbproc = NULL;
#endif

	USB_THREAD_EXIT(0);
}
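
The queueing discipline described in the NOTE above (a command that is still pending is pulled off the queue and re-inserted at the tail, so it always executes after the most recently queued work) can be sketched with the plain sys/queue.h TAILQ macros. The msg/msg_head types below are simplified stand-ins, not the real usb_proc_msg plumbing.

#include <sys/queue.h>
#include <stddef.h>

struct msg {
	TAILQ_ENTRY(msg) entry;
	int		 queued;	/* nonzero while on the queue */
	void		(*cb)(struct msg *);
};

TAILQ_HEAD(msg_head, msg);

/*
 * Queue (or re-queue) a message.  If it is already pending it moves to the
 * tail, which is what re-orders an "enable data transfers" command behind
 * the latest "program the device" command in the example above.
 */
static void
msg_enqueue(struct msg_head *q, struct msg *m)
{
	if (m->queued)
		TAILQ_REMOVE(q, m, entry);
	TAILQ_INSERT_TAIL(q, m, entry);
	m->queued = 1;
}

/*
 * One pass of a dispatcher loop, mirroring the TAILQ_FIRST / "nothing
 * changed" dance in usb_process(): the callback may sleep, and the message
 * may be re-queued behind newer work in the meantime.
 */
static void
msg_dispatch_one(struct msg_head *q)
{
	struct msg *m = TAILQ_FIRST(q);

	if (m == NULL)
		return;
	(m->cb)(m);
	if (m == TAILQ_FIRST(q)) {	/* still at the head: consume it */
		TAILQ_REMOVE(q, m, entry);
		m->queued = 0;
	}
}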