Example #1
static int tzdev_cpu_callback(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int new_cpu;

    switch (action) {
    case CPU_DOWN_PREPARE:
    case CPU_DOWN_PREPARE_FROZEN:
        get_online_cpus();
        mutex_lock(&tzdev_core_migration_lock);
        if (sw_cpu == cpu) {
            pr_notice("Current CPU = %d is going down.\n", cpu);
            new_cpu = tzdev_get_destination_cpu();
            if (new_cpu < 0 || new_cpu == sw_cpu) {
                new_cpu = cpumask_any(cpu_active_mask);
                BUG_ON(new_cpu >= nr_cpu_ids || new_cpu == sw_cpu);
                pr_notice("No big CPU available for migration,"
                          "move Secure OS to CPU = %d\n", new_cpu);
            }
            sw_cpu = new_cpu;
            mutex_unlock(&tzdev_core_migration_lock);
            /* Migration is necessary, so do it
             * and return */
            tzdev_migrate_threads(new_cpu);
            put_online_cpus();
            return NOTIFY_OK;
        }
        mutex_unlock(&tzdev_core_migration_lock);
        put_online_cpus();
        return NOTIFY_OK;
    }
    return NOTIFY_OK;
}
Example #2
int ehca_create_comp_pool(void)
{
	int cpu;
	struct task_struct *task;

	if (!ehca_scaling_code)
		return 0;

	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
	if (pool == NULL)
		return -ENOMEM;

	spin_lock_init(&pool->last_cpu_lock);
	pool->last_cpu = cpumask_any(cpu_online_mask);

	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
	if (pool->cpu_comp_tasks == NULL) {
		kfree(pool);
		return -EINVAL;
	}

	for_each_online_cpu(cpu) {
		task = create_comp_task(pool, cpu);
		if (task) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		}
	}

	register_hotcpu_notifier(&comp_pool_callback_nb);

	printk(KERN_INFO "eHCA scaling code enabled\n");

	return 0;
}
Example #3
static void tegra_cpufreq_hotplug(NvRmPmRequest req)
{
	int rc = 0;
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	int policy = atomic_read(&hotplug_policy);

	smp_rmb();
	if (disable_hotplug)
		return;

	if (req & NvRmPmRequest_CpuOnFlag && (policy > 1 || !policy)) {
		struct cpumask m;

		cpumask_andnot(&m, cpu_present_mask, cpu_online_mask);
		cpu = cpumask_any(&m);

		if (cpu_present(cpu) && !cpu_online(cpu))
			rc = cpu_up(cpu);

	} else if (req & NvRmPmRequest_CpuOffFlag && (policy < NR_CPUS || !policy)) {
		cpu = cpumask_any_but(cpu_online_mask, 0);

		if (cpu_present(cpu) && cpu_online(cpu))
			rc = cpu_down(cpu);
	}
#endif
	if (rc)
		pr_err("%s: error %d servicing hot plug request\n",
		       __func__, rc);
}
Example #4
/**
 * cpufreq_apply_cooling - function to apply frequency clipping.
 * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
 *	clipping data.
 * @cooling_state: value of the cooling state.
 *
 * Function used to make sure the cpufreq layer is aware of current thermal
 * limits. The limits are applied by updating the cpufreq policy.
 *
 * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
 * cooling state).
 */
static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
				 unsigned long cooling_state)
{
	unsigned int cpuid, clip_freq;
	struct cpumask *mask = &cpufreq_device->allowed_cpus;
	unsigned int cpu = cpumask_any(mask);

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_device->cpufreq_state == cooling_state)
		return 0;

	clip_freq = get_cpu_frequency(cpu, cooling_state);
	if (!clip_freq)
		return -EINVAL;

	cpufreq_device->cpufreq_state = cooling_state;
	cpufreq_device->cpufreq_val = clip_freq;
	notify_device = cpufreq_device;

	for_each_cpu(cpuid, mask) {
		if (is_cpufreq_valid(cpuid))
			cpufreq_update_policy(cpuid);
	}

	notify_device = NOTIFY_INVALID;

	return 0;
}
Example #6
static unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
{
    unsigned int cpu = cpumask_any(cpumask);
    unsigned int dest = per_cpu(cpu_2_logical_apicid, cpu);
    const cpumask_t *cluster_cpus = per_cpu(cluster_cpus, cpu);

    for_each_cpu ( cpu, cluster_cpus )
        if ( cpumask_test_cpu(cpu, cpumask) )
            dest |= per_cpu(cpu_2_logical_apicid, cpu);

    return dest;
}
Example #7
static int pnv_smp_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/* This is identical to pSeries... might consolidate by
	 * moving migrate_irqs_away to a ppc_md with default to
	 * the generic fixup_irqs. --BenH.
	 */
	set_cpu_online(cpu, false);
	vdso_data->processorCount--;
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);
	xics_migrate_irqs_away();
	return 0;
}
Example #8
static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
					unsigned long action,
					void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct ehca_cpu_comp_task *cct;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
		if (!create_comp_task(pool, cpu)) {
			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpumask_any(cpu_online_mask));
		destroy_comp_task(pool, cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpu);
		wake_up_process(cct->task);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
		destroy_comp_task(pool, cpu);
		take_over_work(pool, cpu);
		break;
	}

	return NOTIFY_OK;
}
Example #9
static bool migrate_one_irq(struct irq_data *d)
{
	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
	bool ret = false;

	if (cpu >= nr_cpu_ids) {
		cpu = cpumask_any(cpu_online_mask);
		ret = true;
	}

	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);

	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);

	return ret;
}
Example #10
/*
 * Since cpu_online_mask is already updated, we just need to check for
 * affinity that has zeros
 */
static void migrate_irqs(void)
{
	int 		irq, new_cpu;

	for (irq=0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (irqd_irq_disabled(data))
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can now
		 * tell CPU not to respond to these local intr sources,
		 * such as ITV, CPEI, MCA, etc.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_any_and(data->affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three are essential, currently WARN_ON.. maybe panic?
			 */
			if (chip && chip->irq_disable &&
				chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON((!chip || !chip->irq_disable ||
					 !chip->irq_enable ||
					 !chip->irq_set_affinity));
			}
		}
	}
}
Example #11
/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
	struct cpumask *mask = &cpufreq_device->allowed_cpus;
	unsigned int cpu;
	unsigned int count = 0;
	int ret;

	cpu = cpumask_any(mask);

	ret = get_property(cpu, 0, &count, GET_MAXL);

	if (count > 0)
		*state = count;

	return ret;
}
Example #12
static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/*fix boot_cpuid here*/
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	/* FIXME: abstract this to not be platform specific later on */
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}
Example #13
/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable)
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	int best_cpu = -1;
	const struct sched_dl_entity *dl_se = &p->dl;

	if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
		best_cpu = cpumask_any(later_mask);
		goto out;
	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
		best_cpu = cpudl_maximum(cp);
		if (later_mask)
			cpumask_set_cpu(best_cpu, later_mask);
	}

out:
	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

	return best_cpu;
}
Example #14
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(vmmr0_rcu_sync_thread, hcpu,
				   "vmmr0srcusync/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "vmmr0: vmmr0srcsync for %d failed\n",
			       hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		sched_setscheduler(p, SCHED_FIFO, &sync_thread_param);
		per_cpu(sync_thread, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(sync_thread, hotcpu));
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(sync_thread, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(sync_thread, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(sync_thread, hotcpu);
		per_cpu(sync_thread, hotcpu) = NULL;
		kthread_stop(p);
		break;
	}
	return NOTIFY_OK;
}
Example #15
int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	struct irq_data *data = NULL;
	const struct cpumask *mask;
	int 		retval = 0;

	/*
	 * don't permit the CPEI target to be removed.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk ("CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor
			 */
			new_cpei_cpu = cpumask_any(cpu_online_mask);
			mask = cpumask_of(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			data = irq_get_irq_data(ia64_cpe_irq);
			/*
			 * Switch for now, immediately, we need to do fake intr
			 * as other interrupts, but need to study CPEI behaviour with
			 * polling before making changes.
			 */
			if (data && data->chip) {
				data->chip->irq_disable(data);
				data->chip->irq_set_affinity(data, mask, false);
				data->chip->irq_enable(data);
				printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
			}
		}
		if (!data) {
			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}
Example #16
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int                  idx      = 0;
	int                  task_pri = convert_prio(p->prio);

	if (task_pri >= MAX_RT_PRIO)
		return 0;

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		smp_rmb();

		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
Example #17
static void drv_read(struct drv_cmd *cmd)
{
	cmd->val = 0;

	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
}
Example #18
/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Returns: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int                  idx      = 0;
	int                  task_pri = convert_prio(p->prio);

	if (task_pri >= MAX_RT_PRIO)
		return 0;

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racey, but we can deal with it.
		 *  Ideally, we only want to look at masks that are set.
		 *
		 *  If a mask is not set, then the only thing wrong is that we
		 *  did a little more work than necessary.
		 *
		 *  If we read a zero count but the mask is set, because of the
		 *  memory barriers, that can only happen when the highest prio
		 *  task for a run queue has left the run queue, in which case,
		 *  it will be followed by a pull. If the task we are processing
		 *  fails to find a proper place to go, that pull request will
		 *  pull this task if the run queue is running at a lower
		 *  priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
Example #19
unsigned int cpu_mask_to_apicid_phys(const cpumask_t *cpumask)
{
	/* As we are using single CPU as destination, pick only one CPU here */
	return cpu_physical_id(cpumask_any(cpumask));
}