Example #1
static void setup_cpu_sibling_map(int cpu)
{
    if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) ||
         !zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
        panic("No memory for CPU sibling/core maps");

    /* A CPU is a sibling with itself and is always on its own core. */
    cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
    cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
}
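Note that Example #1 is Xen hypervisor code: Xen's zalloc_cpumask_var() takes no GFP flags, unlike the Linux variant used in most of the examples below. A rough sketch of the two signatures (assumed from the respective headers; exact types vary by version):

/* Xen flavor (xen/include/xen/cpumask.h) - no allocation flags */
bool_t zalloc_cpumask_var(cpumask_var_t *mask);

/* Linux flavor (include/linux/cpumask.h) - caller supplies GFP flags */
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);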
Example #2
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
    unsigned cpu;
    unsigned int i;

    if (skip_ioapic_setup) {
        char *m = (max_cpus == 0) ?
                  "The nosmp parameter is incompatible with Xen; " \
                  "use Xen dom0_max_vcpus=1 parameter" :
                  "The noapic parameter is incompatible with Xen";

        xen_raw_printk(m);
        panic(m);
    }
    xen_init_lock_cpu(0);

    smp_store_cpu_info(0);
    cpu_data(0).x86_max_cores = 1;

    for_each_possible_cpu(i) {
        zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
        zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
        zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
    }
    set_cpu_sibling_map(0);

    if (xen_smp_intr_init(0))
        BUG();

    if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
        panic("could not allocate xen_cpu_initialized_map\n");

    cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

    /* Restrict the possible_map according to max_cpus. */
    while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
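        /* Scan down from the top for the highest-numbered possible CPU,
         * then mark it not-possible. */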
        for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
            continue;
        set_cpu_possible(cpu, false);
    }

    for_each_possible_cpu (cpu) {
        struct task_struct *idle;

        if (cpu == 0)
            continue;

        idle = fork_idle(cpu);
        if (IS_ERR(idle))
            panic("failed fork for CPU %d", cpu);

        set_cpu_present(cpu, true);
    }
}
Example #3
/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const __be32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
		       " supports %d logical cpus.\n", np,
		       num_possible_cpus());
		goto out_unlock;
	}

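	/* Slide the nthreads-wide window of candidate ids up the mask until
	 * it fits entirely within the unoccupied slots (or falls off the end). */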
	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpu_present(cpu));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
	}
	err = 0;

out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}
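The x^1 == y sibling invariant described in the comment above can be illustrated with a minimal standalone sketch (illustrative user-space C, not kernel code):

#include <stdio.h>

int main(void)
{
	/* With SMT thread ids allocated in adjacent pairs, flipping the
	 * lowest bit of a logical id yields its sibling: 0<->1, 2<->3, ... */
	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu %d <-> sibling %d\n", cpu, cpu ^ 1);
	return 0;
}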
Example #4
static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
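The cpumask_available() check above only matters because cpumask_var_t has two build-time flavors. A simplified sketch of the idea (loosely based on include/linux/cpumask.h; exact definitions vary by kernel version):

#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;   /* heap pointer: must be allocated */
#else
typedef struct cpumask cpumask_var_t[1]; /* embedded: allocation is trivial */
#endif

With CONFIG_CPUMASK_OFFSTACK=y, zalloc_cpumask_var() performs a real zeroed allocation and can fail; otherwise it merely zeroes the embedded mask and always succeeds.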
Example #5
/**
 * cpu_rmap_update - update CPU rmap following a change of object affinity
 * @rmap: CPU rmap to update
 * @index: Index of object whose affinity changed
 * @affinity: New CPU affinity of object
 */
int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
		    const struct cpumask *affinity)
{
	cpumask_var_t update_mask;
	unsigned int cpu;

	if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
		return -ENOMEM;

	/* Invalidate distance for all CPUs for which this used to be
	 * the nearest object.  Mark those CPUs for update.
	 */
	for_each_online_cpu(cpu) {
		if (rmap->near[cpu].index == index) {
			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
			cpumask_set_cpu(cpu, update_mask);
		}
	}

	debug_print_rmap(rmap, "after invalidating old distances");

	/* Set distance to 0 for all CPUs in the new affinity mask.
	 * Mark all CPUs within their NUMA nodes for update.
	 */
	for_each_cpu(cpu, affinity) {
		rmap->near[cpu].index = index;
		rmap->near[cpu].dist = 0;
		cpumask_or(update_mask, update_mask,
			   cpumask_of_node(cpu_to_node(cpu)));
	}
Example #6
/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
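	/* i indexes the first vec whose mask allocation failed (or equals
	 * CPUPRI_NR_PRIORITIES if the kcalloc above failed), so only the
	 * masks that were actually allocated get freed. */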
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}
Example #7
void domain_update_node_affinity(struct domain *d)
{
    cpumask_var_t cpumask;
    cpumask_var_t online_affinity;
    const cpumask_t *online;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    if ( !zalloc_cpumask_var(&cpumask) )
        return;
    if ( !alloc_cpumask_var(&online_affinity) )
    {
        free_cpumask_var(cpumask);
        return;
    }

    online = cpupool_online_cpumask(d->cpupool);

    spin_lock(&d->node_affinity_lock);

    for_each_vcpu ( d, v )
    {
        cpumask_and(online_affinity, v->cpu_affinity, online);
        cpumask_or(cpumask, cpumask, online_affinity);
    }
Example #8
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
Example #9
static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!irq_default_affinity)
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
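Example #4 above achieves the same thing without the #ifdef by using the cpumask_available() helper, whose intent can be sketched roughly as:

static inline bool cpumask_available(cpumask_var_t mask)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	return mask != NULL;
#else
	return true;
#endif
}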
Example #10
static struct cpupool *alloc_cpupool_struct(void)
{
    struct cpupool *c = xzalloc(struct cpupool);

    if ( !c || !zalloc_cpumask_var(&c->cpu_valid) )
    {
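        /* xfree(NULL) is a no-op, so this branch is safe even when the
         * xzalloc() above failed and c is NULL. */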
        xfree(c);
        c = NULL;
    }
    else if ( !zalloc_cpumask_var(&c->cpu_suspended) )
    {
        free_cpumask_var(c->cpu_valid);
        xfree(c);
        c = NULL;
    }

    return c;
}
Example #11
long init_domain_proc_info(struct domain_proc_info* m,
				int num_cpus, int num_domains)
{
	int i;
	int num_alloced_cpu_masks = 0;
	int num_alloced_domain_masks = 0;

	m->cpu_to_domains =
		kmalloc(sizeof(*(m->cpu_to_domains))*num_cpus,
			GFP_ATOMIC);
	if(!m->cpu_to_domains)
		goto failure;

	m->domain_to_cpus =
		kmalloc(sizeof(*(m->domain_to_cpus))*num_domains,
			GFP_ATOMIC);
	if(!m->domain_to_cpus)
		goto failure;

	for(i = 0; i < num_cpus; ++i) {
		if(!zalloc_cpumask_var(&m->cpu_to_domains[i].mask, GFP_ATOMIC))
			goto failure;
		++num_alloced_cpu_masks;
	}
	for(i = 0; i < num_domains; ++i) {
		if(!zalloc_cpumask_var(&m->domain_to_cpus[i].mask, GFP_ATOMIC))
			goto failure;
		++num_alloced_domain_masks;
	}

	return 0;

failure:
	for(i = 0; i < num_alloced_cpu_masks; ++i)
		free_cpumask_var(m->cpu_to_domains[i].mask);
	for(i = 0; i < num_alloced_domain_masks; ++i)
		free_cpumask_var(m->domain_to_cpus[i].mask);
	if(m->cpu_to_domains)
		kfree(m->cpu_to_domains);
	if(m->domain_to_cpus)
		kfree(m->domain_to_cpus);
	return -ENOMEM;
}
Example #12
static int __init irq_affinity_setup(char *str)
{
	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
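In the kernel this parser is registered as an early boot parameter (registration as found in recent kernels' kernel/irq/irqdesc.c), so booting with e.g. irq_affinity=0-3 restricts the default IRQ affinity:

__setup("irq_affinity=", irq_affinity_setup);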
Example #13
static int mem_rx_setup(struct link_device *ld)
{
	struct mem_link_device *mld = to_mem_link_device(ld);

	if (!zalloc_cpumask_var(&mld->dmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&mld->imask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&mld->tmask, GFP_KERNEL))
		return -ENOMEM;

#ifdef CONFIG_ARGOS
	/* The hard-coded mask values below should be removed later on.
	 * Like net-sysfs, the argos module should also support a sysfs knob
	 * so that user space can control these cpu masks. */
#ifdef CONFIG_SCHED_HMP
	cpumask_copy(mld->dmask, &hmp_slow_cpu_mask);
#endif

	cpumask_or(mld->imask, mld->imask, cpumask_of(3));

	argos_irq_affinity_setup_label(217, "IPC", mld->imask, mld->dmask);
#endif

	ld->tx_wq = create_singlethread_workqueue("mem_tx_work");
	if (!ld->tx_wq) {
		mif_err("%s: ERR! fail to create tx_wq\n", ld->name);
		return -ENOMEM;
	}

	ld->rx_wq = alloc_workqueue(
			"mem_rx_work", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!ld->rx_wq) {
		mif_err("%s: ERR! fail to create rx_wq\n", ld->name);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&ld->rx_delayed_work, link_to_demux_work);

	return 0;
}
Example #14
static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}
Example #15
static struct p2m_domain *p2m_init_one(struct domain *d)
{
    struct p2m_domain *p2m = xzalloc(struct p2m_domain);

    if ( !p2m )
        return NULL;

    if ( !zalloc_cpumask_var(&p2m->dirty_cpumask) )
        goto free_p2m;

    if ( p2m_initialise(d, p2m) )
        goto free_cpumask;
    return p2m;

free_cpumask:
    free_cpumask_var(p2m->dirty_cpumask);
free_p2m:
    xfree(p2m);
    return NULL;
}
Example #16
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}
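The "%*pb" specifier is the kernel's bitmap-printing extension, and cpumask_pr_args() expands to the field-width/pointer pair it expects. Per include/linux/cpumask.h it is roughly:

#define cpumask_pr_args(maskp)	nr_cpu_ids, cpumask_bits(maskp)

so the snprintf() above is equivalent to snprintf(buf, PAGE_SIZE, "%*pb\n", nr_cpu_ids, cpumask_bits(mask)).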
Example #17
int get_cluster_size(enum cache_level level)
{
	cpumask_var_t mask;
	int ok;
	int num_cpus;

	if (level == GLOBAL_CLUSTER)
		return num_online_cpus();
	else {
		if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
			return -ENOMEM;
		/* assumes CPU 0 is representative of all CPUs */
		ok = get_shared_cpu_map(mask, 0, level);
		/* ok == 0 means we got the map; otherwise it's an invalid cache level */
		if (ok == 0)
			num_cpus = cpumask_weight(mask);
		free_cpumask_var(mask);

		if (ok == 0)
			return num_cpus;
		else
			return -EINVAL;
	}
}
Example #18
/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));
	raw_spin_lock_init(&cp->lock);
	cp->size = 0;

	cp->elements = kcalloc(nr_cpu_ids,
			       sizeof(struct cpudl_item),
			       GFP_KERNEL);
	if (!cp->elements)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
		kfree(cp->elements);
		return -ENOMEM;
	}

	for_each_possible_cpu(i)
		cp->elements[i].idx = IDX_INVALID;

	return 0;
}
Example #19
static int acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;


	if (!device)
		return -EINVAL;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		kfree(pr);
		return -ENOMEM;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	return 0;
}
Example #20
void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}
Example #21
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

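	/* Allocate one extra entry to hold the CPUFREQ_TABLE_END sentinel
	 * appended after the loop below. */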
	data->freq_table = kzalloc(sizeof(*data->freq_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
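	/* Skip states whose frequency does not decrease relative to the last
	 * accepted entry; the resulting table is strictly descending. */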
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_table_validate_and_show(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}
Example #22
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, uint32_t ssidref)
{
    struct domain *d, **pd;
    enum { INIT_xsm = 1u<<0, INIT_watchdog = 1u<<1, INIT_rangeset = 1u<<2,
           INIT_evtchn = 1u<<3, INIT_gnttab = 1u<<4, INIT_arch = 1u<<5 };
    int err, init_status = 0;
    int poolid = CPUPOOLID_NONE;

    if ( (d = alloc_domain_struct()) == NULL )
        return ERR_PTR(-ENOMEM);

    d->domain_id = domid;

    lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain");

    if ( (err = xsm_alloc_security_domain(d)) != 0 )
        goto fail;
    init_status |= INIT_xsm;

    watchdog_domain_init(d);
    init_status |= INIT_watchdog;

    atomic_set(&d->refcnt, 1);
    spin_lock_init_prof(d, domain_lock);
    spin_lock_init_prof(d, page_alloc_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_PAGE_LIST_HEAD(&d->page_list);
    INIT_PAGE_LIST_HEAD(&d->xenpage_list);

    spin_lock_init(&d->node_affinity_lock);
    d->node_affinity = NODE_MASK_ALL;
    d->auto_node_affinity = 1;

    spin_lock_init(&d->shutdown_lock);
    d->shutdown_code = -1;

    err = -ENOMEM;
    if ( !zalloc_cpumask_var(&d->domain_dirty_cpumask) )
        goto fail;

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    if ( domid == 0 )
    {
        d->is_pinned = opt_dom0_vcpus_pin;
        d->disable_migrate = 1;
    }

    rangeset_domain_initialise(d);
    init_status |= INIT_rangeset;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps   = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail;

    if ( domcr_flags & DOMCRF_dummy )
        return d;

    if ( !is_idle_domain(d) )
    {
        if ( (err = xsm_domain_create(XSM_HOOK, d, ssidref)) != 0 )
            goto fail;

        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        if ( domid )
            d->nr_pirqs = nr_static_irqs + extra_domU_irqs;
        else
            d->nr_pirqs = nr_static_irqs + extra_dom0_irqs;
        if ( d->nr_pirqs > nr_irqs )
            d->nr_pirqs = nr_irqs;

        radix_tree_init(&d->pirq_tree);

        if ( (err = evtchn_init(d)) != 0 )
            goto fail;
        init_status |= INIT_evtchn;

        if ( (err = grant_table_create(d)) != 0 )
            goto fail;
        init_status |= INIT_gnttab;

        poolid = 0;

        err = -ENOMEM;
        d->mem_event = xzalloc(struct mem_event_per_domain);
        if ( !d->mem_event )
            goto fail;
    }

    if ( (err = arch_domain_create(d, domcr_flags)) != 0 )
        goto fail;
    init_status |= INIT_arch;

    if ( (err = cpupool_add_domain(d, poolid)) != 0 )
        goto fail;

    if ( (err = sched_init_domain(d)) != 0 )
        goto fail;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail:
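    /* Tear down only the stages recorded in init_status, in reverse
     * order of initialisation. */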
    d->is_dying = DOMDYING_dead;
    atomic_set(&d->refcnt, DOMAIN_DESTROYED);
    xfree(d->mem_event);
    if ( init_status & INIT_arch )
        arch_domain_destroy(d);
    if ( init_status & INIT_gnttab )
        grant_table_destroy(d);
    if ( init_status & INIT_evtchn )
    {
        evtchn_destroy(d);
        evtchn_destroy_final(d);
        radix_tree_destroy(&d->pirq_tree, free_pirq_struct);
    }
    if ( init_status & INIT_rangeset )
        rangeset_domain_destroy(d);
    if ( init_status & INIT_watchdog )
        watchdog_domain_destroy(d);
    if ( init_status & INIT_xsm )
        xsm_free_security_domain(d);
    free_cpumask_var(d->domain_dirty_cpumask);
    free_domain_struct(d);
    return ERR_PTR(err);
}
Example #23
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    spin_lock_init(&v->virq_lock);

    tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);

    if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
         !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
         !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
        goto fail_free;

    if ( is_idle_domain(d) )
    {
        v->runstate.state = RUNSTATE_running;
    }
    else
    {
        v->runstate.state = RUNSTATE_offline;        
        v->runstate.state_entry_time = NOW();
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = ((vcpu_id < XEN_LEGACY_MAX_VCPUS)
                        ? (vcpu_info_t *)&shared_info(d, vcpu_info[vcpu_id])
                        : &dummy_vcpu_info);
        v->vcpu_info_mfn = INVALID_MFN;
        init_waitqueue_vcpu(v);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
        goto fail_wq;

    if ( vcpu_initialise(v) != 0 )
    {
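        /* The fail_* labels sit inside this block so that the goto sites
         * earlier in the function share this common unwind tail. */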
        sched_destroy_vcpu(v);
 fail_wq:
        destroy_waitqueue_vcpu(v);
 fail_free:
        free_cpumask_var(v->cpu_affinity);
        free_cpumask_var(v->cpu_affinity_tmp);
        free_cpumask_var(v->cpu_affinity_saved);
        free_cpumask_var(v->vcpu_dirty_cpumask);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
    {
        int prev_id = v->vcpu_id - 1;
        while ( (prev_id >= 0) && (d->vcpu[prev_id] == NULL) )
            prev_id--;
        BUG_ON(prev_id < 0);
        v->next_in_list = d->vcpu[prev_id]->next_in_list;
        d->vcpu[prev_id]->next_in_list = v;
    }

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    domain_update_node_affinity(d);

    return v;
}
Example #24
int assign_cpus_to_clusters(enum cache_level level,
			    struct scheduling_cluster* clusters[],
			    unsigned int num_clusters,
			    struct cluster_cpu* cpus[],
			    unsigned int num_cpus)
{
	cpumask_var_t mask;
	unsigned int i, free_cluster = 0, low_cpu;
	int err = 0;

	if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
		return -ENOMEM;

	/* clear cluster pointers */
	for (i = 0; i < num_cpus; i++) {
		cpus[i]->id      = i;
		cpus[i]->cluster = NULL;
	}

	/* initialize clusters */
	for (i = 0; i < num_clusters; i++) {
		clusters[i]->id = i;
		INIT_LIST_HEAD(&clusters[i]->cpus);
	}

	/* Assign each CPU. Two assumptions are made:
	 * 1) The index of a cpu in cpus corresponds to its processor id (i.e., the index in a cpu mask).
	 * 2) All cpus that belong to some cluster are online.
	 */
	for_each_online_cpu(i) {
		/* get lowest-id CPU in cluster */
		if (level != GLOBAL_CLUSTER) {
			err = get_shared_cpu_map(mask, cpus[i]->id, level);
			if (err != 0) {
				/* ugh... wrong cache level? Either caller screwed up
				 * or the CPU topology is weird. */
				printk(KERN_ERR "Could not set up clusters for L%d sharing (max: L%d).\n",
				       level, err);
				err = -EINVAL;
				goto out;
			}
			low_cpu = cpumask_first(mask);
		} else
			low_cpu = 0;
		if (low_cpu == i) {
			/* caller must provide an appropriate number of clusters */
			BUG_ON(free_cluster >= num_clusters);

			/* create new cluster */
			cpus[i]->cluster = clusters[free_cluster++];
		} else {
			/* low_cpu points to the right cluster
			 * Assumption: low_cpu is actually online and was processed earlier. */
			cpus[i]->cluster = cpus[low_cpu]->cluster;
		}
		/* enqueue in cpus list */
		list_add_tail(&cpus[i]->cluster_list, &cpus[i]->cluster->cpus);
		printk(KERN_INFO "Assigning CPU%u to cluster %u\n.", i, cpus[i]->cluster->id);
	}
out:
	free_cpumask_var(mask);
	return err;
}
Example #25
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 if successful, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified.  The created channel buffer files will be named
 * name_0...name_N-1.  File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * When CPU hotplug is not available and the ring buffer is
			 * allocated from an early initcall, it will not be notified
			 * of secondary cpus; in that case we need to allocate for
			 * all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							 chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							 chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
Example #26
/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up above in acpi_processor_start()
 */
static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;
	int result = 0;
	struct device *dev;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

#ifdef CONFIG_SMP
	if (pr->id >= setup_max_cpus && pr->id != 0)
		return 0;
#endif

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor\n");
		result = -ENODEV;
		goto err_free_cpumask;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
		result = -EFAULT;
		goto err_clear_processor;
	}

	/*
	 * Do not start hotplugged CPUs now, but when they
	 * are onlined the first time
	 */
	if (pr->flags.need_hotplug_init)
		return 0;

	result = acpi_processor_start(pr);
	if (result)
		goto err_remove_sysfs;

	return 0;

err_remove_sysfs:
	sysfs_remove_link(&device->dev.kobj, "sysdev");
err_clear_processor:
	/*
	 * processor_device_array is not cleared to allow checks for buggy BIOS
	 */ 
	per_cpu(processors, pr->id) = NULL;
err_free_cpumask:
	free_cpumask_var(pr->throttling.shared_cpu_map);
err_free_pr:
	kfree(pr);
	return result;
}
Example #27
static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;
	int result = 0;
	struct sys_device *sysdev;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		kfree(pr);
		return -ENOMEM;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	processor_extcntl_init();

	result = acpi_processor_get_info(device);
	if (result ||
	    ((pr->id == -1) && !processor_cntl_external())) {
		/* Processor is physically not present */
		return 0;
	}

	BUG_ON(!processor_cntl_external() &&
	       ((pr->id >= nr_cpu_ids) || (pr->id < 0)));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
#ifndef CONFIG_XEN
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
#else
	BUG_ON(pr->acpi_id >= NR_ACPI_CPUS);
	if (processor_device_array[pr->acpi_id] != NULL &&
	    processor_device_array[pr->acpi_id] != device) {
#endif
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor\n");
		result = -ENODEV;
		goto err_free_cpumask;
	}
#ifndef CONFIG_XEN
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;
#else
	processor_device_array[pr->acpi_id] = device;
	if (pr->id != -1)
		per_cpu(processors, pr->id) = pr;
#endif

	result = acpi_processor_add_fs(device);
	if (result)
		goto err_free_cpumask;

	if (pr->id != -1) {
		sysdev = get_cpu_sysdev(pr->id);
		if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
			result = -EFAULT;
			goto err_remove_fs;
		}
	}

	/* _PDC call should be done before doing anything else (if reqd.). */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);
	arch_acpi_processor_cleanup_pdc(pr);

#if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
	acpi_processor_ppc_has_changed(pr);
#endif

	/*
	 * pr->id may equal to -1 while processor_cntl_external enabled.
	 * throttle and thermal module don't support this case.
	 * Tx only works when dom0 vcpu == pcpu num by far, as we give
	 * control to dom0.
	 */
	if (pr->id != -1) {
		acpi_processor_get_throttling_info(pr);
		acpi_processor_get_limit_info(pr);
	}

	acpi_processor_power_init(pr, device);

	result = processor_extcntl_prepare(pr);
	if (result)
		goto err_power_exit;

	pr->cdev = thermal_cooling_device_register("Processor", device,
						&processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto err_power_exit;
	}

	dev_info(&device->dev, "registered as cooling_device%d\n",
		 pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result) {
		printk(KERN_ERR PREFIX "Create sysfs link\n");
		goto err_thermal_unregister;
	}
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result) {
		printk(KERN_ERR PREFIX "Create sysfs link\n");
		goto err_remove_sysfs;
	}

	return 0;

err_remove_sysfs:
	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:
	thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
	acpi_processor_power_exit(pr, device);
err_remove_fs:
	acpi_processor_remove_fs(device);
err_free_cpumask:
	free_cpumask_var(pr->throttling.shared_cpu_map);

	return result;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
	struct acpi_processor *pr = NULL;


	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	pr = acpi_driver_data(device);

	if (!processor_cntl_external() && pr->id >= nr_cpu_ids)
		goto free;

	if (type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))
			return -EINVAL;
	}

	acpi_processor_power_exit(pr, device);

	if (pr->id != -1)
		sysfs_remove_link(&device->dev.kobj, "sysdev");

	acpi_processor_remove_fs(device);

	if (pr->cdev) {
		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
		sysfs_remove_link(&pr->cdev->device.kobj, "device");
		thermal_cooling_device_unregister(pr->cdev);
		pr->cdev = NULL;
	}

#ifndef CONFIG_XEN
	per_cpu(processors, pr->id) = NULL;
	per_cpu(processor_device_array, pr->id) = NULL;
#else
	if (pr->id != -1)
		per_cpu(processors, pr->id) = NULL;
	processor_device_array[pr->acpi_id] = NULL;
#endif

free:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *                      ACPI processor hotplug support                      *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle)
{
	acpi_status status;
	unsigned long long sta = 0;


	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);

	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
		return 1;

	/*
	 * _STA is mandatory for a processor that supports hot plug
	 */
	if (status == AE_NOT_FOUND)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"Processor does not support hot plug\n"));
	else
		ACPI_EXCEPTION((AE_INFO, status,
				"Processor Device is not present"));
	return 0;
}
Example #28
static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;
	int result = 0;
	struct sys_device *sysdev;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		kfree(pr);
		return -ENOMEM;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor\n");
		result = -ENODEV;
		goto err_free_cpumask;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	result = acpi_processor_add_fs(device);
	if (result)
		goto err_free_cpumask;

	sysdev = get_cpu_sysdev(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
		result = -EFAULT;
		goto err_remove_fs;
	}

	/* _PDC call should be done before doing anything else (if reqd.). */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);
	arch_acpi_processor_cleanup_pdc(pr);

#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);


	acpi_processor_power_init(pr, device);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						&processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto err_power_exit;
	}

	dev_info(&device->dev, "registered as cooling_device%d\n",
		 pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result) {
		printk(KERN_ERR PREFIX "Create sysfs link\n");
		goto err_thermal_unregister;
	}
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result) {
		printk(KERN_ERR PREFIX "Create sysfs link\n");
		goto err_remove_sysfs;
	}

	return 0;

err_remove_sysfs:
	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:
	thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
	acpi_processor_power_exit(pr, device);
err_remove_fs:
	acpi_processor_remove_fs(device);
err_free_cpumask:
	free_cpumask_var(pr->throttling.shared_cpu_map);

	return result;
}
Example #29
static int acpi_processor_add(struct acpi_device *device,
					const struct acpi_device_id *id)
{
	struct acpi_processor *pr;
	struct device *dev;
	int result = 0;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) /* Processor is not physically present or unavailable */
		return 0;

#ifdef CONFIG_SMP
	if (pr->id >= setup_max_cpus && pr->id != 0)
		return 0;
#endif

	BUG_ON(pr->id >= nr_cpu_ids);

	/*
	 * Buggy BIOS check.
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		dev_warn(&device->dev,
			"BIOS reported wrong ACPI id %d for the processor\n",
			pr->id);
		/* Give up, but do not abort the namespace scan. */
		goto err;
	}
	/*
	 * processor_device_array is not cleared on errors to allow buggy BIOS
	 * checks.
	 */
	per_cpu(processor_device_array, pr->id) = device;
	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (!dev) {
		result = -ENODEV;
		goto err;
	}

	result = acpi_bind_one(dev, pr->handle);
	if (result)
		goto err;

	pr->dev = dev;
	dev->offline = pr->flags.need_hotplug_init;

	/* Trigger the processor driver's .probe() if present. */
	if (device_attach(dev) >= 0)
		return 1;

	dev_err(dev, "Processor driver could not be attached\n");
	acpi_unbind_one(dev);

 err:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	device->driver_data = NULL;
	per_cpu(processors, pr->id) = NULL;
 err_free_pr:
	kfree(pr);
	return result;
}
Example #30
/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 on success or a negative value on error.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}