Example no. 1
/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
	int cpu;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;
		/*
		 * Protect against concurrent action removal, affinity
		 * changes etc.
		 */
		raw_spin_lock(&desc->lock);
		data = irq_desc_get_irq_data(desc);
		cpumask_copy(&affinity_new,
			     irq_data_get_affinity_mask(data));
		cpumask_clear_cpu(this_cpu, &affinity_new);

		/* Do not count inactive or per-cpu irqs. */
		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		raw_spin_unlock(&desc->lock);
		/*
		 * A single irq may be mapped to multiple cpu's
		 * vector_irq[] (for example IOAPIC cluster mode).  In
		 * this case we have two possibilities:
		 *
		 * 1) the resulting affinity mask is empty; that is,
		 * the down'd cpu is the last cpu in the irq's
		 * affinity mask, or
		 *
		 * 2) the resulting affinity mask is no longer a
		 * subset of the online cpus but the affinity mask is
		 * not zero; that is, the down'd cpu is the last online
		 * cpu in a user-set affinity mask.
		 */
		if (cpumask_empty(&affinity_new) ||
		    !cpumask_subset(&affinity_new, &online_new))
			this_count++;
	}
	/* No need to check any further. */
	if (!this_count)
		return 0;

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 *
		 * As this is an inaccurate snapshot anyway, we can do
		 * this w/o holding vector_lock.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < FIRST_SYSTEM_VECTOR; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
				if (++count == this_count)
					return 0;
			}
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}
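The check above boils down to a cpumask test: an IRQ currently landing on the outgoing CPU needs a vector elsewhere only if removing that CPU leaves its affinity mask empty, or leaves nothing in it that is still online. A minimal sketch of that predicate, assuming only the standard cpumask helpers; the name irq_needs_new_vector() is made up for illustration:

#include <linux/cpumask.h>
#include <linux/types.h>

/*
 * Hypothetical helper mirroring the test above.  @affinity is the irq's
 * current affinity mask and @this_cpu is the CPU being taken down;
 * returns true when the irq will need a vector on a remaining CPU.
 */
static bool irq_needs_new_vector(const struct cpumask *affinity,
				 unsigned int this_cpu)
{
	struct cpumask affinity_new, online_new;

	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	cpumask_copy(&affinity_new, affinity);
	cpumask_clear_cpu(this_cpu, &affinity_new);

	/* Case 1: the down'd cpu was the last cpu in the affinity mask. */
	if (cpumask_empty(&affinity_new))
		return true;

	/* Case 2: the remaining affinity cpus are all offline. */
	return !cpumask_subset(&affinity_new, &online_new);
}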
Example no. 2
static int powernow_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
    unsigned int i;
    unsigned int valid_states = 0;
    unsigned int cpu = policy->cpu;
    struct acpi_cpufreq_data *data;
    unsigned int result = 0;
    struct processor_performance *perf;
    u32 max_hw_pstate;
    uint64_t msr_content;
    struct cpuinfo_x86 *c = &cpu_data[policy->cpu];

    data = xzalloc(struct acpi_cpufreq_data);
    if (!data)
        return -ENOMEM;

    cpufreq_drv_data[cpu] = data;

    data->acpi_data = &processor_pminfo[cpu]->perf;

    perf = data->acpi_data;
    policy->shared_type = perf->shared_type;

    if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
        policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
        cpumask_set_cpu(cpu, policy->cpus);
        if (cpumask_weight(policy->cpus) != 1) {
            printk(XENLOG_WARNING "Unsupported sharing type %d (%u CPUs)\n",
                   policy->shared_type, cpumask_weight(policy->cpus));
            result = -ENODEV;
            goto err_unreg;
        }
    } else {
        cpumask_copy(policy->cpus, cpumask_of(cpu));
    }

    /* capability check */
    if (perf->state_count <= 1) {
        printk("No P-States\n");
        result = -ENODEV;
        goto err_unreg;
    }
    rdmsrl(MSR_PSTATE_CUR_LIMIT, msr_content);
    max_hw_pstate = (msr_content & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;

    if (perf->control_register.space_id != perf->status_register.space_id) {
        result = -ENODEV;
        goto err_unreg;
    }

    data->freq_table = xmalloc_array(struct cpufreq_frequency_table, 
                                    (perf->state_count+1));
    if (!data->freq_table) {
        result = -ENOMEM;
        goto err_unreg;
    }

    /* detect transition latency */
    policy->cpuinfo.transition_latency = 0;
    for (i=0; i<perf->state_count; i++) {
        if ((perf->states[i].transition_latency * 1000) >
            policy->cpuinfo.transition_latency)
            policy->cpuinfo.transition_latency =
                perf->states[i].transition_latency * 1000;
    }

    policy->governor = cpufreq_opt_governor ? : CPUFREQ_DEFAULT_GOVERNOR;

    /* table init */
    for (i = 0; i < perf->state_count && i <= max_hw_pstate; i++) {
        if (i > 0 && perf->states[i].core_frequency >=
            data->freq_table[valid_states-1].frequency / 1000)
            continue;

        data->freq_table[valid_states].index = perf->states[i].control & HW_PSTATE_MASK;
        data->freq_table[valid_states].frequency =
            perf->states[i].core_frequency * 1000;
        valid_states++;
    }
    data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
    perf->state = 0;

    result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
    if (result)
        goto err_freqfree;

    if (c->cpuid_level >= 6)
        on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);
      
    /*
     * the first call to ->target() should result in us actually
     * writing something to the appropriate registers.
     */
    data->arch_cpu_flags |= ARCH_CPU_FLAG_RESUME;

    policy->cur = data->freq_table[i].frequency;
    return result;

err_freqfree:
    xfree(data->freq_table);
err_unreg:
    xfree(data);
    cpufreq_drv_data[cpu] = NULL;

    return result;
}
/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = -1;
	int cpu = -1;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		if (cpu_online(v_idx)) {
			cpu = v_idx;
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	else
		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
	q_vector->numa_node = node;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->mask) &&
			    (rxr_idx < f->mask + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx++;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
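The allocation above packs the q_vector and all of its Tx/Rx rings into a single buffer, and the two while loops then walk the trailing ring array with ring++. A generic sketch of that single-allocation layout, using made-up demo_* types rather than the real ixgbe structures:

#include <linux/slab.h>

/* Illustrative types only; not the ixgbe structures. */
struct demo_ring {
	unsigned int queue_index;
};

struct demo_vector {
	unsigned int ring_count;
	struct demo_ring ring[];		/* rings live right after the vector */
};

static struct demo_vector *demo_vector_alloc(unsigned int ring_count, int node)
{
	size_t size = sizeof(struct demo_vector) +
		      ring_count * sizeof(struct demo_ring);
	struct demo_vector *v;

	/* Prefer the requested NUMA node, then fall back to any node. */
	v = kzalloc_node(size, GFP_KERNEL, node);
	if (!v)
		v = kzalloc(size, GFP_KERNEL);
	if (!v)
		return NULL;

	v->ring_count = ring_count;
	return v;
}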
Example no. 4
/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 *	Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
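The nested loops above depend on the covered_cpus mask so each PSD domain is assembled exactly once. A compact sketch of that bookkeeping, where domain_id[] is a hypothetical stand-in for the _PSD domain number stored in each CPU's cpc_desc:

#include <linux/cpumask.h>

/*
 * Sketch of the "covered" bookkeeping: the first uncovered CPU becomes
 * the leader of its domain, and every CPU with the same domain id is
 * marked covered so the outer loop never revisits it.
 */
static void mark_domains(const int *domain_id, struct cpumask *covered_cpus)
{
	unsigned int i, j;

	for_each_possible_cpu(i) {
		if (cpumask_test_cpu(i, covered_cpus))
			continue;			/* already placed in a domain */

		cpumask_set_cpu(i, covered_cpus);	/* i leads a new domain */
		for_each_possible_cpu(j) {
			if (j != i && domain_id[j] == domain_id[i])
				cpumask_set_cpu(j, covered_cpus);
		}
	}
}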
Example no. 5
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}
Example no. 6
void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
Example no. 7
/* Unpark enabled threads */
static void softlockup_unpark_threads(void)
{
	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	softlockup_update_smpboot_threads();
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
Example no. 9
void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
Example no. 10
/**
 * irq_reserve_ipi() - Setup an IPI to destination cpumask
 * @domain:	IPI domain
 * @dest:	cpumask of cpus which can receive the IPI
 *
 * Allocate a virq that can be used to send IPI to any CPU in dest mask.
 *
 * Return: the Linux IRQ number on success, or a negative error code on failure.
 */
int irq_reserve_ipi(struct irq_domain *domain,
			     const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
	struct irq_data *data;
	int virq, i;

	if (!domain || !irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return -EINVAL;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return -EINVAL;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return -EINVAL;
	}

	if (irq_domain_is_ipi_single(domain)) {
		/*
		 * If the underlying implementation uses a single HW irq on
		 * all cpus then we only need a single Linux irq number for
		 * it. We have no restrictions vs. the destination mask. The
		 * underlying implementation can deal with holes nicely.
		 */
		nr_irqs = 1;
		offset = 0;
	} else {
		unsigned int next;

		/*
		 * The IPI requires a separate HW irq on each CPU. We require
		 * that the destination mask is consecutive. If an
		 * implementation needs to support holes, it can reserve
		 * several IPI ranges.
		 */
		offset = cpumask_first(dest);
		/*
		 * Find a hole and if found look for another set bit after the
		 * hole. For now we don't support this scenario.
		 */
		next = cpumask_next_zero(offset, dest);
		if (next < nr_cpu_ids)
			next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return -EINVAL;
		}
	}

	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return -ENOMEM;
	}

	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
				       (void *) dest, true);

	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
		goto free_descs;
	}

	for (i = 0; i < nr_irqs; i++) {
		data = irq_get_irq_data(virq + i);
		cpumask_copy(data->common->affinity, dest);
		data->common->ipi_offset = offset;
		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
	}
	return virq;

free_descs:
	irq_free_descs(virq, nr_irqs);
	return -EBUSY;
}
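In the multi-irq branch the destination mask must be one consecutive run of CPUs, which is what the hole check above verifies. The same test written as a standalone predicate, assuming only the standard cpumask helpers (the name cpumask_is_consecutive() is invented here):

#include <linux/cpumask.h>
#include <linux/types.h>

/*
 * Illustrative predicate for the check above: a mask is "consecutive"
 * if no set bit follows the first hole after its first set bit.
 */
static bool cpumask_is_consecutive(const struct cpumask *dest)
{
	unsigned int first, next;

	first = cpumask_first(dest);
	if (first >= nr_cpu_ids)
		return false;			/* empty mask */

	/* Find the first hole, then look for a set bit after it. */
	next = cpumask_next_zero(first, dest);
	if (next < nr_cpu_ids)
		next = cpumask_next(next, dest);

	return next >= nr_cpu_ids;
}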
Example no. 11
void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}
Example no. 12
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}
Example no. 13
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int mycpu, err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};
	cpumask_var_t cpumask;
	cpumask_var_t cpumask_org;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* Move the downtaker off the unplug cpu */
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL))  {
		free_cpumask_var(cpumask);
		return -ENOMEM;
	}

	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
	set_cpus_allowed_ptr(current, cpumask);
	free_cpumask_var(cpumask);
	migrate_disable();
	mycpu = smp_processor_id();
	if (mycpu == cpu) {
		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
		migrate_enable();
		err = -EBUSY;
		goto restore_cpus;
	}

	cpu_hotplug_begin();
	err = cpu_unplug_begin(cpu);
	if (err) {
		printk("cpu_unplug_begin(%d) failed\n", cpu);
		goto out_cancel;
	}

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	__cpu_unplug_wait(cpu);
	smpboot_park_threads(cpu);

	/* Notifiers are done. Don't let any more tasks pin this CPU. */
	cpu_unplug_sync(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_unplug_done(cpu);
out_cancel:
	migrate_enable();
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
restore_cpus:
	set_cpus_allowed_ptr(current, cpumask_org);
	free_cpumask_var(cpumask_org);
	return err;
}
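The first real step above is moving the calling task off the CPU that is about to be unplugged, by restricting its allowed mask to the remaining online CPUs. That step in isolation, as a hedged sketch with the hypothetical name move_off_cpu():

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Migrate the current task away from @cpu before taking it down. */
static int move_off_cpu(unsigned int cpu)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* All online CPUs except the one being unplugged. */
	cpumask_andnot(mask, cpu_online_mask, cpumask_of(cpu));
	err = set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
	return err;
}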
/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This API can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
                           const struct cpumask *clip_cpus)
{
    struct thermal_cooling_device *cool_dev;
    struct cpufreq_cooling_device *cpufreq_dev = NULL;
    unsigned int min = 0, max = 0;
    char dev_name[THERMAL_NAME_LENGTH];
    int ret = 0, i;
    struct cpufreq_policy policy;

    /* Verify that all the clip cpus have same freq_min, freq_max limit */
    for_each_cpu(i, clip_cpus) {
        /* continue without returning an error if no cpufreq policy is found */
        if (!cpufreq_get_policy(&policy, i))
            continue;
        if (min == 0 && max == 0) {
            min = policy.cpuinfo.min_freq;
            max = policy.cpuinfo.max_freq;
        } else {
            if (min != policy.cpuinfo.min_freq ||
                    max != policy.cpuinfo.max_freq)
                return ERR_PTR(-EINVAL);
        }
    }
    cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
                          GFP_KERNEL);
    if (!cpufreq_dev)
        return ERR_PTR(-ENOMEM);

    cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);

    ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
    if (ret) {
        kfree(cpufreq_dev);
        return ERR_PTR(-EINVAL);
    }

    snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
             cpufreq_dev->id);

    cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
               &cpufreq_cooling_ops);
    if (IS_ERR(cool_dev)) {
        release_idr(&cpufreq_idr, cpufreq_dev->id);
        kfree(cpufreq_dev);
        return ERR_PTR(-EINVAL);
    }
    cpufreq_dev->cool_dev = cool_dev;
    cpufreq_dev->cpufreq_state = 0;
    mutex_lock(&cooling_cpufreq_lock);

    /* Register the notifier for first cpufreq cooling device */
    if (cpufreq_dev_count == 0)
        cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                  CPUFREQ_POLICY_NOTIFIER);
    cpufreq_dev_count++;

    mutex_unlock(&cooling_cpufreq_lock);

    return cool_dev;
}
Example no. 15
/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes the arguments so that lengths of zero yield NULL cpumasks.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	int timestamp = 0;  /* happy compiler */
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	if (cache_control & HV_FLUSH_EVICT_L2)
		timestamp = mark_caches_evicted_start();
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (cache_control & HV_FLUSH_EVICT_L2)
		mark_caches_evicted_finish(cache_cpumask, timestamp);
	if (rc == 0)
		return;
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}
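Both cpumask arguments are canonicalized the same way: copy the caller's mask only when the mask and its matching control/length argument are both non-trivial, otherwise hand the hypervisor a NULL mask. A sketch of that canonicalization as a hypothetical helper:

#include <linux/cpumask.h>

/*
 * Copy @orig into @copy and return it when the mask is meaningful
 * (non-NULL and @len non-zero); otherwise return NULL so the caller
 * can tell the hypervisor there is nothing to flush.
 */
static struct cpumask *canonicalize_mask(struct cpumask *copy,
					 const struct cpumask *orig,
					 unsigned long len)
{
	if (orig && len) {
		cpumask_copy(copy, orig);
		return copy;
	}
	cpumask_clear(copy);
	return NULL;
}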
Example no. 16
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
    unsigned long address, pfn;
    pmd_t *pmd;
    pte_t *pte;
    int pte_ofs;
    const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
    struct cpumask kstripe_mask;
    int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
    if (ktext_arg_seen && ktext_hash) {
        pr_warning("warning: \"ktext\" boot argument ignored"
                   " if \"kcache_hash\" sets up text hash-for-home\n");
        ktext_small = 0;
    }

    if (kdata_arg_seen && kdata_hash) {
        pr_warning("warning: \"kdata\" boot argument ignored"
                   " if \"kcache_hash\" sets up data hash-for-home\n");
    }

    if (kdata_huge && !hash_default) {
        pr_warning("warning: disabling \"kdata=huge\"; requires"
                   " kcache_hash=all or =allbutstack\n");
        kdata_huge = 0;
    }
#endif

    /*
     * Set up a mask for cpus to use for kernel striping.
     * This is normally all cpus, but minus dataplane cpus if any.
     * If the dataplane covers the whole chip, we stripe over
     * the whole chip too.
     */
    cpumask_copy(&kstripe_mask, cpu_possible_mask);
#ifdef CONFIG_DATAPLANE
    cpumask_andnot(&kstripe_mask, &kstripe_mask, &dataplane_map);
    if (cpumask_empty(&kstripe_mask))
        cpumask_copy(&kstripe_mask, cpu_possible_mask);
#endif
    if (!kdata_arg_seen)
        kdata_mask = kstripe_mask;

    /* Allocate and fill in L2 page tables */
    for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
        unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
        unsigned long end_pfn = node_end_pfn[i];
#endif
        unsigned long end_huge_pfn = 0;

        /* Pre-shatter the last huge page to allow per-cpu pages. */
        if (kdata_huge)
            end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

        pfn = node_start_pfn[i];

        /* Allocate enough memory to hold L2 page tables for node. */
        init_prealloc_ptes(i, end_pfn - pfn);

        address = (unsigned long) pfn_to_kaddr(pfn);
        while (pfn < end_pfn) {
            BUG_ON(address & (HPAGE_SIZE-1));
            pmd = get_pmd(pgtables, address);
            pte = get_prealloc_pte(pfn);
            if (pfn < end_huge_pfn) {
                pgprot_t prot = init_pgprot(address);
                *(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
                        pfn++, pte_ofs++, address += PAGE_SIZE)
                    pte[pte_ofs] = pfn_pte(pfn, prot);
            } else {
                if (kdata_huge)
                    printk(KERN_DEBUG "pre-shattered huge"
                           " page at %#lx\n", address);
                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
                        pfn++, pte_ofs++, address += PAGE_SIZE) {
                    pgprot_t prot = init_pgprot(address);
                    pte[pte_ofs] = pfn_pte(pfn, prot);
                }
                assign_pte(pmd, pte);
            }
        }
    }

    /*
     * Set or check ktext_map now that we have cpu_possible_mask
     * and kstripe_mask to work with.
     */
    if (ktext_all)
        cpumask_copy(&ktext_mask, cpu_possible_mask);
    else if (ktext_nondataplane)
        ktext_mask = kstripe_mask;
    else if (!cpumask_empty(&ktext_mask)) {
        /* Sanity-check any mask that was requested */
        struct cpumask bad;
        cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
        cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
        if (!cpumask_empty(&bad)) {
            char buf[NR_CPUS * 5];
            cpulist_scnprintf(buf, sizeof(buf), &bad);
            pr_info("ktext: not using unavailable cpus %s\n", buf);
        }
        if (cpumask_empty(&ktext_mask)) {
            pr_warning("ktext: no valid cpus; caching on %d.\n",
                       smp_processor_id());
            cpumask_copy(&ktext_mask,
                         cpumask_of(smp_processor_id()));
        }
    }

    address = MEM_SV_INTRPT;
    pmd = get_pmd(pgtables, address);
    if (ktext_small) {
        /* Allocate an L2 PTE for the kernel text */
        int cpu = 0;
        pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
                                         PAGE_HOME_IMMUTABLE);

        if (ktext_local) {
            if (ktext_nocache)
                prot = hv_pte_set_mode(prot,
                                       HV_PTE_MODE_UNCACHED);
            else
                prot = hv_pte_set_mode(prot,
                                       HV_PTE_MODE_CACHE_NO_L3);
        } else {
            prot = hv_pte_set_mode(prot,
                                   HV_PTE_MODE_CACHE_TILE_L3);
            cpu = cpumask_first(&ktext_mask);

            prot = ktext_set_nocache(prot);
        }

        BUG_ON(address != (unsigned long)_stext);
        pfn = 0;  /* code starts at PA 0 */
        pte = alloc_pte();
        for (pte_ofs = 0; address < (unsigned long)_einittext;
                pfn++, pte_ofs++, address += PAGE_SIZE) {
            if (!ktext_local) {
                prot = set_remote_cache_cpu(prot, cpu);
                cpu = cpumask_next(cpu, &ktext_mask);
                if (cpu == NR_CPUS)
                    cpu = cpumask_first(&ktext_mask);
            }
            pte[pte_ofs] = pfn_pte(pfn, prot);
        }
        assign_pte(pmd, pte);
    } else {
        pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
        pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
        if (ktext_hash) {
            pteval = hv_pte_set_mode(pteval,
                                     HV_PTE_MODE_CACHE_HASH_L3);
            pteval = ktext_set_nocache(pteval);
        } else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
            if (cpumask_weight(&ktext_mask) == 1) {
                pteval = set_remote_cache_cpu(pteval,
                                              cpumask_first(&ktext_mask));
                pteval = hv_pte_set_mode(pteval,
                                         HV_PTE_MODE_CACHE_TILE_L3);
                pteval = ktext_set_nocache(pteval);
            } else if (ktext_nocache)
                pteval = hv_pte_set_mode(pteval,
                                         HV_PTE_MODE_UNCACHED);
            else
                pteval = hv_pte_set_mode(pteval,
                                         HV_PTE_MODE_CACHE_NO_L3);
        *(pte_t *)pmd = pteval;
    }

    /* Set swapper_pgprot here so it is flushed to memory right away. */
    swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

    /*
     * Since we may be changing the caching of the stack and page
     * table itself, we invoke an assembly helper to do the
     * following steps:
     *
     *  - flush the cache so we start with an empty slate
     *  - install pgtables[] as the real page table
     *  - flush the TLB so the new page table takes effect
     */
    rc = flush_and_install_context(__pa(pgtables),
                                   init_pgprot((unsigned long)pgtables),
                                   __get_cpu_var(current_asid),
                                   cpumask_bits(my_cpu_mask));
    BUG_ON(rc != 0);

    /* Copy the page table back to the normal swapper_pg_dir. */
    memcpy(pgd_base, pgtables, sizeof(pgtables));
    __install_page_table(pgd_base, __get_cpu_var(current_asid),
                         swapper_pgprot);

    /*
     * We just read swapper_pgprot and thus brought it into the cache,
     * with its new home & caching mode.  When we start the other CPUs,
     * they're going to reference swapper_pgprot via their initial fake
     * VA-is-PA mappings, which cache everything locally.  At that
     * time, if it's in our cache with a conflicting home, the
     * simulator's coherence checker will complain.  So, flush it out
     * of our cache; we're not going to ever use it again anyway.
     */
    __insn_finv(&swapper_pgprot);
}
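When kernel text is striped, the PTE loop above rotates through ktext_mask and wraps back to the first CPU once cpumask_next() runs off the end. That wrap-around walk as an isolated sketch (the helper name is invented; a similar cpumask_next_wrap() exists in newer kernels):

#include <linux/cpumask.h>

/* Advance round-robin through @mask, wrapping at the end. */
static unsigned int next_cpu_wrapping(unsigned int cpu,
				      const struct cpumask *mask)
{
	unsigned int next = cpumask_next(cpu, mask);

	if (next >= nr_cpu_ids)		/* ran off the end of the mask */
		next = cpumask_first(mask);
	return next;
}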
int acpi_processor_preregister_performance(
		struct acpi_processor_performance *performance)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	retval = 0;

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			continue;
		}

		if (!performance || !percpu_ptr(performance, i)) {
			retval = -EINVAL;
			continue;
		}

		pr->performance = percpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		if (acpi_processor_get_psd(pr)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pdomain = &(pr->performance->domain_info);
		if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
		    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
			retval = -EINVAL;
			goto err_ret;
		}
		if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
		    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
		    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
			retval = -EINVAL;
			goto err_ret;
		}
	}

	cpumask_clear(covered_cpus);
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
			count++;
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type = 
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
Example no. 18
File: irq.c Project: Flipkart/linux
/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpu_clear(this_cpu, online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq >= 0) {
			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			cpumask_copy(&affinity_new, data->affinity);
			cpu_clear(this_cpu, affinity_new);

			/* Do not count inactive or per-cpu irqs. */
			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
				continue;

			/*
			 * A single irq may be mapped to multiple
			 * cpu's vector_irq[] (for example IOAPIC cluster
			 * mode).  In this case we have two
			 * possibilities:
			 *
			 * 1) the resulting affinity mask is empty; that is,
			 * the down'd cpu is the last cpu in the irq's
			 * affinity mask, or
			 *
			 * 2) the resulting affinity mask is no longer
			 * a subset of the online cpus but the affinity
			 * mask is not zero; that is, the down'd cpu is the
			 * last online cpu in a user-set affinity mask.
			 */
			if (cpumask_empty(&affinity_new) ||
			    !cpumask_subset(&affinity_new, &online_new))
				this_count++;
		}
	}

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    per_cpu(vector_irq, cpu)[vector] < 0)
					count++;
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_var_t cpus_allowed, new_mask, effective_mask;
	struct thread_info *ti;
	struct task_struct *p;
	int retval;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}
	if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_new_mask;
	}
	retval = -EPERM;
	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p);
	if (retval)
		goto out_unlock;

	/* Record new user-specified CPU set for future reference */
	cpumask_copy(&p->thread.user_cpus_allowed, new_mask);

 again:
	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, effective_mask);
	} else {
		cpumask_copy(effective_mask, new_mask);
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, new_mask);
	}

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(effective_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}
out_unlock:
	free_cpumask_var(effective_mask);
out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
Example no. 20
void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}
Example no. 21
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		pax_open_kernel();
		*(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
		pax_close_kernel();
	}

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kzalloc(sizeof(*data->freq_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_table_validate_and_show(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pax_open_kernel();
		*(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		pax_close_kernel();
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}
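The table-init loop above copies each ACPI P-state into the cpufreq frequency table in kHz, drops states that do not lower the previous frequency, and terminates the table with CPUFREQ_TABLE_END. A condensed sketch of that loop, where freqs_mhz[] stands in for perf->states[].core_frequency:

#include <linux/cpufreq.h>

/*
 * Sketch only: @table must have room for @count + 1 entries so the
 * CPUFREQ_TABLE_END terminator always fits.  Returns the number of
 * valid states copied.
 */
static unsigned int fill_freq_table(struct cpufreq_frequency_table *table,
				    const unsigned int *freqs_mhz,
				    unsigned int count)
{
	unsigned int i, valid = 0;

	for (i = 0; i < count; i++) {
		/* Skip states that do not lower the previous frequency. */
		if (i > 0 && freqs_mhz[i] * 1000 >= table[valid - 1].frequency)
			continue;

		table[valid].driver_data = i;
		table[valid].frequency = freqs_mhz[i] * 1000;	/* kHz */
		valid++;
	}
	table[valid].frequency = CPUFREQ_TABLE_END;
	return valid;
}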
Example no. 22
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
Example no. 23
File: vm.c Project: joelagnel/ktap
/* ktap main thread initialization, main entry for ktap */
ktap_state *kp_newstate(struct ktap_parm *parm, struct dentry *dir, char **argv)
{
	ktap_state *ks;
	pid_t pid;
	int cpu;

	ks = kzalloc(sizeof(ktap_state) + sizeof(ktap_global_state),
		     GFP_KERNEL);
	if (!ks)
		return NULL;

	ks->stack = kp_malloc(ks, KTAP_STACK_SIZE);
	G(ks) = (ktap_global_state *)(ks + 1);
	G(ks)->mainthread = ks;
	G(ks)->seed = 201236; /* todo: make more random in future */
	G(ks)->task = current;
	G(ks)->verbose = parm->verbose; /* for debug use */
	G(ks)->print_timestamp = parm->print_timestamp;
	G(ks)->workload = parm->workload;
	INIT_LIST_HEAD(&(G(ks)->timers));
	INIT_LIST_HEAD(&(G(ks)->probe_events_head));
	G(ks)->exit = 0;

	if (kp_transport_init(ks, dir))
		goto out;

	pid = (pid_t)parm->trace_pid;
	if (pid != -1) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(find_vpid(pid), PIDTYPE_PID);
		if (!task) {
			kp_error(ks, "cannot find pid %d\n", pid);
			rcu_read_unlock();
			goto out;
		}
		G(ks)->trace_task = task;
		get_task_struct(task);
		rcu_read_unlock();
	}

	if (!alloc_cpumask_var(&G(ks)->cpumask, GFP_KERNEL))
		goto out;

	cpumask_copy(G(ks)->cpumask, cpu_online_mask);

	cpu = parm->trace_cpu;
	if (cpu != -1) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "ktap: cpu %d is not online\n", cpu);
			goto out;
		}

		cpumask_clear(G(ks)->cpumask);
		cpumask_set_cpu(cpu, G(ks)->cpumask);
	}

	if (cfunction_cache_init(ks))
		goto out;

	kp_tstring_resize(ks, 512); /* set initial string hashtable size */

	ktap_init_state(ks);
	ktap_init_registry(ks);
	ktap_init_arguments(ks, parm->argc, argv);

	/* init library */
	kp_init_baselib(ks);
	kp_init_kdebuglib(ks);
	kp_init_timerlib(ks);
	kp_init_ansilib(ks);

	if (alloc_kp_percpu_data())
		goto out;

	if (kp_probe_init(ks))
		goto out;

	return ks;

 out:
	G(ks)->exit = 1;
	kp_final_exit(ks);
	return NULL;
}
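The cpumask handling near the end follows a common pattern: default to cpu_online_mask, and if the user asked to trace one specific CPU, verify it is online and narrow the mask to it. The same logic as an isolated, hypothetical helper:

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Restrict @mask to @cpu if one was requested (-1 means "all online"). */
static int narrow_trace_mask(struct cpumask *mask, int cpu)
{
	cpumask_copy(mask, cpu_online_mask);

	if (cpu == -1)
		return 0;		/* trace on every online CPU */

	if (!cpu_online(cpu))
		return -EINVAL;		/* requested CPU is not online */

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	return 0;
}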