Example #1
/*
 * Broadcast handler for the MT7621 VPE0-VPE3 dummy clockevent devices.
 * This function is registered in arch/mips/kernel/cevt-r4k.c.
 *
 * It runs in timer IRQ context.
 */
void ra_systick_event_broadcast(const struct cpumask *mask)
{
	u32 reg;
	int i;
	unsigned long flags;

	/*
	 * Mailbox design:
	 *
	 * The sending VPE sets a signal bit for each target VPE in the
	 * RALINK_TESTSTAT register, so the receiving VPEs can tell a
	 * "broadcast" event from a plain "ipi_call" by reading
	 * RALINK_TESTSTAT when the ipi_call interrupt arrives.
	 *
	 * The spinlock prevents other VPEs from accessing RALINK_TESTSTAT
	 * concurrently.
	 */
	spin_lock_irqsave(&ra_teststat_lock, flags);
	reg = (*((volatile u32 *)(RALINK_TESTSTAT)));
	for_each_cpu(i, mask)
		reg |= ((0x1UL) << i);
	(*((volatile u32 *)(RALINK_TESTSTAT))) = reg;
	spin_unlock_irqrestore(&ra_teststat_lock, flags);

#ifdef CONFIG_MIPS_MT_SMP
	/* send IPI to other VPEs, using "ipi_call" GIC(60~63), MIPS int#2  */
	for_each_cpu(i, mask)
		gic_send_ipi(plat_ipi_call_int_xlate(i));
#endif
}
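For context, the receiving side of this mailbox scheme has to do the inverse: read RALINK_TESTSTAT under the same lock, test and clear its own bit, and forward the event to the clockevent core. The handler below is only an illustrative sketch of that side, written against the same ra_teststat_lock and register macro; the function name is hypothetical and the code is not taken from the original BSP.

/*
 * Hypothetical receiver-side sketch (not from the original driver):
 * decode the mailbox bit set by ra_systick_event_broadcast().
 */
static irqreturn_t ra_systick_event_receive(int irq, void *dev_id)
{
	unsigned long flags;
	u32 reg;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&ra_teststat_lock, flags);
	reg = (*((volatile u32 *)(RALINK_TESTSTAT)));
	if (!(reg & (0x1UL << cpu))) {
		/* Our bit is not set: this ipi_call was not a broadcast. */
		spin_unlock_irqrestore(&ra_teststat_lock, flags);
		return IRQ_NONE;
	}

	/* Clear our mailbox bit and hand the event to the clockevent core. */
	(*((volatile u32 *)(RALINK_TESTSTAT))) = reg & ~(0x1UL << cpu);
	spin_unlock_irqrestore(&ra_teststat_lock, flags);

	tick_receive_broadcast();
	return IRQ_HANDLED;
}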
Example #2
static inline void
clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);

	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}
static void bts_trace_stop(struct trace_array *tr)
{
	int cpu;

	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
}
static void trace_bts_prepare(struct trace_iterator *iter)
{
	int cpu;

	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
}
Example #5
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu_id, timeout;
	unsigned long bogosum = 0;

	for (timeout = 0; timeout < 5000; timeout++) {
		if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
			break;
		udelay(1000);
	}
	if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
		BUG();

	for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
		show_cpu_info(cpu_id);

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	if (cpucount) {
		for_each_cpu(cpu_id, cpu_online_mask)
			bogosum += cpu_data[cpu_id].loops_per_jiffy;

		printk(KERN_INFO "Total of %d processors activated " \
			"(%lu.%02lu BogoMIPS).\n", cpucount + 1,
			bogosum / (500000 / HZ),
			(bogosum / (5000 / HZ)) % 100);
		Dprintk("Before bogocount - setting activated=1.\n");
	}
}
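For reference, the BogoMIPS arithmetic above works because loops_per_jiffy * HZ is the number of delay-loop iterations per second and 500000 of those correspond to one BogoMIPS; the second expression extracts two decimal digits. A standalone user-space sketch of the same computation, with an assumed HZ of 100 and an invented loops_per_jiffy value:

#include <stdio.h>

#define HZ 100	/* assumed tick rate for this illustration */

int main(void)
{
	/* Invented loops_per_jiffy value, summed over two CPUs. */
	unsigned long bogosum = 2 * 2494000UL;

	/* Same formula as smp_cpus_done(): integer part plus two decimals. */
	printf("Total of 2 processors activated (%lu.%02lu BogoMIPS).\n",
	       bogosum / (500000 / HZ),
	       (bogosum / (5000 / HZ)) % 100);
	return 0;
}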
Example #6
static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		yos_send_ipi_single(i, action);
}
Example #7
static int __init proc_ppc64_init(void)
{
	unsigned long i;
	struct proc_dir_entry *pde;

	pde = create_proc_entry("ppc64/naca", S_IRUSR, NULL);
	if (!pde)
		return 1;
	pde->nlink = 1;
	pde->data = naca;
	pde->size = 4096;
	pde->proc_fops = &page_map_fops;

	pde = create_proc_entry("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL);
	if (!pde)
		return 1;
	pde->nlink = 1;
	pde->data = systemcfg;
	pde->size = 4096;
	pde->proc_fops = &page_map_fops;

	/* /proc/ppc64/paca/XX -- raw paca contents.  Only readable to root */
	pde = proc_mkdir("ppc64/paca", NULL);
	if (!pde)
		return 1;
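	/*
	 * Historic single-argument form of for_each_cpu(): it iterates over
	 * every possible CPU (later renamed for_each_possible_cpu()).
	 */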
	for_each_cpu(i)
		proc_create_paca(pde, i);

#ifdef CONFIG_PPC_PSERIES
	if ((systemcfg->platform & PLATFORM_PSERIES))
		proc_ppc64_create_ofdt();
#endif

	return 0;
}
Example #8
/*
 * No lock should be needed here; we are only dumping state.
 */
static void
rt_dump(const struct scheduler *ops)
{
	struct list_head *iter_sdom, *iter_svc, *runq, *iter;
	struct rt_private *prv = RT_PRIV(ops);
	struct rt_vcpu *svc;
	int cpu = 0;
	int loop = 0;

	printtime();
	printk("OCBP:_dumpV Priority Scheme: OCBP\n");

	printk("PCPU info: \n");
	for_each_cpu(cpu, &prv->cpus) 
		rt_dump_pcpu(ops, cpu);

	printk("OCBP:_dumpV Global RunQueue info: \n");
	loop = 0;
	runq = RUNQ(ops);
	list_for_each( iter, runq ) 
	{
		svc = __runq_elem(iter);
		printk("\tOCBP:_dumpV RunQ no:%3d: ", ++loop);
		rt_dump_vcpu(svc);
	}
}
Example #9
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}
Example #10
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}
/*
 * This function returns the closest matching cpumask among the bitmasks
 * supported by the XLP.
 * The matching logic is crude and needs to be improved later.
 *
 * @m	: user supplied cpumask
 */
static int xlp_closest_match_cpumask(u8 node, const struct cpumask *m)
{
    int i;
    char buf[40];
    struct cpumask t, a;
    /*
     * m will be a logical cpu mask.  If all threads are enabled, this will
     * match the physical cpu mask.  If not, this won't match and this
     * function won't work; however, the fall-back is to route interrupts to
     * all CPUs, so the system will still work, just not necessarily with the
     * desired affinity.  We therefore have to convert the logical cpu mask
     * to a physical mask first.
     */
    cpumask_clear(&a);
    /* Iterate through the logical map and build the corresponding physical map. */
    for_each_cpu(i, m)
        cpumask_set_cpu(cpu_logical_map(i), &a);
    cpumask_and(&a, &a, &phys_cpu_present_map);
    constrict_mask_to_node(node, &t, &a);
    cpumask_clear(&a);
    for (i = 0; i < XLP_ITE_ENTRIES; i++) {
        cpumask_and(&a, &xlp_ites[node][i], &phys_cpu_present_map);
        if (cpumask_equal(&t, &a)) {
            cpumask_scnprintf(buf, 40, m);
//			printk(KERN_DEBUG "[ite.c] Matched ITE #%d for logical cpumask %s\n", i, buf);
            return i;
        }
    }
    cpumask_scnprintf(buf, 40, m);
    printk(KERN_WARNING "[ite.c] Could not find ITE match for logical cpumask %s\n", buf);
    cpumask_scnprintf(buf, 40, &t);
    printk(KERN_WARNING "[ite.c]                  Calculated physical cpumask %s\n", buf);
    printk(KERN_WARNING "[ite.c] Using ITE #1 (default all online CPUs)\n");
    return 1; /* if no match, point to all local cpus */
}
Example #12
static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		paravirt_send_ipi_single(cpu, action);
}
Example #13
/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	mutex_lock(&lock);
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	mutex_unlock(&lock);
}
Example #14
static int __init flow_cache_init(void)
{
	int i;

	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_HWCACHE_ALIGN,
					NULL, NULL);

	if (!flow_cachep)
		panic("NET: failed to allocate flow cache slab\n");

	flow_hash_shift = 10;
	flow_lwm = 2 * flow_hash_size;
	flow_hwm = 4 * flow_hash_size;

	init_timer(&flow_hash_rnd_timer);
	flow_hash_rnd_timer.function = flow_cache_new_hashrnd;
	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);

	for_each_cpu(i)
		flow_cache_cpu_prepare(i);

	hotcpu_notifier(flow_cache_cpu, 0);
	return 0;
}
Example #15
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
Example #16
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
Example #17
/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length) {
		cpumask_or(&mask, &mask, tlb_cpumask);
	}

	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}
Example #18
void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}
Example #19
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}
Example #20
static int __init topology_init(void)
{
	int i;

	for_each_cpu(i)
		arch_register_cpu(i);
	return 0;
}
Example #21
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		sparc32_ipi_ops->mask_one(cpu);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	/* trigger IPI mask call on each CPU */
	for_each_cpu(cpu, mask)
		BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
}
Example #23
static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
				      unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		octeon_78xx_send_ipi_single(cpu, action);
}
Example #24
static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		octeon_send_ipi_single(i, action);
}
static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}
static void bts_trace_start(struct trace_array *tr)
{
	int cpu;

	tracing_reset_online_cpus(tr);

	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
}
/* Ping non-responding cpus, attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);

	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}
/*
 * Userspace writes cpu#:min_freq_value to vote for min_freq_value as the new
 * scaling_min for that CPU. To withdraw the vote, it writes cpu#:0.
 */
static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
{
	int i, j, ntokens = 0;
	unsigned int val, cpu;
	const char *cp = buf;
	struct cpu_status *i_cpu_stats;
	struct cpufreq_policy policy;
	cpumask_var_t limit_mask;
	int ret;

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	/* CPU:value pair */
	if (!(ntokens % 2))
		return -EINVAL;

	cp = buf;
	cpumask_clear(limit_mask);
	for (i = 0; i < ntokens; i += 2) {
		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
			return -EINVAL;
		if (cpu > (num_present_cpus() - 1))
			return -EINVAL;

		i_cpu_stats = &per_cpu(cpu_stats, cpu);

		i_cpu_stats->min = val;
		cpumask_set_cpu(cpu, limit_mask);

		cp = strchr(cp, ' ');
		cp++;
	}

	/*
	 * Since on synchronous systems the policy is shared among multiple
	 * CPUs, only one CPU needs to be updated for the limit to be
	 * reflected for the entire cluster. We can avoid updating the
	 * policies of the other CPUs in the cluster once it has been done
	 * for at least one CPU in the cluster.
	 */
	get_online_cpus();
	for_each_cpu(i, limit_mask) {
		i_cpu_stats = &per_cpu(cpu_stats, i);

		if (cpufreq_get_policy(&policy, i))
			continue;

		if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
			ret = cpufreq_update_policy(i);
			if (ret)
				continue;
		}
		for_each_cpu(j, policy.related_cpus)
			cpumask_clear_cpu(j, limit_mask);
	}
	put_online_cpus();

	return 0;
}
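The strpbrk()/sscanf() parsing in set_cpu_min_freq() is compact; the standalone sketch below runs the same "cpu:value [cpu:value ...]" grammar over an invented input string so the token counting and pair extraction can be checked in isolation. The NULL check after strchr() is an addition for the standalone case; the kernel code relies on the token count to bound the loop.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "0:1190400 2:0";	/* invented sample input */
	const char *cp = buf;
	unsigned int cpu, val;
	int i, ntokens = 0;

	/* Count ':' and ' ' separators; a well-formed input has an odd count. */
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;
	if (!(ntokens % 2))
		return 1;

	cp = buf;
	for (i = 0; i < ntokens; i += 2) {
		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
			return 1;
		printf("cpu %u -> min freq vote %u\n", cpu, val);
		cp = strchr(cp, ' ');
		if (!cp)
			break;
		cp++;
	}
	return 0;
}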
Example #29
static int powernow_cpufreq_target(struct cpufreq_policy *policy,
                               unsigned int target_freq, unsigned int relation)
{
    struct acpi_cpufreq_data *data = cpufreq_drv_data[policy->cpu];
    struct processor_performance *perf;
    unsigned int next_state; /* Index into freq_table */
    unsigned int next_perf_state; /* Index into perf table */
    int result;

    if (unlikely(data == NULL ||
        data->acpi_data == NULL || data->freq_table == NULL)) {
        return -ENODEV;
    }

    perf = data->acpi_data;
    result = cpufreq_frequency_table_target(policy,
                                            data->freq_table,
                                            target_freq,
                                            relation, &next_state);
    if (unlikely(result))
        return result;

    next_perf_state = data->freq_table[next_state].index;
    if (perf->state == next_perf_state) {
        if (unlikely(data->arch_cpu_flags & ARCH_CPU_FLAG_RESUME)) 
            data->arch_cpu_flags &= ~ARCH_CPU_FLAG_RESUME;
        else
            return 0;
    }

    if (policy->shared_type == CPUFREQ_SHARED_TYPE_HW &&
        likely(policy->cpu == smp_processor_id())) {
        transition_pstate(&next_perf_state);
        cpufreq_statistic_update(policy->cpu, perf->state, next_perf_state);
    } else {
        cpumask_t online_policy_cpus;
        unsigned int cpu;

        cpumask_and(&online_policy_cpus, policy->cpus, &cpu_online_map);

        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
            unlikely(policy->cpu != smp_processor_id()))
            on_selected_cpus(&online_policy_cpus, transition_pstate,
                             &next_perf_state, 1);
        else
            transition_pstate(&next_perf_state);

        for_each_cpu(cpu, &online_policy_cpus)
            cpufreq_statistic_update(cpu, perf->state, next_perf_state);
    }

    perf->state = next_perf_state;
    policy->cur = data->freq_table[next_state].frequency;

    return 0;
}
static int fold_prot_inuse(struct proto *proto)
{
	int res = 0;
	int cpu;

	for_each_cpu(cpu)
		res += proto->stats[cpu].inuse;

	return res;
}