Example #1
static int zswap_cpu_init(void)
{
	unsigned long cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
			goto cleanup;
	register_cpu_notifier(&zswap_cpu_notifier_block);
	put_online_cpus();
	return 0;

cleanup:
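	/* Undo the CPU_UP_PREPARE work on every online CPU before failing */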
	for_each_online_cpu(cpu)
		__zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
	put_online_cpus();
	return -ENOMEM;
}
Example #2
static void timer_list_show_tickdevices(struct seq_file *m)
{
    int cpu;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
    print_tickdevice(m, tick_get_broadcast_device(), -1);
    SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
               tick_get_broadcast_mask()->bits[0]);
#ifdef CONFIG_TICK_ONESHOT
    SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
               tick_get_broadcast_oneshot_mask()->bits[0]);
#endif
    SEQ_printf(m, "\n");
#endif
    for_each_online_cpu(cpu)
        print_tickdevice(m, tick_get_device(cpu), cpu);
    SEQ_printf(m, "\n");
}
Example #3
File: nmi.c Project: HPSI/xen-v4v
int __init check_nmi_watchdog (void)
{
    static unsigned int __initdata prev_nmi_count[NR_CPUS];
    int cpu;
    
    if ( !nmi_watchdog )
        return 0;

    printk("Testing NMI watchdog --- ");

    for_each_online_cpu ( cpu )
        prev_nmi_count[cpu] = nmi_count(cpu);
    local_irq_enable();

    /* Wait for 10 ticks.  Busy-wait on all CPUs: the LAPIC counter that
     * the NMI watchdog uses only runs while the core's not halted */
    if ( nmi_watchdog == NMI_LOCAL_APIC )
        smp_call_function(wait_for_nmis, NULL, 0);
    wait_for_nmis(NULL);

    for_each_online_cpu ( cpu )
    {
        if ( nmi_count(cpu) - prev_nmi_count[cpu] <= 5 )
            printk("CPU#%d stuck. ", cpu);
        else
            printk("CPU#%d okay. ", cpu);
    }

    printk("\n");

    /*
     * Now that we know it works we can reduce NMI frequency to
     * something more reasonable; makes a difference in some configs.
     * There's a limit to how slow we can go because writing the perfctr
     * MSRs only sets the low 32 bits, with the top 8 bits sign-extended
     * from those, so it's not possible to set up a delay larger than
     * 2^31 cycles and smaller than (2^40 - 2^31) cycles. 
     * (Intel SDM, section 18.22.2)
     */
    if ( nmi_watchdog == NMI_LOCAL_APIC )
        nmi_hz = max(1ul, cpu_khz >> 20);

    return 0;
}
Example #4
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}
Example #5
/**
 * cpufreq_limit_put - release a min_freq/max_freq limit and free
 *			the cpufreq_limit_handle
 * @handle:	a cpufreq_limit_handle that has been requested
 */
int cpufreq_limit_put(struct cpufreq_limit_handle *handle)
{
	int i;

	if (handle == NULL || IS_ERR(handle))
		return -EINVAL;

	pr_debug("%s: %s,%lu,%lu\n", __func__, handle->label, handle->min,
			handle->max);

	mutex_lock(&cpufreq_limit_lock);
	list_del(&handle->node);
	mutex_unlock(&cpufreq_limit_lock);

	for_each_online_cpu(i)
		cpufreq_update_policy(i);

	kfree(handle);
	return 0;
}
Example #6
static int earlyshots_read_proc(char *page,
				char **start,
				off_t off, int count, int *eof, void *data)
{
	int i, len = 0;

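	/* Report the early-shot count for every online CPU */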
	for_each_online_cpu(i)
		len += sprintf(page + len, "CPU#%d: %u\n",
			       i, rthal_nmi_wds[i].early_shots);
	len -= off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
Example #7
File: rtas.c Project: 710leo/LVS
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !data->done) {
		rc = rtas_call(data->token, 0, 1, NULL);
		if (rc && rc != H_MULTI_THREADS_ACTIVE)
			printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
	}

	smp_rmb();
	if (rc || data->error)
		slb_set_size(slb_size);

	if (data->error)
		rc = data->error;

	data->error = rc;

	if (wake_when_done) {
		smp_wmb();
		data->done = 1;

		/* Ensure data->done is seen on all CPUs that are about to wake up
		 * as a result of the H_PROD below */
		mb();

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}
Example #8
void nmi_show_all_regs(void)
{
	int i;

	if (nmi_watchdog == NMI_NONE)
		return;
	if (system_state != SYSTEM_RUNNING) {
		printk("nmi_show_all_regs(): system state %d, not doing.\n",
			system_state);
		return;
	}
	printk("nmi_show_all_regs(): start on CPU#%d.\n",
		raw_smp_processor_id());
	dump_stack();

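	/* Flag every online CPU for a register dump, then busy-wait
	 * until each CPU clears its flag */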
	for_each_online_cpu(i)
		nmi_show_regs[i] = 1;
	for_each_online_cpu(i)
		while (nmi_show_regs[i] == 1)
			barrier();
}
Example #9
static void
splat_kmem_cache_test_debug(struct file *file, char *name,
			    kmem_cache_priv_t *kcp)
{
	int j;

	splat_vprint(file, name,
		     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
		     kcp->kcp_cache->skc_name, kcp->kcp_count,
		     (unsigned)kcp->kcp_cache->skc_slab_alloc,
		     (unsigned)kcp->kcp_cache->skc_slab_total,
		     (unsigned)kcp->kcp_cache->skc_obj_alloc,
		     (unsigned)kcp->kcp_cache->skc_obj_total);

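	/* Print each online CPU's magazine fill level (avail/size) */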
	for_each_online_cpu(j)
		splat_print(file, "%u/%u ",
			     kcp->kcp_cache->skc_mag[j]->skm_avail,
			     kcp->kcp_cache->skc_mag[j]->skm_size);

	splat_print(file, "%s\n", "");
}
Example #10
static int __init msr_init(void)
{
	int i, err = 0;
	i = 0;

	if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
		pr_err("unable to get major %d for msr\n", MSR_MAJOR);
		err = -EBUSY;
		goto out;
	}
	msr_class = class_create(THIS_MODULE, "msr");
	if (IS_ERR(msr_class)) {
		err = PTR_ERR(msr_class);
		goto out_chrdev;
	}
	msr_class->devnode = msr_devnode;

	cpu_notifier_register_begin();
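	/* Create a per-CPU msr device node for each online CPU */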
	for_each_online_cpu(i) {
		err = msr_device_create(i);
		if (err != 0)
			goto out_class;
	}
	__register_hotcpu_notifier(&msr_class_cpu_notifier);
	cpu_notifier_register_done();

	err = 0;
	goto out;

out_class:
	i = 0;
	for_each_online_cpu(i)
		msr_device_destroy(i);
	cpu_notifier_register_done();
	class_destroy(msr_class);
out_chrdev:
	__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
out:
	return err;
}
Example #11
void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

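		/* Push the recalculated threshold to every online CPU's pageset */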
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
Example #12
static void do_nmi_stats(unsigned char key)
{
    int i;
    struct domain *d;
    struct vcpu *v;

    printk("CPU\tNMI\n");
    for_each_online_cpu ( i )
        printk("%3d\t%3d\n", i, nmi_count(i));

    if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
         ((v = d->vcpu[0]) == NULL) )
        return;

    i = v->async_exception_mask & (1 << VCPU_TRAP_NMI);
    if ( v->nmi_pending || i )
        printk("dom0 vcpu0: NMI %s%s\n",
               v->nmi_pending ? "pending " : "",
               i ? "masked " : "");
    else
        printk("dom0 vcpu0: NMI neither pending nor masked\n");
}
Example #13
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
Example #14
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
    int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
    seq_printf(p, "%*s: ", prec, "STK");
    for_each_online_cpu(j)
        seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
    seq_puts(p, "  Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
    seq_printf(p, "%*s: ", prec, "IST");
    for_each_online_cpu(j)
        seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
    seq_puts(p, "  Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
    seq_printf(p, "%*s: ", prec, "RES");
    for_each_online_cpu(j)
        seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
    seq_puts(p, "  Rescheduling interrupts\n");
    seq_printf(p, "%*s: ", prec, "CAL");
    for_each_online_cpu(j)
        seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
    seq_puts(p, "  Function call interrupts\n");
#endif
    seq_printf(p, "%*s: ", prec, "UAH");
    for_each_online_cpu(j)
        seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
    seq_puts(p, "  Unaligned access handler traps\n");
    seq_printf(p, "%*s: ", prec, "FPA");
    for_each_online_cpu(j)
        seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
    seq_puts(p, "  Floating point assist traps\n");
    seq_printf(p, "%*s: ", prec, "TLB");
    for_each_online_cpu(j)
        seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
    seq_puts(p, "  TLB shootdowns\n");
    return 0;
}
Example #15
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

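		/* No target CPU: toggle the task's slot on every online CPU */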
		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}
Example #16
/*
 * fcoe_sw_lport_config - sets up the fc_lport
 * @lp: ptr to the fc_lport
 *
 * Returns: 0 for success
 *
 */
static int fcoe_sw_lport_config(struct fc_lport *lp)
{
	int i = 0;

	lp->link_status = 0;
	lp->max_retry_count = 3;
	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
	lp->r_a_tov = 2 * 2 * 1000;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);

	/*
	 * allocate per cpu stats block
	 */
	for_each_online_cpu(i)
		lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
					   GFP_KERNEL);

	/* lport fc_lport related configuration */
	fc_lport_config(lp);

	return 0;
}
Example #17
static void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	unsigned int cpu;

	switch (shortcut) {
	case APIC_DEST_SELF:
		notify_remote_via_ipi(vector, smp_processor_id());
		break;
	case APIC_DEST_ALLBUT:
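		/* IPI every online CPU except the sender */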
		for_each_online_cpu(cpu)
			if (cpu != smp_processor_id())
				notify_remote_via_ipi(vector, cpu);
		break;
	case APIC_DEST_ALLINC:
		for_each_online_cpu(cpu)
			notify_remote_via_ipi(vector, cpu);
		break;
	default:
		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
		       vector);
		break;
	}
}
Example #18
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	stop_topology_update();
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
	       !atomic_read(&data->error))
		rc = rtas_call(data->token, 0, 1, NULL);

	if (rc || atomic_read(&data->error)) {
		printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
		slb_set_size(slb_size);
	}

	if (atomic_read(&data->error))
		rc = atomic_read(&data->error);

	atomic_set(&data->error, rc);
	start_topology_update();

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}
Example #19
static int sched_debug_show(struct seq_file *m, void *v)
{
	u64 now = ktime_to_ns(ktime_get());
	int cpu;

	SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

	SEQ_printf(m, "now at %Lu.%06ld msecs\n", SPLIT_NS(now));

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	P(jiffies);
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	PN(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);

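	/* Dump scheduler state for every online CPU */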
	for_each_online_cpu(cpu)
		print_cpu(m, cpu);

	SEQ_printf(m, "\n");

	return 0;
}
Example #20
static int __init msr_init(void)
{
	int i, err = 0;
	i = 0;

	if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
		printk(KERN_ERR "msr: unable to get major %d for msr\n",
		       MSR_MAJOR);
		err = -EBUSY;
		goto out;
	}
	msr_class = class_create(THIS_MODULE, "msr");
	if (IS_ERR(msr_class)) {
		err = PTR_ERR(msr_class);
		goto out_chrdev;
	}
	for_each_online_cpu(i) {
		err = msr_device_create(i);
		if (err != 0)
			goto out_class;
	}
	register_hotcpu_notifier(&msr_class_cpu_notifier);

	err = 0;
	goto out;

out_class:
	i = 0;
	for_each_online_cpu(i)
		msr_device_destroy(i);
	class_destroy(msr_class);
out_chrdev:
	unregister_chrdev(MSR_MAJOR, "cpu/msr");
out:
	return err;
}
Example #21
static void dvfs_core_work_handler(struct work_struct *work)
{
	u32 fsvai;
	u32 reg;
	u32 curr_cpu = 0;
	int ret = 0;
	int low_freq_bus_ready = 0;
	int bus_incr = 0, cpu_dcr = 0;
#ifdef CONFIG_ARCH_MX5
	int disable_dvfs_irq = 0;
#endif
	int cpu;

	low_freq_bus_ready = low_freq_bus_used();

	/* Check DVFS frequency adjustment interrupt status */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	fsvai = (reg & MXC_DVFSCNTR_FSVAI_MASK) >> MXC_DVFSCNTR_FSVAI_OFFSET;
	/* Check FSVAI, FSVAI=0 is error */
	if (fsvai == FSVAI_FREQ_NOCHANGE) {
		/* Do nothing. Freq change is not required */
		goto END;
	}
	curr_cpu = clk_get_rate(cpu_clk);
	/* If FSVAI indicates freq down,
	   check that arm-clk is not at the lowest frequency */
	if (fsvai == FSVAI_FREQ_DECREASE) {
		if (curr_cpu <= cpu_op_tbl[cpu_op_nr - 1].cpu_rate) {
			minf = 1;
			mutex_lock(&bus_freq_mutex);
			if (low_bus_freq_mode) {
				mutex_unlock(&bus_freq_mutex);
				goto END;
			} else
				mutex_unlock(&bus_freq_mutex);
		} else {
			/* freq down */
			curr_op++;
			maxf = 0;
			if (curr_op >= cpu_op_nr) {
				curr_op = cpu_op_nr - 1;
				goto END;
			}
			cpu_dcr = 1;
			dvfs_load_config(curr_op);
		}
	} else {
		if (curr_cpu == cpu_op_tbl[0].cpu_rate) {
			maxf = 1;
			goto END;
		} else {
			mutex_lock(&bus_freq_mutex);
			if (!high_bus_freq_mode &&
				dvfs_config_setpoint == (cpu_op_nr + 1)) {
				/* bump up LP freq first. */
				bus_incr = 1;
				dvfs_load_config(cpu_op_nr);
			} else {
				/* freq up */
				curr_op = 0;
				maxf = 1;
				minf = 0;
				dvfs_load_config(0);
			}
			mutex_unlock(&bus_freq_mutex);
		}
	}

	low_freq_bus_ready = low_freq_bus_used();
	mutex_lock(&bus_freq_mutex);
	if ((curr_op == cpu_op_nr - 1) && (!low_bus_freq_mode)
	    && (low_freq_bus_ready) && !bus_incr) {
		if (!minf)
			set_cpu_freq(curr_op);
		/* If dvfs_core_op is greater than cpu_op_nr, it implies
		 * we support LPAPM mode for this platform.
		 */
		if (dvfs_core_op > cpu_op_nr) {
			set_low_bus_freq();
			dvfs_load_config(cpu_op_nr + 1);
		}
		mutex_unlock(&bus_freq_mutex);
	} else {
		if (!high_bus_freq_mode) {
			mutex_unlock(&bus_freq_mutex);
			set_high_bus_freq(1);
		} else
			mutex_unlock(&bus_freq_mutex);
		if (!bus_incr)
			ret = set_cpu_freq(curr_op);
		bus_incr = 0;
	}

END:
	if (cpufreq_trig_needed == 1) {
		/*Fix loops-per-jiffy */
#ifdef CONFIG_SMP
		for_each_online_cpu(cpu)
			per_cpu(cpu_data, cpu).loops_per_jiffy =
			dvfs_cpu_jiffies(per_cpu(cpu_data, cpu).loops_per_jiffy,
				curr_cpu / 1000, clk_get_rate(cpu_clk) / 1000);
#else
		u32 old_loops_per_jiffy = loops_per_jiffy;

		loops_per_jiffy =
			dvfs_cpu_jiffies(old_loops_per_jiffy,
				curr_cpu/1000, clk_get_rate(cpu_clk) / 1000);
#endif
#if defined (CONFIG_CPU_FREQ)
		/* Fix CPU frequency for CPUFREQ. */
		for (cpu = 0; cpu < num_online_cpus(); cpu++)
			cpufreq_get(cpu);
#endif
		cpufreq_trig_needed = 0;
	}

	/* Set MAXF, MINF */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	reg = (reg & ~(MXC_DVFSCNTR_MAXF_MASK | MXC_DVFSCNTR_MINF_MASK));
	reg |= maxf << MXC_DVFSCNTR_MAXF_OFFSET;
	reg |= minf << MXC_DVFSCNTR_MINF_OFFSET;

	/* Enable DVFS interrupt */
	/* FSVAIM=0 */
	reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
	reg |= FSVAI_FREQ_NOCHANGE;
	/* LBFL=1 */
	reg = (reg & ~MXC_DVFSCNTR_LBFL);
	reg |= MXC_DVFSCNTR_LBFL;
	__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);
	/*Unmask GPC1 IRQ */
	reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
	reg &= ~MXC_GPCCNTR_GPCIRQM;
	__raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

}
Example #22
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
static int power_saving_thread(void *data)
{
	struct sched_param param = {.sched_priority = 1};
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_setscheduler(current, SCHED_RR, &param);
	set_freezable();

	while (!kthread_should_stop()) {
		int cpu;
		u64 expire_time;

		try_to_freeze();

		/* round robin to cpus */
		if (last_jiffies + round_robin_time * HZ < jiffies) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			if (lapic_detected_unstable && !lapic_marked_unstable) {
				int i;
				/* LAPIC could halt in idle, so notify users */
				for_each_online_cpu(i)
					clockevents_notify(
						CLOCK_EVT_NOTIFY_BROADCAST_ON,
						&i);
				lapic_marked_unstable = 1;
			}
			local_irq_disable();
			cpu = smp_processor_id();
			if (lapic_marked_unstable)
				clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
			stop_critical_timings();

			__monitor((void *)&current_thread_info()->flags, 0, 0);
			smp_mb();
			if (!need_resched())
				__mwait(power_saving_mwait_eax, 1);

			start_critical_timings();
			if (lapic_marked_unstable)
				clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
			local_irq_enable();

			if (jiffies > expire_time) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * current sched_rt has a threshold for rt task running time.
		 * When a rt task uses 95% CPU time, the rt thread will be
		 * scheduled out for 5% CPU time to not starve other tasks. But
		 * the mechanism only works when every CPU has an RT task
		 * running; if one CPU has no RT task, RT tasks from the other
		 * CPUs will borrow CPU time from it and push RT usage above
		 * 95%. To make 'avoid starvation' work, take a nap here.
		 */
		if (do_sleep)
			schedule_timeout_killable(HZ * idle_pct / 100);
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
	int rc = -ENOMEM;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"acpi_pad/%d", ps_tsk_num);
	rc = PTR_RET(ps_tsks[ps_tsk_num]);
	if (!rc)
		ps_tsk_num++;
	else
		ps_tsks[ps_tsk_num] = NULL;

	return rc;
}
Example #23
static void set_cpu_config(enum ux500_uc new_uc)
{
	bool update = false;
	int cpu;
	int min_freq, max_freq;

	if (new_uc != current_uc)
		update = true;
	else if ((user_config_updated) && (new_uc == UX500_UC_USER))
		update = true;

	pr_debug("%s: new_usecase=%d, current_usecase=%d, update=%d\n",
		__func__, new_uc, current_uc, update);

	if (!update)
		goto exit;

	/* Cpu hotplug */
	if (!(usecase_conf[new_uc].second_cpu_online) &&
	    (num_online_cpus() > 1))
		cpu_down(1);
	else if ((usecase_conf[new_uc].second_cpu_online) &&
		 (num_online_cpus() < 2))
		cpu_up(1);

	if (usecase_conf[new_uc].max_arm)
		max_freq = usecase_conf[new_uc].max_arm;
	else
		max_freq = system_max_freq;

	if (usecase_conf[new_uc].min_arm)
		min_freq = usecase_conf[new_uc].min_arm;
	else
		min_freq = system_min_freq;

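	/* Apply the new min/max frequency limits to every online CPU */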
	for_each_online_cpu(cpu)
		set_cpufreq(cpu,
			    min_freq,
			    max_freq);

	/* Kinda doing the job twice, but this is needed for reference keeping */
	if (usecase_conf[new_uc].min_arm)
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "usecase",
					     usecase_conf[new_uc].min_arm);
	else
		prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					     "usecase",
					     PRCMU_QOS_DEFAULT_VALUE);

	/* Cpu idle */
	cpuidle_set_multiplier(usecase_conf[new_uc].cpuidle_multiplier);

	/* L2 prefetch */
	if (usecase_conf[new_uc].l2_prefetch_en)
		outer_prefetch_enable();
	else
		outer_prefetch_disable();

	/* Force cpuidle state */
	cpuidle_force_state(usecase_conf[new_uc].forced_state);

	/* QOS override */
	prcmu_qos_voice_call_override(usecase_conf[new_uc].vc_override);

	current_uc = new_uc;

exit:
	/* Its ok to clear even if new_uc != UX500_UC_USER */
	user_config_updated = false;
}
Example #24
static void exynos4_handler_tmu_state(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct s5p_tmu_info *info =
		container_of(delayed_work, struct s5p_tmu_info, polling);
	struct s5p_platform_tmu *data = info->dev->platform_data;
	unsigned int cur_temp;
	static int auto_refresh_changed;
	static int check_handle;
	int trend = 0;
	int cpu = 0;

	mutex_lock(&tmu_lock);

	cur_temp = get_curr_temp(info);
	trend = cur_temp - info->last_temperature;
	pr_debug("curr_temp = %u, temp_diff = %d\n", cur_temp, trend);

	switch (info->tmu_state) {
#if defined(CONFIG_TC_VOLTAGE)
	case TMU_STATUS_TC:
		/* lock has priority over unlock */
		if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
		} else if (cur_temp >= data->ts.stop_tc) {
			if (exynos_tc_volt(info, 0) < 0) {
				pr_err("TMU: unlock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_NORMAL;
				pr_info("change state: tc -> normal.\n");
			}
		}
		/* free if upper limit is locked */
		if (check_handle) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			check_handle = 0;
		}
		break;
#endif
	case TMU_STATUS_NORMAL:
		/* 1. change state: 1st-throttling */
		if (cur_temp >= data->ts.start_1st_throttle) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: normal->throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0) {
				pr_err("TMU: lock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_TC;
				pr_info("change state: normal->tc.\n");
			}
#endif
		/* 2. polling end and uevent */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (cur_temp <= data->ts.stop_mem_throttle)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("normal: free cpufreq_limit & interrupt enable.\n");

			for_each_online_cpu(cpu)
				cpufreq_update_policy(cpu);

			/* clear to prevent an interrupt from the pending bit */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			exynos_interrupt_enable(info, 1);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_THROTTLED:
		/* 1. change state: 2nd-throttling or warning */
		if (cur_temp >= data->ts.start_2nd_throttle) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: 1st throttle->2nd throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_1st_throttle) &&
			!(check_handle & THROTTLE_FLAG)) {
			if (check_handle & WARNING_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(WARNING_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_1st_throttle);
			check_handle |= THROTTLE_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("throttling: set cpufreq upper limit.\n");
		/* 3. change state: normal */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_NORMAL;
			pr_info("change state: 1st throttle->normal.\n");
		}
		break;

	case TMU_STATUS_WARNING:
		/* 1. change state: tripping */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			pr_info("change state: 2nd throttle->trip\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_2nd_throttle) &&
			!(check_handle & WARNING_FLAG)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_2nd_throttle);

			check_handle |= WARNING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("2nd throttle: cpufreq is limited.\n");
		/* 3. change state: 1st-throttling */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: 2nd throttle->1st throttle, "
				"and release cpufreq upper limit.\n");
		}
		break;

	case TMU_STATUS_TRIPPED:
		/* 1. call uevent to shut-down */
		if ((cur_temp >= data->ts.start_tripping) &&
			(trend > 0) && !(check_handle & TRIPPING_FLAG)) {
			notify_change_of_tmu_state(info);
			pr_info("tripping: on waiting shutdown.\n");
			check_handle |= TRIPPING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. change state: 2nd-throttling or warning */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
				&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: trip->2nd throttle, "
				"Check! occurred only in test mode.\n");
		}
		/* 3. chip protection: kernel panic as SW workaround */
		if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
			panic("Emergency!!!! tripping is not treated!\n");
			/* clear to prevent an interrupt from the pending bit */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;

	case TMU_STATUS_INIT:
		/* send tmu initial status to platform */
		disable_irq(info->irq);
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else if (cur_temp >= data->ts.start_2nd_throttle)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp >= data->ts.start_1st_throttle)
			info->tmu_state = TMU_STATUS_THROTTLED;
		else if (cur_temp <= data->ts.stop_1st_throttle)
			info->tmu_state = TMU_STATUS_NORMAL;

		notify_change_of_tmu_state(info);
		pr_info("%s: inform init state to platform.\n", __func__);
		break;

	default:
		pr_warn("Bug: checked tmu_state.\n");
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temp compensation is needed or not */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else
			info->tmu_state = TMU_STATUS_WARNING;
		break;
	} /* end */

	info->last_temperature = cur_temp;

	/* reschedule the next work */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	mutex_unlock(&tmu_lock);

	return;
}
Example #25
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irqaction *action;

		atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc[i].chip->typename);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ;action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max) max = hist;
				if (hist < min) min = hist;
			}

			avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
					min,avg,max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}

	return 0;
}
Example #26
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_printf(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
Example #27
static void k3_wdt_kick_stop(void)
{
	int cpu;
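	/* Stop the watchdog kick on every online CPU */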
	for_each_online_cpu(cpu)
		k3_wdt_kick_stop_oncpu(cpu);
}
Example #28
/*!
 * This function disables the DVFS module.
 */
void stop_dvfs(void)
{
	u32 reg = 0;
	unsigned long flags;
	u32 curr_cpu;
	int cpu;
#ifndef CONFIG_SMP
	unsigned long old_loops_per_jiffy;
#endif

	if (dvfs_core_is_active) {

		/* Mask dvfs irq, disable DVFS */
		reg = __raw_readl(dvfs_data->membase
				  + MXC_DVFSCORE_CNTR);
		/* FSVAIM=1 */
		reg |= MXC_DVFSCNTR_FSVAIM;
		__raw_writel(reg, dvfs_data->membase
				  + MXC_DVFSCORE_CNTR);

		curr_op = 0;
		mutex_lock(&bus_freq_mutex);
		if (!high_bus_freq_mode) {
			mutex_unlock(&bus_freq_mutex);
			set_high_bus_freq(1);
		} else
			mutex_unlock(&bus_freq_mutex);

		curr_cpu = clk_get_rate(cpu_clk);
		if (curr_cpu != cpu_op_tbl[curr_op].cpu_rate) {
			set_cpu_freq(curr_op);

			/*Fix loops-per-jiffy */
#ifdef CONFIG_SMP
			for_each_online_cpu(cpu)
				per_cpu(cpu_data, cpu).loops_per_jiffy =
				dvfs_cpu_jiffies(per_cpu(cpu_data, cpu).loops_per_jiffy,
					curr_cpu/1000, clk_get_rate(cpu_clk) / 1000);
#else
		old_loops_per_jiffy = loops_per_jiffy;

		loops_per_jiffy =
			dvfs_cpu_jiffies(old_loops_per_jiffy,
				curr_cpu/1000, clk_get_rate(cpu_clk) / 1000);
#endif

#if defined (CONFIG_CPU_FREQ)
			/* Fix CPU frequency for CPUFREQ. */
			for (cpu = 0; cpu < num_online_cpus(); cpu++)
				cpufreq_get(cpu);
#endif
		}
		spin_lock_irqsave(&mxc_dvfs_core_lock, flags);

		reg = __raw_readl(dvfs_data->membase
				  + MXC_DVFSCORE_CNTR);
		reg = (reg & ~MXC_DVFSCNTR_DVFEN);
		__raw_writel(reg, dvfs_data->membase
				  + MXC_DVFSCORE_CNTR);

		spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

		dvfs_core_is_active = 0;

		clk_disable(dvfs_clk);
	}

	printk(KERN_DEBUG "DVFS is stopped\n");
}
Example #29
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}
Example #30
static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "PND");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
	seq_printf(p, "  Performance pending work\n");
#endif
	if (generic_interrupt_extension) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
# ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
# endif
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}