Example no. 1
static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
			  NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			return err;
		}
	}

	if (has_cstate_pkg) {
		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	return 0;
}
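The error path above relies on cstate_cleanup() to unwind what was already registered; a plausible sketch of such a helper, mirroring the registrations in cstate_init() (the _nocalls variants and the exact unwind order are assumptions, not taken from this source), is:

static inline void cstate_cleanup(void)
{
	/* Drop the hotplug states registered by cstate_init() */
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	/* Unregister whichever PMUs made it through registration */
	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

Example no. 2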
static int __init cpum_cf_init(void)
{
	int rc;

	if (!cpum_cf_avail())
		return -ENODEV;

	/* clear bit 15 of cr0 to unauthorize problem-state to
	 * extract measurement counters */
	ctl_clear_bit(0, 48);

	/* register handler for measurement-alert interruptions */
	rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
				   cpumf_measurement_alert);
	if (rc) {
		pr_err("Registering for CPU-measurement alerts "
		       "failed with rc=%i\n", rc);
		return rc;
	}

	rc = cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
				"perf/s390/cf:online",
				cpum_cf_online_cpu, cpum_cf_offline_cpu);
	if (!rc)
		cpum_cf_initalized = true;

	return rc;
}
Example no. 3
/*
 * clockevent setup for boot CPU
 */
static int __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0) {
		pr_err("clockevent: missing irq");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret) {
		pr_err("clockevent: missing clk");
		return ret;
	}

	/* Needs a priori irq_set_percpu_devid() done in the intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret) {
		pr_err("clockevent: unable to request irq\n");
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"AP_ARC_TIMER_STARTING",
				arc_timer_starting_cpu,
				arc_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state");
		return ret;
	}
	return 0;
}
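Example no. 4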
static int __init amd_power_pmu_init(void)
{
	int ret;

	if (!x86_match_cpu(cpu_match))
		return 0;

	if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
		return -ENODEV;

	cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);

	if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
		pr_err("Failed to read max compute unit power accumulator MSR\n");
		return -ENODEV;
	}

	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
			  "perf/x86/amd/power:online",
			  power_cpu_init, power_cpu_exit);

	ret = perf_pmu_register(&pmu_class, "power", -1);
	if (WARN_ON(ret)) {
		pr_warn("AMD Power PMU registration failed\n");
		return ret;
	}

	pr_info("AMD Power PMU detected\n");
	return ret;
}
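Example no. 5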
static int __init msr_init(void)
{
	int err;

	if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
		pr_err("unable to get major %d for msr\n", MSR_MAJOR);
		return -EBUSY;
	}
	msr_class = class_create(THIS_MODULE, "msr");
	if (IS_ERR(msr_class)) {
		err = PTR_ERR(msr_class);
		goto out_chrdev;
	}
	msr_class->devnode = msr_devnode;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/msr:online",
				msr_device_create, msr_device_destroy);
	if (err < 0)
		goto out_class;
	cpuhp_msr_state = err;
	return 0;

out_class:
	class_destroy(msr_class);
out_chrdev:
	__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
	return err;
}
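Since CPUHP_AP_ONLINE_DYN allocates the state number at runtime, the id saved in cpuhp_msr_state is what any teardown must pass back to cpuhp_remove_state(); a minimal sketch of such a module-exit path (the function name is illustrative) is:

static void __exit msr_exit(void)
{
	/* Undo msr_init() in reverse order */
	cpuhp_remove_state(cpuhp_msr_state);
	class_destroy(msr_class);
	__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
}

Example no. 6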
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/*
	 * We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
				nmi_cpu_online, nmi_cpu_down_prep);
	if (err < 0)
		goto fail_nmi;
	cpuhp_nmi_online = err;
	return 0;
fail_nmi:
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
	free_msrs();
	return err;
}
Example no. 7
static int __init csky_mptimer_init(struct device_node *np)
{
	int ret, cpu, cpu_rollback;
	struct timer_of *to = NULL;

	/*
	 * The csky_mptimer is designed for C-SKY SMP multi-processors:
	 * every core has its own private irq and regs for clkevt and
	 * clksrc.
	 *
	 * The regs are accessed via the mfcr/mtcr CPU instructions rather
	 * than an mmio mapping, so no mmio address is needed in the dts,
	 * but the clk and irq number still must be given.
	 *
	 * The mptimer's private irq number is the same on every core,
	 * hence the request_percpu_irq() call below.
	 */
	csky_mptimer_irq = irq_of_parse_and_map(np, 0);
	if (csky_mptimer_irq <= 0)
		return -EINVAL;

	ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
				 "csky_mp_timer", &csky_to);
	if (ret)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		to = per_cpu_ptr(&csky_to, cpu);
		ret = timer_of_init(np, to);
		if (ret)
			goto rollback;
	}

	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));

	ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
				"clockevents/csky/timer:starting",
				csky_mptimer_starting_cpu,
				csky_mptimer_dying_cpu);
	if (ret)
		return -EINVAL;

	return 0;

rollback:
	for_each_possible_cpu(cpu_rollback) {
		if (cpu_rollback == cpu)
			break;

		to = per_cpu_ptr(&csky_to, cpu_rollback);
		timer_of_cleanup(to);
	}
	return -EINVAL;
}
Example no. 8
/**
 * kvm_vgic_hyp_init - populate the kvm_vgic_global_state variable
 * according to the host GIC model, calling either vgic_v2_probe or
 * vgic_v3_probe, which registers the KVM device that a guest can
 * instantiate later on.
 */
int kvm_vgic_hyp_init(void)
{
	const struct gic_kvm_info *gic_kvm_info;
	int ret;

	gic_kvm_info = gic_get_kvm_info();
	if (!gic_kvm_info)
		return -ENODEV;

	if (!gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		break;
	default:
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
				"AP_KVM_ARM_VGIC_INIT_STARTING",
				vgic_init_cpu_starting, vgic_init_cpu_dying);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;

out_free_irq:
	free_percpu_irq(kvm_vgic_global_state.maint_irq,
			kvm_get_running_vcpus());
	return ret;
}
Example no. 9
static int __init cpuinfo_regs_init(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);

		kobject_init(&info->kobj, &cpuregs_kobj_type);
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
				cpuid_cpu_online, cpuid_cpu_offline);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}
	return 0;
}
Example no. 10
static int gic_clockevent_init(void)
{
	int ret;

	if (!gic_frequency)
		return -ENXIO;

	ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
	if (ret < 0) {
		pr_err("IRQ %d setup failed (%d)\n", gic_timer_irq, ret);
		return ret;
	}

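	/*
	 * A STARTING-level state: both callbacks run on the hotplugged
	 * CPU itself, with interrupts disabled.
	 */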
	cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
			  "clockevents/mips/gic/timer:starting",
			  gic_starting_cpu, gic_dying_cpu);
	return 0;
}
Example no. 11
static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				  bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		/* Install and invoke hotplug callbacks */
		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
					"AP_QCOM_TIMER_STARTING",
					msm_local_timer_starting_cpu,
					msm_local_timer_dying_cpu);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}
	}

err:
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);

	return res;
}
Example no. 12
static __init int tboot_late_init(void)
{
	if (!tboot_enabled())
		return 0;

	tboot_create_trampoline();

	atomic_set(&ap_wfs_count, 0);
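	/* Teardown-only state: nothing to do when a CPU comes online */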
	cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
			  tboot_dying_cpu);
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tboot_log", S_IRUSR,
			arch_debugfs_dir, NULL, &tboot_log_fops);
#endif

	acpi_os_set_prepare_sleep(&tboot_sleep);
	acpi_os_set_prepare_extended_sleep(&tboot_extended_sleep);
	return 0;
}
Example no. 13
static int nmi_timer_setup(void)
{
	int err;
	u64 period;

	/* clock cycles per tick: */
	period = (u64)cpu_khz * 1000;
	do_div(period, HZ);
	nmi_timer_attr.sample_period = period;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "oprofile/nmi:online",
				nmi_timer_cpu_online, nmi_timer_cpu_predown);
	if (err < 0) {
		nmi_timer_shutdown();
		return err;
	}
	hp_online = err;
	return 0;
}
Example no. 14
int enable_swap_slots_cache(void)
{
	int ret = 0;

	mutex_lock(&swap_slots_cache_enable_mutex);
	if (swap_slot_cache_initialized) {
		__reenable_swap_slots_cache();
		goto out_unlock;
	}

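	/*
	 * CPUHP_AP_ONLINE_DYN allocates the state dynamically, so any
	 * non-negative return value means success here.
	 */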
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
				alloc_swap_slot_cache, free_slot_cache);
	if (ret < 0)
		goto out_unlock;
	swap_slot_cache_initialized = true;
	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
	return ret < 0 ? ret : 0;
}
Example no. 15
static __init int intel_epb_init(void)
{
	int ret;

	if (!boot_cpu_has(X86_FEATURE_EPB))
		return -ENODEV;

	ret = cpuhp_setup_state(CPUHP_AP_X86_INTEL_EPB_ONLINE,
				"x86/intel/epb:online", intel_epb_online,
				intel_epb_offline);
	if (ret < 0)
		goto err_out_online;

	register_syscore_ops(&intel_epb_syscore_ops);
	return 0;

err_out_online:
	cpuhp_remove_state(CPUHP_AP_X86_INTEL_EPB_ONLINE);
	return ret;
}
Example no. 16
static void __init acpi_cpufreq_boost_init(void)
{
	int ret;

	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
		return;

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);

	/*
	 * This calls the online callback on all online CPUs and forces
	 * all MSRs to the same value.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
				cpufreq_boost_online, cpufreq_boost_down_prep);
	if (ret < 0) {
		pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
		return;
	}
	acpi_cpufreq_online = ret;
}
Example no. 17
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	/*
	 * We can't request IRQs yet, since we don't know the cookie value
	 * until we know which CPUs share the same logical PMU. We'll handle
	 * that in arm_pmu_acpi_cpu_starting().
	 */
	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
Example no. 18
int __init metag_generic_timer_init(void)
{
	/*
	 * On Meta 2 SoCs, the actual frequency of the timer is based on the
	 * Meta core clock speed divided by an integer, so it is only
	 * approximately 1MHz. Calculating the real frequency here drastically
	 * reduces clock skew on these SoCs.
	 */
#ifdef CONFIG_METAG_META21
	hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
#endif
	pr_info("Timer frequency: %u Hz\n", hwtimer_freq);

	clocksource_register_hz(&clocksource_metag, hwtimer_freq);

	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);

	/* Hook cpu boot to configure the CPU's timers */
	return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
				 "clockevents/metag:starting",
				 arch_timer_starting_cpu, NULL);
}
Example no. 19
static int __init nps_setup_clockevent(struct device_node *node)
{
	struct clk *clk;
	int ret;

	nps_timer0_irq = irq_of_parse_and_map(node, 0);
	if (nps_timer0_irq <= 0) {
		pr_err("clockevent: missing irq");
		return -EINVAL;
	}

	ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
	if (ret)
		return ret;

	/* Needs a priori irq_set_percpu_devid() done in the intc map function */
	ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)",
				 &nps_clockevent_device);
	if (ret) {
		pr_err("Couldn't request irq\n");
		clk_disable_unprepare(clk);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"clockevents/nps:starting",
				nps_timer_starting_cpu,
				nps_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state");
		clk_disable_unprepare(clk);
		free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
		return ret;
	}

	return 0;
}
Example no. 20
static int __init ledtrig_cpu_init(void)
{
	int cpu;
	int ret;

	/* Supports up to 9999 cpu cores */
	BUILD_BUG_ON(CONFIG_NR_CPUS > 9999);

	/*
	 * Registering a trigger for all CPUs.
	 */
	led_trigger_register_simple("cpu", &trig_cpu_all);

	/*
	 * Registering a CPU led trigger for each CPU core here ignores
	 * CPU hotplug, but from this point on CPU hotplug works fine
	 * with this trigger.
	 */
	for_each_possible_cpu(cpu) {
		struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);

		snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);

		led_trigger_register_simple(trig->name, &trig->_trig);
	}

	register_syscore_ops(&ledtrig_cpu_syscore_ops);

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "leds/trigger:starting",
				ledtrig_online_cpu, ledtrig_prepare_down_cpu);
	if (ret < 0)
		pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
		       ret);

	pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");

	return 0;
}
Example no. 21
static int topology_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
				 "base/topology:prepare", topology_add_dev,
				 topology_remove_dev);
}
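The prepare-stage callbacks here run on a control CPU before the new CPU is brought up (and after it is torn down); a plausible sketch of the pair, assuming a topology_attr_group attribute group like the one in drivers/base/topology.c, is:

static int topology_add_dev(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	/* Expose this CPU's topology attributes in sysfs */
	return sysfs_create_group(&dev->kobj, &topology_attr_group);
}

static int topology_remove_dev(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	sysfs_remove_group(&dev->kobj, &topology_attr_group);
	return 0;
}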
Example no. 22
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"AP_ARM_ARCH_TIMER_STARTING",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
Example no. 23
static int __init tegra_init_timer(struct device_node *np, bool tegra20)
{
	struct timer_of *to;
	int cpu, ret;

	to = this_cpu_ptr(&tegra_to);
	ret = timer_of_init(np, to);
	if (ret)
		goto out;

	timer_reg_base = timer_of_base(to);

	/*
	 * Configure microsecond timers to have 1MHz clock
	 * Config register is 0xqqww, where qq is "dividend", ww is "divisor"
	 * Uses n+1 scheme
	 */
	switch (timer_of_rate(to)) {
	case 12000000:
		usec_config = 0x000b; /* (11+1)/(0+1) */
		break;
	case 12800000:
		usec_config = 0x043f; /* (63+1)/(4+1) */
		break;
	case 13000000:
		usec_config = 0x000c; /* (12+1)/(0+1) */
		break;
	case 16800000:
		usec_config = 0x0453; /* (83+1)/(4+1) */
		break;
	case 19200000:
		usec_config = 0x045f; /* (95+1)/(4+1) */
		break;
	case 26000000:
		usec_config = 0x0019; /* (25+1)/(0+1) */
		break;
	case 38400000:
		usec_config = 0x04bf; /* (191+1)/(4+1) */
		break;
	case 48000000:
		usec_config = 0x002f; /* (47+1)/(0+1) */
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);

	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
		unsigned int base = tegra_base_for_cpu(cpu, tegra20);
		unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);

		/*
		 * TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
		 * parent clock.
		 */
		if (tegra20)
			cpu_to->of_clk.rate = 1000000;

		cpu_to->of_base.base = timer_reg_base + base;
		cpu_to->clkevt.cpumask = cpumask_of(cpu);
		cpu_to->clkevt.irq = irq_of_parse_and_map(np, idx);
		if (!cpu_to->clkevt.irq) {
			pr_err("failed to map irq for cpu%d\n", cpu);
			ret = -EINVAL;
			goto out_irq;
		}

		irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
		ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
				  IRQF_TIMER | IRQF_NOBALANCING,
				  cpu_to->clkevt.name, &cpu_to->clkevt);
		if (ret) {
			pr_err("failed to set up irq for cpu%d: %d\n",
			       cpu, ret);
			irq_dispose_mapping(cpu_to->clkevt.irq);
			cpu_to->clkevt.irq = 0;
			goto out_irq;
		}
	}

	sched_clock_register(tegra_read_sched_clock, 32, 1000000);

	ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
				    "timer_us", 1000000,
				    300, 32, clocksource_mmio_readl_up);
	if (ret)
		pr_err("failed to register clocksource: %d\n", ret);

#ifdef CONFIG_ARM
	register_current_timer_delay(&tegra_delay_timer);
#endif

	ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
				"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
				tegra_timer_stop);
	if (ret)
		pr_err("failed to set up cpu hp state: %d\n", ret);

	return ret;

out_irq:
	for_each_possible_cpu(cpu) {
		struct timer_of *cpu_to;

		cpu_to = per_cpu_ptr(&tegra_to, cpu);
		if (cpu_to->clkevt.irq) {
			free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
			irq_dispose_mapping(cpu_to->clkevt.irq);
		}
	}
out:
	timer_of_cleanup(to);

	return ret;
}
Example no. 24
static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
	u32 clr = 0, set = 0;
	int res;

	timer_base = of_iomap(np, 0);
	if (!timer_base) {
		pr_err("Failed to iomap");
		return -ENXIO;
	}

	local_base = of_iomap(np, 1);
	if (!local_base) {
		pr_err("Failed to iomap");
		return -ENXIO;
	}

	if (timer25Mhz) {
		set = TIMER0_25MHZ;
		enable_mask = TIMER0_EN;
	} else {
		clr = TIMER0_25MHZ;
		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
	}
	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
	local_timer_ctrl_clrset(clr, set);

	/*
	 * We use timer 0 as the clocksource and the private (local)
	 * timer 0 for clockevents.
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);

	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
		TIMER0_RELOAD_EN | enable_mask,
		TIMER0_RELOAD_EN | enable_mask);

	armada_370_delay_timer.freq = timer_clk;
	register_current_timer_delay(&armada_370_delay_timer);

	/*
	 * Set scale and timer for sched_clock.
	 */
	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

	res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
				    "armada_370_xp_clocksource",
				    timer_clk, 300, 32, clocksource_mmio_readl_down);
	if (res) {
		pr_err("Failed to initialize clocksource mmio");
		return res;
	}

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
	if (!armada_370_xp_evt)
		return -ENOMEM;

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				armada_370_xp_timer_interrupt,
				"armada_370_xp_per_cpu_tick",
				armada_370_xp_evt);
	/* Immediately configure the timer on the boot CPU */
	if (res) {
		pr_err("Failed to request percpu irq");
		return res;
	}

	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
				"clockevents/armada:starting",
				armada_370_xp_timer_starting_cpu,
				armada_370_xp_timer_dying_cpu);
	if (res) {
		pr_err("Failed to setup hotplug state and timer");
		return res;
	}

	register_syscore_ops(&armada_370_xp_timer_syscore_ops);

	return 0;
}