int k3v2_irqaffinity_register(unsigned int irq, int cpu)
{
	unsigned int i;

	if (cpu < 0 || cpu >= NR_CPUS) {
		pr_err("irq affinity set error: invalid cpu %d\n", cpu);
		return -EINVAL;
	}

	spin_lock(&irqaff_lock);

	for (i = 1; i < NR_CPUS; i++) {
		if (i == cpu)
			irqaffinity_mask[i][irq] = 1;
		else
			irqaffinity_mask[i][irq] = 0;
	}

	spin_unlock(&irqaff_lock);

	irq_set_affinity(irq, cpumask_of(cpu));

	pr_info("k3v2 irqaffinity irq %u register irq to cpu %d\n", irq, cpu);

	return 0;
}
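A minimal caller sketch for the helper above; MY_DEVICE_IRQ and the initcall hook are illustrative placeholders, not part of the original driver:

static int __init my_irq_affinity_init(void)
{
	/* pin a (hypothetical) device IRQ to CPU1 at boot */
	int ret = k3v2_irqaffinity_register(MY_DEVICE_IRQ, 1);

	if (ret)
		pr_warn("could not pin MY_DEVICE_IRQ to cpu1: %d\n", ret);
	return ret;
}
late_initcall(my_irq_affinity_init);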
static int __cpuinit s5p_ehci_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (!s5p_ehci_irq_no || cpu != s5p_ehci_irq_cpu)
		goto exit;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		irq_set_affinity(s5p_ehci_irq_no, cpumask_of(s5p_ehci_irq_cpu));
		pr_debug("%s: set ehci irq to cpu%d\n", __func__, cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		irq_set_affinity(s5p_ehci_irq_no, cpumask_of(0));
		pr_debug("%s: set ehci irq to cpu%d\n", __func__, 0);
		break;
	default:
		break;
	}
exit:
	return NOTIFY_OK;
}
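The notifier above only takes effect once it is hooked into the hotplug chain. A plausible registration sketch using the pre-cpuhp API of that era; the notifier_block name and init function are assumptions:

static struct notifier_block s5p_ehci_cpu_nb __cpuinitdata = {
	.notifier_call = s5p_ehci_cpu_notify,
};

static int __init s5p_ehci_cpu_notify_init(void)
{
	/* register_hotcpu_notifier() was the pre-cpuhp hotplug hook */
	return register_hotcpu_notifier(&s5p_ehci_cpu_nb);
}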
Example #3
static void mhi_move_interrupts(mhi_device_ctxt *mhi_dev_ctxt, u32 cpu)
{
	u32 irq_to_affin = 0;

	MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
				mhi_dev_ctxt->ev_ring_props[IPA_IN_EV_RING],
				irq_to_affin);
	irq_to_affin += mhi_dev_ctxt->dev_props->irq_base;
	irq_set_affinity(irq_to_affin, get_cpu_mask(cpu));

	MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
				mhi_dev_ctxt->ev_ring_props[IPA_OUT_EV_RING],
				irq_to_affin);
	irq_to_affin += mhi_dev_ctxt->dev_props->irq_base;
	irq_set_affinity(irq_to_affin, get_cpu_mask(cpu));
}
Example #4
static void tegra3_register_wake_timer(unsigned int cpu)
{
	int ret;

	ret = setup_irq(tegra_lp2wake_irq[cpu].irq, &tegra_lp2wake_irq[cpu]);
	if (ret) {
		pr_err("Failed to register LP2 timer IRQ for CPU %d: "
			"irq=%d, ret=%d\n", cpu,
			tegra_lp2wake_irq[cpu].irq, ret);
		goto fail;
	}

#ifdef CONFIG_SMP
	ret = irq_set_affinity(tegra_lp2wake_irq[cpu].irq, cpumask_of(cpu));
	if (ret) {
		pr_err("Failed to set affinity for LP2 timer IRQ to "
			"CPU %d: irq=%d, ret=%d\n", cpu,
			tegra_lp2wake_irq[cpu].irq, ret);
		goto fail;
	}
#endif

	test_lp2_wake_timer(cpu);
	return;
fail:
	tegra_lp2_in_idle(false);
}
Example #5
static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
				   unsigned long count, void *data)
{
	unsigned int irq = (int)(long)data, full_count = count;
	int err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
	    irq_balancing_disabled(irq))
		return -EIO;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		return err;

	if (!is_affinity_mask_valid(new_value))
		return -EINVAL;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
		return select_smp_affinity(irq) ? -EINVAL : full_count;

	irq_set_affinity(irq, new_value);

	return full_count;
}
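From userspace, this handler is reached by writing a hex CPU mask to /proc/irq/<n>/smp_affinity. A small illustrative program (the IRQ number is made up):

#include <stdio.h>

int main(void)
{
	/* "3" = CPU0 | CPU1; IRQ 32 is only an example */
	FILE *f = fopen("/proc/irq/32/smp_affinity", "w");

	if (!f)
		return 1;
	fputs("3\n", f);
	return fclose(f) ? 1 : 0;
}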
Example #6
static int __cpuinit mmp_percpu_timer_setup(struct clock_event_device *clk)
{
	u32 cpuid = hard_smp_processor_id();

	clk->features = CLOCK_EVT_FEAT_ONESHOT;
	clk->name = "apb_percpu_timer";
	clk->rating = 300;

	clk->irq = irq_map[cpuid].irq;
	clk->cpumask = cpumask_of(cpuid);
	clk->set_mode = percpu_timer_set_mode;
	clk->set_next_event = percpu_timer_set_next_event;

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	irq_map[cpuid].irq_act.dev_id = clk;
	irq_set_affinity(clk->irq, clk->cpumask);

	clockevents_config_and_register(clk, CLOCK_TICK_RATE_32KHZ,
					MIN_DELTA, MAX_DELTA);

	enable_irq(clk->irq);

	/* the broadcast clockevent is no longer needed */
	if (cpuid == 0) {
		remove_irq(ckevt.irq, &timer_irq);

#ifndef CONFIG_TZ_HYPERVISOR
		/* reset APB and functional domain */
		__raw_writel(APBC_RST, APBC_PXA1088_TIMERS2);
#endif /* CONFIG_TZ_HYPERVISOR */
	}

	return 0;
}
Example #7
static ssize_t irq_affinity_proc_write(struct file *file,
                                       const char __user *buffer, size_t count, loff_t *pos)
{
    unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
    cpumask_t new_value;
    int err;

    if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
            irq_balancing_disabled(irq))
        return -EIO;

    err = cpumask_parse_user(buffer, count, new_value);
    if (err)
        return err;

    if (!is_affinity_mask_valid(new_value))
        return -EINVAL;

    /*
     * Do not allow disabling IRQs completely - it's a too easy
     * way to make the system unusable accidentally :-) At least
     * one online CPU still has to be targeted.
     */
    if (!cpus_intersects(new_value, cpu_online_map))
        /* Special case for empty set - allow the architecture
           code to set default SMP affinity. */
        return irq_select_affinity(irq) ? -EINVAL : count;

    irq_set_affinity(irq, new_value);

    return count;
}
Example #8
void __init x3_bpc_mgmt_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_MACH_VU10)
	int tegra_bpc_gpio;
	int int_gpio;

	if (x3_get_hw_rev_pcb_version() == hw_rev_pcb_type_A)
		tegra_bpc_gpio = TEGRA_GPIO_PJ6;
	else
		tegra_bpc_gpio = TEGRA_GPIO_PX6;

	bpc_mgmt_platform_data.gpio_trigger = tegra_bpc_gpio;

	int_gpio = TEGRA_GPIO_TO_IRQ(tegra_bpc_gpio);
	tegra_gpio_enable(tegra_bpc_gpio);
#else
	int int_gpio = TEGRA_GPIO_TO_IRQ(TEGRA_BPC_TRIGGER);

	tegra_gpio_enable(TEGRA_BPC_TRIGGER);

#endif
	cpumask_setall(&(bpc_mgmt_platform_data.affinity_mask));
	irq_set_affinity_hint(int_gpio,
				&(bpc_mgmt_platform_data.affinity_mask));
	irq_set_affinity(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
#endif
	platform_device_register(&x3_bpc_mgmt_device);

	return;
}
Example #9
static __cpuinit int rk_timer_init_clockevent(struct clock_event_device *ce, unsigned int cpu)
{
	struct irqaction *irq = &timer.ce_irq[cpu];
	void __iomem *base = timer.ce_base[cpu];

	if (!base)
		return 0;

	ce->name = timer.ce_name[cpu];
	ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ce->set_next_event = rk_timer_set_next_event;
	ce->set_mode = rk_timer_set_mode;
	ce->irq = irq->irq;
	ce->cpumask = cpumask_of(cpu);

	writel_relaxed(1, base + TIMER_INT_STATUS);
	rk_timer_disable(base);

	irq->dev_id = ce;
	irq_set_affinity(irq->irq, cpumask_of(cpu));
	setup_irq(irq->irq, irq);

	clockevents_config_and_register(ce, 24000000, 0xF, 0xFFFFFFFF);

	return 0;
}
Example #10
static void tegra3_suspend_wake_timer(unsigned int cpu)
{
	cpumask_clear_cpu(cpu, &wake_timer_ready);
#ifdef CONFIG_SMP
	/* Reassign the affinity of the wake IRQ to CPU 0. */
	(void)irq_set_affinity(tegra_lp2wake_irq[cpu].irq, cpumask_of(0));
#endif
}
Example #11
/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			if (!tick_nohz_full_cpu(cpu))
				tick_do_timer_cpu = cpu;
			else
				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
Example #12
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, i);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, i));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
					irq);
				return err;
			}

			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}
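The per-CPU branch above relies on a small cross-call helper run via on_each_cpu(). A hedged reconstruction of what cpu_pmu_enable_percpu_irq() plausibly looks like:

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	/* runs on every CPU; unmasks that CPU's copy of the per-CPU IRQ */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}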
Example #13
/*
 * Setup the local clock events for a CPU.
 */
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	struct kona_td kona_td;
	struct timer_ch_cfg config;

	/* allocate an AON timer channel as local tick timer
	 */
	kona_td = (struct kona_td)__get_cpu_var(percpu_kona_td);

	if (!kona_td.allocated) {
		kona_td.kona_timer =
		    kona_timer_request(TICK_TIMER_NAME,
				       TICK_TIMER_OFFSET + cpu);
		if (kona_td.kona_timer) {
			kona_td.allocated = true;
		} else {
			pr_err("%s: Failed to allocate %s channel %d as"
			       "CPU %d local tick device\n", __func__,
			       TICK_TIMER_NAME,
			       TICK_TIMER_OFFSET + cpu, cpu);
			return -ENXIO;
		}
	}

	/*
	 * In the future, the following should be one-time configuration.
	 */
	config.mode = MODE_ONESHOT;
	config.arg = evt;
	config.cb = kona_tick_interrupt_cb;
	kona_timer_config(kona_td.kona_timer, &config);

	irq_set_affinity(kona_td.kona_timer->irq, cpumask_of(cpu));

	evt->name = "local_timer";
	evt->cpumask = cpumask_of(cpu);
	evt->irq = kona_td.kona_timer->irq;
	evt->set_next_event = kona_tick_set_next_event;
	evt->set_mode = kona_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 250;
	evt->shift = 32;
	evt->mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(MAX_KONA_COUNT_CLOCK, evt);
	/* There is a MIN_KONA_DELTA_CLOCK clock-cycle delay in the HUB timer
	 * due to an ASIC limitation: when min_delta_ns is set to N, the load
	 * value actually requested via hrtimer becomes N - 1, so add 1 to
	 * MIN_KONA_DELTA_CLOCK.
	 */
	evt->min_delta_ns = clockevent_delta2ns(MIN_KONA_DELTA_CLOCK + 1, evt);

	per_cpu(percpu_kona_td, cpu) = kona_td;

	clockevents_register_device(evt);

	return 0;
}
Example #14
static int isp_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	struct isp_t *dev = kmalloc(sizeof(struct isp_t), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	filp->private_data = dev;
	spin_lock_init(&dev->lock);
	dev->isp_status.status = 0;

	init_completion(&dev->irq_sem);

	ret = pi_mgr_dfs_add_request(&isp_dfs_node, "isp", PI_MGR_PI_ID_MM,
				     PI_MGR_DFS_MIN_VALUE);

	if (ret) {
		printk(KERN_ERR "%s: failed to register PI DFS request\n",
		       __func__);
		goto err;
	}

	ret = pi_mgr_qos_add_request(&isp_qos_node, "isp",
					PI_MGR_PI_ID_ARM_CORE,
					PI_MGR_QOS_DEFAULT_VALUE);
	if (ret) {
		printk(KERN_ERR "%s: failed to register PI QOS request\n",
				__func__);
		ret = -EIO;
		goto qos_request_fail;
	}

	enable_isp_clock();
	pi_mgr_qos_request_update(&isp_qos_node, 0);
	scu_standby(0);

	ret = request_irq(IRQ_ISP, isp_isr, IRQF_DISABLED | IRQF_SHARED,
			  ISP_DEV_NAME, dev);
	if (ret) {
		err_print("request_irq failed ret = %d\n", ret);
		goto err;
	}
	/* Ensure that only one core handles interrupts for the MM block. */
	irq_set_affinity(IRQ_ISP, cpumask_of(0));
	disable_irq(IRQ_ISP);
	return 0;

qos_request_fail:
	pi_mgr_dfs_request_remove(&isp_dfs_node);
err:
	kfree(dev);
	return ret;
}
Example #15
static void tegra3_unregister_wake_timer(unsigned int cpu)
{
#ifdef CONFIG_SMP
	/* Reassign the affinity of the wake IRQ to CPU 0. */
	(void)irq_set_affinity(tegra_lp2wake_irq[cpu].irq, cpumask_of(0));
#endif

	/* Dispose of this IRQ. */
	remove_irq(tegra_lp2wake_irq[cpu].irq, &tegra_lp2wake_irq[cpu]);
}
Example #16
static int combiner_set_affinity(struct irq_data *data, const struct
				 cpumask *dest, bool force)
{
	struct combiner_chip_data *chip_data = data->chip_data;

	if (!chip_data)
		return -EINVAL;

	return irq_set_affinity(chip_data->gic_irq, dest);
}
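A callback with this signature is meant to be installed as the combiner's irq_chip method. A hedged wiring sketch (the other chip methods are omitted here):

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	/* .irq_mask / .irq_unmask / .irq_ack omitted in this sketch */
	.irq_set_affinity	= combiner_set_affinity,
};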
Example #17
static void bpmp_irq_set_affinity(int cpu)
{
	int nr_cpus = num_present_cpus();
	int r;
	int i;

	for (i = cpu; i < ARRAY_SIZE(cpu_irqs); i += nr_cpus) {
		r = irq_set_affinity(cpu_irqs[i], cpumask_of(cpu));
		WARN_ON(r);
	}
}
Example #18
/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}
Example #19
static void bpmp_irq_clr_affinity(int cpu)
{
	int nr_cpus = num_present_cpus();
	int new_cpu;
	int r;
	int i;

	for (i = cpu; i < ARRAY_SIZE(cpu_irqs); i += nr_cpus) {
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);
		r = irq_set_affinity(cpu_irqs[i], cpumask_of(new_cpu));
		WARN_ON(r);
	}
}
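bpmp_irq_set_affinity() and bpmp_irq_clr_affinity() pair naturally with CPU hotplug. A hedged sketch of a notifier that could drive them; the notifier itself is not part of the original excerpt:

static int bpmp_cpu_notify(struct notifier_block *nb, unsigned long action,
			   void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		bpmp_irq_set_affinity(cpu);	/* take over this CPU's share */
		break;
	case CPU_DOWN_PREPARE:
		bpmp_irq_clr_affinity(cpu);	/* move IRQs to a surviving CPU */
		break;
	}
	return NOTIFY_OK;
}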
Example #20
static int tegra3_resume_wake_timer(unsigned int cpu)
{
#ifdef CONFIG_SMP
	int ret = irq_set_affinity(tegra_lp2wake_irq[cpu].irq, cpumask_of(cpu));
	if (ret) {
		pr_err("Failed to set affinity for LP2 timer IRQ to "
			"CPU %d: irq=%d, ret=%d\n", cpu,
			tegra_lp2wake_irq[cpu].irq, ret);
		return ret;
	}
#endif
	cpumask_set_cpu(cpu, &wake_timer_ready);
	return 0;
}
Example #21
static int
set_irq_affinity(int irq,
		 unsigned int cpu)
{
#ifdef CONFIG_SMP
	int err = irq_set_affinity(irq, cpumask_of(cpu));
	if (err)
		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
			   irq, cpu);
	return err;
#else
	return 0;
#endif
}
Example #22
/*
 * The PMU IRQ lines of two cores are wired together into a single interrupt.
 * Bounce the interrupt to the other core if it's not ours.
 */
static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
{
	irqreturn_t ret = handler(irq, dev);
	int other = !smp_processor_id();

	if (ret == IRQ_NONE && cpu_online(other))
		irq_set_affinity(irq, cpumask_of(other));

	/*
	 * We should be able to get away with the amount of IRQ_NONEs we give,
	 * while still having the spurious IRQ detection code kick in if the
	 * interrupt really starts hitting spuriously.
	 */
	return ret;
}
Example #23
static int bman_online_cpu(unsigned int cpu)
{
	struct bman_portal *p = affine_bportals[cpu];
	const struct bm_portal_config *pcfg;

	if (!p)
		return 0;

	pcfg = bman_get_bm_portal_config(p);
	if (!pcfg)
		return 0;

	irq_set_affinity(pcfg->irq, cpumask_of(cpu));
	return 0;
}
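bman_online_cpu() has exactly the signature the modern cpuhp API expects. A hedged registration sketch; the state name string is illustrative:

static int __init bman_hotplug_init(void)
{
	int ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					    "soc/bman_portal:online",
					    bman_online_cpu, NULL);

	return ret < 0 ? ret : 0;	/* DYN states return an allocated id */
}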
Example #24
void __init enterprise_bpc_mgmt_init(void)
{
	int int_gpio = TEGRA_GPIO_TO_IRQ(TEGRA_BPC_TRIGGER);

	tegra_gpio_enable(TEGRA_BPC_TRIGGER);

#ifdef CONFIG_SMP
	cpumask_setall(&(bpc_mgmt_platform_data.affinity_mask));
	irq_set_affinity_hint(int_gpio,
				&(bpc_mgmt_platform_data.affinity_mask));
	irq_set_affinity(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
#endif
	platform_device_register(&enterprise_bpc_mgmt_device);

	return;
}
Example #25
void request_fiq(unsigned fiq, void (*isr)(void))
{
	ulong flags;

	BUG_ON(fiq >= NR_IRQS);
	BUG_ON(fiq_isr[fiq] != NULL);

	spin_lock_irqsave(&lock, flags);
	irq_level = MESON_GIC_FIQ_LEVEL;
	fiq_isr[fiq] = isr;
	setup_irq(fiq, &fake_fiq);
	irq_level = MESON_GIC_IRQ_LEVEL;
#ifdef CONFIG_ARM_GIC
	irq_set_affinity(fiq, cpumask_of(1));
#endif
	spin_unlock_irqrestore(&lock, flags);
}
Example #26
static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;

	clockevents_calc_mult_shift(evt, clk_rate / 2, 5);
	evt->max_delta_ns =
		clockevent_delta2ns(0x7fffffff, evt);
	evt->min_delta_ns =
		clockevent_delta2ns(0xf, evt);

	clockevents_register_device(evt);

	exynos4_mct_write(0x1, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0) {
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = IRQ_MCT_L0;
			setup_irq(IRQ_MCT_L0, &mct_tick0_event_irq);
		} else {
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = IRQ_MCT_L1;
			setup_irq(IRQ_MCT_L1, &mct_tick1_event_irq);
			irq_set_affinity(IRQ_MCT_L1, cpumask_of(1));
		}
	} else {
		enable_percpu_irq(IRQ_MCT_LOCALTIMER, 0);
	}

	return 0;
}
Example #27
static int __cpuinit tegra_local_timer_setup(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	struct tegra_clock_event_device *clkevt;
	int ret;

	clkevt = this_cpu_ptr(&percpu_tegra_timer);
	clkevt->evt = evt;
	sprintf(clkevt->name, "tegra_timer%d", cpu);
	evt->name = clkevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->features = tegra_cputimer_clockevent.features;
	evt->rating = tegra_cputimer_clockevent.rating;
	evt->set_mode = tegra_cputimer_set_mode;
	evt->set_next_event = tegra_cputimer_set_next_event;
	clockevents_calc_mult_shift(evt, 1000000, 5);
	evt->max_delta_ns =
		clockevent_delta2ns(0x1fffffff, evt);
	evt->min_delta_ns =
		clockevent_delta2ns(0x1, evt);
	tegra_cputimer_irq[cpu].dev_id = clkevt;

	clockevents_register_device(evt);

	ret = setup_irq(tegra_cputimer_irq[cpu].irq, &tegra_cputimer_irq[cpu]);
	if (ret) {
		pr_err("Failed to register CPU timer IRQ for CPU %d: " \
			"irq=%d, ret=%d\n", cpu,
			tegra_cputimer_irq[cpu].irq, ret);
		return ret;
	}

	evt->irq = tegra_cputimer_irq[cpu].irq;
	ret = irq_set_affinity(tegra_cputimer_irq[cpu].irq, cpumask_of(cpu));
	if (ret) {
		pr_err("Failed to set affinity for CPU timer IRQ to " \
			"CPU %d: irq=%d, ret=%d\n", cpu,
			tegra_cputimer_irq[cpu].irq, ret);
		return ret;
	}

	enable_percpu_irq(evt->irq, IRQ_TYPE_LEVEL_HIGH);

	return 0;
}
Example #28
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue. Otherwise, continue without this interrupt.
		 */
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
				    irq, i);
			continue;
		}

		err = request_irq(irq, handler,
				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
				  cpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
				irq);
			return err;
		}

		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
	}

	return 0;
}
Example #29
void sb1480_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
	struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
	struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
	unsigned char *name = per_cpu(sibyte_hpt_name, cpu);

	BUG_ON(cpu > 3);	/* Only have 4 general purpose timers */

	sprintf(name, "bcm1480-counter-%d", cpu);
	cd->name		= name;
	cd->features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT;
	clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(2, cd);
	cd->rating		= 200;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= sibyte_next_event;
	cd->set_state_shutdown	= sibyte_shutdown;
	cd->set_state_periodic	= sibyte_set_periodic;
	cd->set_state_oneshot	= sibyte_shutdown;
	clockevents_register_device(cd);

	bcm1480_mask_irq(cpu, irq);

	/*
	 * Map the timer interrupt to IP[4] of this cpu
	 */
	__raw_writeq(IMR_IP4_VAL,
		     IOADDR(A_BCM1480_IMR_REGISTER(cpu,
			R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3)));

	bcm1480_unmask_irq(cpu, irq);

	action->handler = sibyte_counter_handler;
	action->flags	= IRQF_PERCPU | IRQF_TIMER;
	action->name	= name;
	action->dev_id	= cd;

	irq_set_affinity(irq, cpumask_of(cpu));
	setup_irq(irq, action);
}
Example #30
static void irq_affinity_init(struct work_struct *dummy)
{
	int ret, cpu, i;
	char irq_affinity_key[48];

	memset(irq_affinity_records, 0, sizeof(irq_affinity_records));

	for (i = 0; i < NR_SOC_IRQS; i++) {
		snprintf(irq_affinity_key, sizeof(irq_affinity_key),
			 irq_affinity_key_prefix, i);

		ret = get_hw_config_int(irq_affinity_key, &cpu, NULL);
		if (ret == false)
			continue;
		if (cpu < 0 || cpu >= NR_CPUS)
			continue;

		irq_affinity_register_sysctl_table(i, cpu);

		/* init the records; by default, irqs bind to CPU0 */
		irq_affinity_records[i] = cpu;

		ret = irq_set_affinity(i, cpumask_of(cpu));
		if (ret < 0)
			printk(KERN_ERR "%s %d: failed to irq_set_affinity %d, errno %d\n",
			       __FUNCTION__, __LINE__, i, ret);
	}

	ret = register_hotcpu_notifier(&cpu_up_notifier);
	if (ret < 0)
		printk(KERN_ERR "%s: register_hotcpu_notifier failed %d!\n",
		       __FUNCTION__, ret);

	ret = register_pm_notifier(&post_suspend_notifier);
	if (ret < 0)
		printk(KERN_ERR "%s: register_pm_notifier failed %d!\n",
		       __FUNCTION__, ret);

	kfree(dummy);
}
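The kfree(dummy) at the end implies the work item is heap-allocated by whoever schedules it. A consistent, hedged scheduling sketch (the init function name is an assumption):

static int __init irq_affinity_init_async(void)
{
	struct work_struct *work = kmalloc(sizeof(*work), GFP_KERNEL);

	if (!work)
		return -ENOMEM;

	INIT_WORK(work, irq_affinity_init);
	schedule_work(work);	/* irq_affinity_init() frees it when done */
	return 0;
}
late_initcall(irq_affinity_init_async);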