void __init init_ISA_irqs(void)
{
	int i;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	init_bsp_APIC();
#endif
	legacy_pic->init(0);

	/*
	 * 16 old-style INTA-cycle interrupts:
	 */
	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) {
		struct irq_desc *desc = irq_to_desc(i);

		desc->status = IRQ_DISABLED;
		desc->action = NULL;
		desc->depth = 1;

		set_irq_chip_and_handler_name(i, &i8259A_chip,
					      handle_level_irq, "XT");
	}
}
Example #2
void arch_suspend_disable_irqs(void)
{
	int i, j, irq;
	struct irq_desc *desc;

	local_irq_disable();

	intc_saved[0] = readl(intc_base + IMR_OFF);
	intc_saved[1] = readl(intc_base + PART_OFF + IMR_OFF);

	writel(0xffffffff & ~intc_wakeup[0], intc_base + IMSR_OFF);
	writel(0xffffffff & ~intc_wakeup[1], intc_base + PART_OFF + IMSR_OFF);

	/* Re-enable the wakeup interrupts so they can bring us out of suspend */
	for (j = 0; j < 2; j++) {
		for (i = 0; i < 32; i++) {
			if (intc_wakeup[j] & (0x1 << i)) {
				irq = i + IRQ_INTC_BASE + 32 * j;
				desc = irq_to_desc(irq);
				__enable_irq(desc, irq, true);
			}
		}
	}
}
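The hook above saves the two mask registers into intc_saved[] before masking everything except the wakeup sources. On resume a platform would normally provide the matching arch_suspend_enable_irqs() override. A minimal sketch, assuming the same register layout plus a mask-clear register at an IMCR_OFF offset (an assumed name, not taken from the example):

/* Hedged sketch of the matching resume hook. IMCR_OFF (mask-clear) is
 * an assumption; everything else mirrors the suspend path above. */
void arch_suspend_enable_irqs(void)
{
	/* re-mask what was masked before suspend ... */
	writel(intc_saved[0], intc_base + IMSR_OFF);
	writel(intc_saved[1], intc_base + PART_OFF + IMSR_OFF);
	/* ... and unmask what was unmasked */
	writel(~intc_saved[0], intc_base + IMCR_OFF);
	writel(~intc_saved[1], intc_base + PART_OFF + IMCR_OFF);

	local_irq_enable();
}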
Example #3
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}
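Callers are expected to release in the opposite order; in the tree this helper is paired with __irq_put_desc_unlock(), passing the same bus flag. A hedged sketch of the usual caller pattern (irq_set_example_flag() is made up for illustration):

/* Illustrative caller: look up and lock the descriptor, operate on it
 * under desc->lock, then unlock with the same bus flag. */
static int irq_set_example_flag(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc =
		__irq_get_desc_lock(irq, &flags, true, _IRQ_DESC_CHECK);

	if (!desc)
		return -EINVAL;

	/* ... modify descriptor state under desc->lock ... */

	__irq_put_desc_unlock(desc, flags, true);
	return 0;
}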
Example #4
/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
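This is designed to run on the CPU that is going down, with interrupts disabled; MIPS, for instance, calls it from its CPU teardown code. A hedged sketch of such a call site (the surrounding hook is illustrative):

/* Illustrative arch hook: invoked on the dying CPU with irqs off so
 * each irq_chip can react via its irq_cpu_offline() callback. */
static void example_cpu_disable(void)
{
	unsigned long flags;

	local_irq_save(flags);
	irq_cpu_offline();
	local_irq_restore(flags);
}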
Example #5
/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	unsigned int irq;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    irq, smp_processor_id());
	}

	local_irq_restore(flags);
}
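ARM wires this into its hotplug path: once the dying CPU has been removed from the online mask, migrating the IRQs guarantees none are left targeting it. A sketch of that call site, modelled loosely on __cpu_disable() (names are illustrative):

/* Illustrative teardown hook: drop out of the online mask first so
 * migrate_one_irq() can only pick a still-online target CPU. */
static int example_cpu_teardown(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	irq_migrate_all_off_this_cpu();
	return 0;
}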
Example #6
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;
	int i;
	int k;

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_STATUS_REG_PENDING, i);

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: pending.%d: 0x%08lx\n", __func__,
				i, pending);

		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			struct irq_desc *desc = apps_irq ?
						irq_to_desc(apps_irq) : NULL;

			/*
			 * This function is called when only CPU 0 is
			 * running and when both preemption and irqs
			 * are disabled.  There is no need to lock desc.
			 */
			if (desc && (desc->status & IRQ_TYPE_EDGE_BOTH)) {
				desc->status |= IRQ_PENDING;
				if (from_idle)
					check_irq_resend(desc, apps_irq);
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}

	msm_mpm_set(!from_idle);
}
Example #7
/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	int mask_this_irq = 0;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		mask_this_irq = 1;
		goto out_unlock;
	}

	desc->status |= IRQ_INPROGRESS;
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	if (unlikely(mask_this_irq)) {
		chip_bus_lock(irq, desc);
		mask_irq(desc, irq);
		chip_bus_sync_unlock(irq, desc);
	}
}
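Example #8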
/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	int mask_this_irq = 0;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		mask_this_irq = 1;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	if (unlikely(mask_this_irq)) {
		chip_bus_lock(desc);
		mask_irq(desc);
		chip_bus_sync_unlock(desc);
	}
}
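Both variants are driven the same way: a slow-bus device such as an I2C GPIO expander marks its child interrupts with irq_set_nested_thread() and demultiplexes them from its own threaded handler. A hedged sketch, with the expander structure and read_pending() helper made up:

/* Illustrative nested demux: each pending child interrupt is handed to
 * handle_nested_irq(), which runs the child's thread_fn in this thread. */
static irqreturn_t expander_irq_thread(int irq, void *data)
{
	struct expander *chip = data;
	unsigned long pending = read_pending(chip);	/* made-up register read */
	int bit;

	for_each_set_bit(bit, &pending, chip->ngpio)
		handle_nested_irq(chip->irq_base + bit);

	return IRQ_HANDLED;
}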
Example #9
/**
 * xgpiops_irqhandler - IRQ handler for the gpio banks of a gpio device
 * @irq:	irq number of the gpio bank where interrupt has occurred
 * @desc:	irq descriptor instance of the 'irq'
 *
 * This function reads the Interrupt Status Register of each bank to get the
 * gpio pin number which has triggered an interrupt. It then acks the triggered
 * interrupt and calls the pin specific handler set by the higher layer
 * application for that pin.
 * Note: A bug is reported if no handler is set for the gpio pin.
 */
void xgpiops_irqhandler(unsigned int irq, struct irq_desc *desc)
{
	int gpio_irq = (int)irq_get_handler_data(irq);
	struct xgpiops *gpio = (struct xgpiops *)irq_get_chip_data(gpio_irq);
	unsigned int int_sts, int_enb, bank_num;
	struct irq_desc *gpio_irq_desc;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	for (bank_num = 0; bank_num < 4; bank_num++) {
		int_sts = xgpiops_readreg(gpio->base_addr +
					   XGPIOPS_INTSTS_OFFSET(bank_num));
		int_enb = xgpiops_readreg(gpio->base_addr +
					   XGPIOPS_INTMASK_OFFSET(bank_num));
		int_sts &= ~int_enb;

		for (; int_sts != 0; int_sts >>= 1, gpio_irq++) {
			if ((int_sts & 1) == 0)
				continue;
			gpio_irq_desc = irq_to_desc(gpio_irq);
			BUG_ON(!gpio_irq_desc);
			chip = irq_desc_get_chip(gpio_irq_desc);
			BUG_ON(!chip);
			chip->irq_ack(&gpio_irq_desc->irq_data);

			/* call the pin specific handler */
			generic_handle_irq(gpio_irq);
		}
		/* shift to first virtual irq of next bank */
		gpio_irq = (int)irq_get_handler_data(irq) +
				(xgpiops_pin_table[bank_num] + 1);
	}

	chip = irq_desc_get_chip(desc);
	chained_irq_exit(chip, desc);
}
Example #10
int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
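In the tree this helper sits behind a thin wrapper for the common case: irq_set_percpu_devid() passes a NULL affinity, which the code above turns into cpu_possible_mask. A sketch of that wrapper:

/* The common case: no explicit partition, so percpu_affinity
 * defaults to cpu_possible_mask (see above). */
int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}
Example #11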
/*
 * Restore interrupt subsystem from sleep -- phase 2
 * Poke the specified pending interrupts into interrupt hardware.
 */
void msm_gic_irq_exit_sleep2(uint32_t irq_mask, uint32_t wakeup_reason,
			uint32_t pending)
{
	int i, smsm_irq, smsm_mask;
	struct irq_desc *desc;

	if (msm_gic_irq_debug_mask & IRQ_DEBUG_SLEEP)
		pr_info("%s %x %x %x now\n", __func__, irq_mask,
				 pending, wakeup_reason);

	for (i = 0; pending && i < ARRAY_SIZE(msm_gic_irq_to_smsm); i++) {
		smsm_irq = msm_gic_irq_to_smsm[i];

		if (smsm_irq == 0)
			continue;

		smsm_mask = BIT(smsm_irq - 1);
		if (!(pending & smsm_mask))
			continue;

		pending &= ~smsm_mask;

		if (msm_gic_irq_debug_mask & IRQ_DEBUG_SLEEP_INT)
			pr_info("%s, irq %d, still pending %x now\n",
					__func__, i, pending);
		/* Pending IRQ */
		desc = i ? irq_to_desc(i) : NULL;

		/* Resend the pending IRQ if it is not level-triggered */
		if (desc && !irqd_is_level_type(&desc->irq_data)) {
			/* Mark the IRQ as pending, if not Level */
			irq_set_pending(i);
			check_irq_resend(desc, i);
		}
	}
}
Example #12
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

#if defined(CONFIG_PANTECH_DEBUG)
#ifdef CONFIG_PANTECH_DEBUG_IRQ_LOG  //p14291_pantech_dbg
	int cpu_temp = smp_processor_id();
	unsigned long long start_time = cpu_clock(cpu_temp);
#endif
#endif

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
#if defined(CONFIG_PANTECH_DEBUG)
#ifdef CONFIG_PANTECH_DEBUG_IRQ_LOG  //p14291_pantech_dbg
	if (desc->action)
		pantech_debug_irq_sched_log(irq, (void *)desc->action->handler,
					    irqs_disabled(), start_time);
	else
		pantech_debug_irq_sched_log(irq, (void *)desc->handle_irq,
					    irqs_disabled(), start_time);
#endif
#endif
	return 0;
}
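Example #13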
void
handle_irq(int irq)
{
	/* 
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	static unsigned int illegal_count = 0;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS &&
	    illegal_count < MAX_ILLEGAL_IRQS)) {
		irq_err_count++;
		illegal_count++;
		printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
		       irq);
		return;
	}

	/*
	 * From here we must proceed with IPL_MAX. Note that we do not
	 * explicitly enable interrupts afterwards - some MILO PALcode
	 * (namely LX164 one) seems to have severe problems with RTI
	 * at IPL 0.
	 */
	local_irq_disable();
	irq_enter();
	generic_handle_irq_desc(irq, desc);
	irq_exit();
}
Example #14
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
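A typical use is driver teardown: clear a flag the handler checks, then let synchronize_irq() wait out both the hard handler and any threaded handler before freeing shared state. A hedged sketch (my_dev and its fields are made up):

/* Illustrative teardown: after synchronize_irq() returns, no handler
 * for dev->irq is still running, so shared state can be freed safely. */
static void my_dev_shutdown(struct my_dev *dev)
{
	dev->shutting_down = true;	/* handler sees this and bails out */
	synchronize_irq(dev->irq);
	free_shared_buffers(dev);	/* made-up cleanup */
}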
Example #15
void __init init_ISA_irqs(void)
{
	int i;

#ifdef CONFIG_X86_LOCAL_APIC
	init_bsp_APIC();
#endif
	init_8259A(0);

	/*
	 * 16 old-style INTA-cycle interrupts:
	 */
	for (i = 0; i < 16; i++) {
		/* first use of this irq_desc */
		struct irq_desc *desc = irq_to_desc(i);

		desc->status = IRQ_DISABLED;
		desc->action = NULL;
		desc->depth = 1;

		set_irq_chip_and_handler_name(i, &i8259A_chip,
					      handle_level_irq, "XT");
	}
}
Example #16
/**
 * zynq_gpio_irqhandler - IRQ handler for the gpio banks of a gpio device
 * @irq:	irq number of the gpio bank where interrupt has occurred
 * @desc:	irq descriptor instance of the 'irq'
 *
 * This function reads the Interrupt Status Register of each bank to get the
 * gpio pin number which has triggered an interrupt. It then acks the triggered
 * interrupt and calls the pin specific handler set by the higher layer
 * application for that pin.
 * Note: A bug is reported if no handler is set for the gpio pin.
 */
static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
{
	struct zynq_gpio *gpio = (struct zynq_gpio *)irq_get_handler_data(irq);
	int gpio_irq = gpio->irq_base;
	unsigned int int_sts, int_enb, bank_num;
	struct irq_desc *gpio_irq_desc;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	for (bank_num = 0; bank_num < ZYNQ_GPIO_MAX_BANK; bank_num++) {
		int_sts = zynq_gpio_readreg(gpio->base_addr +
					    ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
		int_enb = zynq_gpio_readreg(gpio->base_addr +
					    ZYNQ_GPIO_INTMASK_OFFSET(bank_num));
		int_sts &= ~int_enb;

		for (; int_sts != 0; int_sts >>= 1, gpio_irq++) {
			if (!(int_sts & 1))
				continue;
			gpio_irq_desc = irq_to_desc(gpio_irq);
			BUG_ON(!gpio_irq_desc);
			chip = irq_desc_get_chip(gpio_irq_desc);
			BUG_ON(!chip);
			chip->irq_ack(&gpio_irq_desc->irq_data);

			/* call the pin specific handler */
			generic_handle_irq(gpio_irq);
		}
		/* shift to first virtual irq of next bank */
		gpio_irq = gpio->irq_base + zynq_gpio_pin_table[bank_num] + 1;
	}

	chip = irq_desc_get_chip(desc);
	chained_irq_exit(chip, desc);
}
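For this kernel generation the handler above would be installed at probe time as a chained flow handler, with the driver context attached as handler data. A hedged sketch (the setup function itself is illustrative):

/* Illustrative probe-time wiring: attach the context that
 * zynq_gpio_irqhandler() fetches via irq_get_handler_data(), then
 * install it as the chained handler for the bank's parent IRQ. */
static void zynq_gpio_wire_irq(struct zynq_gpio *gpio, unsigned int bank_irq)
{
	irq_set_handler_data(bank_irq, gpio);
	irq_set_chained_handler(bank_irq, zynq_gpio_irqhandler);
}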
Example #17
int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned int src = virq_to_hw(d->irq);
	struct irq_desc *desc = irq_to_desc(d->irq);
	unsigned int vecpri, vold, vnew, prio, cpu_dest;
	unsigned long flags;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	irq_settings_clr_level(desc);
	irq_settings_set_trigger_mask(desc, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		irq_settings_set_level(desc);

	vecpri = ehv_pic_type_to_vecpri(flow_type);

	spin_lock_irqsave(&ehv_pic_lock, flags);
	ev_int_get_config(src, &vold, &prio, &cpu_dest);
	vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
			EHV_PIC_INFO(VECPRI_SENSE_MASK));
	vnew |= vecpri;

	/*
	 * TODO : Add specific interface call for platform to set
	 * individual interrupt priorities.
	 * platform currently using static/default priority for all ints
	 */

	prio = 8;

	ev_int_set_config(src, vecpri, prio, cpu_dest);

	spin_unlock_irqrestore(&ehv_pic_lock, flags);
	return 0;
}
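Example #18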
/*
 * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
 * just like if it has been actually received from a hw source. Also
 * works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned int irq)
{
	struct pt_regs regs;
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS)
		return -EINVAL;
	if (ipipe_virtual_irq_p(irq)) {
		if (!test_bit(irq - IPIPE_VIRQ_BASE,
			      &__ipipe_virtual_irq_map))
			return -EINVAL;
	} else if (irq_to_desc(irq) == NULL)
		return -EINVAL;
#endif
	local_irq_save_hw(flags);
	regs.flags = flags;
	regs.orig_ax = irq;	/* Positive value - IRQ won't be acked */
	regs.cs = __KERNEL_CS;
	__ipipe_handle_irq(&regs);
	local_irq_restore_hw(flags);

	return 1;
}
Example #19
/* cat gap_time_irq to show it */
static int irq_read_proc(char *page, char **start, off_t off,
		int count, int *eof, void *data)
{
	int len = 0;
	int i;
	struct irq_desc *desc;

#define PRINT(ARGS...) len += sprintf(page + len, ##ARGS)
	PRINT("ID    gap_time     over_times       dev_name\n");
	for (i = 0; i < 499; i++) {
		if (time_monitor[i] == 0)
			continue;
		desc = irq_to_desc(i);
		if (desc->action == NULL)
			PRINT("%3d %18lld   %10d       %s\n", i,
			      time_monitor[i], time_over[i], irq_srcs[i].name);
		else
			PRINT("%3d %18lld   %10d       %s\n", i,
			      time_monitor[i], time_over[i], desc->action->name);
	}
	PRINT("EER:\n");

	return len;
}
Example #20
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}
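The work item above is queued on behalf of a notifier that a consumer registers with irq_set_affinity_notifier(). A hedged sketch of such a registration (the my_* callbacks are made up):

/* Illustrative registration: the core fires notify->notify() from the
 * work item above whenever the IRQ's affinity changes. */
static void my_notify(struct irq_affinity_notify *notify,
		      const cpumask_t *mask)
{
	/* react to the new affinity mask, e.g. re-tune per-CPU state */
}

static void my_release(struct kref *ref)
{
	/* drop resources tied to the notifier's lifetime */
}

static struct irq_affinity_notify my_affinity_notify = {
	.notify  = my_notify,
	.release = my_release,
};

static int my_dev_watch_affinity(unsigned int irq)
{
	return irq_set_affinity_notifier(irq, &my_affinity_notify);
}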
Example #21
void tegra_irq_suspend(void)
{
	unsigned long flags;
	int i;

	for (i = INT_PRI_BASE; i < INT_GPIO_BASE; i++) {
		struct irq_desc *desc = irq_to_desc(i);

		if (!desc)
			continue;
		if (desc->status & IRQ_WAKEUP) {
			pr_debug("irq %d is wakeup\n", i);
			continue;
		}
		disable_irq(i);
	}

	local_irq_save(flags);
	for (i = 0; i < PPI_NR; i++) {
		void __iomem *ictlr = ictlr_to_virt(i);
		cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER);
		cop_ier[i] = readl(ictlr + ICTLR_COP_IER);
		writel(~0, ictlr + ICTLR_COP_IER_CLR);
	}
	local_irq_restore(flags);
}
Example #22
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int timestamp_en = 0;
	bool assigned_eq = false;

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
					     cq->vector)) {
			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

			err = mlx4_assign_eq(mdev->dev, priv->port,
					     MLX4_EQ_ID_TO_UUID(MLX4_EQ_ID_EN,
								priv->port,
								cq_idx),
					     mlx4_en_cq_eq_cb,
					     &priv->rx_cq[cq_idx],
					     &cq->vector);
			if (err) {
				mlx4_err(mdev, "Failed assigning an EQ to %s-%d\n",
					 priv->dev->name, cq->ring);
				goto free_eq;
			}

			assigned_eq = true;
		}

		/* Set IRQ for specific name (per ring) */
		err = mlx4_rename_eq(mdev->dev, priv->port, cq->vector,
				     MLX4_EN_EQ_NAME_PRIORITY, "%s-%d",
				     priv->dev->name, cq->ring);

		if (err) {
			mlx4_warn(mdev, "Failed to rename EQ, continuing with default name\n");
			err = 0;
		}

#if defined(HAVE_IRQ_DESC_GET_IRQ_DATA) && defined(HAVE_IRQ_TO_DESC_EXPORTED)
		cq->irq_desc =
			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
						    cq->vector));
#endif
	} else {
		/* For TX we use the same irq per ring we assigned for RX */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
	if (err)
		goto free_eq;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
			       NAPI_POLL_WEIGHT);
	} else {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
#ifdef HAVE_NAPI_HASH_ADD
		napi_hash_add(&cq->napi);
#endif
	}

	napi_enable(&cq->napi);

	return 0;

free_eq:
	if (assigned_eq)
		mlx4_release_eq(mdev->dev, MLX4_EQ_ID_TO_UUID(
					MLX4_EQ_ID_EN, priv->port, cq_idx),
				cq->vector);
	cq->vector = mdev->dev->caps.num_comp_vectors;
	return err;
}
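Example #23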
/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain. Hw
 * interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	int irq, vector = regs->orig_ax;
	struct list_head *head, *pos;
	struct pt_regs *tick_regs;
	int m_ack;

	if (vector < 0) {
		irq = __get_cpu_var(vector_irq)[~vector];
		BUG_ON(irq < 0);
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */

	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU times properly. It is assumed
	 * that other interrupt handlers don't actually care for such
	 * information.
	 */

	if (irq == __ipipe_hrtimer_irq) {
		tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
		if (!__ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (user_mode(regs) && (current->ipipe_flags & PF_EVTRET) != 0) {
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (!__ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

	return 1;
}
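Example #24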
void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	irq_to_desc(irq)->status &= ~IRQ_DISABLED;
}
Example #25
/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (time_after(jiffies, last_time + 5 * HZ))
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
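Example #26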
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
Example #27
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
Example #28
struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
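Example #29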
asmlinkage
void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
{
	/*
	 * Since current-> is always on the stack, and we always switch
	 * the stack NMI-atomically, it's safe to use smp_processor_id().
	 */
	int sum, cpu = smp_processor_id();
	int irq = NMIIRQ;
	u8 wdt, tmp;

	wdt = WDCTR & ~WDCTR_WDCNE;
	WDCTR = wdt;
	tmp = WDCTR;
	NMICR = NMICR_WDIF;

	nmi_count(cpu)++;
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
	sum = irq_stat[cpu].__irq_count;

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		watchdog_alert_counter++;
		if (watchdog_alert_counter == 5 * watchdog_hz) {
			spin_lock(&watchdog_print_lock);
			/*
			 * We are in trouble anyway, lets at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk(KERN_ERR
			       "NMI Watchdog detected LOCKUP on CPU%d,"
			       " pc %08lx, registers:\n",
			       cpu, regs->pc);
			show_registers(regs);
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&watchdog_print_lock);
			bust_spinlocks(0);
#ifdef CONFIG_GDBSTUB
			if (gdbstub_busy)
				gdbstub_exception(regs, excep);
			else
				gdbstub_intercept(regs, excep);
#endif
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		watchdog_alert_counter = 0;
	}

	WDCTR = wdt | WDCTR_WDRST;
	tmp = WDCTR;
	WDCTR = wdt | WDCTR_WDCNE;
	tmp = WDCTR;
}
Example #30
static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;
	struct irq_desc *desc = irq_to_desc(data->irq);

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;

	default:
		printk(KERN_ERR "No such irq type %d\n", type);
		return -EINVAL;
	}

	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));

	if ((0 <= offs) && (offs < 8))
		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
	else if ((8 <= offs) && (offs < 16))
		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
	else if ((16 <= offs) && (offs < 24))
		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
	else if ((24 <= offs) && (offs < 32))
		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
	else
		printk(KERN_ERR "No such irq number %d\n", offs);

	if (type & IRQ_TYPE_EDGE_BOTH)
		desc->handle_irq = handle_edge_irq;
	else
		desc->handle_irq = handle_level_irq;

	return 0;
}