Example #1
static int gb_gpio_request_recv(u8 type, struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct device *dev = &connection->bundle->dev;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	int irq;
	struct irq_desc *desc;

	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->irqdomain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}
	desc = irq_to_desc(irq);
	if (!desc) {
		dev_err(dev, "failed to look up irq\n");
		return -EINVAL;
	}

	local_irq_disable();
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
	generic_handle_irq_desc(irq, desc);
#else
	generic_handle_irq_desc(desc);
#endif
	local_irq_enable();

	return 0;
}
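Example #1 wraps the dispatch in a LINUX_VERSION_CODE check because generic_handle_irq_desc() lost its irq argument in Linux 4.3, when interrupt flow handlers stopped being passed the irq number. An out-of-tree driver that must build against kernels on both sides of that change can hide the difference behind a small wrapper; the sketch below is one way to do it, and the compat_* name is a hypothetical choice rather than an existing kernel helper.

#include <linux/version.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

/* Hypothetical helper: confine the v4.3 signature change to one place. */
static inline void compat_generic_handle_irq_desc(unsigned int irq,
						  struct irq_desc *desc)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
	generic_handle_irq_desc(irq, desc);	/* old two-argument form */
#else
	generic_handle_irq_desc(desc);		/* irq number no longer taken */
#endif
}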
Example #2
File: irq.c Project: 01org/prd
void
handle_irq(int irq)
{	
	/* 
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	static unsigned int illegal_count = 0;
	struct irq_desc *desc = irq_to_desc(irq);
	
	if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS &&
	    illegal_count < MAX_ILLEGAL_IRQS)) {
		irq_err_count++;
		illegal_count++;
		printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
		       irq);
		return;
	}

	irq_enter();
	generic_handle_irq_desc(irq, desc);
	irq_exit();
}
Example #3
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* merge qcom DEBUG_CODE for RPC crashes */
#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
	uint32_t timetick = 0;
	timetick = read_timestamp();
	irq_ts[irq_idx].irq = irq;
	irq_ts[irq_idx].ts = timetick;
	irq_ts[irq_idx].state = 1;
	/* end of HUAWEI */
#endif

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);

#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
	/* HUAWEI debug */
	irq_ts[irq_idx].state = 3;
	irq_idx = (irq_idx + 1) % 128;
#endif

	return 0;
}
Example #4
static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	irq_hw_number_t hw;
	unsigned int irq_stat, irq_no;
	struct tz1090_gpio_bank *bank;
	struct irq_desc *child_desc;

	bank = (struct tz1090_gpio_bank *)irq_desc_get_handler_data(desc);
	irq_stat = tz1090_gpio_read(bank, REG_GPIO_DIR) &
		   tz1090_gpio_read(bank, REG_GPIO_IRQ_STS) &
		   tz1090_gpio_read(bank, REG_GPIO_IRQ_EN) &
		   0x3FFFFFFF; /* 30 bits only */

	for (hw = 0; irq_stat; irq_stat >>= 1, ++hw) {
		if (!(irq_stat & 1))
			continue;

		irq_no = irq_linear_revmap(bank->domain, hw);
		child_desc = irq_to_desc(irq_no);

		/* Toggle edge for pin with both edges triggering enabled */
		if (irqd_get_trigger_type(&child_desc->irq_data)
				== IRQ_TYPE_EDGE_BOTH)
			tz1090_gpio_irq_next_edge(bank, hw);

		generic_handle_irq_desc(irq_no, child_desc);
	}
}
Example #5
File: irq_64.c Project: E-LLP/n900
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	/* high bit used in ret_from_ code  */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();
	irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	stack_overflow_check(regs);
#endif

	desc = irq_to_desc(irq);
	if (likely(desc))
		generic_handle_irq_desc(irq, desc);
	else {
		if (!disable_apic)
			ack_APIC_irq();

		if (printk_ratelimit())
			printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
				__func__, smp_processor_id(), vector);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
Example #6
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
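Example #6 is the stock upstream helper: it resolves the descriptor for a Linux irq number and dispatches it. Chained (demultiplexing) handlers such as Examples #4 and #13 typically pair it with an irq-domain lookup after reading a pending-status register. A minimal sketch of that pattern follows; the bank structure, register offset, and function names are illustrative assumptions, not taken from any of the projects above.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

/* Hypothetical per-bank state; the names are assumptions for illustration. */
struct demo_gpio_bank {
	void __iomem *base;
	struct irq_domain *domain;
};

#define DEMO_IRQ_STATUS	0x10	/* hypothetical pending-status register offset */

static void demo_gpio_demux(struct irq_desc *parent_desc)
{
	struct demo_gpio_bank *bank = irq_desc_get_handler_data(parent_desc);
	unsigned long pending = readl(bank->base + DEMO_IRQ_STATUS);
	unsigned int bit;

	/* Translate each pending hardware line to its Linux irq and dispatch. */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(irq_find_mapping(bank->domain, bit));
}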
Example #7
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	stack_overflow_check(regs);

	if (IS_ERR_OR_NULL(desc))
		return false;

	generic_handle_irq_desc(desc);
	return true;
}
Example #8
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *desc;

	stack_overflow_check(regs);

	desc = irq_to_desc(irq);
	if (unlikely(!desc))
		return false;

	generic_handle_irq_desc(irq, desc);
	return true;
}
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (unlikely(logging_wakeup_reasons_nosync()))
		return log_possible_wakeup_reason(irq,
				desc,
				generic_handle_irq_desc);

	return generic_handle_irq_desc(irq, desc);
}
Example #10
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	irq_enter();
	exit_idle();

	desc = irq_to_desc(vmbus_irq);

	if (desc)
		generic_handle_irq_desc(vmbus_irq, desc);

	irq_exit();
	set_irq_regs(old_regs);
}
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
#ifdef CONFIG_SEC_DEBUG
	if (desc->action)
		sec_debug_irq_sched_log(irq, (void *)desc->action->handler,
				irqs_disabled());
	else
		sec_debug_irq_sched_log(irq, (void *)desc->handle_irq,
				irqs_disabled());
#endif
	generic_handle_irq_desc(irq, desc);
	return 0;
}
Example #12
void hyperv_vector_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;

	irq_enter();
	msa_start_irq(vmbus_irq);
	exit_idle();

	desc = irq_to_desc(vmbus_irq);

	if (desc)
		generic_handle_irq_desc(vmbus_irq, desc);

	msa_irq_exit(vmbus_irq, regs->cs != __KERNEL_CS);
	set_irq_regs(old_regs);
}
Example #13
static void gpio_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *base = irq_desc_get_handler_data(desc);
	u32 stat, mask;
	int phy, bit;
	int cpu = raw_smp_processor_id();

	mask = readl(base + GPIO_INT_ENB);
	stat = readl(base + GPIO_INT_STATUS) & mask;
	bit  = ffs(stat) - 1;
	phy  = irq;

	pr_debug("%s: cpu.%d gpio irq=%d [%s.%d], stat=0x%08x, mask=0x%08x\n",
		__func__, cpu, phy, PIO_NAME(phy), bit, stat, mask);

	if (-1 == bit) {
		printk(KERN_ERR "Unknown cpu.%d gpio phy irq=%d, stat=0x%08x, mask=0x%08x\r\n",
			cpu, phy, stat, mask);
		writel(-1, (base + GPIO_INT_STATUS));	/* clear gpio status all */
		writel(1<<phy, (VIC1_INT_BASE + VIC_INT_SOFT_CLEAR));
		return;
	}

	/* gpio descriptor */
	irq  = (VIO_IRQ_BASE + bit + (32 * (phy - PIO_IRQ_BASE)));	/* virtual irq */
	desc = irq_desc + irq;

	if (desc && desc->action) {
		/* disable irq reentrant */
		desc->action->flags |= IRQF_DISABLED;
		generic_handle_irq_desc(irq, desc);
	} else {
		printk(KERN_ERR "Error, not registered gpio interrupt=%d (%s.%d), disable !!!\n",
			irq, PIO_NAME(phy), bit);
		writel(readl(base + GPIO_INT_ENB) & ~(1<<bit), base + GPIO_INT_ENB);		/* gpio mask : irq disable */
		writel(readl(base + GPIO_INT_STATUS) | (1<<bit), base + GPIO_INT_STATUS);	/* gpio ack  : irq pend clear */
		writel(1<<phy, (VIC1_INT_BASE + VIC_INT_SOFT_CLEAR));
		readl(base + GPIO_INT_STATUS);	/* Guarantee */
	}

	return;
}
Example #14
static void alive_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *base = irq_desc_get_handler_data(desc);
	u32 stat, mask;
	int phy, bit;
	int cpu = raw_smp_processor_id();

	mask = readl(base + ALIVE_INT_SET_READ);
	stat = readl(base + ALIVE_INT_STATUS) & mask;
	bit  = ffs(stat) - 1;
	phy  = irq;

	pr_debug("%s: cpu.%d alive irq=%d [io=%d], stat=0x%02x, mask=0x%02x\n",
		__func__, cpu, phy, bit, stat, mask);

	if (-1 == bit) {
		printk(KERN_ERR "Unknown cpu.%d alive irq=%d, stat=0x%08x, mask=0x%02x\r\n",
			cpu, phy, stat, mask);
		writel(-1, (base + ALIVE_INT_STATUS));	/* clear alive status all */
		writel(1<<phy, (VIC0_INT_BASE + VIC_INT_SOFT_CLEAR));
		return;
	}

	/* alive descriptor */
	irq  = IRQ_ALIVE_START + bit;
	desc = irq_desc + irq;

	if (desc && desc->action) {
		desc->action->flags |= IRQF_DISABLED;	/* disable irq reentrant */
		generic_handle_irq_desc(irq, desc);
	} else {
		printk(KERN_ERR "Error, not registered alive interrupt=%d (%d.%d), disable !!!\n",
			irq, phy, bit);
		writel(readl(base + ALIVE_INT_SET) & ~(1<<bit), base + ALIVE_INT_SET);		/* alive mask : irq disable */
		writel(readl(base + ALIVE_INT_STATUS) | (1<<bit), base + ALIVE_INT_STATUS);	/* alive ack  : irq pend clear */
		writel(1<<phy, (VIC0_INT_BASE + VIC_INT_SOFT_CLEAR));
		readl(base + ALIVE_INT_STATUS);	/* Guarantee */
	}

	return;
}
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
	uint32_t timetick = 0;
	timetick = read_timestamp();
	irq_ts[irq_idx].irq = irq;
	irq_ts[irq_idx].ts = timetick;
	irq_ts[irq_idx].state = 1;
#endif

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);

#ifdef CONFIG_HUAWEI_RPC_CRASH_DEBUG
	irq_ts[irq_idx].state = 3;
	irq_idx = (irq_idx + 1) % 128;
#endif

	return 0;
}
Example #16
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

#if defined(CONFIG_PANTECH_DEBUG)
#ifdef CONFIG_PANTECH_DEBUG_IRQ_LOG  //p14291_pantech_dbg
	int cpu_temp = smp_processor_id();
	unsigned long long start_time = cpu_clock(cpu_temp);
#endif
#endif
	
	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
#if defined(CONFIG_PANTECH_DEBUG)
#ifdef CONFIG_PANTECH_DEBUG_IRQ_LOG  //p14291_pantech_dbg
	if (desc->action)
		pantech_debug_irq_sched_log(irq, (void *)desc->action->handler,
					    irqs_disabled(), start_time);
	else
		pantech_debug_irq_sched_log(irq, (void *)desc->handle_irq,
					    irqs_disabled(), start_time);
#endif
#endif
	return 0;
}
Example #17
void
handle_irq(int irq)
{	
	/* 
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	static unsigned int illegal_count = 0;
	struct irq_desc *desc = irq_to_desc(irq);
	
	if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS &&
	    illegal_count < MAX_ILLEGAL_IRQS)) {
		irq_err_count++;
		illegal_count++;
		printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
		       irq);
		return;
	}

	/*
	 * From here we must proceed with IPL_MAX. Note that we do not
	 * explicitly enable interrupts afterwards - some MILO PALcode
	 * (namely LX164 one) seems to have severe problems with RTI
	 * at IPL 0.
	 */
	local_irq_disable();
	irq_enter();
	generic_handle_irq_desc(irq, desc);
	irq_exit();
}
Example #18
static void __vic_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *base;
	u32 stat[2], pend = 0;
	int i = 0, gic = irq;
	int cpu, n;
	static u32 vic_nr[4] = { 0, 1, 0, 1};
#if defined (CONFIG_CPU_S5P4418_SMP_ISR)
	static u32 vic_mask[3] = { 0, } ;
#endif

#if (DEBUG_TIMESTAMP)
	long long ts = ktime_to_us(ktime_get());
	static long long max = 0;
#endif

	stat[0] = readl_relaxed(VIC0_INT_BASE+VIC_IRQ_STATUS);
	stat[1] = readl_relaxed(VIC1_INT_BASE+VIC_IRQ_STATUS);
	cpu = raw_smp_processor_id();

	/* 1st usb-otg */
	if (stat[1] & (1<<(IRQ_PHY_USB20OTG - 32))) {
		pend = (1<<(IRQ_PHY_USB20OTG - 32));
		irq = IRQ_PHY_USB20OTG;
		writel_relaxed(0, (VIC1_INT_BASE + VIC_PL192_VECT_ADDR) );
		goto irq_hnd;
	}

	/* 2nd event timer */
	if (stat[0] & (1<<IRQ_PHY_TIMER_INT1)) {
		pend = (1<<IRQ_PHY_TIMER_INT1);
		irq = IRQ_PHY_TIMER_INT1;
		writel_relaxed(0, (VIC0_INT_BASE + VIC_PL192_VECT_ADDR) );
		goto irq_hnd;
	}

	/*
	 * Other round-robin vic group
	 */
	n = vic_nr[cpu];
	for (i = 0; 2 > i; i++, n ^= 1) {
		pend = stat[n];
		if (pend) {
			vic_nr[cpu] = !n;
			base = n ? VIC1_INT_BASE : VIC0_INT_BASE;
			irq = readl_relaxed(base + VIC_PL192_VECT_ADDR);
			writel_relaxed(0, base + VIC_PL192_VECT_ADDR);
			break;
		}
	}
#if defined (CONFIG_CPU_S5P4418_SMP_ISR)
	pr_debug("%s: cpu.%d vic[%s] gic irq=%d, vic=%d, stat=0x%02x [0x%08x:0x%08x:0x%08x]\n",
		__func__, cpu, i?"1":"0", gic, irq, pend, vic_mask[0], vic_mask[1], vic_mask[2]);
#endif

	if (0 == pend)
		goto irq_eoi;

irq_hnd:
#if defined (CONFIG_CPU_S5P4418_SMP_ISR)
	raw_spin_lock(&smp_irq_lock);
	if (vic_mask[irq>>5] & (1<<(irq&0x1f))) {
		writel_relaxed(31, GIC_CPUI_BASE + GIC_CPU_EOI);
		raw_spin_unlock(&smp_irq_lock);
		return;
	}
	vic_mask[irq>>5] |= (1<<(irq&0x1f));
	raw_spin_unlock(&smp_irq_lock);
#endif

	/* vic descriptor */
	desc = irq_desc + irq;
	if (desc)
		generic_handle_irq_desc(irq, desc);
	else
		printk(KERN_ERR "Error, not registered vic irq=%d !!!\n", irq);

#if defined (CONFIG_CPU_S5P4418_SMP_ISR)
	raw_spin_lock(&smp_irq_lock);
	vic_mask[irq>>5] &= ~(1<<(irq&0x1f));
	raw_spin_unlock(&smp_irq_lock);
#endif

#if (DEBUG_TIMESTAMP)
	ts = ktime_to_us(ktime_get()) - ts;
	if (ts > 2000) {
		max = ts;
		printk("[cpu.%d irq.%d, %03lldms]\n", cpu, irq, div64_s64(ts, 1000));
	}
#endif

irq_eoi:
	writel_relaxed(31, GIC_CPUI_BASE + GIC_CPU_EOI);
	return;
}