/*
 * __irq_get_desc_lock - look up an irq descriptor and lock it
 * @irq:	interrupt number
 * @flags:	storage for the saved interrupt state
 * @bus:	when true, also take the irq chip's slow-bus lock
 *
 * Returns the descriptor with desc->lock held (interrupt state saved
 * in *flags), or NULL when @irq has no descriptor.  The caller is
 * responsible for the matching unlock.
 */
struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;

	if (bus)
		chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, *flags);

	return desc;
}
/**
 * __set_irq_handler - install a flow handler for an interrupt
 * @irq:	interrupt number
 * @handle:	flow handler; NULL uninstalls (replaced by handle_bad_irq)
 * @is_chained:	nonzero when the handler demultiplexes a chained irq
 * @name:	name reported for this interrupt line
 *
 * Takes chip_bus_lock and desc->lock internally; the caller must hold
 * neither.  A chained handler is started immediately and flagged
 * NOREQUEST/NOPROBE so drivers cannot request or probe the line.
 */
void __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR "Trying to install type control for IRQ%d\n", irq);
		return;
	}

	if (!handle)
		handle = handle_bad_irq;	/* NULL handle means uninstall */
	else if (desc->irq_data.chip == &no_irq_chip) {
		printk(KERN_WARNING "Trying to install %sinterrupt handler "
		       "for IRQ%d\n", is_chained ? "chained " : "", irq);
		/*
		 * Some ARM implementations install a handler for really dumb
		 * interrupt hardware without setting an irq_chip. This worked
		 * with the ARM no_irq_chip but the check in setup_irq would
		 * prevent us to setup the interrupt at all. Switch it to
		 * dummy_irq_chip for easy transition.
		 */
		desc->irq_data.chip = &dummy_irq_chip;
	}

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/* The flow-type fixup may veto or substitute the requested handler */
	handle = __fixup_irq_handler(desc, handle, is_chained);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		/* Quiesce the line before tearing the handler down */
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		desc->status |= IRQ_DISABLED;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		/*
		 * Chained handlers are enabled unconditionally and must not
		 * be requestable or probeable by drivers.
		 */
		desc->status &= ~IRQ_DISABLED;
		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
		desc->depth = 0;
		desc->irq_data.chip->irq_startup(&desc->irq_data);
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and enables nest, so
 * each call must be balanced by a matching enable_irq().  Unlike
 * disable_irq() this does NOT wait for running handlers to finish.
 *
 * May be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
/**
 * irq_read_line - read the value on an irq line
 * @irq: Interrupt number representing a hardware line
 *
 * This function is meant to be called from within the irq handler.
 * Slowbus irq controllers might sleep, but it is assumed that the irq
 * handler for slowbus interrupts will execute in thread context, so
 * sleeping is okay.
 *
 * Returns the chip-specific line value, or -EINVAL when @irq has no
 * descriptor, no chip, or the chip does not implement irq_read_line.
 */
int irq_read_line(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int val;

	/*
	 * BUG FIX: also check that a chip is installed before chasing
	 * chip->irq_read_line — the original dereferenced a possibly
	 * NULL desc->irq_data.chip.
	 */
	if (!desc || !desc->irq_data.chip ||
	    !desc->irq_data.chip->irq_read_line)
		return -EINVAL;

	chip_bus_lock(desc);
	raw_spin_lock(&desc->lock);
	val = desc->irq_data.chip->irq_read_line(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
	chip_bus_sync_unlock(desc);

	return val;
}
/*
 * set_tg_irq_status - set touch-gesture irq status under desc->lock
 * @irq:	interrupt number
 * @enable:	desired state passed through to __set_tg_irq_status()
 *
 * Returns the value produced by __set_tg_irq_status(), or 0xffffffff
 * (i.e. -1 as int) when @irq has no descriptor.
 *
 * NOTE(review): the meaning of the returned value is defined by
 * __set_tg_irq_status(), which is not visible here — confirm with its
 * definition.
 */
int set_tg_irq_status(unsigned int irq, bool enable)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int value;

	if (!desc)
		return 0xffffffff;

	chip_bus_lock(irq, desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	value = __set_tg_irq_status(desc, irq, enable);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);

	/*
	 * BUG FIX: the original computed 'value' but fell off the end of
	 * a non-void function without returning it — undefined behavior
	 * whenever the caller uses the result.
	 */
	return value;
}
/** * enable_irq - enable handling of an irq * @irq: Interrupt to enable * * Undoes the effect of one call to disable_irq(). If this * matches the last disable, processing of interrupts on this * IRQ line is re-enabled. * * This function may be called from IRQ context only when * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! */ void enable_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; if (!desc) return; if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable, KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) return; chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); __enable_irq(desc, irq, false); raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(desc); }
// ARM10C 20141122 // irq: 16, flags: &flags, false, check: 0 // ARM10C 20141122 // irq: 16, flags: &flags, true, check: 0 struct irq_desc * __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, unsigned int check) { // irq: 16, irq_to_desc(16): kmem_cache#28-oX (irq 16) // irq: 16, irq_to_desc(16): kmem_cache#28-oX (irq 16) struct irq_desc *desc = irq_to_desc(irq); // desc: kmem_cache#28-oX (irq 16) // desc: kmem_cache#28-oX (irq 16) // desc: kmem_cache#28-oX (irq 16) // desc: kmem_cache#28-oX (irq 16) if (desc) { // check: 0, _IRQ_DESC_CHECK: 1 // check: 0, _IRQ_DESC_CHECK: 1 if (check & _IRQ_DESC_CHECK) { if ((check & _IRQ_DESC_PERCPU) && !irq_settings_is_per_cpu_devid(desc)) return NULL; if (!(check & _IRQ_DESC_PERCPU) && irq_settings_is_per_cpu_devid(desc)) return NULL; } // bus: false // bus: true if (bus) // desc: kmem_cache#28-oX (irq 16) chip_bus_lock(desc); // &desc->lock: &(kmem_cache#28-oX (irq 16))->lock, *flags: flags // &desc->lock: &(kmem_cache#28-oX (irq 16))->lock, *flags: flags raw_spin_lock_irqsave(&desc->lock, *flags); // &(kmem_cache#28-oX (irq 16))->lock을 사용하여 spinlock을 설정하고 cpsr을 flags에 저장 // &(kmem_cache#28-oX (irq 16))->lock을 사용하여 spinlock을 설정하고 cpsr을 flags에 저장 } // desc: kmem_cache#28-oX (irq 16) // desc: kmem_cache#28-oX (irq 16) return desc; // return kmem_cache#28-oX (irq 16) // return kmem_cache#28-oX (irq 16) }
/*
 * handle_nested_irq - Handle a nested irq from a irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * threads context.  Returns true when a handler actually ran,
 * false when the irq was disabled/unhandled and got masked instead.
 */
bool handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	int mask_this_irq = 0;
	irqreturn_t action_ret;
	bool handled = false;

	/* Runs in thread context; sleeping is legal here */
	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		/* No handler or disabled: remember it and mask the line */
		desc->istate |= IRQS_PENDING;
		mask_this_irq = 1;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	/* Drop the lock while the (possibly sleeping) handler runs */
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	handled = true;

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	/* Mask outside desc->lock: chip_bus_lock() may sleep */
	if (unlikely(mask_this_irq)) {
		chip_bus_lock(desc);
		mask_irq(desc);
		chip_bus_sync_unlock(desc);
	}
	return handled;
}
/* * handle_nested_irq - Handle a nested irq from a irq thread * @irq: the interrupt number * * Handle interrupts which are nested into a threaded interrupt * handler. The handler function is called inside the calling * threads context. */ void handle_nested_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; int mask_this_irq = 0; irqreturn_t action_ret; might_sleep(); raw_spin_lock_irq(&desc->lock); kstat_incr_irqs_this_cpu(irq, desc); action = desc->action; if (unlikely(!action || (desc->status & IRQ_DISABLED))) { mask_this_irq = 1; if (!(desc->status & IRQ_LEVEL)) desc->status |= IRQ_PENDING; goto out_unlock; } desc->status |= IRQ_INPROGRESS; raw_spin_unlock_irq(&desc->lock); action_ret = action->thread_fn(action->irq, action->dev_id); if (!noirqdebug) note_interrupt(irq, desc, action_ret); raw_spin_lock_irq(&desc->lock); desc->status &= ~IRQ_INPROGRESS; out_unlock: if (unlikely(mask_this_irq)) { chip_bus_lock(irq, desc); mask_irq(desc, irq); chip_bus_sync_unlock(irq, desc); } raw_spin_unlock_irq(&desc->lock); }
/*
 * __irq_get_desc_lock - look up, validate and lock an irq descriptor
 * @irq:	interrupt number
 * @flags:	storage for the saved interrupt state
 * @bus:	take the chip's bus lock as well when true
 * @check:	_IRQ_DESC_CHECK / _IRQ_DESC_PERCPU validation flags
 *
 * Returns the locked descriptor, or NULL when @irq is not mapped or
 * the descriptor's per-cpu-devid property does not match @check.
 */
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;

	if (check & _IRQ_DESC_CHECK) {
		bool want_percpu = !!(check & _IRQ_DESC_PERCPU);

		/* The descriptor kind must match what the caller asked for */
		if (want_percpu != !!irq_settings_is_per_cpu_devid(desc))
			return NULL;
	}

	if (bus)
		chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, *flags);

	return desc;
}