/* Secondary CPUs start using C here. Here we need to set up CPU
 * specific stuff such as the local timer and the MMU.
 *
 * Runs on the freshly-booted secondary; identified by the global
 * cpu_now_booting set by the boot CPU. Ends by entering cpu_idle(),
 * so this function never returns.
 */
void __init smp_callin(void)
{
	extern void cpu_idle(void);

	int cpu = cpu_now_booting;
	/* All-zero vector mask: every interrupt source masked at the
	 * controller until explicitly unmasked below. */
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU: borrow init_mm as the
	 * active address space (kernel threads have no own mm). */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up MMU, then invalidate any stale TLB entries. */
	cris_mmu_init();
	__flush_tlb_all();

	/* Setup local timer. */
	cris_timer_init();

	/* Enable IRQ and idle: write the (all-masked) vector mask, then
	 * open only the IPI and local-timer vectors for this CPU. */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	unmask_irq(IPI_INTR_VECT);
	unmask_irq(TIMER0_INTR_VECT);
	preempt_disable();
	local_irq_enable();

	/* Mark ourselves online so the boot CPU can proceed, then idle. */
	cpu_set(cpu, cpu_online_map);
	cpu_idle();
}
/*
 * Populate the interrupt descriptor table:
 *  - 0x00-0x12: CPU exception vectors (__ex0 .. __ex18)
 *  - 0x20:      syscall gate, installed with set_irqU so user mode
 *               can invoke it
 *  - 0x30-0x3F: hardware IRQ vectors 0-15 (PIC remapped below)
 *  - 0x40-0x44: SMP inter-processor interrupt vectors
 * then loads the IDT register, remaps the PICs and unmasks the timer
 * (IRQ0) and slave-PIC cascade (IRQ2) lines.
 *
 * NOTE(review): "__init__" is presumably a section/linkage macro
 * defined elsewhere in this project - confirm it is not a typo
 * for the kernel-style __init.
 */
void __init__ init_idt(uint32 *IDT)
{
	int i;

	/* No task owns any hardware IRQ yet. */
	for(i=0;i<16;i++)
		irq_task_map[i] = NULL;

	/* CPU exception vectors. */
	set_irq(IDT,0x00,__ex0);
	set_irq(IDT,0x01,__ex1);
	set_irq(IDT,0x02,__ex2);
	set_irq(IDT,0x03,__ex3);
	set_irq(IDT,0x04,__ex4);
	set_irq(IDT,0x05,__ex5);
	set_irq(IDT,0x06,__ex6);
	set_irq(IDT,0x07,__ex7);
	set_irq(IDT,0x08,__ex8);
	set_irq(IDT,0x09,__ex9);
	set_irq(IDT,0x0A,__ex10);
	set_irq(IDT,0x0B,__ex11);
	set_irq(IDT,0x0C,__ex12);
	set_irq(IDT,0x0D,__ex13);
	set_irq(IDT,0x0E,__ex14);
	set_irq(IDT,0x0F,__ex15);
	set_irq(IDT,0x10,__ex16);
	set_irq(IDT,0x11,__ex17);
	set_irq(IDT,0x12,__ex18);

	/* Syscall gate - user-accessible (DPL 3) variant of set_irq. */
	set_irqU(IDT,0x20,__syscall);

	/* Hardware IRQ vectors 0-15, offset to 0x30. */
	set_irq(IDT,0x30,__timer_irq);
	set_irq(IDT,0x31,__irq1);
	set_irq(IDT,0x32,__irq2);
	set_irq(IDT,0x33,__irq3);
	set_irq(IDT,0x34,__irq4);
	set_irq(IDT,0x35,__irq5);
	set_irq(IDT,0x36,__irq6);
	set_irq(IDT,0x37,__irq7);
	set_irq(IDT,0x38,__irq8);
	set_irq(IDT,0x39,__irq9);
	set_irq(IDT,0x3A,__irq10);
	set_irq(IDT,0x3B,__irq11);
	set_irq(IDT,0x3C,__irq12);
	set_irq(IDT,0x3D,__irq13);
	set_irq(IDT,0x3E,__irq14);
	set_irq(IDT,0x3F,__irq15);

#ifdef __SMP__
	/* Inter-processor interrupt vectors. */
	set_irq(IDT,0x40,__ipi_cf);
	set_irq(IDT,0x41,__ipi_tlb);
	set_irq(IDT,0x42,__ipi_pte);
	set_irq(IDT,0x43,__ipi_resched);
	set_irq(IDT,0x44,__ipi_stop);
#endif

	/* Load IDTR (limit 0x3FF = 128 descriptors * 8 bytes - 1). */
	i386lidt((uint32) IDT, 0x3FF);

	/* Remap PIC vectors away from the exception range, then open the
	 * timer line and the slave-PIC cascade. */
	remap_irqs();
	unmask_irq(0);
	unmask_irq(2);
}
void irq_enable(struct irq_desc *desc) { if (!irqd_irq_disabled(&desc->irq_data)) { unmask_irq(desc); } else { irq_state_clr_disabled(desc); if (desc->irq_data.chip->irq_enable) { desc->irq_data.chip->irq_enable(&desc->irq_data); irq_state_clr_masked(desc); } else { unmask_irq(desc); } } }
/*
 * do_IRQ handles IRQ's that have been installed without the
 * SA_INTERRUPT flag: it uses the full signal-handling return
 * and runs with other interrupts enabled. All relatively slow
 * IRQ's should use this format: notably the keyboard/timer
 * routines.
 *
 * Sequence: account the IRQ, mask the line, walk the shared-handler
 * chain with interrupts possibly re-enabled, then unmask. The line
 * stays masked for the whole handler chain so the same IRQ cannot
 * re-enter while being serviced.
 */
static void do_IRQ(int irq, struct pt_regs * regs)
{
	struct irqaction *action;
	int do_random, cpu;

	cpu = smp_processor_id();
	irq_enter(cpu);
	kstat.irqs[cpu][irq]++;

	/* Keep this line quiet while its handlers run. */
	mask_irq(irq);
	action = *(irq + irq_action);
	if (action) {
		/* Slow handlers run with interrupts enabled. */
		if (!(action->flags & SA_INTERRUPT))
			__sti();
		/* Re-read: the chain may have changed while we had
		 * interrupts enabled. */
		action = *(irq + irq_action);
		do_random = 0;
		do {
			/* Accumulate flags so we add entropy once if any
			 * handler in the chain requested it. */
			do_random |= action->flags;
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
		if (do_random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(irq);
		__cli();
	} else {
		printk("do_IRQ: Unregistered IRQ (0x%X) occured\n", irq);
	}
	unmask_irq(irq);
	irq_exit(cpu);
	/* unmasking and bottom half handling is done magically for us. */
}
/*
 * Program the i8254 PIT and hook the system tick.
 * Always returns 0.
 */
int timer_init(void)
{
	/* initialize timer 0 and 2
	 *
	 * Timer 0 is used to increment system_tick 1000 times/sec
	 * Timer 1 was used for DRAM refresh in early PC's
	 * Timer 2 is used to drive the speaker
	 * (to start a beep: write 3 to port 0x61,
	 * to stop it again: write 0)
	 */
	/* Counter 0: rate generator (mode 2), write LSB then MSB. */
	outb(PIT_CMD_CTR0 | PIT_CMD_BOTH | PIT_CMD_MODE2,
	     PIT_BASE + PIT_COMMAND);
	outb(TIMER0_VALUE & 0xff, PIT_BASE + PIT_T0);
	outb(TIMER0_VALUE >> 8, PIT_BASE + PIT_T0);

	/* Counter 2: square-wave generator (mode 3) for the speaker. */
	outb(PIT_CMD_CTR2 | PIT_CMD_BOTH | PIT_CMD_MODE3,
	     PIT_BASE + PIT_COMMAND);
	outb(TIMER2_VALUE & 0xff, PIT_BASE + PIT_T2);
	outb(TIMER2_VALUE >> 8, PIT_BASE + PIT_T2);

	/* Attach the tick handler to IRQ0 and enable the line. */
	irq_install_handler(0, timer_isr, NULL);
	unmask_irq(0);
	timer_init_done = 1;

	return 0;
}
void do_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs) { unsigned long bit; int irq; int cpu = smp_processor_id(); #ifdef DEBUG_IRQ if (mask != (1L << MAX_CPU_IRQ)) printk("do_irq_mask %08lx %p %p\n", mask, region, regs); #endif for(bit=(1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) { int irq_num; if(!(bit&mask)) continue; irq_num = region->data.irqbase + irq; ++kstat.irqs[cpu][IRQ_FROM_REGION(CPU_IRQ_REGION) | irq]; if (IRQ_REGION(irq_num) != CPU_IRQ_REGION) ++kstat.irqs[cpu][irq_num]; mask_irq(irq_num); do_irq(®ion->action[irq], irq_num, regs); unmask_irq(irq_num); } }
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupt occures on the falling and/or rising edge of a hardware
 * signal. The occurence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires to reenable the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		     !desc->action)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		mask_ack_irq(desc, irq);
		goto out_unlock;
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	if (desc->chip->ack)
		desc->chip->ack(irq);

	/* Mark the IRQ currently in progress.*/
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		/* Action may have been removed while the lock was
		 * dropped below - keep the line masked then. */
		if (unlikely(!action)) {
			mask_irq(desc, irq);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely((desc->status &
			      (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			     (IRQ_PENDING | IRQ_MASKED))) {
			unmask_irq(desc, irq);
		}

		desc->status &= ~IRQ_PENDING;
		/* Run the action chain unlocked; a new edge may set
		 * IRQ_PENDING again meanwhile, which re-runs the loop. */
		raw_spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		raw_spin_lock(&desc->lock);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	raw_spin_unlock(&desc->lock);
}
/*
 * Set up the AMD Elan SC520 general-purpose timers for a 1 kHz system
 * tick on GP Timer 1, prescaled by GP Timer 2. Always returns 0.
 */
int timer_init(void)
{
	/* Register the SC520 specific timer interrupt handler */
	register_timer_isr(sc520_timer_isr);

	/* Install interrupt handler for GP Timer 1 */
	irq_install_handler (0, timer_isr, NULL);

	/* Map GP Timer 1 to Master PIC IR0 */
	writeb(0x01, &sc520_mmcr->gp_tmr_int_map[1]);

	/* Disable GP Timers 1 & 2 - Allow configuration writes */
	writew(0x4000, &sc520_mmcr->gptmr1ctl);
	writew(0x4000, &sc520_mmcr->gptmr2ctl);

	/* Reset GP Timers 1 & 2 */
	writew(0x0000, &sc520_mmcr->gptmr1cnt);
	writew(0x0000, &sc520_mmcr->gptmr2cnt);

	/* Setup GP Timer 2 as a 100kHz (10us) prescaler */
	writew(83, &sc520_mmcr->gptmr2maxcmpa);
	writew(0xc001, &sc520_mmcr->gptmr2ctl);

	/* Setup GP Timer 1 as a 1000 Hz (1ms) interrupt generator,
	 * clocked from Timer 2's 100 kHz output (100 * 10us = 1ms). */
	writew(100, &sc520_mmcr->gptmr1maxcmpa);
	writew(0xe009, &sc520_mmcr->gptmr1ctl);

	/* Open the timer line at the PIC. */
	unmask_irq(0);

	/* Clear the GP Timer 1 status register to get the show rolling*/
	writeb(0x02, &sc520_mmcr->gptmrsta);

	return 0;
}
static inline void device_interrupt(int irq, int ack, struct pt_regs * regs) { struct irqaction * action; if ((unsigned) irq > NR_IRQS) { printk("device_interrupt: unexpected interrupt %d\n", irq); return; } kstat.interrupts[irq]++; action = irq_action[irq]; /* * For normal interrupts, we mask it out, and then ACK it. * This way another (more timing-critical) interrupt can * come through while we're doing this one. * * Note! A irq without a handler gets masked and acked, but * never unmasked. The autoirq stuff depends on this (it looks * at the masks before and after doing the probing). */ mask_irq(ack); ack_irq(ack); if (!action) return; if (action->flags & SA_SAMPLE_RANDOM) add_interrupt_randomness(irq); do { action->handler(irq, action->dev_id, regs); action = action->next; } while (action); unmask_irq(ack); }
/*
 * BSP_disable_irq_at_pic - disable @irq at the interrupt controller.
 *
 * Translates the RTEMS irq number into a controller vector index and
 * masks that line. Always returns 0.
 *
 * Fix: the body called unmask_irq(), which ENABLES the line - the
 * opposite of what this function's name and its callers expect;
 * changed to mask_irq().
 */
int BSP_disable_irq_at_pic(const rtems_irq_number irq)
{
	uint16_t vec_idx = irq - Score_IRQ_First;
	mask_irq( vec_idx );
	return 0;
}
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char * devname, void *dev_id) { int shared = 0; struct irqaction * action, **p; unsigned long flags; if (irq >= NR_IRQS) return -EINVAL; if (IS_RESERVED_IRQ(irq)) return -EINVAL; if (!handler) return -EINVAL; p = irq_action + irq; action = *p; if (action) { /* Can't share interrupts unless both agree to */ if (!(action->flags & irqflags & SA_SHIRQ)) return -EBUSY; /* Can't share interrupts unless both are same type */ if ((action->flags ^ irqflags) & SA_INTERRUPT) return -EBUSY; /* add new interrupt at end of irq queue */ do { p = &action->next; action = *p; } while (action); shared = 1; } action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return -ENOMEM; if (irqflags & SA_SAMPLE_RANDOM) rand_initialize_irq(irq); action->handler = handler; action->flags = irqflags; action->mask = 0; action->name = devname; action->next = NULL; action->dev_id = dev_id; save_flags(flags); cli(); *p = action; if (!shared) unmask_irq(irq); restore_flags(flags); return 0; }
/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require to mask the interrupt and unmask
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask+ack first: a level line keeps asserting until the device
	 * is serviced, so silence it before doing anything else. */
	mask_ack_irq(desc);

	/* Already being handled (e.g. on another CPU)? Let the
	 * spurious-irq poller decide whether to bail out. */
	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

	/* Re-open the line unless it was disabled meanwhile or a
	 * oneshot (threaded) handler wants it to stay masked. */
	if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
/*
 * enable_irq - unmask an interrupt line.
 *
 * The unmask is performed with local interrupts disabled so the mask
 * register update cannot race with an interrupt on this CPU.
 */
void enable_irq(unsigned int irq_nr)
{
	unsigned long cpu_flags;

	save_and_cli(cpu_flags);
	unmask_irq(irq_nr);
	restore_flags(cpu_flags);
}
/*
 * Set up the AMD Elan SC520 general-purpose timers (MMCR word/byte
 * accessor variant) for a 1 kHz tick on GP Timer 1, prescaled by GP
 * Timer 2. Always returns 0.
 */
int timer_init(void)
{
	/* Map GP Timer 1 to Master PIC IR0 */
	write_mmcr_byte (SC520_GPTMR1MAP, 0x01);

	/* Disable GP Timers 1 & 2 - Allow configuration writes */
	write_mmcr_word (SC520_GPTMR1CTL, 0x4000);
	write_mmcr_word (SC520_GPTMR2CTL, 0x4000);

	/* Reset GP Timers 1 & 2 */
	write_mmcr_word (SC520_GPTMR1CNT, 0x0000);
	write_mmcr_word (SC520_GPTMR2CNT, 0x0000);

	/* Setup GP Timer 2 as a 100kHz (10us) prescaler */
	write_mmcr_word (SC520_GPTMR2MAXCMPA, 83);
	write_mmcr_word (SC520_GPTMR2CTL, 0xc001);

	/* Setup GP Timer 1 as a 1000 Hz (1ms) interrupt generator,
	 * counting 100 of Timer 2's 10us pulses per tick. */
	write_mmcr_word (SC520_GPTMR1MAXCMPA, 100);
	write_mmcr_word (SC520_GPTMR1CTL, 0xe009);

	/* Clear the GP Timers status register */
	write_mmcr_byte (SC520_GPTMRSTA, 0x07);

	/* Register the SC520 specific timer interrupt handler */
	register_timer_isr (sc520_timer_isr);

	/* Install interrupt handler for GP Timer 1 */
	irq_install_handler (0, timer_isr, NULL);

	/* Open the timer line at the PIC. */
	unmask_irq (0);

	return 0;
}
/*
 * i8254 PIT tick handler: advance the global tick counter, re-enable
 * the timer line, then let another thread run.
 *
 * Fix: removed the redundant "(void) irq;" unused-parameter cast -
 * irq IS used by the unmask_irq() call right below it.
 *
 * NOTE(review): unmasking here before yield() suggests the dispatcher
 * masks the line before calling us and yield() may not return
 * promptly - confirm against the IRQ dispatch path.
 */
static void i8254_interrupt_handler(int irq)
{
	++jiffies;
	unmask_irq(irq);
	yield();
}
/*
 * Enable the CPU timer interrupt, with local interrupts disabled
 * around the mask-register update.
 *
 * NOTE(review): unmask_irq() is handed a bit mask (1<<EXT_IRQ5_TO_IP)
 * rather than a plain irq number - presumably this platform's
 * unmask_irq takes an interrupt-pending bit mask; confirm against
 * its definition.
 */
void enable_cpu_timer(void)
{
	unsigned long flags;

	save_and_cli(flags);
	unmask_irq(1<<EXT_IRQ5_TO_IP); /* timer interrupt */
	restore_flags(flags);
}
/*
 * enable_irq - unmask an interrupt line.
 *
 * Local interrupts are disabled around the unmask so the operation
 * cannot race with an interrupt on this CPU; the previous interrupt
 * state is restored afterwards.
 */
void enable_irq(unsigned int irq_nr)
{
	unsigned long saved_flags;

	local_save_flags(saved_flags);
	local_irq_disable();
	unmask_irq(irq_nr);
	local_irq_restore(saved_flags);
}
/*
 * unmask_threaded_irq - re-enable an irq after its threaded handler.
 *
 * Chips flagged IRQCHIP_EOI_THREADED defer the EOI until the thread
 * has finished, so issue it here before unmasking the line.
 */
void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *c = desc->irq_data.chip;

	if (c->flags & IRQCHIP_EOI_THREADED)
		c->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}
/*
 * unmask_irq_count - reference-counted unmask.
 *
 * Several irq numbers can share one priority level (pil); the line is
 * physically unmasked only on the 0 -> 1 transition of that level's
 * use count. Runs with local interrupts disabled.
 */
void unmask_irq_count(int irq_nr)
{
	unsigned long flags;
	int pil = irq_to_pil(irq_nr);

	save_and_cli(flags);
	if (pil_in_use[pil]++ == 0)
		unmask_irq(irq_nr);
	restore_flags(flags);
}
/*
 * register_irq_handler - install @isr for hardware @irq.
 *
 * Records the handler, wires the IDT entry for the irq's vector
 * (offset past the CPU exception range) and unmasks the line.
 */
void register_irq_handler(int irq, irq_t isr)
{
	int vector;

	DBG_ASSERT(irq < IDT_IRQS);

	vector = irq + IDT_EXCEPTIONS;
	handler[irq] = isr;
	setup_irq(isr_entry[vector], vector);
	unmask_irq(irq);
}
/*
 * i8254 PIT tick handler: advance the tick counter; every half second
 * (HZ/2 ticks) re-enable the timer line and invoke the scheduler.
 *
 * Fix: removed leftover printf() debug tracing - console output from
 * an interrupt handler is slow and was clearly instrumentation, not
 * behavior. Also dropped the redundant "(void) irq;" cast, since irq
 * is used by unmask_irq().
 *
 * NOTE(review): unmask_irq() runs only on the HZ/2 boundary; the
 * dispatcher presumably unmasks on the other ticks - confirm.
 */
static void i8254_interrupt_handler(int irq)
{
	++jiffies;
	if (jiffies % (HZ / 2) == 0) {
		unmask_irq(irq);
		schedule();
	}
}
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupt occures on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires to reenable the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/* In progress elsewhere / polling in flight: latch the edge as
	 * pending and silence the line. */
	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If its disabled or no action available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq: ack first so the controller can latch
	 * the next edge while we run the handler. */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action removed while we ran unlocked: keep masked. */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupt occures on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires to reenable the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 *
 * Returns true when at least one event was actually handled.
 */
bool handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	bool handled = false;

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) ||
		     !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq: ack first so the controller can latch
	 * the next edge while we run the handler. */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action removed meanwhile: keep the line masked. */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);
		handled = true;

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
	return handled;
}
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq().
 *
 * We need to unmask in the following cases:
 *  - Standard level irq (IRQF_ONESHOT is not set)
 *  - Oneshot irq which did not wake the thread (caused by a
 *    spurious interrupt or a primary handler handling it
 *    completely).
 * Guard-clause form: bail out as soon as any condition forbids the
 * unmask.
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	if (irqd_irq_disabled(&desc->irq_data))
		return;
	if (!irqd_irq_masked(&desc->irq_data))
		return;
	if (desc->threads_oneshot)
		return;

	unmask_irq(desc);
}
/*
 * cool_down_thread - park the CPU in a low-power idle loop.
 *
 * Switches the current thread bookkeeping to the head of the thread
 * info list, re-enables the timer line (IRQ 0) and CPU interrupts,
 * then halts forever; each interrupt wakes the CPU, gets serviced,
 * and the loop halts again. Never returns.
 */
void cool_down_thread()
{
	__current_tib = &__start_of_thread_info_list;
	__current_thread = __start_of_thread_info_list.ptrToThread;

	unmask_irq(0);
	asm("sti");

	while (1)
		asm("hlt");
}
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupt occures on the falling and/or rising edge of a hardware
 * signal. The occurence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires to reenable the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
		      !desc->action))) {
		if (!irq_check_poll(desc)) {
			/* Keep the legacy status flags in sync too. */
			irq_compat_set_pending(desc);
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq: ack first so the controller can latch
	 * the next edge while we run the handler. */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action removed meanwhile: keep the line masked. */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!(desc->istate & IRQS_DISABLED) &&
			    (desc->istate & IRQS_MASKED))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !(desc->istate & IRQS_DISABLED));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
// Installs a handler if needed to watch an IRQ. // You must also send the right key for the number. // this allows making sure that only one driver at // a time is watching an IRQ and that only certified // drivers can watch the IRQ. // Returns the address of to the variable that holds // the number of times the IRQ has fired, 0 on failure. // // NOTE: IRQ 0 cannot be watched this way u_long watch_irq(u_char irq_number, u_long key) { if(irq_keys[irq_number] == key && irq_number != 0) { irq_watching[irq_number] = 0; irq_keys[irq_number] += 1; // the keys table is also used to keep track of what IRQs are being currently watched enable_gate(irq_number+0x40); unmask_irq(irq_number); return(&irq_watching[irq_number]); } else { return(0); }; };
/*
 * irq_ack - process a thread's acknowledgement of an IRQ.
 *
 * A zero status means the IRQ was handled, so the line is re-enabled;
 * any other status is logged as unhandled. Always returns 0. The irq
 * lock is held across the state change; single unlock point.
 */
int irq_ack( struct thread *t, int irq, int status )
{
	acquire_spinlock( &irq_lock );

	dmesg("%!ACK with status %i for %i\n", status, irq );

	if ( status == 0 )
		unmask_irq( irq );
	else
		dmesg("%!Unhandled IRQ %i\n", irq );

	release_spinlock( &irq_lock );
	return 0;
}
/* interrupt service routine
 *
 * Services the DM9161 MAC: the line is masked for the duration, the
 * pending status is read and written back (write-1-to-clear,
 * presumably - confirm against the MAC_INTSRC spec), one event class
 * is handled per invocation (else-if chain, RX takes priority), and
 * remaining status is drained before unmasking.
 */
void rt_dm9161_isr(int irqno)
{
	unsigned long intstatus;
	rt_uint32_t address;

	/* Keep the MAC line quiet while we service it. */
	mask_irq(INTSRC_MAC);

	intstatus = sep_emac_read(MAC_INTSRC);
	sep_emac_write(MAC_INTSRC,intstatus);

	/*Receive complete*/
	if(intstatus & 0x04)
	{
		/* Hand the frame to the network stack. */
		eth_device_ready(&(dm9161_device.parent));
	}
	/*Receive error*/
	else if(intstatus & 0x08)
	{
		rt_kprintf("Receive error\n");
	}
	/*Transmit complete*/
	else if(intstatus & 0x03)
	{
		/* Clear the descriptor two slots behind tx_index
		 * (ring wrap handled explicitly for indices 0 and 1). */
		if(dm9161_device.tx_index == 0)
			address = (MAC_TX_BD +(MAX_TX_DESCR-2)*8);
		else if(dm9161_device.tx_index == 1)
			address = (MAC_TX_BD +(MAX_TX_DESCR-1)*8);
		else
			address = (MAC_TX_BD + dm9161_device.tx_index*8-16);
		sep_emac_write(address,0x0);
	}
	else if (intstatus & 0x10)
	{
		rt_kprintf("ROVER ERROR\n");
	}

	/* Drain any status bits that latched while we were working. */
	while(intstatus)
	{
		sep_emac_write(MAC_INTSRC,intstatus);
		intstatus = sep_emac_read(MAC_INTSRC);
	}

	unmask_irq(INTSRC_MAC);
}
void isr_common_handler(struct thread_regs *ctx) { const int intno = ctx->intno; if (intno < IDT_EXCEPTIONS) { default_exception_handler(ctx); return; } const int irqno = intno - IDT_EXCEPTIONS; const irq_t irq = handler[irqno]; mask_irq(irqno); ack_irq(irqno); if (irq) irq(irqno); unmask_irq(irqno); }