/*
 * Unmask an EV64120 interrupt line by setting the matching IM bit(s)
 * in the CP0 Status register.
 *
 * set_c0_status() is a non-atomic read-modify-write of CP0 Status, so
 * the update must be fenced against interrupts or a concurrent handler
 * could clobber the change (the later in-tree revision of this routine
 * added exactly this local_irq_save/restore pairing).
 */
static void enable_ev64120_irq(unsigned int irq_nr)
{
	unsigned long flags;

	local_irq_save(flags);
	if (irq_nr >= 8)	/* All PCI interrupts are on line 5 or 2 */
		set_c0_status(9 << 10);
	else
		set_c0_status(1 << (irq_nr + 8));
	local_irq_restore(flags);
}
/*
 * Unmask a BRCM interrupt: irqs 0-7 map directly onto the CP0 Status
 * IM bits; everything else arrives via the shared IE_IRQ0 line.
 */
static void enable_brcm_irq(unsigned int irq)
{
	if (irq >= 8) {
		set_c0_status(IE_IRQ0);
		return;
	}
	set_c0_status(1 << (irq + 8));
}
/*
 * Unmask one of the two SEAD UART interrupts in CP0 Status.
 * Any other irq number is silently ignored.
 */
void enable_sead_irq(unsigned int irq_nr)
{
	switch (irq_nr) {
	case SEADINT_UART0:
		set_c0_status(0x00000400);	/* IM bit for UART0 */
		break;
	case SEADINT_UART1:
		set_c0_status(0x00000800);	/* IM bit for UART1 */
		break;
	default:
		break;
	}
}
/*
 * arch_init_irq() - platform interrupt setup for a GIC/GCMP system.
 * Initializes the CPU interrupt controller, locates and programs the
 * GIC (optionally with IPI vectors), and unmasks the CPU lines.
 *
 * NOTE(review): this definition is truncated in this chunk — the final
 * "else" (non-GIC) branch continues beyond the visible source.
 */
void __init arch_init_irq(void)
{
	int i;
	unsigned int gic_rev;

	mips_cpu_irq_init();

	if (cpu_has_vint)
		set_vi_handler(cp0_compare_irq, mips_timer_dispatch);

	if (gcmp_present) {
		/* Tell the GCMP where the GIC lives and enable it */
		GCMPGCB(GICBA) = GIC_BASE_ADDR | GCMP_GCB_GICBA_EN_MSK;
		gic_present = 1;
	}
	if (gic_present) {
#if defined (CONFIG_MIPS_GIC_IPI)
		gic_call_int_base = GIC_IPI_CALL_VPE0;
		gic_resched_int_base = GIC_IPI_RESCHED_VPE0;
		fill_ipi_map();
#endif
		gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
			 ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
		GICREAD(GIC_REG(SHARED, GIC_SH_REVISIONID), gic_rev);
		printk("MIPS GIC RevID: %d.%d\n",
		       (gic_rev >> 8) & 0xff, gic_rev & 0xff);
		if (cpu_has_vint) {
			pr_info("Setting up vectored interrupts\n");
			/* Vector numbers are CPU int + 2 (sw ints take 0/1) */
			set_vi_handler(2 + GIC_CPU_INT0, gic_irq_dispatch); // CPU
#if defined (CONFIG_MIPS_GIC_IPI)
			set_vi_handler(2 + GIC_CPU_INT1, gic_irq_dispatch); // IPI resched
			set_vi_handler(2 + GIC_CPU_INT2, gic_irq_dispatch); // IPI call
#endif
			set_vi_handler(2 + GIC_CPU_INT3, gic_irq_dispatch); // FE
			set_vi_handler(2 + GIC_CPU_INT4, gic_irq_dispatch); // PCIe
		}
#if defined (CONFIG_MIPS_GIC_IPI)
		set_c0_status(STATUSF_IP7 | STATUSF_IP6 | STATUSF_IP5 |
			      STATUSF_IP2 | STATUSF_IP4 | STATUSF_IP3);
		/* setup ipi interrupts */
		for (i = 0; i < nr_cpu_ids; i++) {
			arch_init_ipiirq(MIPS_GIC_IRQ_BASE + GIC_RESCHED_INT(i),
					 &irq_resched);
			arch_init_ipiirq(MIPS_GIC_IRQ_BASE + GIC_CALL_INT(i),
					 &irq_call);
		}
#else
		set_c0_status(STATUSF_IP7 | STATUSF_IP6 | STATUSF_IP5 |
			      STATUSF_IP2);
#endif
		/* set hardware irq, mapped to GIC shared (skip 0, 1, 2, 5, 7) */
		for (i = 3; i <= 31; i++) {
			if (i != 5 && i != 7)
				irq_set_handler(MIPS_GIC_IRQ_BASE + i,
						handle_level_irq);
		}
	} else {
/*
 * Unmask an EV64120 interrupt line in CP0 Status, with interrupts
 * disabled around the non-atomic read-modify-write.
 */
static void enable_ev64120_irq(unsigned int irq_nr)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	if (irq_nr < 8)
		set_c0_status(1 << (irq_nr + 8));
	else
		set_c0_status(9 << 10);	/* All PCI interrupts are on line 5 or 2 */
	local_irq_restore(irqflags);
}
/*
 * IRQ6 functions
 */
/*
 * Enable MIPS interrupt line 6 plus the DMA-related sources that feed
 * it.  The irq argument is currently unused; this routine always
 * unmasks the same fixed set of sources.
 */
static void brcm_mips_int6_enable(unsigned int irq)
{
	set_c0_status(STATUSF_IP6);
	INTC->extIrqMask |= (EXTIRQ4_EN);

	/* enable mips int2 for DMA ints. */
	set_c0_status(STATUSF_IP2);

	/* enable ISB DMA EBI RCV (DMA channel 1) interrupt for DMA receive */
	INTC->IrqMask |= DMA_IRQ;
	DMA_CHAN[EBI_RX_CHAN].intMask |= DMA_BUFF_DONE;

	/* enable EBI TX interrupt. */
	INTC->IrqMask |= EBI_TX_IRQ;
}
/*
 * init_IRQ() - Danube interrupt setup: mask every ICU source, install
 * the low-level dispatcher, put every irq_desc entry into a disabled
 * state with our irq type, then unmask all six CPU interrupt lines.
 */
void __init init_IRQ(void)
{
	int i;

	DANUBE_INT_DMSG("init_IRQ\n");

	board_be_handler = &danube_be_handler;

	init_generic_irq();

	/* mask all interrupt sources */
	*DANUBE_ICU_IM0_IER = 0;
	*DANUBE_ICU_IM1_IER = 0;
	*DANUBE_ICU_IM2_IER = 0;
	*DANUBE_ICU_IM3_IER = 0;
	*DANUBE_ICU_IM4_IER = 0;

	/* Now safe to set the exception vector. */
	set_except_vector(0, mipsIRQ);

	/* Start every irq disabled (depth 1) with the Danube irq type */
	for (i = 0; i <= INT_NUM_IM4_IRL31; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = 0;
		irq_desc[i].depth = 1;
		irq_desc[i].handler = &danube_irq_type;
	}

	/* Unmask all six CPU interrupt lines in CP0 Status */
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);

#ifdef CONFIG_KGDB
	/* Hook the debug traps and drop into the debugger */
	set_debug_traps();
	breakpoint();
#endif
}
/*
 * Unmask an interrupt source on Octeon.
 *
 * irq 0-7   : core-local CP0 lines — ack any latched cause bit, then
 *             set the IM bit in Status.
 * irq 8-71  : CIU sources — set the per-core enable bit in EN0.
 * irq 80-87 : master 8259 PIC (mask register at port 0x21).
 * irq 88-95 : slave 8259 PIC (mask register at port 0xa1).
 *
 * Fix: the old code tested only "irq < 88" for the master 8259, so
 * irqs 72-79 computed 1 << (irq - 80) with a NEGATIVE shift count —
 * undefined behavior.  Both 8259 branches now check a lower bound as
 * well; irqs 72-79 and >= 96 are ignored.
 */
static void octeon_unmask_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&octeon_irq_lock, flags);
	if (irq < 8) {
		/* Core local interrupts, irq 0-7 */
		clear_c0_cause(0x100 << irq);
		set_c0_status(0x100 << irq);
	} else if (irq < 72) {
		/* Interrupts from the CIU, irq 8-71 */
		const uint64_t coreid = octeon_get_core_num();
		uint64_t bit = (irq - 8) & 0x3f;	/* Bit 0-63 of EN0 */
		uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid * 2));

		en0 |= 1ull << bit;
		octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid * 2), en0);
	} else if (irq >= 80 && irq < 88) {
		/* Interrupts from the master 8259, irq 80-87 */
		outb(inb(0x21) & ~(1 << (irq - 80)), 0x21);
	} else if (irq >= 88 && irq < 96) {
		/* Interrupts from the slave 8259, irq 88-95 */
		outb(inb(0xa1) & ~(1 << (irq - 88)), 0xa1);
	}
	spin_unlock_irqrestore(&octeon_irq_lock, flags);
}
/*
 * Startup hook for a BCM63xx external interrupt: unmask the matching
 * CP0 Status IM bit, settle the hazard, then unmask at the chip level.
 * Always reports success.
 */
static unsigned int bcm63xx_external_irq_startup(struct irq_data *d)
{
	unsigned int line = d->irq - IRQ_MIPS_BASE;

	set_c0_status(0x100 << line);
	irq_enable_hazard();
	bcm63xx_external_irq_unmask(d);
	return 0;
}
/*
 * Unmask MIPS interrupt line 2 (IP2).  The per-device INTC unmasking
 * below is compiled out (#if 0), so only the CP0 Status bit is set and
 * the irq argument is effectively unused.
 */
static void brcm_mips_int2_enable(unsigned int irq)
{
	set_c0_status(STATUSF_IP2);
#if 0 /* JPF */
	switch(irq) {
	case BCM_LINUX_IDE0_IRQ:
		INTC->IrqMask |= (IDE0_IRQ);
		break;
	case BCM_LINUX_IDE1_IRQ:
		INTC->IrqMask |= (IDE1_IRQ);
		break;
	case BCM_LINUX_USB_HOST1_IRQ:
		INTC->IrqMask |= (USB_HOST1_IRQ);
		break;
	case BCM_LINUX_UARTA_IRQ:
		// This has to change depending on the console tty device we choose to use
		// (defined in cmdline).
		INTC->IrqMask |= UPG_IRQ;
		UPG_INTC->irqen_l |= UPG_UA_IRQ;
		break;
	default:
		break;
	}
#endif
}
/*
 * Enable (unmask) a BRCM interrupt.
 *
 * CPU-level irqs (<= MIPS_TIMER_INT) are unmasked directly in CP0
 * Status, except the timer interrupt which is left untouched.  All
 * other irqs are looked up in irq_table[] by vector range and unmasked
 * in the owning controller's enable register under brcm_irqlock; if
 * the entry cascades through a lower-level controller, that cascade
 * bit is unmasked too.  Unknown irqs are reported via printk.
 */
void enable_brcm_irq(unsigned int irq)
{
	unsigned long flags;
	int i, bit;

	if (irq <= MIPS_TIMER_INT) {
		/* Timer interrupt is managed elsewhere — don't touch it */
		if (irq == MIPS_TIMER_INT)
			return;
		set_c0_status(1 << (STATUSB_IP0 + irq - INTERRUPT_ID_SOFTWARE_0));
		return;
	}

	/* Find the controller whose vector window contains this irq */
	for (i = 0; i < (sizeof(irq_table)/sizeof(irq_ctl *)); i++) {
		if (irq >= irq_table[i]->vstart && irq < irq_table[i]->vend) {
			bit = irq - irq_table[i]->vstart;
			spin_lock_irqsave(&brcm_irqlock, flags);
			*(irq_table[i]->enable) |= (1 << bit);
			if (irq_table[i]->lower_level) {
				/* Also unmask the cascade bit in the parent */
				*(irq_table[i]->lower_level->enable) |= 1 << irq_table[i]->lower_level_bit;
			}
			spin_unlock_irqrestore(&brcm_irqlock, flags);
			return;
		}
	}
	printk("ERROR in %s: unable to enable irq %d\n", __func__, irq);
}
/*
 * Per-CPU bringup for an SN hub slice: runs slice initialization once
 * per slice, wires up per-slice irq-level bookkeeping, IPIs and the
 * NMI handler, then unmasks the device interrupt lines.
 */
void __cpuinit per_cpu_init(void)
{
	int cpu = smp_processor_id();
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	cnodeid_t cnode = get_compact_nodeid();
	struct hub_data *hub = hub_data(cnode);
	struct slice_data *si = hub->slice + slice;
	int i;

	/* Only the first caller for each slice performs the setup */
	if (test_and_set_bit(slice, &hub->slice_map))
		return;

	/* Mask all CPU interrupt lines while configuring */
	clear_c0_status(ST0_IM);

	per_hub_init(cnode);

	/* No interrupt levels are mapped to irqs yet */
	for (i = 0; i < LEVELS_PER_SLICE; i++)
		si->level_to_irq[i] = -1;

	/*
	 * Cache the slice data pointer so the local hub's data can be
	 * found as fast as possible.
	 */
	cpu_data[cpu].data = si;

	cpu_time_init();
	install_ipi();

	/* Install our NMI handler for this slice */
	install_cpu_nmi_handler(cputoslice(cpu));

	set_c0_status(SRB_DEV0 | SRB_DEV1);
}
/*
 * arch_init_irq() - Alchemy Au1xxx interrupt setup: install the
 * low-level vector, reset both interrupt controllers to a quiescent
 * state, program the fixed IC0 map and the board-specific map, unmask
 * the CPU lines, and finally run any board-supplied hook.
 */
void __init arch_init_irq(void)
{
	int i;
	unsigned long cp0_status;
	au1xxx_irq_map_t *imp;
	extern au1xxx_irq_map_t au1xxx_irq_map[];
	extern au1xxx_irq_map_t au1xxx_ic0_map[];
	extern int au1xxx_nr_irqs;
	extern int au1xxx_ic0_nr_irqs;

	/* NOTE(review): cp0_status is read here but never used below */
	cp0_status = read_c0_status();

	set_except_vector(0, au1000_IRQ);

	/* Initialize interrupt controllers to a safe state. */
	au_writel(0xffffffff, IC0_CFG0CLR);
	au_writel(0xffffffff, IC0_CFG1CLR);
	au_writel(0xffffffff, IC0_CFG2CLR);
	au_writel(0xffffffff, IC0_MASKCLR);
	au_writel(0xffffffff, IC0_ASSIGNSET);
	au_writel(0xffffffff, IC0_WAKECLR);
	au_writel(0xffffffff, IC0_SRCSET);
	au_writel(0xffffffff, IC0_FALLINGCLR);
	au_writel(0xffffffff, IC0_RISINGCLR);
	au_writel(0x00000000, IC0_TESTBIT);

	au_writel(0xffffffff, IC1_CFG0CLR);
	au_writel(0xffffffff, IC1_CFG1CLR);
	au_writel(0xffffffff, IC1_CFG2CLR);
	au_writel(0xffffffff, IC1_MASKCLR);
	au_writel(0xffffffff, IC1_ASSIGNSET);
	au_writel(0xffffffff, IC1_WAKECLR);
	au_writel(0xffffffff, IC1_SRCSET);
	au_writel(0xffffffff, IC1_FALLINGCLR);
	au_writel(0xffffffff, IC1_RISINGCLR);
	au_writel(0x00000000, IC1_TESTBIT);

	/* Initialize IC0, which is fixed per processor. */
	imp = au1xxx_ic0_map;
	for (i=0; i<au1xxx_ic0_nr_irqs; i++) {
		setup_local_irq(imp->im_irq, imp->im_type, imp->im_request);
		imp++;
	}

	/* Now set up the irq mapping for the board. */
	imp = au1xxx_irq_map;
	for (i=0; i<au1xxx_nr_irqs; i++) {
		setup_local_irq(imp->im_irq, imp->im_type, imp->im_request);
		imp++;
	}

	/* Unmask the CPU interrupt lines */
	set_c0_status(ALLINTS);

	/* Board specific IRQ initialization. */
	if (board_init_irq)
		(*board_init_irq)();
}
/*
 * Startup hook for a BCM63xx external interrupt (legacy unsigned-int
 * variant): unmask the CP0 Status IM bit for the line, settle the
 * hazard, then unmask at the chip level.  Always reports success.
 */
static unsigned int bcm63xx_external_irq_startup(unsigned int irq)
{
	unsigned int shift = irq - IRQ_MIPS_BASE;

	set_c0_status(0x100 << shift);
	irq_enable_hazard();
	bcm63xx_external_irq_unmask(irq);
	return 0;
}
/*
 * AR7 interrupt-controller bringup: quiesce the controller, record the
 * irq base, program channel routing and handlers for all 40 primary
 * (and 32 secondary) lines, hook the cascades and unmask IE_IRQ0.
 */
static void __init ar7_irq_init(int base)
{
	int chan;

	/* Disable interrupts and clear pending */
	writel(0xffffffff, REG(ECR_OFFSET(0)));
	writel(0xff, REG(ECR_OFFSET(32)));
	writel(0xffffffff, REG(SEC_ECR_OFFSET));
	writel(0xffffffff, REG(CR_OFFSET(0)));
	writel(0xff, REG(CR_OFFSET(32)));
	writel(0xffffffff, REG(SEC_CR_OFFSET));

	ar7_irq_base = base;

	for (chan = 0; chan < 40; chan++) {
		writel(chan, REG(CHNL_OFFSET(chan)));
		/* Primary IRQ's */
		set_irq_chip_and_handler(base + chan, &ar7_irq_type,
					 handle_level_irq);
		/* Secondary IRQ's */
		if (chan < 32)
			set_irq_chip_and_handler(base + chan + 40,
						 &ar7_sec_irq_type,
						 handle_level_irq);
	}

	setup_irq(2, &ar7_cascade_action);
	setup_irq(ar7_irq_base, &ar7_cascade_action);
	set_c0_status(IE_IRQ0);
}
/*
 * rs780e_init_irq() - route all eight HT1 interrupt lines to physical
 * core phy_core_id[0], unmask the low 16 HT interrupts, enable the IO
 * interrupt controller, then install the CPU-level dispatchers.
 */
void rs780e_init_irq(void)
{
	/* Route the HT interrupt to Core0 INT1 */
	INT_router_regs_HT1_int0 = 0x20 | (1 << phy_core_id[0]);
	INT_router_regs_HT1_int1 = 0x20 | (1 << phy_core_id[0]);
	INT_router_regs_HT1_int2 = 0x20 | (1 << phy_core_id[0]);
	INT_router_regs_HT1_int3 = 0x20 | (1 << phy_core_id[0]);
	INT_router_regs_HT1_int4 = 0x20 | (1 << phy_core_id[0]);
	INT_router_regs_HT1_int5 = 0x20 | (1 << phy_core_id[0]);
	INT_router_regs_HT1_int6 = 0x20 | (1 << phy_core_id[0]);
	INT_router_regs_HT1_int7 = 0x20 | (1 << phy_core_id[0]);

	/* Enable the all HT interrupt */
	HT_irq_enable_reg0 = 0x0000ffff;
	HT_irq_enable_reg1 = 0x00000000;
	HT_irq_enable_reg2 = 0x00000000;
	HT_irq_enable_reg3 = 0x00000000;
	HT_irq_enable_reg4 = 0x00000000;
	HT_irq_enable_reg5 = 0x00000000;
	HT_irq_enable_reg6 = 0x00000000;
	HT_irq_enable_reg7 = 0x00000000;

	/* Enable the IO interrupt controller */
	IO_control_regs_Intenset = IO_control_regs_Inten | (0xffff << 16);
	prom_printf("the new IO inten is %x\n", IO_control_regs_Inten);

	/* Sets the first-level interrupt dispatcher. */
	mips_cpu_irq_init();
#ifdef CONFIG_I8259
	init_i8259_irqs();
#endif
	/*
	 * NOTE(review): Status IP6 is unmasked here while the dispatcher
	 * is installed into mach_ip3 — confirm that this pairing is
	 * intentional for this board's interrupt routing.
	 */
	set_c0_status(STATUSF_IP6);
	mach_ip3 = dispatch_ht_irq;
}
/*
 * Per-CPU bringup for an SN hub slice: the first CPU to claim the
 * slice masks the CPU interrupt lines, initializes the hub, clears
 * the level-to-irq map, wires up timekeeping, IPIs and the NMI
 * handler, then unmasks the device interrupt lines.
 */
void __cpuinit per_cpu_init(void)
{
	int level;
	int my_cpu = smp_processor_id();
	int my_slice = LOCAL_HUB_L(PI_CPU_NUM);
	cnodeid_t node = get_compact_nodeid();
	struct hub_data *h = hub_data(node);
	struct slice_data *sd = h->slice + my_slice;

	/* Setup happens exactly once per slice */
	if (test_and_set_bit(my_slice, &h->slice_map))
		return;

	clear_c0_status(ST0_IM);

	per_hub_init(node);

	for (level = 0; level < LEVELS_PER_SLICE; level++)
		sd->level_to_irq[level] = -1;

	/*
	 * We use this so we can find the local hub's data as fast as only
	 * possible.
	 */
	cpu_data[my_cpu].data = sd;

	cpu_time_init();
	install_ipi();

	/* Install our NMI handler if symmon hasn't installed one. */
	install_cpu_nmi_handler(cputoslice(my_cpu));

	set_c0_status(SRB_DEV0 | SRB_DEV1);
}
/*
 * Interrupt setup for the QEMU board: fix the counter frequency,
 * bring up the emulated i8259 PICs, and unmask the relevant CP0
 * Status bits.
 */
void __init arch_init_irq(void)
{
	/* 100MHz */
	mips_hpt_frequency = QEMU_C0_COUNTER_CLOCK;

	init_i8259_irqs();

	/* 0x8400: presumably IP7 (timer) + IP2 (i8259 cascade) — verify */
	set_c0_status(0x8400);
}
/*
 * Interrupt setup: clear ERL and unmask IP2 in CP0 Status, then
 * register disabled level-triggered handlers for every INTC, DMAC and
 * GPIO interrupt.
 */
void __init arch_init_irq(void)
{
	int irq;

	clear_c0_status(0xff04);	/* clear ERL */
	set_c0_status(0x0400);		/* set IP2 */

	/* INTC irqs 0..31 */
	for (irq = 0; irq < 32; irq++) {
		disable_intc_irq(irq);
		set_irq_chip_and_handler(irq, &intc_irq_type, handle_level_irq);
	}

	/* DMAC irqs */
	for (irq = IRQ_DMA_0; irq < IRQ_DMA_0 + NUM_DMA; irq++) {
		disable_dma_irq(irq);
		set_irq_chip_and_handler(irq, &dma_irq_type, handle_level_irq);
	}

	/* GPIO irqs */
	for (irq = IRQ_GPIO_0; irq < IRQ_GPIO_0 + NUM_GPIO; irq++) {
		disable_gpio_irq(irq);
		set_irq_chip_and_handler(irq, &gpio_irq_type, handle_level_irq);
	}
}
/*
 * Architecture entry point from the boot PROM: probe the CPU, run the
 * PROM setup, initialize the MMU/caches, normalize CP0 Status, and
 * hand off to start_kernel() (never returns).
 */
asmlinkage void __init init_arch(int argc, char **argv, char **envp, int *prom_vec)
{
	/* Determine which MIPS variant we are running on. */
	cpu_probe();

	prom_init(argc, argv, envp, prom_vec);
	cpu_report();

	/*
	 * Determine the mmu/cache attached to this machine, then flush the
	 * tlb and caches.  On the r4xx0 variants this also sets CP0_WIRED
	 * to zero.
	 */
	load_mmu();

	/*
	 * On IP27, I am seeing the TS bit set when the kernel is loaded.
	 * Maybe because the kernel is in ckseg0 and not xkphys? Clear it
	 * anyway ...
	 */
	clear_c0_status(ST0_BEV|ST0_TS|ST0_CU1|ST0_CU2|ST0_CU3);
	set_c0_status(ST0_CU0|ST0_KX|ST0_SX|ST0_FR);

	start_kernel();
}
/*
 * Early CP0 setup for a secondary CPU brought up by the boot PROM.
 */
void prom_init_secondary(void)
{
	/* Set the cache coherency attribute in CP0 Config to 0x2
	 * (NOTE(review): presumably the uncached/board-required mode —
	 * confirm against the SoC manual) */
	clear_c0_config(CONF_CM_CMASK);
	set_c0_config(0x2);

	/* Mask all interrupt lines, then set Status to 0x1ffff
	 * (NOTE(review): this sets the low 17 Status bits wholesale —
	 * confirm the intended IM/KX/SX/UX/IE combination) */
	clear_c0_status(ST0_IM);
	set_c0_status(0x1ffff);
}
/*
 * Unmask the CPU timer's interrupt line (external IRQ5 mapped to an
 * IP bit) in CP0 Status.
 */
void enable_cpu_timer(void)
{
	unsigned long saved;

	/* set_c0_status() is a read-modify-write; keep it interrupt-safe */
	local_irq_save(saved);
	set_c0_status(0x100 << EXT_IRQ5_TO_IP);
	local_irq_restore(saved);
}
/*
 * Post-config but pre-boot cleanup entry point
 */
static void __cpuinit msmtc_init_secondary(void)
{
	int vpe_id = read_c0_tcbind() & TCBIND_CURVPE;

	/* Malta I/O interrupts (IP2) stay with VPE 0 only */
	if (vpe_id) {
		/* Ideally done just once per VPE, but... */
		clear_c0_status(ST0_IM);
		set_c0_status((0x100 << cp0_compare_irq) |
			      (0x100 << MIPS_CPU_IPI_IRQ));
		if (cp0_perfcount_irq >= 0)
			set_c0_status(0x100 << cp0_perfcount_irq);
	}
	smtc_init_secondary();
}
/*
 * Unmask an EV96100 interrupt line in CP0 Status, fencing off
 * interrupts around the non-atomic read-modify-write.
 */
static inline void enable_ev96100_irq(unsigned int irq_nr)
{
	unsigned long saved_flags;

	local_irq_save(saved_flags);
	set_c0_status(0x100 << irq_nr);
	local_irq_restore(saved_flags);
}
/*
 * Pre-SMP setup: unmask the IPI line (IP5) only when there is at
 * least one possible CPU — be paranoid about going SMP.
 */
static void __init yos_prepare_cpus(unsigned int max_cpus)
{
	if (cpus_weight(cpu_possible_map) != 0)
		set_c0_status(STATUSF_IP5);
}
/*
 * Post-config but pre-boot cleanup entry point for an SMTC secondary
 * VPE: only non-zero VPEs reprogram their interrupt mask here.
 */
static void __cpuinit msmtc_init_secondary(void)
{
	int myvpe;

	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
	if (myvpe != 0) {
		/* Ideally, this should be done only once per VPE, but... */
		clear_c0_status(ST0_IM);
		/* Unmask only the timer-compare and IPI lines */
		set_c0_status((0x100 << cp0_compare_irq) | (0x100 << MIPS_CPU_IPI_IRQ));
		if (cp0_perfcount_irq >= 0)
			set_c0_status(0x100 << cp0_perfcount_irq);
	}
	smtc_init_secondary();
}
/*
 * End-of-interrupt handler for a CIU1 interrupt; the irq argument is
 * unused because the whole line is re-enabled at once.
 */
static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);	/* 0x100 << 3 == Status IP3 bit */
}
/*
 * arch_init_irq() - VRC5477 interrupt setup: disable/route all
 * controller interrupts, set PCI interrupt attributes, enable a set of
 * error interrupts for debugging, bring up the i8259/CPU/VRC5477
 * controllers, and hook the two cascade actions.
 */
void __init arch_init_irq(void)
{
	/* by default, we disable all interrupts and route all vrc5477
	 * interrupts to pin 0 (irq 2) */
	ddb_out32(DDB_INTCTRL0, 0);
	ddb_out32(DDB_INTCTRL1, 0);
	ddb_out32(DDB_INTCTRL2, 0);
	ddb_out32(DDB_INTCTRL3, 0);

	clear_c0_status(0xff00);
	set_c0_status(0x0400);

	/* setup PCI interrupt attributes */
	set_pci_int_attr(PCI0, INTA, ACTIVE_LOW, LEVEL_SENSE);
	set_pci_int_attr(PCI0, INTB, ACTIVE_LOW, LEVEL_SENSE);
	/* Rockhopper II boards wire PCI0 INTC active-high */
	if (mips_machtype == MACH_NEC_ROCKHOPPERII)
		set_pci_int_attr(PCI0, INTC, ACTIVE_HIGH, LEVEL_SENSE);
	else
		set_pci_int_attr(PCI0, INTC, ACTIVE_LOW, LEVEL_SENSE);
	set_pci_int_attr(PCI0, INTD, ACTIVE_LOW, LEVEL_SENSE);
	set_pci_int_attr(PCI0, INTE, ACTIVE_LOW, LEVEL_SENSE);
	set_pci_int_attr(PCI1, INTA, ACTIVE_LOW, LEVEL_SENSE);
	set_pci_int_attr(PCI1, INTB, ACTIVE_LOW, LEVEL_SENSE);
	set_pci_int_attr(PCI1, INTC, ACTIVE_LOW, LEVEL_SENSE);
	set_pci_int_attr(PCI1, INTD, ACTIVE_LOW, LEVEL_SENSE);
	set_pci_int_attr(PCI1, INTE, ACTIVE_LOW, LEVEL_SENSE);

	/*
	 * for debugging purpose, we enable several error interrupts
	 * and route them to pin 1. (IP3)
	 */
	/* cpu parity check - 0 */
	ll_vrc5477_irq_route(0, 1); ll_vrc5477_irq_enable(0);
	/* cpu no-target decode - 1 */
	ll_vrc5477_irq_route(1, 1); ll_vrc5477_irq_enable(1);
	/* local bus read time-out - 7 */
	ll_vrc5477_irq_route(7, 1); ll_vrc5477_irq_enable(7);
	/* PCI SERR# - 14 */
	ll_vrc5477_irq_route(14, 1); ll_vrc5477_irq_enable(14);
	/* PCI internal error - 15 */
	ll_vrc5477_irq_route(15, 1); ll_vrc5477_irq_enable(15);
	/* IOPCI SERR# - 30 */
	ll_vrc5477_irq_route(30, 1); ll_vrc5477_irq_enable(30);
	/* IOPCI internal error - 31 */
	ll_vrc5477_irq_route(31, 1); ll_vrc5477_irq_enable(31);

	/* init all controllers */
	init_i8259_irqs();
	mips_cpu_irq_init();
	vrc5477_irq_init(VRC5477_IRQ_BASE);

	/* setup cascade interrupts */
	setup_irq(VRC5477_IRQ_BASE + VRC5477_I8259_CASCADE, &irq_cascade);
	setup_irq(CPU_IRQ_BASE + CPU_VRC5477_CASCADE, &irq_cascade);
}
/*
 * Hook up the CPU counter as the timer interrupt source, prime the
 * first compare event one tick from now, then unmask the interrupt
 * lines.
 */
void __init mips_timer_setup(struct irqaction *irq)
{
	/* we are using the cpu counter for timer interrupts */
	irq->handler = no_action;	/* we use our own handler */
	setup_irq(MIPS_CPU_TIMER_IRQ, irq);

	/* to generate the first timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);

	set_c0_status(ALLINTS);
}
/*
 * hwint 3 should deal with the PCI A - D interrupts,
 */
void pciasic_hwint3(struct pt_regs *regs)
{
	int irq;
	u8 pending = *(volatile char *)PCIMT_CSITPEND;

	pending &= (IT_INTA | IT_INTB | IT_INTC | IT_INTD);

	/* Mask IRQ3 while dispatching, re-enable afterwards */
	clear_c0_status(IE_IRQ3);
	irq = PCIMT_IRQ_INT2 + ls1bit8(pending);
	do_IRQ(irq, regs);
	set_c0_status(IE_IRQ3);
}