static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

	return 0;
}
/*
 * octeon_ciu_reset
 *
 * Shutdown all CIU to IP2, IP3 mappings
 */
void
octeon_ciu_reset(void)
{
	uint64_t cvmctl;

	/* Disable all CIU interrupts by default */
	cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2+1), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2+1), 0);

#ifdef SMP
	/* Enable the MBOX interrupts.  */
	cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2+1),
		       (1ull << (OCTEON_IRQ_MBOX0 - 8)) |
		       (1ull << (OCTEON_IRQ_MBOX1 - 8)));
#endif

	/*
	 * Move the Performance Counter interrupt to OCTEON_PMC_IRQ
	 */
	cvmctl = mips_rd_cvmctl();
	cvmctl &= ~(7 << 7);
	cvmctl |= (OCTEON_PMC_IRQ + 2) << 7;
	mips_wr_cvmctl(cvmctl);
}
static void
ciu_en0_intr_unmask(void *arg)
{
	uint64_t mask;
	int irq;

	irq = (uintptr_t)arg;
	mask = cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2));
	mask |= 1ull << (irq - CIU_IRQ_EN0_BEGIN);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2), mask);
}
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
static int
ciu_en0_intr_bind(void *arg, u_char target)
{
	uint64_t mask;
	int core;
	int irq;

	irq = (uintptr_t)arg;
	CPU_FOREACH(core) {
		mask = cvmx_read_csr(CVMX_CIU_INTX_EN0(core*2));
		if (core == target)
			mask |= 1ull << (irq - CIU_IRQ_EN0_BEGIN);
		else
			mask &= ~(1ull << (irq - CIU_IRQ_EN0_BEGIN));
		cvmx_write_csr(CVMX_CIU_INTX_EN0(core*2), mask);
	}

	return (0);
}
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < 64) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		/* IRQs 64 and up are in EN1, which uses the IP3 index. */
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time. During an enable
	 * the cores don't interfere with each other. During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
#ifdef CONFIG_SMP
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();

	local_irq_save(flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	local_irq_restore(flags);
#endif
}
static int
ciu_intr(void *arg)
{
	struct ciu_softc *sc;
	uint64_t en0_sum, en1_sum;
	uint64_t en0_mask, en1_mask;
	int irq_index;
	int error;

	sc = arg;
	(void)sc;

	en0_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(cvmx_get_core_num()*2));
	en1_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	en0_mask = cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num()*2));
	en1_mask = cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num()*2));

	en0_sum &= en0_mask;
	en1_sum &= en1_mask;

	if (en0_sum == 0 && en1_sum == 0)
		return (FILTER_STRAY);

	for (irq_index = 0; en0_sum != 0; irq_index++, en0_sum >>= 1) {
		if ((en0_sum & 1) == 0)
			continue;

		mips_intrcnt_inc(ciu_en0_intrcnt[irq_index]);

		error = intr_event_handle(ciu_en0_intr_events[irq_index], NULL);
		if (error != 0)
			printf("%s: stray en0 irq%d\n", __func__, irq_index);
	}

	for (irq_index = 0; en1_sum != 0; irq_index++, en1_sum >>= 1) {
		if ((en1_sum & 1) == 0)
			continue;

		mips_intrcnt_inc(ciu_en1_intrcnt[irq_index]);

		error = intr_event_handle(ciu_en1_intr_events[irq_index], NULL);
		if (error != 0)
			printf("%s: stray en1 irq%d\n", __func__, irq_index);
	}

	return (FILTER_HANDLED);
}
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
#ifdef CONFIG_SMP
	int coreid = cpu_logical_map(cpu);
#else
	int coreid = cvmx_get_core_num();
#endif
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < 64) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		/* IRQs 64 and up are in EN1, which uses the IP3 index. */
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}
static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;

	wmb(); /* Make sure flag changes arrive before register updates. */

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			clear_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		/* Line 1 bits are mirrored into EN1 on every core. */
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			clear_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		clear_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		clear_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
/**
 * Configure common hardware for all interfaces.
 */
static void
cvm_oct_configure_common_hw(device_t bus)
{
	struct octebus_softc *sc;
	int pko_queues;
	int error;
	int rid;

	sc = device_get_softc(bus);

	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);

	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) {
		/*
		 * If the FPA uses different pools for output buffers and
		 * packets, size the output buffer pool based on the number
		 * of PKO queues.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN38XX))
			pko_queues = 128;
		else if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
			pko_queues = 32;
		else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
			pko_queues = 32;
		else
			pko_queues = 256;

		cvm_oct_num_output_buffers = 4 * pko_queues;
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
				     cvm_oct_num_output_buffers);
	}

	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers/4,
				      num_packet_buffers/8);

	/* Enable the MII interface */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
		cvmx_write_csr(CVMX_SMI_EN, 1);

	/* Register an IRQ handler to receive POW interrupts */
	rid = 0;
	sc->sc_rx_irq = bus_alloc_resource(bus, SYS_RES_IRQ, &rid,
					   OCTEON_IRQ_WORKQ0 + pow_receive_group,
					   OCTEON_IRQ_WORKQ0 + pow_receive_group,
					   1, RF_ACTIVE);
	if (sc->sc_rx_irq == NULL) {
		device_printf(bus, "could not allocate workq irq");
		return;
	}

	error = bus_setup_intr(bus, sc->sc_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
			       cvm_oct_do_interrupt, NULL, cvm_oct_device,
			       &sc->sc_rx_intr_cookie);
	if (error != 0) {
		device_printf(bus, "could not setup workq irq");
		return;
	}

#ifdef SMP
	{
		cvmx_ciu_intx0_t en;
		int core;

		CPU_FOREACH(core) {
			if (core == PCPU_GET(cpuid))
				continue;

			en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core*2));
			en.s.workq |= (1<<pow_receive_group);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(core*2), en.u64);
		}
	}
#endif
}