static void cvm_oct_configure_common_hw(void) { /* Setup the FPA */ cvmx_fpa_enable(); cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, num_packet_buffers); cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, num_packet_buffers); if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024); #ifdef __LITTLE_ENDIAN { union cvmx_ipd_ctl_status ipd_ctl_status; ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); ipd_ctl_status.s.pkt_lend = 1; ipd_ctl_status.s.wqe_lend = 1; cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64); } #endif cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8); }
/*
 * Configure hardware shared by all network interfaces (boot-time only,
 * per __init): enable the Free Pool Allocator, seed its pools, and
 * optionally arm RED congestion thresholds.
 */
static __init void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();

	/* Seed the packet and work-queue-entry pools with buffers. */
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);

	/* Fill the output pool only when it is distinct from the packet pool. */
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

	/*
	 * RED thresholds scaled to the pool size (1/4 and 1/8 of the
	 * buffer count) — presumably pass/drop free-buffer watermarks;
	 * see cvmx_helper_setup_red() for exact semantics.
	 */
	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers / 4,
				      num_packet_buffers / 8);
}
/**
 * Configure common hardware for all interfaces: bring up the FPA pools,
 * optionally arm RED, enable the MII interface, and hook up the POW
 * (work queue) interrupt for the configured receive group.
 */
static void cvm_oct_configure_common_hw(device_t bus)
{
	struct octebus_softc *softc;
	int n_pko_queues;
	int err;
	int rid;

	softc = device_get_softc(bus);

	/* The FPA must be enabled before its pools are populated. */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
	    num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
	    num_packet_buffers);

	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) {
		/*
		 * The output-buffer pool is separate from the packet pool,
		 * so size it from the number of PKO queues this chip model
		 * provides.  The CN38XX test must precede the broader
		 * CN3XXX test.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN38XX))
			n_pko_queues = 128;
		else if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN50XX))
			n_pko_queues = 32;
		else
			n_pko_queues = 256;

		cvm_oct_num_output_buffers = 4 * n_pko_queues;
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
		    CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
		    cvm_oct_num_output_buffers);
	}

	/* RED thresholds scaled to 1/4 and 1/8 of the buffer count. */
	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers / 4,
		    num_packet_buffers / 8);

	/* Enable the MII interface (skipped on the simulator board). */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
		cvmx_write_csr(CVMX_SMI_EN, 1);

	/* Register an IRQ handler to receive POW interrupts. */
	rid = 0;
	softc->sc_rx_irq = bus_alloc_resource(bus, SYS_RES_IRQ, &rid,
	    OCTEON_IRQ_WORKQ0 + pow_receive_group,
	    OCTEON_IRQ_WORKQ0 + pow_receive_group,
	    1, RF_ACTIVE);
	if (softc->sc_rx_irq == NULL) {
		device_printf(bus, "could not allocate workq irq");
		return;
	}

	err = bus_setup_intr(bus, softc->sc_rx_irq,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    cvm_oct_do_interrupt, NULL, cvm_oct_device,
	    &softc->sc_rx_intr_cookie);
	if (err != 0) {
		device_printf(bus, "could not setup workq irq");
		return;
	}

#ifdef SMP
	{
		cvmx_ciu_intx0_t ciu_en;
		int cpu;

		/*
		 * Unmask this POW group's work-queue interrupt on every
		 * core other than the current one — presumably the current
		 * core's mask was set by the bus_setup_intr() above; verify
		 * against the interrupt-controller code.
		 */
		CPU_FOREACH(cpu) {
			if (cpu == PCPU_GET(cpuid))
				continue;
			ciu_en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(cpu * 2));
			ciu_en.s.workq |= (1 << pow_receive_group);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(cpu * 2), ciu_en.u64);
		}
	}
#endif
}