/** * Enable port. */ int cvm_oct_common_open(struct ifnet *ifp) { cvmx_gmxx_prtx_cfg_t gmx_cfg; cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); cvmx_helper_link_info_t link_info; gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 1; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); /* * Set the link state unless we are using MII. */ if (!octeon_is_simulation() && priv->miibus == NULL) { link_info = cvmx_helper_link_get(priv->port); if (!link_info.s.link_up) if_link_state_change(ifp, LINK_STATE_DOWN); else if_link_state_change(ifp, LINK_STATE_UP); } return 0; }
/*
 * Tear down an RGMII/GMII port.
 *
 * Runs the common uninit, then — for true RGMII ports, and GMII port 0
 * which is really an RGMII port — masks the inband PHY status-change
 * interrupts (duplex, link, speed) that init enabled.  The shared RML
 * IRQ handler is freed only when the last RGMII port is removed.
 */
void cvm_oct_rgmii_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	cvm_oct_common_uninit(dev);

	/* GMII port 0 is really RGMII; only those ports had interrupts on. */
	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
	     && (priv->port == 0))
	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
		if (!octeon_is_simulation()) {
			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);

			/* Disable inband status-change interrupts. */
			gmx_rx_int_en.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			gmx_rx_int_en.s.phy_dupx = 0;
			gmx_rx_int_en.s.phy_link = 0;
			gmx_rx_int_en.s.phy_spd = 0;
			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
				       gmx_rx_int_en.u64);
		}
	}

	/* Remove the shared interrupt handler with the last port. */
	number_rgmii_ports--;
	if (number_rgmii_ports == 0)
		free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
}
/*
 * Open (bring up) an SGMII port.
 *
 * Sets up the PHY, enables the port in GMX, then establishes the initial
 * carrier state: via the attached PHY when one exists, otherwise via the
 * Simple Executive link helper with a poll callback installed.  Under the
 * simulator the port is simply enabled.
 */
int cvm_oct_sgmii_open(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	/* Enable the port in GMX. */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(priv->interface_port,
						       priv->interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(priv->interface_port,
					  priv->interface), gmx_cfg.u64);

	/* The simulator has no real link to track. */
	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		/* A PHY is attached: seed carrier state from it. */
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		/* No PHY: poll the SGMII link state directly. */
		link_info = cvmx_helper_link_get(priv->ipd_port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = cvm_oct_sgmii_poll;
		cvm_oct_sgmii_poll(dev);
	}
	return 0;
}
static int octeon_mgmt_init_phy(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); char phy_id[20]; if (octeon_is_simulation()) { /* No PHYs in the simulator. */ netif_carrier_on(netdev); return 0; } snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port); p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(p->phydev)) { p->phydev = NULL; return -1; } phy_start_aneg(p->phydev); return 0; }
/** * cvm_oct_common_get_stats - get the low level ethernet statistics * @dev: Device to get the statistics from * * Returns Pointer to the statistics */ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) { cvmx_pip_port_status_t rx_status; cvmx_pko_port_status_t tx_status; struct octeon_ethernet *priv = netdev_priv(dev); if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) { if (octeon_is_simulation()) { /* The simulator doesn't support statistics */ memset(&rx_status, 0, sizeof(rx_status)); memset(&tx_status, 0, sizeof(tx_status)); } else { cvmx_pip_get_port_status(priv->port, 1, &rx_status); cvmx_pko_get_port_status(priv->port, 1, &tx_status); } priv->stats.rx_packets += rx_status.inb_packets; priv->stats.tx_packets += tx_status.packets; priv->stats.rx_bytes += rx_status.inb_octets; priv->stats.tx_bytes += tx_status.octets; priv->stats.multicast += rx_status.multicast_packets; priv->stats.rx_crc_errors += rx_status.inb_errors; priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; priv->stats.rx_dropped += rx_status.dropped_packets; } return &priv->stats; }
/*
 * Fold the hardware PIP/PKO counters for this port into the accumulated
 * software statistics and return a pointer to them.  Under the simulator
 * (which has no counters) zeros are accumulated instead.
 */
static struct ifnet_stats *cvm_oct_common_get_stats(struct ifnet *ifp)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			/* Second argument: clear the hardware counters. */
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		/*
		 * The drop counter must be incremented atomically since the
		 * RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		cvmx_atomic_add64_nosync(&priv->stats.rx_dropped,
					 rx_status.dropped_packets);
#else
		cvmx_atomic_add32_nosync((int32_t *)&priv->stats.rx_dropped,
					 rx_status.dropped_packets);
#endif
	}

	return &priv->stats;
}
/*
 * One-time SGMII port initialization.
 *
 * Performs the common setup and immediately stops the interface; it
 * stays down until opened.  Outside the simulator, when no PHY device
 * is attached, fall back to polling the SGMII link state directly.
 */
int cvm_oct_sgmii_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	cvm_oct_common_init(dev);
	dev->netdev_ops->ndo_stop(dev);

	if (octeon_is_simulation())
		return 0;

	if (priv->phydev == NULL)
		priv->poll = cvm_oct_sgmii_poll;

	return 0;
}
/*
 * One-time RGMII/GMII port initialization.
 *
 * Performs the common setup, stops the interface (it stays down until
 * opened), sets up the deferred poll work, installs the shared RML
 * interrupt handler for the first RGMII port, and enables inband PHY
 * status-change interrupts on true RGMII ports.
 *
 * Returns 0 on success or the request_irq() error code.
 */
int cvm_oct_rgmii_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int r;

	cvm_oct_common_init(dev);
	dev->netdev_ops->ndo_stop(dev);
	INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
	/*
	 * Due to GMX errata in CN3XXX series chips, it is necessary
	 * to take the link down immediately when the PHY changes
	 * state. In order to do this we call the poll function every
	 * time the RGMII inband status changes. This may cause
	 * problems if the PHY doesn't implement inband status
	 * properly.
	 */
	if (number_rgmii_ports == 0) {
		/* First RGMII port: install the shared RML handler. */
		r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
				IRQF_SHARED, "RGMII", &number_rgmii_ports);
		if (r != 0)
			return r;
	}
	number_rgmii_ports++;

	/*
	 * Only true RGMII ports need to be polled. In GMII mode, port
	 * 0 is really a RGMII port.
	 */
	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
	     && (priv->port == 0))
	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
		if (!octeon_is_simulation()) {
			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);

			/*
			 * Enable interrupts on inband status changes
			 * for this port.
			 */
			gmx_rx_int_en.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			gmx_rx_int_en.s.phy_dupx = 1;
			gmx_rx_int_en.s.phy_link = 1;
			gmx_rx_int_en.s.phy_spd = 1;
			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
				       gmx_rx_int_en.u64);
			priv->poll = cvm_oct_rgmii_poll;
		}
	}

	return 0;
}
/*
 * One-time SGMII port initialization.
 *
 * Performs the common setup and immediately stops the interface; it
 * stays down until opened.  Outside the simulator the SGMII link state
 * is tracked by polling.
 */
int cvm_oct_sgmii_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	cvm_oct_common_init(dev);
	dev->netdev_ops->ndo_stop(dev);

	if (octeon_is_simulation())
		return 0;

	priv->poll = cvm_oct_sgmii_poll;
	/* FIXME: Need autoneg logic */
	return 0;
}
/*
 * Fill in the fields shared by every Octeon 8250 UART port: fixed
 * memory-mapped I/O with 8-byte register spacing, the Octeon port type,
 * and the platform's register accessors.
 */
static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
{
	p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
	p->type = PORT_OCTEON;
	p->iotype = UPIO_MEM;
	p->regshift = 3;	/* I/O addresses are every 8 bytes */
	/* Under the simulator use a small clock so output is fast. */
	p->uartclk = octeon_is_simulation()
		? 115200 * 16
		: octeon_get_io_clock_rate();
	p->serial_in = octeon_serial_in;
	p->serial_out = octeon_serial_out;
}
int cvm_oct_xaui_open(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); cvmx_helper_link_info_t link_info; gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 1; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); if (!octeon_is_simulation()) { link_info = cvmx_helper_link_get(priv->port); if (!link_info.s.link_up) netif_carrier_off(dev); } return 0; }
static int octeon_mgmt_init_phy(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); if (octeon_is_simulation() || p->phy_np == NULL) { /* No PHYs in the simulator. */ netif_carrier_on(netdev); return 0; } p->phydev = of_phy_connect(netdev, p->phy_np, octeon_mgmt_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (!p->phydev) return -ENODEV; return 0; }
int cvm_oct_rgmii_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); int r; cvm_oct_common_init(dev); dev->netdev_ops->ndo_stop(dev); if (number_rgmii_ports == 0) { r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt, IRQF_SHARED, "RGMII", &number_rgmii_ports); } number_rgmii_ports++; if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) && (priv->port == 0)) || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { if (!octeon_is_simulation()) { union cvmx_gmxx_rxx_int_en gmx_rx_int_en; int interface = INTERFACE(priv->port); int index = INDEX(priv->port); gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN (index, interface)); gmx_rx_int_en.s.phy_dupx = 1; gmx_rx_int_en.s.phy_link = 1; gmx_rx_int_en.s.phy_spd = 1; cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), gmx_rx_int_en.u64); priv->poll = cvm_oct_rgmii_poll; } } return 0; }
/*
 * Tear down an RGMII/GMII port.
 *
 * Runs the common uninit, masks the inband status-change interrupts
 * enabled at init time, frees the shared RML IRQ with the last port,
 * and waits for any pending poll work to finish.
 */
void cvm_oct_rgmii_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	cvm_oct_common_uninit(dev);

	/*
	 * Only true RGMII ports need to be polled. In GMII mode, port
	 * 0 is really a RGMII port.
	 */
	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
	     && (priv->port == 0))
	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
		if (!octeon_is_simulation()) {
			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);

			/*
			 * Disable interrupts on inband status changes
			 * for this port.
			 */
			gmx_rx_int_en.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			gmx_rx_int_en.s.phy_dupx = 0;
			gmx_rx_int_en.s.phy_link = 0;
			gmx_rx_int_en.s.phy_spd = 0;
			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
				       gmx_rx_int_en.u64);
		}
	}

	/* Remove the interrupt handler when the last port is removed. */
	number_rgmii_ports--;
	if (number_rgmii_ports == 0)
		free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);

	/* Make sure no deferred poll work survives the device. */
	cancel_work_sync(&priv->port_work);
}
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Folds the hardware PIP/PKO counters into the accumulated software
 * statistics.  Under the simulator (which has no counters) zeros are
 * accumulated instead.
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			/* Second argument: clear the hardware counters. */
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}
/*
 * cvm_oct_common_open - open (bring up) a port.
 * @dev:       device to open
 * @link_poll: device-specific link poll callback, installed and called
 *             once when no PHY device is attached
 *
 * Sets up the PHY, enables the port in GMX (programming the packet-kind
 * field on chips with the PKND feature), then establishes the initial
 * carrier state from either the attached PHY or the Simple Executive
 * link helper.  Under the simulator the port is simply enabled.
 *
 * Returns 0 on success or the PHY setup error code.
 */
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	/* Enable the port; newer chips also need the packet-kind set. */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		/* A PHY is attached: seed carrier state from it. */
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		/* No PHY: use the caller-supplied link poll routine. */
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}
/**
 * Poll for link status change.
 *
 * Dispatch order: the simulator needs no polling; a device-specific
 * poll method takes precedence; an attached MII bus owns link state;
 * otherwise the Simple Executive's link routines are consulted.  When
 * the link state differs from the cached value, the port is
 * re-autonegotiated, the cache updated, and an ifnet-layer update is
 * flagged for later (not performed here).
 */
void cvm_oct_common_poll(struct ifnet *ifp)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	cvmx_helper_link_info_t link_info;

	/*
	 * If this is a simulation, do nothing.
	 */
	if (octeon_is_simulation())
		return;

	/*
	 * If there is a device-specific poll method, use it.
	 */
	if (priv->poll != NULL) {
		priv->poll(ifp);
		return;
	}

	/*
	 * If an MII bus is attached, don't use the Simple Executive's link
	 * state routines.
	 */
	if (priv->miibus != NULL)
		return;

	/*
	 * Use the Simple Executive's link state routines.
	 */
	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	/* State changed: renegotiate, cache, and flag a deferred update. */
	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;
	priv->need_link_update = 1;
}
/*
 * Open (bring up) an SGMII/XAUI/RXAUI port.
 *
 * Sets up the PHY, enables the port in GMX, and establishes the initial
 * carrier state (from the PHY when one is attached, otherwise by
 * polling).  For XAUI/RXAUI interfaces it additionally registers a
 * hardware-status notifier and wires up the LOC_FAULT/REM_FAULT
 * interrupt sources, whose routing differs by chip family
 * (Octeon Plus via RML/RSL, CIU2 chips, or legacy CIU INT_SUM1).
 *
 * Returns 0 on success or the PHY setup error code.
 */
int cvm_oct_sgmii_open(struct net_device *dev)
{
	struct octeon_hw_status_reg sr[3];
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;
	cvmx_helper_interface_mode_t imode;
	int rv, i;
	u64 en_mask;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	/* Enable the port in GMX. */
	gmx_cfg.u64 = cvmx_read_csr(priv->gmx_base + GMX_PRT_CFG);
	gmx_cfg.s.en = 1;
	cvmx_write_csr(priv->gmx_base + GMX_PRT_CFG, gmx_cfg.u64);

	/* The simulator has no link or fault interrupts to manage. */
	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		/* A PHY is attached: seed carrier state from it. */
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		/* No PHY: poll the SGMII link state directly. */
		link_info = cvmx_helper_link_get(priv->ipd_port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = cvm_oct_sgmii_poll;
		cvm_oct_sgmii_poll(dev);
	}

	imode = cvmx_helper_interface_get_mode(priv->interface);
	switch (imode) {
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		/* Handle GMXX_RXX_INT_REG[LOC_FAULT,REM_FAULT]*/
		priv->hw_status_notifier.priority = 10;
		priv->hw_status_notifier.notifier_call =
			cvm_oct_sgmii_hw_status;
		octeon_hw_status_notifier_register(&priv->hw_status_notifier);

		/* Build the chain of interrupt sources, root first. */
		memset(sr, 0, sizeof(sr));
		i = 0;
		en_mask = 0;
		if (OCTEON_IS_OCTEONPLUS()) {
			sr[i].reg = 46; /* RML */
			sr[i].reg_is_hwint = 1;
			sr[i].has_child = 1;
			i++;
			sr[i].reg = CVMX_NPEI_RSL_INT_BLOCKS;
			/* GMX[priv->interface]*/
			sr[i].bit = priv->interface + 1;
			sr[i].has_child = 1;
			i++;
		} else if (octeon_has_feature(OCTEON_FEATURE_CIU2)) {
			/* PKT[AGX[priv->interface]]*/
			sr[i].reg = (6 << 6) | priv->interface;
			sr[i].reg_is_hwint = 1;
			sr[i].has_child = 1;
			i++;
		} else {
			/* INT_SUM1[AGX[priv->interface]]*/
			sr[i].reg = (1 << 6) | (priv->interface + 36);
			sr[i].reg_is_hwint = 1;
			sr[i].has_child = 1;
			i++;
		}

		/* Leaf entry: the GMX RX interrupt register itself. */
		sr[i].reg = CVMX_GMXX_RXX_INT_REG(priv->interface_port,
						  priv->interface);
		sr[i].mask_reg = CVMX_GMXX_RXX_INT_EN(priv->interface_port,
						      priv->interface);
		sr[i].ack_w1c = 1;

		/* Register both fault bits and enable them together. */
		sr[i].bit = INT_BIT_LOC_FAULT;
		en_mask |= 1ull << sr[i].bit;
		octeon_hw_status_add_source(sr);

		sr[i].bit = INT_BIT_REM_FAULT;
		en_mask |= 1ull << sr[i].bit;
		octeon_hw_status_add_source(sr);

		octeon_hw_status_enable(sr[i].reg, en_mask);
		break;
	default:
		break;
	}

	return 0;
}
/**
 * Configure common hardware for all interfaces.
 *
 * Enables and fills the FPA pools, optionally configures RED, enables
 * the MII interface, and installs the POW receive interrupt handler
 * (enabling the work-queue interrupt on all other cores under SMP).
 */
static void cvm_oct_configure_common_hw(device_t bus)
{
	struct octebus_softc *sc;
	int pko_queues;
	int error;
	int rid;

	sc = device_get_softc(bus);

	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);

	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) {
		/*
		 * If the FPA uses different pools for output buffers and
		 * packets, size the output buffer pool based on the number
		 * of PKO queues.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN38XX))
			pko_queues = 128;
		else if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
			pko_queues = 32;
		else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
			pko_queues = 32;
		else
			pko_queues = 256;

		cvm_oct_num_output_buffers = 4 * pko_queues;
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
				     cvm_oct_num_output_buffers);
	}

	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers / 4,
				      num_packet_buffers / 8);

	/* Enable the MII interface */
	if (!octeon_is_simulation())
		cvmx_write_csr(CVMX_SMI_EN, 1);

	/* Register an IRQ handler to receive POW interrupts. */
	rid = 0;
	sc->sc_rx_irq = bus_alloc_resource(bus, SYS_RES_IRQ, &rid,
					   CVMX_IRQ_WORKQ0 +
					       pow_receive_group,
					   CVMX_IRQ_WORKQ0 +
					       pow_receive_group,
					   1, RF_ACTIVE);
	if (sc->sc_rx_irq == NULL) {
		/* Bug fix: diagnostic was missing its trailing newline. */
		device_printf(bus, "could not allocate workq irq\n");
		return;
	}

	error = bus_setup_intr(bus, sc->sc_rx_irq,
			       INTR_TYPE_NET | INTR_MPSAFE,
			       cvm_oct_do_interrupt, NULL, cvm_oct_device,
			       &sc->sc_rx_intr_cookie);
	if (error != 0) {
		device_printf(bus, "could not setup workq irq\n");
		/*
		 * Bug fix: release the IRQ resource on setup failure
		 * instead of leaking it.
		 */
		bus_release_resource(bus, SYS_RES_IRQ, rid, sc->sc_rx_irq);
		sc->sc_rx_irq = NULL;
		return;
	}

#ifdef SMP
	{
		cvmx_ciu_intx0_t en;
		int core;

		/* Enable the workq interrupt on every other core. */
		CPU_FOREACH(core) {
			if (core == PCPU_GET(cpuid))
				continue;

			en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core * 2));
			en.s.workq |= (1 << pow_receive_group);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(core * 2), en.u64);
		}
	}
#endif
}