/**
 * Set the multicast list.
 *
 * @param ifp Interface to work on
 */
void cvm_oct_common_set_multicast_list(struct ifnet *ifp)
{
	cvmx_gmxx_prtx_cfg_t gmx_cfg;
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		cvmx_gmxx_rxx_adr_ctl_t control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (/*ifp->mc_list || */(ifp->if_flags & IFF_ALLMULTI) ||
		    (ifp->if_flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (ifp->if_flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (ifp->if_flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	}
}
/**
 * Set the hardware MAC address for a device.
 *
 * @param ifp  Interface to change the MAC address for
 * @param addr Address structure to change it to
 */
void cvm_oct_common_set_mac_address(struct ifnet *ifp, const void *addr)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	cvmx_gmxx_prtx_cfg_t gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	memcpy(priv->mac, addr, 6);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		const uint8_t *ptr = addr;
		uint64_t mac = 0;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)ptr[i];

		gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), ptr[5]);

		cvm_oct_common_set_multicast_list(ifp);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	}
}
int is_interface_disabled(int interface_num)
{
	int mode = cvmx_helper_interface_get_mode(interface_num);

	if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
		return 1;
	return 0;
}
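/*
 * Usage sketch (illustrative only, not from the driver): skip disabled
 * interfaces when walking the device. It relies only on cvmx_helper_*
 * calls already used elsewhere in this code; the function name
 * list_active_interfaces is hypothetical.
 */
static void list_active_interfaces(void)
{
	int num_interfaces = cvmx_helper_get_number_of_interfaces();
	int interface;

	for (interface = 0; interface < num_interfaces; interface++) {
		if (is_interface_disabled(interface))
			continue;
		printf("interface %d: %d port(s)\n", interface,
		       cvmx_helper_ports_on_interface(interface));
	}
}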
/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev: Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64) ||
	    (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index,
								interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN(index,
								interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}
/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev: Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev: The device in question.
 * @addr: Address structure to change it to.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	memcpy(dev->dev_addr, addr + 2, 6);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = addr;
		uint64_t mac = 0;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)ptr[i + 2];

		gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), ptr[5]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), ptr[6]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), ptr[7]);

		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
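/*
 * Note on the "+ 2" above: ndo_set_mac_address() receives a struct
 * sockaddr, whose first two bytes hold sa_family, so the six MAC bytes
 * start at offset 2 (sa_data). The loop then folds them into one
 * big-endian 48-bit value for the GMX SMAC register. A minimal
 * standalone sketch of that packing follows (hypothetical helper and
 * test value, not driver code):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t mac_to_u64(const uint8_t mac[6])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 6; i++)
		v = (v << 8) | mac[i];	/* mac[0] becomes the most significant byte */
	return v;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xab, 0xcd, 0xef };

	/* Prints 0x020000abcdef */
	printf("0x%012llx\n", (unsigned long long)mac_to_u64(mac));
	return 0;
}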
/**
 * Change the link MTU.
 *
 * @param ifp     Interface to change
 * @param new_mtu The new MTU
 * @return Zero on success
 */
int cvm_oct_common_change_mtu(struct ifnet *ifp, int new_mtu)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	int vlan_bytes = 4;

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64) ||
	    (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		printf("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	ifp->if_mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			cvmx_pip_frm_len_chkx_t frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than the
		 * MTU. The jabber register must be set to a multiple of
		 * 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
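/*
 * All three change_mtu variants above use the same overhead arithmetic:
 * 14-byte Ethernet header + 4-byte FCS + 4-byte VLAN tag = 22 bytes of
 * overhead on top of the MTU. A standalone check of the resulting
 * bounds and of the jabber rounding (illustrative example, not driver
 * code):
 */
#include <stdio.h>

int main(void)
{
	int overhead = 14 + 4 + 4;		/* header + FCS + VLAN = 22 */
	int min_mtu = 64 - overhead;		/* 42 */
	int max_mtu = 65392 - overhead;		/* 65370 */
	int max_packet = 1500 + overhead;	/* 1522 for the default MTU */
	/* The jabber register must be a multiple of 8, so round up. */
	int jabber = (max_packet + 7) & ~7;	/* 1522 -> 1528 */

	printf("MTU range %d..%d, jabber for MTU 1500: %d\n",
	       min_mtu, max_mtu, jabber);
	return 0;
}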
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();
	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -= cvmx_pko_get_num_queues(priv->port) *
					sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/* 150 uS: about 10 1500-byte packets at 1GE. */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
static int cvm_oct_driver_probe(struct platform_device *dev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
		cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
	else
		cvm_oct_mac_addr_offset = 0;

#if defined(CONFIG_SG590) || defined(CONFIG_SG770)
	{
		union cvmx_gmxx_inf_mode mode;

		mode.u64 = 0;
		mode.s.en = 1;
		cvmx_write_csr(CVMX_GMXX_INF_MODE(0), mode.u64);
	}
#endif
#ifdef CONFIG_SG8200
	/*
	 * SG8200 directs the 2 internal ports out MII to 2 single 100Mbit
	 * interfaces. (The switch is on the 8139C+).
	 */
	{
		union cvmx_gmxx_inf_mode mode;

		mode.u64 = 0;
		mode.s.en = 1;
		mode.s.p0mii = 1;
		mode.s.type = 1;
		cvmx_write_csr(CVMX_GMXX_INF_MODE(0), mode.u64);
	}
#endif

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue\n");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();
	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -= cvmx_pko_get_num_queues(priv->port) *
					sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work,
						   HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/* 150 uS: about 10 1500-byte packets at 1GE. */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

#ifdef CONFIG_SG590
	{
		void cvm_oct_marvel_switch_init(void);

		printk("cavium-ethernet: enabling Marvell 88e6046 switch: intra-switch forwarding disabled\n");
		cvm_oct_marvel_switch_init();
	}
#endif

	return 0;
}
/**
 * Module/driver initialization. Creates the network devices.
 *
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int ifnum;
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_prt_tagx_t pip_prt_tagx;
			int pkind = cvmx_helper_get_ipd_port(interface, port);

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	ifnum = 0;
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     ifnum++, port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp;

			dev = BUS_ADD_CHILD(bus, 0, "octe", ifnum);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("Failed to allocate ethernet device for interface %d port %d\n",
				       interface, port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (!priv->init) {
				printf("octe%d: unsupported device type interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else if (priv->init(ifp) != 0) {
				printf("octe%d: failed to register device for interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else {
				cvm_oct_device[priv->port] = ifp;
				fau -= cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/*
		 * Set the POW timer rate to give an interrupt at most
		 * INTERRUPT_LIMIT times per second.
		 */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC,
			       cvmx_clock_get_rate(CVMX_CLOCK_CORE) /
			       (INTERRUPT_LIMIT * 16 * 256) << 8);

		/*
		 * Enable POW timer interrupt. It will count when there
		 * are packets available.
		 */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
			       0x1ful << 24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, 1);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}
int cvm_oct_sgmii_open(struct net_device *dev)
{
	struct octeon_hw_status_reg sr[3];
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;
	cvmx_helper_interface_mode_t imode;
	int rv, i;
	u64 en_mask;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(priv->gmx_base + GMX_PRT_CFG);
	gmx_cfg.s.en = 1;
	cvmx_write_csr(priv->gmx_base + GMX_PRT_CFG, gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->ipd_port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = cvm_oct_sgmii_poll;
		cvm_oct_sgmii_poll(dev);
	}

	imode = cvmx_helper_interface_get_mode(priv->interface);
	switch (imode) {
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		/* Handle GMXX_RXX_INT_REG[LOC_FAULT,REM_FAULT] */
		priv->hw_status_notifier.priority = 10;
		priv->hw_status_notifier.notifier_call = cvm_oct_sgmii_hw_status;
		octeon_hw_status_notifier_register(&priv->hw_status_notifier);

		memset(sr, 0, sizeof(sr));
		i = 0;
		en_mask = 0;
		if (OCTEON_IS_OCTEONPLUS()) {
			sr[i].reg = 46;	/* RML */
			sr[i].reg_is_hwint = 1;
			sr[i].has_child = 1;
			i++;
			/* GMX[priv->interface] */
			sr[i].reg = CVMX_NPEI_RSL_INT_BLOCKS;
			sr[i].bit = priv->interface + 1;
			sr[i].has_child = 1;
			i++;
		} else if (octeon_has_feature(OCTEON_FEATURE_CIU2)) {
			/* PKT[AGX[priv->interface]] */
			sr[i].reg = (6 << 6) | priv->interface;
			sr[i].reg_is_hwint = 1;
			sr[i].has_child = 1;
			i++;
		} else {
			/* INT_SUM1[AGX[priv->interface]] */
			sr[i].reg = (1 << 6) | (priv->interface + 36);
			sr[i].reg_is_hwint = 1;
			sr[i].has_child = 1;
			i++;
		}
		sr[i].reg = CVMX_GMXX_RXX_INT_REG(priv->interface_port,
						  priv->interface);
		sr[i].mask_reg = CVMX_GMXX_RXX_INT_EN(priv->interface_port,
						      priv->interface);
		sr[i].ack_w1c = 1;

		sr[i].bit = INT_BIT_LOC_FAULT;
		en_mask |= 1ull << sr[i].bit;
		octeon_hw_status_add_source(sr);

		sr[i].bit = INT_BIT_REM_FAULT;
		en_mask |= 1ull << sr[i].bit;
		octeon_hw_status_add_source(sr);

		octeon_hw_status_enable(sr[i].reg, en_mask);
		break;
	default:
		break;
	}
	return 0;
}
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();
	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -= cvmx_pko_get_num_queues(priv->port) *
					sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/* 150 uS: about 10 1500-byte packets at 1GE. */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
int main(int argc, char *argv[])
{
	/* Mandatory function to initialize a simple executive application */
	cvmx_user_app_init();

	sysinfo = cvmx_sysinfo_get();

	if (cvmx_is_init_core()) {
		/* May need to specify this manually for the simulator */
		cpu_clock_hz = sysinfo->cpu_clock_hz;

		if (init_tasks(NUM_PACKET_BUFFERS) != 0) {
			printf("Initialization failed!\n");
			exit(-1);
		}

		/* Get the FPA pool numbers of the packet and WQE pools */
		packet_pool = cvmx_fpa_get_packet_pool();
		wqe_pool = cvmx_fpa_get_wqe_pool();

		print_debug_info();

		int num_interfaces = cvmx_helper_get_number_of_interfaces();
		int interface;
		bool found_valid_xaui_port = false;

		for (interface = 0;
		     interface < num_interfaces && !found_valid_xaui_port;
		     interface++) {
			uint32_t num_ports =
				cvmx_helper_ports_on_interface(interface);
			cvmx_helper_interface_mode_t imode =
				cvmx_helper_interface_get_mode(interface);

			if (imode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
				printf("\nIdentified XAUI interface with %" PRIu32 " port(s)\n",
				       num_ports);
				printf("interface number: %d\n", interface);

				uint32_t port;

				for (port = 0; port < num_ports; port++) {
					if (cvmx_helper_is_port_valid(interface, port)) {
						xaui_ipd_port = cvmx_helper_get_ipd_port(interface, port);
						printf("xaui_ipd_port: %d\n",
						       xaui_ipd_port);
						found_valid_xaui_port = true;
						break;
					}
				}
			}
			printf("\n");
		}
	}

	/*
	 * Wait (stall) until all cores in the given coremask have reached
	 * this point in the program execution before proceeding.
	 */
	CORE_MASK_BARRIER_SYNC;

	if (cvmx_is_init_core()) {
		receive_packet();
	} else if (cvmx_get_core_num() == 1) {
		send_packet();
	} else {
		/* For this program, all cores besides the first two are superfluous */
		printf("Superfluous core #%02d\n", cvmx_get_core_num());
		return 0;
	}

	printf("Execution complete for core #%02d\n", cvmx_get_core_num());
	return 0;
}