static int enic_get_ksettings(struct net_device *netdev,
			      struct ethtool_link_ksettings *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct ethtool_link_settings *base = &ecmd->base;

	ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
	base->port = PORT_FIBRE;

	if (netif_carrier_ok(netdev)) {
		base->speed = vnic_dev_port_speed(enic->vdev);
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}
	base->autoneg = AUTONEG_DISABLE;

	return 0;
}
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	u32 port_speed = vnic_dev_port_speed(fnic->vdev);

	/* Add in other values as they get defined in fw */
	switch (port_speed) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		break;
	}
}
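/*
 * For reference: a sketch of the firmware port-speed constants the switch
 * above compares against. They are not defined in this excerpt; the values
 * below are an assumption based on drivers/scsi/fnic/fnic.h, where
 * vnic_dev_port_speed() reports the link speed in Mbps (which is also why
 * the older variant further down can match on the raw literal 10000).
 * Verify against the actual header before relying on these values.
 */
enum fnic_port_speeds {
	DCEM_PORTSPEED_NONE  = 0,
	DCEM_PORTSPEED_1G    = 1000,
	DCEM_PORTSPEED_10G   = 10000,
	DCEM_PORTSPEED_20G   = 20000,
	DCEM_PORTSPEED_25G   = 25000,
	DCEM_PORTSPEED_40G   = 40000,
	DCEM_PORTSPEED_4x10G = 41000,
	DCEM_PORTSPEED_100G  = 100000,
};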
static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
				   __rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;
	int link_status = 0;

	ENICPMD_FUNC_TRACE();
	link_status = enic_get_link_status(enic);
	/* Nonzero when the link status is unchanged since the last update */
	ret = (link_status == enic->link_status);
	enic->link_status = link_status;
	eth_dev->data->dev_link.link_status = link_status;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	return ret;
}
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	u32 port_speed = vnic_dev_port_speed(fnic->vdev);

	switch (port_speed) {
	case 10000:
		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
		break;
	default:
		/* Report unknown rather than assuming 10G for other speeds */
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		break;
	}
}
int enic_enable(struct enic *enic)
{
	unsigned int index;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed. "
			    "Flow director feature will not work\n");

	/* Fill RQ bufs */
	for (index = 0; index < enic->rq_count; index++) {
		vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);

		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
			dev_err(enic, "Unable to alloc receive buffers\n");
			return -1;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		vnic_wq_enable(&enic->wq[index]);
	for (index = 0; index < enic->rq_count; index++)
		vnic_rq_enable(&enic->rq[index]);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
				   enic_intr_handler, (void *)enic->rte_dev);
	rte_intr_enable(&(enic->pdev->intr_handle));
	vnic_intr_unmask(&enic->intr);

	return 0;
}
static int enic_get_settings(struct net_device *netdev,
			     struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed. "
			    "Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
		if (err) {
			dev_err(enic, "Failed to alloc RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		vnic_wq_enable(&enic->wq[index]);
	for (index = 0; index < enic->rq_count; index++)
		vnic_rq_enable(&enic->rq[index]);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
				   enic_intr_handler, (void *)enic->rte_dev);
	rte_intr_enable(&(enic->pdev->intr_handle));
	vnic_intr_unmask(&enic->intr);

	return 0;
}