/* Called on a VF when the PF has (re)assigned this VF's MAC address.
 * Updates the netdev and the cached link-info copy of the address under
 * RTNL, and fires NETDEV_CHANGEADDR so stack observers see the change.
 */
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
	bool macaddr_changed = false;
	struct net_device *netdev;
	struct lio *lio;

	rtnl_lock();

	netdev = oct->props[0].netdev;
	lio = GET_LIO(netdev);

	/* Mark the address as administratively assigned by the PF. */
	lio->linfo.macaddr_is_admin_asgnd = true;

	if (!ether_addr_equal(netdev->dev_addr, mac)) {
		macaddr_changed = true;
		ether_addr_copy(netdev->dev_addr, mac);
		/* linfo.hw_addr keeps the MAC in its low six bytes;
		 * skip the two leading pad bytes.
		 */
		ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
	}

	rtnl_unlock();

	if (macaddr_changed)
		dev_info(&oct->pci_dev->dev,
			 "PF changed VF's MAC address to %pM\n", mac);

	/* no need to notify the firmware of the macaddr change because
	 * the PF did that already
	 */
}
static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct oct_link_info *linfo; linfo = &lio->linfo; if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || linfo->link.s.if_mode == INTERFACE_MODE_XFI) { ecmd->port = PORT_FIBRE; ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Pause); ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); ecmd->transceiver = XCVR_EXTERNAL; ecmd->autoneg = AUTONEG_DISABLE; } else { dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", linfo->link.s.if_mode); } if (linfo->link.s.link_up) { ethtool_cmd_speed_set(ecmd, linfo->link.s.speed); ecmd->duplex = linfo->link.s.duplex; } else { ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); ecmd->duplex = DUPLEX_UNKNOWN; } return 0; }
static int octnet_gpio_access(struct net_device *netdev, int addr, int val) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; nctrl.ncmd.s.param1 = addr; nctrl.ncmd.s.param2 = val; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); return -EINVAL; } return 0; }
int setup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct cavium_wq *wq; int q, q_no; for (q = 0; q < oct->num_oqs; q++) { q_no = lio->linfo.rxpciq[q].s.q_no; wq = &lio->rxq_status_wq[q_no]; wq->wq = alloc_workqueue("rxq-oom-status", WQ_MEM_RECLAIM, 0); if (!wq->wq) { dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); return -ENOMEM; } INIT_DELAYED_WORK(&wq->wk.work, octnet_poll_check_rxq_oom_status); wq->wk.ctxptr = lio; wq->wk.ctxul = q_no; } return 0; }
/* Send a single-parameter feature-control command (cmd, param1) to the
 * NIC firmware via the first TX queue.  Returns 0 on success or a
 * negative errno (positive firmware status is mapped to -EIO).
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int status;

	memset(&nctrl, 0, sizeof(nctrl));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	status = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (status) {
		dev_err(&oct->pci_dev->dev,
			"Feature change failed in core (ret: 0x%x)\n",
			status);
		if (status > 0)
			status = -EIO;
	}

	return status;
}
static int lio_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) { struct lio *lio = GET_LIO(netdev); info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (lio->ptp_clock) info->phc_index = ptp_clock_index(lio->ptp_clock); else info->phc_index = -1; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); return 0; }
/* ethtool get_strings handler: emit one label per IQ stat for every
 * active input queue, then one per OQ stat for every active output
 * queue, in the same order lio_get_ethtool_stats() fills values.
 *
 * Fixes: (1) the qmask test used 1UL, which is 32 bits on 32-bit builds
 * and makes the shift UB for queue indices >= 32 against the 64-bit
 * io_qmask (the switch-based variant of this handler already uses 1ULL);
 * (2) sprintf is now bounded to the ETH_GSTRING_LEN slot each label
 * occupies, so a long stat name cannot overrun its slot.
 */
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;

	num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
		if (!(oct_dev->io_qmask.iq & (1ULL << i)))
			continue;
		for (j = 0; j < num_iq_stats; j++) {
			snprintf(data, ETH_GSTRING_LEN, "IQ%d %s", i,
				 oct_iq_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}
	}

	num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
	/* for (i = 0; i < oct_dev->num_oqs; i++) { */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
		if (!(oct_dev->io_qmask.oq & (1ULL << i)))
			continue;
		for (j = 0; j < num_oq_stats; j++) {
			snprintf(data, ETH_GSTRING_LEN, "OQ%d %s", i,
				 oct_droq_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}
	}
}
static int lio_get_sset_count(struct net_device *netdev, int sset) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) + (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); }
void cleanup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); if (lio->rxq_status_wq.wq) { cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work); flush_workqueue(lio->rxq_status_wq.wq); destroy_workqueue(lio->rxq_status_wq.wq); } }
/* Schedule the per-DROQ OOM-status poll to run after the configured
 * poll interval on that queue's dedicated workqueue.
 */
void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq)
{
	struct net_device *netdev = oct->props[0].netdev;
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *oom_wq = &lio->rxq_status_wq[droq->q_no];
	unsigned long delay = msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS);

	queue_delayed_work(oom_wq->wq, &oom_wq->wk.work, delay);
}
/* ethtool get_ethtool_stats handler: fill 'data' with per-IQ statistics
 * for every active input queue, then per-OQ statistics for every active
 * output queue — same order as the labels from lio_get_strings().
 *
 * Fix: the qmask tests used 1UL, which is 32 bits on 32-bit builds and
 * makes the shift UB for queue indices >= 32 against the 64-bit
 * io_qmask fields; use 1ULL.
 */
static void lio_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j;

	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
		if (!(oct_dev->io_qmask.iq & (1ULL << j)))
			continue;
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.instr_processed);
		data[i++] = CVM_CAST64(
			oct_dev->instr_queue[j]->stats.instr_dropped);
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
		/* live hardware instruction count, read from the register */
		data[i++] = readl(oct_dev->instr_queue[j]->inst_cnt_reg);
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
	}

	/* for (j = 0; j < oct_dev->num_oqs; j++){ */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
		if (!(oct_dev->io_qmask.oq & (1ULL << j)))
			continue;
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
	}
}
/* ethtool get_coalesce handler: report the interrupt-coalescing
 * configuration.  Fixed thresholds come from the CN66xx/CN68xx chip
 * config; the adaptive (intrmod) window is reported only when adaptive
 * RX coalescing is enabled.  Returns 0, or -EINVAL for unknown chips.
 */
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

		/* With adaptive coalescing off, the fixed per-OQ interrupt
		 * time / packet-count thresholds from the config apply.
		 */
		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		/* TX coalescing is reported as the first IQ's fill threshold. */
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	if (intrmod_cfg->rx_enable) {
		/* Adaptive RX coalescing: expose the intrmod sampling
		 * interval, rate thresholds and trigger window.
		 */
		intr_coal->use_adaptive_rx_coalesce = intrmod_cfg->rx_enable;
		intr_coal->rate_sample_interval = intrmod_cfg->check_intrvl;
		intr_coal->pkt_rate_high = intrmod_cfg->maxpkt_ratethr;
		intr_coal->pkt_rate_low = intrmod_cfg->minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg->rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg->rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg->rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg->rx_mincnt_trigger;
	}
	return 0;
}
/** * \brief Net device change_mtu * @param netdev network device */ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octeon_soft_command *sc; union octnet_cmd *ncmd; int ret = 0; sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0); ncmd = (union octnet_cmd *)sc->virtdptr; init_completion(&sc->complete); sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU; ncmd->s.param1 = new_mtu; octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); ret = octeon_send_soft_command(oct, sc); if (ret == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n"); octeon_free_soft_command(oct, sc); return -EINVAL; } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ ret = wait_for_sc_completion_timeout(oct, sc, 0); if (ret) return ret; if (sc->sc_status) { WRITE_ONCE(sc->caller_is_done, true); return -EINVAL; } netdev->mtu = new_mtu; lio->mtu = new_mtu; WRITE_ONCE(sc->caller_is_done, true); return 0; }
static void lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { /* Notes: Not supporting any auto negotiation in these * drivers. Just report pause frame support. */ struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; pause->autoneg = 0; pause->tx_pause = oct->tx_pause; pause->rx_pause = oct->rx_pause; }
/* ethtool get_sset_count handler: total number of statistic strings is
 * the device-wide stats plus the per-queue IQ and OQ stats.  Only
 * ETH_SS_STATS is supported.
 */
static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ARRAY_SIZE(oct_stats_strings) +
	       ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
	       ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs;
}
/* ethtool get_eeprom_len handler: the "eeprom" is a formatted board-info
 * string; report its length.
 *
 * Fix: the original used an unbounded sprintf() into a 128-byte stack
 * buffer, which long board name/serial strings could overflow.  Enlarge
 * the buffer and bound the write with snprintf(); its return value is
 * still the full formatted length, which is what must be reported.
 */
static int lio_get_eeprom_len(struct net_device *netdev)
{
	u8 buf[192];
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len = snprintf(buf, sizeof(buf),
		       "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		       board_info->name, board_info->serial_number,
		       board_info->major, board_info->minor);
	return len;
}
/* ethtool set_msglevel handler: store the new message level and, when
 * the NETIF_MSG_HW bit toggles, tell the firmware to enable or disable
 * verbose debug accordingly.
 */
static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);
	u32 changed = msglvl ^ lio->msg_enable;

	if (changed & NETIF_MSG_HW) {
		int fw_cmd = (msglvl & NETIF_MSG_HW) ?
			     OCTNET_CMD_VERBOSE_ENABLE :
			     OCTNET_CMD_VERBOSE_DISABLE;

		liquidio_set_feature(netdev, fw_cmd, 0);
	}

	lio->msg_enable = msglvl;
}
static void lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct lio *lio; struct octeon_device *oct; lio = GET_LIO(netdev); oct = lio->oct_dev; memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); strcpy(drvinfo->driver, "liquidio"); strcpy(drvinfo->version, LIQUIDIO_VERSION); strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, ETHTOOL_FWVERS_LEN); strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); }
void cleanup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct cavium_wq *wq; int q_no; for (q_no = 0; q_no < oct->num_oqs; q_no++) { wq = &lio->rxq_status_wq[q_no]; if (wq->wq) { cancel_delayed_work_sync(&wq->wk.work); flush_workqueue(wq->wq); destroy_workqueue(wq->wq); wq->wq = NULL; } } }
/* ethtool get_strings handler: for ETH_SS_STATS, emit the device-wide
 * stat labels, then per-IQ "tx-N-..." labels for active input queues,
 * then per-OQ "rx-N-..." labels for active output queues — the same
 * order the stats handler fills values.
 *
 * Fix: each label occupies a fixed ETH_GSTRING_LEN slot; bound the
 * formatting with snprintf() so a long stat name cannot overrun its
 * slot (the original used unbounded sprintf()).
 */
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			snprintf(data, ETH_GSTRING_LEN, "%s",
				 oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & (1ULL << i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				snprintf(data, ETH_GSTRING_LEN, "tx-%d-%s",
					 i, oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		/* for (i = 0; i < oct_dev->num_oqs; i++) { */
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & (1ULL << i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				snprintf(data, ETH_GSTRING_LEN, "rx-%d-%s",
					 i, oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}
int setup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status", WQ_MEM_RECLAIM, 0); if (!lio->rxq_status_wq.wq) { dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); return -ENOMEM; } INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work, octnet_poll_check_rxq_oom_status); lio->rxq_status_wq.wk.ctxptr = lio; queue_delayed_work(lio->rxq_status_wq.wq, &lio->rxq_status_wq.wk.work, msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); return 0; }
static void lio_ethtool_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct lio *lio = GET_LIO(dev); struct octeon_device *oct = lio->oct_dev; u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; if (OCTEON_CN6XXX(oct)) { struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); max_rx = CFG_GET_OQ_MAX_Q(conf6x); max_tx = CFG_GET_IQ_MAX_Q(conf6x); rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } channel->max_rx = max_rx; channel->max_tx = max_tx; channel->rx_count = rx_count; channel->tx_count = tx_count; }
/* ethtool get_eeprom handler: the "eeprom" content is a formatted
 * board-info string; only offset 0 is supported.
 *
 * Fix: the original sprintf()'d into the caller-provided buffer without
 * honoring eeprom->len, risking an overrun, and left 'len' unused.
 * Bound the write with snprintf() to the requested length.
 */
static int lio_get_eeprom(struct net_device *netdev,
			  struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;

	if (eeprom->offset != 0)
		return -EINVAL;

	eeprom->magic = oct_dev->pci_dev->vendor;
	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	snprintf((char *)bytes, eeprom->len,
		 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		 board_info->name, board_info->serial_number,
		 board_info->major, board_info->minor);

	return 0;
}
/* Return register dump user app. */ static void lio_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) { struct lio *lio = GET_LIO(dev); int len = 0; struct octeon_device *oct = lio->oct_dev; memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); regs->version = OCT_ETHTOOL_REGSVER; switch (oct->chip_id) { /* case OCTEON_CN73XX: Todo */ case OCTEON_CN68XX: case OCTEON_CN66XX: len += cn6xxx_read_csr_reg(regbuf + len, oct); len += cn6xxx_read_config_reg(regbuf + len, oct); break; default: dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n", __func__, oct->chip_id); } }
/* Runs in interrupt context. */ static void lio_update_txq_status(struct octeon_device *oct, int iq_num) { struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; struct net_device *netdev; struct lio *lio; netdev = oct->props[iq->ifidx].netdev; /* This is needed because the first IQ does not have * a netdev associated with it. */ if (!netdev) return; lio = GET_LIO(netdev); if (__netif_subqueue_stopped(netdev, iq->q_index) && lio->linfo.link.s.link_up && (!octnet_iq_is_full(oct, iq_num))) { netif_wake_subqueue(netdev, iq->q_index); INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, tx_restart, 1); } }
static void lio_ethtool_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ering) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, rx_pending = 0; if (OCTEON_CN6XXX(oct)) { struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS; rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); } if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) { ering->rx_pending = 0; ering->rx_max_pending = 0; ering->rx_mini_pending = 0; ering->rx_jumbo_pending = rx_pending; ering->rx_mini_max_pending = 0; ering->rx_jumbo_max_pending = rx_max_pending; } else { ering->rx_pending = rx_pending; ering->rx_max_pending = rx_max_pending; ering->rx_mini_pending = 0; ering->rx_jumbo_pending = 0; ering->rx_mini_max_pending = 0; ering->rx_jumbo_max_pending = 0; } ering->tx_pending = tx_pending; ering->tx_max_pending = tx_max_pending; }
/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface index
 * @param num_iqs number of input (TX) queues to create
 * @param num_oqs number of output (RX) queues to create
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 *
 * Creates the DROQs (with NAPI registered and a CPU designated per
 * queue), registers the droq ops, then creates the IQs (with XPS set
 * up when MSI-X vectors are available).  Returns 0 on success, 1 on
 * any queue-creation failure.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	/* All DROQs push packets up via liquidio_push_packet in poll mode. */
	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
			__func__, q, q_no);
		retval = octeon_setup_droq(
		    octeon_dev, q_no,
		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
						lio->ifidx),
		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
						   lio->ifidx),
		    NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so that's why poll_mode must be off
		 * for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
		    octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		/* XPS: pin this TX queue to the CPUs of its MSI-X vector. */
		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
		    octeon_dev->ioq_vector) {
			struct octeon_ioq_vector *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}
/** Routine to push packets arriving on Octeon interface upto network layer.
 * @param octeon_id - octeon device id.
 * @param skbuff - skbuff struct to be passed to network layer.
 * @param len - size of total data received.
 * @param rh - Control header associated with the packet
 * @param param - additional control data with the packet
 * @param arg - farg registered in droq_ops
 *
 * RX fast path: assembles the skb (linear or paged depending on size),
 * consumes the response-header metadata (hardware timestamp, RSS hash,
 * VLAN tag), sets checksum/encapsulation state, and hands the packet to
 * GRO.  Drops the buffer if the interface is not RUNNING or no netdev
 * is attached.
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
	    container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			/* Paged buffer: copy the first MIN_SKB_SIZE bytes
			 * into the linear area and attach the rest as a
			 * page fragment.
			 */
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
							MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			/* Small packet: copy everything into the linear area
			 * and release the page.
			 */
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb,
						page_address(pg_info->page) +
						pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		/* Metadata sits in front of the payload; walk it backwards
		 * from the end of the response-header area.
		 */
		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		/* Strip the full response-header area before handing up. */
		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		napi_gro_receive(napi, skb);

		droq->stats.rx_bytes_received += len -
			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
		droq->stats.rx_pkts_received++;
	} else {
		recv_buffer_free(skb);
	}
}
/* Completion callback for NIC control commands: log the outcome of each
 * command type.  For OCTNET_CMD_CHANGE_MTU it also commits the new MTU
 * to the netdev and kicks the link-status work.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		/* the MAC lives in the low six bytes of udd[0] */
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		netif_info(lio, probe, lio->netdev,
			   "MACAddr changed to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
			   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		break;

	case OCTNET_CMD_CHANGE_MTU:
		/* If command is successful, change the MTU. */
		netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
			   netdev->mtu, nctrl->ncmd.s.param1);
		dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
			 netdev->name, netdev->mtu, nctrl->ncmd.s.param1);
		netdev->mtu = nctrl->ncmd.s.param1;
		queue_delayed_work(lio->link_status_wq.wq,
				   &lio->link_status_wq.wk.work, 0);
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev,
			   "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_ID_ACTIVE:
		netif_info(lio, probe, lio->netdev,
			   "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_ENABLE_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);
		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d ADDED\n",
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d DELETED\n",
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev,
			   "Set RX/TX flow control parameters\n");
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}
/* Completion callback for NIC control commands: log the outcome of each
 * command type.  Commands that failed (non-zero sc_status) are silently
 * ignored.  For MAC-address changes on a VF (param1 != 0) the admin
 * assignment is logged separately.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	/* Firmware reported failure: nothing to log or apply. */
	if (nctrl->sc_status)
		return;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
	case OCTNET_CMD_SET_UC_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		/* the MAC lives in the low six bytes of udd[0] */
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int vfidx = nctrl->ncmd.s.param1 - 1;
			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned)
				netif_info(lio, probe, lio->netdev,
					   "MAC Address %pM is configured for VF %d\n",
					   mac, vfidx);
		} else {
			netif_info(lio, probe, lio->netdev,
				   " MACAddr changed to %pM\n",
				   mac);
		}
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev,
			   "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_ID_ACTIVE:
		netif_info(lio, probe, lio->netdev,
			   "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter enabled\n", netdev->name);
		else
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter disabled\n", netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);
		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d ADDED\n",
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d DELETED\n",
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev,
			   "Set RX/TX flow control parameters\n");
		break;

	case OCTNET_CMD_QUEUE_COUNT_CTL:
		netif_info(lio, probe, lio->netdev,
			   "Queue count updated to %d\n",
			   nctrl->ncmd.s.param1);
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}