/* Read the payload of one NVM directory entry into @data.
 *
 * A DMA-coherent bounce buffer of @length bytes is allocated, firmware is
 * asked via HWRM_NVM_READ to copy @length bytes starting at @offset of
 * directory entry @index into it, and the result is copied out to @data
 * on success.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * hwrm_send_message() status otherwise.
 */
static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			       u32 length, u8 *data)
{
	struct hwrm_nvm_read_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t mapping;
	u8 *bounce;
	int rc;

	/* Firmware DMAs the NVM contents into this host-visible buffer. */
	bounce = dma_alloc_coherent(&bp->pdev->dev, length, &mapping,
				    GFP_KERNEL);
	if (!bounce) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)length);
		return -ENOMEM;
	}

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
	req.host_dest_addr = cpu_to_le64(mapping);
	req.dir_idx = cpu_to_le16(index);
	req.offset = cpu_to_le32(offset);
	req.len = cpu_to_le32(length);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		memcpy(data, bounce, length);
	dma_free_coherent(&bp->pdev->dev, length, bounce, mapping);
	return rc;
}
/* .ndo_set_vf_vlan handler: program a default (transparent) VLAN for a VF
 * through HWRM_FUNC_CFG.  The cached vf->vlan is updated only when firmware
 * accepts the new tag.  Returns 0 or a negative errno / HWRM status.
 */
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 tag;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (qos)
		return -EINVAL;
	if (vlan_id > 4095)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	tag = vlan_id;

	/* Nothing to do if the requested tag is already programmed. */
	if (tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	req.dflt_vlan = cpu_to_le16(tag);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = tag;	/* remember it only on firmware success */
	return rc;
}
/* .ndo_set_vf_mac handler: set the default MAC address of a VF via
 * HWRM_FUNC_CFG.  Broadcast/multicast addresses are rejected; an all-zero
 * address is accepted and means the VF may use its own MAC.
 *
 * NOTE: the driver cache (vf->mac_addr) is updated before the firmware
 * call, matching the original behavior.
 */
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}

	vf = &bp->pf.vf[vf_id];
	memcpy(vf->mac_addr, mac, ETH_ALEN);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
/* Erase the NVM directory entry at @index using HWRM_NVM_ERASE_DIR_ENTRY.
 * Returns the hwrm_send_message() status (0 on success).
 */
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct hwrm_nvm_erase_dir_entry_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
	req.dir_idx = cpu_to_le16(index);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) { struct hwrm_func_buf_rgtr_input req = {0}; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); }
/* Free a CFA encap record previously allocated in hardware.
 *
 * @encap_record_handle: firmware handle, already little-endian.
 *
 * Returns 0 on success or -EIO if firmware rejected the command.
 *
 * Fixes: the log format string was missing its trailing '\n' (kernel log
 * messages must be newline-terminated or they merge with / defer behind the
 * next printk), and the duplicated "if (rc)" tests are merged into one.
 */
static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
		/* Normalize any firmware error to -EIO for callers. */
		rc = -EIO;
	}
	return rc;
}
/* Free a CFA decap filter previously installed in hardware.
 *
 * @decap_filter_handle: firmware handle, already little-endian.
 *
 * Returns 0 on success or -EIO if firmware rejected the command.
 *
 * Fixes: the log format string was missing its trailing '\n' (kernel log
 * messages must be newline-terminated), and the duplicated "if (rc)" tests
 * are merged into one.
 */
static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
		/* Normalize any firmware error to -EIO for callers. */
		rc = -EIO;
	}
	return rc;
}
/* Free a CFA flow entry identified by @flow_handle (already little-endian).
 *
 * Returns 0 on success or -EIO if firmware rejected the command.
 *
 * Fixes: the log format string was missing its trailing '\n' (kernel log
 * messages must be newline-terminated), and the duplicated "if (rc)" tests
 * are merged into one.
 */
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d\n",
			    __func__, flow_handle, rc);
		/* Normalize any firmware error to -EIO for callers. */
		rc = -EIO;
	}
	return rc;
}
/* .ndo_set_vf_spoofchk handler: enable or disable source-MAC anti-spoof
 * checking for a VF via HWRM_FUNC_CFG.
 *
 * Returns 0 on success, -ENOTSUPP on pre-1.7.1 firmware, or a negative
 * errno / HWRM status.
 *
 * Fixes: SRC_MAC_ADDR_CHECK_ENABLE/DISABLE are one-shot command flags, not
 * persistent state.  The old code OR-ed them into vf->func_flags and stored
 * the result, so after one enable followed by one disable BOTH bits were
 * set in vf->func_flags and re-sent with every subsequent FUNC_CFG command
 * that uses vf->func_flags.  Send exactly one flag per request and do not
 * persist it; the driver-side state lives in the BNXT_VF_SPOOFCHK bit only.
 */
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	/* Anti-spoof flags require firmware spec 1.7.1 or newer. */
	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;	/* no change requested */

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/*TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, int max_tx_rate) { struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); struct bnxt_vf_info *vf; u32 pf_link_speed; int rc; rc = bnxt_vf_ndo_prep(bp, vf_id); if (rc) return rc; vf = &bp->pf.vf[vf_id]; pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); if (max_tx_rate > pf_link_speed) { netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", max_tx_rate, vf_id); return -EINVAL; } if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", min_tx_rate, vf_id); return -EINVAL; } if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(vf->fw_fid); req.flags = cpu_to_le32(vf->func_flags); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); req.max_bw = cpu_to_le32(max_tx_rate); req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); req.min_bw = cpu_to_le32(min_tx_rate); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { vf->min_tx_rate = min_tx_rate; vf->max_tx_rate = max_tx_rate; } return rc; }
/* Copy the NVM directory into @data (up to @len bytes).
 *
 * Output layout: byte 0 = entry count, byte 1 = entry size, followed by the
 * raw directory entries DMAed by firmware; any remaining space in @data is
 * pre-filled with 0xff.
 *
 * NOTE(review): dir_entries and entry_length are u32 but are stored into
 * single u8 header bytes, so values > 255 would be truncated — presumably
 * the firmware keeps both small; confirm against the NVM spec.
 * NOTE(review): buflen = dir_entries * entry_length is a u32-by-u32 product
 * assigned to size_t with no overflow check — assumed not to overflow for
 * real directory sizes; verify.
 *
 * Returns 0 on success, -EINVAL if @len < 2, -ENOMEM on allocation failure,
 * or the hwrm_send_message() status.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};

	/* Ask firmware how many entries exist and how large each one is. */
	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	/* Pre-fill the rest of the caller's buffer with 0xff (erased flash
	 * pattern) in case the directory is shorter than @len.
	 */
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	/* Bounce buffer for firmware to DMA the directory entries into. */
	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)buflen);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	/* Copy out at most min(len, buflen) bytes. */
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
	return rc;
}
/* Request a firmware (embedded-processor) reset appropriate for the flash
 * image type just written.
 *
 * Maps the NVM directory type @dir_type to the embedded processor that
 * consumes that image (boot/ChiMP, APE/management, KONG/network control,
 * BONO/RoCE) and sends HWRM_FW_RESET.  For boot-code images the ChiMP is
 * additionally marked to self-reset on the next PCIe reset rather than
 * immediately.
 *
 * Returns -EINVAL for an unrecognized @dir_type, otherwise the
 * hwrm_send_message() status.
 */
static int bnxt_firmware_reset(struct net_device *dev, u16 dir_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

	/* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		req.embedded_proc_type =
			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Write @data_len bytes of @data into NVM as a directory entry described by
 * @dir_type / @dir_ordinal / @dir_ext / @dir_attr, via HWRM_NVM_WRITE.
 *
 * The payload is copied into a DMA-coherent bounce buffer for firmware to
 * read; the (longer) FLASH_NVRAM_TIMEOUT is used because flash programming
 * is slow.  Returns 0, -ENOMEM, or the hwrm_send_message() status.
 */
static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
			    u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
			    const u8 *data, size_t data_len)
{
	struct hwrm_nvm_write_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t mapping;
	u8 *bounce;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
	req.dir_type = cpu_to_le16(dir_type);
	req.dir_ordinal = cpu_to_le16(dir_ordinal);
	req.dir_ext = cpu_to_le16(dir_ext);
	req.dir_attr = cpu_to_le16(dir_attr);
	req.dir_data_length = cpu_to_le32(data_len);

	bounce = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
				    GFP_KERNEL);
	if (!bounce) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)data_len);
		return -ENOMEM;
	}
	memcpy(bounce, data, data_len);
	req.host_src_addr = cpu_to_le64(mapping);

	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
	dma_free_coherent(&bp->pdev->dev, data_len, bounce, mapping);
	return rc;
}