static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                   u32 msg_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {0};
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);

        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
                goto exec_fwd_resp_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
                           resp->error_code);
                rc = -1;
        }

exec_fwd_resp_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}
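/* ndo_set_vf_mac handler: store the admin-assigned MAC for the VF and
 * program it as the VF's default MAC address via HWRM_FUNC_CFG.
 */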
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
        /* reject broadcast or multicast MAC addr; a zero MAC addr means
         * the VF may use its own MAC addr
         */
        if (is_multicast_ether_addr(mac)) {
                netdev_err(dev, "Invalid VF ethernet address\n");
                return -EINVAL;
        }
        vf = &bp->pf.vf[vf_id];

        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
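/* Read @length bytes of NVRAM item @index, starting at @offset, bouncing
 * the data through a DMA-coherent buffer into @data.
 */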
static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
                               u32 length, u8 *data)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;
        u8 *buf;
        dma_addr_t dma_handle;
        struct hwrm_nvm_read_input req = {0};

        buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
                                 GFP_KERNEL);
        if (!buf) {
                netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
                           (unsigned)length);
                return -ENOMEM;
        }
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
        req.host_dest_addr = cpu_to_le64(dma_handle);
        req.dir_idx = cpu_to_le16(index);
        req.offset = cpu_to_le32(offset);
        req.len = cpu_to_le32(length);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc == 0)
                memcpy(data, buf, length);
        dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
        return rc;
}
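/* ndo_set_vf_vlan handler: program @vlan_id as the VF's default VLAN.
 * A non-zero QoS/priority is not supported yet and is rejected.
 */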
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u16 vlan_tag;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        /* TODO: proper handling of user priority is still needed; for now,
         * fail the command if a valid priority is given
         */
        if (vlan_id > 4095 || qos)
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        vlan_tag = vlan_id;
        if (vlan_tag == vf->vlan)
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                vf->vlan = vlan_tag;
        return rc;
}
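/* Erase a single NVRAM directory entry, identified by its directory index. */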
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
        struct bnxt *bp = netdev_priv(dev);
        struct hwrm_nvm_erase_dir_entry_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
        req.dir_idx = cpu_to_le16(index);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
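/* Build a VXLAN-over-IPv4 encap record (outer L2 header, IPv4 header and
 * VXLAN attributes) from the tunnel key and resolved L2 info, and ask the
 * FW to allocate it. On success the FW handle is returned in
 * *encap_record_handle.
 */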
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
                                       struct ip_tunnel_key *encap_key,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 *encap_record_handle)
{
        struct hwrm_cfa_encap_record_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_encap_record_alloc_input req = { 0 };
        struct hwrm_cfa_encap_data_vxlan *encap =
                        (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
        struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
                        (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

        req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

        ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
        ether_addr_copy(encap->src_mac_addr, l2_info->smac);
        if (l2_info->num_vlans) {
                encap->num_vlan_tags = l2_info->num_vlans;
                encap->ovlan_tci = l2_info->inner_vlan_tci;
                encap->ovlan_tpid = l2_info->inner_vlan_tpid;
        }

        encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
        encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
        encap_ipv4->ttl = encap_key->ttl;

        encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
        encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
        encap_ipv4->protocol = IPPROTO_UDP;

        encap->dst_port = encap_key->tp_dst;
        encap->vni = tunnel_id_to_key32(encap_key->tun_id);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *encap_record_handle = resp->encap_record_id;
        else
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc)
                rc = -EIO;
        return rc;
}
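/* Register the PF's HWRM request buffer pages with the FW; forwarded VF
 * commands are placed in these pages (up to 4 page addresses are passed).
 */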
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        struct hwrm_func_buf_rgtr_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

        req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
        req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
        req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
        req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
        req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
        req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
        req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
        struct hwrm_cfa_flow_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
        req.flow_handle = flow_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
                            __func__, flow_handle, rc);

        if (rc)
                rc = -EIO;
        return rc;
}
static int hwrm_cfa_encap_record_free(struct bnxt *bp,
                                      __le32 encap_record_handle)
{
        struct hwrm_cfa_encap_record_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
        req.encap_record_id = encap_record_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        if (rc)
                rc = -EIO;
        return rc;
}
static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
                                      __le32 decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
        req.decap_filter_id = decap_filter_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        if (rc)
                rc = -EIO;
        return rc;
}
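/* Query the NVRAM directory header: the number of directory entries and the
 * size of each entry.
 */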
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;
        struct hwrm_nvm_get_dir_info_input req = {0};
        struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                *entries = le32_to_cpu(output->entries);
                *length = le32_to_cpu(output->entry_length);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}
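/* Ask the FW to free the resources of every VF in the range
 * [first_vf_id, first_vf_id + num_vfs), one HWRM command per VF.
 */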
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
        int i, rc = 0;
        struct bnxt_pf_info *pf = &bp->pf;
        struct hwrm_func_vf_resc_free_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
                req.vf_id = cpu_to_le16(i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}
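/* ndo_set_vf_spoofchk handler: toggle the FW's source-MAC anti-spoof check
 * for a VF. Requires HWRM spec 1.7.1 or newer.
 */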
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        bool old_setting = false;
        u32 func_flags;
        int rc;

        if (bp->hwrm_spec_code < 0x10701)
                return -ENOTSUPP;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        if (vf->flags & BNXT_VF_SPOOFCHK)
                old_setting = true;
        if (old_setting == setting)
                return 0;

        func_flags = vf->func_flags;
        if (setting)
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /* TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->func_flags = func_flags;
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
                        vf->flags &= ~BNXT_VF_SPOOFCHK;
        }
        return rc;
}
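/* ndo_set_vf_rate handler: validate the requested min/max TX rates against
 * the PF link speed, then program them as the VF's min/max bandwidth.
 */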
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                   int max_tx_rate)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u32 pf_link_speed;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        if (max_tx_rate > pf_link_speed) {
                netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
                            max_tx_rate, vf_id);
                return -EINVAL;
        }

        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                            min_tx_rate, vf_id);
                return -EINVAL;
        }
        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
        req.min_bw = cpu_to_le32(min_tx_rate);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->min_tx_rate = min_tx_rate;
                vf->max_tx_rate = max_tx_rate;
        }
        return rc;
}
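/* Copy the NVRAM directory into @data, prefixed by one byte each for the
 * entry count and entry size, truncating to at most @len bytes.
 */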
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;
        u32 dir_entries;
        u32 entry_length;
        u8 *buf;
        size_t buflen;
        dma_addr_t dma_handle;
        struct hwrm_nvm_get_dir_entries_input req = {0};

        rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
        if (rc != 0)
                return rc;

        /* Insert 2 bytes of directory info (count and size of entries) */
        if (len < 2)
                return -EINVAL;

        *data++ = dir_entries;
        *data++ = entry_length;
        len -= 2;
        memset(data, 0xff, len);

        buflen = dir_entries * entry_length;
        buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
                                 GFP_KERNEL);
        if (!buf) {
                netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
                           (unsigned)buflen);
                return -ENOMEM;
        }
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
        req.host_dest_addr = cpu_to_le64(dma_handle);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc == 0)
                memcpy(data, buf, len > buflen ? buflen : len);
        dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
        return rc;
}
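/* Runs on a VF: query the function's capabilities for a PF-assigned
 * permanent MAC and, if valid, adopt it as the netdev's address.
 */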
void bnxt_update_vf_mac(struct bnxt *bp)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
        req.fid = cpu_to_le16(0xffff);

        mutex_lock(&bp->hwrm_cmd_lock);
        if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
                goto update_vf_mac_exit;

        if (!is_valid_ether_addr(resp->perm_mac_address))
                goto update_vf_mac_exit;
        if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
                memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
        /* overwrite netdev dev_addr with admin VF MAC */
        memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
}
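/* Request a self-reset of the embedded processor that owns the firmware
 * image identified by @dir_type (boot code -> ChiMP, APE -> management,
 * KONG -> network control, BONO -> RoCE).
 */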
static int bnxt_firmware_reset(struct net_device *dev,
                               u16 dir_type)
{
        struct bnxt *bp = netdev_priv(dev);
        struct hwrm_fw_reset_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

        /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
        /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
        /*       (e.g. when firmware isn't already running) */
        switch (dir_type) {
        case BNX_DIR_TYPE_CHIMP_PATCH:
        case BNX_DIR_TYPE_BOOTCODE:
        case BNX_DIR_TYPE_BOOTCODE_2:
                req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
                /* Self-reset ChiMP upon next PCIe reset: */
                req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
                break;
        case BNX_DIR_TYPE_APE_FW:
        case BNX_DIR_TYPE_APE_PATCH:
                req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
                break;
        case BNX_DIR_TYPE_KONG_FW:
        case BNX_DIR_TYPE_KONG_PATCH:
                req.embedded_proc_type =
                        FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
                break;
        case BNX_DIR_TYPE_BONO_FW:
        case BNX_DIR_TYPE_BONO_PATCH:
                req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
                break;
        default:
                return -EINVAL;
        }

        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
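/* Write one NVRAM item: stage @data in a DMA-coherent buffer and issue
 * HWRM_NVM_WRITE with the directory coordinates (type/ordinal/ext/attr).
 */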
static int bnxt_flash_nvram(struct net_device *dev,
                            u16 dir_type,
                            u16 dir_ordinal,
                            u16 dir_ext,
                            u16 dir_attr,
                            const u8 *data,
                            size_t data_len)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;
        struct hwrm_nvm_write_input req = {0};
        dma_addr_t dma_handle;
        u8 *kmem;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);

        req.dir_type = cpu_to_le16(dir_type);
        req.dir_ordinal = cpu_to_le16(dir_ordinal);
        req.dir_ext = cpu_to_le16(dir_ext);
        req.dir_attr = cpu_to_le16(dir_attr);
        req.dir_data_length = cpu_to_le32(data_len);

        kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
                                  GFP_KERNEL);
        if (!kmem) {
                netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
                           (unsigned)data_len);
                return -ENOMEM;
        }
        memcpy(kmem, data, data_len);
        req.host_src_addr = cpu_to_le64(dma_handle);

        rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
        dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);

        return rc;
}
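/* Forward an async event completion to one VF, or to all VFs (broadcast
 * target id 0xffff) when @vf is NULL.
 */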
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                          struct bnxt_vf_info *vf,
                                          u16 event_id)
{
        struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_fwd_async_event_cmpl_input req = {0};
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
        if (vf)
                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
                /* broadcast this async event to all VFs */
                req.encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

        if (rc) {
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
                           rc);
                goto fwd_async_event_cmpl_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
                           resp->error_code);
                rc = -1;
        }

fwd_async_event_cmpl_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}
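/* Translate a TC flower flow (L2/L3/L4 keys, masks and actions) into a
 * HWRM_CFA_FLOW_ALLOC request; on success the FW flow handle is returned
 * in *flow_handle.
 */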
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    __le16 ref_flow_handle,
                                    __le32 tunnel_handle, __le16 *flow_handle)
{
        struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_tc_actions *actions = &flow->actions;
        struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
        struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
        struct hwrm_cfa_flow_alloc_input req = { 0 };
        u16 flow_flags = 0, action_flags = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

        req.src_fid = cpu_to_le16(flow->src_fid);
        req.ref_flow_handle = ref_flow_handle;

        if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                req.tunnel_handle = tunnel_handle;
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
        }

        req.ethertype = flow->l2_key.ether_type;
        req.ip_proto = flow->l4_key.ip_proto;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
                memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
                memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
        }

        if (flow->l2_key.num_vlans > 0) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
                /* FW expects the inner_vlan_tci value to be set
                 * in outer_vlan_tci when num_vlans is 1 (which is
                 * always the case in TC.)
                 */
                req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
        }

        /* If all IP and L4 fields are wildcarded then this is an L2 flow */
        if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
            is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
        } else {
                flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

                if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
                        req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
                        req.ip_dst_mask_len =
                                inet_mask_len(l3_mask->ipv4.daddr.s_addr);
                        req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
                        req.ip_src_mask_len =
                                inet_mask_len(l3_mask->ipv4.saddr.s_addr);
                } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
                        memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
                               sizeof(req.ip_dst));
                        req.ip_dst_mask_len =
                                ipv6_mask_len(&l3_mask->ipv6.daddr);
                        memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
                               sizeof(req.ip_src));
                        req.ip_src_mask_len =
                                ipv6_mask_len(&l3_mask->ipv6.saddr);
                }
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
                req.l4_src_port = flow->l4_key.ports.sport;
                req.l4_src_port_mask = flow->l4_mask.ports.sport;
                req.l4_dst_port = flow->l4_key.ports.dport;
                req.l4_dst_port_mask = flow->l4_mask.ports.dport;
        } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
                /* l4 ports serve as type/code when ip_proto is ICMP */
                req.l4_src_port = htons(flow->l4_key.icmp.type);
                req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
                req.l4_dst_port = htons(flow->l4_key.icmp.code);
                req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
        }
        req.flags = cpu_to_le16(flow_flags);

        if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
        } else {
                if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                        action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
                        req.dst_fid = cpu_to_le16(actions->dst_fid);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
                        req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        /* Rewrite config with tpid = 0 implies vlan pop */
                        req.l2_rewrite_vlan_tpid = 0;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
        }
        req.action_flags = cpu_to_le16(action_flags);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *flow_handle = resp->flow_handle;
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
                rc = -ENOSPC;
        else if (rc)
                rc = -EIO;
        return rc;
}
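/* Install a VXLAN decap filter that matches the outer tunnel headers; the
 * FW handle returned in *decap_filter_handle is later referenced by decap
 * flows.
 */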
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
                                       struct bnxt_tc_flow *flow,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 ref_decap_handle,
                                       __le32 *decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
        struct ip_tunnel_key *tun_key = &flow->tun_key;
        u32 enables = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

        req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
                   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
        req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
        req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
                /* tunnel_id is wrongly defined in hsi defn. as __le32 */
                req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
                req.t_ivlan_vid = l2_info->inner_vlan_tci;
        }

        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
        req.ethertype = htons(ETH_P_IP);

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
                req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
                req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
                req.src_ipaddr[0] = tun_key->u.ipv4.src;
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
                req.dst_port = tun_key->tp_dst;
        }

        /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
         * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
         */
        req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
        req.enables = cpu_to_le32(enables);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *decap_filter_handle = resp->decap_filter_id;
        else
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc)
                rc = -EIO;
        return rc;
}
/* Only called by the PF to reserve resources for VFs */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
        u32 rc = 0, mtu, i;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
        u16 vf_ring_grps;
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

        /* Remaining rings are distributed equally among VFs for now */
        /* TODO: the following workaround is needed to restrict the total
         * number of vf_cp_rings so that it does not exceed the number of
         * HW ring groups. This WA should be removed once new HWRM provides
         * HW ring groups capability in hwrm_func_qcap.
         */
        vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
        vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
        /* TODO: restore this logic below once the WA above is removed */
        /* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
        vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
                              num_vfs;
        else
                vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
        vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
                                  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

        mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        req.mru = cpu_to_le16(mtu);
        req.mtu = cpu_to_le16(mtu);

        req.num_rsscos_ctxs = cpu_to_le16(1);
        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.num_tx_rings = cpu_to_le16(vf_tx_rings);
        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.num_l2_ctxs = cpu_to_le16(4);
        vf_vnics = 1;

        req.num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
        req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                req.fid = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (!rc) {
                pf->max_tx_rings -= vf_tx_rings * num_vfs;
                pf->max_rx_rings -= vf_rx_rings * num_vfs;
                pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                pf->max_cp_rings -= vf_cp_rings * num_vfs;
                pf->max_rsscos_ctxs -= num_vfs;
                pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
                pf->max_vnics -= vf_vnics * num_vfs;
        }
        return rc;
}