int
ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size)
{
	uint32_t version;

	version = mpu->id.vernum & 0x0000ff00;
	if ((mpu->id.idnum != 0x2055504d) ||
	    (mpu->hw.obj_size != obj_size) ||
	    (version != 0x00003100)) {
		PMD_DRV_LOG(ERR,
			    "  MPU module not found as expected %08x"
			    " \"%c%c%c%c %c%c%c%c\"\n",
			    mpu->id.idnum,
			    mpu->id.id[0], mpu->id.id[1],
			    mpu->id.id[2], mpu->id.id[3],
			    mpu->id.ver[0], mpu->id.ver[1],
			    mpu->id.ver[2], mpu->id.ver[3]);
		PMD_DRV_LOG(ERR,
			    "  MPU HW num_queues: %u hw_depth %u,"
			    " obj_size: %u, obj_per_mrr: %u"
			    " Expected size %u\n",
			    mpu->hw.num_queues,
			    mpu->hw.hw_depth,
			    mpu->hw.obj_size,
			    mpu->hw.obj_per_mrr,
			    obj_size);
		return -1;
	}
	return 0;
}
static int
rte_pmd_i40e_get_vf_native_stats(uint16_t port, uint16_t vf_id,
				 struct i40e_eth_stats *stats)
{
	struct rte_eth_dev *dev;
	struct i40e_pf *pf;
	struct i40e_vsi *vsi;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_i40e_supported(dev))
		return -ENOTSUP;

	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (vf_id >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vsi = pf->vfs[vf_id].vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	i40e_update_vsi_stats(vsi);
	memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));

	return 0;
}
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	ret = bnx2x_init(sc);
	if (ret) {
		PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
		return -1;
	}

	if (IS_PF(sc)) {
		rte_intr_callback_register(&dev->pci_dev->intr_handle,
					   bnx2x_interrupt_handler,
					   (void *)dev);

		if (rte_intr_enable(&dev->pci_dev->intr_handle))
			PMD_DRV_LOG(ERR, "rte_intr_enable failed");
	}

	ret = bnx2x_dev_rx_init(dev);
	if (ret != 0) {
		PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
		return -3;
	}

	/* Print important adapter info for the user. */
	bnx2x_print_adapter_info(sc);

	DELAY_MS(2500);

	return ret;
}
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.jumbo_frame)
		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR,
			    "The number of TX queues is greater than number of RX queues");
		return -EINVAL;
	}

	sc->num_queues =
		MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (sc->num_queues > mp_ncpus) {
		PMD_DRV_LOG(ERR,
			    "The number of queues is more than number of CPUs");
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
		    sc->num_queues, sc->mtu);

	/* allocate ilt */
	if (bnx2x_alloc_ilt_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem failed");
		return -ENXIO;
	}

	/* allocate the host hardware/software hsi structures */
	if (bnx2x_alloc_hsi_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem failed");
		bnx2x_free_ilt_mem(sc);
		return -ENXIO;
	}

	if (IS_VF(sc)) {
		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
				    &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
				    RTE_CACHE_LINE_SIZE) != 0)
			return -ENOMEM;

		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
				 sc->vf2pf_mbox_mapping.vaddr;

		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
				    &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
				    RTE_CACHE_LINE_SIZE) != 0)
			return -ENOMEM;

		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
				     sc->pf2vf_bulletin_mapping.vaddr;

		ret = bnx2x_vf_get_resources(sc, sc->num_queues,
					     sc->num_queues);
		if (ret)
			return ret;
	}

	return 0;
}
static int
avf_start_queues(struct rte_eth_dev *dev)
{
	struct avf_rx_queue *rxq;
	struct avf_tx_queue *txq;
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq->tx_deferred_start)
			continue;
		if (avf_dev_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start TX queue %u", i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq->rx_deferred_start)
			continue;
		if (avf_dev_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR, "Failed to start RX queue %u", i);
			return -1;
		}
	}

	return 0;
}
/* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
static inline int
i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
{
	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
	uint16_t num, i;

	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
			return -EINVAL;
		}
	}

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
	if (num > I40E_MAX_FLXPLD_FIED) {
		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		if (flex_pit[i].size & 0x01 ||
		    flex_pit[i].dst_offset & 0x01 ||
		    flex_pit[i].src_offset & 0x01) {
			PMD_DRV_LOG(ERR, "flex payload should be measured"
				    " in words");
			return -EINVAL;
		}
		if (i != num - 1)
			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
	}

	return 0;
}
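/*
 * Worked example for the parity checks above (values assumed): a flex field
 * with src_offset = 4, size = 6 and dst_offset = 2 passes, since every value
 * is an even number of bytes, i.e. a whole number of 2-byte words; the same
 * field with size = 5 is rejected because the hardware counts flex payload
 * in words, not bytes.
 */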
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	struct i40e_virtchnl_vsi_queue_config_info *vc_vqci =
		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *vc_qpi;
	int i, ret = I40E_SUCCESS;

	if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
	    vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
	    msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
					vc_vqci->num_queue_pairs)) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vc_qpi = vc_vqci->qpair;
	for (i = 0; i < vc_vqci->num_queue_pairs; i++) {
		if (vc_qpi[i].rxq.queue_id > vsi->nb_qps - 1 ||
		    vc_qpi[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/*
		 * Apply VF RX queue setting to HMC.
		 * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
		 * then the extra information of
		 * 'struct i40e_virtchnl_queue_pair_extra_info' is needed,
		 * otherwise set the last parameter to NULL.
		 */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq,
						I40E_CFG_CRCSTRIP_DEFAULT) !=
		    I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf, &vc_qpi[i].txq) !=
		    I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				    ret, NULL, 0);

	return ret;
}
int
build_all_dependencies(struct rte_eventdev *dev)
{
	int err = 0;
	unsigned int i;
	struct opdl_evdev *device = opdl_pmd_priv(dev);
	uint8_t start_qid = 0;

	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
		struct opdl_queue *queue = &device->queue[i];

		if (!queue->initialized)
			break;

		if (queue->q_pos == OPDL_Q_POS_START) {
			start_qid = i;
			continue;
		}

		if (queue->q_pos == OPDL_Q_POS_MIDDLE) {
			err = opdl_add_deps(device, i, i - 1);
			if (err < 0) {
				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
					    "dependency addition for queue:[%u] - FAILED",
					    dev->data->dev_id,
					    queue->external_qid);
				break;
			}
		}

		if (queue->q_pos == OPDL_Q_POS_END) {
			/* Add this dependency */
			err = opdl_add_deps(device, i, i - 1);
			if (err < 0) {
				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
					    "dependency addition for queue:[%u] - FAILED",
					    dev->data->dev_id,
					    queue->external_qid);
				break;
			}
			/* Add dependency for rx on tx */
			err = opdl_add_deps(device, start_qid, i);
			if (err < 0) {
				PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
					    "dependency addition for queue:[%u] - FAILED",
					    dev->data->dev_id,
					    queue->external_qid);
				break;
			}
		}
	}

	if (!err)
		fprintf(stdout, "Success - dependencies built\n");

	return err;
}
void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
				 struct rte_crypto_sym_xform *xform,
				 void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	int qat_cmd_id;

	PMD_INIT_FUNC_TRACE();

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		goto error_out;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
								   session);
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		session = qat_crypto_sym_configure_session_auth(dev, xform,
								session);
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
								   session);
		session = qat_crypto_sym_configure_session_auth(dev, xform,
								session);
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		session = qat_crypto_sym_configure_session_auth(dev, xform,
								session);
		session = qat_crypto_sym_configure_session_cipher(dev, xform,
								   session);
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		PMD_DRV_LOG(ERR, "Unsupported Service %u", session->qat_cmd);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u", session->qat_cmd);
		goto error_out;
	}

	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}
static int
avf_init_rss(struct avf_adapter *adapter)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
	struct rte_eth_rss_conf *rss_conf;
	uint8_t i, j, nb_q;
	int ret;

	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
		       AVF_MAX_NUM_QUEUES);

	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
		PMD_DRV_LOG(DEBUG, "RSS is not supported");
		return -ENOTSUP;
	}
	if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
		/* set all lut items to default queue */
		for (i = 0; i < vf->vf_res->rss_lut_size; i++)
			vf->rss_lut[i] = 0;
		ret = avf_configure_rss_lut(adapter);
		return ret;
	}

	/* In AVF, RSS enablement is set by PF driver. It is not supported
	 * to set based on rss_conf->rss_hf.
	 */

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key; stay within rss_key_size */
		for (i = 0; i < vf->vf_res->rss_key_size; i++)
			vf->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vf->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vf->vf_res->rss_key_size));
	}

	/* init RSS LUT table */
	for (i = 0, j = 0; i < vf->vf_res->rss_lut_size; i++, j++) {
		if (j >= nb_q)
			j = 0;
		vf->rss_lut[i] = j;
	}
	/* send virtchnl ops to configure RSS */
	ret = avf_configure_rss_lut(adapter);
	if (ret)
		return ret;
	ret = avf_configure_rss_key(adapter);
	if (ret)
		return ret;

	return 0;
}
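/*
 * Worked example of the round-robin LUT fill above (sizes assumed): with
 * nb_q = 4 RX queues and rss_lut_size = 16, the LUT becomes
 * 0,1,2,3,0,1,2,3,... so hash results spread evenly across the four queues.
 */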
void
bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
{
	if ((offset % 4) != 0) {
		PMD_DRV_LOG(DEBUG, "Unaligned 32-bit write to 0x%08zx",
			    offset);
	}

	PMD_DRV_LOG(DEBUG, "offset=0x%08zx val=0x%08x", offset, val);
	*((volatile uint32_t *)((uintptr_t)sc->bar[BAR0].base_addr +
				offset)) = val;
}
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
		       int socket_id)
{
	const struct rte_memzone *mz;
	unsigned int memzone_flags = 0;
	const struct rte_memseg *ms;

	PMD_INIT_FUNC_TRACE();

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
		    ((socket_id == SOCKET_ID_ANY) ||
		     (socket_id == mz->socket_id))) {
			PMD_DRV_LOG(DEBUG, "re-use memzone already "
				    "allocated for %s", queue_name);
			return mz;
		}

		PMD_DRV_LOG(ERR, "Incompatible memzone already "
			    "allocated %s, size %u, socket %d. "
			    "Requested size %u, socket %d",
			    queue_name, (uint32_t)mz->len,
			    mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
		    queue_name, queue_size, socket_id);

	ms = rte_eal_get_physmem_layout();
	switch (ms[0].hugepage_sz) {
	case (RTE_PGSIZE_2M):
		memzone_flags = RTE_MEMZONE_2MB;
		break;
	case (RTE_PGSIZE_1G):
		memzone_flags = RTE_MEMZONE_1GB;
		break;
	case (RTE_PGSIZE_16M):
		memzone_flags = RTE_MEMZONE_16MB;
		break;
	case (RTE_PGSIZE_16G):
		memzone_flags = RTE_MEMZONE_16GB;
		break;
	default:
		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
	}

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(queue_name, queue_size,
					   socket_id, 0,
					   RTE_CACHE_LINE_SIZE,
					   RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(queue_name, queue_size,
					   socket_id, memzone_flags,
					   queue_size);
#endif
}
static int
i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
{
	struct i40e_vf_representor *representor = ethdev->data->dev_private;
	struct rte_eth_dev *pdev;
	struct i40e_pf_vf *vf;
	struct i40e_vsi *vsi;
	struct i40e_pf *pf;
	uint32_t vfid;

	pdev = representor->adapter->eth_dev;
	vfid = representor->vf_id;

	if (!is_i40e_supported(pdev)) {
		PMD_DRV_LOG(ERR, "Invalid PF dev.");
		return -EINVAL;
	}

	pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);

	if (vfid >= pf->vf_num || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid VF ID.");
		return -EINVAL;
	}

	vf = &pf->vfs[vfid];
	vsi = vf->vsi;
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Invalid VSI.");
		return -EINVAL;
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* Enable or disable VLAN filtering offload */
		if (ethdev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_FILTER)
			return i40e_vsi_config_vlan_filter(vsi, TRUE);
		else
			return i40e_vsi_config_vlan_filter(vsi, FALSE);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping offload */
		if (ethdev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			return i40e_vsi_config_vlan_stripping(vsi, TRUE);
		else
			return i40e_vsi_config_vlan_stripping(vsi, FALSE);
	}

	return -EINVAL;
}
/*
 * Configure flow director related settings
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	 * Configuration needs to be done before
	 * flow director filters are added.
	 * If filters exist, flush them.
	 */
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = I40E_READ_REG(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "invalid configuration arguments.");
		return -EINVAL;
	}
	/* configure flex payload */
	for (i = 0; i < conf->nb_payloads; i++)
		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
	/* configure flex mask */
	for (i = 0; i < conf->nb_flexmasks; i++) {
		pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
		i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
	}

	return ret;
}
uint16_t
bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
{
	uint16_t val;

	if ((offset % 2) != 0) {
		PMD_DRV_LOG(DEBUG, "Unaligned 16-bit read from 0x%08zx",
			    offset);
	}

	val = (uint16_t)(*((volatile uint16_t *)
			   ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
	PMD_DRV_LOG(DEBUG, "offset=0x%08zx val=0x%08x", offset, val);

	return val;
}
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	struct i40e_vsi *vsi = vf->vsi;
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vsi_queue_config_info *qconfig =
		(struct i40e_virtchnl_vsi_queue_config_info *)msg;
	int i;
	struct i40e_virtchnl_queue_pair_info *qpair;

	if (msg == NULL || msglen <= sizeof(*qconfig) ||
	    qconfig->num_queue_pairs > vsi->nb_qps) {
		PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	qpair = qconfig->qpair;
	for (i = 0; i < qconfig->num_queue_pairs; i++) {
		if (qpair[i].rxq.queue_id > vsi->nb_qps - 1 ||
		    qpair[i].txq.queue_id > vsi->nb_qps - 1) {
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF RX queue setting to HMC */
		if (i40e_pf_host_hmc_config_rxq(hw, vf, &qpair[i].rxq) !=
		    I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure RX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}

		/* Apply VF TX queue setting to HMC */
		if (i40e_pf_host_hmc_config_txq(hw, vf, &qpair[i].txq) !=
		    I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Configure TX queue HMC failed");
			ret = I40E_ERR_PARAM;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
	struct i40e_hmc_obj_rxq rx_ctx;
	int err = I40E_SUCCESS;

	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	/* Init the RX queue in hardware */
	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = 0;
	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif
	rx_ctx.dtype = i40e_header_split_none;
	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
	rx_ctx.rxmax = ETHER_MAX_LEN;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 0;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
		return err;
	}
	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
		return err;
	}
	rxq->qrx_tail = hw->hw_addr +
		I40E_QRX_TAIL(rxq->vsi->base_queue);

	rte_wmb();
	/* Init the RX tail register. */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return err;
}
static int
avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
{
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t buf_size, max_pkt_len, len;

	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;

	/* Calculate the maximum packet length allowed */
	len = rxq->rx_buf_len * AVF_MAX_CHAINED_RX_BUFFERS;
	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);

	/* Check if the jumbo frame and maximum packet length are set
	 * correctly.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (max_pkt_len <= ETHER_MAX_LEN ||
		    max_pkt_len > AVF_FRAME_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is enabled",
				    (uint32_t)ETHER_MAX_LEN,
				    (uint32_t)AVF_FRAME_SIZE_MAX);
			return -EINVAL;
		}
	} else {
		if (max_pkt_len < ETHER_MIN_LEN ||
		    max_pkt_len > ETHER_MAX_LEN) {
			PMD_DRV_LOG(ERR, "maximum packet length must be "
				    "larger than %u and smaller than %u, "
				    "as jumbo frame is disabled",
				    (uint32_t)ETHER_MIN_LEN,
				    (uint32_t)ETHER_MAX_LEN);
			return -EINVAL;
		}
	}

	rxq->max_pkt_len = max_pkt_len;
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE > buf_size)
		dev_data->scattered_rx = 1;

	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	AVF_WRITE_FLUSH(hw);

	return 0;
}
static uint16_t
opdl_claim(struct opdl_port *p, struct rte_event ev[], uint16_t num)
{
	uint32_t num_events = 0;

	if (unlikely(num > MAX_OPDL_CONS_Q_DEPTH)) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "Attempt to dequeue num of events larger than port (%d) max",
			    opdl_pmd_dev_id(p->opdl),
			    p->id);
		rte_errno = EINVAL; /* rte_errno takes positive errno values */
		return 0;
	}

	num_events = opdl_stage_claim(p->deq_stage_inst,
				      (void *)ev,
				      num,
				      NULL,
				      false,
				      p->atomic_claim);

	update_on_dequeue(p, ev, num, num_events);

	return num_events;
}
static int
i40e_pf_host_process_cmd_cfg_vlan_offload(struct i40e_pf_vf *vf,
					  uint8_t *msg,
					  uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vlan_offload_info *offload =
		(struct i40e_virtchnl_vlan_offload_info *)msg;

	if (msg == NULL || msglen != sizeof(*offload)) {
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	ret = i40e_vsi_config_vlan_stripping(vf->vsi,
					     !!offload->enable_vlan_strip);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to configure vlan stripping\n");

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
				  uint8_t *msg,
				  uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
		(struct i40e_virtchnl_vlan_filter_list *)msg;
	int i;
	uint16_t *vid;

	if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
		PMD_DRV_LOG(ERR, "delete_vlan argument too short\n");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	vid = vlan_filter_list->vlan_id;

	for (i = 0; i < vlan_filter_list->num_elements; i++) {
		ret = i40e_vsi_delete_vlan(vf->vsi, vid[i]);
		if (ret != I40E_SUCCESS)
			goto send_msg;
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN,
				    ret, NULL, 0);

	return ret;
}
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
					   uint8_t *msg,
					   uint16_t msglen)
{
	int ret = I40E_SUCCESS;
	struct i40e_virtchnl_ether_addr_list *addr_list =
		(struct i40e_virtchnl_ether_addr_list *)msg;
	int i;
	struct ether_addr *mac;

	if (msg == NULL || msglen <= sizeof(*addr_list)) {
		PMD_DRV_LOG(ERR, "delete_ether_address argument too short\n");
		ret = I40E_ERR_PARAM;
		goto send_msg;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		mac = (struct ether_addr *)(addr_list->list[i].addr);
		if (!is_valid_assigned_ether_addr(mac) ||
		    i40e_vsi_delete_mac(vf->vsi, mac)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto send_msg;
		}
	}

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				    ret, NULL, 0);

	return ret;
}
int
hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
	int err = 0;

	err = hw_atl_utils_soft_reset(self);
	if (err)
		return err;

	hw_atl_utils_hw_chip_features_init(self,
					   &self->chip_features);

	hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);

	if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
				   self->fw_ver_actual) == 0) {
		*fw_ops = &aq_fw_1x_ops;
	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
					  self->fw_ver_actual) == 0) {
		*fw_ops = &aq_fw_2x_ops;
	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
					  self->fw_ver_actual) == 0) {
		/* 3.x firmware is driven through the 2.x ops table */
		*fw_ops = &aq_fw_2x_ops;
	} else {
		PMD_DRV_LOG(ERR, "Bad FW version detected: %x\n",
			    self->fw_ver_actual);
		return -EOPNOTSUPP;
	}

	self->aq_fw_ops = *fw_ops;
	err = self->aq_fw_ops->init(self);

	return err;
}
static int
add_memseg_list(const struct rte_memseg_list *msl, void *arg)
{
	struct vhost_memory_kernel *vm = arg;
	struct vhost_memory_region *mr;
	void *start_addr;
	uint64_t len;

	if (msl->external)
		return 0;

	if (vm->nregions >= max_regions)
		return -1;

	start_addr = msl->base_va;
	len = msl->page_sz * msl->memseg_arr.len;

	mr = &vm->regions[vm->nregions++];

	mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
	mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
	mr->memory_size = len;
	mr->mmap_offset = 0; /* flags_padding */

	PMD_DRV_LOG(DEBUG, "index=%u addr=%p len=%" PRIu64,
		    vm->nregions - 1, start_addr, len);

	return 0;
}
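/*
 * A minimal sketch of how the callback above might be driven, assuming the
 * caller sizes struct vhost_memory_kernel for max_regions entries;
 * vhost_kernel_prepare_memory() is a hypothetical helper name.
 */
static struct vhost_memory_kernel *
vhost_kernel_prepare_memory(void)
{
	struct vhost_memory_kernel *vm;

	vm = malloc(sizeof(*vm) +
		    max_regions * sizeof(struct vhost_memory_region));
	if (vm == NULL)
		return NULL;
	vm->nregions = 0;

	/* Visit every memseg list; fails if more than max_regions exist. */
	if (rte_memseg_list_walk(add_memseg_list, vm) < 0) {
		free(vm);
		return NULL;
	}
	return vm;
}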
/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl.
 * This is digest size rounded up to nearest quadword.
 */
static int
qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
				       QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
				       QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
				       QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
				       QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
				       QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
				       QAT_HW_DEFAULT_ALIGNMENT);
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}
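/*
 * For reference, a power-of-two round-up such as QAT_HW_ROUND_UP is
 * conventionally written as below (an assumption; the real macro lives in
 * the QAT headers). With an alignment of 8, a 20-byte SHA1 digest rounds
 * up to a 24-byte state1 size.
 */
#define EXAMPLE_QAT_ROUND_UP(val, align) \
	(((val) + (align) - 1) & ~((align) - 1))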
static __rte_always_inline int
create_opdl(struct opdl_evdev *device)
{
	int err = 0;
	char name[RTE_MEMZONE_NAMESIZE];

	snprintf(name, RTE_MEMZONE_NAMESIZE,
		 "%s_%u", device->service_name, device->nb_opdls);

	device->opdl[device->nb_opdls] =
		opdl_ring_create(name,
				 device->nb_events_limit,
				 sizeof(struct rte_event),
				 device->max_port_nb * 2,
				 device->socket);

	if (!device->opdl[device->nb_opdls]) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "opdl ring %u creation - FAILED",
			    opdl_pmd_dev_id(device),
			    device->nb_opdls);
		err = -EINVAL;
	} else {
		device->nb_opdls++;
	}

	return err;
}
/* ************************************************************************* */
int
ark_ddm_verify(struct ark_ddm_t *ddm)
{
	if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) {
		PMD_DRV_LOG(ERR,
			    "ARK: DDM structure looks incorrect %d vs %zu\n",
			    ARK_DDM_EXPECTED_SIZE,
			    sizeof(struct ark_ddm_t));
		return -1;
	}

	if (ddm->cfg.const0 != ARK_DDM_CONST) {
		PMD_DRV_LOG(ERR,
			    "ARK: DDM module not found as expected 0x%08x\n",
			    ddm->cfg.const0);
		return -1;
	}
	return 0;
}
static void
axgbe_change_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
{
	switch (mode) {
	case AXGBE_MODE_KX_1000:
		axgbe_kx_1000_mode(pdata);
		break;
	case AXGBE_MODE_KX_2500:
		axgbe_kx_2500_mode(pdata);
		break;
	case AXGBE_MODE_KR:
		axgbe_kr_mode(pdata);
		break;
	case AXGBE_MODE_SGMII_100:
		axgbe_sgmii_100_mode(pdata);
		break;
	case AXGBE_MODE_SGMII_1000:
		axgbe_sgmii_1000_mode(pdata);
		break;
	case AXGBE_MODE_X:
		axgbe_x_mode(pdata);
		break;
	case AXGBE_MODE_SFI:
		axgbe_sfi_mode(pdata);
		break;
	case AXGBE_MODE_UNKNOWN:
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n",
			    mode);
	}
}
/* returns block size in bytes per hash algo */
static int
qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx &
				      (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR,
				    "vring descriptor with no mbuf cookie at %u\n",
				    vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}
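/*
 * A sketch of a caller, loosely modeled on a virtio receive path (all names
 * other than virtqueue_dequeue_burst_rx are illustrative): drain up to
 * nb_pkts used descriptors and record each packet's length. Any virtio-net
 * header stripping a real driver would perform is omitted here.
 */
static uint16_t
example_recv_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
		   uint32_t *lens, uint16_t nb_pkts)
{
	uint16_t nb_rx, i;

	nb_rx = virtqueue_dequeue_burst_rx(vq, rx_pkts, lens, nb_pkts);
	for (i = 0; i < nb_rx; i++) {
		rx_pkts[i]->pkt_len = lens[i];
		rx_pkts[i]->data_len = (uint16_t)lens[i];
	}
	return nb_rx;
}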