/*
 * Map the given hardware queue onto the requested class of service (COS).
 *
 * Reads the queue's current COS from the QM, and if it differs from the
 * requested one, rewrites the queue->VOQ register and moves the queue's
 * bit between the old and new per-COS bit maps for every VNIC.
 */
void ecore_map_q_cos(struct _lm_device_t *pdev, u32_t q_num, u32_t new_cos)
{
	/* COS the queue is currently assigned to */
	u32_t curr_cos = REG_RD(pdev, QM_REG_QVOQIDX_0 + q_num * 4);
	u32_t vnic_count = ECORE_PORT2_MODE_NUM_VNICS;
	u32_t addr, bit_map, vnic_idx;

	/* already mapped to the requested COS - nothing to do */
	if (curr_cos == new_cos)
		return;

	/* 4-port mode uses more VNICs and offsets port-1 COS values */
	if (INIT_MODE_FLAGS(pdev) & MODE_PORT4) {
		vnic_count = ECORE_PORT4_MODE_NUM_VNICS;
		if (PORT_ID(pdev)) {
			curr_cos += ECORE_E3B0_PORT1_COS_OFFSET;
			new_cos += ECORE_E3B0_PORT1_COS_OFFSET;
		}
	}

	/* re-map the queue for every VNIC */
	for (vnic_idx = 0; vnic_idx < vnic_count; vnic_idx++) {
		u32_t pf_q = ECORE_PF_Q_NUM(q_num, PORT_ID(pdev), vnic_idx);
		u32_t q_mask = 1 << (pf_q & 0x1f);

		/* point the queue at its new VOQ */
		REG_WR(pdev, ECORE_Q_VOQ_REG_ADDR(pf_q), new_cos);

		/* drop the queue's bit from the old COS bit map */
		addr = ECORE_VOQ_Q_REG_ADDR(curr_cos, pf_q);
		bit_map = REG_RD(pdev, addr);
		REG_WR(pdev, addr, bit_map & ~q_mask);

		/* raise the queue's bit in the new COS bit map */
		addr = ECORE_VOQ_Q_REG_ADDR(new_cos, pf_q);
		bit_map = REG_RD(pdev, addr);
		REG_WR(pdev, addr, bit_map | q_mask);

		/*
		 * E2/E3A0 only: mirror the mapping in the command-queue
		 * bit map (valid COS values there are 0/1).
		 */
		if (!(INIT_MODE_FLAGS(pdev) & MODE_E3_B0)) {
			addr = ECORE_Q_CMDQ_REG_ADDR(pf_q);
			bit_map = REG_RD(pdev, addr);
			q_mask = 1 << (2 * (pf_q & 0xf));
			if (new_cos)
				bit_map |= q_mask;
			else
				bit_map &= ~q_mask;
			REG_WR(pdev, addr, bit_map);
		}
	}
}
static int fs_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item patterns[], const struct rte_flow_action actions[], struct rte_flow_error *error) { struct sub_device *sdev; uint8_t i; int ret; FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { DEBUG("Calling rte_flow_validate on sub_device %d", i); ret = rte_flow_validate(PORT_ID(sdev), attr, patterns, actions, error); if (ret) { ERROR("Operation rte_flow_validate failed for sub_device %d" " with error %d", i, ret); return ret; } }
/*
 * hw_params callback for the es705 codec DAI.
 *
 * Sends the selected PCM word length and sample rate for this DAI's port
 * to the es705 firmware as escore command blocks (terminated by
 * 0xffffffff).
 *
 * NOTE(review): the unconditional "return 0;" right below the locals
 * makes everything that follows dead code - the hardware is never
 * programmed. Confirm whether this disable is intentional.
 *
 * NOTE(review): the commented-out "if" conditions were left as bare
 * { } blocks, so both parameter writes run unconditionally when the
 * early return is removed.
 */
static int es705_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params,
			   struct snd_soc_dai *dai)
{
	/* port register base encoded in the DAI driver definition */
	unsigned int base = dai->driver->base;
	unsigned int msg, resp;
	int ret, val;
	u32 cmd_block[3];

	return 0;	/* function currently disabled - see note above */

	if (escore_priv.flag.is_fw_ready == 0) {
		pr_warn("%s(port-%c): es705 firmware is not ready, abort\n",
			__func__, PORT_NAME(base));
		return 0;
	}

	/* word length */
	//if (es705_ports[PORT_ID(base)].wl != params_format(params))
	{
		printk("++%s(port-%c)++: format=%d\n", __func__,
			PORT_NAME(base), params_format(params));
		/* firmware encodes the word length as bit width minus one */
		val = snd_pcm_format_width(params_format(params)) - 1;
		cmd_block[0] = ES705_PORT_PARAM_ID + base +
			ES705_PORT_WORDLENGHT;
		cmd_block[1] = ES705_PORT_SET_PARAM + val;
		cmd_block[2] = 0xffffffff;	/* command block terminator */
		ret = escore_write_block(&escore_priv, cmd_block);
		if (ret < 0) {
			dev_err(dai->dev,
				"escore_cmd: send [%08x %08x] failed\n",
				cmd_block[0], cmd_block[2]);
			goto out;
		}
		/* cache the format that was applied */
		es705_ports[PORT_ID(base)].wl = params_format(params);
	}

	/* sample rate */
	//if (es705_ports[PORT_ID(base)].rate != params_rate(params))
	{
		printk("++%s(port-%c)++: rate=%d\n", __func__,
			PORT_NAME(base), params_rate(params));
		/* read the current clock parameter first so that bit8
		 * (presumably the clock-master flag - confirm) is kept */
		msg = ES705_PORT_GET_PARAM + base + ES705_PORT_CLOCK;
		ret = escore_cmd(&escore_priv, msg, &resp);
		if (ret < 0) {
			dev_err(dai->dev, "escore_cmd: send %08x failed\n",
				msg);
			goto out;
		}
		/* keep bit8, encode the rate in kHz in the low byte */
		val = (resp & 0x100) + (params_rate(params) / 1000);
		cmd_block[0] = ES705_PORT_PARAM_ID + base + ES705_PORT_CLOCK;
		cmd_block[1] = ES705_PORT_SET_PARAM + val;
		cmd_block[2] = 0xffffffff;	/* command block terminator */
		ret = escore_write_block(&escore_priv, cmd_block);
		if (ret < 0) {
			dev_err(dai->dev,
				"escore_cmd: send [%08x %08x] failed\n",
				cmd_block[0], cmd_block[2]);
			goto out;
		}
		/* cache the rate that was applied */
		es705_ports[PORT_ID(base)].rate = params_rate(params);
	}
out:
	return ret;
}
/*
 * set_fmt callback for the es705 codec DAI.
 *
 * Programs the port mode (I2S vs PCM/DSP_A), the latch edge for DSP_A,
 * and the clock master/slave selection for this DAI's port via escore
 * command blocks (terminated by 0xffffffff).
 *
 * NOTE(review): the unconditional "return 0;" right below the locals
 * makes everything that follows dead code - the hardware is never
 * programmed. Confirm whether this disable is intentional.
 */
static int es705_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	/* port register base encoded in the DAI driver definition */
	unsigned int base = dai->driver->base;
	unsigned int msg, resp;
	int ret = 0, val;
	u32 cmd_block[3];

	return 0;	/* function currently disabled - see note above */

	if (escore_priv.flag.is_fw_ready == 0) {
		pr_warn("%s(): es705 firmware is not ready, abort\n",
			__func__);
		return 0;
	}

	/* early-exit when the format is unchanged - currently disabled */
	//if (es705_ports[PORT_ID(base)].fmt == fmt)
	//	return 0;

	/* port mode */
	if (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
		printk("++%s(port-%c)++: port-mode=%s\n", __func__,
			PORT_NAME(base),
			(fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
			SND_SOC_DAIFMT_I2S ? "i2s" : "pcm");
		switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
		case SND_SOC_DAIFMT_DSP_A:
			val = 0x00;	/* firmware value for PCM/DSP_A */
			break;
		case SND_SOC_DAIFMT_I2S:
			val = 0x01;	/* firmware value for I2S */
			break;
		default:
			dev_err(dai->dev, "Unsupported DAI format %d\n",
				fmt & SND_SOC_DAIFMT_FORMAT_MASK);
			ret = -EINVAL;
			goto out;
		}
		cmd_block[0] = ES705_PORT_PARAM_ID + base + ES705_PORT_MODE;
		cmd_block[1] = ES705_PORT_SET_PARAM + val;
		cmd_block[2] = 0xffffffff;	/* command block terminator */
		ret = escore_write_block(&escore_priv, cmd_block);
		if (ret < 0) {
			dev_err(dai->dev,
				"escore_cmd: send [%08x %08x] failed\n",
				cmd_block[0], cmd_block[2]);
			goto out;
		}
		/* latch edge: Tx on Falling Edge, Rx on Rising Edge */
		if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
		    SND_SOC_DAIFMT_DSP_A) {
			cmd_block[0] = ES705_PORT_PARAM_ID + base +
				ES705_PORT_LATCHEDGE;
			cmd_block[1] = ES705_PORT_SET_PARAM + 0x01;
			cmd_block[2] = 0xffffffff;
			ret = escore_write_block(&escore_priv, cmd_block);
			if (ret < 0) {
				dev_err(dai->dev,
					"escore_cmd: send [%08x %08x] failed\n",
					cmd_block[0], cmd_block[2]);
				goto out;
			}
		}
	}

	/* master mode */
	if (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
		printk("++%s(port-%c)++: clock-mode=%s\n", __func__,
			PORT_NAME(base),
			(fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
			SND_SOC_DAIFMT_CBS_CFS ?
			"slave" : "master");
		/* read the current clock parameter so only bit8 changes */
		msg = ES705_PORT_GET_PARAM + base + ES705_PORT_CLOCK;
		ret = escore_cmd(&escore_priv, msg, &resp);
		if (ret < 0) {
			dev_err(dai->dev, "escore_cmd: send %08x failed\n",
				msg);
			goto out;
		}
		switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
		case SND_SOC_DAIFMT_CBS_CFS:
			val = resp & 0xff;	// slave: bit8 = 0
			break;
		case SND_SOC_DAIFMT_CBM_CFM:
			val = (resp & 0xff) | 0x100;	// master: bit8 = 1
			break;
		default:
			dev_err(dai->dev, "Unsupported master mode %d\n",
				fmt & SND_SOC_DAIFMT_MASTER_MASK);
			ret = -EINVAL;
			goto out;
		}
		cmd_block[0] = ES705_PORT_PARAM_ID + base + ES705_PORT_CLOCK;
		cmd_block[1] = ES705_PORT_SET_PARAM + val;
		cmd_block[2] = 0xffffffff;	/* command block terminator */
		ret = escore_write_block(&escore_priv, cmd_block);
		if (ret < 0) {
			dev_err(dai->dev,
				"escore_cmd: send [%08x %08x] failed\n",
				cmd_block[0], cmd_block[2]);
			goto out;
		}
	}
	/* cache the format that was applied */
	es705_ports[PORT_ID(base)].fmt = fmt;
out:
	return ret;
}
static int fs_eth_dev_create(struct rte_vdev_device *vdev) { struct rte_eth_dev *dev; struct ether_addr *mac; struct fs_priv *priv; struct sub_device *sdev; const char *params; unsigned int socket_id; uint8_t i; int ret; dev = NULL; priv = NULL; socket_id = rte_socket_id(); INFO("Creating fail-safe device on NUMA socket %u", socket_id); params = rte_vdev_device_args(vdev); if (params == NULL) { ERROR("This PMD requires sub-devices, none provided"); return -1; } dev = rte_eth_vdev_allocate(vdev, sizeof(*priv)); if (dev == NULL) { ERROR("Unable to allocate rte_eth_dev"); return -1; } priv = PRIV(dev); priv->dev = dev; dev->dev_ops = &failsafe_ops; dev->data->mac_addrs = &PRIV(dev)->mac_addrs[0]; dev->data->dev_link = eth_link; PRIV(dev)->nb_mac_addr = 1; TAILQ_INIT(&PRIV(dev)->flow_list); dev->rx_pkt_burst = (eth_rx_burst_t)&failsafe_rx_burst; dev->tx_pkt_burst = (eth_tx_burst_t)&failsafe_tx_burst; ret = fs_sub_device_alloc(dev, params); if (ret) { ERROR("Could not allocate sub_devices"); goto free_dev; } ret = failsafe_args_parse(dev, params); if (ret) goto free_subs; ret = rte_eth_dev_owner_new(&priv->my_owner.id); if (ret) { ERROR("Failed to get unique owner identifier"); goto free_args; } snprintf(priv->my_owner.name, sizeof(priv->my_owner.name), FAILSAFE_OWNER_NAME); DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id, priv->my_owner.name, priv->my_owner.id); ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, failsafe_eth_new_event_callback, dev); if (ret) { ERROR("Failed to register NEW callback"); goto free_args; } ret = failsafe_eal_init(dev); if (ret) goto unregister_new_callback; ret = fs_mutex_init(priv); if (ret) goto unregister_new_callback; ret = failsafe_hotplug_alarm_install(dev); if (ret) { ERROR("Could not set up plug-in event detection"); goto unregister_new_callback; } mac = &dev->data->mac_addrs[0]; if (failsafe_mac_from_arg) { /* * If MAC address was provided as a parameter, * apply to all probed slaves. 
*/ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac); if (ret) { ERROR("Failed to set default MAC address"); goto cancel_alarm; } } } else {
static int bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf) { int ret = 0; struct rte_pci_device *pci_dev; struct bnx2x_softc *sc; PMD_INIT_FUNC_TRACE(); eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops; pci_dev = eth_dev->pci_dev; sc = eth_dev->data->dev_private; sc->pcie_bus = pci_dev->addr.bus; sc->pcie_device = pci_dev->addr.devid; if (is_vf) sc->flags = BNX2X_IS_VF_FLAG; sc->devinfo.vendor_id = pci_dev->id.vendor_id; sc->devinfo.device_id = pci_dev->id.device_id; sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id; sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id; sc->pcie_func = pci_dev->addr.function; sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr; if (is_vf) sc->bar[BAR1].base_addr = (void *) ((uint64_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START); else sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr; assert(sc->bar[BAR0].base_addr); assert(sc->bar[BAR1].base_addr); bnx2x_load_firmware(sc); assert(sc->firmware); if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) sc->udp_rss = 1; sc->rx_budget = BNX2X_RX_BUDGET; sc->hc_rx_ticks = BNX2X_RX_TICKS; sc->hc_tx_ticks = BNX2X_TX_TICKS; sc->interrupt_mode = INTR_MODE_SINGLE_MSIX; sc->rx_mode = BNX2X_RX_MODE_NORMAL; sc->pci_dev = pci_dev; ret = bnx2x_attach(sc); if (ret) { PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret); } eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr; PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d", sc->pcie_bus, sc->pcie_device); PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p", sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr); PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d", PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc)); PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x", eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id); return ret; }