/**
 * t4vf_port_init - initialize port hardware/software state
 * @adapter: the adapter
 * @pidx: the adapter port index
 */
int t4vf_port_init(struct adapter *adapter, int pidx)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_cmd vi_cmd, vi_rpl;
	struct fw_port_cmd port_cmd, port_rpl;
	int v;
	u32 word;

	/*
	 * Execute a VI Read command to get our Virtual Interface information
	 * like MAC address, etc.
	 */
	memset(&vi_cmd, 0, sizeof(vi_cmd));
	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
				       FW_CMD_REQUEST |
				       FW_CMD_READ);
	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(pi->viid));
	v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
	if (v)
		return v;

	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_GET(vi_rpl.portid_pkd));
	pi->rss_size = FW_VI_CMD_RSSSIZE_GET(be16_to_cpu(vi_rpl.rsssize_pkd));
	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);

	/*
	 * If we don't have read access to our port information, we're done
	 * now.  Otherwise, execute a PORT Read command to get it ...
	 */
	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
		return 0;
	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) |
					    FW_CMD_REQUEST |
					    FW_CMD_READ |
					    FW_PORT_CMD_PORTID(pi->port_id));
	port_cmd.action_to_len16 =
		cpu_to_be32(FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			    FW_LEN16(port_cmd));
	v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
	if (v)
		return v;

	v = 0;
	word = be16_to_cpu(port_rpl.u.info.pcap);
	if (word & FW_PORT_CAP_SPEED_100M)
		v |= SUPPORTED_100baseT_Full;
	if (word & FW_PORT_CAP_SPEED_1G)
		v |= SUPPORTED_1000baseT_Full;
	if (word & FW_PORT_CAP_SPEED_10G)
		v |= SUPPORTED_10000baseT_Full;
	if (word & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	init_link_config(&pi->link_cfg, v);

	return 0;
}
/**
 * t4vf_set_rxmode - set Rx properties of a virtual interface
 * @adapter: the adapter
 * @viid: the VI id
 * @mtu: the new MTU or -1 for no change
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
 *	-1 no change
 * @sleep_ok: if true, the call is allowed to sleep while waiting for the
 *	firmware mailbox reply
 *
 * Sets Rx properties of a virtual interface.
 */
int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
		    int mtu, int promisc, int all_multi, int bcast,
		    int vlanex, bool sleep_ok)
{
	struct fw_vi_rxmode_cmd cmd;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_VI_RXMODE_CMD_MTU_MASK;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_RXMODE_CMD) |
				     FW_CMD_REQUEST |
				     FW_CMD_WRITE |
				     FW_VI_RXMODE_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.mtu_to_vlanexen =
		cpu_to_be32(FW_VI_RXMODE_CMD_MTU(mtu) |
			    FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
			    FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
			    FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
			    FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
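/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * a caller that enables promiscuous and all-multicast reception on a VI
 * while leaving the MTU, broadcast and VLAN-extraction settings unchanged
 * by passing -1, and permits the mailbox wait to sleep.
 */
static inline int example_vi_enable_promisc(struct adapter *adapter,
					    unsigned int viid)
{
	return t4vf_set_rxmode(adapter, viid,
			       -1,	/* mtu: no change */
			       1,	/* promisc: enable */
			       1,	/* all_multi: enable */
			       -1,	/* bcast: no change */
			       -1,	/* vlanex: no change */
			       true);	/* sleep_ok */
}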
/**
 * t4vf_fw_reset - issue a reset to FW
 * @adapter: the adapter
 *
 * Issues a reset command to FW.  For a Physical Function this would
 * result in the Firmware resetting all of its state.  For a Virtual
 * Function this just resets the state associated with the VF.
 */
int t4vf_fw_reset(struct adapter *adapter)
{
	struct fw_reset_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
				      FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
 * t4vf_eth_eq_free - free an Ethernet egress queue
 * @adapter: the adapter
 * @eqid: egress queue ID
 *
 * Frees an Ethernet egress queue.
 */
int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
{
	struct fw_eq_eth_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST |
				    FW_CMD_EXEC);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE |
					 FW_LEN16(cmd));
	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID(eqid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
 * t4vf_identify_port - identify a VI's port by blinking its LED
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
		       unsigned int nblinks)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST |
				     FW_CMD_EXEC |
				     FW_VI_ENABLE_CMD_VIID(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED |
				       FW_LEN16(cmd));
	cmd.blinkdur = cpu_to_be16(nblinks);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
 * t4vf_enable_vi - enable/disable a virtual interface
 * @adapter: the adapter
 * @viid: the Virtual Interface ID
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
		   bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_ENABLE_CMD) |
				     FW_CMD_REQUEST |
				     FW_CMD_EXEC |
				     FW_VI_ENABLE_CMD_VIID(viid));
	cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN(rx_en) |
				       FW_VI_ENABLE_CMD_EEN(tx_en) |
				       FW_LEN16(cmd));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
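/*
 * Illustrative sketch only (hypothetical helpers, not part of the driver):
 * an "interface up" wrapper that turns on both Rx and Tx for a VI, and an
 * "interface down" counterpart that quiesces the VI by disabling both
 * directions before its queues are torn down.
 */
static inline int example_vi_link_up(struct adapter *adapter,
				     unsigned int viid)
{
	return t4vf_enable_vi(adapter, viid, true, true);
}

static inline int example_vi_link_down(struct adapter *adapter,
				       unsigned int viid)
{
	return t4vf_enable_vi(adapter, viid, false, false);
}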
/**
 * t4vf_free_vi -- free a virtual interface
 * @adapter: the adapter
 * @viid: the virtual interface identifier
 *
 * Free a previously allocated Virtual Interface.  Return an error on
 * failure.
 */
int t4vf_free_vi(struct adapter *adapter, int viid)
{
	struct fw_vi_cmd cmd;

	/*
	 * Execute a VI command to free the Virtual Interface.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
				    FW_CMD_REQUEST |
				    FW_CMD_EXEC);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_FREE);
	cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID(viid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
/**
 * t4vf_get_vfres - retrieve VF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a virtual
 * function.  The results are stored in @adapter->params.vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get VF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PFVF_CMD) |
				    FW_CMD_REQUEST |
				    FW_CMD_READ);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Extract VF resource limits and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	vfres->niqflint = FW_PFVF_CMD_NIQFLINT_GET(word);
	vfres->niq = FW_PFVF_CMD_NIQ_GET(word);

	word = be32_to_cpu(rpl.type_to_neq);
	vfres->neq = FW_PFVF_CMD_NEQ_GET(word);
	vfres->pmask = FW_PFVF_CMD_PMASK_GET(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	vfres->tc = FW_PFVF_CMD_TC_GET(word);
	vfres->nvi = FW_PFVF_CMD_NVI_GET(word);
	vfres->nexactf = FW_PFVF_CMD_NEXACTF_GET(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	vfres->r_caps = FW_PFVF_CMD_R_CAPS_GET(word);
	vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_GET(word);
	vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_GET(word);

	return 0;
}
/**
 * t4vf_iq_free - free an ingress queue and its free lists
 * @adapter: the adapter
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue ID
 * @fl0id: FL0 queue ID or 0xffff if no attached FL0
 * @fl1id: FL1 queue ID or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated free lists, if any.
 */
int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
				    FW_CMD_REQUEST |
				    FW_CMD_EXEC);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE(iqtype));

	cmd.iqid = cpu_to_be16(iqid);
	cmd.fl0id = cpu_to_be16(fl0id);
	cmd.fl1id = cpu_to_be16(fl1id);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
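/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * freeing an ingress queue that has no free lists attached.  Per the
 * comment above, 0xffff marks an absent free list, so a caller tearing
 * down a bare FW_IQ_TYPE_FL_INT_CAP queue passes 0xffff for both FL IDs.
 */
static inline int example_free_bare_iq(struct adapter *adapter,
				       unsigned int iqid)
{
	return t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, iqid,
			    0xffff, 0xffff);
}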
/**
 * t4vf_read_rss_vi_config - read a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Reads the Virtual Interface's RSS configuration information and
 * translates it into CPU-native format.
 */
int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
			    union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;
	int v;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST |
				     FW_CMD_READ |
				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);

		config->basicvirtual.ip6fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) != 0);
		config->basicvirtual.ip6twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) != 0);
		config->basicvirtual.ip4fourtupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) != 0);
		config->basicvirtual.ip4twotupen =
			((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) != 0);
		config->basicvirtual.udpen =
			((word & FW_RSS_VI_CONFIG_CMD_UDPEN) != 0);
		config->basicvirtual.defaultq =
			FW_RSS_VI_CONFIG_CMD_DEFAULTQ_GET(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * t4vf_alloc_vi - allocate a virtual interface on a port
 * @adapter: the adapter
 * @port_id: physical port associated with the VI
 *
 * Allocate a new Virtual Interface and bind it to the indicated
 * physical port.  Return the new Virtual Interface Identifier on
 * success, or a [negative] error number on failure.
 */
int t4vf_alloc_vi(struct adapter *adapter, int port_id)
{
	struct fw_vi_cmd cmd, rpl;
	int v;

	/*
	 * Execute a VI command to allocate Virtual Interface and return its
	 * VIID.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_VI_CMD) |
				    FW_CMD_REQUEST |
				    FW_CMD_WRITE |
				    FW_CMD_EXEC);
	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
					 FW_VI_CMD_ALLOC);
	cmd.portid_pkd = FW_VI_CMD_PORTID(port_id);
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	return FW_VI_CMD_VIID_GET(be16_to_cpu(rpl.type_viid));
}
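/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * t4vf_alloc_vi() returns either the new VIID or a negative error number,
 * so a caller must test the sign of the result before using it and must
 * eventually hand the VIID back to t4vf_free_vi().  This sketch simply
 * performs an allocate/release round trip on @port_id.
 */
static inline int example_vi_round_trip(struct adapter *adapter, int port_id)
{
	int viid = t4vf_alloc_vi(adapter, port_id);

	if (viid < 0)
		return viid;	/* allocation failed */
	return t4vf_free_vi(adapter, viid);
}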
/**
 * t4vf_write_rss_vi_config - write a VI's RSS configuration
 * @adapter: the adapter
 * @viid: Virtual Interface ID
 * @config: pointer to host-native VI RSS Configuration buffer
 *
 * Write the Virtual Interface's RSS configuration information
 * (translating it into firmware-native format before writing).
 */
int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
			     union rss_vi_config *config)
{
	struct fw_rss_vi_config_cmd cmd, rpl;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				     FW_CMD_REQUEST |
				     FW_CMD_WRITE |
				     FW_RSS_VI_CONFIG_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	switch (adapter->params.rss.mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = 0;

		if (config->basicvirtual.ip6fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
		if (config->basicvirtual.ip6twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
		if (config->basicvirtual.ip4fourtupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
		if (config->basicvirtual.ip4twotupen)
			word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
		if (config->basicvirtual.udpen)
			word |= FW_RSS_VI_CONFIG_CMD_UDPEN;
		word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ(
				config->basicvirtual.defaultq);
		cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
		break;
	}

	default:
		return -EINVAL;
	}

	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
}
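/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * a read-modify-write of a VI's RSS configuration using
 * t4vf_read_rss_vi_config()/t4vf_write_rss_vi_config(), here enabling UDP
 * hashing in Basic Virtual mode while preserving the other fields as read.
 */
static inline int example_enable_udp_rss(struct adapter *adapter,
					 unsigned int viid)
{
	union rss_vi_config config;
	int ret;

	ret = t4vf_read_rss_vi_config(adapter, viid, &config);
	if (ret)
		return ret;
	config.basicvirtual.udpen = 1;
	return t4vf_write_rss_vi_config(adapter, viid, &config);
}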
/**
 * t4vf_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @viid: Virtual Interface of RSS Table Slice
 * @start: starting entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range 0..1023.
 */
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
			  int start, int n, const u16 *rspq, int nrspq)
{
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/*
	 * Initialize firmware command template to write the RSS table.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST |
				     FW_CMD_WRITE |
				     FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		__be32 *qp = &cmd.iq0_to_iq2;
		int nq = min(n, 32);
		int ret;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf) {
				nqbuf--;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
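/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * because t4vf_config_rss_range() wraps around @rspq when @nrspq < @n, a
 * caller with a handful of Ingress Queue IDs can spread them round-robin
 * across a whole table slice in a single call.  The 64-entry slice size
 * here is an assumption for the example; a real caller would use the VI's
 * actual RSS table size.
 */
static inline int example_fill_rss_slice(struct adapter *adapter,
					 unsigned int viid,
					 const u16 *iqids, int niqids)
{
	return t4vf_config_rss_range(adapter, viid, 0, 64, iqids, niqids);
}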
/**
 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 * @adapter: the adapter
 *
 * Retrieves global RSS mode and parameters with which we have to live
 * and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
	struct rss_params *rss = &adapter->params.rss;
	struct fw_rss_glb_config_cmd cmd, rpl;
	int v;

	/*
	 * Execute an RSS Global Configuration read command to retrieve
	 * our RSS configuration.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				      FW_CMD_REQUEST |
				      FW_CMD_READ);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v)
		return v;

	/*
	 * Translate the big-endian RSS Global Configuration into our
	 * cpu-endian format based on the RSS mode.  We also do first level
	 * filtering at this point to weed out modes which don't support
	 * VF Drivers ...
	 */
	rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_GET(
			be32_to_cpu(rpl.u.manual.mode_pkd));
	switch (rss->mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(
				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

		rss->u.basicvirtual.synmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
		rss->u.basicvirtual.syn4tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
		rss->u.basicvirtual.syn2tupenipv6 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
		rss->u.basicvirtual.syn4tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
		rss->u.basicvirtual.syn2tupenipv4 =
			((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
		rss->u.basicvirtual.ofdmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
		rss->u.basicvirtual.tnlmapen =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
		rss->u.basicvirtual.tnlalllookup =
			((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
		rss->u.basicvirtual.hashtoeplitz =
			((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);

		/* we need at least Tunnel Map Enable to be set */
		if (!rss->u.basicvirtual.tnlmapen)
			return -EINVAL;
		break;
	}

	default:
		/* all unknown/unsupported RSS modes result in an error */
		return -EINVAL;
	}

	return 0;
}
static int
alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->nm_viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
		V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
		V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(pi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}
static int
alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
	int rc, cntxt_id;
	__be32 v;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, pi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	if (pi->flags & INTR_NM_RXQ) {
		KASSERT(nm_rxq->intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__,
		    nm_rxq->intr_idx));
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	} else {
		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
		    F_FW_IQ_CMD_IQANDST;
	}
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(pi->nm_viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(pi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
		F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
		(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
		V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
	c.fl0size = htobe16(na->num_rx_desc + spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == pi->qsize_rxq - spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = F_DBPRIO | V_QID(nm_rxq->fl_cntxt_id) | V_PIDX(0);
	if (is_t5(sc))
		nm_rxq->fl_db_val |= F_DBTYPE;

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(F_QINTR_CNT_EN) |
	    V_INGRESSQID(nm_rxq->iq_cntxt_id));

	return (rc);
}