Example #1
static inline void
ulp_mem_io_set_hdr(struct adapter *sc, int tid, struct ulp_mem_io *req,
		unsigned int wr_len, unsigned int dlen,
		unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
				V_ULP_MEMIO_ORDER(is_t4(sc)) |
				V_T5_ULP_MEMIO_IMM(is_t5(sc)));
	req->dlen = htonl(V_ULP_MEMIO_DATA_LEN(dlen >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)
				| V_FW_WR_FLOWID(tid));
	req->lock_addr = htonl(V_ULP_MEMIO_ADDR(pm_addr >> 5));

	idata->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
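
To make the field packing above concrete, here is a minimal standalone sketch (plain C, not driver code) of the two unit conversions it performs: the ULP_TX memory-write command expresses the payload length and target address in 32-byte units (hence the `>> 5`), while the firmware work-request header carries its length in 16-byte units via DIV_ROUND_UP. The concrete sizes below are illustrative assumptions, not the real struct ulp_mem_io layout.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed sizes, for illustration only. */
	unsigned int wr_hdr_len = 16;                 /* stand-in for sizeof(req->wr) */
	unsigned int dlen = 64;                       /* payload bytes (32B multiple) */
	unsigned int pm_addr = 0x1000;                /* payload-memory byte address */
	unsigned int wr_len = wr_hdr_len + 16 + dlen; /* header + idata + payload */

	printf("MEMIO_DATA_LEN (32B units): %u\n", dlen >> 5);
	printf("MEMIO_ADDR     (32B units): %u\n", pm_addr >> 5);
	printf("len16          (16B units): %u\n",
	       DIV_ROUND_UP(wr_len - wr_hdr_len, 16));
	return 0;
}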
Example #2
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1.  So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	default:
		break;
	}

	return len;
}
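
Two size idioms recur throughout this function and are worth seeing in isolation. The standalone sketch below mirrors them under stated assumptions: cudbg_mbytes_to_bytes() is assumed to be a plain MB-to-bytes shift, and the indirect-register cases (CUDBG_TP_INDIRECT, CUDBG_PCIE_INDIRECT, etc.) count records by dividing a flat u32 array by a fixed record width of IREG_NUM_ELEM u32s, then size the ireg_buf output from that count. The record width used here is an assumption for illustration.

#include <stdio.h>
#include <stdint.h>

#define IREG_NUM_ELEM 4	/* assumed u32s per indirect-register record */

static uint32_t mbytes_to_bytes(uint32_t size_in_mb)
{
	return size_in_mb << 20;	/* 1 MB == 2^20 bytes */
}

int main(void)
{
	/* A flat register array of 6 records, IREG_NUM_ELEM u32s each. */
	static const uint32_t pio_array[6 * IREG_NUM_ELEM];
	size_t n = sizeof(pio_array) / (IREG_NUM_ELEM * sizeof(uint32_t));

	printf("indirect-register records: %zu\n", n);	/* prints 6 */
	printf("256 MB = %u bytes\n", mbytes_to_bytes(256));
	return 0;
}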
Example #3
static int
alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
	int rc, cntxt_id;
	__be32 v;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, pi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	if (pi->flags & INTR_NM_RXQ) {
		KASSERT(nm_rxq->intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__,
		    nm_rxq->intr_idx));
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	} else {
		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
		    F_FW_IQ_CMD_IQANDST;
	}
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(pi->nm_viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(pi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
		F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
		(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
		V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
	c.fl0size = htobe16(na->num_rx_desc + spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == pi->qsize_rxq - spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = F_DBPRIO | V_QID(nm_rxq->fl_cntxt_id) | V_PIDX(0);
	if (is_t5(sc))
		nm_rxq->fl_db_val |= F_DBTYPE;

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(F_QINTR_CNT_EN) |
	    V_INGRESSQID(nm_rxq->iq_cntxt_id));

	return (rc);
}
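
The bookkeeping after the mailbox command follows a common cxgbe pattern: firmware returns absolute queue ids, the driver rebases them against the start of its id range, bounds-checks the result, and uses it to index a software map. A standalone sketch of that pattern, with assumed names and sizes standing in for sc->sge.iq_start, sc->sge.niq, and sc->sge.iqmap:

#include <assert.h>
#include <stdio.h>

#define NIQ 64	/* assumed size of the software ingress-queue map */

static void *iqmap[NIQ];

static void map_ingress_queue(unsigned int hw_id, unsigned int iq_start,
    void *q)
{
	unsigned int idx = hw_id - iq_start;

	/* Mirrors the panic() above: firmware must stay inside our range. */
	assert(idx < NIQ);
	iqmap[idx] = q;
}

int main(void)
{
	int queue;	/* stand-in for a struct sge_nm_rxq */

	map_ingress_queue(1030, 1024, &queue);
	printf("slot 6 -> %p\n", iqmap[6]);
	return 0;
}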
Example #4
File: qp.c  Project: 908626950/linux
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user || is_t5(rdev->lldi.adapter_type)) {
		u32 off;

		off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK;
		if (user) {
			wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
		} else {
			off += 128 * (wq->sq.qid & rdev->qpmask) + 8;
			wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
		}
		off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK;
		if (user) {
			wq->rq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
		} else {
			off += 128 * (wq->rq.qid & rdev->qpmask) + 8;
			wq->rq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
		}
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%lx rqudb 0x%lx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (__force unsigned long) wq->sq.udb,
	     (__force unsigned long) wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
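
Both FW_RI_RES_WR entries size their egress queue the same way: software queue entries times a fixed number of 64-byte hardware slots, plus room for the status page. A standalone sketch of that arithmetic; the slot counts below are assumptions standing in for the real T4_SQ_NUM_SLOTS, T4_RQ_NUM_SLOTS, and T4_EQ_STATUS_ENTRIES constants:

#include <stdio.h>

/* Assumed values, for illustration only. */
#define SQ_NUM_SLOTS		5	/* 64B slots per SQ entry */
#define RQ_NUM_SLOTS		2	/* 64B slots per RQ entry */
#define EQ_STATUS_ENTRIES	8	/* 64B slots reserved for the status page */

static unsigned int eqsize(unsigned int entries, unsigned int slots_per_entry)
{
	return entries * slots_per_entry + EQ_STATUS_ENTRIES;
}

int main(void)
{
	printf("SQ, 128 entries -> eqsize %u\n", eqsize(128, SQ_NUM_SLOTS));
	printf("RQ, 256 entries -> eqsize %u\n", eqsize(256, RQ_NUM_SLOTS));
	return 0;
}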