static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
		      struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int ret;

	wr_len = sizeof(*res_wr) + sizeof(*res);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
				      FW_RI_RES_WR_NRES_V(1) |
				      FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
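/* Simplified sketch of the reply side (not driver code; the real handler
 * lives in the driver's FW6 message path, and the exact reply layout is
 * an assumption here): the firmware echoes the 64-bit cookie back, the
 * handler casts it to the wait object that destroy_cq() stashed in
 * res_wr->cookie, and wakes the sender blocked in c4iw_ref_send_wait().
 */
static void example_res_wr_reply(u64 cookie, int fw_ret)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = (struct c4iw_wr_wait *)(uintptr_t)cookie;
	if (wr_waitp)
		c4iw_wake_up_deref(wr_waitp, fw_ret ? -fw_ret : 0);
}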
/* cxgb4_get_srq_entry: read the SRQ table entry
 * @dev: Pointer to the net_device
 * @srq_idx: Index of the SRQ table entry
 * @entryp: pointer to the srq entry
 *
 * Sends a CPL_SRQ_TABLE_REQ message for the given index.
 * Contents will be returned in a CPL_SRQ_TABLE_RPL message.
 *
 * Returns zero if the read is successful, else an error
 * number is returned. The caller must not use the srq
 * entry if the return value is non-zero.
 */
int cxgb4_get_srq_entry(struct net_device *dev,
			int srq_idx, struct srq_entry *entryp)
{
	struct cpl_srq_table_req *req;
	struct adapter *adap;
	struct sk_buff *skb;
	struct srq_data *s;
	int rc = -ENODEV;

	adap = netdev2adap(dev);
	s = adap->srq;

	if (!(adap->flags & FULL_INIT_DONE) || !s)
		goto out;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	req = (struct cpl_srq_table_req *)
		__skb_put_zero(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
					      TID_TID_V(srq_idx) |
				TID_QID_V(adap->sge.fw_evtq.abs_id)));
	req->idx = srq_idx;

	mutex_lock(&s->lock);
	s->entryp = entryp;
	t4_mgmt_tx(adap, skb);

	rc = wait_for_completion_timeout(&s->comp, SRQ_WAIT_TO);
	if (rc)
		rc = 0;
	else /* !rc means we timed out */
		rc = -ETIMEDOUT;

	WARN_ON_ONCE(entryp->idx != srq_idx);
	mutex_unlock(&s->lock);
out:
	return rc;
}
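/* Hedged usage sketch (the caller name and index are illustrative, not
 * taken from the driver): read one SRQ table entry and honor the
 * contract above -- the entry contents are valid only when the return
 * code is zero.
 */
static int example_read_srq_entry(struct net_device *dev, int srq_idx)
{
	struct srq_entry entry;
	int err;

	err = cxgb4_get_srq_entry(dev, srq_idx, &entry);
	if (err)
		return err;	/* -ENODEV, -ENOMEM or -ETIMEDOUT */

	/* entry is now safe to inspect; the helper already warned if the
	 * returned index did not match the one requested.
	 */
	return 0;
}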
/* Send a Work Request to write the filter at a specified index. We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct fw_filter2_wr *fwr;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	fwr = __skb_put_zero(skb, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code. We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	if (adapter->params.filter2_wr_support)
		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
	else
		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(f->tid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);

	if (adapter->params.filter2_wr_support) {
		fwr->natmode_to_ulp_type =
			FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
						 ULP_MODE_TCPDDP :
						 ULP_MODE_NONE) |
			FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
		fwr->newlport = htons(f->fs.nat_lport);
		fwr->newfport = htons(f->fs.nat_fport);
	}

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
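/* Illustrative caller (a sketch, not the driver's entry point): real
 * users go through cxgb4_set_filter(), which validates the specification
 * and owns the filter ID. This only shows the minimal fields a
 * "drop TCP to local port 80" ch_filter_specification would need before
 * handing the entry to set_filter_wr().
 */
static int example_install_drop_filter(struct adapter *adap, int fidx)
{
	struct filter_entry *f = &adap->tids.ftid_tab[fidx];

	memset(&f->fs, 0, sizeof(f->fs));
	f->fs.action = FILTER_DROP;	/* drop on match, no rewriting */
	f->fs.val.proto = IPPROTO_TCP;
	f->fs.mask.proto = 0xff;	/* match the full protocol field */
	f->fs.val.lport = 80;
	f->fs.mask.lport = 0xffff;	/* exact local-port match */
	f->fs.hitcnts = 1;		/* keep hardware hit counters */

	return set_filter_wr(adap, fidx);
}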
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	int ret;
	struct sk_buff *skb;
	struct c4iw_ucontext *ucontext = NULL;

	if (user)
		ucontext = container_of(uctx, struct c4iw_ucontext, uctx);

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	if (user && ucontext->is_32b_cqe) {
		cq->qp_errp = &((struct t4_status_page *)
				((u8 *)cq->queue + (cq->size - 1) *
				 (sizeof(*cq->queue) / 2)))->qp_err;
	} else {
		cq->qp_errp = &((struct t4_status_page *)
				((u8 *)cq->queue + (cq->size - 1) *
				 sizeof(*cq->queue)))->qp_err;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + sizeof(*res);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
				      FW_RI_RES_WR_NRES_V(1) |
				      FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			((user && ucontext->is_32b_cqe) ?
			 FW_RI_RES_WR_IQESIZE_V(1) :
			 FW_RI_RES_WR_IQESIZE_V(2)));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn("%s: cqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
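/* Sketch of the create/destroy pairing (the function name is
 * illustrative). The destroy-side skb is allocated up front, mirroring
 * the driver's habit of reserving teardown resources at create time so
 * that destroy_cq() never has to allocate and therefore cannot fail on
 * memory pressure.
 */
static int example_cq_lifecycle(struct c4iw_rdev *rdev, struct t4_cq *cq,
				struct c4iw_dev_ucontext *uctx,
				struct c4iw_wr_wait *wr_waitp)
{
	struct sk_buff *destroy_skb;
	int ret;

	destroy_skb = alloc_skb(sizeof(struct fw_ri_res_wr) +
				sizeof(struct fw_ri_res), GFP_KERNEL);
	if (!destroy_skb)
		return -ENOMEM;

	ret = create_cq(rdev, cq, uctx, wr_waitp);
	if (ret) {
		kfree_skb(destroy_skb);
		return ret;
	}

	/* ... post work, poll completions ... */

	return destroy_cq(rdev, cq, uctx, destroy_skb, wr_waitp);
}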