Example #1
static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct cpl_abort_req *abort_req;
	struct cpl_abort_rpl *abort_rpl;
	struct cpl_set_tcb_field *req;
	struct ulptx_idata *aligner;
	struct work_request_hdr *wr;
	struct filter_entry *f;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
		   __func__, filter_id, adapter->tids.nftids);

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		netdev_err(dev, "%s: no filter entry for filter_id = %d\n",
			   __func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (!f->valid)
		return -EINVAL;

	f->ctx = ctx;
	f->pending = 1;
	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
			+ sizeof(*abort_req) + sizeof(*abort_rpl), 16);
	skb = alloc_skb(wrlen, GFP_KERNEL);
	if (!skb) {
		netdev_err(dev, "%s: could not allocate skb ..\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	INIT_ULPTX_WR(req, wrlen, 0, 0);
	wr = (struct work_request_hdr *)req;
	wr++;
	req = (struct cpl_set_tcb_field *)wr;
	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
		       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
	aligner = (struct ulptx_idata *)(req + 1);
	abort_req = (struct cpl_abort_req *)(aligner + 1);
	mk_abort_req_ulp(abort_req, f->tid);
	abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
	mk_abort_rpl_ulp(abort_rpl, f->tid);
	t4_ofld_send(adapter, skb);
	return 0;
}
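All of these examples share the same control-plane skeleton: size and allocate an sk_buff for the work request, reserve and zero the request body inside it, fill in the firmware-specific fields, steer the skb to a control Tx queue with set_wr_txq(), and hand it to an offload send routine (t4_ofld_send() or the c4iw wrappers). A minimal sketch of that skeleton, using the real cxgb4 helpers seen above; build_wr() is a hypothetical stand-in for the per-message construction (mk_set_tcb_ulp(), the fw_ri_res_wr setup, and so on):

static int send_ctrl_wr(struct adapter *adap, unsigned int wr_len,
			unsigned int qidx)
{
	struct sk_buff *skb;
	void *wr;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Reserve wr_len bytes in the skb and zero them. */
	wr = __skb_put_zero(skb, wr_len);
	build_wr(wr, wr_len);		/* hypothetical: fill the WR body */

	/* Steer the message to a control Tx queue, then send it. */
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, qidx);
	return t4_ofld_send(adap, skb);
}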
Example #2
File: cq.c Project: avagin/linux
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
		      struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
Example #3
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
		if (!wr_wait.done) {
			printk(KERN_ERR MOD "Device %s not responding!\n",
			       pci_name(rdev->lldi.pdev));
			rdev->flags = T4_FATAL_ERROR;
			ret = -EIO;
		} else {
			ret = wr_wait.ret;
		}
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
Example #4
File: cq.c Project: ANFS/ANFS-kernel
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
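Examples #2 through #4 are three vintages of the same destroy_cq(). Example #3 open-codes the completion wait with wait_event_timeout() and marks the device fatal on a timeout; example #4 folds that logic into c4iw_wait_for_reply(); example #2 goes one step further with c4iw_ref_send_wait(), which both sends the skb and blocks on the wr_waitp cookie that the firmware echoes back in its reply.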
Example #5
static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
			 unsigned int ftid,  u16 word, u64 mask, u64 val,
			 int no_reply)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = htons(REPLY_CHAN_V(0) |
				QUEUENO_V(adap->sge.fw_evtq.abs_id) |
				NO_REPLY_V(no_reply));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adap, skb);
	return 0;
}
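Example #5 packages as a standalone CPL message the CPL_SET_TCB_FIELD construction that example #1 embeds in a ULP_TX work request. A hypothetical call site, reusing the word/mask/value triple from example #1 (illustrative only, not taken from the driver):

	/* Illustrative: repoint the filter's TCB RSS field at the firmware
	 * event queue, as example #1 does via mk_set_tcb_ulp().
	 */
	ret = set_tcb_field(adap, f, ftid, TCB_RSS_INFO_W,
			    TCB_RSS_INFO_V(TCB_RSS_INFO_M),
			    TCB_RSS_INFO_V(adap->sge.fw_evtq.abs_id),
			    1 /* no_reply */);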
Example #6
File: cq.c Project: ANFS/ANFS-kernel
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
Example #7
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode, which prevents any further actions against it
 * until we get a reply from the firmware on the completion status of the
 * request.
 */
int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct fw_filter2_wr *fwr;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	fwr = __skb_put_zero(skb, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	if (adapter->params.filter2_wr_support)
		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
	else
		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(f->tid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);

	if (adapter->params.filter2_wr_support) {
		fwr->natmode_to_ulp_type =
			FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
						 ULP_MODE_TCPDDP :
						 ULP_MODE_NONE) |
			FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
		fwr->newlport = htons(f->fs.nat_lport);
		fwr->newfport = htons(f->fs.nat_fport);
	}

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
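Two details above recur in the hash-filter paths (examples #1 and #8): the filter is flagged pending before the send so nothing else touches it until the firmware reply clears that state, and the third argument of set_wr_txq() is f->fs.val.iport & 0x3, masking the ingress port down to a control-queue index in the range 0-3.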
Example #8
static int cxgb4_set_hash_filter(struct net_device *dev,
				 struct ch_filter_specification *fs,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sk_buff *skb;
	int iq, atid, size;
	int ret = 0;
	u32 iconf;

	fill_default_mask(fs);
	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	f->fs = *fs;
	f->ctx = ctx;
	f->dev = dev;
	f->fs.iq = iq;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			ret = -ENOMEM;
			goto free_l2t;
		}
	}

	atid = cxgb4_alloc_atid(t, f);
	if (atid < 0) {
		ret = atid;
		goto free_smt;
	}

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	size = sizeof(struct cpl_t6_act_open_req);
	if (f->fs.type) {
		ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
		if (ret)
			goto free_atid;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_clip;
		}

		mk_act_open_req6(f, skb,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
	} else {
		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mk_act_open_req(f, skb,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;

free_clip:
	cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);

free_atid:
	cxgb4_free_atid(t, atid);

free_smt:
	if (f->smt) {
		cxgb4_smt_release(f->smt);
		f->smt = NULL;
	}

free_l2t:
	if (f->l2t) {
		cxgb4_l2t_release(f->l2t);
		f->l2t = NULL;
	}

out_err:
	kfree(f);
	return ret;
}
Example #9
File: qp.c Project: richardlee8681/linux
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	if (user) {
		ret = alloc_oc_sq(rdev, &wq->sq);
		if (ret)
			goto free_hwaddr;

		ret = alloc_host_sq(rdev, &wq->sq);
		if (ret)
			goto free_sq;
	} else {
		ret = alloc_host_sq(rdev, &wq->sq);
		if (ret)
			goto free_hwaddr;
	}
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
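Unlike the CQ examples, create_qp() batches two resources into one work request: wr_len is sizeof *res_wr + 2 * sizeof *res, the header carries V_FW_RI_RES_WR_NRES(2), and the res++ between the SQ and RQ blocks fills the second fw_ri_res slot, so both queues are created with a single firmware round trip.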
Example #10
File: cq.c Project: avagin/linux
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	int ret;
	struct sk_buff *skb;
	struct c4iw_ucontext *ucontext = NULL;

	if (user)
		ucontext = container_of(uctx, struct c4iw_ucontext, uctx);

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	if (user && ucontext->is_32b_cqe) {
		cq->qp_errp = &((struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) *
		 (sizeof(*cq->queue) / 2)))->qp_err;
	} else {
		cq->qp_errp = &((struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) *
		 sizeof(*cq->queue)))->qp_err;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			((user && ucontext->is_32b_cqe) ?
			 FW_RI_RES_WR_IQESIZE_V(1) :
			 FW_RI_RES_WR_IQESIZE_V(2)));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn("%s: cqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}