Example #1
static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct cpl_abort_req *abort_req;
	struct cpl_abort_rpl *abort_rpl;
	struct cpl_set_tcb_field *req;
	struct ulptx_idata *aligner;
	struct work_request_hdr *wr;
	struct filter_entry *f;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
		   __func__, filter_id, adapter->tids.nftids);

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		netdev_err(dev, "%s: no filter entry for filter_id = %d",
			   __func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (!f->valid)
		return -EINVAL;

	f->ctx = ctx;
	f->pending = 1;
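	/*
	 * One ULP_TX work request carries the SET_TCB_FIELD (plus an
	 * alignment idata), the ABORT_REQ and the ABORT_RPL; the total
	 * size is rounded up to a multiple of 16 bytes.
	 */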
	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
			+ sizeof(*abort_req) + sizeof(*abort_rpl), 16);
	skb = alloc_skb(wrlen, GFP_KERNEL);
	if (!skb) {
		netdev_err(dev, "%s: could not allocate skb ..\n", __func__);
		return -ENOMEM;
	}
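	/* Steer the skb to the control queue of the filter's ingress port. */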
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	INIT_ULPTX_WR(req, wrlen, 0, 0);
	wr = (struct work_request_hdr *)req;
	wr++;
	req = (struct cpl_set_tcb_field *)wr;
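	/*
	 * Point the TCB's RSS information at the firmware event queue so
	 * the abort status is delivered there.
	 */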
	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
		       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
	aligner = (struct ulptx_idata *)(req + 1);
	abort_req = (struct cpl_abort_req *)(aligner + 1);
	mk_abort_req_ulp(abort_req, f->tid);
	abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
	mk_abort_rpl_ulp(abort_rpl, f->tid);
	t4_ofld_send(adapter, skb);
	return 0;
}
Example #2
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
		       unsigned int idx, unsigned int npods, unsigned int tid)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb;

	skb  = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	req = __skb_put(skb, wr_len);
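	/*
	 * ULP_TX memory-write WR: the npods pagepods (dlen bytes) are
	 * carried as immediate data behind the ulptx_idata header.
	 */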
	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(0) |
		T5_ULP_MEMIO_IMM_V(1));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata = (struct ulptx_idata *)(req + 1);
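	/* Immediate-data sub-command; the dlen bytes of payload follow it in the WR. */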
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);

	return skb;
}
Example #3
static inline void
ulp_mem_io_set_hdr(struct adapter *sc, int tid, struct ulp_mem_io *req,
		unsigned int wr_len, unsigned int dlen,
		unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
				V_ULP_MEMIO_ORDER(is_t4(sc)) |
				V_T5_ULP_MEMIO_IMM(is_t5(sc)));
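	/* The data length and destination address are in 32-byte units, hence the >> 5 shifts. */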
	req->dlen = htonl(V_ULP_MEMIO_DATA_LEN(dlen >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)
				| V_FW_WR_FLOWID(tid));
	req->lock_addr = htonl(V_ULP_MEMIO_ADDR(pm_addr >> 5));

	idata->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
Example #4
/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
				 unsigned int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
			__func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		unsigned int wrlen;
		struct rte_mbuf *mbuf;
		struct work_request_hdr *wr;
		struct ulptx_idata *aligner;
		struct cpl_set_tcb_field *req;
		struct cpl_abort_req *abort_req;
		struct cpl_abort_rpl *abort_rpl;

		f->ctx = ctx;
		f->pending = 1;
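		/*
		 * One ULP_TX WR carries the SET_TCB_FIELD plus an alignment
		 * idata, the ABORT_REQ and the ABORT_RPL, rounded up to a
		 * multiple of 16 bytes.
		 */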

		wrlen = cxgbe_roundup(sizeof(*wr) +
				      (sizeof(*req) + sizeof(*aligner)) +
				      sizeof(*abort_req) + sizeof(*abort_rpl),
				      16);

		ctrlq = &adapter->sge.ctrlq[port_id];
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			dev_err(adapter, "%s: could not allocate skb ..\n",
				__func__);
			goto out_err;
		}

		mbuf->data_len = wrlen;
		mbuf->pkt_len = mbuf->data_len;

		req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
		INIT_ULPTX_WR(req, wrlen, 0, 0);
		wr = (struct work_request_hdr *)req;
		wr++;
		req = (struct cpl_set_tcb_field *)wr;
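		/*
		 * Point the TCB's RSS information at the firmware event
		 * queue so the abort status is delivered there.
		 */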
		mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
				V_TCB_RSS_INFO(M_TCB_RSS_INFO),
				V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
				0, 1);
		aligner = (struct ulptx_idata *)(req + 1);
		abort_req = (struct cpl_abort_req *)(aligner + 1);
		mk_abort_req_ulp(abort_req, f->tid);
		abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
		mk_abort_rpl_ulp(abort_rpl, f->tid);
		t4_mgmt_tx(ctrlq, mbuf);
	}

	return 0;

out_err:
	return -ENOMEM;
}
Example #5
static int
write_page_pods(struct adapter *sc, struct toepcb *toep, struct ddp_buffer *db)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz, idx;
	u_int ppod_addr;
	uint32_t cmd;

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
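	/* T4 requires ordered memory writes; T5 and later carry the pagepods as immediate data. */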
	ddp_pgsz = t4_ddp_pgsz[G_PPOD_PGSZ(db->tag)];
	ppod_addr = db->ppod_addr;
	for (i = 0; i < db->nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(db->nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, toep->ctrlq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
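		/* Fill each pagepod: tid and tag, buffer length/offset, and the physical page addresses. */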
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(toep->tid) | db->tag);
			ppod->len_offset = htobe64(V_PPOD_LEN(db->len) |
			    V_PPOD_OFST(db->offset));
			ppod->rsvd = 0;
			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
			for (k = 0; k < nitems(ppod->addr); k++) {
				if (idx < db->npages) {
					ppod->addr[k] =
					    htobe64(db->pages[idx]->phys_addr);
					idx += ddp_pgsz / PAGE_SIZE;
				} else
					ppod->addr[k] = 0;
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, toep->tid, i, k,
				    htobe64(ppod->addr[k]));
#endif
			}

		}

		t4_wrq_tx(sc, wr);
	}

	return (0);
}