Example #1
/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
int
t4_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
{
	struct wrqe *wr;
	struct cpl_l2t_write_req *req;
	int idx = e->idx + sc->vres.l2t.start;

	mtx_assert(&e->lock, MA_OWNED);

	wr = alloc_wrqe(sizeof(*req), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);
	req = wrtod(wr);

	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
	    V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
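	/*
	 * A synchronous write asks for a CPL reply on the firmware event
	 * queue; an asynchronous write sets NOREPLY and completes silently.
	 */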
	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
	req->l2t_idx = htons(idx);
	req->vlan = htons(e->vlan);
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	t4_wrq_tx(sc, wr);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;

	return (0);
}
Example #2
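/*
 * Build a CPL_SET_TCB_FIELD work request and send it on 'wrq'.  If a reply
 * is requested it is delivered to ingress queue 'iqid' tagged with 'cookie'.
 */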
void
t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, int tid,
    uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie, int iqid)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	MPASS((cookie & ~M_COOKIE) == 0);
	MPASS((iqid & ~M_QUEUENO) == 0);

	wr = alloc_wrqe(sizeof(*req), wrq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	req->reply_ctrl = htobe16(V_QUEUENO(iqid));
	if (reply == 0)
		req->reply_ctrl |= htobe16(F_NO_REPLY);
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}
Example #3
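/*
 * Transmit the work requests queued on the L2T entry's wr_list.  Must be
 * called with the entry locked.
 */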
static inline void
send_pending(struct adapter *sc, struct l2t_entry *e)
{
	struct wrqe *wr;

	mtx_assert(&e->lock, MA_OWNED);

	while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&e->wr_list, link);
		t4_wrq_tx(sc, wr);
	}
}
Example #4
int
t4_l2t_send_slow(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e)
{

again:
	switch (e->state) {
	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */

		if (resolve_entry(sc, e) != EWOULDBLOCK)
			goto again;	/* entry updated, re-examine state */

		/* Fall through */

	case L2T_STATE_VALID:     /* fast-path, send the packet on */

		t4_wrq_tx(sc, wr);
		return (0);

	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:

		mtx_lock(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			/* state changed by the time we got here */
			mtx_unlock(&e->lock);
			goto again;
		}
		arpq_enqueue(e, wr);
		mtx_unlock(&e->lock);

		if (resolve_entry(sc, e) == EWOULDBLOCK)
			break;

		mtx_lock(&e->lock);
		if (e->state == L2T_STATE_VALID && !STAILQ_EMPTY(&e->wr_list))
			send_pending(sc, e);
		if (e->state == L2T_STATE_FAILED)
			resolution_failed(e);
		mtx_unlock(&e->lock);
		break;

	case L2T_STATE_FAILED:
		resolution_failed_for_wr(wr);
		return (EHOSTUNREACH);
	}

	return (0);
}
Example #5
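/*
 * Send a CPL_ABORT_RPL for the given tid on the offload tx queue.
 */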
void
send_abort_rpl(struct adapter *sc, struct sge_wrq *ofld_txq, int tid,
    int rst_status)
{
	struct wrqe *wr;
	struct cpl_abort_rpl *cpl;

	wr = alloc_wrqe(sizeof(*cpl), ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	cpl = wrtod(wr);

	INIT_TP_WR_MIT_CPL(cpl, CPL_ABORT_RPL, tid);
	cpl->cmd = rst_status;

	t4_wrq_tx(sc, wr);
}
Example #6
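/*
 * Return rx credits to the hardware for this connection.  Returns the number
 * of credits handed back, or 0 if no work request could be allocated.
 */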
static int
send_rx_credits(struct adapter *sc, struct toepcb *toep, int credits)
{
	struct wrqe *wr;
	struct cpl_rx_data_ack *req;
	uint32_t dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	KASSERT(credits >= 0, ("%s: %d credits", __func__, credits));

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL)
		return (0);
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_RX_DATA_ACK, toep->tid);
	req->credit_dack = htobe32(dack | V_RX_CREDITS(credits));

	t4_wrq_tx(sc, wr);
	return (credits);
}
Example #7
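/*
 * Write 'npods' page pods, starting at page-pod index 'idx', as immediate
 * data in a single ULP_TX memory-write work request.
 */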
static int
ppod_write_idata(struct cxgbei_data *ci,
			struct cxgbei_ulp2_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbei_ulp2_gather_list *gl,
			unsigned int gl_pidx, struct toepcb *toep)
{
	u_int dlen = PPOD_SIZE * npods;
	u_int pm_addr = idx * PPOD_SIZE + ci->llimit;
	u_int wr_len = roundup(sizeof(struct ulp_mem_io) +
	    sizeof(struct ulptx_idata) + dlen, 16);
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct pagepod *ppod;
	u_int i;
	struct wrqe *wr;
	struct adapter *sc = toep->vi->pi->adapter;

	wr = alloc_wrqe(wr_len, toep->ctrlq);
	if (wr == NULL) {
		CXGBE_UNIMPLEMENTED("ppod_write_idata: alloc_wrqe failure");
		return (ENOMEM);
	}

	req = wrtod(wr);
	memset(req, 0, wr_len);
	ulp_mem_io_set_hdr(sc, toep->tid, req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);

	ppod = (struct pagepod *)(idata + 1);
	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES) {
		if (!hdr) /* clear the pagepod */
			ppod_clear(ppod);
		else /* set the pagepod */
			ppod_set(ppod, hdr, gl, gl_pidx);
	}

	t4_wrq_tx(sc, wr);
	return 0;
}
Example #8
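/*
 * Set a TCB field for an offloaded connection.  The hardware is told not to
 * send a reply.
 */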
void
t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, uint16_t word,
    uint64_t mask, uint64_t val)
{
	struct wrqe *wr;
	struct cpl_set_tcb_field *req;

	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	t4_wrq_tx(sc, wr);
}
Example #9
File: qp.c Project: Lxg1582/freebsd
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret;
	int eqsize;
	struct wrqe *wr;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/* RQT must be a power of 2. */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (alloc_host_sq(rdev, &wq->sq))
		goto err5;

	memset(wq->sq.queue, 0, wq->sq.memsize);
	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = contigmalloc(wq->rq.memsize,
	    M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (wq->rq.queue)
		wq->rq.dma_addr = vtophys(wq->rq.queue);
	else
		goto err6;
	CTR5(KTR_IW_CXGBE,
	    "%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx", __func__,
	    wq->sq.queue, (unsigned long long)vtophys(wq->sq.queue),
	    wq->rq.queue, (unsigned long long)vtophys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
	    MYPF_REG(SGE_PF_KDOORBELL));
	wq->gts = (void *)((unsigned long)rman_get_virtual(rdev->adap->regs_res)
			   + MYPF_REG(SGE_PF_GTS));
	if (user) {
		wq->sq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
						(wq->sq.qid << rdev->qpshift));
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
						(wq->rq.qid << rdev->qpshift));
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL)
		return (0);
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + spg_creds;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + spg_creds;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);
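	/*
	 * Post the work request; c4iw_wait_for_reply sleeps until the
	 * firmware completion (F_FW_WR_COMPL, cookie = &wr_wait) arrives.
	 */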

	t4_wrq_tx(sc, wr);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	CTR6(KTR_IW_CXGBE,
	    "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx",
	    __func__, wq->sq.qid, wq->rq.qid, wq->db,
	    (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}
Example #10
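/*
 * Send a FW_FLOWC_WR to establish flow state for the connection.  If 'ftxp'
 * is provided it supplies snd_nxt, rcv_nxt, send buffer, and MSS parameters.
 */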
void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	unsigned int nparams = ftxp ? 8 : 6, flowclen;
	struct port_info *pi = toep->port;
	struct adapter *sc = pi->adapter;
	unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
	    ("%s: flowc for tid %u sent already", __func__, toep->tid));

	CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(toep->ofld_rxq->iq.abs_id);
	if (ftxp) {
		uint32_t sndbuf = min(ftxp->snd_space, sc->tt.sndbuf);

		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
		flowc->mnemval[4].val = htobe32(ftxp->snd_nxt);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
		flowc->mnemval[5].val = htobe32(ftxp->rcv_nxt);
		flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[6].val = htobe32(sndbuf);
		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[7].val = htobe32(ftxp->mss);
	} else {
		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
		flowc->mnemval[4].val = htobe32(512);
		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
		flowc->mnemval[5].val = htobe32(512);
	}

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	toep->flags |= TPF_FLOWC_WR_SENT;
	t4_wrq_tx(sc, wr);
}
Example #11
static int
handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
{
	struct sockbuf *sb = &so->so_rcv;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);
	vm_page_t *pages;
	int npages, db_idx, rc, buf_flag;
	struct ddp_buffer *db;
	struct wrqe *wr;
	uint64_t ddp_flags;

	SOCKBUF_LOCK_ASSERT(sb);

#if 0
	if (sbused(sb) + sc->tt.ddp_thres > uio->uio_resid) {
		CTR4(KTR_CXGBE, "%s: sb_cc %d, threshold %d, resid %d",
		    __func__, sbused(sb), sc->tt.ddp_thres, uio->uio_resid);
	}
#endif

	/* XXX: too eager to disable DDP, could handle NBIO better than this. */
	if (sbused(sb) >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
	    uio->uio_resid > MAX_DDP_BUFFER_SIZE || uio->uio_iovcnt > 1 ||
	    so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO) ||
	    error || so->so_error || sb->sb_state & SBS_CANTRCVMORE)
		goto no_ddp;

	/*
	 * Fault in and then hold the pages of the uio buffers.  We'll wire them
	 * a bit later if everything else works out.
	 */
	SOCKBUF_UNLOCK(sb);
	if (hold_uio(uio, &pages, &npages) != 0) {
		SOCKBUF_LOCK(sb);
		goto no_ddp;
	}
	SOCKBUF_LOCK(sb);
	if (__predict_false(so->so_error || sb->sb_state & SBS_CANTRCVMORE)) {
		vm_page_unhold_pages(pages, npages);
		free(pages, M_CXGBE);
		goto no_ddp;
	}

	/*
	 * Figure out which one of the two DDP buffers to use this time.
	 */
	db_idx = select_ddp_buffer(sc, toep, pages, npages,
	    (uintptr_t)uio->uio_iov->iov_base & PAGE_MASK, uio->uio_resid);
	pages = NULL;	/* handed off to select_ddp_buffer */
	if (db_idx < 0)
		goto no_ddp;
	db = toep->db[db_idx];
	buf_flag = db_idx == 0 ? DDP_BUF0_ACTIVE : DDP_BUF1_ACTIVE;

	/*
	 * Build the compound work request that tells the chip where to DMA the
	 * payload.
	 */
	ddp_flags = select_ddp_flags(so, flags, db_idx);
	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sbused(sb), ddp_flags);
	if (wr == NULL) {
		/*
		 * Just unhold the pages.  The DDP buffer's software state is
		 * left as-is in the toep.  The page pods were written
		 * successfully and we may have an opportunity to use it in the
		 * future.
		 */
		vm_page_unhold_pages(db->pages, db->npages);
		goto no_ddp;
	}

	/* Wire (and then unhold) the pages, and give the chip the go-ahead. */
	wire_ddp_buffer(db);
	t4_wrq_tx(sc, wr);
	sb->sb_flags &= ~SB_DDP_INDICATE;
	toep->ddp_flags |= buf_flag;

	/*
	 * Wait for the DDP operation to complete and then unwire the pages.
	 * The return code from the sbwait will be the final return code of this
	 * function.  But we do need to wait for DDP no matter what.
	 */
	rc = sbwait(sb);
	while (toep->ddp_flags & buf_flag) {
		/* XXXGL: shouldn't here be sbwait() call? */
		sb->sb_flags |= SB_WAIT;
		msleep(&sb->sb_acc, &sb->sb_mtx, PSOCK, "sbwait", 0);
	}
	unwire_ddp_buffer(db);
	return (rc);
no_ddp:
	disable_ddp(sc, toep);
	discourage_ddp(toep);
	sb->sb_flags &= ~SB_DDP_INDICATE;
	return (0);
}
Example #12
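/*
 * Write the page pods describing a DDP buffer to card memory, at most
 * NUM_ULP_TX_SC_IMM_PPODS pods per work request.
 */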
static int
write_page_pods(struct adapter *sc, struct toepcb *toep, struct ddp_buffer *db)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz, idx;
	u_int ppod_addr;
	uint32_t cmd;

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = t4_ddp_pgsz[G_PPOD_PGSZ(db->tag)];
	ppod_addr = db->ppod_addr;
	for (i = 0; i < db->nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(db->nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, toep->ctrlq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(toep->tid) | db->tag);
			ppod->len_offset = htobe64(V_PPOD_LEN(db->len) |
			    V_PPOD_OFST(db->offset));
			ppod->rsvd = 0;
			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
			for (k = 0; k < nitems(ppod->addr); k++) {
				if (idx < db->npages) {
					ppod->addr[k] =
					    htobe64(db->pages[idx]->phys_addr);
					idx += ddp_pgsz / PAGE_SIZE;
				} else
					ppod->addr[k] = 0;
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, toep->tid, i, k,
				    htobe64(ppod->addr[k]));
#endif
			}

		}

		t4_wrq_tx(sc, wr);
	}

	return (0);
}