/*
 * Reposts the kernel DDP buffer after it has previously become full and
 * been invalidated.  We just need to reset the offset and adjust the DDP
 * flags.  Conveniently, we can set the flags and the offset with a single
 * message.  Note that this function does not set the buffer length; again
 * conveniently, our kernel buffer is of fixed size.  If the length needs
 * to be changed it must be done separately.
 */
static void
t3_repost_kbuf(struct toepcb *toep, unsigned int bufidx, int modulate, 
    int activate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	unsigned long flags;

#if 0	
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif	
	p->buf_state[bufidx].cur_offset = p->kbuf[bufidx]->dgl_offset;
	p->buf_state[bufidx].flags = p->kbuf_noinval ? DDP_BF_NOINVAL : 0;
	p->buf_state[bufidx].gl = p->kbuf[bufidx];
	p->cur_buf = bufidx;
	p->kbuf_idx = bufidx;

	flags = select_ddp_flags(toep, bufidx, nonblock, 0);
	if (!bufidx)
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
		    V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		    V_TF_DDP_BUF0_VALID(1),
		    V_TF_DDP_BUF0_FLUSH(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
		    V_TF_DDP_BUF0_VALID(1) |
		    V_TF_DDP_ACTIVE_BUF(activate), modulate);
	else
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
		    V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		    V_TF_DDP_BUF1_VALID(1) |
		    V_TF_DDP_ACTIVE_BUF(activate),
		    V_TF_DDP_BUF1_FLUSH(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1),
		    modulate);
}
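
/*
 * Aside (not part of the driver): t3_setup_ddpbufs is handed two words, the
 * flag values to set and a mask of which TCB flag bits the message may
 * touch, which is how one message can both re-enable DDP and reset the
 * offset.  Below is a minimal standalone sketch of that value/mask
 * read-modify-write pattern; the names are hypothetical, not Chelsio APIs.
 */
#include <stdint.h>

/* Apply the bits of "val" selected by "mask"; all other bits are kept. */
static inline uint64_t
tcb_apply_masked(uint64_t cur, uint64_t val, uint64_t mask)
{
	return ((cur & ~mask) | (val & mask));
}
/*
 * For instance, a mask containing V_TF_DDP_OFF(1) with a value that leaves
 * that bit clear turns DDP back on, exactly as in the calls above.
 */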
/*
 * Post a user buffer as an overlay on top of the current kernel buffer.
 */
int
t3_overlay_ubuf(struct toepcb *toep, struct sockbuf *rcv,
    const struct uio *uio, int nonblock, int rcv_flags,
    int modulate, int post_kbuf)
{
	int err, len, ubuf_idx;
	unsigned long flags;
	struct ddp_state *p = &toep->tp_ddp_state;

	if (p->kbuf[0] == NULL)
		return (EINVAL);
	sockbuf_unlock(rcv);
	err = setup_uio_ppods(toep, uio, 0, &len);
	sockbuf_lock(rcv);
	if (err)
		return (err);
	
	if ((rcv->sb_state & SBS_CANTRCVMORE) ||
	    (toep->tp_tp->t_flags & TF_TOE) == 0) 
		return (EINVAL);
		
	ubuf_idx = p->kbuf_idx;
	p->buf_state[ubuf_idx].flags = DDP_BF_NOFLIP;
	/* Use existing offset */
	/* Don't need to update .gl, user buffer isn't copied. */
	p->cur_buf = ubuf_idx;

	flags = select_ddp_flags(toep, ubuf_idx, nonblock, rcv_flags);

	if (post_kbuf) {
		struct ddp_buf_state *dbs = &p->buf_state[ubuf_idx ^ 1];
		
		dbs->cur_offset = 0;
		dbs->flags = 0;
		dbs->gl = p->kbuf[ubuf_idx ^ 1];
		p->kbuf_idx ^= 1;
		flags |= p->kbuf_idx ?
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_PUSH_DISABLE_1(0) :
		    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_PUSH_DISABLE_0(0);
	}
	
	if (ubuf_idx == 0) {
		t3_overlay_ddpbuf(toep, 0, p->ubuf_tag << 6, p->kbuf_tag[1] << 6,
				  len);
		t3_setup_ddpbufs(toep, 0, 0, p->kbuf[1]->dgl_length, 0,
				 flags,
				 OVERLAY_MASK | flags, 1);
	} else {
		t3_overlay_ddpbuf(toep, 1, p->kbuf_tag[0] << 6, p->ubuf_tag << 6,
				  len);
		t3_setup_ddpbufs(toep, p->kbuf[0]->dgl_length, 0, 0, 0,
				 flags,
				 OVERLAY_MASK | flags, 1);
	}
#ifdef T3_TRACE
	T3_TRACE5(TIDTB(so),
		  "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x ubuf_idx %d "
		  " kbuf_idx %d",
		   p->ubuf_tag, flags, OVERLAY_MASK, ubuf_idx, p->kbuf_idx);
#endif
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x",
	    p->ubuf_tag, flags, OVERLAY_MASK);
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf:  ubuf_idx %d kbuf_idx %d post_kbuf %d",
	    ubuf_idx, p->kbuf_idx, post_kbuf);
	    
	return (0);
}
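
/*
 * Aside (not part of the driver): the post_kbuf path above retires one HW
 * buffer and re-arms its twin by toggling kbuf_idx with ^= 1, then picks
 * the matching BUF0/BUF1 flag set.  A standalone sketch of that ping-pong
 * pattern follows; the names and flag values are hypothetical.
 */
#include <stdint.h>

#define BUF0_VALID	(1u << 0)	/* stand-in for V_TF_DDP_BUF0_VALID(1) */
#define BUF1_VALID	(1u << 1)	/* stand-in for V_TF_DDP_BUF1_VALID(1) */

struct pingpong {
	int idx;	/* buffer currently armed, 0 or 1 */
};

/* Flip to the other buffer and return the flag that marks it valid. */
static inline uint32_t
pingpong_arm_next(struct pingpong *pp)
{
	pp->idx ^= 1;		/* 0 <-> 1, as kbuf_idx ^= 1 above */
	return (pp->idx ? BUF1_VALID : BUF0_VALID);
}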
Example #3
static int
handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
{
	struct sockbuf *sb = &so->so_rcv;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);
	vm_page_t *pages;
	int npages, db_idx, rc, buf_flag;
	struct ddp_buffer *db;
	struct wrqe *wr;
	uint64_t ddp_flags;

	SOCKBUF_LOCK_ASSERT(sb);

#if 0
	if (sbused(sb) + sc->tt.ddp_thres > uio->uio_resid) {
		CTR4(KTR_CXGBE, "%s: sb_cc %d, threshold %d, resid %d",
		    __func__, sbused(sb), sc->tt.ddp_thres, uio->uio_resid);
	}
#endif

	/* XXX: too eager to disable DDP, could handle NBIO better than this. */
	if (sbused(sb) >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
	    uio->uio_resid > MAX_DDP_BUFFER_SIZE || uio->uio_iovcnt > 1 ||
	    so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO) ||
	    error || so->so_error || sb->sb_state & SBS_CANTRCVMORE)
		goto no_ddp;

	/*
	 * Fault in and then hold the pages of the uio buffers.  We'll wire them
	 * a bit later if everything else works out.
	 */
	SOCKBUF_UNLOCK(sb);
	if (hold_uio(uio, &pages, &npages) != 0) {
		SOCKBUF_LOCK(sb);
		goto no_ddp;
	}
	SOCKBUF_LOCK(sb);
	if (__predict_false(so->so_error || sb->sb_state & SBS_CANTRCVMORE)) {
		vm_page_unhold_pages(pages, npages);
		free(pages, M_CXGBE);
		goto no_ddp;
	}

	/*
	 * Figure out which one of the two DDP buffers to use this time.
	 */
	db_idx = select_ddp_buffer(sc, toep, pages, npages,
	    (uintptr_t)uio->uio_iov->iov_base & PAGE_MASK, uio->uio_resid);
	pages = NULL;	/* handed off to select_ddp_buffer */
	if (db_idx < 0)
		goto no_ddp;
	db = toep->db[db_idx];
	buf_flag = db_idx == 0 ? DDP_BUF0_ACTIVE : DDP_BUF1_ACTIVE;

	/*
	 * Build the compound work request that tells the chip where to DMA the
	 * payload.
	 */
	ddp_flags = select_ddp_flags(so, flags, db_idx);
	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sbused(sb), ddp_flags);
	if (wr == NULL) {
		/*
		 * Just unhold the pages.  The DDP buffer's software state is
		 * left as-is in the toep.  The page pods were written
		 * successfully and we may have an opportunity to use them in
		 * the future.
		 */
		vm_page_unhold_pages(db->pages, db->npages);
		goto no_ddp;
	}

	/* Wire (and then unhold) the pages, and give the chip the go-ahead. */
	wire_ddp_buffer(db);
	t4_wrq_tx(sc, wr);
	sb->sb_flags &= ~SB_DDP_INDICATE;
	toep->ddp_flags |= buf_flag;

	/*
	 * Wait for the DDP operation to complete and then unwire the pages.
	 * The return code from sbwait() will be the final return code of
	 * this function, but we must wait for DDP to finish no matter what.
	 */
	rc = sbwait(sb);
	while (toep->ddp_flags & buf_flag) {
		/* XXXGL: shouldn't this be an sbwait() call? */
		sb->sb_flags |= SB_WAIT;
		msleep(&sb->sb_acc, &sb->sb_mtx, PSOCK, "sbwait", 0);
	}
	unwire_ddp_buffer(db);
	return (rc);
no_ddp:
	disable_ddp(sc, toep);
	discourage_ddp(toep);
	sb->sb_flags &= ~SB_DDP_INDICATE;
	return (0);
}
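
/*
 * Aside (not part of the driver): the gate at the top of handle_ddp only
 * commits to DDP when the read is large enough to amortize page-pod setup
 * and the caller can afford to sleep.  The same predicate in standalone
 * form; the names and constants here are hypothetical (the driver reads
 * sc->tt.ddp_thres and MAX_DDP_BUFFER_SIZE).
 */
#include <stdbool.h>
#include <stddef.h>

#define DDP_THRES	16384		/* assumed threshold tunable */
#define MAX_DDP_BUF	(1024 * 1024)	/* assumed per-buffer mapping limit */

static bool
ddp_worthwhile(size_t queued, size_t resid, bool nonblocking)
{
	if (queued >= resid)		/* buffered data already covers the read */
		return (false);
	if (resid < DDP_THRES)		/* too small to pay for the setup */
		return (false);
	if (resid > MAX_DDP_BUF)	/* larger than one DDP buffer can map */
		return (false);
	if (nonblocking)		/* must not sleep waiting for the DMA */
		return (false);
	return (true);
}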