Example 1
static inline uint64_t
select_ddp_flags(struct socket *so, int flags, int db_idx)
{
    uint64_t ddp_flags = V_TF_DDP_INDICATE_OUT(0);
    int waitall = flags & MSG_WAITALL;
    int nb = (so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT | MSG_NBIO));

    KASSERT(db_idx == 0 || db_idx == 1,
            ("%s: bad DDP buffer index %d", __func__, db_idx));

    if (db_idx == 0) {
        ddp_flags |= V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_ACTIVE_BUF(0);
        if (waitall)
            ddp_flags |= V_TF_DDP_PUSH_DISABLE_0(1);
        else if (nb)
            ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
        else
            ddp_flags |= V_TF_DDP_BUF0_FLUSH(0);
    } else {
        ddp_flags |= V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1);
        if (waitall)
            ddp_flags |= V_TF_DDP_PUSH_DISABLE_1(1);
        else if (nb)
            ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
        else
            ddp_flags |= V_TF_DDP_BUF1_FLUSH(0);
    }

    return (ddp_flags);
}
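
A minimal stand-alone sketch of the field-helper pattern used in select_ddp_flags() above, under the assumption that the Chelsio V_TF_* macros simply shift a value into a named bit position of the TCB RX_DDP_FLAGS word. The S_/V_ names and bit positions below are invented for illustration; they are not the real register layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit positions; the real ones live in the hardware headers. */
#define S_DDP_BUF0_VALID 0
#define S_DDP_ACTIVE_BUF 1
#define S_DDP_PUSH_DISABLE_0 2
#define S_DDP_BUF0_FLUSH 3

#define V_DDP_BUF0_VALID(x) ((uint64_t)(x) << S_DDP_BUF0_VALID)
#define V_DDP_ACTIVE_BUF(x) ((uint64_t)(x) << S_DDP_ACTIVE_BUF)
#define V_DDP_PUSH_DISABLE_0(x) ((uint64_t)(x) << S_DDP_PUSH_DISABLE_0)
#define V_DDP_BUF0_FLUSH(x) ((uint64_t)(x) << S_DDP_BUF0_FLUSH)

int
main(void)
{
    int waitall = 1;    /* pretend the caller passed MSG_WAITALL */
    int nb = 0;         /* and the socket is blocking */
    uint64_t ddp_flags = V_DDP_BUF0_VALID(1) | V_DDP_ACTIVE_BUF(0);

    /* Same decision tree select_ddp_flags() takes for buffer 0. */
    if (waitall)
        ddp_flags |= V_DDP_PUSH_DISABLE_0(1);
    else if (nb)
        ddp_flags |= V_DDP_BUF0_FLUSH(1);

    printf("ddp_flags = %#llx\n", (unsigned long long)ddp_flags);
    return (0);
}

Note that V_TF_DDP_INDICATE_OUT(0) in the original evaluates to zero, so initializing ddp_flags with it presumably just documents that the INDICATE_OUT bit is deliberately left clear; whether the bit is actually modified depends on the mask supplied alongside this value.
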
Example 2
/*
 * Reposts the kernel DDP buffer after it has become full and been
 * invalidated.  We just need to reset the offset and adjust the DDP flags.
 * Conveniently, we can set the flags and the offset with a single message.
 * Note that this function does not set the buffer length; conveniently, our
 * kernel buffer is of fixed size.  If the length needs to change, that must
 * be done separately.  (A simplified sketch of this state reset follows the
 * function.)
 */
static void
t3_repost_kbuf(struct toepcb *toep, unsigned int bufidx, int modulate, 
    int activate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	unsigned long flags;

#if 0	
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif	
	p->buf_state[bufidx].cur_offset = p->kbuf[bufidx]->dgl_offset;
	p->buf_state[bufidx].flags = p->kbuf_noinval ? DDP_BF_NOINVAL : 0;
	p->buf_state[bufidx].gl = p->kbuf[bufidx];
	p->cur_buf = bufidx;
	p->kbuf_idx = bufidx;

	flags = select_ddp_flags(toep, bufidx, nonblock, 0);
	if (!bufidx)
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
			 V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |
			 V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) |
		         V_TF_DDP_BUF0_VALID(1),
		         V_TF_DDP_BUF0_FLUSH(1) |
			 V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		         V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
			 V_TF_DDP_BUF0_VALID(1) |
			 V_TF_DDP_ACTIVE_BUF(activate), modulate);
	else
		t3_setup_ddpbufs(toep, 0, 0, 0, 0, flags |
			 V_TF_DDP_PSH_NO_INVALIDATE0(p->kbuf_noinval) |	
		         V_TF_DDP_PSH_NO_INVALIDATE1(p->kbuf_noinval) | 
			 V_TF_DDP_BUF1_VALID(1) | 
			 V_TF_DDP_ACTIVE_BUF(activate),
		         V_TF_DDP_BUF1_FLUSH(1) | 
			 V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		         V_TF_DDP_PSH_NO_INVALIDATE1(1) | V_TF_DDP_OFF(1) |
			 V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1), 
			 modulate);
	
}
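
A simplified, self-contained sketch of the per-buffer software state reset described in the comment above. The structures and the DDP_BF_NOINVAL value are stand-ins for the real ddp_buf_state and gather-list definitions, which are not shown here; the hardware side (TCB flags and offset) is what the single message built via t3_setup_ddpbufs() updates.

#include <stddef.h>

#define DDP_BF_NOINVAL 0x01     /* stand-in for the real flag value */

struct kbuf {                   /* stand-in for the gather list */
    size_t dgl_offset;
    size_t dgl_length;
};

struct buf_state {              /* stand-in for struct ddp_buf_state */
    size_t cur_offset;
    unsigned int flags;
    struct kbuf *gl;
};

/* Rewind the software view of the buffer back to the start of the list. */
static void
repost_sw_state(struct buf_state *bs, struct kbuf *kb, int noinval)
{
    bs->cur_offset = kb->dgl_offset;
    bs->flags = noinval ? DDP_BF_NOINVAL : 0;
    bs->gl = kb;
}

int
main(void)
{
    struct kbuf kb = { .dgl_offset = 0, .dgl_length = 65536 };
    struct buf_state bs;

    repost_sw_state(&bs, &kb, 1);
    return (bs.flags == DDP_BF_NOINVAL ? 0 : 1);
}
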
Example 3
void
enable_ddp(struct adapter *sc, struct toepcb *toep)
{

	KASSERT((toep->ddp_flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
	    ("%s: toep %p has bad ddp_flags 0x%x",
	    __func__, toep, toep->ddp_flags));

	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
	    __func__, toep->tid, time_uptime);

	toep->ddp_flags |= DDP_SC_REQ;
	t4_set_tcb_field(sc, toep, 1, W_TCB_RX_DDP_FLAGS,
	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1));
	t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS,
	    V_TF_RCV_COALESCE_ENABLE(1), 0);
}
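
A small sketch of the mask/value convention behind the two t4_set_tcb_field() calls above, assuming a SET_TCB_FIELD update rewrites only the bits present in the mask with the corresponding bits of the value. The F_* bit assignments are illustrative, not the real TCB layout.

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics: only masked bits are replaced with bits from val. */
static uint64_t
apply_tcb_update(uint64_t old, uint64_t mask, uint64_t val)
{
    return ((old & ~mask) | (val & mask));
}

/* Illustrative bit assignments only. */
#define F_DDP_OFF           (1ULL << 0)
#define F_DDP_INDICATE_OUT  (1ULL << 1)
#define F_DDP_BUF0_INDICATE (1ULL << 2)
#define F_DDP_BUF1_INDICATE (1ULL << 3)
#define F_DDP_BUF0_VALID    (1ULL << 4)
#define F_DDP_BUF1_VALID    (1ULL << 5)

int
main(void)
{
    uint64_t mask, val, flags;

    /* Mirrors the first t4_set_tcb_field() call in enable_ddp(). */
    mask = F_DDP_OFF | F_DDP_INDICATE_OUT | F_DDP_BUF0_INDICATE |
        F_DDP_BUF1_INDICATE | F_DDP_BUF0_VALID | F_DDP_BUF1_VALID;
    val = F_DDP_BUF0_INDICATE | F_DDP_BUF1_INDICATE;

    flags = F_DDP_OFF;          /* DDP starts out switched off */
    flags = apply_tcb_update(flags, mask, val);

    /* DDP_OFF is now cleared and both INDICATE bits are set. */
    printf("flags = %#llx\n", (unsigned long long)flags);
    return (0);
}

Under that reading, the first call turns DDP on (clears DDP_OFF), clears INDICATE_OUT and both BUFn_VALID bits, and sets both BUFn_INDICATE bits, while the second call clears RCV_COALESCE_ENABLE.
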
Example 4
static struct wrqe *
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
                      int offset, uint64_t ddp_flags)
{
    struct ddp_buffer *db = toep->db[db_idx];
    struct wrqe *wr;
    struct work_request_hdr *wrh;
    struct ulp_txpkt *ulpmc;
    int len;

    KASSERT(db_idx == 0 || db_idx == 1,
            ("%s: bad DDP buffer index %d", __func__, db_idx));

    /*
     * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
     * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
     *
     * The work request header is 16B and always ends at a 16B boundary.
     * The ULPTX master commands that follow must all end at 16B boundaries
     * too so we round up the size to 16.
     */
    len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
          roundup2(LEN__RX_DATA_ACK_ULP, 16);

    wr = alloc_wrqe(len, toep->ctrlq);
    if (wr == NULL)
        return (NULL);
    wrh = wrtod(wr);
    INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
    ulpmc = (struct ulp_txpkt *)(wrh + 1);

    /* Write the buffer's tag */
    ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
                                 W_TCB_RX_DDP_BUF0_TAG + db_idx,
                                 V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
                                 V_TCB_RX_DDP_BUF0_TAG(db->tag));

    /* Update the current offset in the DDP buffer and its total length */
    if (db_idx == 0)
        ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
                                     W_TCB_RX_DDP_BUF0_OFFSET,
                                     V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
                                     V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
                                     V_TCB_RX_DDP_BUF0_OFFSET(offset) |
                                     V_TCB_RX_DDP_BUF0_LEN(db->len));
    else
        ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
                                     W_TCB_RX_DDP_BUF1_OFFSET,
                                     V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
                                     V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
                                     V_TCB_RX_DDP_BUF1_OFFSET(offset) |
                                     V_TCB_RX_DDP_BUF1_LEN((u64)db->len << 32));

    /* Update DDP flags */
    ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
                                 V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF1_FLUSH(1) |
                                 V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PUSH_DISABLE_1(1) |
                                 V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1) |
                                 V_TF_DDP_ACTIVE_BUF(1) | V_TF_DDP_INDICATE_OUT(1), ddp_flags);

    /* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
    ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

    return (wr);
}
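
A stand-alone illustration of the 16-byte rounding used to size the compound work request above. roundup2() here matches the usual FreeBSD power-of-two round-up; the LEN_* constants are placeholder values, since the real LEN__SET_TCB_FIELD_ULP and LEN__RX_DATA_ACK_ULP come from the driver headers.

#include <stdio.h>

/* Power-of-two round-up, as in FreeBSD's sys/param.h. */
#define roundup2(x, y) (((x) + ((y) - 1)) & ~((y) - 1))

/* Placeholder sizes; the real values come from the driver headers. */
#define WRH_LEN 16                  /* work request header is 16B */
#define LEN_SET_TCB_FIELD_ULP 24    /* hypothetical */
#define LEN_RX_DATA_ACK_ULP 24      /* hypothetical */

int
main(void)
{
    /* 3 SET_TCB_FIELDs + 1 RX_DATA_ACK, each padded to a 16B boundary. */
    int len = WRH_LEN + 3 * roundup2(LEN_SET_TCB_FIELD_ULP, 16) +
        roundup2(LEN_RX_DATA_ACK_ULP, 16);

    printf("work request length = %d bytes\n", len); /* 16 + 3*32 + 32 = 144 */
    return (0);
}
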
Example 5
/*
 * Post a user buffer as an overlay on top of the current kernel buffer.
 */
int
t3_overlay_ubuf(struct toepcb *toep, struct sockbuf *rcv,
    const struct uio *uio, int nonblock, int rcv_flags,
    int modulate, int post_kbuf)
{
	int err, len, ubuf_idx;
	unsigned long flags;
	struct ddp_state *p = &toep->tp_ddp_state;

	if (p->kbuf[0] == NULL) {
		return (EINVAL);
	}
	sockbuf_unlock(rcv);
	err = setup_uio_ppods(toep, uio, 0, &len);
	sockbuf_lock(rcv);
	if (err)
		return (err);
	
	if ((rcv->sb_state & SBS_CANTRCVMORE) ||
	    (toep->tp_tp->t_flags & TF_TOE) == 0) 
		return (EINVAL);
		
	ubuf_idx = p->kbuf_idx;
	p->buf_state[ubuf_idx].flags = DDP_BF_NOFLIP;
	/* Use existing offset */
	/* Don't need to update .gl, user buffer isn't copied. */
	p->cur_buf = ubuf_idx;

	flags = select_ddp_flags(toep, ubuf_idx, nonblock, rcv_flags);

	if (post_kbuf) {
		struct ddp_buf_state *dbs = &p->buf_state[ubuf_idx ^ 1];
		
		dbs->cur_offset = 0;
		dbs->flags = 0;
		dbs->gl = p->kbuf[ubuf_idx ^ 1];
		p->kbuf_idx ^= 1;
		flags |= p->kbuf_idx ?
		    V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_PUSH_DISABLE_1(0) :
		    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_PUSH_DISABLE_0(0);
	}
	
	if (ubuf_idx == 0) {
		t3_overlay_ddpbuf(toep, 0, p->ubuf_tag << 6, p->kbuf_tag[1] << 6,
				  len);
		t3_setup_ddpbufs(toep, 0, 0, p->kbuf[1]->dgl_length, 0,
				 flags,
				 OVERLAY_MASK | flags, 1);
	} else {
		t3_overlay_ddpbuf(toep, 1, p->kbuf_tag[0] << 6, p->ubuf_tag << 6,
				  len);
		t3_setup_ddpbufs(toep, p->kbuf[0]->dgl_length, 0, 0, 0,
				 flags,
				 OVERLAY_MASK | flags, 1);
	}
#ifdef T3_TRACE
	T3_TRACE5(TIDTB(so),
		  "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x ubuf_idx %d "
		  " kbuf_idx %d",
		   p->ubuf_tag, flags, OVERLAY_MASK, ubuf_idx, p->kbuf_idx);
#endif
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf: tag %u flags 0x%x mask 0x%x",
	    p->ubuf_tag, flags, OVERLAY_MASK);
	CTR3(KTR_TOM,
	    "t3_overlay_ubuf:  ubuf_idx %d kbuf_idx %d post_kbuf %d",
	    ubuf_idx, p->kbuf_idx, post_kbuf);
	    
	return (0);
}
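
A tiny sketch of the double-buffer index bookkeeping in t3_overlay_ubuf(): the user buffer overlays the slot currently named by kbuf_idx, and when post_kbuf is set the spare kernel buffer is posted in the other slot by flipping the index with an XOR. The variable names are local to the sketch.

#include <stdio.h>

int
main(void)
{
    int kbuf_idx = 0;           /* slot currently used for DDP */
    int ubuf_idx = kbuf_idx;    /* the user buffer overlays that slot */
    int post_kbuf = 1;

    if (post_kbuf) {
        int other = ubuf_idx ^ 1;   /* the idle slot */
        kbuf_idx ^= 1;              /* next kernel buffer goes there */
        printf("kernel buffer reposted in slot %d\n", other);
    }
    printf("ubuf_idx %d kbuf_idx %d\n", ubuf_idx, kbuf_idx);
    return (0);
}
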