/*
 * Build a CPL_SET_TCB_FIELD sub-packet as the payload of a ULP_TX_PKT
 * command.  Returns a cursor just past the (16-byte padded) sub-packet so
 * the caller can chain further sub-packets into the same work request.
 */
static inline void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
    struct ulptx_idata *ulpsc;
    struct cpl_set_tcb_field_core *req;

    ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
    ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

    ulpsc = (struct ulptx_idata *)(ulpmc + 1);
    ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
    ulpsc->len = htobe32(sizeof(*req));

    req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
    OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
    req->reply_ctrl = htobe16(V_NO_REPLY(1) |
        V_QUEUENO(toep->ofld_rxq->iq.abs_id));
    req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
    req->mask = htobe64(mask);
    req->val = htobe64(val);

    /* Pad to a 16-byte boundary with a NOOP sub-command if needed. */
    ulpsc = (struct ulptx_idata *)(req + 1);
    if (LEN__SET_TCB_FIELD_ULP % 16) {
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        ulpsc->len = htobe32(0);
        return (ulpsc + 1);
    }
    return (ulpsc);
}
/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
                                  struct l2t_entry *e)
{
    struct cpl_l2t_write_req *req;

    if (!skb) {
        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
        if (!skb)
            return -ENOMEM;
    }

    req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
    req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
                        V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
                        V_L2T_W_PRIO(vlan_prio(e)));
    memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
    memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
    skb->priority = CPL_PRIORITY_CONTROL;
    cxgb3_ofld_send(dev, skb);

    while (e->arpq_head) {
        skb = e->arpq_head;
        e->arpq_head = skb->next;
        skb->next = NULL;
        cxgb3_ofld_send(dev, skb);
    }
    e->arpq_tail = NULL;
    e->state = L2T_STATE_VALID;

    return 0;
}
/*
 * Build a CPL_RX_DATA_ACK sub-packet as the payload of a ULP_TX_PKT
 * command.  Same cursor convention as mk_set_tcb_field_ulp() above; see the
 * chaining sketch that follows.
 */
static inline void *
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
    struct ulptx_idata *ulpsc;
    struct cpl_rx_data_ack_core *req;

    ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
    ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));

    ulpsc = (struct ulptx_idata *)(ulpmc + 1);
    ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
    ulpsc->len = htobe32(sizeof(*req));

    req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
    OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
    req->credit_dack = htobe32(F_RX_MODULATE_RX);

    /* Pad to a 16-byte boundary with a NOOP sub-command if needed. */
    ulpsc = (struct ulptx_idata *)(req + 1);
    if (LEN__RX_DATA_ACK_ULP % 16) {
        ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        ulpsc->len = htobe32(0);
        return (ulpsc + 1);
    }
    return (ulpsc);
}
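/*
 * Usage sketch, not from the original source: because the two ULP builders
 * above return a cursor just past what they wrote, a caller can pack several
 * sub-packets into one FW_ULPTX_WR.  This is a minimal sketch assuming the
 * surrounding t4_tom definitions (LEN__*_ULP, alloc_wrqe, wrtod,
 * INIT_ULPTX_WRH); mk_tcb_update_wr itself is a hypothetical name.
 */
static struct wrqe *
mk_tcb_update_wr(struct adapter *sc, struct toepcb *toep, uint64_t word,
    uint64_t mask, uint64_t val)
{
    struct wrqe *wr;
    struct work_request_hdr *wrh;
    struct ulp_txpkt *ulpmc;
    int len;

    /* One SET_TCB_FIELD and one RX_DATA_ACK, each padded to 16 bytes. */
    len = sizeof(*wrh) + roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
        roundup2(LEN__RX_DATA_ACK_ULP, 16);

    wr = alloc_wrqe(len, toep->ctrlq);
    if (wr == NULL)
        return (NULL);
    wrh = wrtod(wr);
    INIT_ULPTX_WRH(wrh, len, 1, 0);    /* atomic */

    /* Chain the sub-packets through the returned cursors. */
    ulpmc = (struct ulp_txpkt *)(wrh + 1);
    ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, word, mask, val);
    ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

    return (wr);    /* caller hands this to t4_wrq_tx() */
}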
/*
 * Handle an incoming CPL_ABORT_REQ_RSS: hand it to the registered client
 * handler if there is one, otherwise reply with a CPL_ABORT_RPL ourselves.
 */
static int
do_abort_req_rss(struct t3cdev *dev, struct mbuf *m)
{
    union opcode_tid *p = cplhdr(m);
    unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
    struct toe_tid_entry *toe_tid;

    toe_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
    if (toe_tid->ctx && toe_tid->client->handlers &&
        toe_tid->client->handlers[p->opcode]) {
        return toe_tid->client->handlers[p->opcode](dev, m, toe_tid->ctx);
    } else {
        struct cpl_abort_req_rss *req = cplhdr(m);
        struct cpl_abort_rpl *rpl;
        struct mbuf *reply_m = m_get(M_NOWAIT, MT_DATA);

        if (!reply_m) {
            log(LOG_NOTICE, "do_abort_req_rss: couldn't get mbuf!\n");
            goto out;
        }

        m_set_priority(reply_m, CPL_PRIORITY_DATA);
        rpl = cplhdr(reply_m);
        rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
        rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
        rpl->cmd = req->status;
        cxgb_ofld_send(dev, reply_m);
out:
        return (CPL_RET_BUF_DONE);
    }
}
/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
int
t4_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
{
    struct wrqe *wr;
    struct cpl_l2t_write_req *req;
    int idx = e->idx + sc->vres.l2t.start;

    mtx_assert(&e->lock, MA_OWNED);

    wr = alloc_wrqe(sizeof(*req), &sc->sge.mgmtq);
    if (wr == NULL)
        return (ENOMEM);
    req = wrtod(wr);

    INIT_TP_WR(req, 0);
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
        V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
    req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
    req->l2t_idx = htons(idx);
    req->vlan = htons(e->vlan);
    memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

    t4_wrq_tx(sc, wr);

    if (sync && e->state != L2T_STATE_SWITCHING)
        e->state = L2T_STATE_SYNC_WRITE;

    return (0);
}
/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
int
t4_write_l2e(struct l2t_entry *e, int sync)
{
    struct sge_wrq *wrq;
    struct adapter *sc;
    struct wrq_cookie cookie;
    struct cpl_l2t_write_req *req;
    int idx;

    mtx_assert(&e->lock, MA_OWNED);
    MPASS(e->wrq != NULL);

    wrq = e->wrq;
    sc = wrq->adapter;

    req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie);
    if (req == NULL)
        return (ENOMEM);

    idx = e->idx + sc->vres.l2t.start;
    INIT_TP_WR(req, 0);
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
        V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
    req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
    req->l2t_idx = htons(idx);
    req->vlan = htons(e->vlan);
    memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

    commit_wrq_wr(wrq, req, &cookie);

    if (sync && e->state != L2T_STATE_SWITCHING)
        e->state = L2T_STATE_SYNC_WRITE;

    return (0);
}
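/*
 * Hedged sketch, not the driver's exact code: the receive side of a sync
 * write.  A CPL_L2T_WRITE_RPL whose TID carries F_SYNC_WR closes an
 * L2T_STATE_SYNC_WRITE window, after which the entry can be marked valid.
 * The index math and the l2tab/L2T_SIZE names are assumptions based on the
 * allocation pattern above; pending-frame resend is omitted.
 */
static int
do_l2t_write_rpl_sketch(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
    struct adapter *sc = iq->adapter;
    const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
    unsigned int tid = GET_TID(rpl);
    unsigned int idx = (tid % L2T_SIZE) - sc->vres.l2t.start;

    if (rpl->status != CPL_ERR_NONE) {
        log(LOG_ERR, "%s: L2T write failed, status %u, idx %u\n",
            __func__, rpl->status, idx);
        return (EINVAL);
    }

    if (tid & F_SYNC_WR) {
        struct l2t_entry *e = &sc->l2t->l2tab[idx];

        mtx_lock(&e->lock);
        if (e->state == L2T_STATE_SYNC_WRITE)
            e->state = L2T_STATE_VALID;
        mtx_unlock(&e->lock);
    }
    return (0);
}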
/*
 * Program the SMT (source MAC table) entry for the port at the given index.
 */
static int
write_smt_entry(struct adapter *sc, int idx)
{
    struct port_info *pi = &sc->port[idx];
    struct cpl_smt_write_req *req;
    struct mbuf *m;

    m = M_GETHDR_OFLD(0, CPL_PRIORITY_CONTROL, req);
    if (m == NULL) {
        log(LOG_ERR, "%s: no mbuf, can't write SMT entry for %d\n",
            __func__, idx);
        return (ENOMEM);
    }

    req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
    req->mtu_idx = NMTUS - 1;    /* should be 0 but there's a T3 bug */
    req->iff = idx;
    memset(req->src_mac1, 0, sizeof(req->src_mac1));
    memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN);

    t3_offload_tx(sc, m);

    return (0);
}
/*
 * Populate a TID_RELEASE WR.  The mbuf must be already properly sized.
 */
static inline void
mk_tid_release(struct mbuf *m, unsigned int tid)
{
    struct cpl_tid_release *req;

    m_set_priority(m, CPL_PRIORITY_SETUP);
    req = mtod(m, struct cpl_tid_release *);
    m->m_pkthdr.len = m->m_len = sizeof(*req);
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
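/*
 * Hedged usage sketch, not part of the original source: a caller allocates a
 * small mbuf, lets mk_tid_release() populate it, and hands it to the offload
 * send path.  The deferred-release fallback named here is hypothetical.
 */
static void
send_tid_release(struct t3cdev *tdev, unsigned int tid)
{
    struct mbuf *m = m_get(M_NOWAIT, MT_DATA);

    if (m != NULL) {
        mk_tid_release(m, tid);
        cxgb_ofld_send(tdev, m);
    } else
        defer_tid_release(tdev, tid);    /* hypothetical fallback */
}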
/*
 * Release a hardware TID, reusing the caller's skb when one is supplied.
 */
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
    struct cpl_tid_release *req;

    skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
    if (!skb)
        return;
    req = (struct cpl_tid_release *)skb_put(skb, sizeof(*req));
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
    skb->priority = CPL_PRIORITY_SETUP;
    iwch_cxgb3_ofld_send(tdev, skb);
}
/*
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
{
    struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
    struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

    txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
    txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
    sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
    sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
    OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
    abort_req->rsvd0 = htonl(0);
    abort_req->rsvd1 = 0;
    abort_req->cmd = CPL_ABORT_NO_RST;
}
/* cxgb4_get_srq_entry: read the SRQ table entry
 * @dev: Pointer to the net_device
 * @srq_idx: Index of the srq
 * @entryp: pointer to the srq entry
 *
 * Sends CPL_SRQ_TABLE_REQ message for the given index.
 * Contents will be returned in CPL_SRQ_TABLE_RPL message.
 *
 * Returns zero if the read is successful, else an error
 * number will be returned.  Caller should not use the srq
 * entry if the return value is non-zero.
 */
int cxgb4_get_srq_entry(struct net_device *dev,
            int srq_idx, struct srq_entry *entryp)
{
    struct cpl_srq_table_req *req;
    struct adapter *adap;
    struct sk_buff *skb;
    struct srq_data *s;
    int rc = -ENODEV;

    adap = netdev2adap(dev);
    s = adap->srq;
    if (!(adap->flags & FULL_INIT_DONE) || !s)
        goto out;

    skb = alloc_skb(sizeof(*req), GFP_KERNEL);
    if (!skb)
        return -ENOMEM;
    req = (struct cpl_srq_table_req *)__skb_put_zero(skb, sizeof(*req));
    INIT_TP_WR(req, 0);
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
                                          TID_TID_V(srq_idx) |
                                          TID_QID_V(adap->sge.fw_evtq.abs_id)));
    req->idx = srq_idx;

    mutex_lock(&s->lock);

    s->entryp = entryp;
    t4_mgmt_tx(adap, skb);

    rc = wait_for_completion_timeout(&s->comp, SRQ_WAIT_TO);
    if (rc)
        rc = 0;
    else /* !rc means we timed out */
        rc = -ETIMEDOUT;

    WARN_ON_ONCE(entryp->idx != srq_idx);
    mutex_unlock(&s->lock);
out:
    return rc;
}
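/*
 * Hedged usage sketch: fetch one SRQ table entry and report the outcome.
 * dump_srq_entry() is a hypothetical caller, not part of the driver; the
 * netdev is assumed to belong to a fully initialised adapter.
 */
static int dump_srq_entry(struct net_device *dev, int idx)
{
    struct srq_entry e;
    int ret;

    ret = cxgb4_get_srq_entry(dev, idx, &e);
    if (ret)
        return ret;    /* -ENODEV, -ENOMEM or -ETIMEDOUT */

    /* e now holds the CPL_SRQ_TABLE_RPL payload for this index. */
    pr_info("srq[%d] read back OK\n", idx);
    return 0;
}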
/*
 * Clear the TCB RX_QUIESCE bit so the tid resumes receiving data.
 */
int iwch_resume_tid(struct iwch_ep *ep)
{
    struct cpl_set_tcb_field *req;
    struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

    if (!skb)
        return -ENOMEM;
    req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
    req->reply = 0;
    req->cpu_idx = 0;
    req->word = htons(W_TCB_RX_QUIESCE);
    req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
    req->val = 0;

    skb->priority = CPL_PRIORITY_DATA;
    return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
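/*
 * The quiesce counterpart presumably mirrors iwch_resume_tid() with the
 * RX_QUIESCE bit set instead of cleared.  A sketch under that assumption:
 */
int iwch_quiesce_tid(struct iwch_ep *ep)
{
    struct cpl_set_tcb_field *req;
    struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

    if (!skb)
        return -ENOMEM;
    req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
    req->reply = 0;
    req->cpu_idx = 0;
    req->word = htons(W_TCB_RX_QUIESCE);
    req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
    /* Set, rather than clear, the quiesce bit. */
    req->val = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);

    skb->priority = CPL_PRIORITY_DATA;
    return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}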
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
{
    struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
    struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

    txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                  V_ULP_TXPKT_DEST(0));
    txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
    sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
    sc->len = cpu_to_be32(sizeof(*abort_rpl) -
                          sizeof(struct work_request_hdr));
    OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
    abort_rpl->rsvd0 = cpu_to_be32(0);
    abort_rpl->rsvd1 = 0;
    abort_rpl->cmd = CPL_ABORT_NO_RST;

    sc = (struct ulptx_idata *)(abort_rpl + 1);
    sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
    sc->len = cpu_to_be32(0);
}
/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  Must be
 * called with the entry locked.
 */
static int
setup_l2e_send_pending(struct adapter *sc, struct l2t_entry *e)
{
    struct mbuf *m;
    struct cpl_l2t_write_req *req;
    struct port_info *pi = &sc->port[e->smt_idx];    /* smt_idx is port_id */

    mtx_assert(&e->lock, MA_OWNED);

    m = M_GETHDR_OFLD(pi->first_qset, CPL_PRIORITY_CONTROL, req);
    if (m == NULL) {
        log(LOG_ERR, "%s: no mbuf, can't setup L2 entry at index %d\n",
            __func__, e->idx);
        return (ENOMEM);
    }

    req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
    req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
        V_L2T_W_VLAN(e->vlan & EVL_VLID_MASK) |
        V_L2T_W_PRIO(EVL_PRIOFTAG(e->vlan)));
    memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

    t3_offload_tx(sc, m);

    /*
     * XXX: We used pi->first_qset to send the L2T_WRITE_REQ.  If any mbuf
     * on the arpq is going out via another queue set associated with the
     * port then it has a bad race with the L2T_WRITE_REQ.  Ideally we
     * should wait till the reply to the write before draining the arpq.
     */
    while (e->arpq_head) {
        m = e->arpq_head;
        e->arpq_head = m->m_next;
        m->m_next = NULL;
        t3_offload_tx(sc, m);
    }
    e->arpq_tail = NULL;

    return (0);
}
/*
 * Build the CPL_ACT_OPEN_REQ (T6 layout) used to install a hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
                            unsigned int qid_filterid, struct adapter *adap)
{
    struct cpl_t6_act_open_req *t6req = NULL;
    struct cpl_act_open_req *req = NULL;

    t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
    INIT_TP_WR(t6req, 0);
    req = (struct cpl_act_open_req *)t6req;
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
    req->local_port = cpu_to_be16(f->fs.val.lport);
    req->peer_port = cpu_to_be16(f->fs.val.fport);
    req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
        f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
    req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
        f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
    req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
                                    f->fs.newvlan == VLAN_REWRITE) |
                            DELACK_V(f->fs.hitcnts) |
                            L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
                            SMAC_SEL_V((cxgb4_port_viid(f->dev) &
                                        0x7F) << 1) |
                            TX_CHAN_V(f->fs.eport) |
                            NO_CONG_V(f->fs.rpttid) |
                            ULP_MODE_V(f->fs.nat_mode ?
                                       ULP_MODE_TCPDDP : ULP_MODE_NONE) |
                            TCAM_BYPASS_F | NON_OFFLOAD_F);
    t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
                                                                  f->dev)));
    t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
                        RSS_QUEUE_V(f->fs.iq) |
                        TX_QUEUE_V(f->fs.nat_mode) |
                        T5_OPT_2_VALID_F |
                        RX_CHANNEL_F |
                        CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
                                     (f->fs.dirsteer << 1)) |
                        PACE_V((f->fs.maskhash) |
                               ((f->fs.dirsteerhash) << 1)) |
                        CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
}
/*
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static void mk_set_tcb_ulp(struct filter_entry *f,
                           struct cpl_set_tcb_field *req,
                           unsigned int word, u64 mask, u64 val,
                           u8 cookie, int no_reply)
{
    struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
    struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

    txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
    txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
    sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
    sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
    req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
                            QUEUENO_V(0));
    req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
    req->mask = cpu_to_be64(mask);
    req->val = cpu_to_be64(val);

    /* Pad to a 16-byte boundary with a trailing NOOP sub-command. */
    sc = (struct ulptx_idata *)(req + 1);
    sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
    sc->len = htonl(0);
}
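/*
 * Hedged sketch, condensed from the usual caller pattern with error handling
 * omitted: mk_set_tcb_ulp(), mk_abort_req_ulp() and mk_abort_rpl_ulp() are
 * packed back to back inside one ULPTX work request to tear down a hash
 * filter.  The helper name and the exact TCB field written are illustrative
 * assumptions.
 */
static struct sk_buff *mk_del_hash_filter_skb(struct adapter *adap,
                                              struct filter_entry *f)
{
    struct work_request_hdr *wr;
    struct cpl_set_tcb_field *req;
    struct ulptx_idata *aligner;
    struct cpl_abort_req *abort_req;
    struct cpl_abort_rpl *abort_rpl;
    struct sk_buff *skb;
    unsigned int wrlen;

    wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner)) +
                    sizeof(*abort_req) + sizeof(*abort_rpl), 16);
    skb = alloc_skb(wrlen, GFP_KERNEL);
    if (!skb)
        return NULL;

    req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
    INIT_ULPTX_WR(req, wrlen, 0, 0);

    /* Step past the work request header to the first sub-packet. */
    wr = (struct work_request_hdr *)req;
    wr++;
    req = (struct cpl_set_tcb_field *)wr;

    /* Re-point RSS at the FW event queue; field choice is illustrative. */
    mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
                   TCB_RSS_INFO_V(adap->sge.fw_evtq.abs_id), 0, 1);

    aligner = (struct ulptx_idata *)(req + 1);
    abort_req = (struct cpl_abort_req *)(aligner + 1);
    mk_abort_req_ulp(abort_req, f->tid);

    abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
    mk_abort_rpl_ulp(abort_rpl, f->tid);

    return skb;    /* caller queues it with t4_ofld_send() */
}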
/*
 * Close a connection by sending a CPL_CLOSE_CON_REQ message.
 */
static int
close_conn(struct adapter *sc, struct toepcb *toep)
{
    struct wrqe *wr;
    struct cpl_close_con_req *req;
    unsigned int tid = toep->tid;

    CTR3(KTR_CXGBE, "%s: tid %u%s", __func__, toep->tid,
        toep->flags & TPF_FIN_SENT ? ", IGNORED" : "");

    if (toep->flags & TPF_FIN_SENT)
        return (0);

    KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
        ("%s: flowc_wr not sent for tid %u.", __func__, tid));

    wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
    if (wr == NULL) {
        /* XXX */
        panic("%s: allocation failure.", __func__);
    }
    req = wrtod(wr);

    req->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) |
        V_FW_WR_IMMDLEN(sizeof(*req) - sizeof(req->wr)));
    req->wr.wr_mid = htonl(V_FW_WR_LEN16(howmany(sizeof(*req), 16)) |
        V_FW_WR_FLOWID(tid));
    req->wr.wr_lo = cpu_to_be64(0);
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
    req->rsvd = 0;

    toep->flags |= TPF_FIN_SENT;
    toep->flags &= ~TPF_SEND_FIN;
    t4_l2t_send(sc, wr, toep->l2te);

    return (0);
}
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
                                        struct cpl_set_tcb_field *req,
                                        unsigned int word, u64 mask, u64 val,
                                        u8 cookie, int no_reply)
{
    struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
    struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

    txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                  V_ULP_TXPKT_DEST(0));
    txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
    sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
    sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
    OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
    req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
                                  V_QUEUENO(0));
    req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
    req->mask = cpu_to_be64(mask);
    req->val = cpu_to_be64(val);

    sc = (struct ulptx_idata *)(req + 1);
    sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
    sc->len = cpu_to_be32(0);
}
/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
    struct adapter *sc = tod->tod_softc;
    struct tom_data *td = tod_td(tod);
    struct toepcb *toep = NULL;
    struct wrqe *wr = NULL;
    struct ifnet *rt_ifp = rt->rt_ifp;
    struct port_info *pi;
    int mtu_idx, rscale, qid_atid, rc, isipv6;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp = intotcpcb(inp);
    int reason;

    INP_WLOCK_ASSERT(inp);
    KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
        ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

    if (rt_ifp->if_type == IFT_ETHER)
        pi = rt_ifp->if_softc;
    else if (rt_ifp->if_type == IFT_L2VLAN) {
        struct ifnet *ifp = VLAN_COOKIE(rt_ifp);

        pi = ifp->if_softc;
    } else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS);    /* XXX: implement lagg+TOE */
    else
        DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

    toep = alloc_toepcb(pi, -1, -1, M_NOWAIT);
    if (toep == NULL)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

    toep->tid = alloc_atid(sc, toep);
    if (toep->tid < 0)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

    toep->l2te = t4_l2t_get(pi, rt_ifp,
        rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
    if (toep->l2te == NULL)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

    isipv6 = nam->sa_family == AF_INET6;
    wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
    if (wr == NULL)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

    if (sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0)
        set_tcpddp_ulp_mode(toep);
    else
        toep->ulp_mode = ULP_MODE_NONE;
    SOCKBUF_LOCK(&so->so_rcv);
    /* opt0 rcv_bufsiz initially, assumes its normal meaning later */
    toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
    SOCKBUF_UNLOCK(&so->so_rcv);

    /*
     * The kernel sets request_r_scale based on sb_max whereas we need to
     * take hardware's MAX_RCV_WND into account too.  This is normally a
     * no-op as MAX_RCV_WND is much larger than the default sb_max.
     */
    if (tp->t_flags & TF_REQ_SCALE)
        rscale = tp->request_r_scale = select_rcv_wscale();
    else
        rscale = 0;
    mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, 0);
    qid_atid = (toep->ofld_rxq->iq.abs_id << 14) | toep->tid;

    if (isipv6) {
        struct cpl_act_open_req6 *cpl = wrtod(wr);

        if ((inp->inp_vflag & INP_IPV6) == 0) {
            /* XXX think about this a bit more */
            log(LOG_ERR,
                "%s: time to think about AF_INET6 + vflag 0x%x.\n",
                __func__, inp->inp_vflag);
            DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
        }

        toep->ce = hold_lip(td, &inp->in6p_laddr);
        if (toep->ce == NULL)
            DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

        if (is_t4(sc)) {
            INIT_TP_WR(cpl, 0);
            cpl->params = select_ntuple(pi, toep->l2te);
        } else {
            struct cpl_t5_act_open_req6 *c5 = (void *)cpl;

            INIT_TP_WR(c5, 0);
            c5->iss = htobe32(tp->iss);
            c5->params = select_ntuple(pi, toep->l2te);
        }
        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
            qid_atid));
        cpl->local_port = inp->inp_lport;
        cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
        cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
        cpl->peer_port = inp->inp_fport;
        cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
        cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
        cpl->opt0 = calc_opt0(so, pi, toep->l2te, mtu_idx, rscale,
            toep->rx_credits, toep->ulp_mode);
        cpl->opt2 = calc_opt2a(so, toep);
    } else {
        struct cpl_act_open_req *cpl = wrtod(wr);

        if (is_t4(sc)) {
            INIT_TP_WR(cpl, 0);
            cpl->params = select_ntuple(pi, toep->l2te);
        } else {
            struct cpl_t5_act_open_req *c5 = (void *)cpl;

            INIT_TP_WR(c5, 0);
            c5->iss = htobe32(tp->iss);
            c5->params = select_ntuple(pi, toep->l2te);
        }
        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
            qid_atid));
        inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
            &cpl->peer_ip, &cpl->peer_port);
        cpl->opt0 = calc_opt0(so, pi, toep->l2te, mtu_idx, rscale,
            toep->rx_credits, toep->ulp_mode);
        cpl->opt2 = calc_opt2a(so, toep);
    }

    CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
        toep->tid, tcpstates[tp->t_state], toep, inp);

    offload_socket(so, toep);
    rc = t4_l2t_send(sc, wr, toep->l2te);
    if (rc == 0) {
        toep->flags |= TPF_CPL_PENDING;
        return (0);
    }

    undo_offload_socket(so);
    reason = __LINE__;
failed:
    CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

    if (wr)
        free_wrqe(wr);

    if (toep) {
        if (toep->tid >= 0)
            free_atid(sc, toep->tid);
        if (toep->l2te)
            t4_l2t_release(toep->l2te);
        if (toep->ce)
            release_lip(td, toep->ce);
        free_toepcb(toep);
    }
    return (rc);
}
/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
    struct adapter *sc = tod->tod_softc;
    struct toepcb *toep = NULL;
    struct wrqe *wr = NULL;
    struct ifnet *rt_ifp = rt->rt_ifp;
    struct vi_info *vi;
    int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
    struct inpcb *inp = sotoinpcb(so);
    struct tcpcb *tp = intotcpcb(inp);
    int reason;
    struct offload_settings settings;
    uint16_t vid = 0xfff, pcp = 0;

    INP_WLOCK_ASSERT(inp);
    KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
        ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

    if (rt_ifp->if_type == IFT_ETHER)
        vi = rt_ifp->if_softc;
    else if (rt_ifp->if_type == IFT_L2VLAN) {
        struct ifnet *ifp = VLAN_TRUNKDEV(rt_ifp);

        vi = ifp->if_softc;
        VLAN_TAG(rt_ifp, &vid);
        VLAN_PCP(rt_ifp, &pcp);
    } else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS);    /* XXX: implement lagg+TOE */
    else
        DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

    rw_rlock(&sc->policy_lock);
    settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL,
        EVL_MAKETAG(vid, pcp, 0), inp);
    rw_runlock(&sc->policy_lock);
    if (!settings.offload)
        DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

    if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
        txqid = settings.txq;
    else
        txqid = arc4random() % vi->nofldtxq;
    txqid += vi->first_ofld_txq;
    if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
        rxqid = settings.rxq;
    else
        rxqid = arc4random() % vi->nofldrxq;
    rxqid += vi->first_ofld_rxq;

    toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
    if (toep == NULL)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

    toep->tid = alloc_atid(sc, toep);
    if (toep->tid < 0)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

    toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
        rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
    if (toep->l2te == NULL)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

    isipv6 = nam->sa_family == AF_INET6;
    wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
    if (wr == NULL)
        DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

    toep->vnet = so->so_vnet;
    set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
    SOCKBUF_LOCK(&so->so_rcv);
    /* opt0 rcv_bufsiz initially, assumes its normal meaning later */
    toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
    SOCKBUF_UNLOCK(&so->so_rcv);

    /*
     * The kernel sets request_r_scale based on sb_max whereas we need to
     * take hardware's MAX_RCV_WND into account too.  This is normally a
     * no-op as MAX_RCV_WND is much larger than the default sb_max.
     */
    if (tp->t_flags & TF_REQ_SCALE)
        rscale = tp->request_r_scale = select_rcv_wscale();
    else
        rscale = 0;
    mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
    qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) |
        V_TID_TID(toep->tid) | V_TID_COOKIE(CPL_COOKIE_TOM);

    if (isipv6) {
        struct cpl_act_open_req6 *cpl = wrtod(wr);
        struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
        struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

        if ((inp->inp_vflag & INP_IPV6) == 0)
            DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

        toep->ce = t4_hold_lip(sc, &inp->in6p_laddr, NULL);
        if (toep->ce == NULL)
            DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

        switch (chip_id(sc)) {
        case CHELSIO_T4:
            INIT_TP_WR(cpl, 0);
            cpl->params = select_ntuple(vi, toep->l2te);
            break;
        case CHELSIO_T5:
            INIT_TP_WR(cpl5, 0);
            cpl5->iss = htobe32(tp->iss);
            cpl5->params = select_ntuple(vi, toep->l2te);
            break;
        case CHELSIO_T6:
        default:
            INIT_TP_WR(cpl6, 0);
            cpl6->iss = htobe32(tp->iss);
            cpl6->params = select_ntuple(vi, toep->l2te);
            break;
        }
        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
            qid_atid));
        cpl->local_port = inp->inp_lport;
        cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
        cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
        cpl->peer_port = inp->inp_fport;
        cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
        cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
        cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
            toep->rx_credits, toep->ulp_mode, &settings);
        cpl->opt2 = calc_opt2a(so, toep, &settings);
    } else {
        struct cpl_act_open_req *cpl = wrtod(wr);
        struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
        struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

        switch (chip_id(sc)) {
        case CHELSIO_T4:
            INIT_TP_WR(cpl, 0);
            cpl->params = select_ntuple(vi, toep->l2te);
            break;
        case CHELSIO_T5:
            INIT_TP_WR(cpl5, 0);
            cpl5->iss = htobe32(tp->iss);
            cpl5->params = select_ntuple(vi, toep->l2te);
            break;
        case CHELSIO_T6:
        default:
            INIT_TP_WR(cpl6, 0);
            cpl6->iss = htobe32(tp->iss);
            cpl6->params = select_ntuple(vi, toep->l2te);
            break;
        }
        OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
            qid_atid));
        inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
            &cpl->peer_ip, &cpl->peer_port);
        cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
            toep->rx_credits, toep->ulp_mode, &settings);
        cpl->opt2 = calc_opt2a(so, toep, &settings);
    }

    CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
        toep->tid, tcpstates[tp->t_state], toep, inp);

    offload_socket(so, toep);
    rc = t4_l2t_send(sc, wr, toep->l2te);
    if (rc == 0) {
        toep->flags |= TPF_CPL_PENDING;
        return (0);
    }

    undo_offload_socket(so);
    reason = __LINE__;
failed:
    CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

    if (wr)
        free_wrqe(wr);

    if (toep) {
        if (toep->tid >= 0)
            free_atid(sc, toep->tid);
        if (toep->l2te)
            t4_l2t_release(toep->l2te);
        if (toep->ce)
            t4_release_lip(sc, toep->ce);
        free_toepcb(toep);
    }
    return (rc);
}