/*
 * Translate a parsed OVS datapath flow key into an OpenFlow match.
 *
 * Should only be used when creating the match for a packet-in: every field
 * the datapath parsed is copied into the match with an exact-match mask, so
 * the result describes this single packet as precisely as possible.
 *
 * pkey:  parsed key from the kernel datapath (multi-byte fields in network
 *        byte order).
 * match: output; fully overwritten here.
 */
void
ind_ovs_key_to_match(const struct ind_ovs_parsed_key *pkey, of_match_t *match)
{
    memset(match, 0, sizeof(*match));

    /* We only populate the masks for this OF version */
    match->version = ind_ovs_version;

    of_match_fields_t *fields = &match->fields;

    /* The datapath always supplies the in-port and Ethernet header. */
    assert(ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_IN_PORT));
    fields->in_port = pkey->in_port;
    OF_MATCH_MASK_IN_PORT_EXACT_SET(match);

    assert(ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_ETHERNET));
    memcpy(&fields->eth_dst, pkey->ethernet.eth_dst, OF_MAC_ADDR_BYTES);
    memcpy(&fields->eth_src, pkey->ethernet.eth_src, OF_MAC_ADDR_BYTES);
    OF_MATCH_MASK_ETH_DST_EXACT_SET(match);
    OF_MATCH_MASK_ETH_SRC_EXACT_SET(match);

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_ETHERTYPE)) {
        fields->eth_type = ntohs(pkey->ethertype);
        /* Values at or below this cutoff are 802.3 lengths, not ethertypes. */
        if (fields->eth_type <= OF_DL_TYPE_NOT_ETH_TYPE) {
            fields->eth_type = OF_DL_TYPE_NOT_ETH_TYPE;
        }
    } else {
        fields->eth_type = OF_DL_TYPE_NOT_ETH_TYPE;
    }
    OF_MATCH_MASK_ETH_TYPE_EXACT_SET(match);

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_VLAN)) {
        fields->vlan_vid = VLAN_VID(ntohs(pkey->vlan));
        fields->vlan_pcp = VLAN_PCP(ntohs(pkey->vlan));
        if (ind_ovs_version == OF_VERSION_1_3) {
            /* OF 1.3 signals "VLAN tag present" with the CFI/OFPVID bit. */
            fields->vlan_vid |= VLAN_CFI_BIT;
        }
    } else {
        if (ind_ovs_version == OF_VERSION_1_0) {
            /* OF 1.0 encodes "untagged" as an all-ones VID. */
            fields->vlan_vid = -1;
        } else {
            fields->vlan_vid = 0;
        }
        fields->vlan_pcp = 0;
    }
    OF_MATCH_MASK_VLAN_VID_EXACT_SET(match);
    OF_MATCH_MASK_VLAN_PCP_EXACT_SET(match);

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_IPV4)) {
        fields->ipv4_src = ntohl(pkey->ipv4.ipv4_src);
        fields->ipv4_dst = ntohl(pkey->ipv4.ipv4_dst);
        fields->ip_dscp = pkey->ipv4.ipv4_tos;
        fields->ip_proto = pkey->ipv4.ipv4_proto;
        OF_MATCH_MASK_IPV4_SRC_EXACT_SET(match);
        OF_MATCH_MASK_IPV4_DST_EXACT_SET(match);
        OF_MATCH_MASK_IP_DSCP_EXACT_SET(match);
        OF_MATCH_MASK_IP_PROTO_EXACT_SET(match);
    }

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_IPV6)) {
        memcpy(&fields->ipv6_src, pkey->ipv6.ipv6_src, OF_IPV6_BYTES);
        memcpy(&fields->ipv6_dst, pkey->ipv6.ipv6_dst, OF_IPV6_BYTES);
        fields->ipv6_flabel = ntohl(pkey->ipv6.ipv6_label);
        /*
         * Fix: set exact masks like every other populated field above;
         * without them the IPv6 fields are left wildcarded in the
         * packet-in match.
         */
        OF_MATCH_MASK_IPV6_SRC_EXACT_SET(match);
        OF_MATCH_MASK_IPV6_DST_EXACT_SET(match);
        OF_MATCH_MASK_IPV6_FLABEL_EXACT_SET(match);
    }

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_ARP)) {
        fields->arp_op = ntohs(pkey->arp.arp_op);
        fields->arp_spa = ntohl(pkey->arp.arp_sip);
        fields->arp_tpa = ntohl(pkey->arp.arp_tip);
        memcpy(&fields->arp_sha, pkey->arp.arp_sha, OF_MAC_ADDR_BYTES);
        memcpy(&fields->arp_tha, pkey->arp.arp_tha, OF_MAC_ADDR_BYTES);
        /* Fix: exact masks were missing for the ARP fields as well. */
        OF_MATCH_MASK_ARP_OP_EXACT_SET(match);
        OF_MATCH_MASK_ARP_SPA_EXACT_SET(match);
        OF_MATCH_MASK_ARP_TPA_EXACT_SET(match);
        OF_MATCH_MASK_ARP_SHA_EXACT_SET(match);
        OF_MATCH_MASK_ARP_THA_EXACT_SET(match);

        /* Special case ARP for OF 1.0: expose ARP info via the IP fields. */
        if (ind_ovs_version == OF_VERSION_1_0) {
            fields->ipv4_src = ntohl(pkey->arp.arp_sip);
            fields->ipv4_dst = ntohl(pkey->arp.arp_tip);
            fields->ip_proto = ntohs(pkey->arp.arp_op) & 0xFF;
            OF_MATCH_MASK_IPV4_SRC_EXACT_SET(match);
            OF_MATCH_MASK_IPV4_DST_EXACT_SET(match);
            OF_MATCH_MASK_IP_PROTO_EXACT_SET(match);
        }
    }

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_TCP)) {
        fields->tcp_dst = ntohs(pkey->tcp.tcp_dst);
        fields->tcp_src = ntohs(pkey->tcp.tcp_src);
        OF_MATCH_MASK_TCP_DST_EXACT_SET(match);
        OF_MATCH_MASK_TCP_SRC_EXACT_SET(match);
    }

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_UDP)) {
        fields->udp_dst = ntohs(pkey->udp.udp_dst);
        fields->udp_src = ntohs(pkey->udp.udp_src);
        /* Fix: exact masks were missing for the UDP ports (cf. TCP above). */
        OF_MATCH_MASK_UDP_DST_EXACT_SET(match);
        OF_MATCH_MASK_UDP_SRC_EXACT_SET(match);

        /* Special case UDP for OF 1.0, which reuses the TCP port fields. */
        if (ind_ovs_version == OF_VERSION_1_0) {
            fields->tcp_dst = ntohs(pkey->udp.udp_dst);
            fields->tcp_src = ntohs(pkey->udp.udp_src);
            OF_MATCH_MASK_TCP_DST_EXACT_SET(match);
            OF_MATCH_MASK_TCP_SRC_EXACT_SET(match);
        }
    }

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_ICMP)) {
        fields->icmpv4_type = pkey->icmp.icmp_type;
        fields->icmpv4_code = pkey->icmp.icmp_code;

        /* Special case ICMP for OF 1.0: type/code ride in the TCP ports. */
        if (ind_ovs_version == OF_VERSION_1_0) {
            fields->tcp_dst = pkey->icmp.icmp_code;
            fields->tcp_src = pkey->icmp.icmp_type;
            OF_MATCH_MASK_TCP_DST_EXACT_SET(match);
            OF_MATCH_MASK_TCP_SRC_EXACT_SET(match);
        }
    }

    if (ATTR_BITMAP_TEST(pkey->populated, OVS_KEY_ATTR_ICMPV6)) {
        fields->icmpv6_type = pkey->icmpv6.icmpv6_type;
        fields->icmpv6_code = pkey->icmpv6.icmpv6_code;
        /* Fix: exact masks were missing here too. */
        OF_MATCH_MASK_ICMPV6_TYPE_EXACT_SET(match);
        OF_MATCH_MASK_ICMPV6_CODE_EXACT_SET(match);
    }

    /*
     * Not supported by OVS:
     * sctp_dst, sctp_src, ipv6_nd_target, ipv6_nd_sll, ipv6_nd_tll,
     * mpls_label, mpls_tc, ip_ecn, in_phy_port, metadata
     */
}
/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 *
 * Returns 0 if the connection was handed to the TOE (a CPL_ACT_OPEN_REQ*
 * was queued and TPF_CPL_PENDING set), otherwise an errno and the
 * connection falls back to the normal host stack.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct vi_info *vi;
	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;
	struct offload_settings settings;
	uint16_t vid = 0xfff, pcp = 0;	/* defaults used when not on a VLAN */

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	/*
	 * Resolve the vi_info for the egress interface.  For a VLAN
	 * interface, use the trunk device's softc and capture the tag and
	 * priority so the offload-policy lookup below can match on them.
	 * NOTE(review): DONT_OFFLOAD_ACTIVE_OPEN is defined elsewhere;
	 * given the "failed:" label below it presumably records rc/reason
	 * and jumps there -- confirm against its definition.
	 */
	if (rt_ifp->if_type == IFT_ETHER)
		vi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_TRUNKDEV(rt_ifp);

		vi = ifp->if_softc;
		VLAN_TAG(rt_ifp, &vid);
		VLAN_PCP(rt_ifp, &pcp);
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS);	/* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	/* Consult the offload policy; bail if offload is not permitted. */
	rw_rlock(&sc->policy_lock);
	settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL,
	    EVL_MAKETAG(vid, pcp, 0), inp);
	rw_runlock(&sc->policy_lock);
	if (!settings.offload)
		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

	/*
	 * Pick offload tx/rx queues: use the policy's choice when it is in
	 * range, otherwise pick one at random, then bias by the VI's first
	 * offload queue to get an absolute queue id.
	 */
	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
		txqid = settings.txq;
	else
		txqid = arc4random() % vi->nofldtxq;
	txqid += vi->first_ofld_txq;
	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
		rxqid = settings.rxq;
	else
		rxqid = arc4random() % vi->nofldrxq;
	rxqid += vi->first_ofld_rxq;

	/*
	 * Acquire the resources needed for the offloaded connection: the
	 * offload PCB, an active-open tid (atid), an L2 table entry, and
	 * the work request that will carry the CPL.  Each failure takes
	 * the common "failed" cleanup path, which releases whatever was
	 * obtained so far.
	 */
	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	/* L2 entry for the next hop: the gateway if routed, else the peer. */
	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->vnet = so->so_vnet;
	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;

	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
	/* Encode rx queue, atid, and the TOM CPL cookie into one word. */
	qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
	    V_TID_COOKIE(CPL_COOKIE_TOM);

	/*
	 * Build the active-open CPL.  The same buffer is viewed through
	 * chip-generation-specific structs (T4/T5/T6 layouts differ); the
	 * T5 and T6 requests additionally carry the initial send sequence
	 * number (iss).
	 */
	if (isipv6) {
		struct cpl_act_open_req6 *cpl = wrtod(wr);
		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

		if ((inp->inp_vflag & INP_IPV6) == 0)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

		/* Pin the local IPv6 address for the duration of the tid. */
		toep->ce = t4_hold_lip(sc, &inp->in6p_laddr, NULL);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		/*
		 * NOTE(review): the 64-bit loads below assume the in6p
		 * address fields are suitably aligned for uint64_t access
		 * -- confirm for all supported platforms.
		 */
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);
		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	/*
	 * Attach the toepcb to the socket before sending the request; if
	 * the L2T send fails the attachment is undone and we fall through
	 * to the common cleanup below.
	 */
	offload_socket(so, toep);
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		/* The hardware now owns the open; completion arrives via CPL. */
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	/* Common error path: release whatever was acquired, in safe order. */
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d",
	    __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			t4_release_lip(sc, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}