static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	int err;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;

	rt = gre_get_rt(skb, dev, &fl, key);
	if (IS_ERR(rt))
		goto err_free_skb;

	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
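/*
 * The tail of gre_fb_xmit() above is a recurring idiom in these transmit
 * paths: before the outer IP/GRE headers are pushed, the skb must have
 * enough headroom for them plus the output device's link-layer header, and
 * the header area must be writable (not shared with a clone).  A minimal
 * sketch of that idiom as a standalone helper; the helper name is
 * illustrative and not part of the original source:
 */
static int tunnel_ensure_headroom(struct sk_buff *skb, int min_headroom)
{
	int head_delta;

	if (skb_headroom(skb) >= min_headroom && !skb_header_cloned(skb))
		return 0;

	/* same rounding as the callers above: pad by 16 and align */
	head_delta = SKB_DATA_ALIGN(min_headroom - skb_headroom(skb) + 16);

	return pskb_expand_head(skb, max_t(int, head_delta, 0), 0, GFP_ATOMIC);
}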
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	int ret;
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethhdr *eh;
	int head_delta;

	brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);

	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		bphy_err(drvr, "xmit rejected state=%d\n", drvr->bus_if->state);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	/* Some recent Broadcom's firmwares disassociate STA when they receive
	 * an 802.11f ADD frame. This behavior can lead to a local DoS security
	 * issue. Attacker may trigger disassociation of any STA by sending a
	 * proper Ethernet frame to the wireless interface.
	 *
	 * Moreover this feature may break AP interfaces in some specific
	 * setups. This applies e.g. to the bridge with hairpin mode enabled and
	 * IFLA_BRPORT_MCAST_TO_UCAST set. IAPP packet generated by a firmware
	 * will get passed back to the wireless interface and cause immediate
	 * disassociation of a just-connected STA.
	 */
	if (!drvr->settings->iapp && brcmf_skb_is_iapp(skb)) {
		dev_kfree_skb(skb);
		ret = -EINVAL;
		goto done;
	}

	/* Make sure there's enough writeable headroom */
	if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
		head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);

		brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
			  brcmf_ifname(ifp), head_delta);
		atomic_inc(&drvr->bus_if->stats.pktcowed);
		ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
				       GFP_ATOMIC);
		if (ret < 0) {
			bphy_err(drvr, "%s: failed to expand headroom\n",
				 brcmf_ifname(ifp));
			atomic_inc(&drvr->bus_if->stats.pktcow_failed);
			goto done;
		}
	}
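/*
 * For drivers that do not need the pktcowed/pktcow_failed statistics kept
 * by brcmf_netdev_start_xmit() above, the same "writable headroom" check
 * can be expressed with the stock helper skb_cow_head(), which reallocates
 * the header only when the headroom is short or the header is cloned.
 * A minimal sketch, assuming drvr->hdrlen is the bus header size the
 * driver needs to prepend (the wrapper name is illustrative):
 */
static int brcmf_xmit_headroom_sketch(struct brcmf_pub *drvr,
				      struct sk_buff *skb)
{
	/* returns 0 if the header is already private and large enough,
	 * otherwise expands the head with GFP_ATOMIC */
	return skb_cow_head(skb, drvr->hdrlen);
}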
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
	struct net *net = ovs_dp_get_net(vport->dp);
	struct ovs_key_ipv4_tunnel *tun_key;
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df;
	int err;

	if (unlikely(!OVS_CB(skb)->egress_tun_key)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = OVS_CB(skb)->egress_tun_key;

	/* Route lookup */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = tun_key->ipv4_dst;
	fl.saddr = tun_key->ipv4_src;
	fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_GRE;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
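/*
 * In the newer GRE paths above (gre_fb_xmit(), prepare_fb_xmit()) this
 * open-coded flowi4 setup is hidden behind gre_get_rt().  A sketch of what
 * such a lookup helper might look like for the OVS tunnel key used here;
 * the helper name is illustrative, not taken from the kernel source:
 */
static struct rtable *gre_tnl_route_sketch(struct net *net, struct flowi4 *fl,
					   const struct ovs_key_ipv4_tunnel *tun_key,
					   __u32 mark)
{
	memset(fl, 0, sizeof(*fl));
	fl->daddr = tun_key->ipv4_dst;
	fl->saddr = tun_key->ipv4_src;
	fl->flowi4_tos = RT_TOS(tun_key->ipv4_tos);
	fl->flowi4_mark = mark;
	fl->flowi4_proto = IPPROTO_GRE;

	/* returns an ERR_PTR() on failure, like the open-coded version */
	return ip_route_output_key(net, fl);
}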
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel_info *tun_info;
	struct net *net = dev_net(dev);
	const struct ip_tunnel_key *key;
	struct flowi4 fl;
	struct rtable *rt;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	int err;

	tun_info = skb_tunnel_info(skb, AF_INET);
	if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX))
		goto err_free_skb;

	key = &tun_info->key;

	memset(&fl, 0, sizeof(fl));
	fl.daddr = key->ipv4_dst;
	fl.saddr = key->ipv4_src;
	fl.flowi4_tos = RT_TOS(key->ipv4_tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_GRE;

	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt))
		goto err_free_skb;

	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
/*
 * Transmit a packet to the base station on behalf of the network stack
 *
 * Returns: NETDEV_TX_OK (always, even in case of error)
 *
 * In case of error, we just drop it. Reasons:
 *
 *  - we add a hw header to each skb, and if the network stack
 *    retries, we have no way to know if that skb has it or not.
 *
 *  - network protocols have their own drop-recovery mechanisms
 *
 *  - there is not much else we can do
 *
 * If the device is idle, we need to wake it up; that is an operation
 * that will sleep. See i2400m_net_wake_tx() for details.
 */
static netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *net_dev)
{
	struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
	struct device *dev = i2400m_dev(i2400m);
	int result;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (skb_header_cloned(skb)) {
		/*
		 * Make tcpdump/wireshark happy -- if they are
		 * running, the skb is cloned and we will overwrite
		 * the mac fields in i2400m_tx_prep_header. Expand
		 * seems to fix this...
		 */
		result = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (result) {
			result = NETDEV_TX_BUSY;
			goto error_expand;
		}
	}

	if (i2400m->state == I2400M_SS_IDLE)
		result = i2400m_net_wake_tx(i2400m, net_dev, skb);
	else
		result = i2400m_net_tx(i2400m, net_dev, skb);
	if (result < 0)
		net_dev->stats.tx_dropped++;
	else {
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += skb->len;
	}
	result = NETDEV_TX_OK;
error_expand:
	kfree_skb(skb);
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
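/*
 * The pskb_expand_head(skb, 0, 0, GFP_ATOMIC) call in
 * i2400m_hard_start_xmit() exists purely to unshare the header: when a
 * packet sniffer is attached, the skb is cloned, and rewriting the MAC
 * fields in place would also corrupt the copy handed to the capture path.
 * The same effect can be had with the stock helper skb_cow_head(); a
 * minimal sketch (the wrapper name is illustrative):
 */
static int unshare_header_sketch(struct sk_buff *skb)
{
	/* zero extra headroom requested, so this only reallocates the
	 * header when it is shared with a clone */
	return skb_cow_head(skb, 0);
}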
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
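/*
 * The dst_cache consulted in prepare_fb_xmit() above avoids a full route
 * lookup per packet, but it only works if the cache is initialised before
 * first use and torn down with its owner.  A minimal sketch of that
 * lifecycle, assuming a hypothetical container structure (the struct and
 * function names are illustrative, not from the kernel source):
 */
struct gre_cache_owner_sketch {
	struct dst_cache rt_cache;	/* consulted on the transmit path */
};

static int cache_owner_init_sketch(struct gre_cache_owner_sketch *owner)
{
	return dst_cache_init(&owner->rt_cache, GFP_KERNEL);
}

static void cache_owner_destroy_sketch(struct gre_cache_owner_sketch *owner)
{
	dst_cache_destroy(&owner->rt_cache);
}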
static int __send(struct vport *vport, struct sk_buff *skb,
		  int tunnel_hlen,
		  __be32 seq, __be16 gre64_flag)
{
	struct rtable *rt;
	int min_headroom;
	__be16 df;
	__be32 saddr;
	int err;

	/* Route lookup */
	saddr = OVS_CB(skb)->tun_key->ipv4_src;
	rt = find_route(ovs_dp_get_net(vport->dp),
			&saddr, OVS_CB(skb)->tun_key->ipv4_dst,
			IPPROTO_GRE, OVS_CB(skb)->tun_key->ipv4_tos,
			skb->mark);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ tunnel_hlen + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
static inline int dma_xmit(struct sk_buff *skb, struct net_device *dev,
			   END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 txd_info3, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	txd_info3 = TX3_QDMA_SWC;

	if (gmac_no != PSE_PORT_PPE) {
		u32 QID = M2Q_table[(skb->mark & 0x3f)];
		if (QID < 8 && M2Q_wan_lan) {
#if defined (CONFIG_PSEUDO_SUPPORT)
			if (gmac_no == PSE_PORT_GMAC2)
				QID += 8;
#elif defined (CONFIG_RAETH_HW_VLAN_TX)
			if ((skb_vlan_tag_get(skb) & VLAN_VID_MASK) > 1)
				QID += 8;
#endif
		}
		txd_info3 |= TX3_QDMA_QID(QID);
	}

	txd_info4 = TX4_DMA_FPORT(gmac_no);

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb))
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;

		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}

		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+2 free descriptors (2 need to prevent head/tail overlap) */
	if (ei_local->txd_pool_free_num < (nr_desc+2)) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: QDMA TX pool is run out! (GMAC: %d)\n",
			       RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	qdma_write_skb_fragment(ei_local, frag_addr, frag_size,
				txd_info3, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		qdma_write_skb_fragment(ei_local, frag_addr, frag_size,
					txd_info3, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* smp_mb() already inlined in netdev_tx_sent_queue */
	wmb();
#endif

	/* kick the QDMA TX */
	sysRegWrite(QTX_CTX_PTR, (u32)get_txd_ptr_phy(ei_local, ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}
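/*
 * The CONFIG_RAETH_BQL branch above accounts bytes to the queue with
 * netdev_tx_sent_queue() at transmit time.  Byte Queue Limits only works if
 * the TX-completion path reports the same packets and bytes back; a minimal
 * sketch of the matching completion-side accounting (the function and
 * parameter names are illustrative, not from this driver):
 */
static void dma_xmit_complete_sketch(struct net_device *dev,
				     unsigned int done_pkts,
				     unsigned int done_bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* pairs with netdev_tx_sent_queue() in dma_xmit() */
	netdev_tx_completed_queue(txq, done_pkts, done_bytes);

	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);
}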
static inline int dma_xmit(struct sk_buff *skb, struct net_device *dev,
			   END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 next_idx, desc_odd = 0;
	u32 txd_info2 = 0, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

#if !defined (RAETH_HW_PADPKT)
	if (skb->len < ei_local->min_pkt_len) {
		if (skb_padto(skb, ei_local->min_pkt_len)) {
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: skb_padto failed\n", RAETH_DEV_NAME);
#endif
			return NETDEV_TX_OK;
		}
		skb_put(skb, ei_local->min_pkt_len - skb->len);
	}
#endif

#if defined (CONFIG_RALINK_MT7620)
	if (gmac_no == PSE_PORT_PPE)
		txd_info4 = TX4_DMA_FP_BMAP(0x80); /* P7 */
	else
#if defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x20); /* P5 */
#elif defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x10); /* P4 */
#else
		txd_info4 = 0; /* routing by DA */
#endif
#elif defined (CONFIG_RALINK_MT7621)
	txd_info4 = TX4_DMA_FPORT(gmac_no);
#else
	txd_info4 = (TX4_DMA_QN(3) | TX4_DMA_PN(gmac_no));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && !defined (RAETH_SDMA)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb)) {
#if defined (RAETH_HW_VLAN4K)
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#else
		u32 vlan_tci = skb_vlan_tag_get(skb);
		txd_info4 |= (TX4_DMA_INSV | TX4_DMA_VPRI(vlan_tci));
		txd_info4 |= (u32)ei_local->vlan_4k_map[(vlan_tci & VLAN_VID_MASK)];
#endif
	}
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;

		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}

		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif
	nr_desc = DIV_ROUND_UP(nr_desc, 2);

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+1 free descriptors */
	next_idx = (ei_local->txd_last_idx + nr_desc) % NUM_TX_DESC;
	if (ei_local->txd_buff[ei_local->txd_last_idx] || ei_local->txd_buff[next_idx]) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: PDMA TX ring is full! (GMAC: %d)\n",
			       RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	pdma_write_skb_fragment(ei_local, frag_addr, frag_size,
				&desc_odd, &txd_info2, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		pdma_write_skb_fragment(ei_local, frag_addr, frag_size,
					&desc_odd, &txd_info2, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* smp_mb() already inlined in netdev_tx_sent_queue */
	wmb();
#endif

	/* kick the DMA TX */
	sysRegWrite(TX_CTX_IDX0, cpu_to_le32(ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}
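/*
 * The !RAETH_HW_PADPKT path above pads short frames by hand with
 * skb_padto() followed by skb_put(), because skb_padto() zeroes the tail
 * but does not advance skb->len.  On kernels that provide it, the pair can
 * be collapsed into skb_put_padto(), which pads and updates the length in
 * one call; a minimal sketch (the helper name is illustrative):
 */
static int pad_min_len_sketch(struct sk_buff *skb, unsigned int min_pkt_len)
{
	/* skb_put_padto() frees the skb itself on allocation failure */
	if (skb->len < min_pkt_len && skb_put_padto(skb, min_pkt_len))
		return -ENOMEM;
	return 0;
}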