static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { struct ec_bhf_priv *priv = netdev_priv(net_dev); struct tx_desc *desc; unsigned len; desc = &priv->tx_descs[priv->tx_dnext]; skb_copy_and_csum_dev(skb, desc->data); len = skb->len; memset(&desc->header, 0, sizeof(desc->header)); desc->header.len = cpu_to_le16(len); desc->header.port = TX_HDR_PORT_0; ec_bhf_send_packet(priv, desc); priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { /* Make sure that updates to tx_dnext are perceived * by timer routine. */ smp_wmb(); netif_stop_queue(net_dev); } priv->stat_tx_bytes += len; dev_kfree_skb(skb); return NETDEV_TX_OK; }
static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) { struct ixpdev_priv *ip = netdev_priv(dev); struct ixpdev_tx_desc *desc; int entry; if (unlikely(skb->len > PAGE_SIZE)) { /* @@@ Count drops. */ dev_kfree_skb(skb); return 0; } entry = tx_pointer; tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT; desc = tx_desc + entry; desc->pkt_length = skb->len; desc->channel = ip->channel; skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr)); dev_kfree_skb(skb); ixp2000_reg_write(RING_TX_PENDING, TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc))); dev->trans_start = jiffies; local_irq_disable(); ip->tx_queue_entries++; if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) netif_stop_queue(dev); local_irq_enable(); return 0; }
/*put the fram into trasmition buffer*/ static int nic_start_xmit (struct sk_buff *skb, struct net_device *dev) { /*get our private struct */ struct nic_private *np = netdev_priv(dev); void __iomem *ioaddr = np->iobase; unsigned int len = skb->len; printk("In start_xmit"); /*see if frame size is more than our trasmition buffer*/ if (likely(len < TX_BUF_LEN)) { if (len < ETH_ZLEN) memset(np->txbuf, 0, ETH_ZLEN); skb_copy_and_csum_dev(skb, np->txbuf); dev_kfree_skb(skb); } else { dev_kfree_skb(skb); np->txdroped++; return 0; } /*tell device size of frame and threasold */ writel(max(len, (unsigned int)ETH_ZLEN),ioaddr+TxStatus0 ); return 0; }
static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) { struct ixpdev_priv *ip = netdev_priv(dev); struct ixpdev_tx_desc *desc; int entry; unsigned long flags; if (unlikely(skb->len > PAGE_SIZE)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } entry = tx_pointer; tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT; desc = tx_desc + entry; desc->pkt_length = skb->len; desc->channel = ip->channel; skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr)); dev_kfree_skb(skb); ixp2000_reg_write(RING_TX_PENDING, TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc))); local_irq_save(flags); ip->tx_queue_entries++; if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) netif_stop_queue(dev); local_irq_restore(flags); return NETDEV_TX_OK; }
netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); struct sk_buff *nskb; int padlen; u8 *trailer; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; /* * We have to make sure that the trailer ends up as the very * last 4 bytes of the packet. This means that we have to pad * the packet to the minimum ethernet frame size, if necessary, * before adding the trailer. */ padlen = 0; if (skb->len < 60) padlen = 60 - skb->len; nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC); if (nskb == NULL) { kfree_skb(skb); return NETDEV_TX_OK; } skb_reserve(nskb, NET_IP_ALIGN); skb_reset_mac_header(nskb); skb_set_network_header(nskb, skb_network_header(skb) - skb->head); skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head); skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); kfree_skb(skb); if (padlen) { u8 *pad = skb_put(nskb, padlen); memset(pad, 0, padlen); } trailer = skb_put(nskb, 4); trailer[0] = 0x80; trailer[1] = 1 << p->port; trailer[2] = 0x10; trailer[3] = 0x00; nskb->protocol = htons(ETH_P_TRAILER); nskb->dev = p->parent->dst->master_netdev; dev_queue_xmit(nskb); return NETDEV_TX_OK; }
static netdev_tx_t nic_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct nic_priv *priv = netdev_priv(netdev); netif_info(priv, drv, netdev, "%s(#%d), orig, src:%pI4, dst:%pI4\n", __func__, __LINE__, &(ip_hdr(skb)->saddr), &(ip_hdr(skb)->daddr)); priv->tx_len = skb->len; if (likely(priv->tx_len < MAX_ETH_FRAME_SIZE)) { if (priv->tx_len < ETH_ZLEN) { memset(priv->tx_buf, 0, ETH_ZLEN); priv->tx_len = ETH_ZLEN; } skb_copy_and_csum_dev(skb, priv->tx_buf); dev_kfree_skb_any(skb); } else { dev_kfree_skb_any(skb); netdev->stats.tx_dropped++; return NETDEV_TX_OK; } nic_hw_xmit(netdev); return NETDEV_TX_OK; }
/* Build a generic-netlink OVS_PACKET message around @skb and unicast it
 * to the userspace listener identified by upcall_info->pid.
 *
 * If the skb carries an out-of-band VLAN tag, a clone is made and the
 * tag is pushed back into the packet data first, so userspace sees the
 * frame as it would appear on the wire.  Returns 0 on success or a
 * negative errno.
 */
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	unsigned int len;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;
		/* __vlan_put_tag() frees nskb itself on failure, so a bare
		 * return here does not leak the clone.
		 */
		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;
		nskb->vlan_tci = 0;
		skb = nskb;
	}

	/* A netlink attribute length is a u16; refuse packets whose
	 * attribute-encoded size would not fit.
	 */
	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Size the upcall message: header + packet attribute + flow-key
	 * attribute (+ 8-byte userdata for action upcalls).
	 */
	len = sizeof(struct ovs_header);
	len += nla_total_size(skb->len);
	len += nla_total_size(FLOW_BUFSIZE);
	if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
		len += nla_total_size(8);

	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	/* NOTE(review): genlmsg_put()/nla_put_u64() returns are not
	 * checked; presumably the size computed above guarantees they
	 * cannot fail — confirm against the attribute sizing.
	 */
	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, nla_get_u64(upcall_info->userdata));

	/* Reserve the packet attribute, then copy the frame (completing
	 * any pending checksum) directly into it.
	 */
	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
	skb_copy_and_csum_dev(skb, nla_data(nla));

	err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);

out:
	/* kfree_skb(NULL) is a no-op, so this is safe when no VLAN clone
	 * was made.
	 */
	kfree_skb(nskb);
	return err;
}