/* netif_tx_lock held, process context with BHs disabled */
static int enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
		netif_stop_queue(netdev);

	netdev->trans_start = jiffies;

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
/* netif_tx_lock held, process context with BHs disabled */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
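/*
 * enic_hard_start_xmit() follows the standard Tx flow-control contract:
 * stop the queue under wq_lock as soon as a worst-case skb (MAX_SKB_FRAGS
 * fragments plus head descriptors) might no longer fit, and let the
 * completion path wake it.  Below is a minimal sketch of the xmit half of
 * that pattern; struct my_priv, my_ring_free() and my_ring_post() are
 * hypothetical stand-ins for the vnic_wq API, not part of this driver.
 */
struct my_priv {			/* hypothetical driver state */
	spinlock_t ring_lock;
	/* ... descriptor ring bookkeeping ... */
};

unsigned int my_ring_free(struct my_priv *p);	/* free descriptor count */
void my_ring_post(struct my_priv *p, struct sk_buff *skb);

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&p->ring_lock, flags);

	if (my_ring_free(p) < skb_shinfo(skb)->nr_frags + 1) {
		/* Should not happen if stop/wake is paired correctly;
		 * the stack requeues the skb on NETDEV_TX_BUSY.
		 */
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&p->ring_lock, flags);
		return NETDEV_TX_BUSY;
	}

	my_ring_post(p, skb);

	/* Stop as soon as a maximally fragmented skb might no longer
	 * fit, so xmit is never entered against a full ring.
	 */
	if (my_ring_free(p) < MAX_SKB_FRAGS + 1)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&p->ring_lock, flags);
	return NETDEV_TX_OK;
}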
/* Per-entry Tx completion callback: reclaim sent buffers, then wake the
 * queue once a worst-case skb would fit again.
 */
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
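/*
 * enic_wq_service() is the completion half of the same contract: reclaim
 * finished descriptors under the per-queue lock, then wake the queue only
 * once a maximally fragmented skb fits again, mirroring the
 * MAX_SKB_FRAGS + 1 test in the xmit path.  The matching half of the
 * sketch above; my_ring_reclaim() is likewise hypothetical.
 */
void my_ring_reclaim(struct my_priv *p);	/* unmap/free completed skbs */

static void sketch_tx_complete(struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);

	/* Called from IRQ or BH context, hence the plain spin_lock(). */
	spin_lock(&p->ring_lock);

	my_ring_reclaim(p);

	if (netif_queue_stopped(dev) &&
	    my_ring_free(p) >= MAX_SKB_FRAGS + 1)
		netif_wake_queue(dev);

	spin_unlock(&p->ring_lock);
}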
int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (!fnic->vlan_hw_insert) {
		/* No hardware VLAN offload: prepend the 802.1Q tag in
		 * software, followed by the FCoE encapsulation header.
		 */
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		/* Hardware inserts the tag; a plain Ethernet header
		 * is enough here.
		 */
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (is_flogi_frame(fh)) {
		/* FLOGI is addressed by the FC-MAP-derived MAC and
		 * sourced from the adapter's own MAC address.
		 */
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
		memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
	} else {
		if (fnic->fcoui_mode)
			fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
		else
			memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
		memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
	}

	tot_len = skb->len;
	BUG_ON(tot_len % 4);	/* FC frames are a multiple of 4 bytes */

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	/* Post the whole frame as a single sop/eop descriptor. */
	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);

fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}
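/*
 * fnic_send_frame() posts the bus address from pci_map_single() without
 * checking it.  A minimal hardening sketch for kernels of this vintage,
 * assuming the same fnic->pdev field as above; fnic_map_frame() is a
 * hypothetical helper, not part of the driver.
 */
static int fnic_map_frame(struct fnic *fnic, void *va, u32 len,
			  dma_addr_t *pa)
{
	*pa = pci_map_single(fnic->pdev, va, len, PCI_DMA_TODEVICE);

	/* Fail the send early rather than posting a bad bus address. */
	if (pci_dma_mapping_error(fnic->pdev, *pa))
		return -ENOMEM;

	return 0;
}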