static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr, ctrl;
	unsigned long flags;

	/* Clean TX Ring */
	greth_clean_tx(greth->netdev);

	if (unlikely(greth->tx_free <= 0)) {
		spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	bdp = greth->tx_bd_base + greth->tx_next;
	dma_addr = greth_read_bd(&bdp->addr);

	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
	greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);

	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

out:
	dev_kfree_skb(skb);
	return err;
}
/**
 * @ingroup ctrl_pkt_functions
 * This function dispatches control packet to the h/w interface
 * @return zero(success) or -ve value(failure)
 */
int SendControlPacket(struct bcm_mini_adapter *Adapter, char *pControlPacket)
{
	struct bcm_leader *PLeader = (struct bcm_leader *)pControlPacket;

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Tx");
	if (!pControlPacket || !Adapter) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
				"Got NULL Control Packet or Adapter");
		return STATUS_FAILURE;
	}
	if ((atomic_read(&Adapter->CurrNumFreeTxDesc) <
			((PLeader->PLength-1)/MAX_DEVICE_DESC_SIZE)+1)) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
				"NO FREE DESCRIPTORS TO SEND CONTROL PACKET");
		return STATUS_FAILURE;
	}

	/* Update the netdevice statistics */
	/* Dump Packet */
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
			"Leader Status: %x", PLeader->Status);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
			"Leader VCID: %x", PLeader->Vcid);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
			"Leader Length: %x", PLeader->PLength);
	if (Adapter->device_removed)
		return 0;

	if (netif_msg_pktdata(Adapter))
		print_hex_dump(KERN_DEBUG, PFX "tx control: ", DUMP_PREFIX_NONE,
			       16, 1, pControlPacket,
			       PLeader->PLength + LEADER_SIZE, 0);

	Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
				    pControlPacket,
				    (PLeader->PLength + LEADER_SIZE));

	atomic_dec(&Adapter->CurrNumFreeTxDesc);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL,
			"<=========");
	return STATUS_SUCCESS;
}
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_reserve(skb, 2);
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_stop_subqueue(dev, queue);
#else
	netif_stop_queue(dev);
#endif

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);

		if (unlikely(status & GRETH_BD_EN)) {
			break;
		}

		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;
		} else {
			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev,
						 "low on memory - packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr),
							      pkt_len);

				memcpy(skb_put(skb, pkt_len),
				       phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE,
					   DMA_FROM_DEVICE);

		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status, dma_addr;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
	unsigned long flags;
	u16 tx_last;

	nr_frags = skb_shinfo(skb)->nr_frags;
	tx_last = greth->tx_last;
	rmb(); /* tx_last is updated by the poll task */

	if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
		netif_stop_queue(dev);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;
	else
		status = GRETH_BD_IE;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_BD_EN;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			status |= GRETH_TXBD_CSALL;
		status |= skb_frag_size(frag) & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;
		else
			status |= GRETH_BD_IE; /* enable IRQ on last fragment */

		greth_write_bd(&bdp->stat, status);

		dma_addr = skb_frag_dma_map(greth->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	wmb();

	/* Enable the descriptor chain by enabling the first descriptor */
	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat,
		       greth_read_bd(&bdp->stat) | GRETH_BD_EN);

	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth->tx_next = curr_tx;
	greth_enable_tx_and_irq(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded and disable descriptor */
	for (i = 0; greth->tx_next + i != curr_tx; i++) {
		bdp = greth->tx_bd_base + greth->tx_next + i;
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
		greth_write_bd(&bdp->stat, 0);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}
static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}
static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;
	while (packet_count < budget) {
		/* First time in loop see if we need to restore state */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;
	struct dma_desc *p = priv->dma_rx + entry;
	struct dma_desc *p_next;

#ifdef STMMAC_RX_DEBUG
	if (netif_msg_hw(priv)) {
		pr_debug(">>> stmmac_rx: descriptor ring:\n");
		display_ring(priv->dma_rx, rxsize);
	}
#endif
	count = 0;
	while (!priv->hw->desc->get_rx_owner(p)) {
		int status;

		if (count >= limit)
			break;

		count++;

		next_entry = (++priv->cur_rx) % rxsize;
		p_next = priv->dma_rx + next_entry;
		prefetch(p_next);

		/* read the status of the incoming frame */
		status = (priv->hw->desc->rx_status(&priv->dev->stats,
						    &priv->xstats, p));
		if (unlikely(status == discard_frame))
			priv->dev->stats.rx_errors++;
		else {
			struct sk_buff *skb;
			int frame_len;

			frame_len = priv->hw->desc->get_rx_frame_len(p);
			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP) */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;
#ifdef STMMAC_RX_DEBUG
			if (frame_len > ETH_FRAME_LEN)
				pr_debug("\tRX frame size %d, COE status: %d\n",
					 frame_len, status);

			if (netif_msg_hw(priv))
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					 p, entry, p->des2);
#endif
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
				       priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			priv->rx_skbuff[entry] = NULL;

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
#ifdef STMMAC_RX_DEBUG
			if (netif_msg_pktdata(priv)) {
				pr_info(" frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}
#endif
			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(status == csum_none)) {
				/* always for the old mac 10/100 */
				skb_checksum_none_assert(skb);
				netif_receive_skb(skb);
			} else {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				napi_gro_receive(&priv->napi, skb);
			}

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
		p = p_next;	/* use prefetched values */
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status = 0, dma_addr, ctrl;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
	unsigned long flags;

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* Clean TX Ring */
	greth_clean_tx_gbit(dev);

	if (greth->tx_free < nr_frags + 1) {
		spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;

	status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_TXBD_CSALL | GRETH_BD_EN;
		status |= frag->size & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;
		else
			status |= GRETH_BD_IE; /* enable IRQ on last fragment */

		greth_write_bd(&bdp->stat, status);

		dma_addr = dma_map_page(greth->dev,
					frag->page,
					frag->page_offset,
					frag->size,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	wmb();

	/* Enable the descriptor chain by enabling the first descriptor */
	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
	greth->tx_next = curr_tx;
	greth->tx_free -= nr_frags + 1;

	wmb();

	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded and disable descriptor */
	for (i = 0; greth->tx_next + i != curr_tx; i++) {
		bdp = greth->tx_bd_base + greth->tx_next + i;
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
		greth_write_bd(&bdp->stat, 0);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE |
				    GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current */
		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);

		if (!bad && newskb) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)),
							      pkt_len);

				skb_put(skb, pkt_len);

				if (greth->flags & GRETH_FLAG_RX_CSUM &&
				    hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb->ip_summed = CHECKSUM_NONE;

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev,
						 "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				dev->stats.rx_dropped++;
			}
		} else {
			if (net_ratelimit())
				dev_warn(greth->dev,
					 "Could not allocate SKB, dropping packet\n");
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);
		greth_enable_rx(greth);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status = 0, dma_addr;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (greth->tx_free < nr_frags + 1) {
		netif_stop_queue(dev);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;

	status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;

	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_TXBD_CSALL;
		status |= frag->size & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;

		/* ... last fragment, check if out of descriptors */
		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {

			/* Enable interrupts and stop queue */
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}

		greth_write_bd(&bdp->stat, status);

		dma_addr = dma_map_page(greth->dev,
					frag->page,
					frag->page_offset,
					frag->size,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	wmb();

	/* Enable the descriptors that we configured ... */
	for (i = 0; i < nr_frags + 1; i++) {
		bdp = greth->tx_bd_base + greth->tx_next;
		greth_write_bd(&bdp->stat,
			       greth_read_bd(&bdp->stat) | GRETH_BD_EN);
		greth->tx_next = NEXT_TX(greth->tx_next);
		greth->tx_free--;
	}

	greth_enable_tx(greth);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded */
	for (i = 0; greth->tx_next + i != curr_tx; i++) {
		bdp = greth->tx_bd_base + greth->tx_next + i;
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}
static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	int err = NETDEV_TX_OK;
	u32 status, dma_addr;

	bdp = greth->tx_bd_base + greth->tx_next;

	if (unlikely(greth->tx_free <= 0)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	dma_addr = greth_read_bd(&bdp->addr);

	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);

	/* Wrap around descriptor ring */
	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
		status |= GRETH_BD_WR;
	}

	greth->tx_next = NEXT_TX(greth->tx_next);
	greth->tx_free--;

	/* No more descriptors */
	if (unlikely(greth->tx_free == 0)) {

		/* Free transmitted descriptors */
		greth_clean_tx(dev);

		/* If nothing was cleaned, stop queue & wait for irq */
		if (unlikely(greth->tx_free == 0)) {
			status |= GRETH_BD_IE;
			netif_stop_queue(dev);
		}
	}

	/* Write descriptor control word and enable transmission */
	greth_write_bd(&bdp->stat, status);

	greth_enable_tx(greth);

out:
	dev_kfree_skb(skb);
	return err;
}
/**
 *  stmmac_xmit:
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : Tx entry point of the driver.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;

	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: BUG! Tx Ring full when queue awake\n",
			       __func__);
		}
		return NETDEV_TX_BUSY;
	}

	entry = priv->cur_tx % txsize;

#ifdef STMMAC_XMIT_DEBUG
	if ((skb->len > ETH_FRAME_LEN) || nfrags)
		pr_info("stmmac xmit:\n"
			"\tskb addr %p - len: %d - nopaged_len: %d\n"
			"\tn_frags: %d - ip_summed: %d - %s gso\n",
			skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
			!skb_is_gso(skb) ? "isn't" : "is");
#endif

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	desc = priv->dma_tx + entry;
	first = desc;

#ifdef STMMAC_XMIT_DEBUG
	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
		pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
			 "\t\tn_frags: %d, ip_summed: %d\n",
			 skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
#endif
	priv->tx_skbuff[entry] = skb;

	if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
		entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
		desc = priv->dma_tx + entry;
	} else {
		unsigned int nopaged_len = skb_headlen(skb);
		desc->des2 = dma_map_single(priv->device, skb->data,
					    nopaged_len, DMA_TO_DEVICE);
		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
						csum_insertion);
	}

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = frag->size;

		entry = (++priv->cur_tx) % txsize;
		desc = priv->dma_tx + entry;

		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
		desc->des2 = dma_map_page(priv->device, frag->page,
					  frag->page_offset,
					  len, DMA_TO_DEVICE);
		priv->tx_skbuff[entry] = NULL;
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
		wmb();
		priv->hw->desc->set_tx_owner(desc);
	}

	/* Interrupt on completion only for the latest segment */
	priv->hw->desc->close_tx_desc(desc);

#ifdef CONFIG_STMMAC_TIMER
	/* Clean IC while using timer */
	if (likely(priv->tm->enable))
		priv->hw->desc->clear_tx_ic(desc);
#endif

	wmb();

	/* To avoid a race condition, set the owner bit on the first
	 * descriptor last */
	priv->hw->desc->set_tx_owner(first);

	priv->cur_tx++;

#ifdef STMMAC_XMIT_DEBUG
	if (netif_msg_pktdata(priv)) {
		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
			"first=%p, nfrags=%d\n",
			(priv->cur_tx % txsize), (priv->dirty_tx % txsize),
			entry, first, nfrags);
		display_ring(priv->dma_tx, txsize);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
#endif
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		TX_DBG("%s: stop transmitted packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr);

	return NETDEV_TX_OK;
}
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE |
				    GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current, not needed if the
		 * current skb can be reused */
		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)),
							      pkt_len);

				skb_put(skb, pkt_len);

				if (dev->features & NETIF_F_RXCSUM &&
				    hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev,
						 "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				/* reusing current skb, so it is a drop */
				dev->stats.rx_dropped++;
			}
		} else if (bad) {
			/* Bad Frame transfer, the skb is reused */
			dev->stats.rx_dropped++;
		} else {
			/* Failed Allocating a new skb. This is rather stupid
			 * but the current "filled" skb is reused, as if
			 * transfer failure. One could argue that RX descriptor
			 * table handling should be divided into cleaning and
			 * filling as the TX part of the driver
			 */
			if (net_ratelimit())
				dev_warn(greth->dev,
					 "Could not allocate SKB, dropping packet\n");
			/* reusing current skb, so it is a drop */
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);
		spin_lock_irqsave(&greth->devlock, flags);
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
/**
 * When a control packet is received, analyze the
 * "status" and call appropriate response function.
 * Enqueue the control packet for Application.
 * @return None
 */
static VOID handle_rx_control_packet(struct bcm_mini_adapter *Adapter,
				     struct sk_buff *skb)
{
	struct bcm_tarang_data *pTarang = NULL;
	BOOLEAN HighPriorityMessage = FALSE;
	struct sk_buff *newPacket = NULL;
	CHAR cntrl_msg_mask_bit = 0;
	BOOLEAN drop_pkt_flag = TRUE;
	USHORT usStatus = *(PUSHORT)(skb->data);

	if (netif_msg_pktdata(Adapter))
		print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
			       16, 1, skb->data, skb->len, 0);

	switch (usStatus) {
	case CM_RESPONSES:		/* 0xA0 */
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
				DBG_LVL_ALL,
				"MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
		HighPriorityMessage = TRUE;
		break;
	case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
		HighPriorityMessage = TRUE;
		if (Adapter->LinkStatus == LINKUP_DONE)
			CmControlResponseMessage(Adapter,
						 (skb->data + sizeof(USHORT)));
		break;
	case LINK_CONTROL_RESP:		/* 0xA2 */
	case STATUS_RSP:		/* 0xA1 */
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
				DBG_LVL_ALL, "LINK_CONTROL_RESP");
		HighPriorityMessage = TRUE;
		LinkControlResponseMessage(Adapter,
					   (skb->data + sizeof(USHORT)));
		break;
	case STATS_POINTER_RESP:	/* 0xA6 */
		HighPriorityMessage = TRUE;
		StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
		break;
	case IDLE_MODE_STATUS:		/* 0xA3 */
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
				DBG_LVL_ALL,
				"IDLE_MODE_STATUS Type Message Got from F/W");
		InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
							  sizeof(USHORT)));
		HighPriorityMessage = TRUE;
		break;

	case AUTH_SS_HOST_MSG:
		HighPriorityMessage = TRUE;
		break;

	default:
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
				DBG_LVL_ALL, "Got Default Response");
		/* Let the Application Deal with This Packet */
		break;
	}

	/* Queue The Control Packet to The Application Queues */
	down(&Adapter->RxAppControlQueuelock);

	for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
		if (Adapter->device_removed)
			break;

		drop_pkt_flag = TRUE;
		/*
		 * There are cntrl msgs from A0 to AC. They have been mapped to
		 * bits 0 to C in the cntrl mask.
		 * Also, by default AD to BF have been masked to the rest of
		 * the bits... which will be ON by default.
		 * If the mask bit is enabled for a particular pkt status, send
		 * it out to the app, else stop it.
		 */
		cntrl_msg_mask_bit = (usStatus & 0x1F);
		/*
		 * printk("\ninew msg mask bit which is disable in mask:%X",
		 * cntrl_msg_mask_bit);
		 */
		if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit))
			drop_pkt_flag = FALSE;

		if ((drop_pkt_flag == TRUE) ||
				(pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN) ||
				((pTarang->AppCtrlQueueLen >
				  MAX_APP_QUEUE_LEN / 2) &&
				 (HighPriorityMessage == FALSE))) {
			/*
			 * Assumption:-
			 * 1. every tarang manages its own dropped pkt
			 *    statistics
			 * 2. Total packets dropped per tarang will be equal to
			 *    the sum of all types of dropped pkts by that
			 *    tarang only.
			 */
			switch (*(PUSHORT)skb->data) {
			case CM_RESPONSES:
				pTarang->stDroppedAppCntrlMsgs.cm_responses++;
				break;
			case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
				pTarang->stDroppedAppCntrlMsgs.cm_control_newdsx_multiclassifier_resp++;
				break;
			case LINK_CONTROL_RESP:
				pTarang->stDroppedAppCntrlMsgs.link_control_resp++;
				break;
			case STATUS_RSP:
				pTarang->stDroppedAppCntrlMsgs.status_rsp++;
				break;
			case STATS_POINTER_RESP:
				pTarang->stDroppedAppCntrlMsgs.stats_pointer_resp++;
				break;
			case IDLE_MODE_STATUS:
				pTarang->stDroppedAppCntrlMsgs.idle_mode_status++;
				break;
			case AUTH_SS_HOST_MSG:
				pTarang->stDroppedAppCntrlMsgs.auth_ss_host_msg++;
				break;
			default:
				pTarang->stDroppedAppCntrlMsgs.low_priority_message++;
				break;
			}

			continue;
		}

		newPacket = skb_clone(skb, GFP_KERNEL);
		if (!newPacket)
			break;
		ENQUEUEPACKET(pTarang->RxAppControlHead,
			      pTarang->RxAppControlTail, newPacket);
		pTarang->AppCtrlQueueLen++;
	}
	up(&Adapter->RxAppControlQueuelock);
	wake_up(&Adapter->process_read_wait_queue);
	dev_kfree_skb(skb);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
			"After wake_up_interruptible");
}
static VOID handle_rx_control_packet(PMINI_ADAPTER Adapter,
				     struct sk_buff *skb)
{
	PPER_TARANG_DATA pTarang = NULL;
	BOOLEAN HighPriorityMessage = FALSE;
	struct sk_buff *newPacket = NULL;
	CHAR cntrl_msg_mask_bit = 0;
	BOOLEAN drop_pkt_flag = TRUE;
	USHORT usStatus = *(PUSHORT)(skb->data);

	if (netif_msg_pktdata(Adapter))
		print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
			       16, 1, skb->data, skb->len, 0);

	switch (usStatus) {
	case CM_RESPONSES:
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
				DBG_LVL_ALL,
				"MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
		HighPriorityMessage = TRUE;
		break;
	case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
		HighPriorityMessage = TRUE;
		if (Adapter->LinkStatus == LINKUP_DONE)
			CmControlResponseMessage(Adapter,
						 (skb->data + sizeof(USHORT)));
		break;
	case LINK_CONTROL_RESP:
	case STATUS_RSP:
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
				DBG_LVL_ALL, "LINK_CONTROL_RESP");
		HighPriorityMessage = TRUE;
		LinkControlResponseMessage(Adapter,
					   (skb->data + sizeof(USHORT)));
		break;
	case STATS_POINTER_RESP:
		HighPriorityMessage = TRUE;
		StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
		break;
	case IDLE_MODE_STATUS:
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
				DBG_LVL_ALL,
				"IDLE_MODE_STATUS Type Message Got from F/W");
		InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
							  sizeof(USHORT)));
		HighPriorityMessage = TRUE;
		break;
	case AUTH_SS_HOST_MSG:
		HighPriorityMessage = TRUE;
		break;
	default:
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
				DBG_LVL_ALL, "Got Default Response");
		break;
	}

	down(&Adapter->RxAppControlQueuelock);
	for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
		if (Adapter->device_removed)
			break;

		drop_pkt_flag = TRUE;
		cntrl_msg_mask_bit = (usStatus & 0x1F);

		if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit))
			drop_pkt_flag = FALSE;

		if ((drop_pkt_flag == TRUE) ||
				(pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN) ||
				((pTarang->AppCtrlQueueLen >
				  MAX_APP_QUEUE_LEN / 2) &&
				 (HighPriorityMessage == FALSE))) {
			switch (*(PUSHORT)skb->data) {
			case CM_RESPONSES:
				pTarang->stDroppedAppCntrlMsgs.cm_responses++;
				break;
			case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
				pTarang->stDroppedAppCntrlMsgs.cm_control_newdsx_multiclassifier_resp++;
				break;
			case LINK_CONTROL_RESP:
				pTarang->stDroppedAppCntrlMsgs.link_control_resp++;
				break;
			case STATUS_RSP:
				pTarang->stDroppedAppCntrlMsgs.status_rsp++;
				break;
			case STATS_POINTER_RESP:
				pTarang->stDroppedAppCntrlMsgs.stats_pointer_resp++;
				break;
			case IDLE_MODE_STATUS:
				pTarang->stDroppedAppCntrlMsgs.idle_mode_status++;
				break;
			case AUTH_SS_HOST_MSG:
				pTarang->stDroppedAppCntrlMsgs.auth_ss_host_msg++;
				break;
			default:
				pTarang->stDroppedAppCntrlMsgs.low_priority_message++;
				break;
			}

			continue;
		}

		newPacket = skb_clone(skb, GFP_KERNEL);
		if (!newPacket)
			break;
		ENQUEUEPACKET(pTarang->RxAppControlHead,
			      pTarang->RxAppControlTail, newPacket);
		pTarang->AppCtrlQueueLen++;
	}
	up(&Adapter->RxAppControlQueuelock);
	wake_up(&Adapter->process_read_wait_queue);
	dev_kfree_skb(skb);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
			"After wake_up_interruptible");
}
static void cyrf6936_rx_tx(struct cyrf6936_net *p)
{
	u8 val, *data;
	size_t rx_len;
	struct sk_buff *skb;

	/* update signal level */
	cyrf6936_iw_rssi(p);

	/* rx */
	val = cyrf6936_rreg(p, RX_IRQ_STATUS);
	if (val & RXE_IRQ)
		goto err_rxe;
	if (val & RXC_IRQ) {
		/* debouncing, 2nd read */
		val = cyrf6936_rreg(p, RX_IRQ_STATUS);
		if (val & RXE_IRQ)
			goto err_rxe;

		/* get data length */
		rx_len = cyrf6936_rreg(p, RX_LENGTH);

		/* allocate buffer */
		skb = dev_alloc_skb(rx_len + NET_IP_ALIGN);
		if (!skb)
			goto err_oom;
		skb_reserve(skb, NET_IP_ALIGN);
		data = skb_put(skb, rx_len);

		/* read data */
		cyrf6936_wreg(p, RX_IRQ_STATUS, RXOW_IRQ);
		while (rx_len--)
			*data++ = cyrf6936_rreg(p, RX_BUFFER);

		/* dump received packet data */
		if (netif_msg_pktdata(p))
			print_hex_dump_bytes("cyrf6936 rx data: ",
					     DUMP_PREFIX_NONE,
					     skb->data, skb->len);

		skb->dev = p->netdev;
		skb->protocol = htons(ETH_P_ALL);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		p->stats.rx_packets++;
		/* rx_len was consumed by the copy loop above, so account
		 * the received length from the skb instead */
		p->stats.rx_bytes += skb->len;

		netif_rx_ni(skb);

		cyrf6936_rx_enable(p);
	}

	/* tx */
	val = cyrf6936_rreg(p, TX_IRQ_STATUS);
	if (val & TXE_IRQ)
		goto err_txe;
	if (val & TXC_IRQ) {
		/* debouncing, 2nd read */
		val = cyrf6936_rreg(p, TX_IRQ_STATUS);
		if (val & TXE_IRQ)
			goto err_txe;

		/* tx ok */
		p->stats.tx_packets++;
		p->stats.tx_bytes += p->tx_skb->len;
		if (netif_msg_tx_done(p))
			dev_dbg(&p->netdev->dev, "tx done\n");
		dev_kfree_skb(p->tx_skb);
		p->tx_skb = NULL;
		netif_wake_queue(p->netdev);

		cyrf6936_rx_enable(p);
	}

	if (!p->pollmode)
		enable_irq(p->netdev->irq);
	return;

err_rxe:
	if (netif_msg_rx_err(p))
		dev_info(&p->netdev->dev, "rx error\n");
	p->stats.rx_errors++;
	cyrf6936_rx_enable(p);
	return;
err_txe:
	if (netif_msg_tx_err(p))
		dev_info(&p->netdev->dev, "tx error\n");
	p->stats.tx_errors++;
	cyrf6936_rx_enable(p);
	return;
err_oom:
	dev_err(&p->netdev->dev, "out of memory, packet dropped\n");
	p->stats.rx_dropped++;
	cyrf6936_rx_enable(p);
}
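All of the snippets above gate their raw packet dumps on netif_msg_pktdata(), i.e. on the NETIF_MSG_PKTDATA bit of a per-device msg_enable bitmap. The sketch below is a minimal illustration of how that bitmap is usually wired up; struct my_priv, the debug module parameter and my_handle_rx() are hypothetical names used only here, while netif_msg_init(), the netif_msg_* helpers and print_hex_dump_bytes() are the standard kernel APIs the drivers above rely on.

/* Minimal sketch (not taken from any driver above): wiring msg_enable so
 * that checks like netif_msg_pktdata(priv) work. */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/skbuff.h>

static int debug = -1;			/* -1 means "use the default bits" */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug message level (-1 = defaults)");

struct my_priv {			/* hypothetical driver private data */
	struct net_device *netdev;
	u32 msg_enable;			/* NETIF_MSG_* bitmap */
};

static void my_init_msg_level(struct my_priv *priv)
{
	/* Convert the numeric "debug" level into NETIF_MSG_* bits; with
	 * debug < 0 the listed defaults apply. NETIF_MSG_PKTDATA is normally
	 * not a default because full hex dumps are very verbose. */
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
}

static void my_handle_rx(struct my_priv *priv, struct sk_buff *skb)
{
	/* Same pattern as in the functions above: dump payload bytes only
	 * when the pktdata bit has been enabled by the user. */
	if (netif_msg_pktdata(priv))
		print_hex_dump_bytes("rx data: ", DUMP_PREFIX_OFFSET,
				     skb->data, skb->len);
}

Drivers typically also expose the same bitmap through the ethtool get_msglevel/set_msglevel hooks, so the pktdata bit can be toggled at runtime without reloading the module.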