/*
 * Load the rx ring.
 *
 * Loading rings is safe without holding the spin lock, since this is
 * done only before the device is enabled; no interrupts have been
 * enabled yet, so the interrupt handler/tasklet handler cannot race
 * with us.
 */
static void ar2313_load_rx_ring(struct net_device *dev, int nr_bufs)
{
    struct ar2313_private *sp = dev->priv;
    short i, idx;

    idx = sp->rx_skbprd;

    for (i = 0; i < nr_bufs; i++) {
        struct sk_buff *skb;
        ar2313_descr_t *rd;

        if (sp->rx_skb[idx]) {
#if DEBUG_RX
            printk(KERN_INFO "ar2313 rx refill full\n");
#endif /* DEBUG_RX */
            break;
        }

        /* partha: create additional room for the second GRE fragment */
        skb = alloc_skb(AR2313_BUFSIZE + 128, GFP_ATOMIC);
        if (!skb) {
            printk(KERN_ERR "%s: no memory for rx skb\n", __func__);
            break;
        }

        /* partha: create additional room in the front for tx pkt capture */
        skb_reserve(skb, 32);

        /* Make sure the IP header starts on a fresh cache line. */
        skb->dev = dev;
        skb_reserve(skb, RX_OFFSET);
        sp->rx_skb[idx] = skb;

        rd = (ar2313_descr_t *) &sp->rx_ring[idx];

        /* initialize dma descriptor */
        rd->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
                     DMA_RX1_CHAINED);
        rd->addr = virt_to_phys(skb->data);
        rd->descr = virt_to_phys(&sp->rx_ring[(idx + 1) &
                                              (AR2313_DESCR_ENTRIES - 1)]);
        rd->status = DMA_RX_OWN;

        idx = DSC_NEXT(idx);
    }

    if (!i) {
#if DEBUG_ERR
        printk(KERN_INFO
               "Out of memory when allocating standard receive buffers\n");
#endif /* DEBUG_ERR */
    } else {
        sp->rx_skbprd = idx;
    }
}
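/*
 * DSC_NEXT() is used above but not defined in this excerpt. Given that
 * the chained-descriptor pointer is computed as
 * (idx + 1) & (AR2313_DESCR_ENTRIES - 1), the macro is presumably the
 * same power-of-two ring advance. A minimal sketch of that assumption,
 * not the driver's verbatim definition:
 */
#define DSC_NEXT(idx) (((idx) + 1) & (AR2313_DESCR_ENTRIES - 1))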
static void ar231x_allocate_dma_descriptors(struct eth_device *edev)
{
    struct ar231x_eth_priv *priv = edev->priv;
    u16 ar231x_descr_size = sizeof(struct ar231x_descr);
    u16 i;

    /* The tx path gets a single descriptor. */
    priv->tx_ring = xmalloc(ar231x_descr_size);
    dev_dbg(&edev->dev, "allocate tx_ring @ %p\n", priv->tx_ring);

    priv->rx_ring = xmalloc(ar231x_descr_size * AR2313_RXDSC_ENTRIES);
    dev_dbg(&edev->dev, "allocate rx_ring @ %p\n", priv->rx_ring);

    priv->rx_buffer = xmalloc(AR2313_RX_BUFSIZE * AR2313_RXDSC_ENTRIES);
    dev_dbg(&edev->dev, "allocate rx_buffer @ %p\n", priv->rx_buffer);

    /* Initialize the rx descriptors */
    for (i = 0; i < AR2313_RXDSC_ENTRIES; i++) {
        struct ar231x_descr *rxdsc = &priv->rx_ring[i];

        ar231x_flash_rxdsc(rxdsc);
        rxdsc->buffer_ptr = (u32)(priv->rx_buffer + AR2313_RX_BUFSIZE * i);
        rxdsc->next_dsc_ptr = (u32)&priv->rx_ring[DSC_NEXT(i)];
    }

    /* Set the initial position of the ring descriptor. */
    priv->next_rxdsc = &priv->rx_ring[0];
}
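/*
 * ar231x_flash_rxdsc() is called above but its body is not part of this
 * excerpt. A plausible sketch, assuming it hands a descriptor back to
 * the DMA engine the same way the ar2313 rx path recycles descriptors
 * (status set to DMA_RX_OWN, devcs reloaded with the buffer size). The
 * constants are assumed to match the ar2313 definitions; the real body
 * may differ.
 */
static void ar231x_flash_rxdsc(struct ar231x_descr *rxdsc)
{
    rxdsc->status = DMA_RX_OWN;
    rxdsc->devcs = AR2313_RX_BUFSIZE << DMA_RX1_BSIZE_SHIFT;
}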
                skb_reserve(skb_new, RX_OFFSET + 32);
                /* reset descriptor's curr_addr */
                rxdesc->addr = virt_to_phys(skb_new->data);

                sp->stats.rx_packets++;
                sp->rx_skb[idx] = skb_new;
            } else {
                sp->stats.rx_dropped++;
            }
        }

        rxdesc->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
                         DMA_RX1_CHAINED);
        rxdesc->status = DMA_RX_OWN;

        idx = DSC_NEXT(idx);
    }

    sp->cur_rx = idx;

    return rval;
}

static void ar2313_tx_int(struct net_device *dev)
{
    struct ar2313_private *sp = dev->priv;
    u32 idx;
    struct sk_buff *skb;
    ar2313_descr_t *txdesc;
    unsigned int status = 0;
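/*
 * The body of ar2313_tx_int() is truncated in this excerpt after the
 * local declarations. For context, a completion handler for this style
 * of chained-descriptor DMA engine typically walks the ring from the
 * last reaped slot and stops at the first descriptor the hardware still
 * owns. The sketch below is hypothetical: DMA_TX_OWN, sp->tx_csm,
 * sp->tx_prd, sp->tx_skb, and sp->tx_ring are assumed by analogy with
 * the rx path and common ring bookkeeping, not taken from the driver.
 */
    idx = sp->tx_csm;
    while (idx != sp->tx_prd) {
        txdesc = &sp->tx_ring[idx];
        status = txdesc->status;

        if (status & DMA_TX_OWN)
            break;  /* DMA engine still owns this descriptor */

        skb = sp->tx_skb[idx];
        if (skb) {
            dev_kfree_skb_irq(skb);
            sp->tx_skb[idx] = NULL;
        }

        idx = DSC_NEXT(idx);
    }
    sp->tx_csm = idx;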