static int greth_open(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	int err;

	err = greth_init_rings(greth);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
		return err;
	}

	err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
	if (err) {
		if (netif_msg_ifup(greth))
			dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
		greth_clean_rings(greth);
		return err;
	}

	if (netif_msg_ifup(greth))
		dev_dbg(&dev->dev, " starting queue\n");
	netif_start_queue(dev);

	GRETH_REGSAVE(greth->regs->status, 0xFF);

	napi_enable(&greth->napi);

	greth_enable_irqs(greth);
	greth_enable_tx(greth);
	greth_enable_rx(greth);

	return 0;
}
static int greth_close(struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);

	napi_disable(&greth->napi);

	greth_disable_tx(greth);

	netif_stop_queue(dev);

	free_irq(greth->irq, (void *) dev);

	greth_clean_rings(greth);

	return 0;
}
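/*
 * For context: a minimal sketch of how greth_open() and greth_close() would
 * typically be registered as the ndo_open/ndo_stop callbacks of the device.
 * Only greth_open and greth_close come from the code above; the ops table
 * name and the probe-time assignment shown in the comment are assumptions,
 * not taken from this excerpt, and a complete driver would also fill in
 * .ndo_start_xmit and friends.
 */
static const struct net_device_ops greth_netdev_ops = {
	.ndo_open	= greth_open,
	.ndo_stop	= greth_close,
};

/* The probe routine would then hook this up with something like:
 *	dev->netdev_ops = &greth_netdev_ops;
 * before calling register_netdev(dev).
 */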
static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {

		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE + NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}

	} else {

		/* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {

			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {

			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;

cleanup:
	greth_clean_rings(greth);
	return -ENOMEM;
}
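/*
 * greth_clean_rings() is called on every error path above but is not part of
 * this excerpt. Purely as an illustration, the sketch below (with the
 * hypothetical name greth_clean_rings_sketch) shows what undoing the 10/100
 * allocations made in greth_init_rings() could look like. It is not the
 * driver's actual implementation: the real cleanup also has to unmap and free
 * the gbit-mode RX skbs and any in-flight TX skbs, and it would need to cope
 * with a descriptor whose addr field was never written because the DMA
 * mapping itself failed.
 */
static void greth_clean_rings_sketch(struct greth_private *greth)
{
	struct greth_bd *rx_bd = greth->rx_bd_base;
	struct greth_bd *tx_bd = greth->tx_bd_base;
	int i;

	for (i = 0; i < GRETH_RXBD_NUM; i++) {
		if (greth->rx_bufs[i] == NULL)
			continue;	/* allocation may have failed part-way */
		/* Assumes rx_bd[i].addr was written before any failure. */
		dma_unmap_single(greth->dev,
				 greth_read_bd(&rx_bd[i].addr),
				 MAX_FRAME_SIZE,
				 DMA_FROM_DEVICE);
		kfree(greth->rx_bufs[i]);
		greth->rx_bufs[i] = NULL;
	}

	for (i = 0; i < GRETH_TXBD_NUM; i++) {
		if (greth->tx_bufs[i] == NULL)
			continue;
		dma_unmap_single(greth->dev,
				 greth_read_bd(&tx_bd[i].addr),
				 MAX_FRAME_SIZE,
				 DMA_TO_DEVICE);
		kfree(greth->tx_bufs[i]);
		greth->tx_bufs[i] = NULL;
	}
}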