static int c2_down(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;

	if (netif_msg_ifdown(c2_port))
		pr_debug("%s: disabling interface\n",
			netdev->name);

	/* Wait for all the queued packets to get sent */
	c2_tx_interrupt(netdev);

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Disable IRQs by clearing the interrupt mask */
	writel(1, c2dev->regs + C2_IDIS);
	writel(0, c2dev->regs + C2_NIMR0);

	/* missing: Stop transmitter */

	/* missing: Stop receiver */

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* missing: Turn off LEDs here */

	/* Free all buffers in the host descriptor rings */
	c2_tx_clean(c2_port);
	c2_rx_clean(c2_port);

	/* Free the host descriptor rings */
	kfree(c2_port->rx_ring.start);
	kfree(c2_port->tx_ring.start);
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return 0;
}
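For context, here is a minimal sketch of how handlers like c2_up() and c2_down() are typically wired into a Linux network driver as the open/stop callbacks. The ops-table and helper names below (c2_sketch_netdev_ops, c2_sketch_attach_ops) are illustrative assumptions, not names taken from this driver.

/* Sketch only (assumes <linux/netdevice.h>): register c2_up()/c2_down()
 * as the open/stop callbacks of a net_device.  The identifiers
 * c2_sketch_netdev_ops and c2_sketch_attach_ops are illustrative.
 */
static const struct net_device_ops c2_sketch_netdev_ops = {
	.ndo_open = c2_up,	/* invoked when the interface is brought up */
	.ndo_stop = c2_down,	/* invoked when the interface is taken down */
};

static void c2_sketch_attach_ops(struct net_device *netdev)
{
	netdev->netdev_ops = &c2_sketch_netdev_ops;
}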
static int c2_up(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_element *elem;
	struct c2_rxp_hdr *rxp_hdr;
	struct in_device *in_dev;
	size_t rx_size, tx_size;
	int ret, i;
	unsigned int netimr0;

	if (netif_msg_ifup(c2_port))
		pr_debug("%s: enabling interface\n", netdev->name);

	/* Set the Rx buffer size based on MTU */
	c2_set_rxbufsize(c2_port);

	/* Allocate DMA'able memory for Tx/Rx host descriptor rings */
	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
					    &c2_port->dma);
	if (c2_port->mem == NULL) {
		pr_debug("Unable to allocate memory for "
			"host descriptor rings\n");
		return -ENOMEM;
	}

	memset(c2_port->mem, 0, c2_port->mem_size);

	/* Create the Rx host descriptor ring */
	if ((ret =
	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			      c2dev->mmio_rxp_ring))) {
		pr_debug("Unable to create RX ring\n");
		goto bail0;
	}

	/* Allocate Rx buffers for the host descriptor ring */
	if (c2_rx_fill(c2_port)) {
		pr_debug("Unable to fill RX ring\n");
		ret = -ENOMEM;	/* ret was left at 0 on this path */
		goto bail1;
	}

	/* Create the Tx host descriptor ring */
	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
				    c2_port->dma + rx_size,
				    c2dev->mmio_txp_ring))) {
		pr_debug("Unable to create TX ring\n");
		goto bail1;
	}

	/* Set the TX pointer to where we left off */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
	    c2_port->tx_ring.start + c2dev->cur_tx;

	/* missing: Initialize MAC */

	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
	     i++, elem++) {
		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
		rxp_hdr->flags = 0;
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
			     elem->hw_desc + C2_RXP_FLAGS);
	}

	/* Enable network packets */
	netif_start_queue(netdev);

	/* Enable IRQ */
	writel(0, c2dev->regs + C2_IDIS);
	netimr0 = readl(c2dev->regs + C2_NIMR0);
	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
	writel(netimr0, c2dev->regs + C2_NIMR0);

	/* Tell the stack to ignore arp requests for ipaddrs bound to
	 * other interfaces.  This is needed to prevent the host stack
	 * from responding to arp requests to the ipaddr bound on the
	 * rdma interface.
	 */
	in_dev = in_dev_get(netdev);
	IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
	in_dev_put(in_dev);

	return 0;

      bail1:
	c2_rx_clean(c2_port);
	kfree(c2_port->rx_ring.start);

      bail0:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return ret;
}
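As a usage note, pci_alloc_consistent()/pci_free_consistent() used above are the legacy PCI DMA helpers. Below is a hedged sketch of the equivalent ring allocation with the generic DMA API; the helper names c2_sketch_alloc_rings/c2_sketch_free_rings are assumptions for illustration, not part of the driver.

/* Sketch only (assumes <linux/dma-mapping.h> and <linux/pci.h>): the same
 * coherent allocation via the generic DMA API.  On recent kernels
 * dma_alloc_coherent() returns zeroed memory, which would make the
 * explicit memset() in c2_up() redundant.
 */
static void *c2_sketch_alloc_rings(struct pci_dev *pcidev, size_t size,
				   dma_addr_t *dma)
{
	return dma_alloc_coherent(&pcidev->dev, size, dma, GFP_KERNEL);
}

static void c2_sketch_free_rings(struct pci_dev *pcidev, size_t size,
				 void *mem, dma_addr_t dma)
{
	dma_free_coherent(&pcidev->dev, size, mem, dma);
}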