Example 1
static void dma_block_input(struct net_device *dev, int count,
			    struct sk_buff *skb, int ring_offset)
{
    unsigned int nic_base = dev->base_addr;
    int xfer_count = count;
    char *buf = skb->data;

    if ((ei_debug > 4) && (count != 4))
	netdev_dbg(dev, "[bi=%d]\n", count+4);
    if (ei_status.dmaing) {
	netdev_notice(dev, "DMAing conflict in dma_block_input."
	       "[DMAstat:%1x][irqlock:%1x]\n",
	       ei_status.dmaing, ei_status.irqlock);
	return;
    }
    ei_status.dmaing |= 0x01;
    outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
    outb_p(count & 0xff, nic_base + EN0_RCNTLO);
    outb_p(count >> 8, nic_base + EN0_RCNTHI);
    outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
    outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
    outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);

    insw(nic_base + PCNET_DATAPORT, buf, count >> 1);
    if (count & 0x01) {
	buf[count-1] = inb(nic_base + PCNET_DATAPORT);
	xfer_count++;
    }

    /* This was for the ALPHA version only, but enough people have been
       encountering problems that it is still here. */
#ifdef PCMCIA_DEBUG
    if (ei_debug > 4) {		/* DMA termination address check... */
	int addr, tries = 20;
	do {
	    /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
	       -- it's broken for Rx on some cards! */
	    int high = inb_p(nic_base + EN0_RSARHI);
	    int low = inb_p(nic_base + EN0_RSARLO);
	    addr = (high << 8) + low;
	    if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff))
		break;
	} while (--tries > 0);
	if (tries <= 0)
	    netdev_notice(dev, "RX transfer address mismatch,"
		   "%#4.4x (expected) vs. %#4.4x (actual).\n",
		   ring_offset + xfer_count, addr);
    }
#endif
    outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
    ei_status.dmaing &= ~0x01;
} /* dma_block_input */
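The transfer above moves count >> 1 words with insw() and mops up a single odd byte with inb(), bumping xfer_count because the NIC's remote-DMA address still advances by a whole word. A minimal user-space sketch of that split (the data port is simulated with a plain buffer; all names here are illustrative, not driver code):

#include <stdio.h>
#include <string.h>

static void block_input(unsigned char *dst, const unsigned char *src, int count)
{
	int xfer_count = count;

	/* Word transfers: the equivalent of insw(..., count >> 1). */
	memcpy(dst, src, count & ~1);
	if (count & 0x01) {
		dst[count - 1] = src[count - 1];	/* trailing inb() */
		xfer_count++;	/* the NIC pointer still advances a whole word */
	}
	printf("requested %d bytes, address advanced by %d\n", count, xfer_count);
}

int main(void)
{
	unsigned char src[7] = "packet", dst[7];

	block_input(dst, src, 7);	/* odd count exercises the tail byte */
	return 0;
}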
Example 2
static void dma_get_8390_hdr(struct net_device *dev,
			     struct e8390_pkt_hdr *hdr,
			     int ring_page)
{
    unsigned int nic_base = dev->base_addr;

    if (ei_status.dmaing) {
	netdev_notice(dev, "DMAing conflict in dma_block_input."
	       "[DMAstat:%1x][irqlock:%1x]\n",
	       ei_status.dmaing, ei_status.irqlock);
	return;
    }

    ei_status.dmaing |= 0x01;
    outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
    outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
    outb_p(0, nic_base + EN0_RCNTHI);
    outb_p(0, nic_base + EN0_RSARLO);		/* On page boundary */
    outb_p(ring_page, nic_base + EN0_RSARHI);
    outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);

    insw(nic_base + PCNET_DATAPORT, hdr,
	    sizeof(struct e8390_pkt_hdr)>>1);
    /* Fix for big endian systems */
    hdr->count = le16_to_cpu(hdr->count);

    outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
    ei_status.dmaing &= ~0x01;
}
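The le16_to_cpu() call matters because the 8390 stores the packet-header byte count little-endian; on a big-endian host the raw value would come out byte-swapped. A standalone sketch of the conversion (hypothetical names, plain user-space C):

#include <stdint.h>
#include <stdio.h>

/* Reassemble a little-endian 16-bit field byte-by-byte; correct on
   both little- and big-endian hosts, like le16_to_cpu(). */
static uint16_t le16_to_host(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t raw[2] = { 0x2a, 0x01 };	/* 0x012a stored little-endian */

	printf("count = 0x%04x\n", le16_to_host(raw));	/* 0x012a on any host */
	return 0;
}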
Example 3
/* ----------------------------------------------------------------------------
mace_interrupt
	The interrupt handler.
---------------------------------------------------------------------------- */
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
  struct net_device *dev = (struct net_device *) dev_id;
  mace_private *lp;
  unsigned int ioaddr;
  int status;
  int IntrCnt = MACE_MAX_IR_ITERATIONS;

  if (dev == NULL) {
    pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
	  irq);
    return IRQ_NONE;
  }

  /* Only touch the private area after dev is known to be valid. */
  lp = netdev_priv(dev);
  ioaddr = dev->base_addr;

  if (lp->tx_irq_disabled) {
    netdev_notice(dev, "Interrupt with tx_irq_disabled [isr=%02X, imr=%02X]\n",
		  inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
		  inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
    /* WARNING: MACE_IR has been read! */
    return IRQ_NONE;
  }

  if (!netif_device_present(dev)) {
    netdev_dbg(dev, "interrupt from dead card\n");
    return IRQ_NONE;
  }

  do {
    /* WARNING: MACE_IR is a READ/CLEAR port! */
    status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);

    pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);

    if (status & MACE_IR_RCVINT) {
      mace_rx(dev, MACE_MAX_RX_ITERATIONS);
    }

    if (status & MACE_IR_XMTINT) {
      unsigned char fifofc;
      unsigned char xmtrc;
      unsigned char xmtfs;

      fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
      if ((fifofc & MACE_FIFOFC_XMTFC) == 0) {
	lp->linux_stats.tx_errors++;
	outb(0xFF, ioaddr + AM2150_XMT_SKIP);
      }

      /* ... (remainder of the transmit and error accounting is truncated
	 in this excerpt; Example 12 contains the complete handler) ... */
    } /* if (status & MACE_IR_XMTINT) */

  } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));

  return IRQ_HANDLED;
} /* mace_interrupt */
Example 4
static void mace_tx_timeout(struct net_device *dev)
{
  mace_private *lp = netdev_priv(dev);
  struct pcmcia_device *link = lp->p_dev;

  netdev_notice(dev, "transmit timed out -- ");
#if RESET_ON_TIMEOUT
  pr_cont("resetting card\n");
  pcmcia_reset_card(link->socket);
#else /* #if RESET_ON_TIMEOUT */
  pr_cont("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
  dev->trans_start = jiffies; /* prevent tx timeout */
  netif_wake_queue(dev);
}
Example 5
static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	int len;

	strlcpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	snprintf(drvinfo->version, sizeof(drvinfo->version),
		 "%X", 0);
	len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		       "%X", 0);

	if (len >= sizeof(drvinfo->fw_version)) {
		/* Truncated output */
		netdev_notice(net_dev, "snprintf() = %d\n", len);
	}
	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}
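The truncation test works because snprintf() returns the length the full output would have had, not the number of bytes actually stored; a return value of sizeof(buf) or more therefore signals a cut-off string. A runnable sketch of the idiom (values are made up):

#include <stdio.h>

int main(void)
{
	char fw_version[4];
	int len = snprintf(fw_version, sizeof(fw_version), "%X", 0xBEEF);

	/* "BEEF" needs 4 chars plus a NUL, so it does not fit in 4 bytes. */
	if (len >= (int)sizeof(fw_version))
		fprintf(stderr, "truncated to \"%s\": needed %d bytes\n",
			fw_version, len + 1);
	return 0;
}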
Example 6
/* Rename bridge's brif symlink */
int br_sysfs_renameif(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	int err;

	/* If a rename fails, the rollback will cause another
	 * rename call with the existing name.
	 */
	if (!strncmp(p->sysfs_name, p->dev->name, IFNAMSIZ))
		return 0;

	err = sysfs_rename_link(br->ifobj, &p->kobj,
				p->sysfs_name, p->dev->name);
	if (err)
		netdev_notice(br->dev, "unable to rename link %s to %s\n",
			      p->sysfs_name, p->dev->name);
	else
		strlcpy(p->sysfs_name, p->dev->name, IFNAMSIZ);

	return err;
}
Example 7
/* Extract received frames from the buffer descriptors and send them to the upper layers.
 * (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, pktlen), p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			lp->stats.rx_dropped++;
			netdev_notice(dev, "Memory squeeze, dropping packet.\n");
		}

		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
			lp->stats.multicast++;

		/* reset ownership bit */
		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (lp->rx_tail == MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;
	}
}
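The skb_reserve(skb, 2) above is the usual NET_IP_ALIGN trick: with two bytes of headroom, the 14-byte Ethernet header ends on a 4-byte boundary, so the IP header that follows it lands aligned. A small sketch of the arithmetic (assuming the buffer itself starts 4-byte aligned):

#include <stdio.h>

#define ETH_HLEN 14	/* Ethernet header length */

int main(void)
{
	for (int headroom = 0; headroom <= 2; headroom += 2) {
		int ip_off = headroom + ETH_HLEN;

		printf("headroom %d: IP header at offset %d (%s)\n",
		       headroom, ip_off,
		       (ip_off % 4) ? "misaligned" : "4-byte aligned");
	}
	return 0;
}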
Example 8
/*
 * atl1e_check_options - Range Checking for Command Line Parameters
 * @adapter: board private structure
 *
 * This routine checks all command line parameters for valid user
 * input.  If an invalid value is given, or if no user specified
 * value exists, a default value is used.  The final value is stored
 * in a variable in the adapter structure.
 */
void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
{
	int bd = adapter->bd_number;

	if (bd >= ATL1E_MAX_NIC) {
		netdev_notice(adapter->netdev,
			      "no configuration for board #%i\n", bd);
		netdev_notice(adapter->netdev,
			      "Using defaults for all values\n");
	}

	{ 		/* Transmit Ring Size */
		struct atl1e_option opt = {
			.type = range_option,
			.name = "Transmit Ddescription Count",
			.err  = "using default of "
				__MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT),
			.def  = ATL1E_DEFAULT_TX_DESC_CNT,
			.arg  = { .r = { .min = ATL1E_MIN_TX_DESC_CNT,
					 .max = ATL1E_MAX_TX_DESC_CNT} }
		};
		int val;
		if (num_tx_desc_cnt > bd) {
			val = tx_desc_cnt[bd];
			atl1e_validate_option(&val, &opt, adapter);
			adapter->tx_ring.count = (u16) val & 0xFFFC;
		} else
			adapter->tx_ring.count = (u16)opt.def;
	}

	{ 		/* Receive Memory Block Count */
		struct atl1e_option opt = {
			.type = range_option,
			.name = "Memory size of rx buffer(KB)",
			.err  = "using default of "
				__MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE),
			.def  = ATL1E_DEFAULT_RX_MEM_SIZE,
			.arg  = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE,
					 .max = ATL1E_MAX_RX_MEM_SIZE} }
		};
		int val;
		if (num_rx_mem_size > bd) {
			val = rx_mem_size[bd];
			atl1e_validate_option(&val, &opt, adapter);
			adapter->rx_ring.page_size = (u32)val * 1024;
		} else {
			adapter->rx_ring.page_size = (u32)opt.def * 1024;
		}
	}

	{ 		/* Interrupt Moderate Timer */
		struct atl1e_option opt = {
			.type = range_option,
			.name = "Interrupt Moderate Timer",
			.err  = "using default of "
				__MODULE_STRING(INT_MOD_DEFAULT_CNT),
			.def  = INT_MOD_DEFAULT_CNT,
			.arg  = { .r = { .min = INT_MOD_MIN_CNT,
					 .max = INT_MOD_MAX_CNT} }
		};
		int val;
		if (num_int_mod_timer > bd) {
			val = int_mod_timer[bd];
			atl1e_validate_option(&val, &opt, adapter);
			adapter->hw.imt = (u16) val;
		} else
			adapter->hw.imt = (u16)(opt.def);
	}

	{ 		/* MediaType */
		struct atl1e_option opt = {
			.type = range_option,
			.name = "Speed/Duplex Selection",
			.err  = "using default of "
				__MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR),
			.def  = MEDIA_TYPE_AUTO_SENSOR,
			.arg  = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR,
					 .max = MEDIA_TYPE_10M_HALF} }
		};
		int val;
		if (num_media_type > bd) {
			val = media_type[bd];
			atl1e_validate_option(&val, &opt, adapter);
			adapter->hw.media_type = (u16) val;
		} else
			adapter->hw.media_type = (u16)(opt.def);
	}
}
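Each block follows the same shape: take the per-board module parameter if one was supplied, otherwise fall back to the option's default. atl1e_validate_option() itself is not shown here; a plausible user-space sketch of the range check it is assumed to perform (simplified, hypothetical types):

#include <stdio.h>

struct option_range { int min, max, def; const char *name; };

static int validate_option(int val, const struct option_range *opt)
{
	if (val >= opt->min && val <= opt->max)
		return val;
	fprintf(stderr, "%s: %d out of range [%d, %d], using default %d\n",
		opt->name, val, opt->min, opt->max, opt->def);
	return opt->def;
}

int main(void)
{
	struct option_range tx_desc = { 32, 1024, 128, "Transmit Descriptor Count" };

	printf("val = %d\n", validate_option(4096, &tx_desc));	/* falls back to 128 */
	return 0;
}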
Example 9
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct xgbe_hw_if *hw_if;
	struct xgbe_desc_if *desc_if;
	struct net_device *netdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	mutex_init(&pdata->xpcs_mutex);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	DBGPR("  xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	DBGPR("  xpcs_regs  = %p\n", pdata->xpcs_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	hw_if = pdata->hw_if = &default_xgbe_hw_if;
	desc_if = pdata->desc_if = &default_xgbe_desc_if;

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Prepare to register with MDIO */
	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
	if (!pdata->mii_bus_id) {
		dev_err(dev, "failed to allocate mii bus id\n");
		ret = -ENOMEM;
		goto err_io;
	}
	ret = xgbe_mdio_register(pdata);
	if (ret)
		goto err_bus_id;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_reg_netdev;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_reg_netdev:
	xgbe_mdio_unregister(pdata);

err_bus_id:
	kfree(pdata->mii_bus_id);

err_io:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
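The descriptor-count validation relies on the classic bit trick: a power of two has exactly one set bit, so n & (n - 1) is zero only for powers of two (and for zero itself, which the compile-time BUILD_BUG_ON_NOT_POWER_OF_2 check also rejects). A runnable sketch:

#include <stdio.h>

static int is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int counts[] = { 256, 512, 768 };

	for (unsigned int i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("%u: %s\n", counts[i],
		       is_power_of_2(counts[i]) ? "valid ring size" : "rejected");
	return 0;
}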
Example 10
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct xgbe_hw_if *hw_if;
	struct xgbe_desc_if *desc_if;
	struct net_device *netdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	const u8 *mac_addr;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	mutex_init(&pdata->xpcs_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the system clock setting */
	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
	if (IS_ERR(pdata->sysclk)) {
		dev_err(dev, "dma devm_clk_get failed\n");
		ret = PTR_ERR(pdata->sysclk);
		goto err_io;
	}

	/* Obtain the PTP clock setting */
	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
	if (IS_ERR(pdata->ptpclk)) {
		dev_err(dev, "ptp devm_clk_get failed\n");
		ret = PTR_ERR(pdata->ptpclk);
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	DBGPR("  xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	DBGPR("  xpcs_regs  = %p\n", pdata->xpcs_regs);

	/* Set the DMA mask */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	if (of_property_read_bool(dev->of_node, "dma-coherent")) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq failed\n");
		goto err_io;
	}
	netdev->irq = ret;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;

	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);
	hw_if = &pdata->hw_if;
	desc_if = &pdata->desc_if;

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Retrieve the MAC address */
	mac_addr = of_get_mac_address(dev->of_node);
	if (!mac_addr) {
		dev_err(dev, "invalid mac address for this device\n");
		ret = -EINVAL;
		goto err_io;
	}
	memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);

	/* Retrieve the PHY mode - it must be "xgmii" */
	pdata->phy_mode = of_get_phy_mode(dev->of_node);
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "invalid phy-mode specified for this device\n");
		ret = -EINVAL;
		goto err_io;
	}

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Allocate the rings for the DMA channels */
	pdata->channel = xgbe_alloc_rings(pdata);
	if (!pdata->channel) {
		dev_err(dev, "ring allocation failed\n");
		ret = -ENOMEM;
		goto err_io;
	}

	/* Prepare to register with MDIO */
	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
	if (!pdata->mii_bus_id) {
		dev_err(dev, "failed to allocate mii bus id\n");
		ret = -ENOMEM;
		goto err_io;
	}
	ret = xgbe_mdio_register(pdata);
	if (ret)
		goto err_bus_id;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_reg_netdev;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_reg_netdev:
	xgbe_mdio_unregister(pdata);

err_bus_id:
	kfree(pdata->mii_bus_id);

err_io:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
Example 11
/*----------------------------------------------------------------
 * p80211knetdev_hard_start_xmit
 *
 * Linux netdevice method for transmitting a frame.
 *
 * Arguments:
 *	skb	Linux sk_buff containing the frame.
 *	netdev	Linux netdevice.
 *
 * Side effects:
 *	If the lower layers report that buffers are full, netdev->tbusy
 *	will be set to prevent higher layers from sending more traffic.
 *
 *	Note: If this function returns non-zero, higher layers retain
 *	      ownership of the skb.
 *
 * Returns:
 *	zero on success, non-zero on failure.
 *----------------------------------------------------------------
 */
static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *netdev)
{
	int result = 0;
	int txresult = -1;
	struct wlandevice *wlandev = netdev->ml_priv;
	union p80211_hdr p80211_hdr;
	struct p80211_metawep p80211_wep;

	p80211_wep.data = NULL;

	if (!skb)
		return NETDEV_TX_OK;

	if (wlandev->state != WLAN_DEVICE_OPEN) {
		result = 1;
		goto failed;
	}

	memset(&p80211_hdr, 0, sizeof(p80211_hdr));
	memset(&p80211_wep, 0, sizeof(p80211_wep));

	if (netif_queue_stopped(netdev)) {
		netdev_dbg(netdev, "called when queue stopped.\n");
		result = 1;
		goto failed;
	}

	netif_stop_queue(netdev);

	/* Check to see that a valid mode is set */
	switch (wlandev->macmode) {
	case WLAN_MACMODE_IBSS_STA:
	case WLAN_MACMODE_ESS_STA:
	case WLAN_MACMODE_ESS_AP:
		break;
	default:
		/* Mode isn't set yet, just drop the frame
		 * and return success.
		 * TODO: we need a saner way to handle this
		 */
		if (be16_to_cpu(skb->protocol) != ETH_P_80211_RAW) {
			netif_start_queue(wlandev->netdev);
			netdev_notice(netdev, "Tx attempt prior to association, frame dropped.\n");
			netdev->stats.tx_dropped++;
			result = 0;
			goto failed;
		}
		break;
	}

	/* Check for raw transmits */
	if (be16_to_cpu(skb->protocol) == ETH_P_80211_RAW) {
		if (!capable(CAP_NET_ADMIN)) {
			result = 1;
			goto failed;
		}
		/* move the header over */
		memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
		skb_pull(skb, sizeof(p80211_hdr));
	} else {
		if (skb_ether_to_p80211
		    (wlandev, wlandev->ethconv, skb, &p80211_hdr,
		     &p80211_wep) != 0) {
			/* convert failed */
			netdev_dbg(netdev, "ether_to_80211(%d) failed.\n",
				   wlandev->ethconv);
			result = 1;
			goto failed;
		}
	}
	if (!wlandev->txframe) {
		result = 1;
		goto failed;
	}

	netif_trans_update(netdev);

	netdev->stats.tx_packets++;
	/* count only the packet payload */
	netdev->stats.tx_bytes += skb->len;

	txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep);

	if (txresult == 0) {
		/* success and more buf */
		/* avail, re: hw_txdata */
		netif_wake_queue(wlandev->netdev);
		result = NETDEV_TX_OK;
	} else if (txresult == 1) {
		/* success, no more avail */
		netdev_dbg(netdev, "txframe success, no more bufs\n");
		/* netdev->tbusy = 1;  don't set here, irqhdlr */
		/*   may have already cleared it */
		result = NETDEV_TX_OK;
	} else if (txresult == 2) {
		/* alloc failure, drop frame */
		netdev_dbg(netdev, "txframe returned alloc_fail\n");
		result = NETDEV_TX_BUSY;
	} else {
		/* buffer full or queue busy, drop frame. */
		netdev_dbg(netdev, "txframe returned full or busy\n");
		result = NETDEV_TX_BUSY;
	}

failed:
	/* Free up the WEP buffer if it's not the same as the skb */
	if ((p80211_wep.data) && (p80211_wep.data != skb->data))
		kzfree(p80211_wep.data);

	/* we always free the skb here, never in a lower level. */
	if (!result)
		dev_kfree_skb(skb);

	return result;
}
Example 12
/* ----------------------------------------------------------------------------
mace_interrupt
	The interrupt handler.
---------------------------------------------------------------------------- */
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
  struct net_device *dev = (struct net_device *) dev_id;
  mace_private *lp;
  unsigned int ioaddr;
  int status;
  int IntrCnt = MACE_MAX_IR_ITERATIONS;

  if (dev == NULL) {
    pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
	  irq);
    return IRQ_NONE;
  }

  /* Only touch the private area after dev is known to be valid. */
  lp = netdev_priv(dev);
  ioaddr = dev->base_addr;

  if (lp->tx_irq_disabled) {
    netdev_notice(dev, "Interrupt with tx_irq_disabled [isr=%02X, imr=%02X]\n",
		  inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
		  inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
    /* WARNING: MACE_IR has been read! */
    return IRQ_NONE;
  }

  if (!netif_device_present(dev)) {
    netdev_dbg(dev, "interrupt from dead card\n");
    return IRQ_NONE;
  }

  do {
    /* WARNING: MACE_IR is a READ/CLEAR port! */
    status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);

    pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);

    if (status & MACE_IR_RCVINT) {
      mace_rx(dev, MACE_MAX_RX_ITERATIONS);
    }

    if (status & MACE_IR_XMTINT) {
      unsigned char fifofc;
      unsigned char xmtrc;
      unsigned char xmtfs;

      fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
      if ((fifofc & MACE_FIFOFC_XMTFC) == 0) {
	lp->linux_stats.tx_errors++;
	outb(0xFF, ioaddr + AM2150_XMT_SKIP);
      }

      /* Transmit Retry Count (XMTRC, reg 4) */
      xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
      if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
      lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);

      xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS);
      if (xmtfs & MACE_XMTFS_XMTSV) { /* Transmit Status Valid */
	lp->mace_stats.xmtsv++;

	if (xmtfs & ~MACE_XMTFS_XMTSV) {
	  if (xmtfs & MACE_XMTFS_UFLO) {
	    /* Underflow.  Indicates that the Transmit FIFO emptied before
	       the end of frame was reached. */
	    lp->mace_stats.uflo++;
	  }
	  if (xmtfs & MACE_XMTFS_LCOL) {
	    /* Late Collision */
	    lp->mace_stats.lcol++;
	  }
	  if (xmtfs & MACE_XMTFS_MORE) {
	    /* MORE than one retry was needed */
	    lp->mace_stats.more++;
	  }
	  if (xmtfs & MACE_XMTFS_ONE) {
	    /* Exactly ONE retry occurred */
	    lp->mace_stats.one++;
	  }
	  if (xmtfs & MACE_XMTFS_DEFER) {
	    /* Transmission was deferred */
	    lp->mace_stats.defer++;
	  }
	  if (xmtfs & MACE_XMTFS_LCAR) {
	    /* Loss of carrier */
	    lp->mace_stats.lcar++;
	  }
	  if (xmtfs & MACE_XMTFS_RTRY) {
	    /* Retry error: transmit aborted after 16 attempts */
	    lp->mace_stats.rtry++;
	  }
        } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */

      } /* if (xmtfs & MACE_XMTFS_XMTSV) */

      lp->linux_stats.tx_packets++;
      lp->tx_free_frames++;
      netif_wake_queue(dev);
    } /* if (status & MACE_IR_XMTINT) */

    if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
      if (status & MACE_IR_JAB) {
        /* Jabber Error.  Excessive transmit duration (20-150ms). */
        lp->mace_stats.jab++;
      }
      if (status & MACE_IR_BABL) {
        /* Babble Error.  >1518 bytes transmitted. */
        lp->mace_stats.babl++;
      }
      if (status & MACE_IR_CERR) {
	/* Collision Error.  CERR indicates the absence of the
	   Signal Quality Error Test message after a packet
	   transmission. */
        lp->mace_stats.cerr++;
      }
      if (status & MACE_IR_RCVCCO) {
        /* Receive Collision Count Overflow; */
        lp->mace_stats.rcvcco++;
      }
      if (status & MACE_IR_RNTPCO) {
        /* Runt Packet Count Overflow */
        lp->mace_stats.rntpco++;
      }
      if (status & MACE_IR_MPCO) {
        /* Missed Packet Count Overflow */
        lp->mace_stats.mpco++;
      }
    } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */

  } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));

  return IRQ_HANDLED;
} /* mace_interrupt */
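Because MACE_IR is read-to-clear, the handler keeps re-reading it until no interesting bits remain, and IntrCnt bounds the loop so a noisy card cannot livelock the CPU. A user-space sketch of that bounded drain shape (the register is simulated; names are illustrative):

#include <stdio.h>

static unsigned int pending = 0x07;	/* simulated read-to-clear register */

static unsigned int read_and_clear(void)
{
	unsigned int status = pending;

	pending >>= 1;			/* pretend new events taper off */
	return status;
}

int main(void)
{
	int budget = 4;			/* like MACE_MAX_IR_ITERATIONS */
	unsigned int status;

	do {
		status = read_and_clear();
		printf("status 0x%X, budget left %d\n", status, budget);
	} while (status && --budget);
	return 0;
}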
Example 13
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct xgbe_prv_data *pdata;
	struct device *dev = &pdev->dev;
	void __iomem * const *iomap_table;
	struct pci_dev *rdev;
	unsigned int ma_lo, ma_hi;
	unsigned int reg;
	int bar_mask;
	int ret;

	pdata = xgbe_alloc_pdata(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto err_alloc;
	}

	pdata->pcidev = pdev;
	pci_set_drvdata(pdev, pdata);

	/* Get the version data */
	pdata->vdata = (struct xgbe_version_data *)id->driver_data;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed\n");
		goto err_pci_enable;
	}

	/* Obtain the mmio areas for the device */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME);
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed\n");
		goto err_pci_enable;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}

	pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR];
	if (!pdata->xgmac_regs) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
		dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs);
		dev_dbg(dev, "xi2c_regs  = %p\n", pdata->xi2c_regs);
	}

	pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR];
	if (!pdata->xpcs_regs) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

	/* Set the PCS indirect addressing definition registers */
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (rdev &&
	    (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}
	pci_dev_put(rdev);

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xpcs window def  = %#010x\n",
			pdata->xpcs_window_def_reg);
		dev_dbg(dev, "xpcs window sel  = %#010x\n",
			pdata->xpcs_window_sel_reg);
		dev_dbg(dev, "xpcs window      = %#010x\n",
			pdata->xpcs_window);
		dev_dbg(dev, "xpcs window size = %#010x\n",
			pdata->xpcs_window_size);
		dev_dbg(dev, "xpcs window mask = %#010x\n",
			pdata->xpcs_window_mask);
	}

	pci_set_master(pdev);

	/* Enable all interrupts in the hardware */
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr[0] = ma_lo & 0xff;
	pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
	pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
	pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
	pdata->mac_addr[4] = ma_hi & 0xff;
	pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
	if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) ||
	    !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid mac address\n");
		ret = -EINVAL;
		goto err_pci_enable;
	}

	/* Clock settings */
	pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->arcr = XGBE_DMA_PCI_ARCR;
	pdata->awcr = XGBE_DMA_PCI_AWCR;
	pdata->awarcr = XGBE_DMA_PCI_AWARCR;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
			pdata->tx_max_channel_count,
			pdata->rx_max_channel_count);
		dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
			pdata->tx_max_q_count, pdata->rx_max_q_count);
	}

	/* Set the hardware channel and queue counts */
	xgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
				      pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
				      pdata->vdata->rx_max_fifo_size);
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n",
			pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);

	/* Configure interrupt support */
	ret = xgbe_config_irqs(pdata);
	if (ret)
		goto err_pci_enable;

	/* Configure the netdev resource */
	ret = xgbe_config_netdev(pdata);
	if (ret)
		goto err_irq_vectors;

	netdev_notice(pdata->netdev, "net device enabled\n");

	return 0;

err_irq_vectors:
	pci_free_irq_vectors(pdata->pcidev);

err_pci_enable:
	xgbe_free_pdata(pdata);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
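The MAC address unpacking above takes bytes 0-3 from the low register and bytes 4-5 from the high one. The same shifting, as a standalone sketch with made-up register values:

#include <stdio.h>

int main(void)
{
	unsigned int ma_lo = 0x04030201, ma_hi = 0x0605;	/* hypothetical */
	unsigned char mac[6];

	for (int i = 0; i < 4; i++)
		mac[i] = (ma_lo >> (8 * i)) & 0xff;
	for (int i = 0; i < 2; i++)
		mac[4 + i] = (ma_hi >> (8 * i)) & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}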
Example 14
static void dma_block_output(struct net_device *dev, int count,
			     const u_char *buf, const int start_page)
{
    unsigned int nic_base = dev->base_addr;
    pcnet_dev_t *info = PRIV(dev);
#ifdef PCMCIA_DEBUG
    int retries = 0;
#endif
    u_long dma_start;

#ifdef PCMCIA_DEBUG
    if (ei_debug > 4)
	netdev_dbg(dev, "[bo=%d]\n", count);
#endif

    /* Round the count up for word writes.  Do we need to do this?
       What effect will an odd byte count have on the 8390?
       I should check someday. */
    if (count & 0x01)
	count++;
    if (ei_status.dmaing) {
	netdev_notice(dev, "DMAing conflict in dma_block_output."
	       "[DMAstat:%1x][irqlock:%1x]\n",
	       ei_status.dmaing, ei_status.irqlock);
	return;
    }
    ei_status.dmaing |= 0x01;
    /* We should already be in page 0, but to be safe... */
    outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD);

#ifdef PCMCIA_DEBUG
  retry:
#endif

    outb_p(ENISR_RDC, nic_base + EN0_ISR);

    /* Now the normal output. */
    outb_p(count & 0xff, nic_base + EN0_RCNTLO);
    outb_p(count >> 8,   nic_base + EN0_RCNTHI);
    outb_p(0x00, nic_base + EN0_RSARLO);
    outb_p(start_page, nic_base + EN0_RSARHI);

    outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD);
    outsw(nic_base + PCNET_DATAPORT, buf, count>>1);

    dma_start = jiffies;

#ifdef PCMCIA_DEBUG
    /* This was for the ALPHA version only, but enough people have been
       encountering problems that it is still here. */
    if (ei_debug > 4) {	/* DMA termination address check... */
	int addr, tries = 20;
	do {
	    int high = inb_p(nic_base + EN0_RSARHI);
	    int low = inb_p(nic_base + EN0_RSARLO);
	    addr = (high << 8) + low;
	    if ((start_page << 8) + count == addr)
		break;
	} while (--tries > 0);
	if (tries <= 0) {
	    netdev_notice(dev, "Tx packet transfer address mismatch,"
		   "%#4.4x (expected) vs. %#4.4x (actual).\n",
		   (start_page << 8) + count, addr);
	    if (retries++ == 0)
		goto retry;
	}
    }
#endif

    while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
	if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
	    netdev_notice(dev, "timeout waiting for Tx RDC.\n");
	    pcnet_reset_8390(dev);
	    NS8390_init(dev, 1);
	    break;
	}

    outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
    if (info->flags & DELAY_OUTPUT)
	udelay((long)delay_time);
    ei_status.dmaing &= ~0x01;
}
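The RDC wait loop leans on time_after(), which stays correct when jiffies wraps around because it compares by signed subtraction rather than a plain '>'. A sketch of the idiom in user-space (a simplified form of the kernel macro, without its type checking):

#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long deadline = (unsigned long)-2;	/* just before the counter wraps */
	unsigned long now = 3;				/* a few ticks later, after the wrap */

	printf("naive now > deadline: %d\n", now > deadline);		/* 0: wrong */
	printf("time_after(now, deadline): %d\n", time_after(now, deadline));	/* 1 */
	return 0;
}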
Example 15
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;
	struct device *dev = &pdev->dev, *phy_dev;
	struct platform_device *phy_pdev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i, phy_memnum, phy_irqnum;
	enum dev_dma_attr attr;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = dev->of_node ? 0 : 1;

	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
		dev_err(dev, "unable to obtain phy device\n");
		ret = -EINVAL;
		goto err_phydev;
	}
	phy_dev = &phy_pdev->dev;

	if (pdev == phy_pdev) {
		/* New style device tree or ACPI:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
	} else {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_memnum = 0;
		phy_irqnum = 0;
	}

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->rxtx_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(pdata->rxtx_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "rxtx_regs  = %p\n", pdata->rxtx_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir0_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(pdata->sir0_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir0_regs  = %p\n", pdata->sir0_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir1_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(pdata->sir1_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir1_regs  = %p\n", pdata->sir1_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Retrieve the PHY speedset */
	ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
				       &pdata->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		goto err_io;
	}

	switch (pdata->speed_set) {
	case XGBE_SPEEDSET_1000_10000:
	case XGBE_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY configuration properties */
	if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_BLWC_PROPERTY,
						     pdata->serdes_blwc,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_BLWC_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
		       sizeof(pdata->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_CDR_RATE_PROPERTY,
						     pdata->serdes_cdr_rate,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_CDR_RATE_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
		       sizeof(pdata->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PQ_SKEW_PROPERTY,
						     pdata->serdes_pq_skew,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PQ_SKEW_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
		       sizeof(pdata->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_TX_AMP_PROPERTY,
						     pdata->serdes_tx_amp,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_TX_AMP_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
		       sizeof(pdata->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_CFG_PROPERTY,
						     pdata->serdes_dfe_tap_cfg,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_CFG_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
		       sizeof(pdata->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_ENA_PROPERTY,
						     pdata->serdes_dfe_tap_ena,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_ENA_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
		       sizeof(pdata->serdes_dfe_tap_ena));
	}

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	attr = device_get_dma_attr(dev);
	if (attr == DEV_DMA_NOT_SUPPORTED) {
		dev_err(dev, "DMA is not supported\n");
		ret = -ENODEV;
		goto err_io;
	}
	pdata->coherent = (attr == DEV_DMA_COHERENT);
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum++);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq phy 0 failed\n");
		goto err_io;
	}
	pdata->an_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Issue software reset to device */
	pdata->hw_if.exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->phy_if.phy_init(pdata);

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_io;
	}

	/* Create the PHY/ANEG name based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_netdev;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	platform_device_put(phy_pdev);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_wq:
	destroy_workqueue(pdata->dev_workqueue);

err_netdev:
	unregister_netdev(netdev);

err_io:
	platform_device_put(phy_pdev);

err_phydev:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}