static void cpmac_check_status(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

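	/* Decode the host error code and channel fields for both rx and tx
	 * from the MACSTATUS register.
	 */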
	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
	int rx_channel = (macstatus >> 8) & 7;
	int rx_code = (macstatus >> 12) & 15;
	int tx_channel = (macstatus >> 16) & 7;
	int tx_code = (macstatus >> 20) & 15;

	if (rx_code || tx_code) {
		if (netif_msg_drv(priv) && net_ratelimit()) {
			/* Can't find any documentation on what these
			 * error codes actually are. So just log them and hope..
			 */
			if (rx_code)
				printk(KERN_WARNING "%s: host error %d on rx "
				     "channel %d (macstatus %08x), resetting\n",
				     dev->name, rx_code, rx_channel, macstatus);
			if (tx_code)
				printk(KERN_WARNING "%s: host error %d on tx "
				     "channel %d (macstatus %08x), resetting\n",
				     dev->name, tx_code, tx_channel, macstatus);
		}

		netif_tx_stop_all_queues(dev);
		cpmac_hw_stop(dev);
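		/* schedule_work() returns true only when the work was not
		 * already queued, so reset_pending is bumped at most once
		 * per outstanding reset.
		 */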
		if (schedule_work(&priv->reset_work))
			atomic_inc(&priv->reset_pending);
		if (unlikely(netif_msg_hw(priv)))
			cpmac_dump_regs(dev);
	}
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}
Example #2
static void cyrf6936_iw_setup(struct cyrf6936_net *p)
{
	if (netif_msg_drv(p))
		pr_debug("cyrf6936: using wireless extensions\n");
	spin_lock_init(&p->iwstats_lock);
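	/* attach the wireless extensions handler table to the net device */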
	p->netdev->wireless_handlers = &cyrf6936_iwdef;
}
Example #3
static void xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	mutex_init(&pdata->an_mutex);
	INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
	INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
	pdata->mdio_mmd = MDIO_MMD_PCS;

	/* Initialize supported features */
	pdata->phy.supported = SUPPORTED_Autoneg;
	pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	pdata->phy.supported |= SUPPORTED_Backplane;
	pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
	switch (pdata->speed_set) {
	case XGBE_SPEEDSET_1000_10000:
		pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
		break;
	case XGBE_SPEEDSET_2500_10000:
		pdata->phy.supported |= SUPPORTED_2500baseX_Full;
		break;
	}

	pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
					MDIO_PMA_10GBR_FECABLE);
	pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
			       MDIO_PMA_10GBR_FECABLE_ERRABLE);
	if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
		pdata->phy.supported |= SUPPORTED_10000baseR_FEC;

	pdata->phy.advertising = pdata->phy.supported;

	pdata->phy.address = 0;

	pdata->phy.autoneg = AUTONEG_ENABLE;
	pdata->phy.speed = SPEED_UNKNOWN;
	pdata->phy.duplex = DUPLEX_UNKNOWN;

	pdata->phy.link = 0;

	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	/* Fix up Flow Control advertising */
	pdata->phy.advertising &= ~ADVERTISED_Pause;
	pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;

	if (pdata->rx_pause) {
		pdata->phy.advertising |= ADVERTISED_Pause;
		pdata->phy.advertising |= ADVERTISED_Asym_Pause;
	}

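	/* Together with the rx_pause block above, toggling Asym_Pause here
	 * gives the usual Pause/Asym_Pause advertisement encoding:
	 * rx+tx -> Pause, rx only -> Pause|Asym_Pause, tx only -> Asym_Pause.
	 */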
	if (pdata->tx_pause)
		pdata->phy.advertising ^= ADVERTISED_Asym_Pause;

	if (netif_msg_drv(pdata))
		xgbe_dump_phy_registers(pdata);
}
Example #4
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	int ret;

	mutex_init(&pdata->an_mutex);
	INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
	INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
	pdata->mdio_mmd = MDIO_MMD_PCS;

	/* Check for FEC support */
	pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
					MDIO_PMA_10GBR_FECABLE);
	pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
			       MDIO_PMA_10GBR_FECABLE_ERRABLE);

	/* Setup the phy (including supported features) */
	ret = pdata->phy_if.phy_impl.init(pdata);
	if (ret)
		return ret;
	pdata->phy.advertising = pdata->phy.supported;

	pdata->phy.address = 0;

	if (pdata->phy.advertising & ADVERTISED_Autoneg) {
		pdata->phy.autoneg = AUTONEG_ENABLE;
		pdata->phy.speed = SPEED_UNKNOWN;
		pdata->phy.duplex = DUPLEX_UNKNOWN;
	} else {
		pdata->phy.autoneg = AUTONEG_DISABLE;
		pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata);
		pdata->phy.duplex = DUPLEX_FULL;
	}

	pdata->phy.link = 0;

	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	/* Fix up Flow Control advertising */
	pdata->phy.advertising &= ~ADVERTISED_Pause;
	pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;

	if (pdata->rx_pause) {
		pdata->phy.advertising |= ADVERTISED_Pause;
		pdata->phy.advertising |= ADVERTISED_Asym_Pause;
	}

	if (pdata->tx_pause)
		pdata->phy.advertising ^= ADVERTISED_Asym_Pause;

	if (netif_msg_drv(pdata))
		xgbe_dump_phy_registers(pdata);

	return 0;
}
Example #5
static int __devexit cyrf6936_remove(struct spi_device *spi)
{
	struct cyrf6936_net *p = dev_get_drvdata(&spi->dev);

	if (netif_msg_drv(p))
		dev_info(&spi->dev, "remove\n");
	unregister_netdev(p->netdev);
	if (p->pollmode)
		del_timer_sync(&p->poll_timer);
	else
		free_irq(spi->irq, p);
	free_netdev(p->netdev);
	return 0;
}
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		netif_rx_complete(priv->dev, napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
						" duplicate EOQ: %p and %p\n",
						priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size) */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
					"restart rx from a descriptor that's "
					"not free: %p\n",
					priv->dev->name, restart);
			goto fatal_error;
		}

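		/* kick the receiver by pointing the rx DMA head register at
		 * the descriptor it should resume from
		 */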
		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		netif_rx_complete(priv->dev, napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
				"Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			priv->dev->name,
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	netif_rx_complete(priv->dev, napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;
}