Example #1
0
/* Interrupt handler: read and acknowledge EESR under mdp->lock, then
 * dispatch RX, TX-completion and error processing. Returns IRQ_HANDLED
 * only when one of the expected status bits was pending. */
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
    struct net_device *ndev = netdev;
    struct sh_eth_private *mdp = netdev_priv(ndev);
    irqreturn_t ret = IRQ_NONE;
    u32 ioaddr;
    /* Must be signed: the "--boguscnt < 0" check below could never
     * fire while boguscnt was declared u32 (unsigned is never < 0). */
    int boguscnt = RX_RING_SIZE;
    u32 intr_status = 0;

    ioaddr = ndev->base_addr;
    spin_lock(&mdp->lock);

    /* Get interrupt status */
    intr_status = ctrl_inl(ioaddr + EESR);
    /* Clear interrupt: writing the status bits back acknowledges them. */
    if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                       EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
                       TX_CHECK | EESR_ERR_CHECK)) {
        ctrl_outl(intr_status, ioaddr + EESR);
        ret = IRQ_HANDLED;
    } else
        goto other_irq;	/* not ours: return IRQ_NONE */

    if (intr_status & (EESR_FRC | /* Frame recv*/
                       EESR_RMAF | /* Multi cast address recv*/
                       EESR_RRF  | /* Bit frame recv */
                       EESR_RTLF | /* Long frame recv*/
                       EESR_RTSF | /* short frame recv */
                       EESR_PRE  | /* PHY-LSI recv error */
                       EESR_CERF)) { /* recv frame CRC error */
        sh_eth_rx(ndev);
    }

    /* Tx Check: reclaim finished descriptors and restart the queue. */
    if (intr_status & TX_CHECK) {
        sh_eth_txfree(ndev);
        netif_wake_queue(ndev);
    }

    if (intr_status & EESR_ERR_CHECK)
        sh_eth_error(ndev, intr_status);

    /* NOTE(review): boguscnt is decremented exactly once per IRQ, so
     * this warning cannot trigger unless RX_RING_SIZE <= 1; it looks
     * like a leftover from a loop-based handler — confirm intent. */
    if (--boguscnt < 0) {
        printk(KERN_WARNING
               "%s: Too much work at interrupt, status=0x%4.4x.\n",
               ndev->name, intr_status);
    }

other_irq:
    spin_unlock(&mdp->lock);

    return ret;
}
Example #2
0
/* Packet transmit function.
 * .ndo_start_xmit hook: queues one skb on the TX descriptor ring and
 * kicks TX DMA if it is idle. Returns NETDEV_TX_OK on success, or
 * NETDEV_TX_BUSY when the ring is full and nothing could be freed. */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Keep a few slots of headroom; try to reclaim completed
	 * descriptors before giving up and stopping the queue. */
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	txdesc->addr = virt_to_phys(skb->data);
	/* soft swap: byte-swap the buffer in software when the
	 * controller cannot swap in hardware. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	/* write back: flush/purge the cache region so DMA sees the
	 * data (SH cache helper). */
	__flush_purge_region(skb->data, skb->len);
	/* Report at least the hardware minimum frame length. */
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	/* Hand the descriptor to the controller; the last ring entry
	 * also carries the descriptor-list-end flag. This must come
	 * after addr/length are set and the cache is flushed. */
	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	/* Restart TX DMA if the transmit-request bit is no longer set. */
	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
Example #3
0
/* Interrupt handler: read and acknowledge EESR under mdp->lock, then
 * dispatch RX, TX-completion and error processing. Returns IRQ_HANDLED
 * only when one of the expected status bits was pending. */
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	/* Per-chip data: which EESR bits mean TX completion / error. */
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 ioaddr, intr_status = 0;

	ioaddr = ndev->base_addr;
	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = readl(ioaddr + EESR);
	/* Clear interrupt: writing the status bits back acknowledges
	 * them. */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		writel(intr_status, ioaddr + EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;	/* not ours: return IRQ_NONE */

	if (intr_status & (EESR_FRC | /* Frame recv*/
			EESR_RMAF | /* Multi cast address recv*/
			EESR_RRF  | /* Bit frame recv */
			EESR_RTLF | /* Long frame recv*/
			EESR_RTSF | /* short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)){ /* recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx Check: reclaim finished descriptors and restart the queue. */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
Example #4
0
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);
    struct sh_eth_txdesc *txdesc;
    u32 entry;
    unsigned long flags;

    spin_lock_irqsave(&mdp->lock, flags);
    if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
        if (!sh_eth_txfree(ndev)) {
            netif_stop_queue(ndev);
            spin_unlock_irqrestore(&mdp->lock, flags);
            return 1;
        }
    }
    spin_unlock_irqrestore(&mdp->lock, flags);

    entry = mdp->cur_tx % TX_RING_SIZE;
    mdp->tx_skbuff[entry] = skb;
    txdesc = &mdp->tx_ring[entry];
    txdesc->addr = (u32)(skb->data);
    /* soft swap. */
    swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
    /* write back */
    __flush_purge_region(skb->data, skb->len);
    if (skb->len < ETHERSMALL)
        txdesc->buffer_length = ETHERSMALL;
    else
        txdesc->buffer_length = skb->len;

    if (entry >= TX_RING_SIZE - 1)
        txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
    else
        txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

    mdp->cur_tx++;

    if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
        ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);

    ndev->trans_start = jiffies;

    return 0;
}
Example #5
0
/* error control function
 * Handles error and link-change bits reported in EESR: updates netdev
 * error counters, tracks PHY link state, restarts RX/TX DMA where
 * needed, and wakes the TX queue after a TX error. */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);
    u32 ioaddr = ndev->base_addr;
    u32 felic_stat;

    if (intr_status & EESR_ECI) {
        /* E-MAC status change: read and acknowledge ECSR. */
        felic_stat = ctrl_inl(ioaddr + ECSR);
        ctrl_outl(felic_stat, ioaddr + ECSR);	/* clear int */
        if (felic_stat & ECSR_ICD)
            mdp->stats.tx_carrier_errors++;
        if (felic_stat & ECSR_LCHNG) {
            /* Link Changed */
            u32 link_stat = (ctrl_inl(ioaddr + PSR));
            if (!(link_stat & PHY_ST_LINK)) {
                /* Link Down : disable tx and rx */
                ctrl_outl(ctrl_inl(ioaddr + ECMR) &
                          ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
            } else {
                /* Link Up: mask ECI while re-acking ECSR, then
                 * unmask it before re-enabling the MAC. */
                ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
                          ~DMAC_M_ECI, ioaddr + EESIPR);
                /*clear int */
                ctrl_outl(ctrl_inl(ioaddr + ECSR),
                          ioaddr + ECSR);
                ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
                          DMAC_M_ECI, ioaddr + EESIPR);
                /* enable tx and rx */
                ctrl_outl(ctrl_inl(ioaddr + ECMR) |
                          (ECMR_RE | ECMR_TE), ioaddr + ECMR);
            }
        }
    }

    if (intr_status & EESR_TWB) {
        /* Write-back end; the interrupt itself is unused, only the
         * accompanying transmit abort is counted. */
        if (intr_status & EESR_TABT)	/* Transmit Abort int */
            mdp->stats.tx_aborted_errors++;
    }

    if (intr_status & EESR_RABT) {
        /* Receive Abort int */
        if (intr_status & EESR_RFRMER) {
            /* Receive Frame Overflow int */
            mdp->stats.rx_frame_errors++;
            printk(KERN_ERR "Receive Frame Overflow\n");
        }
    }
#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
    /* NOTE(review): counts a TX FIFO error only when ADE, TDE and TFE
     * are all set at once; later driver revisions count these
     * conditions independently — confirm intended semantics. */
    if (intr_status & EESR_ADE) {
        if (intr_status & EESR_TDE) {
            if (intr_status & EESR_TFE)
                mdp->stats.tx_fifo_errors++;
        }
    }
#endif

    if (intr_status & EESR_RDE) {
        /* Receive Descriptor Empty int */
        mdp->stats.rx_over_errors++;

        /* Restart RX DMA if the receive-request bit was cleared. */
        if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
            ctrl_outl(EDRRR_R, ioaddr + EDRRR);
        printk(KERN_ERR "Receive Descriptor Empty\n");
    }
    if (intr_status & EESR_RFE) {
        /* Receive FIFO Overflow int */
        mdp->stats.rx_fifo_errors++;
        printk(KERN_ERR "Receive FIFO Overflow\n");
    }
    if (intr_status & (EESR_TWB | EESR_TABT |
#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
                       EESR_ADE |
#endif
                       EESR_TDE | EESR_TFE)) {
        /* Tx error */
        u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
        /* dmesg */
        printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
               ndev->name, intr_status, mdp->cur_tx);
        printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
               mdp->dirty_tx, (u32) ndev->state, edtrr);
        /* dirty buffer free */
        sh_eth_txfree(ndev);

        /* SH7712 BUG: restart TX DMA if the transmit-request bit was
         * lost by the error. */
        if (edtrr ^ EDTRR_TRNS) {
            /* tx dma start */
            ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
        }
        /* wakeup */
        netif_wake_queue(ndev);
    }
}
Example #6
0
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					  ~DMAC_M_ECI, EESIPR);
				/*clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					  ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					  DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write buck end. unused write back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
			if (netif_msg_tx_err(mdp))
				dev_err(&ndev->dev, "Transmit Abort\n");
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
			sh_eth_write(ndev, EDRRR_R, EDRRR);
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		mdp->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
Example #7
0
/* error control function
 * Handles error and link-change bits reported in EESR: updates netdev
 * error counters, tracks PHY link state, restarts RX/TX DMA where
 * needed, and wakes the TX queue after a TX error. */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		/* E-MAC status change: read and acknowledge ECSR. */
		felic_stat = readl(ioaddr + ECSR);
		writel(felic_stat, ioaddr + ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				/* No PSR register: derive status from
				 * the software link state. */
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (readl(ioaddr + PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				/* Link Down : disable tx and rx */
				writel(readl(ioaddr + ECMR) &
					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
			} else {
				/* Link Up: mask ECI while re-acking
				 * ECSR, then unmask and re-enable. */
				writel(readl(ioaddr + EESIPR) &
					  ~DMAC_M_ECI, ioaddr + EESIPR);
				/*clear int */
				writel(readl(ioaddr + ECSR),
					  ioaddr + ECSR);
				writel(readl(ioaddr + EESIPR) |
					  DMAC_M_ECI, ioaddr + EESIPR);
				/* enable tx and rx */
				writel(readl(ioaddr + ECMR) |
					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write-back end; the interrupt itself is unused, only
		 * the accompanying transmit abort is counted. */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			dev_err(&ndev->dev, "Receive Frame Overflow\n");
		}
	}

	/* NOTE(review): counts a TX FIFO error only when ADE, TDE and
	 * TFE are all set at once; other revisions of this function
	 * count the conditions independently — confirm semantics. */
	if (!mdp->cd->no_ade) {
		if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
		    intr_status & EESR_TFE)
			mdp->stats.tx_fifo_errors++;
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		/* Restart RX DMA if the receive-request bit was cleared. */
		if (readl(ioaddr + EDRRR) ^ EDRRR_R)
			writel(EDRRR_R, ioaddr + EDRRR);
		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}
	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	/* Any of these is treated as a TX error needing recovery; ADE
	 * is excluded on controllers that do not report it. */
	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = readl(ndev->base_addr + EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG: restart TX DMA if the transmit-request
		 * bit was lost by the error. */
		if (edtrr ^ EDTRR_TRNS) {
			/* tx dma start */
			writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}