Example #1
static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}
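For context, a timeout handler like this is invoked by the networking core's TX watchdog. In kernels of this driver's vintage the hook was a plain function pointer on the net_device; below is a minimal sketch of the wiring (the setup function name is hypothetical, and the 1-second timeout is an assumption):

static void mace_setup_sketch(struct net_device *dev)
{
	/* Sketch: registering the watchdog hook at init time on
	 * pre-net_device_ops kernels. Illustrative only. */
	dev->tx_timeout = mace_tx_timeout; /* called when TX stalls */
	dev->watchdog_timeo = HZ;          /* assumed: fire after ~1 s */
}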
Example #2
static void mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000)) return;

	/*
	 * Process the read queue
	 */
		 
	status = psc_read_word(PSC_ENETRD_CTL);
		
	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring + (mp->rx_tail * 0x0800)));
			mp->rx_tail++;
		}
			
		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}
		
	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
		netif_wake_queue(dev);
	}
}
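The psc_read_*/psc_write_* helpers used throughout these examples are thin MMIO accessors for the Mac's PSC (Peripheral Subsystem Controller). A sketch of how such accessors look, modeled on the m68k header <asm/mac_psc.h>; the exact definitions may differ by kernel version:

/* Sketch of PSC register accessors, assuming a `psc` base pointer
 * exported by the platform code (as in <asm/mac_psc.h>). */
extern volatile __u8 *psc;

static inline u16 psc_read_word(int offset)
{
	return *((volatile u16 *)(psc + offset));
}

static inline u32 psc_read_long(int offset)
{
	return *((volatile u32 *)(psc + offset));
}

static inline void psc_write_word(int offset, u16 data)
{
	*((volatile u16 *)(psc + offset)) = data;
}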
Example #3
static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace *mb = mp->mace;

    /* reset the chip */
    mace_reset(dev);

    if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
        printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
        return -EAGAIN;
    }
    if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
        printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
        free_irq(dev->irq, dev);
        return -EAGAIN;
    }

    /* Allocate the DMA ring buffers */

    mp->tx_ring = dma_alloc_coherent(mp->device,
                                     N_TX_RING * MACE_BUFF_SIZE,
                                     &mp->tx_ring_phys, GFP_KERNEL);
    if (mp->tx_ring == NULL)
        goto out1;

    mp->rx_ring = dma_alloc_coherent(mp->device,
                                     N_RX_RING * MACE_BUFF_SIZE,
                                     &mp->rx_ring_phys, GFP_KERNEL);
    if (mp->rx_ring == NULL)
        goto out2;

    mace_dma_off(dev);

    /* Not sure what these do */

    psc_write_word(PSC_ENETWR_CTL, 0x9000);
    psc_write_word(PSC_ENETRD_CTL, 0x9000);
    psc_write_word(PSC_ENETWR_CTL, 0x0400);
    psc_write_word(PSC_ENETRD_CTL, 0x0400);

    mace_rxdma_reset(dev);
    mace_txdma_reset(dev);

    /* turn it on! */
    mb->maccc = ENXMT | ENRCV;
    /* enable all interrupts except receive interrupts */
    mb->imr = RCVINT;
    return 0;

out2:
    dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
                      mp->tx_ring, mp->tx_ring_phys);
out1:
    free_irq(dev->irq, dev);
    free_irq(mp->dma_intr, dev);
    return -ENOMEM;
}
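Resources taken in mace_open() have to be released in reverse order on the close path. A sketch of a matching teardown, assuming a close routine symmetric to the allocations above (the body is illustrative, not the driver's actual mace_close):

static int mace_close_sketch(struct net_device *dev)
{
	/* Sketch: teardown mirroring mace_open(). Ordering is
	 * quiesce chip -> free IRQs -> free rings. Illustrative only. */
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* stop rx and tx */
	mb->imr = 0xFF;		/* mask all interrupts */

	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
	return 0;
}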
Example #4
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(mp, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after an xmtfs error.
			 * This is hard to reproduce; resetting *may* help.
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++mp->stats.tx_errors;
			if (fs & LCAR)
				++mp->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++mp->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					mp->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}
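mace_handle_misc_intrs() folds the chip's error counters into the interface statistics. A sketch of what such a helper typically does in this driver family; the register and bit names follow mace.h, but treat the exact accounting as illustrative:

static void mace_handle_misc_intrs_sketch(struct mace_data *mp, int intr)
{
	/* Sketch of a misc-interrupt helper in the style of this driver
	 * family. Reading mpc/rntpc clears the hardware counters. */
	volatile struct mace *mb = mp->mace;

	if (intr & MPCO)			/* missed-packet counter overflow */
		mp->stats.rx_missed_errors += 256;
	mp->stats.rx_missed_errors += mb->mpc;	/* read clears the counter */
	if (intr & RNTPCO)			/* runt-packet counter overflow */
		mp->stats.rx_length_errors += 256;
	mp->stats.rx_length_errors += mb->rntpc;
	if (intr & CERR)			/* heartbeat (SQE) failure */
		++mp->stats.tx_heartbeat_errors;
}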
Example #5
static void mace_xmit_error(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	u8 xmtfs, xmtrc;
	
	xmtfs = mb->xmtfs;
	xmtrc = mb->xmtrc;
	
	if (xmtfs & XMTSV) {
		if (xmtfs & UFLO) {
			printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
			mp->stats.tx_errors++;
			mp->stats.tx_fifo_errors++;
			mace_txdma_reset(dev);
		}
		if (xmtfs & RTRY) {
			mp->stats.collisions++;
		}
	}			
}
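Note that the status bits are only examined when XMTSV is set, i.e. when xmtfs actually holds valid data for a completed transmit. An underrun additionally forces a TX DMA reset, since the FIFO contents can no longer be trusted after the chip ran dry mid-frame.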
Example #6
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
#if 0
	int i;

	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "%s: software reset failed!!\n", dev->name);
		return -EAGAIN;
	}
#endif

	mb->biucc = XMTSP_64;
	mb->fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU | XMTBRST | RCVBRST;
	mb->xmtfc = AUTO_PAD_XMIT;
	mb->plscc = PORTSEL_AUI;
	/* mb->utr = RTRD; */

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, N_RX_PAGES);
	mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);
	
	if (mp->tx_ring == NULL || mp->rx_ring == NULL) {
		if (mp->rx_ring) free_pages((unsigned long) mp->rx_ring, N_RX_PAGES);
		if (mp->tx_ring) free_pages((unsigned long) mp->tx_ring, 0);
		free_irq(dev->irq, dev);
		free_irq(mp->dma_intr, dev);
		printk(KERN_ERR "%s: unable to allocate DMA buffers\n", dev->name);
		return -ENOMEM;
	}

	mp->rx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->rx_ring);
	mp->tx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->tx_ring);

	/* We want the Rx buffer to be uncached and the Tx buffer to be writethrough */

	kernel_set_cachemode((void *)mp->rx_ring, N_RX_PAGES * PAGE_SIZE, IOMAP_NOCACHE_NONSER);	
	kernel_set_cachemode((void *)mp->tx_ring, PAGE_SIZE, IOMAP_WRITETHROUGH);

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

#if 0
	/* load up the hardware address */
	
	mb->iac = ADDRCHG | PHYADDR;
	
	while ((mb->iac & ADDRCHG) != 0);
	
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i];

	/* clear the multicast filter */
	mb->iac = ADDRCHG | LOGADDR;

	while ((mb->iac & ADDRCHG) != 0);
	
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	mb->plscc = PORTSEL_GPSI + ENPLSIO;

	mb->maccc = ENXMT | ENRCV;
	mb->imr = RCVINT;
#endif

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);
	
	return 0;
}
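This older variant of mace_open() predates the generic DMA API: it allocates GFP_DMA pages directly, translates them with virt_to_bus(), and sets the cache mode by hand via kernel_set_cachemode(). Examples #3 and #7 show the same function after conversion to dma_alloc_coherent(), which covers allocation, bus addressing, and cache coherency in a single call.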
Example #7
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
			N_TX_RING * MACE_BUFF_SIZE,
			&mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
			N_RX_RING * MACE_BUFF_SIZE,
			&mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}