static void psc_load_rxdma_base(int set, void *base)
{
	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) base);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
}

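/*
 * Note (guesswork): the PSC DMA registers are undocumented, so the magic
 * values used throughout this file are inferred from how the driver uses
 * them rather than from a datasheet:
 *
 *   CTL writes:  0x8800 seems to pause a channel for reprogramming,
 *                0x0400 to resume it, 0x9000 to initialise it, and
 *                0x1000 to shut it down.
 *   CMD writes:  0x0100 seems to halt a buffer set so its address and
 *                length can be reloaded, 0x9800 to arm it and start DMA,
 *                and 0x1100 to stop and flush it.
 *
 * Treat these as assumptions, not chip documentation.
 */
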
static void mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does; the loop spins until two consecutive
	   reads of PSC_MYSTERY agree, presumably to get a stable value */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY))
		;
	if (!(baka & 0x60000000))
		return;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *)
					(mp->rx_ring + (mp->rx_tail * 0x0800)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
		netif_wake_queue(dev);
	}
}

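/*
 * Note (inferred from the code above): each DMA channel has two buffer
 * sets, at register offsets 0x00 and 0x10, and the driver ping-pongs
 * between them by XOR-ing the slot index with 0x10 (mp->rx_slot,
 * mp->tx_slot and mp->tx_sloti). While one set is active the other can
 * be reloaded, which is presumably the point of the scheme.
 */
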
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
			N_TX_RING * MACE_BUFF_SIZE,
			&mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
			N_RX_RING * MACE_BUFF_SIZE,
			&mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

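/*
 * Note (assumption): PSC_ENETRD_LEN is loaded with N_RX_RING and read
 * back by the interrupt handler as a count of buffers still free, which
 * then computes head = N_RX_RING - left and indexes the ring in
 * 0x0800-byte steps. The length register therefore appears to count 2K
 * receive frames rather than bytes.
 */
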
static void mace68k_txdma_reset(struct net_device *dev)
{
	struct mace68k_data *mp = (struct mace68k_data *) dev->priv;
	volatile struct mace *mace = mp->mace;
	u8 mcc = mace->maccc;

	/* 0x8800 apparently pauses the channel; drop ENXMT while we
	   reprogram it, then resume and restore the old MAC config */

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	mace->maccc = mcc & ~ENXMT;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = mcc;
}

static __init void psc_dma_die_die_die(void)
{
	int i;

	printk("Killing all PSC DMA channels...");
	for (i = 0; i < 9; i++) {
		psc_write_word(PSC_CTL_BASE + (i << 4), 0x8800);
		psc_write_word(PSC_CTL_BASE + (i << 4), 0x1000);
		psc_write_word(PSC_CMD_BASE + (i << 5), 0x1100);
		psc_write_word(PSC_CMD_BASE + (i << 5) + 0x10, 0x1100);
	}
	printk("done!\n");
}

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of
	   alignment and caching issues */

	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

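/*
 * Design note: there is effectively a single transmit buffer per set,
 * so the queue is stopped on every transmit and only woken again from
 * mace_dma_intr() once the write-channel DMA completes and tx_count is
 * incremented.
 */
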
/* Older revision of mace_xmit_start(), before the locking and
   NETDEV_TX_* return codes were introduced */
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;

	/* Stop the queue if the buffer is full */

	if (!mp->tx_count) {
		netif_stop_queue(dev);
		return 1;
	}
	mp->tx_count--;

	mp->stats.tx_packets++;
	mp->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of
	   alignment and caching issues */

	memcpy((void *) mp->tx_ring, skb->data, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return 0;
}

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

static void mace68k_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + 0x10, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + 0x10, 0x1100);
}

static void mace68k_rxdma_reset(struct net_device *dev)
{
	struct mace68k_data *mp = (struct mace68k_data *) dev->priv;
	volatile struct mace *mace = mp->mace;
	u8 mcc = mace->maccc;

	/*
	 * Turn off receive
	 */

	mcc &= ~ENRCV;
	mace->maccc = mcc;

	/*
	 * Program the DMA
	 */

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_load_rxdma_base(0x0, (void *) virt_to_bus(mp->rx_ring));
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_load_rxdma_base(0x10, (void *) virt_to_bus(mp->rx_ring));
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = mcc | ENRCV;

#if 0
	psc_write_word(PSC_ENETRD_CTL, 0x9800);
	psc_write_word(PSC_ENETRD_CTL + 0x10, 0x9800);
#endif
}

static int mace68k_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace68k_data *mp = (struct mace68k_data *) dev->priv;

	/*
	 * This may need atomic types ???
	 */

	printk("mace68k_xmit_start: mp->tx_count = %d, dev->tbusy = %d, mp->tx_ring = %p (%p)\n",
		mp->tx_count, dev->tbusy, mp->tx_ring,
		(void *) virt_to_bus(mp->tx_ring));
	psc_debug_dump();

	if (mp->tx_count == 0) {
		dev->tbusy = 1;
		mace68k_dma_intr(IRQ_MAC_MACE_DMA, dev, NULL);
		return 1;
	}
	mp->tx_count--;

	/*
	 * FIXME:
	 * This is hackish. The memcpy probably isn't needed but
	 * the rules for alignment are not known. Ideally we'd like
	 * to just blast the skb directly to ethernet. We also don't
	 * use the ring properly - just a one frame buffer. That
	 * also requires cache pushes ;).
	 */

	/* copy the frame data, not the sk_buff struct itself */
	memcpy((void *) mp->tx_ring, skb->data, skb->len);

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, virt_to_bus(mp->tx_ring));
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->stats.tx_packets++;
	mp->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);

	return 0;
}

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

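/*
 * Note (inferred): mace_load_rxdma_base() programs both buffer sets
 * (0x00 and 0x10) with the same rx_ring_phys base address, so
 * "switching sets" in the interrupt handler restarts DMA at the bottom
 * of the one shared receive ring rather than moving to a second ring.
 */
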
int mace68k_probe(struct net_device *unused)
{
	int j;
	static int once = 0;
	struct mace68k_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;

	/*
	 * There can be only one...
	 */

	if (once)
		return -ENODEV;
	once = 1;

	if (macintosh_config->ether_type != MAC_ETHER_MACE)
		return -ENODEV;

	printk("MACE ethernet should be present ");

	dev = init_etherdev(0, PRIV_BYTES);
	if (dev == NULL) {
		printk("no free memory.\n");
		return -ENOMEM;
	}
	mp = (struct mace68k_data *) dev->priv;

	dev->base_addr = (u32) MACE_BASE;
	mp->mace = (volatile struct mace *) MACE_BASE;
	printk("at 0x%p", mp->mace);

	/*
	 * 16K RX ring and 4K TX ring should do nicely
	 */

	mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL, 2);
	mp->tx_ring = (void *) __get_free_page(GFP_KERNEL);

	printk(".");

	if (mp->tx_ring == NULL || mp->rx_ring == NULL) {
		if (mp->tx_ring)
			free_page((u32) mp->tx_ring);
		if (mp->rx_ring)
			free_pages((u32) mp->rx_ring, 2);
		printk("\nNo memory for ring buffers.\n");
		return -ENOMEM;
	}

	/* We want the receive data to be uncached. We don't care about
	   the byte reading order */

	printk(".");
	kernel_set_cachemode((void *) mp->rx_ring, 16384, IOMAP_NOCACHE_NONSER);

	printk(".");

	/* The transmit buffer needs to be write through */

	kernel_set_cachemode((void *) mp->tx_ring, 4096, IOMAP_WRITETHROUGH);

	printk(" Ok\n");

	dev->irq = IRQ_MAC_MACE;
	printk(KERN_INFO "%s: MACE at", dev->name);

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out in a strange boundary and the
	 * bits are reversed.
	 */

	addr = (void *) MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev(addr[j << 4]);

		checksum ^= v;
		dev->dev_addr[j] = v;
		printk("%c%.2x", (j ? ':' : ' '), dev->dev_addr[j]);
	}
	for (; j < 8; ++j)
		checksum ^= bitrev(addr[j << 4]);

	if (checksum != 0xFF) {
		printk(" (invalid checksum)\n");
		/* FIXME: the ring buffers allocated above are leaked here */
		return -ENODEV;
	}
	printk("\n");

	memset(&mp->stats, 0, sizeof(mp->stats));
	init_timer(&mp->tx_timeout);
	mp->timeout_active = 0;

	dev->open = mace68k_open;
	dev->stop = mace68k_close;
	dev->hard_start_xmit = mace68k_xmit_start;
	dev->get_stats = mace68k_stats;
	dev->set_multicast_list = mace68k_set_multicast;
	dev->set_mac_address = mace68k_set_address;

	ether_setup(dev);

	mp = (struct mace68k_data *) dev->priv;
	mp->maccc = ENXMT | ENRCV;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	/* apple's driver doesn't seem to do this */
	/* except at driver shutdown time...      */
#if 0
	mace68k_dma_off(dev);
#endif

	return 0;
}

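/*
 * Hypothetical sketch (not from the original source): mace68k_probe()
 * above assumes a bitrev() helper that reverses the bits of one byte,
 * since the PROM stores each address byte bit-reversed. Something like
 * the following would do; the real driver may define it differently.
 */
static u8 bitrev(u8 b)
{
	u8 r = 0;
	int i;

	for (i = 0; i < 8; i++) {
		r = (r << 1) | (b & 1);	/* shift result up, pull low bit of b */
		b >>= 1;
	}
	return r;
}
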
/* Older revision of mace_open(), from before the conversion to
   dma_alloc_coherent() */
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
#if 0
	int i;

	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}

	if (!i) {
		printk(KERN_ERR "%s: software reset failed!!\n", dev->name);
		return -EAGAIN;
	}
#endif

	mb->biucc = XMTSP_64;
	mb->fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU | XMTBRST | RCVBRST;
	mb->xmtfc = AUTO_PAD_XMIT;
	mb->plscc = PORTSEL_AUI;
	/* mb->utr = RTRD; */

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, N_RX_PAGES);
	mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);

	if (mp->tx_ring == NULL || mp->rx_ring == NULL) {
		if (mp->rx_ring)
			free_pages((u32) mp->rx_ring, N_RX_PAGES);
		if (mp->tx_ring)
			free_pages((u32) mp->tx_ring, 0);
		free_irq(dev->irq, dev);
		free_irq(mp->dma_intr, dev);
		printk(KERN_ERR "%s: unable to allocate DMA buffers\n", dev->name);
		return -ENOMEM;
	}

	mp->rx_ring_phys = (unsigned char *) virt_to_bus((void *) mp->rx_ring);
	mp->tx_ring_phys = (unsigned char *) virt_to_bus((void *) mp->tx_ring);

	/* We want the Rx buffer to be uncached and the Tx buffer to be
	   writethrough */

	kernel_set_cachemode((void *) mp->rx_ring, N_RX_PAGES * PAGE_SIZE,
			     IOMAP_NOCACHE_NONSER);
	kernel_set_cachemode((void *) mp->tx_ring, PAGE_SIZE, IOMAP_WRITETHROUGH);

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

#if 0
	/* load up the hardware address */

	mb->iac = ADDRCHG | PHYADDR;
	while ((mb->iac & ADDRCHG) != 0)
		;
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i];

	/* clear the multicast filter */
	mb->iac = ADDRCHG | LOGADDR;
	while ((mb->iac & ADDRCHG) != 0)
		;
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	mb->plscc = PORTSEL_GPSI + ENPLSIO;

	mb->maccc = ENXMT | ENRCV;
	mb->imr = RCVINT;
#endif

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	return 0;
}