static void irqtxerr_handler(struct net_device *dev)
{
        ibmlana_priv *priv = (ibmlana_priv *) dev->priv;
        tda_t tda;

        /* fetch descriptor to check status */
        isa_memcpy_fromio(&tda, dev->mem_start + priv->tdastart +
                          (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));

        /* update statistics */
        priv->stat.tx_errors++;
        if (tda.status & (TCREG_NCRS | TCREG_CRSL))
                priv->stat.tx_carrier_errors++;
        if (tda.status & TCREG_EXC)
                priv->stat.tx_aborted_errors++;
        if (tda.status & TCREG_OWC)
                priv->stat.tx_window_errors++;
        if (tda.status & TCREG_FU)
                priv->stat.tx_fifo_errors++;

        /* update our pointers */
        priv->txused[priv->currtxdescr] = 0;
        priv->txusedcnt--;

        /* if there are more descriptors present in RAM, start them */
        if (priv->txusedcnt > 0)
                StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);

        /* tell the upper layer we can go on transmitting */
        netif_wake_queue(dev);
}
static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
        unsigned long hdr_start = dev->mem_start + ((ring_page - ES_START_PG) << 8);

        isa_memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
        hdr->count = (hdr->count + 3) & ~3;     /* Round up allocation. */
}
static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
        unsigned long xfer_start = dev->mem_start + ring_offset - (ES_START_PG << 8);

        if (xfer_start + count > ei_status.rmem_end) {
                /* Packet wraps over end of ring buffer. */
                int semi_count = ei_status.rmem_end - xfer_start;

                isa_memcpy_fromio(skb->data, xfer_start, semi_count);
                count -= semi_count;
                isa_memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, count);
        } else {
                /* Packet is in one chunk. */
                isa_eth_io_copy_and_sum(skb, xfer_start, count, 0);
        }
}
int msnd_fifo_write(msnd_fifo *f, const char *buf, size_t len)
{
        int count = 0;

        while ((count < len) && (f->len != f->n)) {
                int nwritten;

                if (f->head <= f->tail) {
                        /* free space runs from tail to the end of the buffer */
                        nwritten = len - count;
                        if (nwritten > f->n - f->tail)
                                nwritten = f->n - f->tail;
                } else {
                        /* free space runs from tail up to head */
                        nwritten = f->head - f->tail;
                        if (nwritten > len - count)
                                nwritten = len - count;
                }

                /* buf is treated as an ISA memory address here, hence the cast */
                isa_memcpy_fromio(f->data + f->tail, (unsigned long) buf, nwritten);

                count += nwritten;
                buf += nwritten;
                f->len += nwritten;
                f->tail += nwritten;
                f->tail %= f->n;
        }

        return count;
}
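/*
 * A minimal, self-contained sketch of the circular-buffer arithmetic used by
 * msnd_fifo_write() above, with plain memcpy() standing in for
 * isa_memcpy_fromio().  The fifo_t type and fifo_put() name are hypothetical,
 * for illustration only: head is the read index, tail the write index, len
 * the bytes stored, n the capacity.  Each pass copies at most one contiguous
 * run, so a write that wraps past the end of the buffer simply takes two
 * passes of the loop.
 */
#include <stddef.h>
#include <string.h>

typedef struct {
        char *data;             /* backing storage, n bytes */
        size_t head;            /* read index */
        size_t tail;            /* write index */
        size_t len;             /* bytes currently stored */
        size_t n;               /* capacity */
} fifo_t;

static size_t fifo_put(fifo_t *f, const char *buf, size_t len)
{
        size_t count = 0;

        while (count < len && f->len != f->n) {
                /* largest contiguous free run starting at tail */
                size_t run = (f->head <= f->tail) ? f->n - f->tail
                                                  : f->head - f->tail;
                if (run > len - count)
                        run = len - count;

                memcpy(f->data + f->tail, buf + count, run);
                count += run;
                f->len += run;
                f->tail = (f->tail + run) % f->n;
        }
        return count;
}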
static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
        unsigned long xfer_start = dev->mem_start + ring_offset - (START_PG << 8);

        if (xfer_start + count > ei_status.rmem_end) {
                /* We must wrap the input move. */
                int semi_count = ei_status.rmem_end - xfer_start;

                isa_memcpy_fromio(skb->data, xfer_start, semi_count);
                count -= semi_count;
                isa_memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, count);
        } else {
                /* Packet is in one chunk -- we can copy + cksum. */
                isa_eth_io_copy_and_sum(skb, xfer_start, count, 0);
        }
}
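/*
 * Sketch of the ring-buffer wrap handling shared by es_block_input() and
 * ultramca_block_input() above, assuming a receive ring spanning
 * [rmem_start, rmem_end) and with plain memcpy() in place of
 * isa_memcpy_fromio().  The ring_copy() name and pointer parameters are
 * stand-ins for illustration.  A packet that crosses rmem_end is fetched in
 * two pieces: the tail of the ring first, then the remainder from the start
 * of the ring.
 */
#include <string.h>

static void ring_copy(char *dst, const char *xfer_start, size_t count,
                      const char *rmem_start, const char *rmem_end)
{
        if (xfer_start + count > rmem_end) {
                /* packet wraps past the end of the ring */
                size_t semi_count = rmem_end - xfer_start;

                memcpy(dst, xfer_start, semi_count);
                memcpy(dst + semi_count, rmem_start, count - semi_count);
        } else {
                /* packet is contiguous */
                memcpy(dst, xfer_start, count);
        }
}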
static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
        unsigned long hdr_start = dev->mem_start + ((ring_page - START_PG) << 8);

#ifdef notdef
        /* Officially this is what we are doing, but the readl() is faster */
        isa_memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
#else
        ((unsigned int *) hdr)[0] = isa_readl(hdr_start);
#endif
}
static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
        int ioaddr = dev->base_addr - NIC_OFFSET;
        int option_reg = inw(ioaddr + HPP_OPTION);

        outw((ring_page << 8), ioaddr + HPP_IN_ADDR);
        outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
        isa_memcpy_fromio(hdr, dev->mem_start, sizeof(struct e8390_pkt_hdr));
        outw(option_reg, ioaddr + HPP_OPTION);
        hdr->count = (hdr->count + 3) & ~3;     /* Round up allocation. */
}
static void hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
        int ioaddr = dev->base_addr - NIC_OFFSET;
        int option_reg = inw(ioaddr + HPP_OPTION);

        outw(ring_offset, ioaddr + HPP_IN_ADDR);
        outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
        /* Caution: this relies on get_8390_hdr() rounding up count!
           Also note that we *can't* use eth_io_copy_and_sum() because
           it will not always copy "count" bytes (e.g. padded IP). */
        isa_memcpy_fromio(skb->data, dev->mem_start, count);
        outw(option_reg, ioaddr + HPP_OPTION);
}
static void irqtx_handler(struct net_device *dev)
{
        ibmlana_priv *priv = (ibmlana_priv *) dev->priv;
        tda_t tda;

        /* fetch descriptor (we forgot the size ;-) */
        isa_memcpy_fromio(&tda, dev->mem_start + priv->tdastart +
                          (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));

        /* update statistics */
        priv->stat.tx_packets++;
        priv->stat.tx_bytes += tda.length;

        /* update our pointers */
        priv->txused[priv->currtxdescr] = 0;
        priv->txusedcnt--;

        /* if there are more descriptors present in RAM, start them */
        if (priv->txusedcnt > 0)
                StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);

        /* tell the upper layer we can go on transmitting */
        netif_wake_queue(dev);
}
/* Read the 4 byte, page aligned 8390 specific header. */
static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
        int boguscount;
        unsigned long hdr_start = dev->mem_start + ((ring_page - EL2_MB1_START_PG) << 8);
        unsigned short word;

        if (dev->mem_start) {   /* Use the shared memory. */
                isa_memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
                return;
        }

        /*
         * No shared memory, use programmed I/O.
         */

        /* The target address is ring_page << 8, so ring_page itself is the
           high address byte and the low byte is zero. */
        word = (unsigned short) ring_page;
        outb(word & 0xFF, E33G_DMAAH);
        outb(word >> 8, E33G_DMAAL);

        outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) |
               ECNTRL_INPUT | ECNTRL_START, E33G_CNTRL);

        boguscount = 0x1000;
        while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) {
                if (!boguscount--) {
                        printk("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
                        memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
                        el2_reset_8390(dev);
                        goto blocked;
                }
        }
        insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr)) >> 1);
blocked:
        outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
}
static void el16_rx(struct net_device *dev)
{
        struct net_local *lp = (struct net_local *) dev->priv;
        unsigned long shmem = dev->mem_start;
        ushort rx_head = lp->rx_head;
        ushort rx_tail = lp->rx_tail;
        ushort boguscount = 10;
        short frame_status;

        while ((frame_status = isa_readw(shmem + rx_head)) < 0) {       /* Command complete */
                unsigned long read_frame = dev->mem_start + rx_head;
                ushort rfd_cmd = isa_readw(read_frame + 2);
                ushort next_rx_frame = isa_readw(read_frame + 4);
                ushort data_buffer_addr = isa_readw(read_frame + 6);
                unsigned long data_frame = dev->mem_start + data_buffer_addr;
                ushort pkt_len = isa_readw(data_frame);

                if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22
                    || (pkt_len & 0xC000) != 0xC000) {
                        printk("%s: Rx frame at %#x corrupted, status %04x cmd %04x "
                               "next %04x data-buf @%04x %04x.\n", dev->name,
                               rx_head, frame_status, rfd_cmd, next_rx_frame,
                               data_buffer_addr, pkt_len);
                } else if ((frame_status & 0x2000) == 0) {
                        /* Frame Rxed, but with error. */
                        lp->stats.rx_errors++;
                        if (frame_status & 0x0800)
                                lp->stats.rx_crc_errors++;
                        if (frame_status & 0x0400)
                                lp->stats.rx_frame_errors++;
                        if (frame_status & 0x0200)
                                lp->stats.rx_fifo_errors++;
                        if (frame_status & 0x0100)
                                lp->stats.rx_over_errors++;
                        if (frame_status & 0x0080)
                                lp->stats.rx_length_errors++;
                } else {
                        /* Malloc up new buffer. */
                        struct sk_buff *skb;

                        pkt_len &= 0x3fff;
                        skb = dev_alloc_skb(pkt_len + 2);
                        if (skb == NULL) {
                                printk("%s: Memory squeeze, dropping packet.\n",
                                       dev->name);
                                lp->stats.rx_dropped++;
                                break;
                        }

                        skb_reserve(skb, 2);
                        skb->dev = dev;

                        /* 'skb->data' points to the start of sk_buff data area. */
                        isa_memcpy_fromio(skb_put(skb, pkt_len), data_frame + 10, pkt_len);

                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        dev->last_rx = jiffies;
                        lp->stats.rx_packets++;
                        lp->stats.rx_bytes += pkt_len;
                }

                /* Clear the status word and set End-of-List on the rx frame. */
                isa_writew(0, read_frame);
                isa_writew(0xC000, read_frame + 2);
                /* Clear the end-of-list on the prev. RFD. */
                isa_writew(0x0000, dev->mem_start + rx_tail + 2);

                rx_tail = rx_head;
                rx_head = next_rx_frame;
                if (--boguscount == 0)
                        break;
        }

        lp->rx_head = rx_head;
        lp->rx_tail = rx_tail;
}
static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
        int boguscount = 0;
        unsigned short int *buf;
        unsigned short word;
        int end_of_ring = dev->rmem_end;

        /* Maybe enable shared memory just to be safe... nahh. */
        if (dev->mem_start) {   /* Use the shared memory. */
                ring_offset -= (EL2_MB1_START_PG << 8);
                if (dev->mem_start + ring_offset + count > end_of_ring) {
                        /* We must wrap the input move. */
                        int semi_count = end_of_ring - (dev->mem_start + ring_offset);

                        isa_memcpy_fromio(skb->data, dev->mem_start + ring_offset, semi_count);
                        count -= semi_count;
                        isa_memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
                } else {
                        /* Packet is in one chunk -- we can copy + cksum. */
                        isa_eth_io_copy_and_sum(skb, dev->mem_start + ring_offset, count, 0);
                }
                return;
        }

        /*
         * No shared memory, use programmed I/O.
         */
        word = (unsigned short) ring_offset;
        outb(word >> 8, E33G_DMAAH);
        outb(word & 0xFF, E33G_DMAAL);
        outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) |
               ECNTRL_INPUT | ECNTRL_START, E33G_CNTRL);

        /*
         * Here I also try to get data as fast as possible. I am betting that I
         * can read one extra byte without clobbering anything in the kernel because
         * this would only occur on an odd byte-count and allocation of skb->data
         * is word-aligned. Variable 'count' is NOT checked. Caller must check
         * for a valid count.
         * [This is currently quite safe.... but if one day the 3c503 explodes
         * you know where to come looking ;)]
         */
        buf = (unsigned short int *) skb->data;
        count = (count + 1) >> 1;
        for (;;) {
                boguscount = 0x1000;
                while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) {
                        if (!boguscount--) {
                                printk("%s: FIFO blocked in el2_block_input.\n", dev->name);
                                el2_reset_8390(dev);
                                goto blocked;
                        }
                }
                if (count > WRD_COUNT) {
                        insw(E33G_FIFOH, buf, WRD_COUNT);
                        buf += WRD_COUNT;
                        count -= WRD_COUNT;
                } else {
                        insw(E33G_FIFOH, buf, count);
                        break;
                }
        }
blocked:
        outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
}
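/*
 * el2_get_8390_hdr() and el2_block_input() above share the same bounded
 * busy-wait on ESTAT_DPRDY before draining the FIFO.  A minimal sketch of
 * that pattern factored into a helper; the e33g_wait_fifo_ready() name is
 * hypothetical, and inb(), E33G_STATUS and ESTAT_DPRDY come from the
 * surrounding 3c503 driver context.  Returns 0 once the FIFO signals ready,
 * -1 on timeout so the caller can reset the 8390 as both functions do.
 */
static int e33g_wait_fifo_ready(void)
{
        int boguscount = 0x1000;

        while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
                if (!boguscount--)
                        return -1;      /* FIFO blocked, caller resets the chip */
        return 0;
}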
static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
{
        ibmlana_priv *priv = (ibmlana_priv *) dev->priv;
        int retval = 0, tmplen, addr;
        unsigned long flags;
        tda_t tda;
        int baddr;

        /* find out if there are free slots for a frame to transmit. If not,
           the upper layer is in deep desperation and we simply ignore the frame. */
        if (priv->txusedcnt >= TXBUFCNT) {
                retval = -EIO;
                priv->stat.tx_dropped++;
                goto tx_done;
        }

        /* copy the frame data into the next free transmit buffer, padding
           short frames up to the 60 byte Ethernet minimum */
        tmplen = skb->len;
        if (tmplen < 60)
                tmplen = 60;
        baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
        isa_memcpy_toio(dev->mem_start + baddr, skb->data, skb->len);

        /* copy filler into RAM - in case we're filling up...
           we're filling a bit more than necessary, but that doesn't harm
           since the buffer is far larger...
           Sorry Linus for the filler string but I couldn't resist ;-) */
        if (tmplen > skb->len) {
                char *fill = "NetBSD is a nice OS too! ";
                unsigned int destoffs = skb->len, l = strlen(fill);

                while (destoffs < tmplen) {
                        isa_memcpy_toio(dev->mem_start + baddr + destoffs, fill, l);
                        destoffs += l;
                }
        }

        /* set up the new frame descriptor */
        addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
        isa_memcpy_fromio(&tda, dev->mem_start + addr, sizeof(tda_t));
        tda.length = tda.fraglength = tmplen;
        isa_memcpy_toio(dev->mem_start + addr, &tda, sizeof(tda_t));

        /* if there were no active descriptors, trigger the SONIC */
        spin_lock_irqsave(&priv->lock, flags);

        priv->txusedcnt++;
        priv->txused[priv->nexttxdescr] = 1;

        /* are all transmission slots used up ? */
        if (priv->txusedcnt >= TXBUFCNT)
                netif_stop_queue(dev);

        if (priv->txusedcnt == 1)
                StartTx(dev, priv->nexttxdescr);
        priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;

        spin_unlock_irqrestore(&priv->lock, flags);
tx_done:
        dev_kfree_skb(skb);
        return retval;
}
static void irqrx_handler(struct net_device *dev)
{
        ibmlana_priv *priv = (ibmlana_priv *) dev->priv;
        rda_t rda;
        u32 rdaaddr, lrdaaddr;

        /* loop until ... */
        while (1) {
                /* read descriptor that was next to be filled by SONIC */
                rdaaddr = priv->rdastart + (priv->nextrxdescr * sizeof(rda_t));
                lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
                isa_memcpy_fromio(&rda, dev->mem_start + rdaaddr, sizeof(rda_t));

                /* iron out upper word halves of fields we use - SONIC will duplicate
                   bits 0..15 to 16..31 */
                rda.status &= 0xffff;
                rda.length &= 0xffff;
                rda.startlo &= 0xffff;

                /* stop if the SONIC still owns it, i.e. there is no data for us */
                if (rda.inuse)
                        break;

                /* good packet? */
                else if (rda.status & RCREG_PRX) {
                        struct sk_buff *skb;

                        /* fetch buffer */
                        skb = dev_alloc_skb(rda.length + 2);
                        if (skb == NULL)
                                priv->stat.rx_dropped++;
                        else {
                                /* copy out data */
                                isa_memcpy_fromio(skb_put(skb, rda.length),
                                                  dev->mem_start + rda.startlo,
                                                  rda.length);

                                /* set up skb fields */
                                skb->dev = dev;
                                skb->protocol = eth_type_trans(skb, dev);
                                skb->ip_summed = CHECKSUM_NONE;

                                /* bookkeeping */
                                dev->last_rx = jiffies;
                                priv->stat.rx_packets++;
                                priv->stat.rx_bytes += rda.length;

                                /* pass to the upper layers */
                                netif_rx(skb);
                        }
                }

                /* otherwise check error status bits and increase statistics */
                else {
                        priv->stat.rx_errors++;
                        if (rda.status & RCREG_FAER)
                                priv->stat.rx_frame_errors++;
                        if (rda.status & RCREG_CRCR)
                                priv->stat.rx_crc_errors++;
                }

                /* descriptor processed, will become new last descriptor in queue */
                rda.link = 1;
                rda.inuse = 1;
                isa_memcpy_toio(dev->mem_start + rdaaddr, &rda, sizeof(rda_t));

                /* set up link and EOL = 0 in currently last descriptor. Only write
                   the link field since the SONIC may currently already access the
                   other fields. */
                isa_memcpy_toio(dev->mem_start + lrdaaddr + 20, &rdaaddr, 4);

                /* advance indices */
                priv->lastrxdescr = priv->nextrxdescr;
                if ((++priv->nextrxdescr) >= priv->rxbufcnt)
                        priv->nextrxdescr = 0;
        }
}