/*
 * Bring the HDLC interface up: open the generic HDLC layer, mark the
 * channel as up, start the controller and let the stack queue packets.
 * Returns 0 on success or the hdlc_open() error code.
 */
static int t3e3_open(struct net_device *dev)
{
	struct channel *sc = dev_to_priv(dev);
	int err = hdlc_open(dev);

	if (err)
		return err;

	sc->r.flags |= SBE_2T3E3_FLAG_NETWORK_UP;
	dc_start(sc);
	netif_start_queue(dev);
	/* Pin the module while the interface is up; dropped in t3e3_close(). */
	try_module_get(THIS_MODULE);
	return 0;
}
static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct channel *sc = dev_to_priv(dev); int cmd_2t3e3, len, rlen; struct t3e3_param param; struct t3e3_resp resp; void __user *data = ifr->ifr_data + sizeof(cmd_2t3e3) + sizeof(len); if (cmd == SIOCWANDEV) return hdlc_ioctl(dev, ifr, cmd); if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (cmd != SIOCDEVPRIVATE + 15) return -EINVAL; if (copy_from_user(&cmd_2t3e3, ifr->ifr_data, sizeof(cmd_2t3e3))) return -EFAULT; if (copy_from_user(&len, ifr->ifr_data + sizeof(cmd_2t3e3), sizeof(len))) return -EFAULT; if (len > sizeof(param)) return -EFAULT; if (len) if (copy_from_user(¶m, data, len)) return -EFAULT; t3e3_if_config(sc, cmd_2t3e3, (char *)¶m, &resp, &rlen); if (rlen) if (copy_to_user(data, &resp, rlen)) return -EFAULT; return 0; }
/*
 * Probe-time initialization of one channel: enable the PCI function,
 * claim its BARs, allocate and configure the HDLC net_device, apply the
 * controller register quirk, cache PCI config state, initialize the
 * hardware and hook up the shared interrupt.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * BUG FIX: the failure paths for alloc_hdlcdev(), setup_device() and
 * request_irq() previously jumped to cleanup with err still 0 (left
 * over from the last successful PCI call), so probe failure was
 * reported to the caller as success.
 */
static int __devinit t3e3_init_channel(struct channel *channel, struct pci_dev *pdev, struct card *card)
{
	struct net_device *dev;
	unsigned int val;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "SBE 2T3E3");
	if (err)
		goto disable;

	dev = alloc_hdlcdev(channel);
	if (!dev) {
		printk(KERN_ERR "SBE 2T3E3" ": Out of memory\n");
		err = -ENOMEM;		/* was: returned 0 on allocation failure */
		goto free_regions;
	}

	t3e3_sc_init(channel);
	dev_to_priv(dev) = channel;

	channel->pdev = pdev;
	channel->dev = dev;
	channel->card = card;
	channel->addr = pci_resource_start(pdev, 0);
	if (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)
		channel->h.slot = 1;
	else
		channel->h.slot = 0;

	/* NOTE(review): assumes setup_device() returns a negative errno on
	 * failure (it was previously only tested for non-zero) — confirm. */
	err = setup_device(dev, channel);
	if (err)
		goto free_regions;

	/* Clear the top two bits of config register 0x40 (hardware quirk —
	 * presumably a 21143 CSR mapping workaround; original comment was
	 * empty). */
	pci_read_config_dword(channel->pdev, 0x40, &val);
	pci_write_config_dword(channel->pdev, 0x40, val & 0x3FFFFFFF);

	pci_read_config_byte(channel->pdev, PCI_CACHE_LINE_SIZE, &channel->h.cache_size);
	pci_read_config_dword(channel->pdev, PCI_COMMAND, &channel->h.command);
	t3e3_init(channel);

	err = request_irq(dev->irq, &t3e3_intr, IRQF_SHARED, dev->name, dev);
	if (err) {
		printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
		goto free_regions;
	}

	pci_set_drvdata(pdev, channel);
	return 0;

	/* FIXME(review): dev is leaked on the post-allocation failure paths;
	 * freeing it safely needs to know whether setup_device() registered
	 * it (unregister + free_netdev vs. free_netdev alone). */
free_regions:
	pci_release_regions(pdev);
disable:
	pci_disable_device(pdev);
	return err;
}
/*
 * Take the HDLC interface down: close the generic HDLC layer, stop the
 * transmit queue and the controller, clear the up flag and release the
 * module reference taken in t3e3_open(). Always returns 0.
 */
static int t3e3_close(struct net_device *dev)
{
	struct channel *priv = dev_to_priv(dev);

	hdlc_close(dev);
	netif_stop_queue(dev);
	dc_stop(priv);
	priv->r.flags &= ~SBE_2T3E3_FLAG_NETWORK_UP;
	module_put(THIS_MODULE);
	return 0;
}
static struct net_device_stats *t3e3_get_stats(struct net_device *dev) { struct net_device_stats *nstats = &dev->stats; struct channel *sc = dev_to_priv(dev); struct t3e3_stats *stats = &sc->s; memset(nstats, 0, sizeof(struct net_device_stats)); nstats->rx_packets = stats->in_packets; nstats->tx_packets = stats->out_packets; nstats->rx_bytes = stats->in_bytes; nstats->tx_bytes = stats->out_bytes; nstats->rx_errors = stats->in_errors; nstats->tx_errors = stats->out_errors; nstats->rx_crc_errors = stats->in_error_crc; nstats->rx_dropped = stats->in_dropped; nstats->tx_dropped = stats->out_dropped; nstats->tx_carrier_errors = stats->out_error_lost_carr + stats->out_error_no_carr; return nstats; }
/*
 * ndo_start_xmit: queue one skb on the 21143-style DMA transmit ring.
 *
 * Drops (and counts) the frame when the transmitter is off or the line
 * is out-of-frame with no loopback; returns NETDEV_TX_BUSY (stopping
 * the queue) when no free descriptor is available. On success the
 * descriptors are filled, ownership is handed to the controller last,
 * and a transmit poll demand is issued. Runs under ether.tx_lock.
 *
 * NOTE(review): the two one-pass "for (skb2 = skb; ...; skb2 = NULL)"
 * loops look like remnants of an mbuf-chain walk (the dev_dbg text
 * still says "mbuf"); each executes at most once per fragment here.
 */
int t3e3_if_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct channel *sc = dev_to_priv(dev);
	u32 current_write, last_write;
	unsigned long flags;
	struct sk_buff *skb2;

	/* Nothing to send — count it as an error and report success. */
	if (skb == NULL) {
		sc->s.out_errors++;
		return 0;
	}

	/* Transmitter disabled: drop the frame, accounting it. */
	if (sc->p.transmitter_on != SBE_2T3E3_ON) {
		sc->s.out_errors++;
		sc->s.out_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Out-of-frame with no loopback configured: line is unusable, drop. */
	if (sc->s.OOF && sc->p.loopback == SBE_2T3E3_LOOPBACK_NONE) {
		sc->s.out_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	spin_lock_irqsave(&sc->ether.tx_lock, flags);

	/*
	 * First pass: check descriptor availability without writing
	 * anything. The loop leaves skb2 non-NULL (via break) when the
	 * ring cannot take the frame.
	 */
	current_write = sc->ether.tx_ring_current_write;
	for (skb2 = skb; skb2 != NULL; skb2 = NULL) {
		if (skb2->len) {
			/* Descriptor still holds a buffer size -> still in use. */
			if ((sc->ether.tx_ring[current_write].tdes1 &
			     SBE_2T3E3_TX_DESC_BUFFER_1_SIZE) > 0)
				break;
			current_write =
				(current_write + 1) % SBE_2T3E3_TX_DESC_RING_SIZE;
			/*
			 * Leave at least 1 tx desc free so that dc_intr_tx() can
			 * identify empty list
			 */
			if (current_write == sc->ether.tx_ring_current_read)
				break;
		}
	}

	/* Ring full: stop the queue and ask the stack to requeue the skb. */
	if (skb2 != NULL) {
		netif_stop_queue(sc->dev);
		sc->ether.tx_full = 1;
		dev_dbg(&sc->pdev->dev, "SBE 2T3E3: out of descriptors\n");
		spin_unlock_irqrestore(&sc->ether.tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Second pass: actually fill the descriptor(s). */
	current_write = last_write = sc->ether.tx_ring_current_write;
	dev_dbg(&sc->pdev->dev, "sending mbuf (current_write = %d)\n",
		current_write);
	for (skb2 = skb; skb2 != NULL; skb2 = NULL) {
		if (skb2->len) {
			dev_dbg(&sc->pdev->dev,
				"sending mbuf (len = %d, next = %p)\n",
				skb2->len, NULL);
			sc->ether.tx_free_cnt--;
			sc->ether.tx_ring[current_write].tdes0 = 0;
			/* Keep only the ring-topology bits of tdes1. */
			sc->ether.tx_ring[current_write].tdes1 &=
				SBE_2T3E3_TX_DESC_END_OF_RING |
				SBE_2T3E3_TX_DESC_SECOND_ADDRESS_CHAINED;
			/* DISABLE_PADDING sometimes gets lost somehow, hands off... */
			sc->ether.tx_ring[current_write].tdes1 |=
				SBE_2T3E3_TX_DESC_DISABLE_PADDING | skb2->len;

			if (current_write == sc->ether.tx_ring_current_write) {
				/* Head descriptor: mark first segment but do NOT
				 * give it to the chip yet — its OWN bit is set
				 * last, below, to publish the whole frame at once. */
				sc->ether.tx_ring[current_write].tdes1 |=
					SBE_2T3E3_TX_DESC_FIRST_SEGMENT;
			} else {
				sc->ether.tx_ring[current_write].tdes0 =
					SBE_2T3E3_TX_DESC_21143_OWN;
			}

			sc->ether.tx_ring[current_write].tdes2 =
				virt_to_phys(skb2->data);
			sc->ether.tx_data[current_write] = NULL;

			last_write = current_write;
			current_write =
				(current_write + 1) % SBE_2T3E3_TX_DESC_RING_SIZE;
		}
	}

	/* Remember the skb on the last descriptor so dc_intr_tx() can free
	 * it on completion; request a completion interrupt there too. */
	sc->ether.tx_data[last_write] = skb;
	sc->ether.tx_ring[last_write].tdes1 |=
		SBE_2T3E3_TX_DESC_LAST_SEGMENT |
		SBE_2T3E3_TX_DESC_INTERRUPT_ON_COMPLETION;
	/* Publish the frame: hand the head descriptor to the controller. */
	sc->ether.tx_ring[sc->ether.tx_ring_current_write].tdes0 |=
		SBE_2T3E3_TX_DESC_21143_OWN;
	sc->ether.tx_ring_current_write = current_write;

	dev_dbg(&sc->pdev->dev, "txput: tdes0 = %08X tdes1 = %08X\n",
		sc->ether.tx_ring[last_write].tdes0,
		sc->ether.tx_ring[last_write].tdes1);

	/* Kick the controller to re-scan the transmit ring. */
	dc_write(sc->addr, SBE_2T3E3_21143_REG_TRANSMIT_POLL_DEMAND,
		 0xffffffff);
	spin_unlock_irqrestore(&sc->ether.tx_lock, flags);
	return 0;
}