/*
 * pci200_close() - ndo_stop handler for a PCI200SYN port.
 *
 * Stops the SCA channel, flushes the card, then runs the generic HDLC
 * close path.  Always returns 0.
 */
static int pci200_close(struct net_device *dev)
{
	sca_close(dev);		/* stop the SCA channel backing this device */
	/* NOTE(review): presumably flushes pending card-level state/DMA
	 * after the channel stop — confirm against sca_flush(). */
	sca_flush(dev_to_port(dev)->card);
	hdlc_close(dev);	/* generic HDLC layer teardown */
	return 0;
}
/*
 * lmc_proto_close() - close the protocol layer for this card.
 *
 * Only LMC_PPP interfaces have protocol state to tear down (the generic
 * HDLC layer); all other if_type values are a no-op.  Entry and exit
 * are traced.
 */
void lmc_proto_close(lmc_softc_t *sc)
{
	struct net_device *dev = sc->lmc_device;

	lmc_trace(dev, "lmc_proto_close in");

	if (sc->if_type == LMC_PPP)
		hdlc_close(dev);

	lmc_trace(dev, "lmc_proto_close out");
}
/*
 * t3e3_close() - ndo_stop handler.
 *
 * Closes the generic HDLC layer, stops queueing, halts the device core,
 * clears the "network up" flag, and drops the module reference.
 * NOTE(review): module_put() balances a reference presumably taken in
 * the open path — confirm against t3e3_open().
 */
static int t3e3_close(struct net_device *dev)
{
	struct channel *channel = dev_to_priv(dev);

	hdlc_close(dev);
	netif_stop_queue(dev);
	dc_stop(channel);
	channel->r.flags &= ~SBE_2T3E3_FLAG_NETWORK_UP;
	module_put(THIS_MODULE);
	return 0;
}
/*
 * c101_close() - ndo_stop handler for the C101 card.
 *
 * Stops the SCA channel, then drops the modem-control signals before
 * the generic HDLC teardown.  Always returns 0.
 */
static int c101_close(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);

	sca_close(dev);
	/* Write 0 to the DTR register — presumably deasserts DTR; confirm
	 * against the C101 register map. */
	writeb(0, port->win0base + C101_DTR);
	/* CTL_NORTS into the MSCI CTL register — presumably deasserts RTS. */
	sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
	hdlc_close(dev);
	return 0;
}
/*
 * hss_hdlc_close() - ndo_stop handler for an IXP4xx HSS port.
 *
 * Stops RX/TX, then drains every descriptor back from the NPE queues,
 * counting them so leaks are reported.  The whole teardown runs under
 * npe_lock with IRQs disabled.  Always returns 0, even if buffers could
 * not be reclaimed (only a critical message is logged).
 */
static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* descriptors still expected back from the NPE */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);
	hss_stop_hdlc(port);

	/* Reclaim RX descriptors from both the free queue and the
	 * received queue; each one found decrements the expected count. */
	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n", buffs);

	/* Same game for TX: first cancel anything still pending ... */
	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--;

	/* ... then poll the tx-ready queue, giving the NPE up to
	 * MAX_CLOSE_WAIT iterations to hand the rest back. */
	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n", buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
	qmgr_disable_irq(queue_ids[port->id].txdone);

	/* Optional platform hook, still under the lock. */
	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}
/*
 * chan_close() - stop a channel's net_device.
 *
 * Halts queueing, brings the MUSYCC channel down, closes the generic
 * HDLC layer, and drops the module reference.  Always returns 0.
 */
STATIC int chan_close(struct net_device *ndev)
{
	hdlc_device *hdlc = dev_to_hdlc(ndev);
	const struct c4_priv *priv = hdlc->priv;

	netif_stop_queue(ndev);
	musycc_chan_down((ci_t *)0, priv->channum);
	hdlc_close(ndev);
	module_put(THIS_MODULE);
	return 0;
}
/*
 * n2_close() - ndo_stop handler for an N2 port.
 *
 * Reads the card's MCR, stops the SCA channel, then writes the MCR back
 * with this port's TX422 bit kept and DTR turned off, and finally runs
 * the generic HDLC teardown.  Always returns 0.
 */
static int n2_close(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	int io = port->card->io;
	u8 mcr = inb(io + N2_MCR);

	/* Preserve the TX422 bit for whichever port this is. */
	if (port->phy_node)
		mcr |= TX422_PORT1;
	else
		mcr |= TX422_PORT0;

	sca_close(dev);

	/* set DTR OFF */
	if (port->phy_node)
		mcr |= DTR_PORT1;
	else
		mcr |= DTR_PORT0;
	outb(mcr, io + N2_MCR);

	hdlc_close(dev);
	return 0;
}
static int hss_hdlc_open(struct net_device *dev) { struct port *port = dev_to_port(dev); unsigned long flags; int i, err = 0; if ((err = hdlc_open(dev))) return err; if ((err = hss_load_firmware(port))) goto err_hdlc_close; if ((err = request_hdlc_queues(port))) goto err_hdlc_close; if ((err = init_hdlc_queues(port))) goto err_destroy_queues; spin_lock_irqsave(&npe_lock, flags); if (port->plat->open) if ((err = port->plat->open(port->id, dev, hss_hdlc_set_carrier))) goto err_unlock; spin_unlock_irqrestore(&npe_lock, flags); for (i = 0; i < TX_DESCS; i++) queue_put_desc(port->plat->txreadyq, tx_desc_phys(port, i), tx_desc_ptr(port, i)); for (i = 0; i < RX_DESCS; i++) queue_put_desc(queue_ids[port->id].rxfree, rx_desc_phys(port, i), rx_desc_ptr(port, i)); napi_enable(&port->napi); netif_start_queue(dev); qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY, hss_hdlc_rx_irq, dev); qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY, hss_hdlc_txdone_irq, dev); qmgr_enable_irq(queue_ids[port->id].txdone); ports_open++; hss_set_hdlc_cfg(port); hss_config(port); hss_start_hdlc(port); napi_schedule(&port->napi); return 0; err_unlock: spin_unlock_irqrestore(&npe_lock, flags); err_destroy_queues: destroy_hdlc_queues(port); release_hdlc_queues(port); err_hdlc_close: hdlc_close(dev); return err; }
/*
 * pc300_close() - ndo_stop handler: stop the SCA channel, then run the
 * generic HDLC teardown.  Always returns 0.
 */
static int pc300_close(struct net_device *dev)
{
	sca_close(dev);
	hdlc_close(dev);
	return 0;
}