static void __exit scc_enet_cleanup(void)
{
    struct rtnet_device *rtdev = rtdev_root;
    struct scc_enet_private *cep;
    volatile cpm8xx_t *cp = cpmp;
    volatile scc_enet_t *ep;

    if (rtdev) {
        /* Only dereference the private data once we know the device exists. */
        cep = (struct scc_enet_private *)rtdev->priv;

        rtdm_irq_disable(&cep->irq_handle);
        rtdm_irq_free(&cep->irq_handle);

        ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);
        m8xx_cpm_dpfree(ep->sen_genscc.scc_rbase);
        m8xx_cpm_dpfree(ep->sen_genscc.scc_tbase);

        rt_stack_disconnect(rtdev);
        rt_unregister_rtnetdev(rtdev);
        rt_rtdev_disconnect(rtdev);

        printk("%s: unloaded\n", rtdev->name);

        rtskb_pool_release(&cep->skb_pool);
        rtdev_free(rtdev);
        rtdev_root = NULL;
    }
}
/***
 * rt_loopback_close
 * @rtdev
 */
static int rt_loopback_close(struct rtnet_device *rtdev)
{
    rtnetif_stop_queue(rtdev);
    rt_stack_disconnect(rtdev);

    return 0;
}
/***
 * rt_loopback_close
 * @rtdev
 */
static int rt_loopback_close(struct rtnet_device *rtdev)
{
    rtnetif_stop_queue(rtdev);
    rt_stack_disconnect(rtdev);

    RTNET_MOD_DEC_USE_COUNT;

    return 0;
}
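/*
 * Illustrative counterpart (not part of the original sources): a minimal
 * sketch of the open path that the two close variants above undo, assuming
 * the usual RTnet helpers rt_stack_connect()/rtnetif_start_queue(), the
 * RTNET_MOD_INC_USE_COUNT macro, and the STACK_manager instance a
 * loopback-style driver would typically attach to.
 */
static int rt_loopback_open(struct rtnet_device *rtdev)
{
    RTNET_MOD_INC_USE_COUNT;                 /* pin the module while the device is up */

    rt_stack_connect(rtdev, &STACK_manager); /* attach to the RTnet stack manager */
    rtnetif_start_queue(rtdev);              /* allow transmissions */

    return 0;
}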
static int tulip_close(/*RTnet*/struct rtnet_device *rtdev)
{
    long ioaddr = rtdev->base_addr;
    struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
    int i;

    rtnetif_stop_queue(rtdev);

    tulip_down(rtdev);

    if (tulip_debug > 1)
        printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
               rtdev->name, inl(ioaddr + CSR5));

    rtdm_irq_free(&tp->irq_handle);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct /*RTnet*/rtskb *skb = tp->rx_buffers[i].skb;
        dma_addr_t mapping = tp->rx_buffers[i].mapping;

        tp->rx_buffers[i].skb = NULL;
        tp->rx_buffers[i].mapping = 0;

        tp->rx_ring[i].status = 0;           /* Not owned by Tulip chip. */
        tp->rx_ring[i].length = 0;
        tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
        if (skb) {
            pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
                             PCI_DMA_FROMDEVICE);
            /*RTnet*/dev_kfree_rtskb(skb);
        }
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        struct /*RTnet*/rtskb *skb = tp->tx_buffers[i].skb;

        if (skb != NULL) {
            pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
                             skb->len, PCI_DMA_TODEVICE);
            /*RTnet*/dev_kfree_rtskb(skb);
        }
        tp->tx_buffers[i].skb = NULL;
        tp->tx_buffers[i].mapping = 0;
    }

    rt_stack_disconnect(rtdev);

    return 0;
}
/***
 * rt2x00_close
 * @rtnet_dev
 */
static int rt2x00_close(struct rtnet_device *rtnet_dev)
{
    struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
    struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);

    DEBUG("Start.\n");

    if (!test_and_clear_bit(DEVICE_ENABLED, &core->flags)) {
        ERROR("device already disabled.\n");
        return -EBUSY;
    }

    rt2x00_radio_off(core);

    rtnetif_stop_queue(rtnet_dev);
    rt_stack_disconnect(rtnet_dev);

    RTNET_MOD_DEC_USE_COUNT;

    return 0;
}
static int rt2x00_dev_radio_off(struct _rt2x00_device *device)
{
    struct _rt2x00_pci *rt2x00pci = rt2x00_priv(device);
    u32 reg = 0x00000000;
    int retval = 0;

    /* Power down the radio. */
    rt2x00_register_write(rt2x00pci, PWRCSR0, cpu_to_le32(0x00000000));

    /* Abort any pending TX and disable RX. */
    rt2x00_register_read(rt2x00pci, TXCSR0, &reg);
    rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
    rt2x00_register_write(rt2x00pci, TXCSR0, reg);

    rt2x00_register_read(rt2x00pci, RXCSR0, &reg);
    rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
    rt2x00_register_write(rt2x00pci, RXCSR0, reg);

    /* Turn off the link LED. */
    rt2x00_register_read(rt2x00pci, LEDCSR, &reg);
    rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
    rt2x00_register_write(rt2x00pci, LEDCSR, reg);

    /* Mask all interrupt sources. */
    rt2x00_register_read(rt2x00pci, CSR8, &reg);
    rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, 1);
    rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 1);
    rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 1);
    rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 1);
    rt2x00_set_field32(&reg, CSR8_RXDONE, 1);
    rt2x00_register_write(rt2x00pci, CSR8, reg);

    rt2x00_pci_free_rings(device);

    if ((retval = rtdm_irq_free(&rt2x00pci->irq_handle)) != 0)
        ERROR("rtdm_irq_free=%d\n", retval);

    rt_stack_disconnect(device->rtnet_dev);

    return retval;
}
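/*
 * Generic teardown skeleton (a sketch, not taken from any of the drivers
 * above): the close routines shown here all follow roughly the same order --
 * stop the TX queue, quiesce the hardware, release the RTDM interrupt, free
 * driver-owned buffers, then detach from the RTnet stack and drop the module
 * use count. struct my_priv, my_hw_quiesce() and my_free_buffers() are
 * hypothetical stand-ins for driver-specific state and cleanup.
 */
static int generic_rtnet_close(struct rtnet_device *rtdev)
{
    struct my_priv *priv = rtdev->priv;   /* hypothetical private data */

    rtnetif_stop_queue(rtdev);            /* no new real-time transmissions */

    my_hw_quiesce(priv);                  /* abort TX/RX, mask interrupts (hypothetical) */

    rtdm_irq_free(&priv->irq_handle);     /* release the RTDM interrupt line */

    my_free_buffers(priv);                /* unmap DMA buffers, free rtskbs (hypothetical) */

    rt_stack_disconnect(rtdev);           /* detach from the RTnet stack */

    RTNET_MOD_DEC_USE_COUNT;              /* allow the module to unload again */

    return 0;
}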