/*
 * Tear down netpoll state for a target created from a boot/module
 * parameter, then release its memory.
 *
 * The deferred cleanup work is flushed first so it cannot run against
 * freed memory; netpoll_cleanup() is invoked only when the target still
 * holds an active (or half-torn-down) netpoll instance.
 */
static void free_param_target(struct netpoll_target *nt)
{
	cancel_work_sync(&nt->cleanup_work);

	if (nt->np_state == NETPOLL_ENABLED ||
	    nt->np_state == NETPOLL_CLEANING)
		netpoll_cleanup(&nt->np);

	kfree(nt);
}
/*
 * Module-exit teardown for kgdb-over-ethernet: release the netpoll
 * instance, mark the transport unconfigured, then detach the I/O ops
 * from kgdb. Order preserved: kgdb is unregistered last.
 */
static void cleanup_kgdboe(void)
{
	netpoll_cleanup(&np);
	configured = 0;
	kgdb_unregister_io_module(&local_kgdb_io_ops);
}
/*..........................................................................*/
/*
 * QF framework cleanup hook. When software tracing (Q_SPY) is built in:
 * stop the QS idle thread, flush any buffered trace records, tear down
 * the netpoll output channel if one was brought up, and free the trace
 * buffer.
 */
void QF_onCleanup(void)
{
#ifdef Q_SPY
	idle_running = 0;		/* signal the idle thread to exit */
	complete(&idle_done);		/* wait-free handshake with the thread */
	QS_onFlush();
	if (np.dev)			/* netpoll was actually initialized */
		netpoll_cleanup(&np);
	kfree(qsBuf);			/* kfree(NULL) is a no-op; no guard needed */
#endif
}
/*
 * Release a netpoll wrapper and every resource it acquired, in reverse
 * acquisition order: tracepoint hook, netpoll instance, rx handler
 * (under RTNL), and finally the wrapper itself. Safe to call with NULL.
 */
void netpoll_wrapper_free(struct netpoll_wrapper *pWrapper)
{
	if (!pWrapper)
		return;

	if (pWrapper->tracepoint_registered)
		unregister_tracepoint_wrapper(netif_receive_skb,
					      hook_receive_skb, pWrapper);

	if (pWrapper->netpoll_initialized)
		netpoll_cleanup(&pWrapper->netpoll_obj);

	if (pWrapper->pDeviceWithHandler) {
		rtnl_lock();
		netdev_rx_handler_unregister(pWrapper->pDeviceWithHandler);
		rtnl_unlock();
	}

	kfree(pWrapper);
}
static void deferred_netpoll_cleanup(struct work_struct *work) { struct netpoll_target *nt; struct netpoll_targets *nts; unsigned long flags; nt = container_of(work, struct netpoll_target, cleanup_work); nts = nt->nts; netpoll_cleanup(&nt->np); spin_lock_irqsave(&nts->lock, flags); BUG_ON(nt->np_state != NETPOLL_CLEANING); nt->np_state = NETPOLL_DISABLED; spin_unlock_irqrestore(&nts->lock, flags); netpoll_target_put(nt); }
/*
 * Module-exit teardown for netdump: release the netpoll instance, then
 * free the platform-specific dump stack.
 */
static void cleanup_netdump(void)
{
	netpoll_cleanup(&np);
	platform_cleanup_stack(netdump_stack);
}
/*
 * Tear down the netpoll for a netconsole target created from a
 * boot/module parameter, then free the target.
 */
static void free_param_target(struct netconsole_target *nt)
{
	netpoll_cleanup(&nt->np);
	kfree(nt);
}
/*
 * Module-exit teardown for netconsole. The console is unregistered
 * first so no further messages can be routed to the netpoll instance
 * while it is being torn down.
 */
static void cleanup_netconsole(void)
{
	unregister_console(&netconsole);
	netpoll_cleanup(&np);
}