/*
 * Destroy all slab caches created by the nilfs module at init time.
 *
 * kmem_cache_destroy() is a no-op when passed NULL, so caches that were
 * never created (or were already torn down) need no explicit NULL check;
 * the former "if (cachep)" guards were redundant.
 */
static void nilfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(nilfs_inode_cachep);
	kmem_cache_destroy(nilfs_transaction_cachep);
	kmem_cache_destroy(nilfs_segbuf_cachep);
	kmem_cache_destroy(nilfs_btree_path_cache);
}
/*
 * Module exit: tear the bridge driver down, roughly in reverse order of
 * the registrations done at init time.
 */
static void __exit br_deinit(void)
{
	stp_proto_unregister(&br_stp_proto);
	br_netlink_fini();
	unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
	unregister_netdevice_notifier(&br_device_notifier);
	brioctl_set(NULL);	/* drop the bridge ioctl hook */
	unregister_pernet_subsys(&br_net_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	br_nf_core_fini();
#if IS_ENABLED(CONFIG_ATM_LANE)
	br_fdb_test_addr_hook = NULL;
#endif
	/* fdb cache freed last: RCU callbacks above may still use it */
	br_fdb_fini();
}
/* Module exit: unregister rate-control algorithms, then mac80211 proper. */
static void __exit ieee80211_exit(void)
{
	rc80211_pid_exit();
	rc80211_minstrel_ht_exit();
	rc80211_minstrel_exit();
	rc80211_maica_exit();

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
	/* older kernels: flush any work still queued on the global workqueue */
	flush_scheduled_work();
#endif

	if (mesh_allocated)
		ieee80211s_stop();

	ieee80211_iface_exit();

	/* wait for pending call_rcu() callbacks before the module text goes away */
	rcu_barrier();
}
/* Tear down every aufs slab cache created at init time. */
static void au_cache_fin(void)
{
	int idx;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	/* excluding AuCache_HNOTIFY */
	BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last);
	for (idx = 0; idx < AuCache_HNOTIFY; idx++) {
		if (!au_cachep[idx])
			continue;
		kmem_cache_destroy(au_cachep[idx]);
		au_cachep[idx] = NULL;
	}
}
/*
 * clean up on module removal
 */
static void __exit afs_exit(void)
{
	printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n");

	afs_fs_exit();
	afs_kill_lock_manager();
	afs_close_socket();
	afs_purge_servers();
	afs_callback_update_kill();
	afs_vlocation_purge();
	destroy_workqueue(afs_wq);
	afs_cell_purge();
#ifdef CONFIG_AFS_FSCACHE
	fscache_unregister_netfs(&afs_cache_netfs);
#endif
	afs_proc_cleanup();
	/* wait for outstanding call_rcu() callbacks before unload */
	rcu_barrier();
}
/*
 * Module exit: tear the bridge driver down in reverse order of the
 * registrations done at init time.
 */
static void __exit br_deinit(void)
{
	stp_proto_unregister(&br_stp_proto);
	br_netlink_fini();
	unregister_netdevice_notifier(&br_device_notifier);
	brioctl_set(NULL);	/* drop the bridge ioctl hook */
	unregister_pernet_subsys(&br_net_ops);

	rcu_barrier(); /* wait for completion of call_rcu()'s */

	br_netfilter_fini();
#if IS_ENABLED(CONFIG_ATM_LANE)
	br_fdb_test_addr_hook = NULL;
#endif
	/* fdb cache freed last: RCU callbacks above may still use it */
	br_fdb_fini();
}
/* Module exit: unwind everything registered by init_sunrpc(). */
static void __exit cleanup_sunrpc(void)
{
	ve_sunrpc_hook_unregister();
	rpcauth_remove_module();
	cleanup_socket_xprt();
	svc_cleanup_xprt_sock();
	unregister_rpc_pipefs();
	rpc_destroy_mempool();
	cache_unregister(&ip_map_cache);
	cache_unregister(&unix_gid_cache);
#ifdef RPC_DEBUG
	rpc_unregister_sysctl();
#endif
#ifdef CONFIG_PROC_FS
	rpc_proc_exit();
#endif
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 *
 * Returns 1 if a fixup was performed, 0 otherwise (debugobjects convention).
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		/* drain all three RCU flavors so the rcu_head is truly idle */
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_activate(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}
/* Module exit: unregister GFS2 and free its slab caches. */
static void __exit exit_gfs2_fs(void)
{
	unregister_shrinker(&qd_shrinker);
	gfs2_glock_exit();
	gfs2_unregister_debugfs();
	unregister_filesystem(&gfs2_fs_type);
	unregister_filesystem(&gfs2meta_fs_type);
	destroy_workqueue(gfs_recovery_wq);

	/* flush delayed RCU frees (e.g. inodes) before destroying their caches */
	rcu_barrier();

	kmem_cache_destroy(gfs2_quotad_cachep);
	kmem_cache_destroy(gfs2_rgrpd_cachep);
	kmem_cache_destroy(gfs2_bufdata_cachep);
	kmem_cache_destroy(gfs2_inode_cachep);
	kmem_cache_destroy(gfs2_glock_aspace_cachep);
	kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
}
/* Module exit: unregister GFS2 and free its slab caches. */
static void __exit exit_gfs2_fs(void)
{
	unregister_shrinker(&qd_shrinker);
	gfs2_glock_exit();
	gfs2_unregister_debugfs();
	unregister_filesystem(&gfs2_fs_type);
	unregister_filesystem(&gfs2meta_fs_type);
	slow_work_unregister_user(THIS_MODULE);

	/* flush delayed RCU frees (e.g. inodes) before destroying their caches */
	rcu_barrier();

	kmem_cache_destroy(gfs2_quotad_cachep);
	kmem_cache_destroy(gfs2_rgrpd_cachep);
	kmem_cache_destroy(gfs2_bufdata_cachep);
	kmem_cache_destroy(gfs2_inode_cachep);
	kmem_cache_destroy(gfs2_glock_aspace_cachep);
	kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
}
/*
 * rfs_exit()
 *	Module exit: shut each RFS subsystem down, then wait for any
 *	outstanding call_rcu() callbacks before the module is unloaded.
 */
static void __exit rfs_exit(void)
{
	RFS_DEBUG("RFS exit\n");
	rfs_fdb_exit();
	rfs_wxt_exit();
	rfs_rule_exit();
	rfs_nbr_exit();
	rfs_cm_exit();
	rfs_ess_exit();
	rfs_proc_exit();
	rcu_barrier();
}
/*
 * Module exit: tear the bridge driver down in reverse order of the
 * registrations done at init time.
 */
static void __exit br_deinit(void)
{
	stp_proto_unregister(&br_stp_proto);
	br_netlink_fini();
	unregister_netdevice_notifier(&br_device_notifier);
	brioctl_set(NULL);	/* drop the bridge ioctl hook */
	unregister_pernet_subsys(&br_net_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	br_netfilter_fini();
#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
	br_fdb_test_addr_hook = NULL;
#endif
	/* detach the rx/tx hooks so core networking stops calling into us */
	br_handle_frame_hook = NULL;
	br_hard_xmit_hook = NULL;
	/* fdb cache freed last: RCU callbacks above may still use it */
	br_fdb_fini();
}
/*
 * clean up the filesystem
 */
void afs_fs_exit(void)
{
	_enter("");

	afs_mntpt_kill_timer();
	unregister_filesystem(&afs_fs_type);

	/* no AFS inode may outlive the filesystem */
	if (atomic_read(&afs_count_active_inodes)) {
		printk("kAFS: %d active inode objects still present\n",
		       atomic_read(&afs_count_active_inodes));
		BUG();
	}

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(afs_inode_cachep);

	_leave("");
}
/*
 * Module exit: tear the bridge driver down in reverse order of the
 * registrations done at init time.
 */
static void __exit br_deinit(void)
{
	stp_proto_unregister(&br_stp_proto);
	br_netlink_fini();
	unregister_netdevice_notifier(&br_device_notifier);
	brioctl_set(NULL);	/* drop the bridge ioctl hook */
	unregister_pernet_subsys(&br_net_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	br_netfilter_fini();
#if defined(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) || defined(CONFIG_NETFILTER_XT_MATCH_PHYSDEV_MODULE)
	br_fdb_get_port_hook = NULL;
#endif
#if IS_ENABLED(CONFIG_ATM_LANE)
	br_fdb_test_addr_hook = NULL;
#endif
	/* fdb cache freed last: RCU callbacks above may still use it */
	br_fdb_fini();
}
/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	rxrpc_sysctl_exit();
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	unregister_pernet_subsys(&rxrpc_net_ops);

	/* all tx/rx skbs must be accounted for by now */
	ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	destroy_workqueue(rxrpc_workqueue);
	rxrpc_exit_security();
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}
/*
 * Module exit: wait for any pending RCU callbacks that might still
 * reference this set type, then unregister it.
 */
static void __exit bitmap_ipmac_fini(void)
{
	rcu_barrier();
	ip_set_type_unregister(&bitmap_ipmac_type);
}
/* Main interface to do xen specific suspend/resume */
static int enter_state(u32 state)
{
    unsigned long flags;
    int error;
    unsigned long cr4;

    /* only ACPI sleep states S1..S_MAX are meaningful here */
    if ( (state <= ACPI_STATE_S0) || (state > ACPI_S_STATES_MAX) )
        return -EINVAL;

    /* serialise power-management entry; fail rather than block */
    if ( !spin_trylock(&pm_lock) )
        return -EBUSY;

    BUG_ON(system_state != SYS_STATE_active);
    system_state = SYS_STATE_suspend;

    printk(XENLOG_INFO "Preparing system for ACPI S%d state.\n", state);

    freeze_domains();

    acpi_dmar_reinstate();

    /* all APs must be offline before the platform sleeps */
    if ( (error = disable_nonboot_cpus()) )
    {
        system_state = SYS_STATE_resume;
        goto enable_cpu;
    }

    cpufreq_del_cpu(0);

    hvm_cpu_down();

    acpi_sleep_prepare(state);

    console_start_sync();
    printk("Entering ACPI S%d state.\n", state);

    local_irq_save(flags);
    spin_debug_disable();

    if ( (error = device_power_down()) )
    {
        printk(XENLOG_ERR "Some devices failed to power down.");
        system_state = SYS_STATE_resume;
        goto done;
    }

    ACPI_FLUSH_CPU_CACHE();

    switch ( state )
    {
    case ACPI_STATE_S3:
        /* suspend-to-RAM; execution resumes here after wakeup */
        do_suspend_lowlevel();
        system_reset_counter++;
        error = tboot_s3_resume();
        break;
    case ACPI_STATE_S5:
        /* soft-off: does not return on success */
        acpi_enter_sleep_state(ACPI_STATE_S5);
        break;
    default:
        error = -EINVAL;
        break;
    }

    system_state = SYS_STATE_resume;

    /* Restore CR4 and EFER from cached values. */
    cr4 = read_cr4();
    write_cr4(cr4 & ~X86_CR4_MCE);      /* MCE masked until MCA re-init */
    write_efer(read_efer());

    device_power_up();

    mcheck_init(&boot_cpu_data, 0);
    write_cr4(cr4);                     /* re-enable MCE */

    printk(XENLOG_INFO "Finishing wakeup from ACPI S%d state.\n", state);

    if ( (state == ACPI_STATE_S3) && error )
        tboot_s3_error(error);

 done:
    spin_debug_enable();
    local_irq_restore(flags);
    console_end_sync();
    acpi_sleep_post(state);
    if ( hvm_cpu_up() )
        BUG();

 enable_cpu:
    cpufreq_add_cpu(0);
    microcode_resume_cpu(0);
    rcu_barrier();
    mtrr_aps_sync_begin();
    enable_nonboot_cpus();
    mtrr_aps_sync_end();
    adjust_vtd_irq_affinities();
    acpi_dmar_zap();
    thaw_domains();
    system_state = SYS_STATE_active;
    spin_unlock(&pm_lock);
    return error;
}
/*
 * initialise the AFS client FS module
 */
static int __init afs_init(void)
{
	int ret;

	printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");

	ret = afs_get_client_UUID();
	if (ret < 0)
		return ret;

	/* create workqueue */
	ret = -ENOMEM;
	afs_wq = alloc_workqueue("afs", 0, 0);
	if (!afs_wq)
		return ret;

	/* register the /proc stuff */
	ret = afs_proc_init();
	if (ret < 0)
		goto error_proc;

#ifdef CONFIG_AFS_FSCACHE
	/* we want to be able to cache */
	ret = fscache_register_netfs(&afs_cache_netfs);
	if (ret < 0)
		goto error_cache;
#endif

	/* initialise the cell DB */
	ret = afs_cell_init(rootcell);
	if (ret < 0)
		goto error_cell_init;

	/* initialise the VL update process */
	ret = afs_vlocation_update_init();
	if (ret < 0)
		goto error_vl_update_init;

	/* initialise the callback update process */
	ret = afs_callback_update_init();
	if (ret < 0)
		goto error_callback_update_init;

	/* create the RxRPC transport */
	ret = afs_open_socket();
	if (ret < 0)
		goto error_open_socket;

	/* register the filesystems */
	ret = afs_fs_init();
	if (ret < 0)
		goto error_fs;

	return ret;

	/* unwind in reverse order; each label undoes one init step above */
error_fs:
	afs_close_socket();
error_open_socket:
	afs_callback_update_kill();
error_callback_update_init:
	afs_vlocation_purge();
error_vl_update_init:
	afs_cell_purge();
error_cell_init:
#ifdef CONFIG_AFS_FSCACHE
	fscache_unregister_netfs(&afs_cache_netfs);
error_cache:
#endif
	afs_proc_cleanup();
error_proc:
	destroy_workqueue(afs_wq);
	rcu_barrier();
	printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
	return ret;
}
/*
 * amdgpu_fence_slab_fini - destroy the amdgpu fence slab cache
 *
 * Wait for outstanding RCU callbacks to complete before destroying the
 * cache, so no callback can touch a freed slab.
 */
void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
static void r92su_survey_done_work(struct work_struct *work) { struct cfg80211_scan_request *req; struct r92su *r92su = container_of(work, struct r92su, survey_done_work.work); mutex_lock(&r92su->lock); if (!r92su_is_open(r92su)) goto out; req = r92su->scan_request; r92su->scan_request = NULL; if (req) { struct cfg80211_scan_info info = { .aborted = false, }; cfg80211_scan_done(req, &info); } r92su->scanned = true; complete(&r92su->scan_done); out: mutex_unlock(&r92su->lock); } static int r92su_stop(struct net_device *ndev) { struct r92su *r92su = ndev->ml_priv; struct cfg80211_bss *tmp_bss; struct llist_node *node; int err = -EINVAL, i; mutex_lock(&r92su->lock); if (r92su_is_connected(r92su)) { err = __r92su_disconnect(r92su); WARN_ONCE(err, "disconnect failed"); } r92su_set_power(r92su, false); if (r92su_is_initializing(r92su)) { err = r92su_hw_mac_deinit(r92su); WARN_ONCE(err, "failed to deinitilize MAC"); } if (r92su_is_initializing(r92su)) r92su_set_state(r92su, R92SU_STOP); if (r92su->scan_request) { struct cfg80211_scan_info info = { .aborted = true, }; cfg80211_scan_done(r92su->scan_request, &info); } tmp_bss = r92su->want_connect_bss; r92su->want_connect_bss = NULL; r92su_bss_free(r92su, tmp_bss); r92su->scan_request = NULL; for (i = 0; i < MAX_STA; i++) r92su_sta_del(r92su, i); mutex_unlock(&r92su->lock); cancel_delayed_work_sync(&r92su->survey_done_work); cancel_delayed_work_sync(&r92su->service_work); cancel_work_sync(&r92su->add_bss_work); cancel_work_sync(&r92su->connect_bss_work); cancel_work_sync(&r92su->disconnect_work); node = llist_del_all(&r92su->add_bss_list); while (node) { struct r92su_add_bss *bss_priv = llist_entry(node, struct r92su_add_bss, head); node = ACCESS_ONCE(node->next); kfree(bss_priv); } /* wait for keys and stas to be freed */ synchronize_rcu(); rcu_barrier(); return err; } static netdev_tx_t r92su_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct r92su *r92su = ndev->ml_priv; switch (r92su->wdev.iftype) { case 
NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: if (skb->len >= ETH_ALEN + ETH_ALEN + 2) r92su_tx(r92su, skb, false); break; case NL80211_IFTYPE_MONITOR: r92su_tx_monitor(r92su, skb); break; default: dev_kfree_skb_any(skb); break; } return NETDEV_TX_OK; } static const struct net_device_ops r92su_netdevice_ops = { .ndo_open = r92su_open, .ndo_stop = r92su_stop, .ndo_start_xmit = r92su_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_set_rx_mode = r92su_set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static void *devm_dup(struct device *dev, void *src, size_t len) { void *tmp; tmp = devm_kzalloc(dev, len, GFP_KERNEL); if (tmp) memcpy(tmp, src, len); return tmp; } static int r92su_init_band(struct r92su *r92su) { struct ieee80211_supported_band *band; band = &r92su->band_2GHZ; band->channels = devm_dup(&r92su->wdev.wiphy->dev, r92su_channeltable, sizeof(r92su_channeltable)); if (!band->channels) return -ENOMEM; band->bitrates = devm_dup(&r92su->wdev.wiphy->dev, r92su_ratetable, sizeof(r92su_ratetable)); if (!band->bitrates) return -ENOMEM; band->n_channels = ARRAY_SIZE(r92su_channeltable); band->n_bitrates = ARRAY_SIZE(r92su_ratetable); memcpy(&band->ht_cap, &r92su_ht_info, sizeof(r92su_ht_info)); band->ht_cap.ht_supported = !r92su->disable_ht; switch (r92su->rf_type) { case R92SU_1T1R: /* nothing needs to be done. The default ht_cap * contains all the necessary bits for just 1T1R * devices */ break; case R92SU_1T2R: case R92SU_2T2R: band->ht_cap.mcs.rx_mask[1] = 0xff; band->ht_cap.mcs.rx_highest = cpu_to_le16(300); break; } r92su->wdev.wiphy->bands[NL80211_BAND_2GHZ] = &r92su->band_2GHZ; return 0; } static const struct ieee80211_txrx_stypes r92su_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_ADHOC] = { .tx = 0xffff, .rx = 0, },
/*
 * Module exit: remove the ieee802154 interfaces, then wait for any
 * outstanding call_rcu() callbacks before the module is unloaded.
 */
static void __exit ieee802154_exit(void)
{
	ieee802154_iface_exit();
	rcu_barrier();
}
/*
 * initialise the AFS client FS module
 */
static int __init afs_init(void)
{
	int ret;

	printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");

	ret = afs_get_client_UUID();
	if (ret < 0)
		return ret;

	/* register the /proc stuff */
	ret = afs_proc_init();
	if (ret < 0)
		return ret;

#ifdef AFS_CACHING_SUPPORT
	/* we want to be able to cache */
	ret = cachefs_register_netfs(&afs_cache_netfs,
				     &afs_cache_cell_index_def);
	if (ret < 0)
		goto error_cache;
#endif

	/* initialise the cell DB */
	ret = afs_cell_init(rootcell);
	if (ret < 0)
		goto error_cell_init;

	/* initialise the VL update process */
	ret = afs_vlocation_update_init();
	if (ret < 0)
		goto error_vl_update_init;

	/*
	 * initialise the callback update process
	 *
	 * Bug fix: this result used to be assigned but never checked, so a
	 * failure here was silently overwritten by afs_open_socket().  The
	 * shared error path below already calls afs_callback_update_kill()
	 * unconditionally, so it is a safe jump target.
	 */
	ret = afs_callback_update_init();
	if (ret < 0)
		goto error_vl_update_init;

	/* create the RxRPC transport */
	ret = afs_open_socket();
	if (ret < 0)
		goto error_open_socket;

	/* register the filesystems */
	ret = afs_fs_init();
	if (ret < 0)
		goto error_fs;

	return ret;

error_fs:
	afs_close_socket();
error_open_socket:
error_vl_update_init:
error_cell_init:
#ifdef AFS_CACHING_SUPPORT
	cachefs_unregister_netfs(&afs_cache_netfs);
error_cache:
#endif
	afs_callback_update_kill();
	afs_vlocation_purge();
	afs_cell_purge();
	afs_proc_cleanup();
	rcu_barrier();
	printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
	return ret;
}
/*
 * Destroy the aufs inode slab cache.
 *
 * Make sure all delayed rcu free inodes are flushed before we
 * destroy the cache; the pointer is cleared so a later call cannot
 * double-destroy.
 */
static void aufs_inode_cache_destroy(void)
{
	rcu_barrier();
	kmem_cache_destroy(aufs_inode_cache);
	aufs_inode_cache = NULL;
}
/*
 * initialise the AFS client FS module
 */
static int __init afs_init(void)
{
	int ret;

	printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");

	ret = afs_get_client_UUID();
	if (ret < 0)
		return ret;

	/* create workqueue */
	ret = -ENOMEM;
	afs_wq = alloc_workqueue("afs", 0, 0);
	if (!afs_wq)
		return ret;

	/* register the /proc stuff */
	ret = afs_proc_init();
	if (ret < 0)
		goto error_proc;

#ifdef CONFIG_AFS_FSCACHE
	/* we want to be able to cache */
	ret = fscache_register_netfs(&afs_cache_netfs);
	if (ret < 0)
		goto error_cache;
#endif

	/* initialise the cell DB */
	ret = afs_cell_init(rootcell);
	if (ret < 0)
		goto error_cell_init;

	/* initialise the VL update process */
	ret = afs_vlocation_update_init();
	if (ret < 0)
		goto error_vl_update_init;

	/* initialise the callback update process */
	ret = afs_callback_update_init();
	if (ret < 0)
		goto error_callback_update_init;

	/* create the RxRPC transport */
	ret = afs_open_socket();
	if (ret < 0)
		goto error_open_socket;

	/* register the filesystems */
	ret = afs_fs_init();
	if (ret < 0)
		goto error_fs;

	return ret;

	/* unwind in reverse order; each label undoes one init step above */
error_fs:
	afs_close_socket();
error_open_socket:
	afs_callback_update_kill();
error_callback_update_init:
	afs_vlocation_purge();
error_vl_update_init:
	afs_cell_purge();
error_cell_init:
#ifdef CONFIG_AFS_FSCACHE
	fscache_unregister_netfs(&afs_cache_netfs);
error_cache:
#endif
	afs_proc_cleanup();
error_proc:
	destroy_workqueue(afs_wq);
	rcu_barrier();
	printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
	return ret;
}