void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu3s->mmu_lock);
}
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}
static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
	table->count++;
}
/**
 * bfq_cic_link - add @cic to @ioc.
 * @bfqd: bfq_data @cic refers to.
 * @ioc: io_context @cic belongs to.
 * @cic: the cic to link.
 * @gfp_mask: the mask to use for radix tree preallocations.
 *
 * Add @cic to @ioc, using @bfqd as the search key. This enables us to
 * lookup the process specific cfq io context when entered from the block
 * layer. Also adds @cic to a per-bfqd list, used when this queue is
 * removed.
 */
static int bfq_cic_link(struct bfq_data *bfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (ret == 0) {
		cic->ioc = ioc;

		/* No write-side locking, cic is not published yet. */
		rcu_assign_pointer(cic->key, bfqd);

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->bfq_radix_root,
					bfqd->cic_index, cic);
		if (ret == 0)
			hlist_add_head_rcu(&cic->cic_list, &ioc->bfq_cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (ret == 0) {
			spin_lock_irqsave(bfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &bfqd->cic_list);
			spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
		}
	}

	if (ret != 0)
		printk(KERN_ERR "bfq: cic link failed!\n");

	return ret;
}
static struct vlan_group *vlan_group_alloc(int ifindex)
{
	struct vlan_group *grp;
	unsigned int size;
	unsigned int i;

	grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
	if (!grp)
		return NULL;

	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;

	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) {
		grp->vlan_devices_arrays[i] = kzalloc(size, GFP_KERNEL);
		if (!grp->vlan_devices_arrays[i])
			goto err;
	}

	grp->real_dev_ifindex = ifindex;
	hlist_add_head_rcu(&grp->hlist,
			   &vlan_group_hash[vlan_grp_hashfn(ifindex)]);
	return grp;

err:
	vlan_group_free(grp);
	return NULL;
}
/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type). ovs_mutex must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport *vport;
	int err = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
		if (vport_ops_list[i]->type == parms->type) {
			struct hlist_head *bucket;

			vport = vport_ops_list[i]->create(parms);
			if (IS_ERR(vport)) {
				err = PTR_ERR(vport);
				goto out;
			}

			bucket = hash_bucket(ovs_dp_get_net(vport->dp),
					     vport->ops->get_name(vport));
			hlist_add_head_rcu(&vport->hash_node, bucket);
			return vport;
		}
	}

	err = -EAFNOSUPPORT;

out:
	return ERR_PTR(err);
}
struct fib_table *fib_new_table(struct net *net, u32 id)
{
	struct fib_table *tb, *alias = NULL;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	tb = fib_get_table(net, id);
	if (tb)
		return tb;

	if (id == RT_TABLE_LOCAL)
		alias = fib_new_table(net, RT_TABLE_MAIN);

	tb = fib_trie_table(id, alias);
	if (!tb)
		return NULL;

	switch (id) {
	case RT_TABLE_MAIN:
		rcu_assign_pointer(net->ipv4.fib_main, tb);
		break;
	case RT_TABLE_DEFAULT:
		rcu_assign_pointer(net->ipv4.fib_default, tb);
		break;
	default:
		break;
	}

	h = id & (FIB_TABLE_HASHSZ - 1);
	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
	return tb;
}
int bpf_dp_replicator_add_port(struct plum *plum, u32 replicator_id,
			       u32 port_id)
{
	struct hlist_head *head;
	struct plum_replicator_elem *elem;

	rcu_read_lock();
	elem = replicator_lookup_port(plum, replicator_id, port_id);
	if (elem) {
		rcu_read_unlock();
		return -EEXIST;
	}
	rcu_read_unlock();

	elem = kzalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem)
		return -ENOMEM;

	elem->replicator_id = replicator_id;
	elem->port_id = port_id;

	head = replicator_hash_bucket(plum, replicator_id);
	hlist_add_head_rcu(&elem->hash_node, head);

	return 0;
}
/*
 * rfs_rule_create_mac_rule
 */
int rfs_rule_create_mac_rule(uint8_t *addr, uint16_t cpu, uint32_t hvid,
			     uint32_t is_static)
{
	struct hlist_head *head;
	struct rfs_rule_entry *re;
	struct rfs_rule *rr = &__rr;
	uint32_t type = RFS_RULE_TYPE_MAC_RULE;

	head = &rr->hash[rfs_rule_hash(type, addr)];

	spin_lock_bh(&rr->hash_lock);
	hlist_for_each_entry_rcu(re, head, hlist) {
		if (type != re->type)
			continue;
		if (memcmp(re->mac, addr, ETH_ALEN) == 0)
			break;
	}

	if (re) {
		/*
		 * A non-static request must not overwrite an existing rule.
		 */
		if (!is_static) {
			spin_unlock_bh(&rr->hash_lock);
			return 0;
		}
	}

	/*
	 * Create a rule entry if it doesn't exist
	 */
	if (!re) {
		re = kzalloc(sizeof(struct rfs_rule_entry), GFP_ATOMIC);
		if (!re) {
			spin_unlock_bh(&rr->hash_lock);
			return -1;
		}

		memcpy(re->mac, addr, ETH_ALEN);
		re->type = type;
		re->cpu = RPS_NO_CPU;
		re->hvid = hvid;
		hlist_add_head_rcu(&re->hlist, head);
	}

	RFS_DEBUG("New MAC rule %pM, cpu %d\n", addr, cpu);
	if (re->cpu != cpu && rfs_ess_update_mac_rule(re, cpu) < 0)
		RFS_WARN("Failed to update MAC rule %pM, cpu %d\n", addr, cpu);

	re->is_static = is_static;
	re->cpu = cpu;
	__rfs_rule_update_iprule_by_mac(addr, cpu);
	spin_unlock_bh(&rr->hash_lock);

	return 0;
}
static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}
/* like hash_find, but assigns a new element if not present yet */
struct ws_sta *ws_hash_get(struct ws_hash *hash, u8 *mac)
{
	struct ws_sta *ws_sta;
	spinlock_t *list_lock;	/* spinlock to protect write access */
	struct hlist_head *head;
	u32 index;

	ws_sta = ws_hash_find(hash, mac);
	if (ws_sta)
		return ws_sta;

	ws_sta = kzalloc(sizeof(*ws_sta), GFP_ATOMIC);
	if (!ws_sta)
		return NULL;

	ws_sta_init(ws_sta);
	memcpy(ws_sta->mac, mac, ETH_ALEN);

	/* add new element */
	index = ws_hash_choose(mac);
	head = &hash->table[index];
	list_lock = &hash->list_locks[index];

	/* one for the hash, one for returning */
	atomic_set(&ws_sta->refcount, 2);

	spin_lock_bh(list_lock);
	hlist_add_head_rcu(&ws_sta->hash_entry, head);
	spin_unlock_bh(list_lock);

	return ws_sta;
}
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				   &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
/*
 * Allocate session and enter it in the hash for the local port.
 * Caller holds ft_lport_lock.
 */
static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
				      struct ft_node_acl *acl)
{
	struct ft_sess *sess;
	struct hlist_head *head;
	struct hlist_node *pos;

	head = &tport->hash[ft_sess_hash(port_id)];
	hlist_for_each_entry_rcu(sess, pos, head, hash)
		if (sess->port_id == port_id)
			return sess;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return NULL;

	sess->se_sess = transport_init_session();
	if (IS_ERR(sess->se_sess)) {
		kfree(sess);
		return NULL;
	}
	sess->se_sess->se_node_acl = &acl->se_node_acl;
	sess->tport = tport;
	sess->port_id = port_id;
	kref_init(&sess->kref);	/* ref for table entry */
	hlist_add_head_rcu(&sess->hash, head);
	tport->sess_count++;

	pr_debug("port_id %x sess %p\n", port_id, sess);

	transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
				   sess->se_sess, sess);
	return sess;
}
static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	struct hlist_head *head = ip_bucket(itn, &t->parms);

	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, t);
	hlist_add_head_rcu(&t->hash_node, head);
}
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}
/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
		u32 portid, struct user_namespace *user_ns)
{
	struct nfulnl_instance *inst;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	int err;

	spin_lock_bh(&log->instances_lock);
	if (__instance_lookup(log, group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	refcount_set(&inst->use, 2);

	timer_setup(&inst->timer, nfulnl_timer, 0);

	inst->net = get_net(net);
	inst->peer_user_ns = user_ns;
	inst->peer_portid = portid;
	inst->group_num = group_num;

	inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode = NFULNL_COPY_PACKET;
	inst->copy_range = NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
			   &log->instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&log->instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&log->instances_lock);
	return ERR_PTR(err);
}
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
	unsigned int h;

	rwlock_init(&tb->tb6_lock);

	h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);

	hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
}
/*
 * register the notifier so that event interception for the tracked guest
 * pages can be received.
 */
void kvm_page_track_register_notifier(struct kvm *kvm,
				      struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
	spin_unlock(&kvm->mmu_lock);
}
static int __net_init fib4_rules_init(struct net *net)
{
	struct fib_table *local_table, *main_table;

	/* Allocate a fib_table each for the LOCAL and MAIN tables. */
	local_table = fib_hash_table(RT_TABLE_LOCAL);
	if (local_table == NULL)
		return -ENOMEM;

	main_table = fib_hash_table(RT_TABLE_MAIN);
	if (main_table == NULL)
		goto fail;

	/*
	 * fib_table_hash is a hash array allocated when this function is
	 * called and it has only two slots, so each fib_table is simply
	 * added to its corresponding hlist.
	 */
	hlist_add_head_rcu(&local_table->tb_hlist,
			   &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
	hlist_add_head_rcu(&main_table->tb_hlist,
			   &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
	return 0;

fail:
	kfree(local_table);
	return -ENOMEM;
}
static int __net_init fib4_rules_init(struct net *net)
{
	struct fib_table *local_table, *main_table;

	local_table = fib_hash_table(RT_TABLE_LOCAL);
	if (local_table == NULL)
		return -ENOMEM;

	main_table = fib_hash_table(RT_TABLE_MAIN);
	if (main_table == NULL)
		goto fail;

	hlist_add_head_rcu(&local_table->tb_hlist,
			   &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
	hlist_add_head_rcu(&main_table->tb_hlist,
			   &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
	return 0;

fail:
	kfree(local_table);
	return -ENOMEM;
}
static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
			    struct se_session *se_sess, void *p)
{
	struct ft_sess *sess = p;
	struct ft_tport *tport = sess->tport;
	struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];

	pr_debug("port_id %x sess %p\n", sess->port_id, sess);
	hlist_add_head_rcu(&sess->hash, head);
	tport->sess_count++;

	return 0;
}
/*
 * Add an entry to the hash table.
 */
int virt_hash_table_add(struct virt_hash_table *table,
			struct hlist_node *entry, u32 hash)
{
	struct virt_hash_head *head = &table->head[hash];

	if (WARN_ON(hash >= table->size))
		return -1;

	spin_lock_bh(&head->lock);
	hlist_add_head_rcu(entry, &head->list);
	spin_unlock_bh(&head->lock);

	return 0;
}
static struct nfulnl_instance *
instance_create(u_int16_t group_num, int pid)
{
	struct nfulnl_instance *inst;
	int err;

	spin_lock_bh(&instances_lock);
	if (__instance_lookup(group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);

	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

	inst->peer_pid = pid;
	inst->group_num = group_num;

	inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode = NFULNL_COPY_PACKET;
	inst->copy_range = NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
			   &instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&instances_lock);
	return ERR_PTR(err);
}
/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				   &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	for (i++; i <= ns->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						geneve_rcv_t *rcv, void *data,
						bool ipv6)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&gs->del_work, geneve_del_work);

	sock = geneve_create_sock(net, ipv6, port);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	atomic_set(&gs->refcnt, 1);
	gs->rcv = rcv;
	gs->rcv_data = data;

	/* Initialize the geneve udp offloads structure */
	gs->udp_offloads.port = port;
	gs->udp_offloads.callbacks.gro_receive = NULL;
	gs->udp_offloads.callbacks.gro_complete = NULL;

	spin_lock(&gn->sock_lock);
	hlist_add_head_rcu(&gs->hlist, gs_head(net, port));
	geneve_notify_add_rx_port(gs);
	spin_unlock(&gn->sock_lock);

	/* Mark socket as an encapsulation socket */
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return gs;
}
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
	unsigned int h;

	/*
	 * Initialize table lock at a single place to give lockdep a key,
	 * tables aren't visible prior to being linked to the list.
	 */
	rwlock_init(&tb->tb6_lock);

	h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);

	/*
	 * No protection necessary, this is the only list mutation
	 * operation, tables never disappear once they exist.
	 */
	hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
}
/*
 * Tracker add and del operations support concurrent RCU lookups.
 */
int lttng_pid_tracker_add(struct lttng_pid_tracker *lpf, int pid)
{
	struct hlist_head *head;
	struct lttng_pid_hash_node *e;
	uint32_t hash = hash_32(pid, 32);

	head = &lpf->pid_hash[hash & (LTTNG_PID_TABLE_SIZE - 1)];
	lttng_hlist_for_each_entry(e, head, hlist) {
		if (pid == e->pid)
			return -EEXIST;
	}
	e = kmalloc(sizeof(struct lttng_pid_hash_node), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->pid = pid;
	hlist_add_head_rcu(&e->hlist, head);
	return 0;
}
struct fib_table *fib_new_table(struct net *net, u32 id)
{
	struct fib_table *tb;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	tb = fib_get_table(net, id);
	if (tb)
		return tb;

	tb = fib_hash_table(id);
	if (!tb)
		return NULL;

	h = id & (FIB_TABLE_HASHSZ - 1);
	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
	return tb;
}