static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *ndsttable,
                               struct hlist_head *nsrctable,
                               struct hlist_head *nspitable,
                               unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp;
        struct xfrm_state *x;

        hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
                unsigned int h;

                h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.reqid, x->props.family,
                                    nhashmask);
                hlist_add_head(&x->bydst, ndsttable + h);

                h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.family, nhashmask);
                hlist_add_head(&x->bysrc, nsrctable + h);

                if (x->id.spi) {
                        h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
                                            x->id.proto, x->props.family,
                                            nhashmask);
                        hlist_add_head(&x->byspi, nspitable + h);
                }
        }
}
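All of the snippets in this section hang entries on the kernel-style intrusive hash list. For reference, here is a minimal sketch of the primitives they assume, modeled on the kernel's <linux/list.h> (the real header adds RCU and WRITE_ONCE variants; note also that older kernels' hlist_for_each_entry* iterators, as used in several snippets below, took an extra struct hlist_node cursor argument):

struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next;
        struct hlist_node **pprev;      /* points at the previous ->next (or ->first) */
};

#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}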
void pin_insert_group(struct fs_pin *pin, struct vfsmount *m,
                      struct hlist_head *p)
{
        spin_lock(&pin_lock);
        if (p)
                hlist_add_head(&pin->s_list, p);
        hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins);
        spin_unlock(&pin_lock);
}
/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * mountpoint 'dest_mp'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list: list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
                  struct mount *source_mnt, struct hlist_head *tree_list)
{
        struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
        struct mount *m, *child;
        int ret = 0;
        struct mount *prev_dest_mnt = dest_mnt;
        struct mount *prev_src_mnt = source_mnt;
        HLIST_HEAD(tmp_list);

        for (m = propagation_next(dest_mnt, dest_mnt); m;
             m = propagation_next(m, dest_mnt)) {
                int type;
                struct mount *source;

                if (IS_MNT_NEW(m))
                        continue;

                source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);

                /* Notice when we are propagating across user namespaces */
                if (m->mnt_ns->user_ns != user_ns)
                        type |= CL_UNPRIVILEGED;

                child = copy_tree(source, source->mnt.mnt_root, type);
                if (IS_ERR(child)) {
                        ret = PTR_ERR(child);
                        tmp_list = *tree_list;
                        tmp_list.first->pprev = &tmp_list.first;
                        INIT_HLIST_HEAD(tree_list);
                        goto out;
                }

                if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
                        mnt_set_mountpoint(m, dest_mp, child);
                        hlist_add_head(&child->mnt_hash, tree_list);
                } else {
                        /*
                         * This can happen if the parent mount was bind mounted
                         * on some subdirectory of a shared/slave mount.
                         */
                        hlist_add_head(&child->mnt_hash, &tmp_list);
                }
                prev_dest_mnt = m;
                prev_src_mnt = child;
        }
out:
        lock_mount_hash();
        while (!hlist_empty(&tmp_list)) {
                child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
                umount_tree(child, 0);
        }
        unlock_mount_hash();
        return ret;
}
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
}
int main(int argc, char **argv)
{
        struct hlist_head head = { NULL };      /* must be initialized to NULL */
        int sum;
        struct cup teacup[MAX];
        int i;

        for (i = 0; i < MAX; i++) {
                teacup[i].node.next = NULL;
                teacup[i].node.pprev = NULL;
                teacup[i].price = i;
                /* note: this library takes (head, node), unlike the kernel's
                 * hlist_add_head(node, head) */
                hlist_add_head(&head, &teacup[i].node);
        }

        hlist_traverse_by_node(&head, &teacup[55].node, print_int, NULL);

        sum = 0;
        hlist_traverse_by_head(&head, sum_price, &sum);
        printf("sum = %d\n", sum);

        /* delete test */
        for (i = MAX / 2; i < MAX; i++)
                hlist_delete_node(&teacup[i].node);

        sum = 0;
        hlist_traverse_by_head(&head, sum_price, &sum);
        printf("sum = %d\n", sum);

        return 0;
}
int main(void)
{
        struct hlist_head testlist;
        struct hlistitem item1;
        struct hlistitem item2;
        struct hlistitem item3;
        struct hlistitem item4;
        struct hlistitem *item;
        size_t i;

        item1.i = 1;
        item2.i = 2;
        item3.i = 3;
        item4.i = 4;

        INIT_HLIST_HEAD(&testlist);
        assert(hlist_empty(&testlist));

        /* head insert plus three add-before calls build the list 1,2,3,4 */
        hlist_add_head(&item4.list, &testlist);
        hlist_add_before(&item2.list, &item4.list);
        hlist_add_before(&item1.list, &item2.list);
        hlist_add_before(&item3.list, &item4.list);

        i = 1;
        hlist_for_each_entry_t(item, &testlist, struct hlistitem, list) {
                assert(item->i == i);
                i++;
        }
        assert(i == 5);

        return 0;
}
/**
 * Add a non-existing key into the multimap.
 *
 * @tmap: multimap.
 * @key: key.
 * @newcell: prepared value to insert.
 * @gfp_mask: allocation mask.
 *
 * @return 0 in success,
 *         -ENOMEM if no memory.
 *         -EEXIST if key already exists.
 */
static int multimap_add_newkey(struct multimap *tmap, u64 key,
                               struct tree_cell *newcell, gfp_t gfp_mask)
{
        int ret;
        struct tree_cell_head *newhead;

        /* Allocate and initialize new tree cell head. */
        newhead = alloc_cell_head(tmap->mmgr, gfp_mask);
        if (!newhead) {
                LOGe("memory allocation failed.\n");
                return -ENOMEM;
        }
        newhead->key = key;
        INIT_HLIST_HEAD(&newhead->head);
        hlist_add_head(&newcell->list, &newhead->head);
        ASSERT(!hlist_empty(&newhead->head));

        /* Add to the map. */
        ret = map_add((struct map *)tmap, key, (unsigned long)newhead, gfp_mask);
        if (ret != 0) {
                free_cell_head(tmap->mmgr, newhead);
                LOGe("map_add failed.\n");
                ASSERT(ret != -EINVAL);
        }
        return ret;
}
static void null_init_internal(void)
{
        static HLIST_HEAD(__list);

        null_sec.ps_policy = &null_policy;
        atomic_set(&null_sec.ps_refcount, 1);           /* always busy */
        null_sec.ps_id = -1;
        null_sec.ps_import = NULL;
        null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
        null_sec.ps_flvr.sf_flags = 0;
        null_sec.ps_part = LUSTRE_SP_ANY;
        null_sec.ps_dying = 0;
        spin_lock_init(&null_sec.ps_lock);
        atomic_set(&null_sec.ps_nctx, 1);               /* for "null_cli_ctx" */
        INIT_LIST_HEAD(&null_sec.ps_gc_list);
        null_sec.ps_gc_interval = 0;
        null_sec.ps_gc_next = 0;

        hlist_add_head(&null_cli_ctx.cc_cache, &__list);
        atomic_set(&null_cli_ctx.cc_refcount, 1);       /* for hash */
        null_cli_ctx.cc_sec = &null_sec;
        null_cli_ctx.cc_ops = &null_ctx_ops;
        null_cli_ctx.cc_expire = 0;
        null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
                                PTLRPC_CTX_UPTODATE;
        null_cli_ctx.cc_vcred.vc_uid = 0;
        spin_lock_init(&null_cli_ctx.cc_lock);
        INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
        INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
struct k_node *k_lookup(struct x_node *x0, struct x_node *x1, int flags)
{
        struct hash_table *t = &k_hash_table;
        size_t hash = pair_hash(x0->x_hash, x1->x_hash, t->t_shift);
        struct hlist_head *head = t->t_table + (hash & t->t_mask);
        struct hlist_node *node;
        struct k_node *k;

        hlist_for_each_entry(k, node, head, k_hash_node) {
                if (k->k_x[0] == x0 && k->k_x[1] == x1)
                        return k;
        }

        if (!(flags & L_CREATE))
                return NULL;

        if (x_which(x0) != 0 || x_which(x1) != 1) {
                errno = EINVAL;
                return NULL;
        }

        k = malloc(sizeof(*k));
        if (k == NULL)
                return NULL;

        /* k_init() */
        memset(k, 0, sizeof(*k));
        hlist_add_head(&k->k_hash_node, head);
        k->k_x[0] = x0;
        k->k_x[1] = x1;
        INIT_LIST_HEAD(&k->k_sub_list);
        nr_k++;

        return k;
}
static void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
{
        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
        atomic_inc(&ctx->cc_refcount);
        hlist_add_head(&ctx->cc_cache, hash);
}
void x_init(struct x_node *x, int type, struct x_node *parent, size_t hash,
            struct hlist_head *head, const char *name)
{
        memset(x, 0, sizeof(*x));
        x->x_type = &x_types[type];
        x->x_type->x_nr++;

        ASSERT(parent != NULL || type == X_U || type == X_V);

        if (parent == NULL) {
                INIT_LIST_HEAD(&x->x_parent_link);
        } else {
                parent->x_nr_child++;
                list_add_tail(&x->x_parent_link, &parent->x_child_list);
                x->x_parent = parent;
        }

        INIT_LIST_HEAD(&x->x_child_list);
        INIT_LIST_HEAD(&x->x_sub_list);
        x->x_hash = hash;

        /* FIXME We don't look to see if name is already hashed. */
        if (head == NULL) {
                struct hash_table *t = &x->x_type->x_hash_table;
                head = t->t_table + (hash & t->t_mask);
        }
        hlist_add_head(&x->x_hash_node, head);

        strcpy(x->x_name, name);
}
static int patch_set_options(struct vport *vport, struct nlattr *options)
{
        struct patch_vport *patch_vport = patch_vport_priv(vport);
        struct patch_config *patchconf;
        int err;

        patchconf = kmemdup(rtnl_dereference(patch_vport->patchconf),
                            sizeof(struct patch_config), GFP_KERNEL);
        if (!patchconf) {
                err = -ENOMEM;
                goto error;
        }

        err = patch_set_config(vport, options, patchconf);
        if (err)
                goto error_free;

        assign_config_rcu(vport, patchconf);

        hlist_del(&patch_vport->hash_node);
        rcu_assign_pointer(patch_vport->peer,
                           ovs_vport_locate(ovs_dp_get_net(vport->dp),
                                            patchconf->peer_name));
        hlist_add_head(&patch_vport->hash_node,
                       hash_bucket(ovs_dp_get_net(vport->dp),
                                   patchconf->peer_name));
        return 0;

error_free:
        kfree(patchconf);
error:
        return err;
}
static int qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
                                                 struct qede_arfs_fltr_node *fltr,
                                                 u16 bucket_idx)
{
        fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
                                       fltr->buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
                DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
                qede_free_arfs_filter(edev, fltr);
                return -ENOMEM;
        }

        INIT_HLIST_NODE(&fltr->node);
        hlist_add_head(&fltr->node,
                       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));

        edev->arfs->filter_count++;
        if (edev->arfs->filter_count == 1 &&
            edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
                edev->ops->configure_arfs_searcher(edev->cdev,
                                                   fltr->tuple.mode);
                edev->arfs->mode = fltr->tuple.mode;
        }

        return 0;
}
int register_kprobe(struct kprobe *p)
{
        int ret = 0;
        unsigned long flags = 0;

        if ((ret = arch_prepare_kprobe(p)) != 0)
                goto rm_kprobe;

        spin_lock_irqsave(&kprobe_lock, flags);
        INIT_HLIST_NODE(&p->hlist);
        if (get_kprobe(p->addr)) {
                ret = -EEXIST;
                goto out;
        }
        arch_copy_kprobe(p);

        hlist_add_head(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

        p->opcode = *p->addr;
        *p->addr = BREAKPOINT_INSTRUCTION;
        flush_icache_range((unsigned long)p->addr,
                           (unsigned long)p->addr + sizeof(kprobe_opcode_t));
out:
        spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
        if (ret == -EEXIST)
                arch_remove_kprobe(p);
        return ret;
}
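As a usage illustration for the register_kprobe() above, a hypothetical caller fills in a struct kprobe and registers it; probe_target and my_pre_handler are illustrative names for this sketch, not from the source:

/* Hypothetical caller of the register_kprobe() above.  probe_target and
 * my_pre_handler are placeholder names. */
static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        return 0;       /* let the probed instruction execute */
}

static struct kprobe my_probe = {
        .addr = (kprobe_opcode_t *)probe_target,
        .pre_handler = my_pre_handler,
};

static int __init my_probe_init(void)
{
        return register_kprobe(&my_probe);      /* -EEXIST if addr is already probed */
}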
/*
 * Lookup file info. If it doesn't exist, create a file info struct
 * and open a (VFS) file for the given inode.
 *
 * FIXME:
 * Note that we open the file O_RDONLY even when creating write locks.
 * This is not quite right, but for now, we assume the client performs
 * the proper R/W checking.
 */
__be32
nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
                struct nfs_fh *f)
{
        struct hlist_node *pos;
        struct nlm_file *file;
        unsigned int hash;
        __be32 nfserr;

        nlm_debug_print_fh("nlm_lookup_file", f);

        hash = file_hash(f);

        /* Lock file table */
        mutex_lock(&nlm_file_mutex);

        hlist_for_each_entry(file, pos, &nlm_files[hash], f_list)
                if (!nfs_compare_fh(&file->f_handle, f))
                        goto found;

        nlm_debug_print_fh("creating file for", f);

        nfserr = nlm_lck_denied_nolocks;
        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file)
                goto out_unlock;

        memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
        mutex_init(&file->f_mutex);
        INIT_HLIST_NODE(&file->f_list);
        INIT_LIST_HEAD(&file->f_blocks);

        /* Open the file. Note that this must not sleep for too long, else
         * we would lock up lockd:-) So no NFS re-exports, folks.
         *
         * We have to make sure we have the right credential to open
         * the file.
         */
        if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) {
                dprintk("lockd: open failed (error %d)\n", nfserr);
                goto out_free;
        }

        hlist_add_head(&file->f_list, &nlm_files[hash]);

found:
        dprintk("lockd: found file %p (count %d)\n", file, file->f_count);
        *result = file;
        file->f_count++;
        nfserr = 0;

out_unlock:
        mutex_unlock(&nlm_file_mutex);
        return nfserr;

out_free:
        kfree(file);
        goto out_unlock;
}
static void block_hash(struct block_device *bdev, struct block *block)
{
        struct hlist_head *head;
        int hash;

        hash = bdev_hash(bdev->no, block->b_nr) & BLOCK_HASH_MASK;
        head = &block_htable[hash];
        hlist_add_head(&block->b_hnode, head);
}
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        u64 index;

        /* Add to ePTE list */
        index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
        hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

        /* Add to vPTE list */
        index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
        hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

        /* Add to vPTE_long list */
        index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
        hlist_add_head(&pte->list_vpte_long,
                       &vcpu->arch.hpte_hash_vpte_long[index]);
}
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        kimn->irq = irq;
        hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
        mutex_unlock(&kvm->irq_lock);
}
static inline void get_entry(struct trunk_entry_incore *entry,
                             struct hlist_head *head)
{
        hlist_add_head(&entry->hash, head);
        pthread_mutex_lock(&active_list_lock);
        list_add(&entry->active_list, &trunk_active_list);
        trunk_entry_active_nr++;
        pthread_mutex_unlock(&active_list_lock);
}
static struct nfulnl_instance *
instance_create(u_int16_t group_num, int pid)
{
        struct nfulnl_instance *inst;

        UDEBUG("entering (group_num=%u, pid=%d)\n", group_num, pid);

        write_lock_bh(&instances_lock);
        if (__instance_lookup(group_num)) {
                inst = NULL;
                UDEBUG("aborting, instance already exists\n");
                goto out_unlock;
        }

        inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst)
                goto out_unlock;

        memset(inst, 0, sizeof(*inst));
        INIT_HLIST_NODE(&inst->hlist);
        inst->lock = SPIN_LOCK_UNLOCKED;
        /* needs to be two, since we _put() after creation */
        atomic_set(&inst->use, 2);

        init_timer(&inst->timer);
        inst->timer.function = nfulnl_timer;
        inst->timer.data = (unsigned long)inst;
        /* don't start timer yet. (re)start it with every packet */

        inst->peer_pid = pid;
        inst->group_num = group_num;

        inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
        inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT;
        inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT;
        inst->copy_mode = NFULNL_COPY_PACKET;
        inst->copy_range = 0xffff;

        if (!try_module_get(THIS_MODULE))
                goto out_free;

        hlist_add_head(&inst->hlist,
                       &instance_table[instance_hashfn(group_num)]);

        UDEBUG("newly added node: %p, next=%p\n", &inst->hlist,
               inst->hlist.next);

        write_unlock_bh(&instances_lock);

        return inst;

out_free:
        instance_put(inst);
out_unlock:
        write_unlock_bh(&instances_lock);
        return NULL;
}
static void insert_into_hash(struct buffer_head *buf)
{
        struct hlist_head *head = &HASH(buf->b_dev, buf->b_blocknr);

        spin_lock(&hash_lock);
        hlist_add_head(&buf->list_free, head);
        buffer_count++;
        spin_unlock(&hash_lock);
}
void add_item(struct hlist_head *hhead)
{
        struct my_hlist *item = malloc(sizeof *item);

        if (!item)
                abort();
        INIT_LIST_HEAD(&item->nested);
        INIT_HLIST_NODE(&item->node);
        hlist_add_head(&item->node, hhead);
}
static bool __add_ipc_port(struct shim_ipc_port *port, IDTYPE vmid,
                           int type, port_fini fini)
{
        bool need_restart = false;
        assert(vmid != cur_process.vmid);

        if (vmid && !port->info.vmid) {
                port->info.vmid = vmid;
                port->update = true;
        }

        if (port->info.vmid && hlist_unhashed(&port->hlist)) {
                struct hlist_head *head = &ipc_port_pool[PID_HASH(vmid)];
                __get_ipc_port(port);
                hlist_add_head(&port->hlist, head);
        }

        if (!(port->info.type & IPC_PORT_IFPOLL) && (type & IPC_PORT_IFPOLL))
                need_restart = true;

        if ((port->info.type & type) != type) {
                port->info.type |= type;
                port->update = true;
        }

        if (fini && (type & ~IPC_PORT_IFPOLL)) {
                port_fini *cb = port->fini;
                for ( ; cb < port->fini + MAX_IPC_PORT_FINI_CB; cb++)
                        if (!*cb || *cb == fini)
                                break;

                assert(cb < port->fini + MAX_IPC_PORT_FINI_CB);
                *cb = fini;
        }

        if (need_restart) {
                if (list_empty(&port->list)) {
                        __get_ipc_port(port);
                        list_add(&port->list, &pobj_list);
                        port->recent = true;
                } else if (!port->recent) {
                        list_del_init(&port->list);
                        list_add(&port->list, &pobj_list);
                        port->recent = true;
                }
                return true;
        }

        if (list_empty(&port->list)) {
                __get_ipc_port(port);
                list_add_tail(&port->list, &pobj_list);
        }
        return false;
}
static int msm_pmem_table_add(struct hlist_head *ptype,
                              struct msm_pmem_info *info)
{
        struct file *file;
        unsigned long paddr;
#ifdef CONFIG_ANDROID_PMEM
        unsigned long kvstart;
        int rc;
#endif
        unsigned long len;
        struct msm_pmem_region *region;

#ifdef CONFIG_ANDROID_PMEM
        rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
        if (rc < 0) {
                pr_err("%s: get_pmem_file fd %d error %d\n",
                       __func__, info->fd, rc);
                return rc;
        }
        if (!info->len)
                info->len = len;
        rc = check_pmem_info(info, len);
        if (rc < 0)
                return rc;
#else
        paddr = 0;
        file = NULL;
#endif
        paddr += info->offset;
        len = info->len;

        if (check_overlap(ptype, paddr, len) < 0)
                return -EINVAL;

        CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
             __func__, info->type, info->active, paddr,
             (unsigned long)info->vaddr);

        region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        INIT_HLIST_NODE(&region->list);

        region->paddr = paddr;
        region->len = len;
        region->file = file;
        memcpy(&region->info, info, sizeof(region->info));
        D("%s Adding region to list with type %d\n", __func__,
          region->info.type);
        D("%s pmem_stats address is 0x%p\n", __func__, ptype);
        hlist_add_head(&(region->list), ptype);

        return 0;
}
void *slab_alloc(ohc_slab_t *slab)
{
        slab_block_t *sblock;
        uintptr_t leader;
        struct hlist_node *p;
        int buckets;
        int i;

        if (hlist_empty(&slab->block_head)) {
                /* no block with free items; allocate and carve a new one */
                buckets = slab_buckets(slab);
                sblock = malloc(sizeof(slab_block_t) + slab->item_size * buckets);
                if (sblock == NULL) {
                        return NULL;
                }

                sblock->slab = slab;
                sblock->frees = buckets;
                hlist_add_head(&sblock->block_node, &slab->block_head);

                /* each item starts with a back-pointer to its block,
                 * followed by the free-list node handed out to callers */
                INIT_HLIST_HEAD(&sblock->item_head);
                leader = (uintptr_t)sblock + sizeof(slab_block_t);
                for (i = 0; i < buckets; i++) {
                        *((slab_block_t **)leader) = sblock;
                        p = (struct hlist_node *)(leader + sizeof(slab_block_t *));
                        hlist_add_head(p, &sblock->item_head);
                        leader += slab->item_size;
                }
        } else {
                sblock = list_entry(slab->block_head.first, slab_block_t,
                                    block_node);
        }

        p = sblock->item_head.first;
        hlist_del(p);
        sblock->frees--;
        if (sblock->frees == 0) {
                /* no free items remain: unhook the block from the
                 * free-block list until slab_free() re-adds it */
                hlist_del(&sblock->block_node);
        }
        return p;
}
static void __setup_new_cell(struct dm_bio_prison *prison,
                             struct dm_cell_key *key,
                             struct bio *holder,
                             uint32_t hash,
                             struct dm_bio_prison_cell *cell)
{
        memcpy(&cell->key, key, sizeof(cell->key));
        cell->holder = holder;
        bio_list_init(&cell->bios);
        hlist_add_head(&cell->list, prison->cells + hash);
}
void slab_free(void *p)
{
        /* step back over the item's leading back-pointer to find its block */
        uintptr_t leader = (uintptr_t)p - sizeof(slab_block_t **);
        slab_block_t *sblock = *((slab_block_t **)leader);
        ohc_slab_t *slab = sblock->slab;

        if (sblock->frees == 0) {
                /* the block had no free items and was unhooked in
                 * slab_alloc(); hook it back into the free-block list */
                hlist_add_head(&sblock->block_node, &slab->block_head);
        }

        hlist_add_head((struct hlist_node *)p, &sblock->item_head);
        sblock->frees++;

        if (sblock->frees == slab_buckets(slab) && sblock->block_node.next) {
                /* never free the first slab-block */
                hlist_del(&sblock->block_node);
                free(sblock);
        }
}
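A minimal round trip over the slab_alloc()/slab_free() pair above, assuming ohc_slab_t exposes the item_size and block_head fields the code uses and that slab_buckets() is defined elsewhere in the source; the initialization shown here is a guess for illustration only:

/* Hypothetical usage of slab_alloc()/slab_free() above.  The field
 * setup is an assumption; the real constructor may differ.  item_size
 * must cover the leading block pointer plus the payload. */
static void slab_demo(void)
{
        ohc_slab_t slab;
        void *p;

        slab.item_size = sizeof(slab_block_t *) + 64;   /* 64-byte payload */
        INIT_HLIST_HEAD(&slab.block_head);

        p = slab_alloc(&slab);  /* carves a fresh block on the first call */
        if (p != NULL)
                slab_free(p);   /* the item returns to its block's free list */
}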
static int propagate_one(struct mount *m)
{
        struct mount *child;
        int type;

        /* skip ones added by this propagate_mnt() */
        if (IS_MNT_NEW(m))
                return 0;
        /* skip if mountpoint isn't covered by it */
        if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
                return 0;
        if (m->mnt_group_id == last_dest->mnt_group_id) {
                type = CL_MAKE_SHARED;
        } else {
                struct mount *n, *p;
                for (n = m; ; n = p) {
                        p = n->mnt_master;
                        if (p == dest_master || IS_MNT_MARKED(p)) {
                                while (last_dest->mnt_master != p) {
                                        last_source = last_source->mnt_master;
                                        last_dest = last_source->mnt_parent;
                                }
                                if (n->mnt_group_id != last_dest->mnt_group_id) {
                                        last_source = last_source->mnt_master;
                                        last_dest = last_source->mnt_parent;
                                }
                                break;
                        }
                }
                type = CL_SLAVE;
                /* beginning of peer group among the slaves? */
                if (IS_MNT_SHARED(m))
                        type |= CL_MAKE_SHARED;
        }

        /* Notice when we are propagating across user namespaces */
        if (m->mnt_ns->user_ns != user_ns)
                type |= CL_UNPRIVILEGED;
        child = copy_tree(last_source, last_source->mnt.mnt_root, type);
        if (IS_ERR(child))
                return PTR_ERR(child);
        child->mnt.mnt_flags &= ~MNT_LOCKED;
        mnt_set_mountpoint(m, mp, child);
        last_dest = m;
        last_source = child;
        if (m->mnt_master != dest_master) {
                read_seqlock_excl(&mount_lock);
                SET_MNT_MARK(m->mnt_master);
                read_sequnlock_excl(&mount_lock);
        }
        hlist_add_head(&child->mnt_hash, list);
        return 0;
}
// __stp_tf_map_initialize(): Initialize the free list.  Grabs the
// lock.
static void
__stp_tf_map_initialize(void)
{
        int i;
        struct hlist_head *head = &__stp_tf_map_free_list[0];
        unsigned long flags;

        write_lock_irqsave(&__stp_tf_map_lock, flags);
        for (i = 0; i < TASK_FINDER_MAP_ENTRY_ITEMS; i++) {
                hlist_add_head(&__stp_tf_map_free_list_items[i].hlist, head);
        }
        write_unlock_irqrestore(&__stp_tf_map_lock, flags);
}
static struct nfulnl_instance *
instance_create(u_int16_t group_num, int pid)
{
        struct nfulnl_instance *inst;
        int err;

        write_lock_bh(&instances_lock);
        if (__instance_lookup(group_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        if (!try_module_get(THIS_MODULE)) {
                kfree(inst);
                err = -EAGAIN;
                goto out_unlock;
        }

        INIT_HLIST_NODE(&inst->hlist);
        spin_lock_init(&inst->lock);
        /* needs to be two, since we _put() after creation */
        atomic_set(&inst->use, 2);

        setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

        inst->peer_pid = pid;
        inst->group_num = group_num;

        inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
        inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT;
        inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT;
        inst->copy_mode = NFULNL_COPY_PACKET;
        inst->copy_range = NFULNL_COPY_RANGE_MAX;

        hlist_add_head(&inst->hlist,
                       &instance_table[instance_hashfn(group_num)]);

        write_unlock_bh(&instances_lock);

        return inst;

out_unlock:
        write_unlock_bh(&instances_lock);
        return ERR_PTR(err);
}