Example no. 1
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_long_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		bitmap_zero(ret->ioprio_changed, IOC_IOPRIO_CHANGED_BITS);
		ret->ioprio = 0;
		ret->last_waited = 0; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		INIT_RADIX_TREE(&ret->bfq_radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->bfq_cic_list);
		ret->ioc_data = NULL;
	}

	return ret;
}
Example no. 2
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
Example no. 3
static void
vnlayer_destroy_inode_callback(struct rcu_head *head)
{
    struct inode *inode_p = container_of(head, struct inode, i_rcu);

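    /* i_dentry became an hlist_head in Linux 3.6, so pick the matching initializer */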
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
    INIT_HLIST_HEAD(&inode_p->i_dentry);
#else
    INIT_LIST_HEAD(&inode_p->i_dentry);
#endif
    ASSERT(I_COUNT(inode_p) == 0);
    ASSERT(inode_p->i_state & I_FREEING);
    kmem_cache_free(vnlayer_vnode_cache, (vnlayer_vnode_t *) ITOV(inode_p));
}
Example no. 4
File: avc.c Project: HPSI/xen-v4v
/**
 * avc_init - Initialize the AVC.
 *
 * Initialize the access vector cache.
 */
void __init avc_init(void)
{
    int i;

    for ( i = 0; i < AVC_CACHE_SLOTS; i++ )
    {
        INIT_HLIST_HEAD(&avc_cache.slots[i]);
        spin_lock_init(&avc_cache.slots_lock[i]);
    }
    atomic_set(&avc_cache.active_nodes, 0);
    atomic_set(&avc_cache.lru_hint, 0);

    printk("AVC INITIALIZED\n");
}
Example no. 5
/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb != NULL) {
		tb->port      = snum;
		tb->fastreuse = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}
Example no. 6
static int khashmap_alloc(struct khashmap *hlist)
{
	size_t size = khashmap_size_in_bytes(hlist);
	int i;

	if (size < PAGE_SIZE)
		hlist->hash = kmalloc(size, GFP_KERNEL);
	else
		hlist->hash = vmalloc(size);
	if (unlikely(!hlist->hash))
		return -ENOMEM;
	for (i = 0; i < khashmap_size(hlist); i++)
		INIT_HLIST_HEAD(&hlist->hash[i]);
	return 0;
}
Example no. 7
static void faf_poll_init(void)
{
	int i;

	faf_polled_fd_hash = kmalloc(FAF_POLLED_FD_HASH_SIZE *
				     sizeof(*faf_polled_fd_hash),
				     GFP_KERNEL);
	if (!faf_polled_fd_hash)
		panic("Couldn't allocate FAF poll descriptor table!\n");
	for (i = 0; i < FAF_POLLED_FD_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&faf_polled_fd_hash[i]);

	rpc_register_void(RPC_FAF_POLL_WAIT, handle_faf_poll_wait, 0);
	rpc_register_void(RPC_FAF_POLL_DEQUEUE, handle_faf_poll_dequeue, 0);
}
Example no. 8
/*
* Expand the size of the hash table to @size.
* @ht: the hash table to expand
* @size: the size we expand to
*/
static int uproc_htable_expand(uproc_htable_t *ht, int size){
    int new_len, new_idx, new_load_limit,  i;
    struct hlist_head *new_buckets, *head;
    struct hlist_node *p, *q;
    unsigned h;
    new_load_limit = ht->load_limit;
    new_len = ht->len;
    new_idx = ht->p_index;
    while(new_load_limit < size && new_idx < uproc_htable_nprimes){
        new_len = uproc_htable_primes[++new_idx];
        new_load_limit = ht->load_factor * new_len;
    }

    if((new_buckets = malloc(new_len * sizeof(struct hlist_head))) == NULL){
        fprintf(stderr, "failed to malloc: %s", strerror(errno));
        return -ENOMEM;
    }

    for(i = 0; i < new_len; ++i){
        INIT_HLIST_HEAD(&new_buckets[i]);
    }

    /*
    * Rehash and move all events to new_buckets.
    */
    for(i = 0; i < ht->len; ++i){
        head = &(ht->buckets[i]);
        if(!hlist_empty(head)){
            p = head->first;
            while(p){
                q = p->next;
                hlist_del(p);
                h = ht->hf(p) % new_len;
                hlist_add_head(&new_buckets[h], p);
                p = q;
            }
        }
    }

    free(ht->buckets);

    ht->p_index = new_idx;
    ht->buckets = new_buckets;
    ht->len = new_len;
    ht->load_limit = new_load_limit;

    return 0;
}
Example no. 9
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	for (i++; i <= ns->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
Example no. 10
void au_nhash_move(struct au_nhash *dst, struct au_nhash *src)
{
	int i;

	AuTraceEnter();

	*dst = *src;
	for (i = 0; i < AuSize_NHASH; i++) {
		struct hlist_head *h;
		h = dst->heads + i;
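		/* the struct copy left first->pprev pointing at src's head; re-point it at dst's */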
		if (h->first)
			h->first->pprev = &h->first;
		INIT_HLIST_HEAD(src->heads + i);
	}
	/* smp_mb(); */
}
Example no. 11
void au_nhash_move(struct au_nhash *dst, struct au_nhash *src)
{
	int i;
	struct hlist_head *dsth, *srch;

	*dst = *src;
	srch = src->heads;
	dsth = dst->heads;
	for (i = 0; i < AuSize_NHASH; i++) {
		if (dsth->first)
			dsth->first->pprev = &dsth->first;
		dsth++;
		INIT_HLIST_HEAD(srch++);
	}
	/* smp_mb(); */
}
Example no. 12
/*
 * the allocated memory has to be freed by
 * au_nhash_wh_free() or au_nhash_de_free().
 */
int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp)
{
    struct hlist_head *head;
    unsigned int u;

    head = kmalloc(sizeof(*nhash->nh_head) * num_hash, gfp);
    if (head) {
        nhash->nh_num = num_hash;
        nhash->nh_head = head;
        for (u = 0; u < num_hash; u++)
            INIT_HLIST_HEAD(head++);
        return 0; /* success */
    }

    return -ENOMEM;
}
Example no. 13
/**
 * avc_init - Initialize the AVC.
 *
 * Initialize the access vector cache.
 */
void __init avc_init(void)
{
	int i;

	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
		INIT_HLIST_HEAD(&avc_cache.slots[i]);
		spin_lock_init(&avc_cache.slots_lock[i]);
	}
	atomic_set(&avc_cache.active_nodes, 0);
	atomic_set(&avc_cache.lru_hint, 0);

	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
					     0, SLAB_PANIC, NULL);

	audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
}
Example no. 14
static struct hlist_head *alloc_rmtperm_hash(void)
{
	struct hlist_head *hash;
	int i;

	OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep,
			   REMOTE_PERM_HASHSIZE * sizeof(*hash),
			   GFP_IOFS);
	if (!hash)
		return NULL;

	for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
		INIT_HLIST_HEAD(hash + i);

	return hash;
}
Example no. 15
static int pep_init(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	sk->sk_destruct = pipe_destruct;
	INIT_HLIST_HEAD(&pn->hlist);
	pn->listener = NULL;
	skb_queue_head_init(&pn->ctrlreq_queue);
	atomic_set(&pn->tx_credits, 0);
	pn->ifindex = 0;
	pn->peer_type = 0;
	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
	pn->rx_credits = 0;
	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	pn->init_enable = 1;
	pn->aligned = 0;
	return 0;
}
Example no. 16
int nilfs_init_gccache(struct the_nilfs *nilfs)
{
	int loop;

	BUG_ON(nilfs->ns_gc_inodes_h);

	INIT_LIST_HEAD(&nilfs->ns_gc_inodes);

	nilfs->ns_gc_inodes_h =
		kmalloc(sizeof(struct hlist_head) * NILFS_GCINODE_HASH_SIZE,
			GFP_NOFS);
	if (nilfs->ns_gc_inodes_h == NULL)
		return -ENOMEM;

	for (loop = 0; loop < NILFS_GCINODE_HASH_SIZE; loop++)
		INIT_HLIST_HEAD(&nilfs->ns_gc_inodes_h[loop]);
	return 0;
}
Example no. 17
void nhash_move(struct aufs_nhash *dst, struct aufs_nhash *src)
{
	int i;

	TraceEnter();

	//DbgWhlist(src);
	*dst = *src;
	for (i = 0; i < AUFS_NHASH_SIZE; i++) {
		struct hlist_head *h;
		h = dst->heads + i;
		if (h->first)
			h->first->pprev = &h->first;
		INIT_HLIST_HEAD(src->heads + i);
	}
	//DbgWhlist(src);
	//DbgWhlist(dst);
	//smp_mb();
}
Example no. 18
void *slab_alloc(ohc_slab_t *slab)
{
	slab_block_t *sblock;
	uintptr_t leader;
	struct hlist_node *p;
	int buckets;
	int i;

	if(hlist_empty(&slab->block_head)) {
		buckets = slab_buckets(slab);
		sblock = malloc(sizeof(slab_block_t) + slab->item_size * buckets);
		if(sblock == NULL) {
			return NULL;
		}

		sblock->slab = slab;
		sblock->frees = buckets;
		hlist_add_head(&sblock->block_node, &slab->block_head);
		INIT_HLIST_HEAD(&sblock->item_head);

		leader = (uintptr_t)sblock + sizeof(slab_block_t);
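		/* each item slot stores a back-pointer to its block, then the hlist_node linked into the free list */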
		for(i = 0; i < buckets; i++) {
			*((slab_block_t **)leader) = sblock;
			p = (struct hlist_node *)(leader + sizeof(slab_block_t *));
			hlist_add_head(p, &sblock->item_head);
			leader += slab->item_size;
		}

	} else {
		sblock = list_entry(slab->block_head.first, slab_block_t, block_node);
	}

	p = sblock->item_head.first;
	hlist_del(p);

	sblock->frees--;
	if(sblock->frees == 0) {
		/* if no free items, we throw the block away */
		hlist_del(&sblock->block_node);
	}

	return p;
}
Example no. 19
/*
 * Allocate space for the hash table.  The size is 2^bits.
 */
int virt_hash_table_init(struct virt_hash_table *table, unsigned bits)
{
    const unsigned table_size = 1u << bits;
    int i;

    table->head = kmalloc(table_size * sizeof(struct virt_hash_head), GFP_KERNEL);
    if(!table->head)
        return -ENOMEM;

    for(i = 0; i < table_size; i++) {
        struct virt_hash_head *head = &table->head[i];
        spin_lock_init(&head->lock);
        INIT_HLIST_HEAD(&head->list);
    }

    table->bits = bits;
    table->size = table_size;

    return 0;
}
Example no. 20
static struct faf_polled_fd *faf_polled_fd_find(unsigned long dvfs_id)
{
	struct faf_polled_fd *polled_fd;

	polled_fd = __faf_polled_fd_find(dvfs_id);
	if (polled_fd)
		goto out;

	polled_fd = kmalloc(sizeof(*polled_fd), GFP_KERNEL);
	if (!polled_fd)
		goto out;

	polled_fd->dvfs_id = dvfs_id;
	INIT_HLIST_HEAD(&polled_fd->nodes);
	polled_fd->count = 0;
	hlist_add_head(&polled_fd->list,
		       &faf_polled_fd_hash[faf_polled_fd_hashfn(dvfs_id)]);

out:
	return polled_fd;
}
Example no. 21
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

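	/* each flex_array element holds an hlist_head; fetch it and make it an empty chain */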
	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
Example no. 22
File: wim.c Project: twwbond/wimlib
static void
destroy_image_metadata(struct wim_image_metadata *imd,
                       struct blob_table *table,
                       bool free_metadata_blob_descriptor)
{
    free_dentry_tree(imd->root_dentry, table);
    imd->root_dentry = NULL;
    free_wim_security_data(imd->security_data);
    imd->security_data = NULL;

    if (free_metadata_blob_descriptor) {
        free_blob_descriptor(imd->metadata_blob);
        imd->metadata_blob = NULL;
    }
    if (!table) {
        struct blob_descriptor *blob, *tmp;
        list_for_each_entry_safe(blob, tmp, &imd->unhashed_blobs, unhashed_list)
            free_blob_descriptor(blob);
    }
    INIT_LIST_HEAD(&imd->unhashed_blobs);
    INIT_HLIST_HEAD(&imd->inode_list);
}
Example no. 23
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ioc) {
		atomic_long_set(&ioc->refcount, 1);
		atomic_set(&ioc->nr_tasks, 1);
		spin_lock_init(&ioc->lock);
		ioc->ioprio_changed = 0;
		ioc->ioprio = 0;
		ioc->last_waited = 0; /* doesn't matter... */
		ioc->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ioc->cic_list);
		ioc->ioc_data = NULL;
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
		ioc->cgroup_changed = 0;
#endif
	}

	return ioc;
}
Example no. 24
int sc_capwap_init(struct sc_capwap_session *session, struct net *net)
{
	int i;

	TRACEKMOD("### sc_capwap_init\n");

	ASSERT_RTNL();

	/* Init session */
	memset(session, 0, sizeof(struct sc_capwap_session));

	session->net = net;

	/* Defragment packets */
	memset(&session->fragments, 0, sizeof(struct sc_capwap_fragment_queue));
	INIT_LIST_HEAD(&session->fragments.lru_list);
	spin_lock_init(&session->fragments.lock);

	for (i = 0; i < STA_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&session->station_list[i]);

	return 0;
}
Example no. 25
inline int uproc_htable_init(uproc_htable_t *ht, double load_factor, hash_t hf, entry_equal_t heef){
    int i;
    
    ht->len = uproc_htable_primes[0];
    ht->buckets = malloc(ht->len * sizeof(struct hlist_head));

    if(ht->buckets == NULL){
        fprintf(stderr, "can't allocate hash buckets, memory shortage.");
        return -ENOMEM;
    }

    for(i = 0; i < ht->len; ++i){
        INIT_HLIST_HEAD(&ht->buckets[i]);
    }

    ht->p_index = 0;
    ht->load_limit = load_factor * uproc_htable_primes[0];
    ht->load_factor = load_factor;
    ht->n_entries = 0;
    
    ht->hf = hf;
    ht->heef = heef;
    return 0;
}
Example no. 26
static int replayfs_super_info_init(struct replayfs_sb_info *sbi) {
	int err;

	err = 0;

	spin_lock_init(&sbi->i_get_lock);
	INIT_HLIST_HEAD(&sbi->i_get_head);


	err = replayfs_syscache_init(&sbi->syscall_cache);
	if (err) {
		goto out;
	}

	err = replayfs_file_log_cache_init(&sbi->cache, &sbi->syscall_cache);

	if (err) {
		goto out;
	}


out:
	return err;
}
Example no. 27
void au_nhash_init(struct au_nhash *nhash)
{
	int i;
	for (i = 0; i < AuSize_NHASH; i++)
		INIT_HLIST_HEAD(nhash->heads + i);
}
Example no. 28
/*
 * hfs_read_super()
 *
 * This is the function that is responsible for mounting an HFS
 * filesystem.	It performs all the tasks necessary to get enough data
 * from the disk to read the root inode.  This includes parsing the
 * mount options, dealing with Macintosh partitions, reading the
 * superblock and the allocation bitmap blocks, calling
 * hfs_btree_init() to get the necessary data about the extents and
 * catalog B-trees and, finally, reading the root inode into memory.
 */
static int hfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct hfs_sb_info *sbi;
	struct hfs_find_data fd;
	hfs_cat_rec rec;
	struct inode *root_inode;
	int res;

	sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;
	INIT_HLIST_HEAD(&sbi->rsrc_inodes);

	res = -EINVAL;
	if (!parse_options((char *)data, sbi)) {
		printk(KERN_ERR "hfs: unable to parse mount options.\n");
		goto bail;
	}

	sb->s_op = &hfs_super_operations;
	sb->s_flags |= MS_NODIRATIME;
	mutex_init(&sbi->bitmap_lock);

	res = hfs_mdb_get(sb);
	if (res) {
		if (!silent)
			printk(KERN_WARNING "hfs: can't find a HFS filesystem on dev %s.\n",
				hfs_mdb_name(sb));
		res = -EINVAL;
		goto bail;
	}

	/* try to get the root inode */
	hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
	res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
	if (!res) {
		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
			res =  -EIO;
			goto bail;
		}
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
	}
	if (res) {
		hfs_find_exit(&fd);
		goto bail_no_root;
	}
	res = -EINVAL;
	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
	hfs_find_exit(&fd);
	if (!root_inode)
		goto bail_no_root;

	res = -ENOMEM;
	sb->s_root = d_alloc_root(root_inode);
	if (!sb->s_root)
		goto bail_iput;

	sb->s_root->d_op = &hfs_dentry_operations;

	/* everything's okay */
	return 0;

bail_iput:
	iput(root_inode);
bail_no_root:
	printk(KERN_ERR "hfs: get root inode failed.\n");
bail:
	hfs_mdb_put(sb);
	return res;
}
Example no. 29
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0) {
			retval = nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	if (unlikely(is_child_reaper(pid))) {
		if (pid_ns_prepare_proc(ns))
			goto out_free;
	}

	get_pid_ns(ns);
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
		upid->ns->nr_hashed++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
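	/* free only the pid numbers that were actually allocated: levels i+1 .. ns->level */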
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
Example no. 30
static void cifs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_HLIST_HEAD(&inode->i_dentry);
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}