Example #1
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
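The three fail labels above already spell out the teardown order. For completeness, a module-exit sketch would run the same calls unconditionally, in reverse order of creation; the name fscrypt_exit and its __exit placement are assumptions, not taken from the listing, but kmem_cache_destroy() and destroy_workqueue() are the same kernel APIs used in the error path.

/*
 * Hypothetical cleanup mirroring fscrypt_init(); name is illustrative only.
 * Destroy the caches first, then the workqueue, i.e. the reverse of setup.
 */
static void __exit fscrypt_exit(void)
{
	kmem_cache_destroy(fscrypt_info_cachep);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	destroy_workqueue(fscrypt_read_workqueue);
}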
Example #2
int __init ext4_init_pageio(void)
{
	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	return 0;
}
int __init ext4_init_system_zone(void)
{
	ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone, 0);
	if (ext4_system_zone_cachep == NULL)
		return -ENOMEM;
	return 0;
}
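KMEM_CACHE() only sets up the allocator; objects are then taken from the cache with kmem_cache_alloc()/kmem_cache_zalloc() and returned with kmem_cache_free(). A minimal sketch against io_end_cachep follows; the helper names and the GFP_NOFS choice are assumptions for illustration, not code from the listing.

/* Illustrative only: one object out of io_end_cachep and back. */
static struct ext4_io_end *example_get_io_end(void)
{
	struct ext4_io_end *io_end;

	io_end = kmem_cache_zalloc(io_end_cachep, GFP_NOFS);
	if (!io_end)
		return NULL;	/* caller treats this as -ENOMEM */
	return io_end;
}

static void example_put_io_end(struct ext4_io_end *io_end)
{
	kmem_cache_free(io_end_cachep, io_end);
}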
Example #4
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;
	return 0;
}
Example #5
void epm_pid_start(void)
{
	unsigned long cache_flags = SLAB_PANIC;

#ifdef CONFIG_DEBUG_SLAB
	cache_flags |= SLAB_POISON;
#endif
	pid_kddm_obj_cachep = KMEM_CACHE(pid_kddm_object, cache_flags);

	INIT_WORK(&put_pid_work, put_pid_worker);

	register_io_linker(PID_LINKER, &pid_io_linker);
	pid_kddm_set = create_new_kddm_set(kddm_def_ns,
					   PID_KDDM_ID,
					   PID_LINKER,
					   KDDM_CUSTOM_DEF_OWNER,
					   0, 0);
	if (IS_ERR(pid_kddm_set))
		OOM;

	rpc_register_int(PROC_RESERVE_PID, handle_reserve_pid, 0);
	rpc_register_int(PROC_PID_LINK_TASK, handle_pid_link_task, 0);
	rpc_register_int(PROC_END_PID_RESERVATION,
			 handle_end_pid_reservation, 0);
}
Example #6
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPUs we can ever have.  The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
	nsproxy_cache_init();
}
Example #7
int __init ext4_init_pageio(void)
{
	int i;

	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
	if (io_page_cachep == NULL)
		return -ENOMEM;
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL) {
		kmem_cache_destroy(io_page_cachep);
		return -ENOMEM;
	}
	for (i = 0; i < WQ_HASH_SZ; i++)
		init_waitqueue_head(&ioend_wq[i]);

	return 0;
}
Example #8
static int __init dm_bio_prison_init(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}
Example #9
/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
int __init init_ext4_system_zone(void)
{
	ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone,
					     SLAB_RECLAIM_ACCOUNT);
	if (ext4_system_zone_cachep == NULL)
		return -ENOMEM;
	return 0;
}
Example #11
int __init dm_kcopyd_init(void)
{
	_job_cache = KMEM_CACHE(kcopyd_job, 0);
	if (!_job_cache)
		return -ENOMEM;

	return 0;
}
Example #12
/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}
Example #13
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}
Example #14
int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}
Example #15
int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
static int __init iostash_init(void)
{
	int ret = -1, i;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	ERR("Kernel version < 2.6.28 not supported.");
	return -ENOENT;
#endif
	DBG("++ iostash_init() ++\n");

	memset(&gctx, 0, sizeof(gctx));
	do {
		gctx.io_pool = KMEM_CACHE(iostash_bio, 0);
		if (!gctx.io_pool) {
			ERR("iostash_init: KMEM_CACHE() failed\n");
			break;
		}

		gctx.io_client = dm_io_client_create();
		if (IS_ERR(gctx.io_client)) {
			ERR("iostash_init: dm_io_client() failed\n");
			break;
		}

		gctx.sce = sce_create();
		if (!gctx.sce) {
			ERR("iostash_init: sce_create() failed\n");
			break;
		}

		gctx.pdm = pdm_create(gctx.sce, poptask_read, poptask_write);

		if (_init_iostash_kobjects()) {
			ERR("KOBJECT INIT FAILED!");
			_destroy_iostash_kobjects();
		}

		mutex_init(&gctx.ctl_mtx);

		for (i = 0; i < IOSTASH_MAXHDD_BCKTS; ++i)
			INIT_LIST_HEAD(&gctx.hddtbl.bucket[i]);

		for (i = 0; i < IOSTASH_MAXSSD_BCKTS; ++i)
			INIT_LIST_HEAD(&gctx.ssdtbl.bucket[i]);

		ret = 0;

	} while (0);

	if (ret) {
		_free_resource();
	}

	DBG("-- iostash_init() returns = %d --\n", ret);
	return ret;
}
Example #17
File: ua.c Project: kissthink/IET
int ua_init(void)
{
	ua_cache = KMEM_CACHE(ua_entry, 0);
	if (!ua_cache) {
		eprintk("%s", "Failed to create ua cache\n");
		return -ENOMEM;
	}

	return 0;
}
Example #18
static int __init kllds_init_module(void) {
  int ret_val;

  ret_val = register_chrdev(CHRDEV_MJR_NUM, LLDS_CDEV_NAME, &fops);
  if (ret_val < 0) {
    printk(KERN_ALERT "llds: registering char device failed with %d\n", ret_val);
    return ret_val;
  }

  printk(KERN_INFO "llds: make sure you execute the following:\nllds: # mknod /dev/%s c %d 0\nllds: before attempting to work with libforrest (unless it exists)!\n", LLDS_CDEV_NAME, CHRDEV_MJR_NUM);
  printk(KERN_INFO "llds: page size is %lu bytes\n", PAGE_SIZE);

  tmp_kllds_cache = KMEM_CACHE(__kllds_entry, SLAB_HWCACHE_ALIGN);
  if (!tmp_kllds_cache) {
    unregister_chrdev(CHRDEV_MJR_NUM, LLDS_CDEV_NAME);
    return -ENOMEM;
  }
  return 0;
}
Example #19
int __init jbd2_journal_init_revoke_caches(void)
{
	J_ASSERT(!jbd2_revoke_record_cache);
	J_ASSERT(!jbd2_revoke_table_cache);

	jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
	if (!jbd2_revoke_record_cache)
		goto record_cache_failure;

	jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
					     SLAB_TEMPORARY);
	if (!jbd2_revoke_table_cache)
		goto table_cache_failure;
	return 0;
table_cache_failure:
	jbd2_journal_destroy_revoke_caches();
record_cache_failure:
	return -ENOMEM;
}
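Both failure labels fall through to the single -ENOMEM return because the destroy helper can be called with either cache still unset. jbd2_journal_destroy_revoke_caches() is referenced but not shown in the listing; the following is a hedged reconstruction, not verbatim source.

/*
 * Sketch of the destroy helper used in the error path above: it must cope
 * with the record cache existing while the table cache was never created,
 * hence the NULL checks before each kmem_cache_destroy().
 */
void jbd2_journal_destroy_revoke_caches(void)
{
	if (jbd2_revoke_record_cache) {
		kmem_cache_destroy(jbd2_revoke_record_cache);
		jbd2_revoke_record_cache = NULL;
	}
	if (jbd2_revoke_table_cache) {
		kmem_cache_destroy(jbd2_revoke_table_cache);
		jbd2_revoke_table_cache = NULL;
	}
}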
Example #20
int vmmr0_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(vmmr0_async_pf, 0);

	if (!async_pf_cache)
	{
		return -ENOMEM;
	}

	return 0;
}
Example #21
File: ua.c Project: delphij/ietbsd
int ua_init(void)
{
#ifdef LINUX
	ua_cache = KMEM_CACHE(ua_entry, 0);
#else
	ua_cache = uma_zcreate("ietuacache", sizeof(struct ua_entry), NULL, NULL, NULL, NULL, 0, 0);
#endif
	if (!ua_cache) {
		eprintk("%s", "Failed to create ua cache\n");
		return -ENOMEM;
	}

	return 0;
}
Example #22
int __init f2fs_init_crypto(void)
{
	int res = -ENOMEM;

	f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
	if (!f2fs_read_workqueue)
		goto fail;

	f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
						SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypto_ctx_cachep)
		goto fail;

	f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
						SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypt_info_cachep)
		goto fail;

	return 0;
fail:
	f2fs_exit_crypto();
	return res;
}
Example #23
int init_module(void)
{
  int ret_val;
  ret_val = register_chrdev(MAJOR_NUM, DEVICE_NAME, &fops);
  if (ret_val < 0) {
    printk(KERN_ALERT "Registering char device failed with %d\n", ret_val);
    return ret_val;
  }
  printk(KERN_INFO "try 'sudo mknod %s c %d 0'\n", DEVICE_FILE_NAME, MAJOR_NUM);

  vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
  policy_cache = kmem_cache_create("numa_policy", sizeof(struct mempolicy), 0, SLAB_PANIC, NULL);
  return 0;
}
Example #24
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
Example #25
static int __init
tdb_init(void)
{
	tw_cache = KMEM_CACHE(tdb_work_t, 0);
	if (!tw_cache)
		return -ENOMEM;

	tdb_wq = create_singlethread_workqueue("tdb_wq");
	if (!tdb_wq)
		goto err_wq;

	return 0;
err_wq:
	kmem_cache_destroy(tw_cache);
	return -ENOMEM;
}
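The err_wq path above shows half of the teardown; a full exit sketch would also stop the workqueue. The name tdb_exit is an assumption for illustration, while destroy_workqueue() is the standard counterpart to create_singlethread_workqueue().

/* Hypothetical module-exit counterpart to tdb_init(); name is illustrative. */
static void __exit
tdb_exit(void)
{
	destroy_workqueue(tdb_wq);
	kmem_cache_destroy(tw_cache);
}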
Example #26
/**
 * @author David Margery
 * @author Pascal Gallard (update to kddm architecture)
 * @author Louis Rilling (split files)
 */
void proc_task_start(void)
{
	unsigned long cache_flags = SLAB_PANIC;

#ifdef CONFIG_DEBUG_SLAB
	cache_flags |= SLAB_POISON;
#endif
	task_kddm_obj_cachep = KMEM_CACHE(task_kddm_object, cache_flags);

	register_io_linker(TASK_LINKER, &task_io_linker);

	task_kddm_set = create_new_kddm_set(kddm_def_ns, TASK_KDDM_ID,
					    TASK_LINKER,
					    KDDM_CUSTOM_DEF_OWNER,
					    0, 0);
	if (IS_ERR(task_kddm_set))
		OOM;

}
Example #27
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
}
Example #28
int main(void)
{
	unsigned long hash;

	struct kmem_cache *cache = KMEM_CACHE(foo);

	if (cache == NULL) {
		printk("error: Cannot create cache\n");
		TEST_EXIT(1);
	}

	hash = page_alloc_signature();

	struct foo *fp[40];
	for (int i = 0; i < 40; i++) {
		fp[i] = kmem_cache_alloc(cache, CACHE_OPT_NONE);
		fp[i]->a = i;
	}

	if (page_alloc_signature() == hash) {
		printk("error: No memory allocated\n");
		TEST_EXIT(1);
	}

	for (int i = 0; i < 40; i++) {
		if (fp[i]->a != i) {
			printk("error: Address allocated multiple times\n");
			TEST_EXIT(1);
		}
		kmem_cache_free(cache, fp[i]);
	}

	if (page_alloc_signature() != hash) {
		printk("error: Memory not correctly restored\n");
		TEST_EXIT(1);
	}

	TEST_EXIT(0);
}
Example #29
int __init
tfw_cache_init(void)
{
	int r = 0;

	if (!tfw_cfg.cache)
		return 0;

	db = tdb_open(tfw_cfg.c_path, tfw_cfg.c_size, 0);
	if (!db)
		return 1;

	cache_mgr_thr = kthread_run(tfw_cache_mgr, NULL, "tfw_cache_mgr");
	if (IS_ERR(cache_mgr_thr)) {
		r = PTR_ERR(cache_mgr_thr);
		TFW_ERR("Can't start cache manager, %d\n", r);
		goto err_thr;
	}

	c_cache = KMEM_CACHE(tfw_cache_work_t, 0);
	if (!c_cache) {
		r = -ENOMEM;
		goto err_cache;
	}

	cache_wq = alloc_workqueue("tfw_cache_wq", WQ_MEM_RECLAIM, 0);
	if (!cache_wq) {
		r = -ENOMEM;
		goto err_wq;
	}

	return 0;
err_wq:
	kmem_cache_destroy(c_cache);
err_cache:
	kthread_stop(cache_mgr_thr);
err_thr:
	tdb_close(db);
	return r;
}
Example #30
void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC);
	delayacct_tsk_init(&init_task);
}