Example #1
void emmcfs_exttree_cache_destroy(void)
{
	kmem_cache_destroy(extents_tree_key_cachep);
}
Example #2
void sysv_destroy_icache(void)
{
	kmem_cache_destroy(sysv_inode_cachep);
}
Example #3
static void destroy_inodecache(void)
{
	kmem_cache_destroy(efs_inode_cachep);
}
Example #4
static void zswap_entry_cache_destory(void)
{
	kmem_cache_destroy(zswap_entry_cache);
}
Example #5
static void destroy_inodecache(void)
{
	if (kmem_cache_destroy(affs_inode_cachep))
		printk(KERN_INFO "affs_inode_cache: not all structures were freed\n");
}
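
Aside: the destructors in Examples #1-#5 are the exit-time half of a kmem_cache_create()/kmem_cache_destroy() pairing, and most of them tear down an inode cache. A minimal sketch of that pairing for a hypothetical inode cache follows; all names (demo_*) are made up and not taken from the examples. Recent kernels also call rcu_barrier() before destroying an inode cache so that delayed RCU frees of inodes have completed.

/* Hypothetical create/destroy pairing behind the destructors above. */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct demo_inode_info {
	struct inode vfs_inode;
};

static struct kmem_cache *demo_inode_cachep;

static void demo_init_once(void *obj)
{
	struct demo_inode_info *ei = obj;

	inode_init_once(&ei->vfs_inode);
}

static int __init demo_init_inodecache(void)
{
	demo_inode_cachep = kmem_cache_create("demo_inode_cache",
					      sizeof(struct demo_inode_info),
					      0, SLAB_RECLAIM_ACCOUNT,
					      demo_init_once);
	if (!demo_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit demo_destroy_inodecache(void)
{
	/* Wait for delayed RCU frees of inodes before tearing the cache down. */
	rcu_barrier();
	kmem_cache_destroy(demo_inode_cachep);
}
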
Example #6
/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return __nfs_write_mapping(mapping, &wbc, how);
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_CLEAN, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}

static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	do {
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
		} else if (!PagePrivate(page))
			break;
		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
		if (ret < 0)
			goto out_error;
	} while (PagePrivate(page));
	return 0;
out_error:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page)
{
	struct nfs_page *req;
	int ret;

	if (PageFsCache(page))
		nfs_fscache_release_page(page, GFP_KERNEL);

	req = nfs_find_and_lock_request(page);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = migrate_page(mapping, newpage, page);
	if (!req)
		goto out;
	if (ret)
		goto out_unlock;
	page_cache_get(newpage);
	req->wb_page = newpage;
	SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
out_unlock:
	nfs_clear_page_tag_locked(req);
	nfs_release_request(req);
out:
	return ret;
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}
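
As a side note, the scaling table in the nfs_congestion_kb comment above can be reproduced in userspace. The sketch below assumes 4 KiB pages (PAGE_SHIFT == 12) and uses a plain integer square root in place of the kernel's int_sqrt(); where int_sqrt truncates, the computed values differ slightly from the table (e.g. 46336k instead of 46340k for 2GB).

/*
 * Userspace sketch reproducing the nfs_congestion_kb scaling table above.
 * Assumes 4 KiB pages; isqrt() stands in for the kernel's int_sqrt().
 */
#include <stdio.h>

static unsigned long isqrt(unsigned long x)
{
	unsigned long res = 0, bit = 1UL << 30;

	while (bit > x)
		bit >>= 2;
	while (bit) {
		if (x >= res + bit) {
			x -= res + bit;
			res = (res >> 1) + bit;
		} else {
			res >>= 1;
		}
		bit >>= 2;
	}
	return res;
}

int main(void)
{
	const unsigned long page_shift = 12;
	unsigned long mem_mb[] = { 64, 128, 256, 512, 1024, 2048,
				   4096, 8192, 16384 };

	for (int i = 0; i < 9; i++) {
		/* pages of RAM = MB * (1 MiB / 4 KiB) = MB * 256 */
		unsigned long totalram_pages = mem_mb[i] << (20 - page_shift);
		unsigned long kb = (16 * isqrt(totalram_pages)) << (page_shift - 10);

		if (kb > 256 * 1024)
			kb = 256 * 1024;
		printf("%6luMB: %7luk\n", mem_mb[i], kb);
	}
	return 0;
}
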
Example #7
void
lofs_subrfini(void)
{
    kmem_cache_destroy(lnode_cache);
}
Example #8
void smb_destroy_request_cache(void)
{
    if (kmem_cache_destroy(req_cachep))
        printk(KERN_INFO "smb_destroy_request_cache: not all structures were freed\n");
}
Example #9
static int __init fnic_init_module(void)
{
	size_t len;
	int err = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* Create a cache for allocation of default size sgls */
	len = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* Create a cache for allocation of max size sgls*/
	len = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* Create a cache of io_req structs for use via mempool */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		err = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fnic_workq;
	}

	spin_lock_init(&fnic_list_lock);
	INIT_LIST_HEAD(&fnic_list);

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		err = -ENOMEM;
		goto err_fc_transport;
	}

	/* register the driver with PCI system */
	err = pci_register_driver(&fnic_driver);
	if (err < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return err;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	return err;
}
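
Distilled from the fnic example above: each error label undoes the allocations that succeeded before it, in reverse order, so kmem_cache_destroy() is only ever called on caches that were actually created. A stripped-down sketch of that idiom with hypothetical names (demo_a, demo_b):

/* Hypothetical two-cache module init showing the reverse-order unwind idiom. */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_a { int a; };
struct demo_b { int b; };

static struct kmem_cache *demo_a_cachep;
static struct kmem_cache *demo_b_cachep;

static int __init demo_init(void)
{
	int err = -ENOMEM;

	demo_a_cachep = kmem_cache_create("demo_a", sizeof(struct demo_a),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_a_cachep)
		goto err_a;

	demo_b_cachep = kmem_cache_create("demo_b", sizeof(struct demo_b),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_b_cachep)
		goto err_b;

	return 0;

err_b:
	kmem_cache_destroy(demo_a_cachep);	/* undo the earlier create */
err_a:
	return err;
}
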
Example #10
static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}
Example #11
void kvm_async_pf_deinit(void)
{
	if (async_pf_cache)
		kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}
Example #12
void exit_ext4_system_zone(void)
{
	kmem_cache_destroy(ext4_system_zone_cachep);
}
Example #13
File: super.c Project: cdkamat/tux3
static void __exit tux3_destroy_inodecache(void)
{
	kmem_cache_destroy(tux_inode_cachep);
}
Example #14
void iommu_iova_cache_destroy(void)
{
	kmem_cache_destroy(iommu_iova_cache);
}
Example #15
static void __exit exit_hfs_fs(void)
{
	unregister_filesystem(&hfs_fs_type);
	if (kmem_cache_destroy(hfs_inode_cachep))
		printk(KERN_INFO "hfs_inode_cache: not all structures were freed\n");
}
Example #16
static int __init init_gfs2_fs(void)
{
    int error;

    gfs2_str2qstr(&gfs2_qdot, ".");
    gfs2_str2qstr(&gfs2_qdotdot, "..");

    error = gfs2_sys_init();
    if (error)
        return error;

    error = gfs2_glock_init();
    if (error)
        goto fail;

    error = -ENOMEM;
    gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
                                          sizeof(struct gfs2_glock),
                                          0, 0,
                                          gfs2_init_glock_once);
    if (!gfs2_glock_cachep)
        goto fail;

    gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
                               sizeof(struct gfs2_glock) +
                               sizeof(struct address_space),
                               0, 0, gfs2_init_gl_aspace_once);

    if (!gfs2_glock_aspace_cachep)
        goto fail;

    gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
                                          sizeof(struct gfs2_inode),
                                          0,  SLAB_RECLAIM_ACCOUNT|
                                          SLAB_MEM_SPREAD,
                                          gfs2_init_inode_once);
    if (!gfs2_inode_cachep)
        goto fail;

    gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
                                            sizeof(struct gfs2_bufdata),
                                            0, 0, NULL);
    if (!gfs2_bufdata_cachep)
        goto fail;

    gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
                                          sizeof(struct gfs2_rgrpd),
                                          0, 0, NULL);
    if (!gfs2_rgrpd_cachep)
        goto fail;

    gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
                                           sizeof(struct gfs2_quota_data),
                                           0, 0, NULL);
    if (!gfs2_quotad_cachep)
        goto fail;

    register_shrinker(&qd_shrinker);

    error = register_filesystem(&gfs2_fs_type);
    if (error)
        goto fail;

    error = register_filesystem(&gfs2meta_fs_type);
    if (error)
        goto fail_unregister;

    error = -ENOMEM;
    gfs_recovery_wq = alloc_workqueue("gfs_recovery",
                                      WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
    if (!gfs_recovery_wq)
        goto fail_wq;

    gfs2_register_debugfs();

    printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__);

    return 0;

fail_wq:
    unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
    unregister_filesystem(&gfs2_fs_type);
fail:
    unregister_shrinker(&qd_shrinker);
    gfs2_glock_exit();

    if (gfs2_quotad_cachep)
        kmem_cache_destroy(gfs2_quotad_cachep);

    if (gfs2_rgrpd_cachep)
        kmem_cache_destroy(gfs2_rgrpd_cachep);

    if (gfs2_bufdata_cachep)
        kmem_cache_destroy(gfs2_bufdata_cachep);

    if (gfs2_inode_cachep)
        kmem_cache_destroy(gfs2_inode_cachep);

    if (gfs2_glock_aspace_cachep)
        kmem_cache_destroy(gfs2_glock_aspace_cachep);

    if (gfs2_glock_cachep)
        kmem_cache_destroy(gfs2_glock_cachep);

    gfs2_sys_uninit();
    return error;
}
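
The gfs2 init above takes the other common approach: a single fail: label, with each kmem_cache_destroy() guarded by a NULL check so the same cleanup works no matter how far initialization got. A compact sketch of that variant with hypothetical names, where the same helper also serves as the module exit path:

/* Hypothetical single-cleanup-path variant of the gfs2 pattern above. */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_x { int x; };
struct demo_y { int y; };

static struct kmem_cache *demo_x_cachep;
static struct kmem_cache *demo_y_cachep;

static void demo_destroy_caches(void)
{
	/* Guarded destroys: safe to call however far init got. */
	if (demo_y_cachep)
		kmem_cache_destroy(demo_y_cachep);
	if (demo_x_cachep)
		kmem_cache_destroy(demo_x_cachep);
}

static int __init demo_init(void)
{
	demo_x_cachep = kmem_cache_create("demo_x", sizeof(struct demo_x),
					  0, 0, NULL);
	if (!demo_x_cachep)
		goto fail;

	demo_y_cachep = kmem_cache_create("demo_y", sizeof(struct demo_y),
					  0, 0, NULL);
	if (!demo_y_cachep)
		goto fail;

	return 0;

fail:
	demo_destroy_caches();
	return -ENOMEM;
}

static void __exit demo_exit(void)
{
	demo_destroy_caches();
}
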
Example #17
static void __exit exit_hfs_fs(void)
{
	unregister_filesystem(&hfs_fs_type);
	kmem_cache_destroy(hfs_inode_cachep);
}
Example #18
File: inode.c Project: wfelipe/cfs
void cfs_destroy_cachep (void) {
	if (cfs_inode_cachep)
		kmem_cache_destroy (cfs_inode_cachep);
}
Example #19
acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}
Example #20
void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}
Example #21
static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();
	kmem_cache_destroy(fuse_inode_cachep);
}
Example #22
static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat2_cache);
}
Example #23
void vq_term(struct c2_dev *c2dev)
{
	kmem_cache_destroy(c2dev->host_msg_cache);
}
Example #24
static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
Example #25
static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			goto err_ppa;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
err_ppa:
	kmem_cache_destroy(ppa_cache);
	return -EINVAL;
}
Example #26
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, "cifs_min_rcv set to maximum (64)");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, "cifs_min_small set to maximum (256)");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
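
As an aside, the CIFSMaxBufSize clamping at the top of this example can be checked in userspace: the mask 0x1FE00 keeps bits 9-16, so a value that has already been clamped to the 8192..130048 range is rounded down to a multiple of 512. A small sketch with arbitrary sample sizes:

/* Userspace sketch of the CIFSMaxBufSize clamping logic above. */
#include <stdio.h>

static unsigned int clamp_cifs_buf_size(unsigned int size)
{
	if (size < 8192)
		return 8192;
	if (size > 1024 * 127)
		return 1024 * 127;
	return size & 0x1FE00;	/* round down to an even 512-byte multiple */
}

int main(void)
{
	unsigned int samples[] = { 4096, 16385, 61440, 65537, 200000 };

	for (int i = 0; i < 5; i++)
		printf("%u -> %u\n", samples[i], clamp_cifs_buf_size(samples[i]));
	return 0;
}
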
Example #27
void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}
Example #28
static void __exit tcm_loop_fabric_exit(void)
{
	tcm_loop_deregister_configfs();
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
}
Example #29
void
afs_destroy_inodecache(void)
{
    if (afs_inode_cachep)
	(void) kmem_cache_destroy(afs_inode_cachep);
}
Example #30
static void __exit iser_exit(void)
{
	iser_dbg("Removing iSER datamover...\n");
	iscsi_unregister_transport(&iscsi_iser_transport);
	kmem_cache_destroy(ig.desc_cache);
}