/** * fscrypt_initialize() - allocate major buffers for fs encryption. * * We only call this when we start accessing encrypted files, since it * results in memory getting allocated that wouldn't otherwise be used. * * Return: Zero on success, non-zero otherwise. */ int fscrypt_initialize(void) { int i, res = -ENOMEM; if (fscrypt_bounce_page_pool) return 0; mutex_lock(&fscrypt_init_mutex); if (fscrypt_bounce_page_pool) goto already_initialized; for (i = 0; i < num_prealloc_crypto_ctxs; i++) { struct fscrypt_ctx *ctx; ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS); if (!ctx) goto fail; list_add(&ctx->free_list, &fscrypt_free_ctxs); } fscrypt_bounce_page_pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0); if (!fscrypt_bounce_page_pool) goto fail; already_initialized: mutex_unlock(&fscrypt_init_mutex); return 0; fail: fscrypt_destroy(); mutex_unlock(&fscrypt_init_mutex); return res; }
/** * fscrypt_initialize() - allocate major buffers for fs encryption. * @cop_flags: fscrypt operations flags * * We only call this when we start accessing encrypted files, since it * results in memory getting allocated that wouldn't otherwise be used. * * Return: Zero on success, non-zero otherwise. */ int fscrypt_initialize(unsigned int cop_flags) { int i, res = -ENOMEM; /* No need to allocate a bounce page pool if this FS won't use it. */ if (cop_flags & FS_CFLG_OWN_PAGES) return 0; mutex_lock(&fscrypt_init_mutex); if (fscrypt_bounce_page_pool) goto already_initialized; for (i = 0; i < num_prealloc_crypto_ctxs; i++) { struct fscrypt_ctx *ctx; ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS); if (!ctx) goto fail; list_add(&ctx->free_list, &fscrypt_free_ctxs); } fscrypt_bounce_page_pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0); if (!fscrypt_bounce_page_pool) goto fail; already_initialized: mutex_unlock(&fscrypt_init_mutex); return 0; fail: fscrypt_destroy(); mutex_unlock(&fscrypt_init_mutex); return res; }
static __init int init_emergency_pool(void) { #if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG) if (max_pfn <= max_low_pfn) return 0; #endif page_pool = mempool_create_page_pool(POOL_SIZE, 0); BUG_ON(!page_pool); pr_info("pool size: %d pages\n", POOL_SIZE); return 0; }
/** * f2fs_crypto_initialize() - Set up for f2fs encryption. * * We only call this when we start accessing encrypted files, since it * results in memory getting allocated that wouldn't otherwise be used. * * Return: Zero on success, non-zero otherwise. */ int f2fs_crypto_initialize(void) { int i, res = -ENOMEM; if (f2fs_bounce_page_pool) return 0; mutex_lock(&crypto_init); if (f2fs_bounce_page_pool) goto already_initialized; for (i = 0; i < num_prealloc_crypto_ctxs; i++) { struct f2fs_crypto_ctx *ctx; ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL); if (!ctx) goto fail; list_add(&ctx->free_list, &f2fs_free_crypto_ctxs); } /* must be allocated at the last step to avoid race condition above */ f2fs_bounce_page_pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0); if (!f2fs_bounce_page_pool) goto fail; f2fs_emergent_page_pool = mempool_create_page_pool(num_prealloc_emergent_pages, 0); if (!f2fs_emergent_page_pool) goto fail; already_initialized: mutex_unlock(&crypto_init); return 0; fail: f2fs_crypto_destroy(); mutex_unlock(&crypto_init); return res; }
/** * ext4_init_crypto() - Set up for ext4 encryption. * * We only call this when we start accessing encrypted files, since it * results in memory getting allocated that wouldn't otherwise be used. * * Return: Zero on success, non-zero otherwise. */ int ext4_init_crypto(void) { int i, res = -ENOMEM; mutex_lock(&crypto_init); if (ext4_read_workqueue) goto already_initialized; ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0); if (!ext4_read_workqueue) goto fail; ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx, SLAB_RECLAIM_ACCOUNT); if (!ext4_crypto_ctx_cachep) goto fail; ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info, SLAB_RECLAIM_ACCOUNT); if (!ext4_crypt_info_cachep) goto fail; for (i = 0; i < num_prealloc_crypto_ctxs; i++) { struct ext4_crypto_ctx *ctx; ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS); if (!ctx) { res = -ENOMEM; goto fail; } list_add(&ctx->free_list, &ext4_free_crypto_ctxs); } ext4_bounce_page_pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0); if (!ext4_bounce_page_pool) { res = -ENOMEM; goto fail; } already_initialized: mutex_unlock(&crypto_init); return 0; fail: ext4_exit_crypto(); mutex_unlock(&crypto_init); return res; }
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { adapter->pool.erp_req = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.erp_req) return -ENOMEM; adapter->pool.gid_pn_req = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.gid_pn_req) return -ENOMEM; adapter->pool.scsi_req = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.scsi_req) return -ENOMEM; adapter->pool.scsi_abort = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.scsi_abort) return -ENOMEM; adapter->pool.status_read_req = mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.status_read_req) return -ENOMEM; adapter->pool.qtcb_pool = mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache); if (!adapter->pool.qtcb_pool) return -ENOMEM; BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE); adapter->pool.sr_data = mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0); if (!adapter->pool.sr_data) return -ENOMEM; adapter->pool.gid_pn = mempool_create_slab_pool(1, zfcp_fc_req_cache); if (!adapter->pool.gid_pn) return -ENOMEM; return 0; }
/*
 * Module init for GFS2: set up sysfs, the quota LRU, glock machinery,
 * slab caches, the quota shrinker, both filesystem types, three
 * workqueues, and the bounce-page mempool.
 *
 * Error handling unwinds in reverse via the fall-through label chain at
 * the bottom; the ordering of the labels is load-bearing.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init init_gfs2_fs(void)
{
	int error;

	gfs2_str2qstr(&gfs2_qdot, ".");
	gfs2_str2qstr(&gfs2_qdotdot, "..");
	gfs2_quota_hash_init();

	error = gfs2_sys_init();
	if (error)
		return error;

	error = list_lru_init(&gfs2_qd_lru);
	if (error)
		goto fail_lru;

	error = gfs2_glock_init();
	if (error)
		goto fail;

	/* Everything from here to register_shrinker() fails with -ENOMEM. */
	error = -ENOMEM;
	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
					      0, 0,
					      gfs2_init_glock_once);
	if (!gfs2_glock_cachep)
		goto fail;

	/* Variant glock cache with an embedded address_space. */
	gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
					sizeof(struct gfs2_glock) +
					sizeof(struct address_space),
					0, 0, gfs2_init_gl_aspace_once);
	if (!gfs2_glock_aspace_cachep)
		goto fail;

	gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
					      sizeof(struct gfs2_inode),
					      0, SLAB_RECLAIM_ACCOUNT|
					      SLAB_MEM_SPREAD,
					      gfs2_init_inode_once);
	if (!gfs2_inode_cachep)
		goto fail;

	gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
						sizeof(struct gfs2_bufdata),
						0, 0, NULL);
	if (!gfs2_bufdata_cachep)
		goto fail;

	gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
					      sizeof(struct gfs2_rgrpd),
					      0, 0, NULL);
	if (!gfs2_rgrpd_cachep)
		goto fail;

	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
					       0, 0, NULL);
	if (!gfs2_quotad_cachep)
		goto fail;

	gfs2_rsrv_cachep = kmem_cache_create("gfs2_mblk",
					     sizeof(struct gfs2_blkreserv),
					     0, 0, NULL);
	if (!gfs2_rsrv_cachep)
		goto fail;

	register_shrinker(&gfs2_qd_shrinker);

	error = register_filesystem(&gfs2_fs_type);
	if (error)
		goto fail;

	error = register_filesystem(&gfs2meta_fs_type);
	if (error)
		goto fail_unregister;

	error = -ENOMEM;
	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
	if (!gfs_recovery_wq)
		goto fail_wq;

	gfs2_control_wq = alloc_workqueue("gfs2_control",
					  WQ_UNBOUND | WQ_FREEZABLE, 0);
	if (!gfs2_control_wq)
		goto fail_recovery;

	gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0);
	if (!gfs2_freeze_wq)
		goto fail_freeze;

	gfs2_page_pool = mempool_create_page_pool(64, 0);
	if (!gfs2_page_pool)
		goto fail_freeze;

	gfs2_register_debugfs();

	pr_info("GFS2 installed\n");

	return 0;

	/* Unwind in reverse order of setup; labels fall through downward. */
fail_freeze:
	destroy_workqueue(gfs2_freeze_wq);
fail_control:
	destroy_workqueue(gfs2_control_wq);
fail_recovery:
	destroy_workqueue(gfs_recovery_wq);
fail_wq:
	unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
	unregister_filesystem(&gfs2_fs_type);
fail:
	list_lru_destroy(&gfs2_qd_lru);
fail_lru:
	/*
	 * NOTE(review): this fall-through calls unregister_shrinker() even
	 * on paths where register_shrinker() was never reached (e.g. a
	 * cache-creation failure jumps to "fail" above) — confirm whether
	 * unregister_shrinker() tolerates an unregistered shrinker here.
	 */
	unregister_shrinker(&gfs2_qd_shrinker);
	gfs2_glock_exit();

	/* Cache destruction is guarded: only already-created caches exist. */
	if (gfs2_rsrv_cachep)
		kmem_cache_destroy(gfs2_rsrv_cachep);

	if (gfs2_quotad_cachep)
		kmem_cache_destroy(gfs2_quotad_cachep);

	if (gfs2_rgrpd_cachep)
		kmem_cache_destroy(gfs2_rgrpd_cachep);

	if (gfs2_bufdata_cachep)
		kmem_cache_destroy(gfs2_bufdata_cachep);

	if (gfs2_inode_cachep)
		kmem_cache_destroy(gfs2_inode_cachep);

	if (gfs2_glock_aspace_cachep)
		kmem_cache_destroy(gfs2_glock_aspace_cachep);

	if (gfs2_glock_cachep)
		kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
	return error;
}
/*
 * Allocate pblk's core runtime resources: global slab caches, seven
 * mempools, two workqueues, the PPA format, and the write ring buffer.
 *
 * Failures unwind already-created resources via the reverse-order label
 * chain at the bottom and return -ENOMEM for every failure mode.
 */
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Buffered pages scale with device geometry (planes x luns). */
	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	/*
	 * NOTE(review): if anything below fails, the global caches created
	 * here are not torn down on this path — confirm they are released
	 * elsewhere (e.g. module exit) or refcounted.
	 */
	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!pblk->page_pool)
		return -ENOMEM;

	pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE,
							pblk_blk_ws_cache);
	if (!pblk->line_ws_pool)
		goto free_page_pool;

	/* Recovery pool: one entry per LUN. */
	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_blk_ws_pool;

	pblk->g_rq_pool = mempool_create_slab_pool(PBLK_READ_REQ_POOL_SIZE,
							pblk_g_rq_cache);
	if (!pblk->g_rq_pool)
		goto free_rec_pool;

	/* Write request pool: two entries per LUN. */
	pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns * 2,
							pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_g_rq_pool;

	pblk->line_meta_pool =
			mempool_create_slab_pool(PBLK_META_POOL_SIZE,
							pblk_line_meta_cache);
	if (!pblk->line_meta_pool)
		goto free_w_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_line_meta_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	if (pblk_set_ppaf(pblk))
		goto free_bb_wq;

	if (pblk_rwb_init(pblk))
		goto free_bb_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

	/* Unwind in reverse order of creation; labels fall through. */
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_line_meta_pool:
	mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_g_rq_pool:
	mempool_destroy(pblk->g_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_blk_ws_pool:
	mempool_destroy(pblk->line_ws_pool);
free_page_pool:
	mempool_destroy(pblk->page_pool);
	return -ENOMEM;
}