/*
 * lowmem_init - module init for the low memory killer.
 *
 * Hooks the task free/fork notifier chains and registers the LMK
 * shrinker with the VM so it is invoked during memory reclaim.
 * Always returns 0; none of the registrations report failure here.
 */
static int __init lowmem_init(void)
{
	/* Be notified when tasks exit/fork (presumably to keep the
	 * kill bookkeeping in sync — confirm against the callbacks). */
	task_free_register(&task_free_nb);
	task_fork_register(&task_fork_nb);
	/* Enter the global shrinker list used by memory reclaim. */
	register_shrinker(&lowmem_shrinker);
	return 0;
}
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Registers the task-free notifier and the LMK shrinker.  With
 * CONFIG_ZRAM_FOR_ANDROID it additionally exposes an lmk_state
 * attribute via /sys/class/lmk/lowmemorykiller.  Sysfs setup failures
 * are only logged; init still returns 0 so the core LMK keeps working.
 */
static int __init lowmem_init(void)
{
	task_free_register(&task_nb);
	register_shrinker(&lowmem_shrinker);

#ifdef CONFIG_ZRAM_FOR_ANDROID
	lmk_class = class_create(THIS_MODULE, "lmk");
	if (IS_ERR(lmk_class)) {
		printk(KERN_ERR "Failed to create class(lmk)\n");
		return 0;
	}
	lmk_dev = device_create(lmk_class, NULL, 0, NULL, "lowmemorykiller");
	if (IS_ERR(lmk_dev)) {
		/*
		 * Bug fix: IS_ERR() only evaluates to 0/1, so the %ld used
		 * to print a meaningless value.  PTR_ERR() yields the real
		 * negative errno encoded in the pointer.
		 */
		printk(KERN_ERR
		       "Failed to create device(lowmemorykiller)!= %ld\n",
		       PTR_ERR(lmk_dev));
		return 0;
	}
	if (device_create_file(lmk_dev, &dev_attr_lmk_state) < 0)
		printk(KERN_ERR "Failed to create device file(%s)!\n",
		       dev_attr_lmk_state.attr.name);
#endif /* CONFIG_ZRAM_FOR_ANDROID */
	return 0;
}
int kgsl_heap_init(void) { int i; for (i = 0; i < num_orders; i++) { struct kgsl_page_pool *pool; gfp_t gfp_flags = low_order_gfp_flags; if (orders[i] > 4) gfp_flags = high_order_gfp_flags; pool = kgsl_page_pool_create(gfp_flags, orders[i], orders_reserved[i], reserve_only[i]); if (!pool) goto destroy_pools; kgsl_heap.pools[i] = pool; } kgsl_heap.shrinker.shrink = kgsl_heap_shrink; kgsl_heap.shrinker.seeks = DEFAULT_SEEKS; kgsl_heap.shrinker.batch = 0; register_shrinker(&kgsl_heap.shrinker); return 0; destroy_pools: while (i--) kgsl_page_pool_destroy(kgsl_heap.pools[i]); return -ENOMEM; }
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Registers the task-free notifier, the LMK shrinker, and (optionally)
 * a memory-hotplug callback, then creates the "lowmemkiller" kobject
 * under mm_kobj.  If the kobject setup fails, the notifier and
 * shrinker registrations are rolled back and the error is returned.
 */
static int __init lowmem_init(void)
{
	int ret;

	task_free_register(&task_nb);
	register_shrinker(&lowmem_shrinker);
#ifdef CONFIG_MEMORY_HOTPLUG
	hotplug_memory_notifier(lmk_hotplug_callback, 0);
#endif

	lowmem_kobj = kzalloc(sizeof(*lowmem_kobj), GFP_KERNEL);
	if (!lowmem_kobj) {
		ret = -ENOMEM;
	} else {
		ret = kobject_init_and_add(lowmem_kobj, &lowmem_kobj_type,
					   mm_kobj, "lowmemkiller");
		if (ret)
			kfree(lowmem_kobj);
	}

	if (ret) {
		/* Roll back in reverse registration order. */
		unregister_shrinker(&lowmem_shrinker);
		task_free_unregister(&task_nb);
		return ret;
	}

	return 0;
}
/*
 * init_cifs_idmap - set up the cifs.idmap key type and its keyring.
 *
 * Allocates an override credential set whose thread keyring caches
 * idmap lookups, registers the key type, initializes the SID<->id
 * mapping trees/locks, and registers the cifs shrinker.  Returns 0 on
 * success or a negative errno; on failure the keyring/cred refs taken
 * so far are dropped in reverse order.
 */
int init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = keyring_alloc(".cifs_idmap", 0, 0, cred,
				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
				KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_NOT_IN_QUOTA, NULL);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	root_cred = cred;

	/* Initialize the four SID<->uid/gid mapping trees and their locks. */
	spin_lock_init(&siduidlock);
	uidtree = RB_ROOT;
	spin_lock_init(&sidgidlock);
	gidtree = RB_ROOT;

	spin_lock_init(&uidsidlock);
	siduidtree = RB_ROOT;
	spin_lock_init(&gidsidlock);
	sidgidtree = RB_ROOT;

	/* Let reclaim prune cached idmap entries. */
	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
static int virtio_balloon_register_shrinker(struct virtio_balloon *vb) { vb->shrinker.scan_objects = virtio_balloon_shrinker_scan; vb->shrinker.count_objects = virtio_balloon_shrinker_count; vb->shrinker.seeks = DEFAULT_SEEKS; return register_shrinker(&vb->shrinker); }
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Optionally arms a task-dump timeout, then registers the LMK
 * shrinker.  Always returns 0.
 */
static int __init lowmem_init(void)
{
#ifdef CONFIG_DUMP_TASKS_ON_NOPAGE
	/* First task dump no sooner than 1200 s after boot. */
	timeout = jiffies + 1200 * HZ;
#endif
	register_shrinker(&lowmem_shrinker);
	return 0;
}
void xfs_inode_shrinker_register( struct xfs_mount *mp) { mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink; mp->m_inode_shrink.seeks = DEFAULT_SEEKS; register_shrinker(&mp->m_inode_shrink); }
/*
 * lowmem_init - module init for the low memory killer.
 *
 * On LGE boards also registers a task-free notifier; always registers
 * the LMK shrinker.  Always returns 0.
 */
static int __init lowmem_init(void)
{
#ifdef CONFIG_MACH_LGE
	task_free_register(&task_nb);
#endif
	register_shrinker(&lowmem_shrinker);
	return 0;
}
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Registers the LMK shrinker and, when memory hotplug is enabled, a
 * hotplug notifier so thresholds can track memory coming and going.
 * Always returns 0.
 */
static int __init lowmem_init(void)
{
	register_shrinker(&lowmem_shrinker);
#ifdef CONFIG_MEMORY_HOTPLUG
	hotplug_memory_notifier(lmk_hotplug_callback, 0);
#endif
	return 0;
}
/*
 * init_f2fs_fs - module init for f2fs.
 *
 * Creates every cache/manager in dependency order, registers the
 * shrinker and the filesystem type, then sets up stats and procfs.
 * Each failure unwinds exactly the steps that already succeeded via
 * the reverse-order goto ladder; the label order must mirror the
 * setup order.
 */
static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	/* sysfs kset under /sys/fs/f2fs */
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_kset;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	/* NOTE(review): proc_mkdir() result is unchecked; users of
	 * f2fs_proc_root apparently must tolerate NULL — confirm. */
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_kset:
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Registers the LMK shrinker and, on SEC builds, an additional OOM
 * notifier as a last line of defense.  Always returns 0.
 */
static int __init lowmem_init(void)
{
	register_shrinker(&lowmem_shrinker);
#ifdef CONFIG_SEC_OOM_KILLER
	register_oom_notifier(&android_oom_notifier);
#endif
	return 0;
}
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Registers the task-free notifier and the LMK shrinker; on Huawei
 * builds also calls the vendor low-memory stub hook.  Always returns 0.
 */
static int __init lowmem_init(void)
{
	task_free_register(&task_nb);
	register_shrinker(&lowmem_shrinker);
#ifdef CONFIG_HUAWEI_FEATURE_LOW_MEMORY_KILLER_STUB
	registerlowmem();
#endif
	return 0;
}
/**
 * reiser4_init_d_cursor - create d_cursor cache
 *
 * Creates the slab cache backing dir_cursor objects and registers the
 * d_cursor shrinker so reclaim can prune cursors.  Part of reiser4
 * module initialization.  Returns 0 on success, RETERR(-ENOMEM) if
 * the cache cannot be created.
 */
int reiser4_init_d_cursor(void)
{
	d_cursor_cache = kmem_cache_create("d_cursor", sizeof(dir_cursor), 0,
					   SLAB_HWCACHE_ALIGN, NULL);
	if (!d_cursor_cache)
		return RETERR(-ENOMEM);

	register_shrinker(&d_cursor_shrinker);
	return 0;
}
/*
 * init_cifs_idmap - set up the cifs.idmap key type and its keyring.
 *
 * Allocates an override credential set with a dedicated keyring that
 * caches idmap lookups (preventing malicious redirection via
 * add_key()), registers the key type, initializes the SID<->id
 * mapping trees/locks, and registers the cifs shrinker.  Returns 0 or
 * a negative errno; refs taken so far are dropped in reverse order on
 * failure.
 */
int init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
	if (ret < 0)
		goto failed_put_key;

	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;

	/* request_key() will use this keyring as a cache for lookups. */
	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	root_cred = cred;

	/* Initialize the four SID<->uid/gid mapping trees and their locks. */
	spin_lock_init(&siduidlock);
	uidtree = RB_ROOT;
	spin_lock_init(&sidgidlock);
	gidtree = RB_ROOT;

	spin_lock_init(&uidsidlock);
	siduidtree = RB_ROOT;
	spin_lock_init(&gidsidlock);
	sidgidtree = RB_ROOT;

	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Registers the task-free notifier and the LMK shrinker.  Always
 * returns 0.
 *
 * Vendor history (2012-08-16): a kmalloc'd lmk_kill_info buffer was
 * removed here as part of making LMK consider swap conditions.
 */
static int __init lowmem_init(void)
{
	task_free_register(&task_nb);
	register_shrinker(&lowmem_shrinker);
	return 0;
}
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Optionally hooks the KGSL graphics driver's shrink callback, then
 * registers the LMK shrinker and (with memory hotplug) a hotplug
 * notifier.  Always returns 0.  The task-free notifier registration
 * has been intentionally disabled in this variant.
 */
static int __init lowmem_init(void)
{
#if CONFIG_MSM_KGSL_VM_THRESHOLD > 0
	/* NOTE(review): extern declaration inside the function body —
	 * should live in a shared header. */
	extern void kgsl_register_shrinker(void (*shrink)(int largest, int threshold));
	kgsl_register_shrinker(lowmem_vm_shrinker);
#endif
	register_shrinker(&lowmem_shrinker);
#ifdef CONFIG_MEMORY_HOTPLUG
	hotplug_memory_notifier(lmk_hotplug_callback, 0);
#endif
	return 0;
}
/*
 * workingset_init - set up shadow-node tracking.
 *
 * Initializes the shadow-node LRU (keyed for lockdep) and registers
 * the shadow shrinker.  On shrinker-registration failure the LRU is
 * torn down again.  Returns 0 or a negative errno.
 */
static int __init workingset_init(void)
{
	int err;

	err = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
	if (err)
		return err;

	err = register_shrinker(&workingset_shadow_shrinker);
	if (err) {
		/* Undo the LRU init; nothing else was set up yet. */
		list_lru_destroy(&workingset_shadow_nodes);
		return err;
	}

	return 0;
}
/*
 * sptlrpc_enc_pool_init - initialize the bulk-encryption page pools.
 *
 * Caps the pool at 1/8 of physical memory, zeroes all runtime and
 * statistics state, allocates the pool pointer array, and registers
 * the pool shrinker.  Returns 0 on success or -ENOMEM if the pool
 * array cannot be allocated.
 */
int sptlrpc_enc_pool_init(void)
{
	/*
	 * maximum capacity is 1/8 of total physical memory.
	 * is the 1/8 a good number?
	 */
	page_pools.epp_max_pages = totalram_pages / 8;
	page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

	init_waitqueue_head(&page_pools.epp_waitq);
	page_pools.epp_waitqlen = 0;
	page_pools.epp_pages_short = 0;
	page_pools.epp_growing = 0;

	page_pools.epp_idle_idx = 0;
	page_pools.epp_last_shrink = ktime_get_seconds();
	page_pools.epp_last_access = ktime_get_seconds();

	spin_lock_init(&page_pools.epp_lock);
	page_pools.epp_total_pages = 0;
	page_pools.epp_free_pages = 0;

	/* statistics counters all start at zero */
	page_pools.epp_st_max_pages = 0;
	page_pools.epp_st_grows = 0;
	page_pools.epp_st_grow_fails = 0;
	page_pools.epp_st_shrinks = 0;
	page_pools.epp_st_access = 0;
	page_pools.epp_st_missings = 0;
	page_pools.epp_st_lowfree = 0;
	page_pools.epp_st_max_wqlen = 0;
	page_pools.epp_st_max_wait = 0;
	page_pools.epp_st_outofmem = 0;

	enc_pools_alloc();
	if (!page_pools.epp_pools)
		return -ENOMEM;

	/* NOTE(review): register_shrinker() return value is ignored;
	 * in kernels where it can fail this leaks the pool array. */
	register_shrinker(&pools_shrinker);

	return 0;
}
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Registers the task-free notifier and the LMK shrinker.  With
 * CONFIG_ZRAM_FOR_ANDROID it also derives a free-memory threshold from
 * the largest zone's watermarks and exposes an lmk_state attribute at
 * /sys/class/lmk/lowmemorykiller.  Sysfs failures are only logged;
 * init always returns 0 so the core LMK keeps working.
 */
static int __init lowmem_init(void)
{
#ifdef CONFIG_ZRAM_FOR_ANDROID
	struct zone *zone;
	unsigned int high_wmark = 0;
	unsigned int low_wmark = 0;
#endif
	task_free_register(&task_nb);
	register_shrinker(&lowmem_shrinker);

#ifdef CONFIG_ZRAM_FOR_ANDROID
	/* Use the largest zone's high+low watermark sum as the "enough
	 * free memory" cutoff, falling back to CHECK_FREE_MEMORY. */
	for_each_zone(zone) {
		if (high_wmark < zone->watermark[WMARK_HIGH]) {
			high_wmark = zone->watermark[WMARK_HIGH];
			low_wmark = zone->watermark[WMARK_LOW];
		}
	}
	high_wmark += low_wmark;
	check_free_memory = (high_wmark != 0) ? high_wmark : CHECK_FREE_MEMORY;

	lmk_class = class_create(THIS_MODULE, "lmk");
	if (IS_ERR(lmk_class)) {
		printk(KERN_ERR "Failed to create class(lmk)\n");
		return 0;
	}
	lmk_dev = device_create(lmk_class, NULL, 0, NULL, "lowmemorykiller");
	if (IS_ERR(lmk_dev)) {
		/*
		 * Bug fix: IS_ERR() only evaluates to 0/1, so the %ld used
		 * to print a meaningless value.  PTR_ERR() yields the real
		 * negative errno encoded in the pointer.
		 */
		printk(KERN_ERR
		       "Failed to create device(lowmemorykiller)!= %ld\n",
		       PTR_ERR(lmk_dev));
		return 0;
	}
	if (device_create_file(lmk_dev, &dev_attr_lmk_state) < 0)
		printk(KERN_ERR "Failed to create device file(%s)!\n",
		       dev_attr_lmk_state.attr.name);
#endif /* CONFIG_ZRAM_FOR_ANDROID */
	return 0;
}
/*
 * workingset_init - register the shadow-node shrinker.
 *
 * NOTE(review): register_shrinker()'s return value is ignored here;
 * in kernels where it can fail, registration failure would go
 * unnoticed.  Always returns 0.
 */
static int __init workingset_init(void)
{
	register_shrinker(&workingset_shadow_shrinker);
	return 0;
}
/*
 * init_gfs2_fs - module init for GFS2.
 *
 * Sets up qstrs, quota hashing, sysfs, the quota-data LRU, glocks,
 * all slab caches, the quota shrinker, both filesystem types, three
 * workqueues and the page mempool.  Failures unwind through the goto
 * ladder in reverse setup order.
 */
static int __init init_gfs2_fs(void)
{
	int error;

	gfs2_str2qstr(&gfs2_qdot, ".");
	gfs2_str2qstr(&gfs2_qdotdot, "..");
	gfs2_quota_hash_init();

	error = gfs2_sys_init();
	if (error)
		return error;

	error = list_lru_init(&gfs2_qd_lru);
	if (error)
		goto fail_lru;

	error = gfs2_glock_init();
	if (error)
		goto fail;

	error = -ENOMEM;
	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
					      0, 0,
					      gfs2_init_glock_once);
	if (!gfs2_glock_cachep)
		goto fail;

	gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
					sizeof(struct gfs2_glock) +
					sizeof(struct address_space),
					0, 0, gfs2_init_gl_aspace_once);
	if (!gfs2_glock_aspace_cachep)
		goto fail;

	gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
					      sizeof(struct gfs2_inode),
					      0,  SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD,
					      gfs2_init_inode_once);
	if (!gfs2_inode_cachep)
		goto fail;

	gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
						sizeof(struct gfs2_bufdata),
						0, 0, NULL);
	if (!gfs2_bufdata_cachep)
		goto fail;

	gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
					      sizeof(struct gfs2_rgrpd),
					      0, 0, NULL);
	if (!gfs2_rgrpd_cachep)
		goto fail;

	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
					       0, 0, NULL);
	if (!gfs2_quotad_cachep)
		goto fail;

	gfs2_rsrv_cachep = kmem_cache_create("gfs2_mblk",
					     sizeof(struct gfs2_blkreserv),
					       0, 0, NULL);
	if (!gfs2_rsrv_cachep)
		goto fail;

	register_shrinker(&gfs2_qd_shrinker);

	error = register_filesystem(&gfs2_fs_type);
	if (error)
		goto fail;

	error = register_filesystem(&gfs2meta_fs_type);
	if (error)
		goto fail_unregister;

	error = -ENOMEM;
	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
	if (!gfs_recovery_wq)
		goto fail_wq;

	gfs2_control_wq = alloc_workqueue("gfs2_control",
					  WQ_UNBOUND | WQ_FREEZABLE, 0);
	if (!gfs2_control_wq)
		goto fail_recovery;

	gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0);
	if (!gfs2_freeze_wq)
		goto fail_control;

	gfs2_page_pool = mempool_create_page_pool(64, 0);
	if (!gfs2_page_pool)
		goto fail_freeze;

	gfs2_register_debugfs();

	pr_info("GFS2 installed\n");

	return 0;

fail_freeze:
	destroy_workqueue(gfs2_freeze_wq);
fail_control:
	destroy_workqueue(gfs2_control_wq);
fail_recovery:
	destroy_workqueue(gfs_recovery_wq);
fail_wq:
	unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
	unregister_filesystem(&gfs2_fs_type);
fail:
	list_lru_destroy(&gfs2_qd_lru);
fail_lru:
	/* NOTE(review): via "goto fail_lru" (list_lru_init failure) this
	 * runs unregister_shrinker() before the shrinker was ever
	 * registered, and gfs2_glock_exit() before glock init — verify
	 * against the callees that this is tolerated. */
	unregister_shrinker(&gfs2_qd_shrinker);
	gfs2_glock_exit();

	if (gfs2_rsrv_cachep)
		kmem_cache_destroy(gfs2_rsrv_cachep);

	if (gfs2_quotad_cachep)
		kmem_cache_destroy(gfs2_quotad_cachep);

	if (gfs2_rgrpd_cachep)
		kmem_cache_destroy(gfs2_rgrpd_cachep);

	if (gfs2_bufdata_cachep)
		kmem_cache_destroy(gfs2_bufdata_cachep);

	if (gfs2_inode_cachep)
		kmem_cache_destroy(gfs2_inode_cachep);

	if (gfs2_glock_aspace_cachep)
		kmem_cache_destroy(gfs2_glock_aspace_cachep);

	if (gfs2_glock_cachep)
		kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
	return error;
}
/*
 * dfd_shrinker_init - register the dfd shrinker with the VM so it is
 * invoked during memory reclaim.  Always returns 0.
 */
static int __init dfd_shrinker_init(void)
{
	register_shrinker(&dfd_shrinker);
	return 0;
}
/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/* KM_SLEEP allocation — presumably blocks until it succeeds
	 * rather than returning NULL, so no NULL check here; confirm. */
	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	INIT_LIST_HEAD(&qinf->qi_lru_list);
	qinf->qi_lru_count = 0;
	mutex_init(&qinf->qi_lru_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			      XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			       (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
				XFS_DQ_PROJ),
			      XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		/* Root dquot was only needed for the defaults; drop it. */
		xfs_qm_dqdestroy(dqp);
	} else {
		/* No root dquot readable: fall back to compiled-in
		 * defaults (hard/soft limits stay zero from kmem_zalloc). */
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	/* Hook per-mount dquot reclaim into the VM shrinker list. */
	qinf->qi_shrinker.shrink = xfs_qm_shake;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}
/*
 * init_gfs2_fs - module init for GFS2 (workqueue-era variant).
 *
 * Sets up qstrs, sysfs, glocks, the slab caches, the quota shrinker,
 * both filesystem types and the recovery workqueue.  Failures unwind
 * through the goto ladder in reverse setup order.
 */
static int __init init_gfs2_fs(void)
{
	int error;

	gfs2_str2qstr(&gfs2_qdot, ".");
	gfs2_str2qstr(&gfs2_qdotdot, "..");

	error = gfs2_sys_init();
	if (error)
		return error;

	error = gfs2_glock_init();
	if (error)
		goto fail;

	error = -ENOMEM;
	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
					      0, 0,
					      gfs2_init_glock_once);
	if (!gfs2_glock_cachep)
		goto fail;

	gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
					sizeof(struct gfs2_glock) +
					sizeof(struct address_space),
					0, 0, gfs2_init_gl_aspace_once);
	if (!gfs2_glock_aspace_cachep)
		goto fail;

	gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
					      sizeof(struct gfs2_inode),
					      0,  SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD,
					      gfs2_init_inode_once);
	if (!gfs2_inode_cachep)
		goto fail;

	gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
						sizeof(struct gfs2_bufdata),
						0, 0, NULL);
	if (!gfs2_bufdata_cachep)
		goto fail;

	gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
					      sizeof(struct gfs2_rgrpd),
					      0, 0, NULL);
	if (!gfs2_rgrpd_cachep)
		goto fail;

	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
					       0, 0, NULL);
	if (!gfs2_quotad_cachep)
		goto fail;

	register_shrinker(&qd_shrinker);

	error = register_filesystem(&gfs2_fs_type);
	if (error)
		goto fail;

	error = register_filesystem(&gfs2meta_fs_type);
	if (error)
		goto fail_unregister;

	error = -ENOMEM;
	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
	if (!gfs_recovery_wq)
		goto fail_wq;

	gfs2_register_debugfs();

	printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__);

	return 0;

fail_wq:
	unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
	unregister_filesystem(&gfs2_fs_type);
fail:
	/* NOTE(review): reached via "goto fail" from before the shrinker
	 * was registered (e.g. glock init or cache creation failure), yet
	 * it unconditionally unregisters qd_shrinker — verify the callee
	 * tolerates that. */
	unregister_shrinker(&qd_shrinker);
	gfs2_glock_exit();

	if (gfs2_quotad_cachep)
		kmem_cache_destroy(gfs2_quotad_cachep);

	if (gfs2_rgrpd_cachep)
		kmem_cache_destroy(gfs2_rgrpd_cachep);

	if (gfs2_bufdata_cachep)
		kmem_cache_destroy(gfs2_bufdata_cachep);

	if (gfs2_inode_cachep)
		kmem_cache_destroy(gfs2_inode_cachep);

	if (gfs2_glock_aspace_cachep)
		kmem_cache_destroy(gfs2_glock_aspace_cachep);

	if (gfs2_glock_cachep)
		kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
	return error;
}
/*
 * init_gfs2_fs - module init for GFS2 (slow-work era variant).
 *
 * Sets up sysfs, glocks, the slab caches, the quota shrinker, both
 * filesystem types and the slow-work user.  Failures unwind through
 * the goto ladder in reverse setup order.
 */
static int __init init_gfs2_fs(void)
{
	int error;

	error = gfs2_sys_init();
	if (error)
		return error;

	error = gfs2_glock_init();
	if (error)
		goto fail;

	error = -ENOMEM;
	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
					      0, 0,
					      gfs2_init_glock_once);
	if (!gfs2_glock_cachep)
		goto fail;

	gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
					      sizeof(struct gfs2_inode),
					      0,  SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD,
					      gfs2_init_inode_once);
	if (!gfs2_inode_cachep)
		goto fail;

	gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
						sizeof(struct gfs2_bufdata),
						0, 0, NULL);
	if (!gfs2_bufdata_cachep)
		goto fail;

	gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
					      sizeof(struct gfs2_rgrpd),
					      0, 0, NULL);
	if (!gfs2_rgrpd_cachep)
		goto fail;

	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
					       0, 0, NULL);
	if (!gfs2_quotad_cachep)
		goto fail;

	register_shrinker(&qd_shrinker);

	error = register_filesystem(&gfs2_fs_type);
	if (error)
		goto fail;

	error = register_filesystem(&gfs2meta_fs_type);
	if (error)
		goto fail_unregister;

	error = slow_work_register_user();
	if (error)
		goto fail_slow;

	gfs2_register_debugfs();

	printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__);

	return 0;

fail_slow:
	unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
	unregister_filesystem(&gfs2_fs_type);
fail:
	/* NOTE(review): reached via "goto fail" from before the shrinker
	 * was registered, yet it unconditionally unregisters qd_shrinker
	 * — verify the callee tolerates that. */
	unregister_shrinker(&qd_shrinker);
	gfs2_glock_exit();

	if (gfs2_quotad_cachep)
		kmem_cache_destroy(gfs2_quotad_cachep);

	if (gfs2_rgrpd_cachep)
		kmem_cache_destroy(gfs2_rgrpd_cachep);

	if (gfs2_bufdata_cachep)
		kmem_cache_destroy(gfs2_bufdata_cachep);

	if (gfs2_inode_cachep)
		kmem_cache_destroy(gfs2_inode_cachep);

	if (gfs2_glock_cachep)
		kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
	return error;
}
/*
 * lowmem_init - register the low memory killer shrinker with the VM
 * so it is invoked during memory reclaim.  Always returns 0.
 */
static int __init lowmem_init(void)
{
	register_shrinker(&lowmem_shrinker);
	return 0;
}
/*
 * lowmem_init - module init for the low memory killer.
 *
 * Registers a task-handoff notifier and the LMK shrinker.  Always
 * returns 0.
 */
static int __init lowmem_init(void)
{
	task_handoff_register(&task_nb);
	register_shrinker(&lowmem_shrinker);
	return 0;
}