int sf_init_backing_dev(struct sf_glob_info *sf_g)
{
    int rc = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
    /* Each new shared folder map gets a new uint64_t identifier,
     * allocated in sequence.  We ASSUME the sequence will not wrap. */
    static uint64_t s_u64Sequence = 0;
    uint64_t u64CurrentSequence = ASMAtomicIncU64(&s_u64Sequence);

    sf_g->bdi.ra_pages = 0;                      /* No readahead */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
    sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT  /* MAP_SHARED */
                           | BDI_CAP_MAP_COPY    /* MAP_PRIVATE */
                           | BDI_CAP_READ_MAP    /* can be mapped for reading */
                           | BDI_CAP_WRITE_MAP   /* can be mapped for writing */
                           | BDI_CAP_EXEC_MAP;   /* can be mapped for execution */
# endif /* >= 2.6.12 */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    rc = bdi_init(&sf_g->bdi);
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
    if (!rc)
        rc = bdi_register(&sf_g->bdi, NULL, "vboxsf-%llu",
                          (unsigned long long)u64CurrentSequence);
#  endif /* >= 2.6.26 */
# endif /* >= 2.6.24 */
#endif /* >= 2.6.0 && <= 3.19.0 */
    return rc;
}
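For completeness, a plausible matching teardown for the init above, mirroring its version guards; the function name and comment follow vboxsf naming conventions and are an assumption, not taken from the snippet itself:

static void sf_done_backing_dev(struct sf_glob_info *sf_g)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
    /* bdi_destroy() also unregisters the bdi if it was registered */
    bdi_destroy(&sf_g->bdi);
#endif
}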
/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
    unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
    int i;

    bdi_init(swapper_spaces[0].backing_dev_info);
    for (i = 0; i < MAX_SWAPFILES; i++) {
        spin_lock_init(&swapper_spaces[i].tree_lock);
        INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
    }
#endif

    /* Use a smaller cluster for small-memory machines */
#ifdef CONFIG_ZRAM
    page_cluster = 0;   /* disable swap read-ahead */
#else
    if (megs < 16)
        page_cluster = 2;
    else
        page_cluster = 3;
#endif
    /*
     * Right now other parts of the system mean that we
     * _really_ don't want to cluster much more.
     */
}
/* alloc_disk and add_disk can sleep */
void aoeblk_gdalloc(void *vp)
{
    struct aoedev *d = vp;
    struct gendisk *gd;
    ulong flags;

    gd = alloc_disk(AOE_PARTITIONS);
    if (gd == NULL) {
        printk(KERN_ERR "aoe: cannot allocate disk structure for %ld.%d\n",
               d->aoemajor, d->aoeminor);
        goto err;
    }

    d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
    if (d->bufpool == NULL) {
        printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
               d->aoemajor, d->aoeminor);
        goto err_disk;
    }

    blk_queue_make_request(&d->blkq, aoeblk_make_request);
    if (bdi_init(&d->blkq.backing_dev_info))
        goto err_mempool;
    spin_lock_irqsave(&d->lock, flags);
    gd->major = AOE_MAJOR;
    gd->first_minor = d->sysminor * AOE_PARTITIONS;
    gd->fops = &aoe_bdops;
    gd->private_data = d;
    gd->capacity = d->ssize;
    snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
             d->aoemajor, d->aoeminor);

    gd->queue = &d->blkq;
    d->gd = gd;
    d->flags &= ~DEVFL_GDALLOC;
    d->flags |= DEVFL_UP;

    spin_unlock_irqrestore(&d->lock, flags);

    add_disk(gd);
    aoedisk_add_sysfs(d);
    return;

err_mempool:
    mempool_destroy(d->bufpool);
err_disk:
    put_disk(gd);
err:
    spin_lock_irqsave(&d->lock, flags);
    d->flags &= ~DEVFL_GDALLOC;
    spin_unlock_irqrestore(&d->lock, flags);
}
static int __init default_bdi_init(void)
{
    int err;

    bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
                             WQ_UNBOUND | WQ_SYSFS, 0);
    if (!bdi_wq)
        return -ENOMEM;

    err = bdi_init(&noop_backing_dev_info);

    return err;
}
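A note on wiring: an early-init routine like this is registered as an initcall so the writeback workqueue exists before any bdi user runs. The hookup below is a sketch of that convention, assuming the function lives in mm/backing-dev.c:

/* Run during subsystem init, before filesystems mount and need bdi_wq. */
subsys_initcall(default_bdi_init);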
int __init init_rootfs(void)
{
    int err;

    err = bdi_init(&ramfs_backing_dev_info);
    if (err)
        return err;

    err = register_filesystem(&rootfs_fs_type);
    if (err)
        bdi_destroy(&ramfs_backing_dev_info);

    return err;
}
struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
    struct backing_dev_info *bdi;

    bdi = kmalloc_node(sizeof(struct backing_dev_info),
                       gfp_mask | __GFP_ZERO, node_id);
    if (!bdi)
        return NULL;

    if (bdi_init(bdi)) {
        kfree(bdi);
        return NULL;
    }
    return bdi;
}
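A hedged usage sketch for the allocator above, loosely modeled on block-layer queue setup; the queue field name and the error label are illustrative assumptions:

    /* Allocate the queue's bdi on its home NUMA node so the writeback
     * bookkeeping stays local; on failure, unwind as usual. */
    q->backing_dev_info = bdi_alloc_node(GFP_KERNEL, node_id);
    if (!q->backing_dev_info)
        goto fail_queue;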
/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
    unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
    bdi_init(swapper_space.backing_dev_info);
#endif

    /* Use a smaller cluster for small-memory machines */
    if (megs < 16)
        page_cluster = 2;
    else
        page_cluster = 3;
    /*
     * Right now other parts of the system mean that we
     * _really_ don't want to cluster much more.
     */
}
int __init init_ramfs_fs(void)
{
    static unsigned long once;
    int err;

    if (test_and_set_bit(0, &once))
        return 0;

    err = bdi_init(&ramfs_backing_dev_info);
    if (err)
        return err;

    err = register_filesystem(&ramfs_fs_type);
    if (err)
        bdi_destroy(&ramfs_backing_dev_info);

    return err;
}
/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
    int err;

    bdi->name = name;
    bdi->capabilities = 0;
    err = bdi_init(bdi);
    if (err)
        return err;

    err = bdi_register(bdi, NULL, "%.28s-%ld", name,
                       atomic_long_inc_return(&bdi_seq));
    if (err) {
        bdi_destroy(bdi);
        return err;
    }

    return 0;
}
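A short usage sketch for the helper above, in the style of network filesystems that carried a per-mount bdi; examplefs_sb_info, its bdi field, and the function name are hypothetical:

/* Hypothetical per-mount setup: one call does bdi_init() + bdi_register(),
 * producing names like "examplefs-1", "examplefs-2", ... */
static int examplefs_setup_bdi(struct super_block *sb,
                               struct examplefs_sb_info *sbi)
{
    int err;

    err = bdi_setup_and_register(&sbi->bdi, "examplefs");
    if (err)
        return err;

    /* Point the superblock at the registered bdi so writeback and
     * readahead for this mount go through it. */
    sb->s_bdi = &sbi->bdi;
    return 0;
}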
static int __init init_dlmfs_fs(void)
{
    int status;
    int cleanup_inode = 0, cleanup_worker = 0;

    dlmfs_print_version();

    status = bdi_init(&dlmfs_backing_dev_info);
    if (status)
        return status;

    dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
                            sizeof(struct dlmfs_inode_private),
                            0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD),
                            dlmfs_init_once);
    if (!dlmfs_inode_cache) {
        status = -ENOMEM;
        goto bail;
    }
    cleanup_inode = 1;

    user_dlm_worker = create_singlethread_workqueue("user_dlm");
    if (!user_dlm_worker) {
        status = -ENOMEM;
        goto bail;
    }
    cleanup_worker = 1;

    user_dlm_set_locking_protocol();
    status = register_filesystem(&dlmfs_fs_type);
bail:
    if (status) {
        if (cleanup_inode)
            kmem_cache_destroy(dlmfs_inode_cache);
        if (cleanup_worker)
            destroy_workqueue(user_dlm_worker);
        bdi_destroy(&dlmfs_backing_dev_info);
    } else
        printk("OCFS2 User DLM kernel interface loaded\n");
    return status;
}
int sf_init_backing_dev(struct sf_glob_info *sf_g, const char *name)
{
    int rc = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    sf_g->bdi.ra_pages = 0;                      /* No readahead */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
    sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT  /* MAP_SHARED */
                           | BDI_CAP_MAP_COPY    /* MAP_PRIVATE */
                           | BDI_CAP_READ_MAP    /* can be mapped for reading */
                           | BDI_CAP_WRITE_MAP   /* can be mapped for writing */
                           | BDI_CAP_EXEC_MAP;   /* can be mapped for execution */
# endif /* >= 2.6.12 */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    rc = bdi_init(&sf_g->bdi);
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
    if (!rc)
        rc = bdi_register(&sf_g->bdi, NULL, "vboxsf-%s", name);
#  endif /* >= 2.6.26 */
# endif /* >= 2.6.24 */
#endif /* >= 2.6.0 */
    return rc;
}
int __init sysfs_inode_init(void)
{
    return bdi_init(&sysfs_backing_dev_info);
}
static struct fuse_conn *new_conn(struct super_block *sb)
{
    struct fuse_conn *fc;
    int err;

    fc = kzalloc(sizeof(*fc), GFP_KERNEL);
    if (fc) {
        spin_lock_init(&fc->lock);
        mutex_init(&fc->inst_mutex);
        atomic_set(&fc->count, 1);
        init_waitqueue_head(&fc->waitq);
        init_waitqueue_head(&fc->blocked_waitq);
        init_waitqueue_head(&fc->reserved_req_waitq);
        INIT_LIST_HEAD(&fc->pending);
        INIT_LIST_HEAD(&fc->processing);
        INIT_LIST_HEAD(&fc->io);
        INIT_LIST_HEAD(&fc->interrupts);
        INIT_LIST_HEAD(&fc->bg_queue);
        atomic_set(&fc->num_waiting, 0);
        fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        fc->bdi.unplug_io_fn = default_unplug_io_fn;
        /* fuse does its own writeback accounting */
        fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
        fc->dev = sb->s_dev;
        err = bdi_init(&fc->bdi);
        if (err)
            goto error_kfree;
        if (sb->s_bdev) {
            err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
                               MAJOR(fc->dev), MINOR(fc->dev));
        } else {
            err = bdi_register_dev(&fc->bdi, fc->dev);
        }
        if (err)
            goto error_bdi_destroy;
        /*
         * For a single fuse filesystem use max 1% of dirty +
         * writeback threshold.
         *
         * This gives about 1M of write buffer for memory maps on a
         * machine with 1G and 10% dirty_ratio, which should be more
         * than enough.
         *
         * Privileged users can raise it by writing to
         *
         *    /sys/class/bdi/<bdi>/max_ratio
         */
        bdi_set_max_ratio(&fc->bdi, 1);
        fc->reqctr = 0;
        fc->blocked = 1;
        fc->attr_version = 1;
        get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
    }
    return fc;

error_bdi_destroy:
    bdi_destroy(&fc->bdi);
error_kfree:
    mutex_destroy(&fc->inst_mutex);
    kfree(fc);
    return NULL;
}
int afs_fill_super(struct super_block *sb, void *data, int silent)
{
    int code = 0;
#if defined(HAVE_LINUX_BDI_INIT)
    int bdi_init_done = 0;
#endif

    AFS_GLOCK();
    if (afs_was_mounted) {
        printf("You must reload the AFS kernel extensions before remounting AFS.\n");
        AFS_GUNLOCK();
        return -EINVAL;
    }
    afs_was_mounted = 1;

    /* Set basics of super_block */
    __module_get(THIS_MODULE);

    afs_globalVFS = sb;
    sb->s_flags |= MS_NOATIME;
    sb->s_blocksize = 1024;
    sb->s_blocksize_bits = 10;
    sb->s_magic = AFS_VFSMAGIC;
    sb->s_op = &afs_sops;       /* Super block (vfs) ops */

#if defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
    sb->s_d_op = &afs_dentry_operations;
#endif

    /* used for inodes backing_dev_info field, also */
    afs_backing_dev_info = kzalloc(sizeof(struct backing_dev_info), GFP_NOFS);
    if (!afs_backing_dev_info) {
        /* NULL check added: bdi_init() below would dereference it */
        code = -ENOMEM;
        goto out;
    }
#if defined(HAVE_LINUX_BDI_INIT)
    code = bdi_init(afs_backing_dev_info);
    if (code)
        goto out;
    bdi_init_done = 1;
#endif
#if defined(STRUCT_BACKING_DEV_INFO_HAS_NAME)
    afs_backing_dev_info->name = "openafs";
#endif
    afs_backing_dev_info->ra_pages = 32;
#if defined(STRUCT_SUPER_BLOCK_HAS_S_BDI)
    sb->s_bdi = afs_backing_dev_info;
    /* The name specified here will appear in the flushing thread name -
     * flush-afs */
    bdi_register(afs_backing_dev_info, NULL, "afs");
#endif
#if !defined(AFS_NONFSTRANS)
    sb->s_export_op = &afs_export_ops;
#endif
#if defined(MAX_NON_LFS)
#ifdef AFS_64BIT_CLIENT
#if !defined(MAX_LFS_FILESIZE)
#if BITS_PER_LONG == 32
#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1)
#elif BITS_PER_LONG == 64
#define MAX_LFS_FILESIZE 0x7fffffffffffffff
#endif
#endif
    sb->s_maxbytes = MAX_LFS_FILESIZE;
#else
    sb->s_maxbytes = MAX_NON_LFS;
#endif
#endif
    code = afs_root(sb);
out:
    if (code) {
        afs_globalVFS = NULL;
        afs_FlushAllVCaches();
#if defined(HAVE_LINUX_BDI_INIT)
        if (bdi_init_done)
            bdi_destroy(afs_backing_dev_info);
#endif
        kfree(afs_backing_dev_info);
        module_put(THIS_MODULE);
    }
    AFS_GUNLOCK();
    return code ? -EINVAL : 0;
}
void __init kernfs_inode_init(void)
{
    if (bdi_init(&kernfs_bdi))
        panic("failed to init kernfs_bdi");
}