/**
 * Set up the backing device info for a vboxsf shared-folder mapping.
 *
 * Only does real work on kernels in the [2.6.0, 3.19.0] range; outside that
 * range this is a no-op returning 0.  Readahead is disabled and, on >= 2.6.12,
 * the mmap capability bits are advertised.  On >= 2.6.24 the bdi is
 * initialised and, on >= 2.6.26, registered under a unique "vboxsf-<seq>"
 * name.
 *
 * @param sf_g  per-superblock shared-folder state owning the embedded bdi
 * @return 0 on success, negative errno from bdi_init()/bdi_register() on
 *         failure
 */
int sf_init_backing_dev(struct sf_glob_info *sf_g)
{
    int rc = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
    /* Each new shared folder map gets a new uint64_t identifier,
     * allocated in sequence. We ASSUME the sequence will not wrap. */
    static uint64_t s_u64Sequence = 0;
    uint64_t u64CurrentSequence = ASMAtomicIncU64(&s_u64Sequence);

    sf_g->bdi.ra_pages = 0; /* No readahead */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
    sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT  /* MAP_SHARED */
                           | BDI_CAP_MAP_COPY    /* MAP_PRIVATE */
                           | BDI_CAP_READ_MAP    /* can be mapped for reading */
                           | BDI_CAP_WRITE_MAP   /* can be mapped for writing */
                           | BDI_CAP_EXEC_MAP;   /* can be mapped for execution */
# endif /* >= 2.6.12 */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    rc = bdi_init(&sf_g->bdi);
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
    /* Registration only exists from 2.6.26; skip it if bdi_init failed. */
    if (!rc)
        rc = bdi_register(&sf_g->bdi, NULL, "vboxsf-%llu",
                          (unsigned long long)u64CurrentSequence);
#  endif /* >= 2.6.26 */
# endif /* >= 2.6.24 */
#endif /* >= 2.6.0 && <= 3.19.0 */
    return rc;
}
/*
 * Register @bdi under the "major:minor" name of @owner's device and pin
 * @owner with a device reference for the lifetime of the bdi.
 *
 * Returns 0 on success or the negative errno from bdi_register(); on
 * failure no reference to @owner is taken.
 */
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
    int err;

    err = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
    if (err)
        return err;

    /* NOTE(review): an already-set owner would leak its reference here;
     * the WARN_ON only reports it. */
    WARN_ON(bdi->owner);
    bdi->owner = owner;
    get_device(owner);

    return 0;
}
/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback.  The bdi is named "<name>-<seq>" where <seq> is a
 * globally increasing counter, so repeated mounts get distinct names.
 *
 * Returns 0 on success; on registration failure the bdi is torn down again
 * and the negative errno is returned.
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
    int rc;

    bdi->name = name;
    bdi->capabilities = 0;

    rc = bdi_init(bdi);
    if (rc)
        return rc;

    rc = bdi_register(bdi, NULL, "%.28s-%ld", name,
                      atomic_long_inc_return(&bdi_seq));
    if (rc)
        bdi_destroy(bdi);

    return rc;
}
/**
 * Set up the backing device info for a vboxsf shared-folder mapping,
 * naming the bdi after the mounted share.
 *
 * No-op (returning 0) on kernels older than 2.6.0.  Readahead is disabled
 * and, on >= 2.6.12, the mmap capability bits are advertised.  On >= 2.6.24
 * the bdi is initialised and, on >= 2.6.26, registered as "vboxsf-<name>".
 *
 * @param sf_g  per-superblock shared-folder state owning the embedded bdi
 * @param name  share name used to build the bdi's registered name
 * @return 0 on success, negative errno from bdi_init()/bdi_register() on
 *         failure
 */
int sf_init_backing_dev(struct sf_glob_info *sf_g, const char *name)
{
    int rc = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    sf_g->bdi.ra_pages = 0; /* No readahead */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
    sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT  /* MAP_SHARED */
                           | BDI_CAP_MAP_COPY    /* MAP_PRIVATE */
                           | BDI_CAP_READ_MAP    /* can be mapped for reading */
                           | BDI_CAP_WRITE_MAP   /* can be mapped for writing */
                           | BDI_CAP_EXEC_MAP;   /* can be mapped for execution */
# endif /* >= 2.6.12 */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    rc = bdi_init(&sf_g->bdi);
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
    /* Registration only exists from 2.6.26; skip it if bdi_init failed. */
    if (!rc)
        rc = bdi_register(&sf_g->bdi, NULL, "vboxsf-%s", name);
#  endif /* >= 2.6.26 */
# endif /* >= 2.6.24 */
#endif /* >= 2.6.0 */
    return rc;
}
/*
 * Convenience wrapper: register @bdi using @dev's "major:minor" numbers
 * as the bdi name.  Returns whatever bdi_register() returns.
 */
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
    unsigned int major = MAJOR(dev);
    unsigned int minor = MINOR(dev);

    return bdi_register(bdi, NULL, "%u:%u", major, minor);
}
/**
 * Allocate and initialise a fuse_conn for superblock @sb.
 *
 * Sets up locks, wait queues, request lists, and the connection's embedded
 * bdi (initialised, registered — under "major:minor-fuseblk" for block-backed
 * mounts, plain "major:minor" otherwise — and capped at 1% of the dirty
 * threshold).
 *
 * @return the new connection with refcount 1, or NULL on allocation or
 *         bdi setup failure (all partially acquired resources are released).
 */
static struct fuse_conn *new_conn(struct super_block *sb)
{
    struct fuse_conn *fc;
    int err;

    fc = kzalloc(sizeof(*fc), GFP_KERNEL);
    if (fc) {
        spin_lock_init(&fc->lock);
        mutex_init(&fc->inst_mutex);
        atomic_set(&fc->count, 1);
        init_waitqueue_head(&fc->waitq);
        init_waitqueue_head(&fc->blocked_waitq);
        init_waitqueue_head(&fc->reserved_req_waitq);
        INIT_LIST_HEAD(&fc->pending);
        INIT_LIST_HEAD(&fc->processing);
        INIT_LIST_HEAD(&fc->io);
        INIT_LIST_HEAD(&fc->interrupts);
        INIT_LIST_HEAD(&fc->bg_queue);
        atomic_set(&fc->num_waiting, 0);
        fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        fc->bdi.unplug_io_fn = default_unplug_io_fn;
        /* fuse does its own writeback accounting */
        fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
        fc->dev = sb->s_dev;
        err = bdi_init(&fc->bdi);
        if (err)
            goto error_kfree;
        if (sb->s_bdev) {
            err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
                               MAJOR(fc->dev), MINOR(fc->dev));
        } else {
            err = bdi_register_dev(&fc->bdi, fc->dev);
        }
        if (err)
            goto error_bdi_destroy;
        /*
         * For a single fuse filesystem use max 1% of dirty +
         * writeback threshold.
         *
         * This gives about 1M of write buffer for memory maps on a
         * machine with 1G and 10% dirty_ratio, which should be more
         * than enough.
         *
         * Privileged users can raise it by writing to
         *
         *    /sys/class/bdi/<bdi>/max_ratio
         */
        bdi_set_max_ratio(&fc->bdi, 1);
        fc->reqctr = 0;
        fc->blocked = 1;
        fc->attr_version = 1;
        get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
    }
    return fc;

    /* Error unwinding — only reachable via the gotos above, so fc != NULL. */
error_bdi_destroy:
    bdi_destroy(&fc->bdi);
error_kfree:
    mutex_destroy(&fc->inst_mutex);
    kfree(fc);
    return NULL;
}
int afs_fill_super(struct super_block *sb, void *data, int silent) { int code = 0; #if defined(HAVE_LINUX_BDI_INIT) int bdi_init_done = 0; #endif AFS_GLOCK(); if (afs_was_mounted) { printf ("You must reload the AFS kernel extensions before remounting AFS.\n"); AFS_GUNLOCK(); return -EINVAL; } afs_was_mounted = 1; /* Set basics of super_block */ __module_get(THIS_MODULE); afs_globalVFS = sb; sb->s_flags |= MS_NOATIME; sb->s_blocksize = 1024; sb->s_blocksize_bits = 10; sb->s_magic = AFS_VFSMAGIC; sb->s_op = &afs_sops; /* Super block (vfs) ops */ #if defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP) sb->s_d_op = &afs_dentry_operations; #endif /* used for inodes backing_dev_info field, also */ afs_backing_dev_info = kzalloc(sizeof(struct backing_dev_info), GFP_NOFS); #if defined(HAVE_LINUX_BDI_INIT) code = bdi_init(afs_backing_dev_info); if (code) goto out; bdi_init_done = 1; #endif #if defined(STRUCT_BACKING_DEV_INFO_HAS_NAME) afs_backing_dev_info->name = "openafs"; #endif afs_backing_dev_info->ra_pages = 32; #if defined (STRUCT_SUPER_BLOCK_HAS_S_BDI) sb->s_bdi = afs_backing_dev_info; /* The name specified here will appear in the flushing thread name - flush-afs */ bdi_register(afs_backing_dev_info, NULL, "afs"); #endif #if !defined(AFS_NONFSTRANS) sb->s_export_op = &afs_export_ops; #endif #if defined(MAX_NON_LFS) #ifdef AFS_64BIT_CLIENT #if !defined(MAX_LFS_FILESIZE) #if BITS_PER_LONG==32 #define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) #elif BITS_PER_LONG==64 #define MAX_LFS_FILESIZE 0x7fffffffffffffff #endif #endif sb->s_maxbytes = MAX_LFS_FILESIZE; #else sb->s_maxbytes = MAX_NON_LFS; #endif #endif code = afs_root(sb); out: if (code) { afs_globalVFS = NULL; afs_FlushAllVCaches(); #if defined(HAVE_LINUX_BDI_INIT) if (bdi_init_done) bdi_destroy(afs_backing_dev_info); #endif kfree(afs_backing_dev_info); module_put(THIS_MODULE); } AFS_GUNLOCK(); return code ? -EINVAL : 0; }