/*
 * Unmount the filesystem on @dev and release the underlying block
 * device.  A scratch in-core inode is allocated purely so that
 * blkdev_release() has an inode (carrying i_rdev) to hand to the
 * driver.  The actual unmount runs under mount_sem.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -ENXIO, or
 * whatever do_umount() reports).
 */
static int umount_dev(kdev_t dev, int flags)
{
	int retval;
	struct inode * inode = get_empty_inode();

	retval = -ENOMEM;
	if (!inode)
		goto out;

	inode->i_rdev = dev;
	retval = -ENXIO;
	if (MAJOR(dev) >= MAX_BLKDEV)
		goto out_iput;

	/* Flush before taking the semaphore so the fs is mostly clean. */
	fsync_dev(dev);

	down(&mount_sem);

	retval = do_umount(dev, 0, flags);
	if (!retval) {
		/* Flush again: the unmount itself may have dirtied buffers. */
		fsync_dev(dev);
		if (dev != ROOT_DEV) {
			/* Root's driver stays open; everything else is released. */
			blkdev_release(inode);
			put_unnamed_dev(dev);
		}
	}

	up(&mount_sem);
out_iput:
	iput(inode);
out:
	return retval;
}
/*
 * umount(2): unmount a filesystem named either by its block-device
 * node or by its mount point.  When the mount point is given, a zeroed
 * on-stack dummy inode carrying the device number stands in for the
 * device node so the release path below still has an inode to use.
 * Superuser only.  Returns 0 or a negative errno.
 */
asmlinkage int sys_umount(char * name)
{
	struct inode * inode;
	kdev_t dev;
	int retval;
	struct inode dummy_inode;

	if (!suser())
		return -EPERM;
	retval = namei(name, &inode);
	if (retval) {
		/* Retry without following the final symlink. */
		retval = lnamei(name, &inode);
		if (retval)
			return retval;
	}
	if (S_ISBLK(inode->i_mode)) {
		dev = inode->i_rdev;
		if (IS_NODEV(inode)) {
			iput(inode);
			return -EACCES;
		}
	} else {
		/* Not a device node: must be the root of a mounted fs. */
		if (!inode->i_sb || inode != inode->i_sb->s_mounted) {
			iput(inode);
			return -EINVAL;
		}
		dev = inode->i_sb->s_dev;
		iput(inode);
		memset(&dummy_inode, 0, sizeof(dummy_inode));
		dummy_inode.i_rdev = dev;
		inode = &dummy_inode;
	}
	if (MAJOR(dev) >= MAX_BLKDEV) {
		/* NOTE(review): this can iput() the on-stack dummy inode --
		 * confirm iput() tolerates a non-hashed stack inode here. */
		iput(inode);
		return -ENXIO;
	}
	retval = do_umount(dev,0);
	if (!retval) {
		fsync_dev(dev);
		if (dev != ROOT_DEV) {
			/* Let the block driver drop its reference ... */
			blkdev_release (inode);
			/* ... and recycle anonymous device numbers. */
			if (MAJOR(dev) == UNNAMED_MAJOR)
				put_unnamed_dev(dev);
		}
	}
	if (inode != &dummy_inode)
		iput(inode);
	if (retval)
		return retval;
	/* Final flush of anything the unmount left dirty. */
	fsync_dev(dev);
	return 0;
}
/*
 * Find a reusable buffer head: scan the LRU list for one that is
 * unreferenced, clean and unlocked (and, when buffers live in external
 * storage, one whose data page has already been released).  If no
 * candidate exists, write out dirty buffers via sync_buffers() and
 * rescan; this loops until a buffer becomes free.
 */
static struct buffer_head *get_free_buffer(void)
{
	register struct buffer_head *bh;

	for (;;) {
		bh = bh_lru;
		do {
#ifdef CONFIG_FS_EXTERNAL_BUFFER
			if (bh->b_count == 0 && !bh->b_dirty && !bh->b_lock && !bh->b_data)
#else
			if (bh->b_count == 0 && !bh->b_dirty && !bh->b_lock)
#endif
			{
				/* Presumably moves bh to the LRU tail so it is
				 * considered last on the next scan -- confirm. */
				put_last_lru(bh);
				return bh;
			}
		} while ((bh = bh->b_next_lru) != NULL);
#if 0
		fsync_dev(0); /* This causes a sleep until another process brelse's */
		sleep_on(&bufwait);
#endif
		/* No free buffer: flush dirty ones, then retry the scan. */
		sync_buffers(0, 0);
	}
}
/*
 * Change the mount flags of an already-mounted filesystem.
 * Looks up @dir, verifies it is the root dentry of a mount, syncs the
 * device, then delegates to do_remount_sb().  Returns 0 or -errno
 * (-ENODEV if no superblock, -EINVAL if not a mount root).
 */
int do_remount(const char *dir,int flags,char *data)
{
	struct dentry *dentry;
	int retval;

	dentry = namei(dir);
	retval = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		struct super_block * sb = dentry->d_inode->i_sb;

		retval = -ENODEV;
		if (sb) {
			retval = -EINVAL;
			if (dentry == sb->s_root) {
				/*
				 * Shrink the dcache and sync the device.
				 */
				shrink_dcache_sb(sb);
				fsync_dev(sb->s_dev);
				/* Going read-only: stop process accounting on this fs first. */
				if (flags & MS_RDONLY)
					acct_auto_close(sb->s_dev);
				retval = do_remount_sb(sb, flags, data);
			}
		}
		dput(dentry);
	}
	return retval;
}
static int mtdblock_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg) { struct mtd_info *mtd; mtd = __get_mtd_device(NULL, MINOR(inode->i_rdev)); if (!mtd) return -EINVAL; switch (cmd) { case BLKGETSIZE: /* Return device size */ return put_user((mtd->size >> 9), (unsigned long *) arg); #ifdef BLKGETSIZE64 case BLKGETSIZE64: return put_user((u64)mtd->size, (u64 *)arg); #endif case BLKFLSBUF: #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0) if(!capable(CAP_SYS_ADMIN)) return -EACCES; #endif fsync_dev(inode->i_rdev); invalidate_buffers(inode->i_rdev); if (mtd->sync) mtd->sync(mtd); return 0; default: return -ENOTTY; } }
/*
 * umount(2) (ELKS port).  Accepts either the block-device node or the
 * mount point; for the latter, a zeroed on-stack dummy inode carrying
 * the device number is substituted so the driver release path still
 * gets an inode.  Superuser only.  Returns 0 or a negative errno.
 */
int sys_umount(char *name)
{
	struct inode *inode;
	register struct inode *inodep;
	kdev_t dev;
	int retval;
	struct inode dummy_inode;

	if (!suser())
		return -EPERM;
	retval = namei(name, &inode, 0, 0);
	if (retval) {
		/* Retry, presumably without following the final symlink. */
		retval = lnamei(name, &inode);
		if (retval)
			return retval;
	}
	inodep = inode;
	if (S_ISBLK(inodep->i_mode)) {
		dev = inodep->i_rdev;
		if (IS_NODEV(inodep)) {
			iput(inodep);
			return -EACCES;
		}
	} else {
		register struct super_block *sb = inodep->i_sb;

		/* Not a device node: must be the root of a mounted fs. */
		if (!sb || inodep != sb->s_mounted) {
			iput(inodep);
			return -EINVAL;
		}
		dev = sb->s_dev;
		iput(inodep);
		memset(&dummy_inode, 0, sizeof(dummy_inode));
		dummy_inode.i_rdev = dev;
		inodep = &dummy_inode;
	}
	if (MAJOR(dev) >= MAX_BLKDEV) {
		/* NOTE(review): may iput() the on-stack dummy inode -- confirm safe. */
		iput(inodep);
		return -ENXIO;
	}
	if (!(retval = do_umount(dev)) && dev != ROOT_DEV) {
		register struct file_operations *fops;

		/* Let the block driver release the device. */
		fops = get_blkfops(MAJOR(dev));
		if (fops && fops->release)
			fops->release(inodep, NULL);
#ifdef NOT_YET
		if (MAJOR(dev) == UNNAMED_MAJOR)
			put_unnamed_dev(dev);
#endif
	}
	if (inodep != &dummy_inode)
		iput(inodep);
	if (retval)
		return retval;
	/* Final flush of anything the unmount left dirty. */
	fsync_dev(dev);
	return 0;
}
/*
 * Module unload path for the sbull sample driver: stop the timers,
 * flush and unregister the devices, tear down the request queue(s),
 * clear this major's entries in the global block-layer arrays, and
 * free per-device RAM.
 */
void sbull_cleanup(void)
{
	int i;

	/*
	 * Before anything else, get rid of the timer functions.  Set the "usage"
	 * flag on each device as well, under lock, so that if the timer fires up
	 * just before we delete it, it will either complete or abort.  Otherwise
	 * we have nasty race conditions to worry about.
	 */
	for (i = 0; i < sbull_devs; i++) {
		Sbull_Dev *dev = sbull_devices + i;
		del_timer(&dev->timer);
		spin_lock(&dev->lock);
		dev->usage++;
		spin_unlock(&dev->lock);
	}

#ifdef DO_RAW_INTERFACE
	sbullr_release();
#endif

	/* flush it all and reset all the data structures */
	for (i = 0; i < sbull_devs; i++)
		fsync_dev(MKDEV(sbull_major, i)); /* flush the devices */
	/* NOTE(review): flush uses sbull_major but unregister uses major --
	 * confirm both hold the same registered major number. */
	unregister_blkdev(major, "sbull");

	/*
	 * Fix up the request queue(s)
	 */
#ifdef SBULL_MULTIQUEUE
	for (i = 0; i < sbull_devs; i++)
		blk_cleanup_queue(&sbull_devices[i].queue);
	blk_dev[major].queue = NULL;
#else
	blk_cleanup_queue(BLK_DEFAULT_QUEUE(major));
#endif

	/* Clean up the global arrays */
	read_ahead[major] = 0;
	kfree(blk_size[major]);
	blk_size[major] = NULL;
	kfree(blksize_size[major]);
	blksize_size[major] = NULL;
	kfree(hardsect_size[major]);
	hardsect_size[major] = NULL;
	/* FIXME: max_readahead and max_sectors */

	/* finally, the usual cleanup */
	for (i = 0; i < sbull_devs; i++) {
		if (sbull_devices[i].data)
			vfree(sbull_devices[i].data);
	}
	kfree(sbull_devices);
}
/*
 * ioctl handler for NFTL block devices (16 minors per NFTL device,
 * hence the "/ 16" minor-to-device mapping).  Supports geometry and
 * size queries, buffer flush (root-only), and partition re-read;
 * everything else is -EINVAL or, on newer kernels, forwarded to
 * blk_ioctl().
 */
static int nftl_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
{
	struct NFTLrecord *nftl;

	nftl = NFTLs[MINOR(inode->i_rdev) / 16];

	if (!nftl)
		return -EINVAL;

	switch (cmd) {
	case HDIO_GETGEO: {
		struct hd_geometry g;

		g.heads = nftl->heads;
		g.sectors = nftl->sectors;
		g.cylinders = nftl->cylinders;
		g.start = part_table[MINOR(inode->i_rdev)].start_sect;
		return copy_to_user((void *)arg, &g, sizeof g) ? -EFAULT : 0;
	}
	case BLKGETSIZE:   /* Return device size */
		if (!arg)
			return -EINVAL;
		return put_user(part_table[MINOR(inode->i_rdev)].nr_sects, (long *) arg);

	case BLKFLSBUF:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		/* Flush the block-layer cache, then the MTD itself. */
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		if (nftl->mtd->sync)
			nftl->mtd->sync(nftl->mtd);
		return 0;

	case BLKRRPART:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		/* Refuse while anyone else has the device open. */
		if (nftl->usecount > 1)
			return -EBUSY;
#if LINUX_VERSION_CODE < 0x20328
		resetup_one_dev(&nftl_gendisk, MINOR(inode->i_rdev) / 16);
#else
		grok_partitions(&nftl_gendisk, MINOR(inode->i_rdev) / 16, 1<<4, nftl->nr_sects);
#endif
		return 0;

#if (LINUX_VERSION_CODE < 0x20303)
	RO_IOCTLS(inode->i_rdev, arg); /* ref. linux/blk.h */
#else
	case BLKROSET:
	case BLKROGET:
	case BLKSSZGET:
		return blk_ioctl(inode->i_rdev, cmd, arg);
#endif

	default:
		return -EINVAL;
	}
}
/*
 * Emergency-sync helper: either remount @dev's filesystem read-only
 * (after turning quotas off and flushing the device) or just flush its
 * buffers.  Progress/result is reported via printk.
 */
static void go_sync(kdev_t dev, int remount_flag)
{
	printk(KERN_INFO "%sing device %s ... ",
	       remount_flag ? "Remount" : "Sync", kdevname(dev));

	if (remount_flag) { /* Remount R/O */
		struct super_block *sb = get_super(dev);
		struct vfsmount *vfsmnt;
		int ret, flags;

		if (!sb) {
			printk("Superblock not found\n");
			return;
		}
		if (sb->s_flags & MS_RDONLY) {
			printk("R/O\n");
			return;
		}
		/* Quotas must stop writing before the fs goes read-only. */
		DQUOT_OFF(dev);
		fsync_dev(dev);

		flags = MS_RDONLY;
		if (sb->s_op && sb->s_op->remount_fs) {
			ret = sb->s_op->remount_fs(sb, &flags, NULL);
			if (ret)
				printk("error %d\n", ret);
			else {
				/* Keep superblock and vfsmount flags in step. */
				sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
				if ((vfsmnt = lookup_vfsmnt(sb->s_dev)))
					vfsmnt->mnt_flags = sb->s_flags;
				printk("OK\n");
			}
		} else
			printk("nothing to do\n");
	} else {
		fsync_dev(dev); /* Sync only */
		printk("OK\n");
	}
}
/*
 * Unmount the filesystem on @dev.  Unless @unmount_root is set,
 * ROOT_DEV is special-cased: it is only remounted read-only and
 * synced, since the root cannot actually be detached.  Quotas are
 * turned off before the busy check so that no open quota file keeps
 * the filesystem busy.  Returns 0 or a negative errno.
 */
static int do_umount(kdev_t dev,int unmount_root)
{
	struct super_block * sb;
	int retval;

	if (dev==ROOT_DEV && !unmount_root) {
		/*
		 * Special case for "unmounting" root.  We just try to remount
		 * it readonly, and sync() the device.
		 */
		if (!(sb=get_super(dev)))
			return -ENOENT;
		if (!(sb->s_flags & MS_RDONLY)) {
			/*
			 * Make sure all quotas are turned off on this device we need to mount
			 * it readonly so no more writes by the quotasystem.
			 * If later on the remount fails too bad there are no quotas running
			 * anymore. Turn them on again by hand.
			 */
			quota_off(dev, -1);
			fsync_dev(dev);
			retval = do_remount_sb(sb, MS_RDONLY, 0);
			if (retval)
				return retval;
		}
		return 0;
	}
	if (!(sb=get_super(dev)) || !(sb->s_covered))
		return -ENOENT;
	if (!sb->s_covered->i_mount)
		printk("VFS: umount(%s): mounted inode has i_mount=NULL\n", kdevname(dev));
	/*
	 * Before checking if the filesystem is still busy make sure the kernel
	 * doesn't hold any quotafiles open on that device. If the umount fails
	 * too bad there are no quotas running anymore. Turn them on again by hand.
	 */
	quota_off(dev, -1);
	if (!fs_may_umount(dev, sb->s_mounted))
		return -EBUSY;
	/* Detach the covered mount point and the fs root inode. */
	sb->s_covered->i_mount = NULL;
	iput(sb->s_covered);
	sb->s_covered = NULL;
	iput(sb->s_mounted);
	sb->s_mounted = NULL;
	/* Give the fs a last chance to write back its superblock. */
	if (sb->s_op && sb->s_op->write_super && sb->s_dirt)
		sb->s_op->write_super(sb);
	put_super(dev);
	remove_vfsmnt(dev);
	return 0;
}
/*
 * Recover the on-disk journal after an unclean shutdown.
 *
 * A zero s_start in the journal superblock means the journal was
 * unmounted cleanly, so no replay is needed.  Otherwise the three
 * recovery passes (scan, revoke, replay) run in order, stopping at the
 * first failure.  Afterwards the transaction sequence is bumped past
 * everything replayed (invalidating old commit records), the revoke
 * table is dropped, and the journal device is flushed.
 *
 * Returns 0 on success or the first pass's error code.
 */
int journal_recover(journal_t *journal)
{
	static const int passes[] = { PASS_SCAN, PASS_REVOKE, PASS_REPLAY };
	journal_superblock_t *jsb = journal->j_superblock;
	struct recovery_info info;
	int err = 0;
	unsigned int i;

	memset(&info, 0, sizeof(info));

	/*
	 * The journal superblock's s_start field (the current log head)
	 * is always zero if, and only if, the journal was cleanly
	 * unmounted.
	 */
	if (jsb->s_start == 0) {
		jfs_debug(1, "No recovery required, last transaction %d\n",
			  ntohl(jsb->s_sequence));
		journal->j_transaction_sequence = ntohl(jsb->s_sequence) + 1;
		return 0;
	}

	for (i = 0; i < sizeof(passes) / sizeof(passes[0]) && !err; i++)
		err = do_one_pass(journal, &info, passes[i]);

	jfs_debug(0, "JFS: recovery, exit status %d, "
		  "recovered transactions %u to %u\n",
		  err, info.start_transaction, info.end_transaction);
	jfs_debug(0, "JFS: Replayed %d and revoked %d/%d blocks\n",
		  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);

	/* Restart the log at the next transaction ID, thus invalidating
	 * any existing commit records in the log. */
	journal->j_transaction_sequence = ++info.end_transaction;

	journal_clear_revoke(journal);
	fsync_dev(journal->j_dev);
	return err;
}
/* do_emergency_sync helper function */ static void go_sync(struct super_block *sb, int remount_flag) { int orig_loglevel; orig_loglevel = console_loglevel; console_loglevel = 7; printk(KERN_INFO "%sing device %s ... ", remount_flag ? "Remount" : "Sync", kdevname(sb->s_dev)); if (remount_flag) { /* Remount R/O */ int ret, flags; struct list_head *p; if (sb->s_flags & MS_RDONLY) { printk("R/O\n"); return; } file_list_lock(); for (p = sb->s_files.next; p != &sb->s_files; p = p->next) { struct file *file = list_entry(p, struct file, f_list); if (file->f_dentry && file_count(file) && S_ISREG(file->f_dentry->d_inode->i_mode)) file->f_mode &= ~2; } file_list_unlock(); DQUOT_OFF(sb); fsync_dev(sb->s_dev); flags = MS_RDONLY; if (sb->s_op && sb->s_op->remount_fs) { ret = sb->s_op->remount_fs(sb, &flags, NULL); if (ret) printk("error %d\n", ret); else { sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); printk("OK\n"); } } else printk("nothing to do\n"); } else { /* Sync only */
int sbull_release (struct inode *inode, struct file *filp) { Sbull_Dev *dev = sbull_devices + MINOR(inode->i_rdev); spin_lock(&dev->lock); dev->usage--; /* * If the device is closed for the last time, start a timer * to release RAM in half a minute. The function and argument * for the timer have been setup in sbull_init() */ if (!dev->usage) { dev->timer.expires = jiffies + 30 * HZ; add_timer(&dev->timer); /* but flush it right now */ fsync_dev(inode->i_rdev); invalidate_buffers(inode->i_rdev); } MOD_DEC_USE_COUNT; spin_unlock(&dev->lock); return 0; }
/*
 * Periodic timer callback simulating removable-media behaviour.
 * If the module is currently unused, "change the media": wipe the
 * first four blocks of the backing store, set the media-changed flag,
 * and drop the now-stale buffer cache.  The timer is always re-armed.
 *
 * @data: the device number (i_rdev) to flush/invalidate.
 */
void radimo_timer_fn(unsigned long data)
{
	MSG(RADIMO_TIMER, "timer expired\n");

	/* only "change media" if device is unused */
	if (MOD_IN_USE) {
		radimo_changed = 0;
	} else {
		/* medium changed, clear storage and */
		MSG(RADIMO_TIMER, "simulating media change\n");
		/* By erasing the first four blocks! */
		memset(radimo_storage, 0, RADIMO_BLOCK_SIZE * 4 );
		radimo_changed = 1;
		/* data contains i_rdev */
		/*
		 * NOTE(review): fsync_dev() may sleep, but timer callbacks run
		 * in atomic context -- confirm this call is actually safe here.
		 */
		fsync_dev(data);
		invalidate_buffers(data);
	}

	/* set it up again */
	radimo_timer.expires = RADIMO_TIMER_DELAY + jiffies;
	add_timer(&radimo_timer);
}
/*
 * Unmount the filesystem on @dev (ELKS-style VFS).  ROOT_DEV is only
 * remounted read-only and synced; any other mounted device is detached
 * from its mount point after a busy check, with a dirty superblock
 * written back first.  Returns 0, -ENOENT (not mounted) or -EBUSY.
 */
static int do_umount(kdev_t dev)
{
	register struct super_block *sb;
	register struct super_operations *sop;
	int retval = -ENOENT;

	if ((sb = get_super(dev))) {
		if (dev == ROOT_DEV) {
			/* Special case for "unmounting" root. We just try to remount
			 * it readonly, and sync() the device.
			 */
			retval = 0;
			if (!(sb->s_flags & MS_RDONLY)) {
				fsync_dev(dev);
				retval = do_remount_sb(sb, MS_RDONLY, 0);
			}
		} else if (sb->s_covered) {
			if (!sb->s_covered->i_mount)
				panic("umount: i_mount=NULL\n");
			if (!fs_may_umount(dev, sb->s_mounted))
				retval = -EBUSY;
			else {
				retval = 0;
				/* Detach the mount point and the fs root inode. */
				sb->s_covered->i_mount = NULL;
				iput(sb->s_covered);
				sb->s_covered = NULL;
				iput(sb->s_mounted);
				sb->s_mounted = NULL;
				/* Write back a dirty superblock before dropping it. */
				sop = sb->s_op;
				if (sop && sop->write_super && sb->s_dirt)
					sop->write_super(sb);
				put_super(dev);
			}
		}
	}
	return retval;
}
static int nftl_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg) { struct NFTLrecord *nftl; int p; nftl = NFTLs[MINOR(inode->i_rdev) >> NFTL_PARTN_BITS]; if (!nftl) return -EINVAL; switch (cmd) { case HDIO_GETGEO: { struct hd_geometry g; g.heads = nftl->heads; g.sectors = nftl->sectors; g.cylinders = nftl->cylinders; g.start = part_table[MINOR(inode->i_rdev)].start_sect; return copy_to_user((void *)arg, &g, sizeof g) ? -EFAULT : 0; } case BLKGETSIZE: /* Return device size */ return put_user(part_table[MINOR(inode->i_rdev)].nr_sects, (unsigned long *) arg); #ifdef BLKGETSIZE64 case BLKGETSIZE64: return put_user((u64)part_table[MINOR(inode->i_rdev)].nr_sects << 9, (u64 *)arg); #endif case BLKFLSBUF: if (!capable(CAP_SYS_ADMIN)) return -EACCES; fsync_dev(inode->i_rdev); invalidate_buffers(inode->i_rdev); if (nftl->mtd->sync) nftl->mtd->sync(nftl->mtd); return 0; case BLKRRPART: if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (nftl->usecount > 1) return -EBUSY; /* * We have to flush all buffers and invalidate caches, * or we won't be able to re-use the partitions, * if there was a change and we don't want to reboot */ p = (1<<NFTL_PARTN_BITS) - 1; while (p-- > 0) { kdev_t devp = MKDEV(MAJOR(inode->i_dev), MINOR(inode->i_dev)+p); if (part_table[p].nr_sects > 0) invalidate_device (devp, 1); part_table[MINOR(inode->i_dev)+p].start_sect = 0; part_table[MINOR(inode->i_dev)+p].nr_sects = 0; } #if LINUX_VERSION_CODE < 0x20328 resetup_one_dev(&nftl_gendisk, MINOR(inode->i_rdev) >> NFTL_PARTN_BITS); #else grok_partitions(&nftl_gendisk, MINOR(inode->i_rdev) >> NFTL_PARTN_BITS, 1<<NFTL_PARTN_BITS, nftl->nr_sects); #endif return 0; #if (LINUX_VERSION_CODE < 0x20303) RO_IOCTLS(inode->i_rdev, arg); /* ref. linux/blk.h */ #else case BLKROSET: case BLKROGET: case BLKSSZGET: return blk_ioctl(inode->i_rdev, cmd, arg); #endif default: return -EINVAL; } }
int file_fsync (struct inode *inode, struct file *filp) { return fsync_dev(inode->i_dev); }
/*
 * sync(2) system call: write back all dirty buffers by delegating to
 * fsync_dev() with device 0 (presumably "all devices" -- matches the
 * syscall's flush-everything contract).  Always returns 0.
 */
int sys_sync(void)
{
	(void) fsync_dev(0);
	return 0;
}
/*
 * Tear down a reiserfs superblock at unmount time: record the clean
 * mount state through the journal (for read-write mounts), shut the
 * journal down, release the bitmap buffers and the superblock buffer,
 * and report any leaked reiserfs_kmalloc memory.  The large #if 0
 * sections are remnants of the pre-journal "preserve list" and
 * cautious-bitmap code paths.
 */
void reiserfs_put_super (struct super_block * s)
{
  int i;
  kdev_t dev = s->s_dev;
  struct reiserfs_transaction_handle th ;

  journal_begin(&th, s, 10) ;
  if (s->u.reiserfs_sb.lock_preserve)
    reiserfs_panic (s, "vs-2000: reiserfs_put_super: lock_preserve == %d", s->u.reiserfs_sb.lock_preserve);

  /* change file system state to current state if it was mounted with read-write permissions */
  if (!(s->s_flags & MS_RDONLY)) {
    SB_REISERFS_STATE (s) = le16_to_cpu (s->u.reiserfs_sb.s_mount_state);
    /* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
    journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
  }

#if 0
  if (maybe_free_preserve_list (s) == 0) {
    reiserfs_warning ("vs-2003: reiserfs_put_super: there are %ld buffers to write\n",
		      s->u.reiserfs_sb.s_suspected_recipient_count);
#ifdef REISERFS_CHECK
    preserve_trace_print_srs (s);
#endif
    /* mark_suspected_recipients_dirty (&th, dev); journal victim */
    fsync_dev (dev);
    s->u.reiserfs_sb.s_suspected_recipient_count = 0;
#ifdef REISERFS_CHECK
    preserve_trace_reset_suspected_recipients (s);
#endif
    maybe_free_preserve_list (s);
  }
#endif

#if 0
  /* journal victim */
  for (i = 0; i < SB_BMAP_NR (s); i ++) {
    /* update cautious bitmap */
    if (memcmp (SB_AP_BITMAP (s)[i]->b_data, SB_AP_CAUTIOUS_BITMAP (s)[i], SB_AP_BITMAP (s)[i]->b_size)) {
      memcpy (SB_AP_CAUTIOUS_BITMAP (s)[i]->b_data, SB_AP_BITMAP (s)[i]->b_data, SB_AP_BITMAP (s)[i]->b_size);
      mark_buffer_dirty (SB_AP_CAUTIOUS_BITMAP (s)[i], 1);
      ll_rw_block (WRITE, 1, &SB_AP_CAUTIOUS_BITMAP (s)[i]);
    }
  }
#endif /* journal victim */

  journal_release(&th, s) ;
  /* reiserfs_sync_all_buffers(s->s_dev, 1) ; journal does not need this any more */

  /* wait on write completion */
  for (i = 0; i < SB_BMAP_NR (s); i ++) {
    /* wait_on_buffer (SB_AP_CAUTIOUS_BITMAP (s)[i]); */
    /* brelse (SB_AP_CAUTIOUS_BITMAP (s)[i]); */
    brelse (SB_AP_BITMAP (s)[i]);
  }

  reiserfs_kfree (SB_AP_BITMAP (s), sizeof (struct buffer_head *) * SB_BMAP_NR (s), s);
  /* reiserfs_kfree (SB_AP_CAUTIOUS_BITMAP (s), sizeof (struct buffer_head *) * SB_BMAP_NR (s), s); */

  brelse (SB_BUFFER_WITH_SB (s));

  print_statistics (s);

  /* Report memory leaked via reiserfs_kmalloc.  (Note: "aloocated" typo
   * is preserved -- it is part of the runtime message.) */
  if (s->u.reiserfs_sb.s_kmallocs != 0) {
    reiserfs_warning ("vs-2004: reiserfs_put_super: aloocated memory left %d\n",
		      s->u.reiserfs_sb.s_kmallocs);
  }

  s->s_dev = 0;

  fixup_reiserfs_buffers (dev);

  MOD_DEC_USE_COUNT;
  return;
}
static int radimo_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor; if (!inode || !inode->i_rdev) return -EINVAL; minor = MINOR(inode->i_rdev); switch (cmd) { case BLKFLSBUF: { /* flush buffers */ MSG(RADIMO_IOCTL, "ioctl: BLKFLSBUF\n"); /* deny all but root */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; fsync_dev(inode->i_rdev); invalidate_buffers(inode->i_rdev); break; } case BLKGETSIZE: { /* return device size */ MSG(RADIMO_IOCTL, "ioctl: BLKGETSIZE\n"); if (!arg) return -EINVAL; return put_user(radimo_size*2, (long *) arg); } case BLKRASET: { /* set read ahead value */ int tmp; MSG(RADIMO_IOCTL, "ioctl: BLKRASET\n"); if (get_user(tmp, (long *)arg)) return -EINVAL; if (tmp > 0xff) return -EINVAL; read_ahead[RADIMO_MAJOR] = tmp; return 0; } case BLKRAGET: { /* return read ahead value */ MSG(RADIMO_IOCTL, "ioctl: BLKRAGET\n"); if (!arg) return -EINVAL; return put_user(read_ahead[RADIMO_MAJOR], (long *)arg); } case BLKSSZGET: { /* return block size */ MSG(RADIMO_IOCTL, "ioctl: BLKSSZGET\n"); if (!arg) return -EINVAL; return put_user(radimo_soft, (long *)arg); } default: { MSG(RADIMO_ERROR, "ioctl wanted %u\n", cmd); return -ENOTTY; } } return 0; }
/*
 * Core unmount for the dcache-era VFS.  Shuts down quotas and process
 * accounting on the device, optionally lets the fs abort in-flight
 * operations (MNT_FORCE), prunes the dcache and flushes the device so
 * the fs is quiescent, then either remounts root read-only or fully
 * tears down the superblock.  Returns 0 or a negative errno.
 */
static int do_umount(kdev_t dev, int unmount_root, int flags)
{
	struct super_block * sb;
	int retval;

	retval = -ENOENT;
	sb = get_super(dev);
	if (!sb || !sb->s_root)
		goto out;

	/*
	 * Before checking whether the filesystem is still busy,
	 * make sure the kernel doesn't hold any quota files open
	 * on the device. If the umount fails, too bad -- there
	 * are no quotas running any more. Just turn them on again.
	 */
	DQUOT_OFF(dev);
	acct_auto_close(dev);

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. Thats for the mount program to worry
	 * about for the moment.
	 */
	if( (flags&MNT_FORCE) && sb->s_op->umount_begin)
		sb->s_op->umount_begin(sb);

	/*
	 * Shrink dcache, then fsync. This guarantees that if the
	 * filesystem is quiescent at this point, then (a) only the
	 * root entry should be in use and (b) that root entry is
	 * clean.
	 */
	shrink_dcache_sb(sb);
	fsync_dev(dev);

	if (dev==ROOT_DEV && !unmount_root) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		retval = 0;
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, 0);
		return retval;
	}

	retval = d_umount(sb);
	if (retval)
		goto out;

	/* Write back a dirty superblock before dropping it. */
	if (sb->s_op) {
		if (sb->s_op->write_super && sb->s_dirt)
			sb->s_op->write_super(sb);
	}

	lock_super(sb);
	if (sb->s_op) {
		if (sb->s_op->put_super)
			sb->s_op->put_super(sb);
	}

	/* Forget any remaining inodes */
	if (invalidate_inodes(sb)) {
		printk("VFS: Busy inodes after unmount. "
			"Self-destruct in 5 seconds.  Have a nice day...\n");
	}

	sb->s_dev = 0;		/* Free the superblock */
	unlock_super(sb);

	remove_vfsmnt(dev);
out:
	return retval;
}
/*
 * (translated from French) This code was taken almost verbatim from
 * sbull.c; only some function names were adapted.  That gives us a
 * well-tested, well-debugged base.
 */
/*
 * ioctl handler for the smd network block device: size query, buffer
 * flush (root-only), read-ahead get/set, and a faked disk geometry.
 * Unknown commands are forwarded to blk_ioctl().
 */
int smd_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err, size;
	struct hd_geometry *geo = (struct hd_geometry *)arg;
	servernode_t *sn;
	ndevice_t *nd;

	nd = minor2ndev[MINOR(inode->i_rdev)];
	sn = nd->sn;

	PDEBUG("cmd=0x%x arg=0x%lx ndev='%s:%s'\n", cmd, arg, sn->name, nd->name);

	switch(cmd) {
	case BLKGETSIZE:
		/* Return the device size, expressed in sectors */
		if (!arg) return -EINVAL; /* NULL pointer: not valid */
		err=verify_area(VERIFY_WRITE, (long *) arg, sizeof(long));
		if (err) return err;
		/* nd->size is presumably in KiB; scale to SMD_HARDSECT sectors. */
		put_user(nd->size * (1024 / SMD_HARDSECT), (long *) arg);
		PDEBUG("BLKGETSIZE %lld (device size %lld, sect size %d)\n",
		       nd->size * (1024 / SMD_HARDSECT), nd->size, SMD_HARDSECT );
		return 0;

	case BLKFLSBUF: /* flush */
		if (!suser()) return -EACCES; /* only root */
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		PDEBUG("BLKFLSBUF \n");
		return 0;

	case BLKRAGET: /* return the readahead value */
		if (!arg) return -EINVAL;
		err = verify_area(VERIFY_WRITE, (long *) arg, sizeof(long));
		if (err) return err;
		put_user(read_ahead[MAJOR(inode->i_rdev)],(long *) arg);
		PDEBUG("BLKRAGET \n");
		return 0;

	case BLKRASET: /* set the readahead value */
		if (!suser()) return -EACCES;
		if (arg > 0xff) return -EINVAL; /* limit it */
		read_ahead[MAJOR(inode->i_rdev)] = arg;
		PDEBUG("BLKRASET \n");
		return 0;

	case BLKRRPART: /* re-read partition table: can't do it */
		PDEBUG("BLKRRPART \n");
		return -EINVAL;

		//RO_IOCTLS(inode->i_rdev, arg); /* the default RO operations */

	case HDIO_GETGEO:
		/*
		 * get geometry: we have to fake one... trim the size to a
		 * multiple of 64 (32k): tell we have 16 sectors, 4 heads,
		 * whatever cylinders. Tell also that data starts at sector 4.
		 */
		size = nd->size * (1024 / SMD_HARDSECT);
		size &= ~0x3f; /* multiple of 64 */
		if (geo==NULL) return -EINVAL;
		err = verify_area(VERIFY_WRITE, geo, sizeof(*geo));
		if (err) return err;
		put_user(size >> 6, &geo->cylinders);
		put_user(       4, &geo->heads);
		put_user(      16, &geo->sectors);
		put_user(       4, &geo->start);
		PDEBUG("HDIO_GETGEO \n");
		return 0;

	default:
		PDEBUG("PAR DEFAUT \n");
		return blk_ioctl(inode->i_rdev, cmd, arg);
	}

	/* Unreachable: every case above returns.  Kept from the original. */
	PDEBUG("COMMANDE INCONNUE \n");
	return -EINVAL; /* unknown command */
}