/* * Find a super_block with no device assigned. */ static struct super_block *get_empty_super(void) { struct super_block *s; for (s = sb_entry(super_blocks.next); s != sb_entry(&super_blocks); s = sb_entry(s->s_list.next)) { if (s->s_dev) continue; if (!s->s_lock) return s; printk("VFS: empty superblock %p locked!\n", s); } /* Need a new one... */ if (nr_super_blocks >= max_super_blocks) return NULL; s = kmalloc(sizeof(struct super_block), GFP_USER); if (s) { nr_super_blocks++; memset(s, 0, sizeof(struct super_block)); INIT_LIST_HEAD(&s->s_dirty); list_add (&s->s_list, super_blocks.prev); } return s; }
/*
 * Flush the dirty-inode list of every mounted superblock.
 *
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
	struct super_block *sb;

	for (sb = sb_entry(super_blocks.next);
	     sb != sb_entry(&super_blocks);
	     sb = sb_entry(sb->s_list.next)) {
		/* Skip superblocks with no device assigned. */
		if (sb->s_dev)
			sync_list(&sb->s_dirty);
	}
}
/*
 * Find the superblock currently mounted on the given device.
 *
 * May sleep in wait_on_super(); because the superblock can be unmounted
 * or reused while we sleep, s_dev is rechecked afterwards and the whole
 * scan is restarted if it no longer matches.
 *
 * Returns the superblock, or NULL if dev is zero or no match is found.
 */
struct super_block * get_super(kdev_t dev)
{
	struct super_block * s;

	if (!dev)
		return NULL;
restart:
	s = sb_entry(super_blocks.next);
	while (s != sb_entry(&super_blocks))
		if (s->s_dev == dev) {
			wait_on_super(s);
			/* Still the same device after sleeping? */
			if (s->s_dev == dev)
				return s;
			/* Superblock changed under us -- rescan from the top. */
			goto restart;
		} else
			s = sb_entry(s->s_list.next);
	return NULL;
}
/*
 * "sync_inodes()" goes through the super block's dirty list,
 * writes them out, and puts them back on the normal list.
 *
 * A zero dev means "sync every mounted device"; a nonzero dev syncs
 * only that device and stops after the first match.
 */
void sync_inodes(kdev_t dev)
{
	struct super_block *sb;

	/*
	 * Search the super_blocks array for the device(s) to sync.
	 */
	spin_lock(&inode_lock);
	for (sb = sb_entry(super_blocks.next);
	     sb != sb_entry(&super_blocks);
	     sb = sb_entry(sb->s_list.next)) {
		/* Ignore unassigned entries and non-matching devices. */
		if (!sb->s_dev || (dev && sb->s_dev != dev))
			continue;
		sync_list(&sb->s_dirty);
		if (dev)
			break;
	}
	spin_unlock(&inode_lock);
}
/*
 * Write out every dirty superblock (or only the one on dev, if nonzero)
 * via its write_super operation.
 *
 * Note: check the dirty flag before waiting, so we don't
 * hold up the sync while mounting a device. (The newly
 * mounted device won't need syncing.)
 */
void sync_supers(kdev_t dev)
{
	struct super_block * sb;

	for (sb = sb_entry(super_blocks.next);
	     sb != sb_entry(&super_blocks);
	     sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		if (dev && sb->s_dev != dev)
			continue;
		if (!sb->s_dirt)
			continue;
		/* N.B. Should lock the superblock while writing */
		wait_on_super(sb);
		/*
		 * wait_on_super() may sleep; the superblock can have been
		 * unmounted, cleaned, or reused for another device in the
		 * meantime, so every condition is rechecked here.
		 */
		if (!sb->s_dev || !sb->s_dirt)
			continue;
		if (dev && (dev != sb->s_dev))
			continue;
		if (sb->s_op && sb->s_op->write_super)
			sb->s_op->write_super(sb);
	}
}
void disk_emergency_sync(void) { struct super_block *sb; struct scsi_dev *sdev; /* host side sync... */ emergency_sync(); /* ...and now the disk side sync */ lock_kernel(); emergency_sync_scheduled = 0; for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) { if (is_local_disk(sb->s_dev)){ if ((sdev = sb_to_scsidev(sb)) != NULL) ata_sync_platter(sdev); } } unlock_kernel(); printk(KERN_INFO "disk_emergency_sync done!\n"); }
void do_emergency_sync(void) { struct super_block *sb; lock_kernel(); emergency_sync_scheduled = 0; for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) if (is_local_disk(sb->s_dev)) go_sync(sb); for (sb = sb_entry(super_blocks.next); sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) if (!is_local_disk(sb->s_dev) && MAJOR(sb->s_dev)) go_sync(sb); unlock_kernel(); printk(KERN_INFO "Done.\n"); }