/* Done it all: now write the commit record. We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
				       transaction_t *commit_transaction)
{
	struct journal_head *jh;
	struct buffer_head *cbh;
	journal_header_t *hdr;
	int err;

	/* An aborted journal writes nothing more; report success. */
	if (is_journal_aborted(journal))
		return 0;

	jh = journal_get_descriptor_buffer(journal);
	if (!jh)
		return 1;	/* no descriptor block: caller aborts */

	cbh = jh2bh(jh);

	/* Stamp the descriptor block as this transaction's commit record. */
	hdr = (journal_header_t *)cbh->b_data;
	hdr->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
	hdr->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
	hdr->h_sequence = cpu_to_be32(commit_transaction->t_tid);

	JBUFFER_TRACE(jh, "write commit block");
	set_buffer_dirty(cbh);

	/* With barriers enabled, force the write out with flush + FUA. */
	err = (journal->j_flags & JFS_BARRIER) ?
		__sync_dirty_buffer(cbh, WRITE_SYNC | WRITE_FLUSH_FUA) :
		sync_dirty_buffer(cbh);

	put_bh(cbh);		/* One for getblk() */
	journal_put_journal_head(jh);

	return err == -EIO;
}
/* Done it all: now write the commit record. We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
				       transaction_t *commit_transaction)
{
	struct journal_head *descriptor;
	struct buffer_head *bh;
	journal_header_t *header;
	int ret;

	/* Aborted journal: skip the commit write, report success. */
	if (is_journal_aborted(journal))
		return 0;

	/* Get a descriptor block to hold the on-disk commit record. */
	descriptor = journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	/* Fill in the commit header for this transaction. */
	header = (journal_header_t *)(bh->b_data);
	header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
	header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
	header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

	JBUFFER_TRACE(descriptor, "write commit block");
	set_buffer_dirty(bh);

	if (journal->j_flags & JFS_BARRIER) {
		/* Try a barrier write first. */
		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);

		/*
		 * Is it possible for another commit to fail at roughly
		 * the same time as this one? If so, we don't want to
		 * trust the barrier flag in the super, but instead want
		 * to remember if we sent a barrier request
		 */
		if (ret == -EOPNOTSUPP) {
			char b[BDEVNAME_SIZE];

			printk(KERN_WARNING
				"JBD: barrier-based sync failed on %s - "
				"disabling barriers\n",
				bdevname(journal->j_dev, b));
			/* Clear the barrier flag under the state lock. */
			spin_lock(&journal->j_state_lock);
			journal->j_flags &= ~JFS_BARRIER;
			spin_unlock(&journal->j_state_lock);

			/* And try again, without the barrier */
			set_buffer_uptodate(bh);
			set_buffer_dirty(bh);
			ret = sync_dirty_buffer(bh);
		}
	} else {
		ret = sync_dirty_buffer(bh);
	}

	put_bh(bh);		/* One for getblk() */
	journal_put_journal_head(descriptor);

	/* Only a hard I/O error asks the caller to abort the journal. */
	return (ret == -EIO);
}
/*
 * nilfs_sync_super - write the superblock buffer(s) back to disk.
 * @sbi:  nilfs super block info
 * @flag: when NILFS_SB_COMMIT_ALL, the secondary superblock is written too
 *
 * Writes the primary superblock (flush+FUA when the BARRIER mount option
 * is set). On an -EIO failure the primary's contents are copied over the
 * secondary and the write is retried via the fallback superblock.
 * Returns 0 on success or a negative error code from the buffer write.
 */
static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int err;

 retry:
	set_buffer_dirty(nilfs->ns_sbh[0]);

	if (nilfs_test_opt(sbi, BARRIER)) {
		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
					  WRITE_SYNC | WRITE_FLUSH_FUA);
	} else {
		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
	}

	if (unlikely(err)) {
		printk(KERN_ERR
		       "NILFS: unable to write superblock (err=%d)\n", err);
		if (err == -EIO && nilfs->ns_sbh[1]) {
			/*
			 * sbp[0] points to newer log than sbp[1],
			 * so copy sbp[0] to sbp[1] to take over sbp[0].
			 */
			memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0],
			       nilfs->ns_sbsize);
			nilfs_fall_back_super_block(nilfs);
			goto retry;
		}
	} else {
		struct nilfs_super_block *sbp = nilfs->ns_sbp[0];

		nilfs->ns_sbwcount++;

		/*
		 * The latest segment becomes trailable from the position
		 * written in superblock.
		 */
		clear_nilfs_discontinued(nilfs);

		/* update GC protection for recent segments */
		if (nilfs->ns_sbh[1]) {
			if (flag == NILFS_SB_COMMIT_ALL) {
				set_buffer_dirty(nilfs->ns_sbh[1]);
				/*
				 * NOTE(review): a failed secondary write
				 * bails out while err is still 0 — the
				 * caller sees success; confirm intended.
				 */
				if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0)
					goto out;
			}
			/* Protect based on whichever copy holds the older
			 * last-checkpoint number. */
			if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) <
			    le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno))
				sbp = nilfs->ns_sbp[1];
		}

		spin_lock(&nilfs->ns_last_segment_lock);
		nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
		spin_unlock(&nilfs->ns_last_segment_lock);
	}
 out:
	return err;
}
static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block) { struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi); struct buffer_head *bh; int err; bh = sb_getblk(sbi->sb, block); if (!bh) return -EIO; lock_buffer(bh); memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super)); set_buffer_uptodate(bh); set_buffer_dirty(bh); unlock_buffer(bh); /* it's rare case, we can do fua all the time */ err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA); brelse(bh); return err; }
/*
 * nilfs_sync_super - write the superblock buffer(s) back to disk.
 * @sb:   super block instance
 * @flag: when NILFS_SB_COMMIT_ALL, the secondary superblock is written too
 *
 * The barrier mechanism used for the primary write depends on kernel
 * support: ordered buffers (HAVE_BH_ORDERED), WRITE_BARRIER bios
 * (HAVE_BIO_BARRIER), or flush+FUA otherwise. If the device rejects a
 * barrier request (-EOPNOTSUPP), barriers are disabled for the
 * superblock and the write is retried. On -EIO the primary's contents
 * are copied over the secondary and the write retried via the fallback
 * superblock. Returns 0 on success or a negative error code.
 */
static int nilfs_sync_super(struct super_block *sb, int flag)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

#if HAVE_BH_ORDERED
	int barrier_done = 0;

	/* Older kernels: request a barrier by marking the buffer ordered. */
	if (nilfs_sb_barrier(nilfs)) {
		set_buffer_ordered(nilfs->ns_sbh[0]);
		barrier_done = 1;
	}
 retry:
	set_buffer_dirty(nilfs->ns_sbh[0]);
	err = sync_dirty_buffer(nilfs->ns_sbh[0]);
	/* Device refused the barrier: drop it and rewrite without one. */
	if (err == -EOPNOTSUPP && barrier_done) {
		nilfs_warning(sb, __func__,
			      "barrier-based sync failed. "
			      "disabling barriers for superblock\n");
		clear_nilfs_sb_barrier(nilfs);
		barrier_done = 0;
		clear_buffer_ordered(nilfs->ns_sbh[0]);
		goto retry;
	}
#else
 retry:
	set_buffer_dirty(nilfs->ns_sbh[0]);
	if (nilfs_sb_barrier(nilfs)) {
#if HAVE_BIO_BARRIER
		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
					  WRITE_SYNC | WRITE_BARRIER);
		/* Device refused the barrier: disable and retry plain. */
		if (err == -EOPNOTSUPP) {
			nilfs_warning(sb, __func__,
				      "barrier-based sync failed. "
				      "disabling barriers for superblock\n");
			clear_nilfs_sb_barrier(nilfs);
			goto retry;
		}
#else
		/* Modern kernels: flush+FUA replaces barrier requests. */
		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
					  WRITE_SYNC | WRITE_FLUSH_FUA);
#endif /* HAVE_BIO_BARRIER */
	} else {
		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
	}
#endif /* HAVE_BH_ORDERED */

	if (unlikely(err)) {
		printk(KERN_ERR
		       "NILFS: unable to write superblock (err=%d)\n", err);
		if (err == -EIO && nilfs->ns_sbh[1]) {
			/*
			 * sbp[0] points to newer log than sbp[1],
			 * so copy sbp[0] to sbp[1] to take over sbp[0].
			 */
			memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0],
			       nilfs->ns_sbsize);
			nilfs_fall_back_super_block(nilfs);
			goto retry;
		}
	} else {
		struct nilfs_super_block *sbp = nilfs->ns_sbp[0];

		nilfs->ns_sbwcount++;

		/*
		 * The latest segment becomes trailable from the position
		 * written in superblock.
		 */
		clear_nilfs_discontinued(nilfs);

		/* update GC protection for recent segments */
		if (nilfs->ns_sbh[1]) {
			if (flag == NILFS_SB_COMMIT_ALL) {
				set_buffer_dirty(nilfs->ns_sbh[1]);
				/*
				 * NOTE(review): a failed secondary write
				 * bails out while err is still 0 — the
				 * caller sees success; confirm intended.
				 */
				if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0)
					goto out;
			}
			/* Protect based on whichever copy holds the older
			 * last-checkpoint number. */
			if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) <
			    le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno))
				sbp = nilfs->ns_sbp[1];
		}

		spin_lock(&nilfs->ns_last_segment_lock);
		nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
		spin_unlock(&nilfs->ns_last_segment_lock);
	}
 out:
	return err;
}