static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty) |
				      (1UL << BH_Pinned) |
				      (1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	/*
	 * Issue revokes for up to nr_revokes buffers on this glock's AIL
	 * list. Buffers that are still dirty, pinned or locked cannot be
	 * revoked: fsync quietly skips them, while any other caller treats
	 * them as an AIL consistency error.
	 */
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
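For context, __gfs2_ail_flush() expects its caller to have opened a log transaction sized for the revokes it will add. Below is a minimal sketch of the companion gfs2_ail_flush() wrapper from the same era of fs/gfs2/glops.c, reconstructed from memory rather than quoted verbatim; the revoke-capacity arithmetic in particular should be treated as an approximation.

/* Sketch: caller-side transaction sizing around __gfs2_ail_flush().
 * Reconstructed, not verbatim; details may differ between versions. */
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize -
				    sizeof(struct gfs2_log_descriptor)) /
				   sizeof(u64);
	int ret;

	if (!revokes)
		return;

	/* Each extra revoke block holds a header plus u64 block numbers. */
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, gl);
}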
static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	/* Let go of any cached resource group bitmap buffers. */
	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_spin);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	/*
	 * Metadata for all resource groups shares the single sd_aspace
	 * mapping, so write back and wait on only this rgrp's window.
	 */
	gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start,
					gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}
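The gl_vm.start/gl_vm.end window used above is fixed when the resource group is first read in, so the range writeback touches only that rgrp's slice of the shared mapping. Roughly how the window is derived in read_rindex_entry() of the same series; this is a reconstruction (the PAGE_CACHE_* macros were current at the time), not a verbatim quote.

/* Sketch, from fs/gfs2/rgrp.c read_rindex_entry(): bsize is the fs
 * block size; reconstructed, details may differ slightly. */
rgd->rd_gl->gl_object = rgd;
rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_length +
					  rgd->rd_addr) * bsize) - 1;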
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	/* Every exit path must clear GIF_GLOP_PENDING, dirty or not. */
	gfs2_clear_glop_pending(ip);
}
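This later version fetches the inode via gfs2_glock2inode(), which (in this series) marks the inode with GIF_GLOP_PENDING so that inode teardown waits for the glock operation to finish; gfs2_clear_glop_pending() at out: lifts that marker on every exit path. A minimal sketch of the helper, reconstructed from memory rather than quoted verbatim:

/* Sketch of the helper called at "out:"; reconstructed, not verbatim. */
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	/* Release the bit and wake any waiter (e.g. inode eviction). */
	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	smp_mb__after_atomic();
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}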
static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}
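Neither rgrp_go_sync() variant is called directly; the function is invoked through the resource group glock operations table. A rough sketch of that registration as it looked in the mid-3.x series; the member set and flags vary between versions, so treat the exact fields as assumptions.

/* Sketch: registration of rgrp_go_sync(); member names and the exact
 * callback set vary across kernel versions. */
const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};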