void gfs2_meta_inval(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;
	struct address_space *mapping = gl->gl_aspace->i_mapping;

	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));

	atomic_inc(&aspace->i_writecount);
	truncate_inode_pages(mapping, 0);
	atomic_dec(&aspace->i_writecount);

	gfs2_assert_withdraw(sdp, !mapping->nrpages);
}
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}
static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
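/*
 * Worked example for the end-of-file check above (illustrative only,
 * assuming 4096-byte pages): with i_size = 8192, end_index = 2 and
 * offset = 0, so the page with index 2 begins exactly at i_size and is
 * invalidated rather than written back. With i_size = 8200, offset = 8,
 * and page 2 still covers 8 valid bytes, so writeback proceeds.
 */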
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	INIT_LIST_HEAD(&tr.tr_list_buf);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	BUG_ON(current->journal_info);
	current->journal_info = &tr;

	spin_lock(&sdp->sd_ail_lock);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		gfs2_remove_from_ail(bd);
		spin_unlock(&sdp->sd_ail_lock);

		bd->bd_bh = NULL;
		bh->b_private = NULL;
		bd->bd_blkno = bh->b_blocknr;
		gfs2_log_lock(sdp);
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
		gfs2_trans_add_revoke(sdp, bd);
		gfs2_log_unlock(sdp);

		spin_lock(&sdp->sd_ail_lock);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
static void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);

	BUG_ON(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages(mapping, 0);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}
static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
			const char *symname)
{
	struct gfs2_inode *dip = GFS2_I(dir), *ip;
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_holder ghs[2];
	struct inode *inode;
	struct buffer_head *dibh;
	int size;
	int error;

	/* Must be stuffed with a null terminator for gfs2_follow_link() */
	size = strlen(symname);
	if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1)
		return -ENAMETOOLONG;

	gfs2_holder_init(dip->i_gl, 0, 0, ghs);

	inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO, 0);
	if (IS_ERR(inode)) {
		gfs2_holder_uninit(ghs);
		return PTR_ERR(inode);
	}

	ip = ghs[1].gh_gl->gl_object;

	ip->i_disksize = size;
	i_size_write(inode, size);

	error = gfs2_meta_inode_buffer(ip, &dibh);

	if (!gfs2_assert_withdraw(sdp, !error)) {
		gfs2_dinode_out(ip, dibh->b_data);
		memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname,
		       size);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);
	gfs2_quota_unlock(dip);
	gfs2_alloc_put(dip);

	gfs2_glock_dq_uninit_m(2, ghs);

	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

	return 0;
}
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!tr);
	current->journal_info = NULL;

	if (!tr->tr_touched) {
		gfs2_log_release(sdp, tr->tr_reserved);
		if (tr->tr_t_gh.gh_gl) {
			gfs2_glock_dq(&tr->tr_t_gh);
			gfs2_holder_uninit(&tr->tr_t_gh);
			kfree(tr);
		}
		return;
	}

	if (gfs2_assert_withdraw(sdp, tr->tr_num_buf <= tr->tr_blocks)) {
		fs_err(sdp, "tr_num_buf = %u, tr_blocks = %u ",
		       tr->tr_num_buf, tr->tr_blocks);
		print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n",
			     tr->tr_ip);
	}
	if (gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes)) {
		fs_err(sdp, "tr_num_revoke = %u, tr_revokes = %u ",
		       tr->tr_num_revoke, tr->tr_revokes);
		print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n",
			     tr->tr_ip);
	}

	gfs2_log_commit(sdp, tr);
	if (tr->tr_t_gh.gh_gl) {
		gfs2_glock_dq(&tr->tr_t_gh);
		gfs2_holder_uninit(&tr->tr_t_gh);
		kfree(tr);
	}

	if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
		gfs2_log_flush(sdp, NULL);
}
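/*
 * Typical pairing with gfs2_trans_begin(), as in the callers above
 * (a usage sketch only; "blocks" and "revokes" stand for the caller's
 * reservation sizes):
 *
 *	error = gfs2_trans_begin(sdp, blocks, revokes);
 *	if (error)
 *		return error;
 *	... add buffers and/or revokes to the transaction ...
 *	gfs2_trans_end(sdp);
 */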
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n",
			       error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
	}
}
void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
		      unsigned int bit, int new_value)
{
	unsigned int c, o, b = bit;
	int old_value;

	c = b / (8 * PAGE_SIZE);
	b %= 8 * PAGE_SIZE;
	o = b / 8;
	b %= 8;

	old_value = (bitmap[c][o] & (1 << b));
	gfs2_assert_withdraw(sdp, !old_value != !new_value);

	if (new_value)
		bitmap[c][o] |= 1 << b;
	else
		bitmap[c][o] &= ~(1 << b);
}
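/*
 * Worked example (assuming PAGE_SIZE == 4096, i.e. 32768 bits per page):
 * bit 70000 gives c = 70000 / 32768 = 2, then b = 70000 % 32768 = 4464,
 * o = 4464 / 8 = 558, b = 4464 % 8 = 0, so the function flips bit 0 of
 * byte 558 in page 2. The assertion insists the bit actually changes:
 * setting an already-set bit, or clearing an already-clear one, withdraws.
 */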
static int gfs2_jdata_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
		gl->gl_sbd->sd_rindex_uptodate = 0;
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct gfs2_inode *dip = GFS2_I(dir), *ip;
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_holder ghs[2];
	struct inode *inode;
	struct buffer_head *dibh;
	int error;

	gfs2_holder_init(dip->i_gl, 0, 0, ghs);

	inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0);
	if (IS_ERR(inode)) {
		gfs2_holder_uninit(ghs);
		return PTR_ERR(inode);
	}

	ip = ghs[1].gh_gl->gl_object;

	ip->i_inode.i_nlink = 2;
	ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
	ip->i_diskflags |= GFS2_DIF_JDATA;
	ip->i_entries = 2;

	error = gfs2_meta_inode_buffer(ip, &dibh);

	if (!gfs2_assert_withdraw(sdp, !error)) {
		struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
		struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1);
		struct qstr str;

		gfs2_str2qstr(&str, ".");
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent);
		dent->de_inum = di->di_num; /* already GFS2 endian */
		dent->de_type = cpu_to_be16(DT_DIR);
		di->di_entries = cpu_to_be32(1);

		gfs2_str2qstr(&str, "..");
		dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1));
		gfs2_qstr2dirent(&str, dibh->b_size - GFS2_DIRENT_SIZE(1) -
				 sizeof(struct gfs2_dinode), dent);
		gfs2_inum_out(dip, dent);
		dent->de_type = cpu_to_be16(DT_DIR);

		gfs2_dinode_out(ip, di);

		brelse(dibh);
	}

	error = gfs2_change_nlink(dip, +1);
	gfs2_assert_withdraw(sdp, !error); /* dip already pinned */

	gfs2_trans_end(sdp);
	gfs2_inplace_release(dip);
	gfs2_quota_unlock(dip);
	gfs2_qadata_put(dip);

	gfs2_glock_dq_uninit_m(2, ghs);

	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

	return 0;
}
static int gfs2_commit_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error = -EOPNOTSUPP;
	struct buffer_head *dibh;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_dinode *di;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
		goto fail_nounlock;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_endtrans;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;

	if (gfs2_is_stuffed(ip)) {
		u64 file_size;
		void *kaddr;

		file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;

		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
		       kaddr + from, to - from);
		kunmap_atomic(kaddr, KM_USER0);

		SetPageUptodate(page);

		if (inode->i_size < file_size) {
			i_size_write(inode, file_size);
			mark_inode_dirty(inode);
		}
	} else {
		if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
		    gfs2_is_jdata(ip))
			gfs2_page_add_databufs(ip, page, from, to);
		error = generic_commit_write(file, page, from, to);
		if (error)
			goto fail;
	}

	if (ip->i_di.di_size < inode->i_size) {
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq_m(1, &ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return 0;

fail:
	brelse(dibh);
fail_endtrans:
	gfs2_trans_end(sdp);
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq_m(1, &ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
	ClearPageUptodate(page);
	return error;
}
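/*
 * Common to every function above: gfs2_assert_withdraw() evaluates to 0
 * when the assertion holds, and otherwise withdraws the filesystem and
 * evaluates to a nonzero value, which is why callers can branch on
 * failure as in "if (gfs2_assert_withdraw(sdp, !error)) return;".
 * A sketch of its shape, roughly following fs/gfs2/util.h (details vary
 * by kernel version):
 */
#define gfs2_assert_withdraw(sdp, assertion) \
((likely(assertion)) ? 0 : gfs2_assert_withdraw_i((sdp), #assertion, \
						  __func__, __FILE__, __LINE__))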