static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
		      dev_t dev)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_holder ghs[2];
	struct inode *inode;

	gfs2_holder_init(dip->i_gl, 0, 0, ghs);

	inode = gfs2_createi(ghs, &dentry->d_name, mode, dev);
	if (IS_ERR(inode)) {
		gfs2_holder_uninit(ghs);
		return PTR_ERR(inode);
	}

	gfs2_trans_end(sdp);
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);
	gfs2_quota_unlock(dip);
	gfs2_alloc_put(dip);

	gfs2_glock_dq_uninit_m(2, ghs);

	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

	return 0;
}
static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}
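/*
 * Illustrative sketch, not GFS2 code: the revalidation step used by
 * gfs2_readpage() above. The VFS calls ->readpage() with the page locked,
 * but GFS2's lock ordering puts the glock before the page lock, so the
 * page lock is dropped, the glock taken, and the page re-locked and
 * re-checked before any I/O is done.
 */
static int revalidate_page_after_glock(struct address_space *mapping,
				       struct page *page)
{
	lock_page(page);
	if (page->mapping != mapping || PageUptodate(page)) {
		/* Truncated or read by someone else meanwhile: let the
		   VFS retry the whole call */
		unlock_page(page);
		return AOP_TRUNCATED_PAGE;
	}
	return 0; /* still our page, safe to read */
}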
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int i, error;
	u32 gfsflags, fsflags = 0;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	gfsflags = ip->i_diskflags;
	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;
	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;

	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_atime(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	/* This is VM_MAYWRITE instead of VM_WRITE because a call to
	   mprotect() can turn on VM_WRITE later. */

	if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
	    (VM_MAYSHARE | VM_MAYWRITE))
		vma->vm_ops = &gfs2_vm_ops_sharewrite;
	else
		vma->vm_ops = &gfs2_vm_ops_private;

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
			const char *symname)
{
	struct gfs2_inode *dip = GFS2_I(dir), *ip;
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_holder ghs[2];
	struct inode *inode;
	struct buffer_head *dibh;
	int size;
	int error;

	/* Must be stuffed with a null terminator for gfs2_follow_link() */
	size = strlen(symname);
	if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1)
		return -ENAMETOOLONG;

	gfs2_holder_init(dip->i_gl, 0, 0, ghs);

	inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO, 0);
	if (IS_ERR(inode)) {
		gfs2_holder_uninit(ghs);
		return PTR_ERR(inode);
	}

	ip = ghs[1].gh_gl->gl_object;

	ip->i_disksize = size;
	i_size_write(inode, size);

	error = gfs2_meta_inode_buffer(ip, &dibh);

	if (!gfs2_assert_withdraw(sdp, !error)) {
		gfs2_dinode_out(ip, dibh->b_data);
		memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname,
		       size);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);
	gfs2_quota_unlock(dip);
	gfs2_alloc_put(dip);

	gfs2_glock_dq_uninit_m(2, ghs);

	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

	return 0;
}
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
		     unsigned int revokes)
{
	struct gfs2_trans *tr;
	int error;

	BUG_ON(current->journal_info);
	BUG_ON(blocks == 0 && revokes == 0);

	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;

	tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS);
	if (!tr)
		return -ENOMEM;

	tr->tr_ip = (unsigned long)__builtin_return_address(0);
	tr->tr_blocks = blocks;
	tr->tr_revokes = revokes;
	tr->tr_reserved = 1;
	if (blocks)
		tr->tr_reserved += 6 + blocks;
	if (revokes)
		tr->tr_reserved += gfs2_struct2blk(sdp, revokes, sizeof(u64));
	INIT_LIST_HEAD(&tr->tr_list_buf);

	gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh);

	error = gfs2_glock_nq(&tr->tr_t_gh);
	if (error)
		goto fail_holder_uninit;

	error = gfs2_log_reserve(sdp, tr->tr_reserved);
	if (error)
		goto fail_gunlock;

	current->journal_info = tr;

	return 0;

fail_gunlock:
	gfs2_glock_dq(&tr->tr_t_gh);

fail_holder_uninit:
	gfs2_holder_uninit(&tr->tr_t_gh);
	kfree(tr);

	return error;
}
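/*
 * Illustrative sketch, not upstream code: the usual shape of a
 * gfs2_trans_begin()/gfs2_trans_end() pairing. The block count is built
 * from the RES_* constants for whatever metadata the caller will dirty,
 * exactly as the unlink/rmdir/rename paths below do.
 */
static int example_transaction(struct gfs2_sbd *sdp)
{
	int error;

	/* two dinode blocks, no revokes */
	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		return error;

	/* ... pin buffers with gfs2_trans_add_bh() and modify them ... */

	gfs2_trans_end(sdp);
	return 0;
}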
static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct gfs2_file *gf = NULL;
	struct gfs2_holder gh;
	int error;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				/* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
				 GL_ATIME | LM_FLAG_TRY_1CB, &gh);
		do_unlock = 1;
		error = gfs2_glock_nq_atime(&gh);
		if (unlikely(error))
			goto out_unlock;
	}

skip_lock:
	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else
		error = mpage_readpage(page, gfs2_get_block);

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		error = -EIO;

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	return error;
out_unlock:
	unlock_page(page);
	if (error == GLR_TRYFAILED) {
		error = AOP_TRUNCATED_PAGE;
		yield();
	}
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, so effectively we then have lock-ahead
 *    as well as read-ahead.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret = 0;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			struct gfs2_file *gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
				 LM_FLAG_TRY_1CB | GL_ATIME, &gh);
		do_unlock = 1;
		ret = gfs2_glock_nq_atime(&gh);
		if (ret == GLR_TRYFAILED)
			goto out_noerror;
		if (unlikely(ret))
			goto out_unlock;
	}
skip_lock:
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
out_noerror:
	ret = 0;
out_unlock:
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
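/*
 * Illustrative sketch, not upstream code: what the LM_FLAG_TRY_1CB usage
 * above boils down to. A try-lock request either succeeds at once or
 * fails with GLR_TRYFAILED instead of blocking, so optional work such as
 * readahead can simply be skipped on contention.
 */
static int example_try_shared(struct gfs2_inode *ip, struct gfs2_holder *gh)
{
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_TRY_1CB | GL_ATIME, gh);
	ret = gfs2_glock_nq_atime(gh);
	if (ret == GLR_TRYFAILED) {
		gfs2_holder_uninit(gh);
		return -EAGAIN; /* caller skips the optional work */
	}
	return ret;
}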
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
	error = gfs2_glock_nq(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	vma->vm_ops = &gfs2_vm_ops;

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		file_accessed(file);
		if (error == 0)
			gfs2_glock_dq_uninit(&i_gh);
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
static int gfs2_create(struct inode *dir, struct dentry *dentry,
		       int mode, struct nameidata *nd)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_holder ghs[2];
	struct inode *inode;

	gfs2_holder_init(dip->i_gl, 0, 0, ghs);

	for (;;) {
		inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode, 0);
		if (!IS_ERR(inode)) {
			gfs2_trans_end(sdp);
			if (dip->i_alloc->al_rgd)
				gfs2_inplace_release(dip);
			gfs2_quota_unlock(dip);
			gfs2_alloc_put(dip);
			gfs2_glock_dq_uninit_m(2, ghs);
			mark_inode_dirty(inode);
			break;
		} else if (PTR_ERR(inode) != -EEXIST ||
			   (nd && (nd->intent.open.flags & O_EXCL))) {
			gfs2_holder_uninit(ghs);
			return PTR_ERR(inode);
		}

		inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd);
		if (inode) {
			if (!IS_ERR(inode)) {
				gfs2_holder_uninit(ghs);
				break;
			} else {
				gfs2_holder_uninit(ghs);
				return PTR_ERR(inode);
			}
		}
	}

	d_instantiate(dentry, inode);
	return 0;
}
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
	error = gfs2_glock_nq(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_disksize) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_disksize + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_NOFS);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}
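/*
 * Illustrative sketch, not upstream code: a caller of gfs2_readlinki().
 * The caller passes in a buffer and its size; if the link target does not
 * fit, gfs2_readlinki() replaces *buf with a kmalloc'd buffer, so the
 * caller must detect that and kfree() it when done.
 */
static int example_read_link(struct gfs2_inode *ip)
{
	char array[32];
	char *buf = array;
	unsigned int len = sizeof(array);
	int error;

	error = gfs2_readlinki(ip, &buf, &len);
	if (error)
		return error;
	/* ... use the NUL-terminated target in buf ... */
	if (buf != array)
		kfree(buf); /* gfs2_readlinki() allocated a bigger buffer */
	return 0;
}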
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	error = gfs2_glock_nq_atime(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags);
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return error;
}
static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	ret = gfs2_glock_nq_atime(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}
static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME | LM_FLAG_TRY_1CB, &gh);
	error = gfs2_glock_nq_atime(&gh);
	if (unlikely(error)) {
		unlock_page(page);
		goto out;
	}
	error = __gfs2_readpage(file, page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error == GLR_TRYFAILED) {
		yield();
		return AOP_TRUNCATED_PAGE;
	}
	return error;
}
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}
static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		if (error == 0) {
			file_accessed(file);
			gfs2_glock_dq(&i_gh);
		}
		gfs2_holder_uninit(&i_gh);
		if (error)
			return error;
	}

	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct gfs2_inode *dip = GFS2_I(dir), *ip;
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_holder ghs[2];
	struct inode *inode;
	struct buffer_head *dibh;
	int error;

	gfs2_holder_init(dip->i_gl, 0, 0, ghs);

	inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0);
	if (IS_ERR(inode)) {
		gfs2_holder_uninit(ghs);
		return PTR_ERR(inode);
	}

	ip = ghs[1].gh_gl->gl_object;

	ip->i_inode.i_nlink = 2;
	ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
	ip->i_diskflags |= GFS2_DIF_JDATA;
	ip->i_entries = 2;

	error = gfs2_meta_inode_buffer(ip, &dibh);

	if (!gfs2_assert_withdraw(sdp, !error)) {
		struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
		struct gfs2_dirent *dent = (struct gfs2_dirent *)(di + 1);
		struct qstr str;

		gfs2_str2qstr(&str, ".");
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent);
		dent->de_inum = di->di_num; /* already GFS2 endian */
		dent->de_type = cpu_to_be16(DT_DIR);
		di->di_entries = cpu_to_be32(1);

		gfs2_str2qstr(&str, "..");
		dent = (struct gfs2_dirent *)((char *)dent + GFS2_DIRENT_SIZE(1));
		gfs2_qstr2dirent(&str,
				 dibh->b_size - GFS2_DIRENT_SIZE(1) -
				 sizeof(struct gfs2_dinode), dent);

		gfs2_inum_out(dip, dent);
		dent->de_type = cpu_to_be16(DT_DIR);

		gfs2_dinode_out(ip, di);

		brelse(dibh);
	}

	error = gfs2_change_nlink(dip, +1);
	gfs2_assert_withdraw(sdp, !error); /* dip already pinned */

	gfs2_trans_end(sdp);
	gfs2_inplace_release(dip);
	gfs2_quota_unlock(dip);
	gfs2_qadata_put(dip);

	gfs2_glock_dq_uninit_m(2, ghs);

	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

	return 0;
}
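/*
 * Illustrative layout note, not upstream code: the stuffed directory
 * block built by gfs2_mkdir() above looks like this on disk:
 *
 *   +--------------------+------------+-------------------------------+
 *   | struct gfs2_dinode | dirent "." | dirent ".." (fills the rest)  |
 *   +--------------------+------------+-------------------------------+
 *
 * "." points at the new directory's own dinode (di->di_num), ".." at
 * the parent (written via gfs2_inum_out(dip, dent)); both dirents carry
 * de_type == DT_DIR.
 */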
static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
	struct gfs2_holder ghs[3];
	struct gfs2_rgrpd *rgd;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);

	error = gfs2_glock_nq(ghs); /* parent */
	if (error)
		goto out_parent;

	error = gfs2_glock_nq(ghs + 1); /* child */
	if (error)
		goto out_child;

	error = -ENOENT;
	if (ip->i_inode.i_nlink == 0)
		goto out_rgrp;

	error = gfs2_glock_nq(ghs + 2); /* rgrp */
	if (error)
		goto out_rgrp;

	error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
	if (error)
		goto out_gunlock;

	if (ip->i_entries < 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		error = -EIO;
		goto out_gunlock;
	}
	if (ip->i_entries > 2) {
		error = -ENOTEMPTY;
		goto out_gunlock;
	}

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE + 3 * RES_LEAF +
				 RES_RG_BIT, 0);
	if (error)
		goto out_gunlock;

	error = gfs2_rmdiri(dip, &dentry->d_name, ip);

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq(ghs + 2);
out_rgrp:
	gfs2_holder_uninit(ghs + 2);
	gfs2_glock_dq(ghs + 1);
out_child:
	gfs2_holder_uninit(ghs + 1);
	gfs2_glock_dq(ghs);
out_parent:
	gfs2_holder_uninit(ghs);
	return error;
}
static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct inode *inode = old_dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder ghs[2];
	int alloc_required;
	int error;

	if (S_ISDIR(inode->i_mode))
		return -EPERM;

	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);

	error = gfs2_glock_nq(ghs); /* parent */
	if (error)
		goto out_parent;

	error = gfs2_glock_nq(ghs + 1); /* child */
	if (error)
		goto out_child;

	error = -ENOENT;
	if (inode->i_nlink == 0)
		goto out_gunlock;

	error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		goto out_gunlock;

	error = gfs2_dir_check(dir, &dentry->d_name, NULL);
	switch (error) {
	case -ENOENT:
		break;
	case 0:
		error = -EEXIST;
	default:
		goto out_gunlock;
	}

	error = -EINVAL;
	if (!dip->i_inode.i_nlink)
		goto out_gunlock;
	error = -EFBIG;
	if (dip->i_entries == (u32)-1)
		goto out_gunlock;
	error = -EPERM;
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		goto out_gunlock;
	error = -EINVAL;
	if (!ip->i_inode.i_nlink)
		goto out_gunlock;
	error = -EMLINK;
	if (ip->i_inode.i_nlink == (u32)-1)
		goto out_gunlock;

	alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name);
	if (error < 0)
		goto out_gunlock;
	error = 0;

	if (alloc_required) {
		struct gfs2_qadata *qa = gfs2_qadata_get(dip);

		if (!qa) {
			error = -ENOMEM;
			goto out_gunlock;
		}

		error = gfs2_quota_lock_check(dip);
		if (error)
			goto out_alloc;

		error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
		if (error)
			goto out_gunlock_q;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 gfs2_rg_blocks(dip) +
					 2 * RES_DINODE + RES_STATFS +
					 RES_QUOTA, 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0);
		if (error)
			goto out_ipres;
	}

	error = gfs2_dir_add(dir, &dentry->d_name, ip, IF2DT(inode->i_mode));
	if (error)
		goto out_end_trans;

	error = gfs2_change_nlink(ip, +1);

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (alloc_required)
		gfs2_inplace_release(dip);
out_gunlock_q:
	if (alloc_required)
		gfs2_quota_unlock(dip);
out_alloc:
	if (alloc_required)
		gfs2_qadata_put(dip);
out_gunlock:
	gfs2_glock_dq(ghs + 1);
out_child:
	gfs2_glock_dq(ghs);
out_parent:
	gfs2_holder_uninit(ghs);
	gfs2_holder_uninit(ghs + 1);

	if (!error) {
		atomic_inc(&inode->i_count);
		d_instantiate(dentry, inode);
		mark_inode_dirty(inode);
	}

	return error;
}
static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
	struct gfs2_holder ghs[3];
	struct gfs2_rgrpd *rgd;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = -EROFS;

	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd)
		goto out_inodes;

	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);

	error = gfs2_glock_nq(ghs); /* parent */
	if (error)
		goto out_parent;

	error = gfs2_glock_nq(ghs + 1); /* child */
	if (error)
		goto out_child;

	error = -ENOENT;
	if (ip->i_inode.i_nlink == 0)
		goto out_rgrp;

	error = gfs2_glock_nq(ghs + 2); /* rgrp */
	if (error)
		goto out_rgrp;

	error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
	if (error)
		goto out_gunlock;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF + RES_RG_BIT, 0);
	if (error)
		goto out_gunlock;

	error = gfs2_dir_del(dip, &dentry->d_name);
	if (error)
		goto out_end_trans;

	error = gfs2_change_nlink(ip, -1);

out_end_trans:
	gfs2_trans_end(sdp);
out_gunlock:
	gfs2_glock_dq(ghs + 2);
out_rgrp:
	gfs2_glock_dq(ghs + 1);
out_child:
	gfs2_glock_dq(ghs);
out_parent:
	gfs2_holder_uninit(ghs + 2);
out_inodes:
	gfs2_holder_uninit(ghs + 1);
	gfs2_holder_uninit(ghs);
	return error;
}
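/*
 * Illustrative note, not upstream code: the unlink and rmdir paths above
 * acquire their glocks in a fixed order -- parent directory, then the
 * victim inode, then the resource group holding the victim's dinode --
 * and drop them in reverse. Keeping every path on the same ordering is
 * what prevents ABBA deadlocks between cluster nodes contending for the
 * same set of locks.
 */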
static int gfs2_rename(struct inode *odir, struct dentry *odentry,
		       struct inode *ndir, struct dentry *ndentry)
{
	struct gfs2_inode *odip = GFS2_I(odir);
	struct gfs2_inode *ndip = GFS2_I(ndir);
	struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
	struct gfs2_inode *nip = NULL;
	struct gfs2_sbd *sdp = GFS2_SB(odir);
	struct gfs2_holder ghs[5], r_gh;
	struct gfs2_rgrpd *nrgd;
	unsigned int num_gh;
	int dir_rename = 0;
	int alloc_required;
	unsigned int x;
	int error;

	if (ndentry->d_inode) {
		nip = GFS2_I(ndentry->d_inode);
		if (ip == nip)
			return 0;
	}

	/* Make sure we aren't trying to move a directory into its own subdir */

	if (S_ISDIR(ip->i_inode.i_mode) && odip != ndip) {
		dir_rename = 1;

		error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
					   0, &r_gh);
		if (error)
			goto out;

		error = gfs2_ok_to_move(ip, ndip);
		if (error)
			goto out_gunlock_r;
	}

	num_gh = 1;
	gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	if (odip != ndip) {
		gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
		num_gh++;
	}
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
	num_gh++;

	if (nip) {
		gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
		num_gh++;
		/* grab the resource lock for unlink flag twiddling
		 * this is the case of the target file already existing
		 * so we unlink before doing the rename
		 */
		nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr);
		if (nrgd)
			gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0,
					 ghs + num_gh++);
	}

	error = gfs2_glock_nq_m(num_gh, ghs);
	if (error)
		goto out_uninit;

	/* Check out the old directory */

	error = gfs2_unlink_ok(odip, &odentry->d_name, ip);
	if (error)
		goto out_gunlock;

	/* Check out the new directory */

	if (nip) {
		error = gfs2_unlink_ok(ndip, &ndentry->d_name, nip);
		if (error)
			goto out_gunlock;

		if (S_ISDIR(nip->i_inode.i_mode)) {
			if (nip->i_di.di_entries < 2) {
				if (gfs2_consist_inode(nip))
					gfs2_dinode_print(nip);
				error = -EIO;
				goto out_gunlock;
			}
			if (nip->i_di.di_entries > 2) {
				error = -ENOTEMPTY;
				goto out_gunlock;
			}
		}
	} else {
		error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC);
		if (error)
			goto out_gunlock;

		error = gfs2_dir_check(ndir, &ndentry->d_name, NULL);
		switch (error) {
		case -ENOENT:
			error = 0;
			break;
		case 0:
			error = -EEXIST;
		default:
			goto out_gunlock;
		}

		if (odip != ndip) {
			if (!ndip->i_inode.i_nlink) {
				error = -EINVAL;
				goto out_gunlock;
			}
			if (ndip->i_di.di_entries == (u32)-1) {
				error = -EFBIG;
				goto out_gunlock;
			}
			if (S_ISDIR(ip->i_inode.i_mode) &&
			    ndip->i_inode.i_nlink == (u32)-1) {
				error = -EMLINK;
				goto out_gunlock;
			}
		}
	}

	/* Check out the dir to be renamed */

	if (dir_rename) {
		error = gfs2_permission(odentry->d_inode, MAY_WRITE);
		if (error)
			goto out_gunlock;
	}

	alloc_required = error = gfs2_diradd_alloc_required(ndir, &ndentry->d_name);
	if (error < 0)
		goto out_gunlock;
	error = 0;

	if (alloc_required) {
		struct gfs2_alloc *al = gfs2_alloc_get(ndip);

		if (!al) {
			error = -ENOMEM;
			goto out_gunlock;
		}

		error = gfs2_quota_lock_check(ndip);
		if (error)
			goto out_alloc;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(ndip);
		if (error)
			goto out_gunlock_q;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_length +
					 4 * RES_DINODE + 4 * RES_LEAF +
					 RES_STATFS + RES_QUOTA + 4, 0);
		if (error)
			goto out_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, 4 * RES_DINODE +
					 5 * RES_LEAF + 4, 0);
		if (error)
			goto out_gunlock;
	}

	/* Remove the target file, if it exists */

	if (nip) {
		if (S_ISDIR(nip->i_inode.i_mode))
			error = gfs2_rmdiri(ndip, &ndentry->d_name, nip);
		else {
			error = gfs2_dir_del(ndip, &ndentry->d_name);
			if (error)
				goto out_end_trans;
			error = gfs2_change_nlink(nip, -1);
		}
		if (error)
			goto out_end_trans;
	}

	if (dir_rename) {
		struct qstr name;
		gfs2_str2qstr(&name, "..");

		error = gfs2_change_nlink(ndip, +1);
		if (error)
			goto out_end_trans;
		error = gfs2_change_nlink(odip, -1);
		if (error)
			goto out_end_trans;

		error = gfs2_dir_mvino(ip, &name, ndip, DT_DIR);
		if (error)
			goto out_end_trans;
	} else {
		struct buffer_head *dibh;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out_end_trans;
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	error = gfs2_dir_del(odip, &odentry->d_name);
	if (error)
		goto out_end_trans;

	error = gfs2_dir_add(ndir, &ndentry->d_name, ip,
			     IF2DT(ip->i_inode.i_mode));
	if (error)
		goto out_end_trans;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipreserv:
	if (alloc_required)
		gfs2_inplace_release(ndip);
out_gunlock_q:
	if (alloc_required)
		gfs2_quota_unlock(ndip);
out_alloc:
	if (alloc_required)
		gfs2_alloc_put(ndip);
out_gunlock:
	gfs2_glock_dq_m(num_gh, ghs);
out_uninit:
	for (x = 0; x < num_gh; x++)
		gfs2_holder_uninit(ghs + x);
out_gunlock_r:
	if (dir_rename)
		gfs2_glock_dq_uninit(&r_gh);
out:
	return error;
}
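/*
 * Illustrative note, not upstream code: gfs2_rename() above can hold up
 * to five ordinary glocks at once -- old directory, new directory (if
 * different), the inode being moved, the existing target inode (if any)
 * and the target's resource group -- all acquired together via
 * gfs2_glock_nq_m(). Cross-directory directory renames additionally take
 * the filesystem-wide sd_rename_gl first, which serialises them so the
 * gfs2_ok_to_move() ancestry check cannot race with a concurrent rename.
 */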
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = (u64)page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required = 0;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = get_write_access(inode);
	if (ret)
		goto out;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out_write_access;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	gfs2_size_hint(inode, pos, PAGE_CACHE_SIZE);

	ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE,
					&alloc_required);
	if (ret)
		goto out_unlock;
	if (!alloc_required) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_unlock;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out_write_access:
	put_write_access(inode);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap
 * @file: The file to map
 * @vma: The VMA which described the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rs_delete(ip);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @dentry: the dentry that points to the inode to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * The VFS will flush data for us. We only need to worry
 * about metadata here.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(inode->i_mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	return 0;
}

/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	size_t writesize = iov_length(iov, nr_segs);
	struct dentry *dentry = file->f_dentry;
	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
	struct gfs2_sbd *sdp;
	int ret;

	sdp = GFS2_SB(file->f_mapping->host);
	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file->f_dentry->d_inode, pos, writesize);

	if (file->f_flags & O_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	int error;
	struct inode *inode = out->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	error = gfs2_rs_alloc(ip);
	if (error)
		return (ssize_t)error;

	gfs2_size_hint(inode, *ppos, len);

	return generic_file_splice_write(pipe, out, ppos, len, flags);
}
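/*
 * Illustrative note, not upstream code: the O_APPEND dance in
 * gfs2_file_aio_write() above works because acquiring the shared glock
 * forces this node to refresh the inode from the most recent disk state
 * if another node has changed it; once i_size is current, dropping the
 * lock and letting generic_file_aio_write() compute the append offset is
 * good enough, provided applications do their own file locking.
 */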
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_qadata *qa = NULL;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		qa = gfs2_qadata_get(ip);
		if (!qa) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE / sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	page_cache_release(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_qadata_put(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE / sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	page_cache_release(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */
static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!tr);
	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_res->rs_qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */
static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
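/*
 * Illustrative note, not upstream code: the ->write_begin/->write_end
 * pair above forms a bracket. write_begin takes the inode glock, the
 * quota and block reservations and opens the transaction; write_end (or
 * the stuffed variant) commits the page, then unwinds in exactly the
 * reverse order -- transaction, reservation, quota, glock -- whether or
 * not the copy succeeded.
 */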
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nrpages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1);
		loff_t len = iov_length(iov, nr_segs);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping,
						   offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (rw == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, gfs2_get_block_direct,
				  NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return rv;
}
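/*
 * Illustrative note, not upstream code: LM_ST_DEFERRED ("CW") is the
 * middle ground used by gfs2_direct_IO() above -- it is compatible with
 * other deferred holders across the cluster but not with shared or
 * exclusive ones, so every node is forced to drop its cached pages while
 * direct I/O, which bypasses the page cache entirely, can proceed from
 * any number of nodes at once.
 */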
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_unlock;

	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE / sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_block_map);

out:
	if (error == 0)
		return 0;

	page_cache_release(page);
	if (pos + len > ip->i_inode.i_size)
		vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_alloc_put(ip);
	}
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip, struct gfs2_diradd *da)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc_parms ap = { .target = da->nr_blocks, };
	int error;

	if (da->nr_blocks) {
		error = gfs2_quota_lock_check(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_inplace_reserve(dip, &ap);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, da, 2), 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, da);
	if (error)
		goto fail_end_trans;

fail_end_trans:
	gfs2_trans_end(sdp);
fail_ipreserv:
	gfs2_inplace_release(dip);
fail_quota_locks:
	gfs2_quota_unlock(dip);
	return error;
}

static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
			   void *fs_info)
{
	const struct xattr *xattr;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		err = __gfs2_xattr_set(inode, xattr->name, xattr->value,
				       xattr->value_len, 0,
				       GFS2_EATYPE_SECURITY);
		if (err < 0)
			break;
	}
	return err;
}

static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip,
			      const struct qstr *qstr)
{
	return security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr,
					    &gfs2_initxattrs, NULL);
}

/**
 * gfs2_create_inode - Create a new inode
 * @dir: The parent directory
 * @dentry: The new dentry
 * @file: If non-NULL, the file which is being opened
 * @mode: The permissions on the new inode
 * @dev: For device nodes, this is the device number
 * @symname: For symlinks, this is the link destination
 * @size: The initial size of the inode (ignored for directories)
 *
 * Returns: 0 on success, or error code
 */
static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
			     struct file *file, umode_t mode, dev_t dev,
			     const char *symname, unsigned int size,
			     int excl, int *opened)
{
	const struct qstr *name = &dentry->d_name;
	struct posix_acl *default_acl, *acl;
	struct gfs2_holder ghs[2];
	struct inode *inode = NULL;
	struct gfs2_inode *dip = GFS2_I(dir), *ip;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_glock *io_gl;
	struct dentry *d;
	int error;
	u32 aflags = 0;
	struct gfs2_diradd da = { .bh = NULL, };

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return -ENAMETOOLONG;

	error = gfs2_rs_alloc(dip);
	if (error)
		return error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl);
	error = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		d = d_splice_alias(inode, dentry);
		error = PTR_ERR(d);
		if (IS_ERR(d)) {
			inode = ERR_CAST(d);
			goto fail_gunlock;
		}
		error = 0;
		if (file) {
			if (S_ISREG(inode->i_mode)) {
				WARN_ON(d != NULL);
				error = finish_open(file, dentry,
						    gfs2_open_common, opened);
			} else {
				error = finish_no_open(file, d);
			}
		} else {
			dput(d);
		}
		gfs2_glock_dq_uninit(ghs);
		return error;
	} else if (error != -ENOENT) {
		goto fail_gunlock;
	}

	error = gfs2_diradd_alloc_required(dir, name, &da);
	if (error < 0)
		goto fail_gunlock;

	inode = new_inode(sdp->sd_vfs);
	error = -ENOMEM;
	if (!inode)
		goto fail_gunlock;

	error = posix_acl_create(dir, &mode, &default_acl, &acl);
	if (error)
		goto fail_free_vfs_inode;

	ip = GFS2_I(inode);
	error = gfs2_rs_alloc(ip);
	if (error)
		goto fail_free_acls;

	inode->i_mode = mode;
	set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
	inode->i_rdev = dev;
	inode->i_size = size;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	gfs2_set_inode_blocks(inode, 1);
	munge_mode_uid_gid(dip, inode);
	ip->i_goal = dip->i_goal;
	ip->i_diskflags = 0;
	ip->i_eattr = 0;
	ip->i_height = 0;
	ip->i_depth = 0;
	ip->i_entries = 0;

	switch (mode & S_IFMT) {
	case S_IFREG:
		if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			ip->i_diskflags |= GFS2_DIF_JDATA;
		gfs2_set_aops(inode);
		break;
	case S_IFDIR:
		ip->i_diskflags |= (dip->i_diskflags & GFS2_DIF_INHERIT_JDATA);
		ip->i_diskflags |= GFS2_DIF_JDATA;
		ip->i_entries = 2;
		break;
	}
	gfs2_set_inode_flags(inode);

	if ((GFS2_I(sdp->sd_root_dir->d_inode) == dip) ||
	    (dip->i_diskflags & GFS2_DIF_TOPDIR))
		aflags |= GFS2_AF_ORLOV;

	error = alloc_dinode(ip, aflags);
	if (error)
		goto fail_free_inode;

	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE,
			       &ip->i_gl);
	if (error)
		goto fail_free_inode;

	ip->i_gl->gl_object = ip;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
	if (error)
		goto fail_free_inode;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto fail_gunlock2;

	init_dinode(dip, ip, symname);
	gfs2_trans_end(sdp);

	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE,
			       &io_gl);
	if (error)
		goto fail_gunlock2;

	error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT,
				   &ip->i_iopen_gh);
	if (error)
		goto fail_gunlock2;

	ip->i_iopen_gh.gh_gl->gl_object = ip;
	gfs2_glock_put(io_gl);
	gfs2_set_iop(inode);
	insert_inode_hash(inode);

	if (default_acl) {
		error = gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
		posix_acl_release(default_acl);
	}
	if (acl) {
		if (!error)
			error = gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
		posix_acl_release(acl);
	}

	if (error)
		goto fail_gunlock3;

	error = gfs2_security_init(dip, ip, name);
	if (error)
		goto fail_gunlock3;

	error = link_dinode(dip, name, ip, &da);
	if (error)
		goto fail_gunlock3;

	mark_inode_dirty(inode);
	d_instantiate(dentry, inode);
	if (file) {
		*opened |= FILE_CREATED;
		error = finish_open(file, dentry, gfs2_open_common, opened);
	}
	gfs2_glock_dq_uninit(ghs);
	gfs2_glock_dq_uninit(ghs + 1);
	return error;

fail_gunlock3:
	gfs2_glock_dq_uninit(ghs + 1);
	if (ip->i_gl)
		gfs2_glock_put(ip->i_gl);
	goto fail_gunlock;

fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);
fail_free_inode:
	if (ip->i_gl)
		gfs2_glock_put(ip->i_gl);
	gfs2_rs_delete(ip, NULL);
fail_free_acls:
	if (default_acl)
		posix_acl_release(default_acl);
	if (acl)
		posix_acl_release(acl);
fail_free_vfs_inode:
	free_inode_nonrcu(inode);
	inode = NULL;
fail_gunlock:
	gfs2_dir_no_add(&da);
	gfs2_glock_dq_uninit(ghs);
	if (inode && !IS_ERR(inode)) {
		clear_nlink(inode);
		mark_inode_dirty(inode);
		set_bit(GIF_ALLOC_FAILED, &GFS2_I(inode)->i_flags);
		iput(inode);
	}
fail:
	return error;
}

/**
 * gfs2_create - Create a file
 * @dir: The directory in which to create the file
 * @dentry: The dentry of the new file
 * @mode: The mode of the new file
 *
 * Returns: errno
 */
static int gfs2_create(struct inode *dir, struct dentry *dentry,
		       umode_t mode, bool excl)
{
	return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0,
				 excl, NULL);
}

/**
 * __gfs2_lookup - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @dentry: The dentry of the new inode
 * @file: File to be opened
 * @opened: atomic_open flags
 *
 * Returns: errno
 */
static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
				    struct file *file, int *opened)
{
	struct inode *inode;
	struct dentry *d;
	struct gfs2_holder gh;
	struct gfs2_glock *gl;
	int error;

	inode = gfs2_lookupi(dir, &dentry->d_name, 0);
	if (!inode)
		return NULL;
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	gl = GFS2_I(inode)->i_gl;
	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
	if (error) {
		iput(inode);
		return ERR_PTR(error);
	}

	d = d_splice_alias(inode, dentry);
	if (IS_ERR(d)) {
		gfs2_glock_dq_uninit(&gh);
		return d;
	}
	if (file && S_ISREG(inode->i_mode))
		error = finish_open(file, dentry, gfs2_open_common, opened);

	gfs2_glock_dq_uninit(&gh);
	if (error) {
		dput(d);
		return ERR_PTR(error);
	}
	return d;
}

static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned flags)
{
	return __gfs2_lookup(dir, dentry, NULL, NULL);
}

/**
 * gfs2_link - Link to a file
 * @old_dentry: The inode to link
 * @dir: Add link to this directory
 * @dentry: The name of the link
 *
 * Link the inode in "old_dentry" into the directory "dir" with the
 * name in "dentry".
 *
 * Returns: errno
 */
static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct inode *inode = old_dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder ghs[2];
	struct buffer_head *dibh;
	struct gfs2_diradd da = { .bh = NULL, };
	int error;

	if (S_ISDIR(inode->i_mode))
		return -EPERM;

	error = gfs2_rs_alloc(dip);
	if (error)
		return error;

	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);

	error = gfs2_glock_nq(ghs); /* parent */
	if (error)
		goto out_parent;

	error = gfs2_glock_nq(ghs + 1); /* child */
	if (error)
		goto out_child;

	error = -ENOENT;
	if (inode->i_nlink == 0)
		goto out_gunlock;

	error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		goto out_gunlock;

	error = gfs2_dir_check(dir, &dentry->d_name, NULL);
	switch (error) {
	case -ENOENT:
		break;
	case 0:
		error = -EEXIST;
	default:
		goto out_gunlock;
	}

	error = -EINVAL;
	if (!dip->i_inode.i_nlink)
		goto out_gunlock;
	error = -EFBIG;
	if (dip->i_entries == (u32)-1)
		goto out_gunlock;
	error = -EPERM;
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		goto out_gunlock;
	error = -EINVAL;
	if (!ip->i_inode.i_nlink)
		goto out_gunlock;
	error = -EMLINK;
	if (ip->i_inode.i_nlink == (u32)-1)
		goto out_gunlock;

	error = gfs2_diradd_alloc_required(dir, &dentry->d_name, &da);
	if (error < 0)
		goto out_gunlock;

	if (da.nr_blocks) {
		struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };

		error = gfs2_quota_lock_check(dip);
		if (error)
			goto out_gunlock;

		error = gfs2_inplace_reserve(dip, &ap);
		if (error)
			goto out_gunlock_q;

		error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, &da, 2), 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0);
		if (error)
			goto out_ipres;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out_end_trans;

	error = gfs2_dir_add(dir, &dentry->d_name, ip, &da);
	if (error)
		goto out_brelse;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	inc_nlink(&ip->i_inode);
	ip->i_inode.i_ctime = CURRENT_TIME;
	ihold(inode);
	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

out_brelse:
	brelse(dibh);
out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (da.nr_blocks)
		gfs2_inplace_release(dip);
out_gunlock_q:
	if (da.nr_blocks)
		gfs2_quota_unlock(dip);
out_gunlock:
	gfs2_dir_no_add(&da);
	gfs2_glock_dq(ghs + 1);
out_child:
	gfs2_glock_dq(ghs);
out_parent:
	gfs2_holder_uninit(ghs);
	gfs2_holder_uninit(ghs + 1);
	return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */
static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
			  const struct gfs2_inode *ip)
{
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	if ((dip->i_inode.i_mode & S_ISVTX) &&
	    !uid_eq(dip->i_inode.i_uid, current_fsuid()) &&
	    !uid_eq(ip->i_inode.i_uid, current_fsuid()) && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	error = gfs2_dir_check(&dip->i_inode, name, ip);
	if (error)
		return error;

	return 0;
}
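/*
 * Worked example, not upstream code: the S_ISVTX test in gfs2_unlink_ok()
 * above is the classic sticky-bit rule. In a mode 1777 directory (e.g.
 * /tmp), a user may unlink a file only if they own the directory, own the
 * file, or hold CAP_FOWNER -- exactly the three conditions checked there.
 */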