/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: The file whose inode flags are to be changed
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}
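/*
 * Illustrative userspace sketch (not part of the GFS2 sources above):
 * do_gfs2_set_flags() is reached from GFS2's ioctl handler when an
 * application issues the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls,
 * with the FS_*_FL bits translated to the GFS2_DIF_* on-disk flags.
 * The mount path below is hypothetical.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int flags;
	int fd = open("/mnt/gfs2/somefile", O_RDONLY);	/* hypothetical path */

	if (fd < 0)
		return 1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
		flags |= FS_APPEND_FL;			/* request append-only */
		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
			perror("FS_IOC_SETFLAGS");
	}
	close(fd);
	return 0;
}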
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = vma->vm_file->f_path.dentry->d_inode; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); unsigned long last_index; u64 pos = page->index << PAGE_CACHE_SHIFT; unsigned int data_blocks, ind_blocks, rblocks; struct gfs2_holder gh; struct gfs2_qadata *qa; loff_t size; int ret; /* Wait if fs is frozen. This is racy so we check again later on * and retry if the fs has been frozen after the page lock has * been acquired */ vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); if (ret) goto out; set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GIF_SW_PAGED, &ip->i_flags); if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { lock_page(page); if (!PageUptodate(page) || page->mapping != inode->i_mapping) { ret = -EAGAIN; unlock_page(page); } goto out_unlock; } ret = -ENOMEM; qa = gfs2_qadata_get(ip); if (qa == NULL) goto out_unlock; ret = gfs2_quota_lock_check(ip); if (ret) goto out_alloc_put; gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks); if (ret) goto out_quota_unlock; rblocks = RES_DINODE + ind_blocks; if (gfs2_is_jdata(ip)) rblocks += data_blocks ? data_blocks : 1; if (ind_blocks || data_blocks) { rblocks += RES_STATFS + RES_QUOTA; rblocks += gfs2_rg_blocks(ip); } ret = gfs2_trans_begin(sdp, rblocks, 0); if (ret) goto out_trans_fail; lock_page(page); ret = -EINVAL; size = i_size_read(inode); last_index = (size - 1) >> PAGE_CACHE_SHIFT; /* Check page index against inode size */ if (size == 0 || (page->index > last_index)) goto out_trans_end; ret = -EAGAIN; /* If truncated, we must retry the operation, we may have raced * with the glock demotion code. */ if (!PageUptodate(page) || page->mapping != inode->i_mapping) goto out_trans_end; /* Unstuff, if required, and allocate backing blocks for page */ ret = 0; if (gfs2_is_stuffed(ip)) ret = gfs2_unstuff_dinode(ip, page); if (ret == 0) ret = gfs2_allocate_page_backing(page); out_trans_end: if (ret) unlock_page(page); gfs2_trans_end(sdp); out_trans_fail: gfs2_inplace_release(ip); out_quota_unlock: gfs2_quota_unlock(ip); out_alloc_put: gfs2_qadata_put(ip); out_unlock: gfs2_glock_dq(&gh); out: gfs2_holder_uninit(&gh); if (ret == 0) { set_page_dirty(page); /* This check must be post dropping of transaction lock */ if (inode->i_sb->s_frozen == SB_UNFROZEN) { wait_on_page_writeback(page); } else { ret = -EAGAIN; unlock_page(page); } } return block_page_mkwrite_return(ret); }
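/*
 * Illustrative userspace sketch (hypothetical path, not kernel code):
 * ->page_mkwrite is invoked on the first write fault to a page of a
 * MAP_SHARED, PROT_WRITE mapping, which is when gfs2_page_mkwrite()
 * above takes the glock, reserves blocks and opens the transaction.
 * Assumes the file already contains at least one page of data.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/gfs2/data", O_RDWR);	/* hypothetical path */
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memcpy(p, "hello", 5);	/* write fault -> gfs2_page_mkwrite() */
	munmap(p, 4096);
	close(fd);
	return 0;
}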
static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, unsigned int mode, dev_t dev, const char *symname, unsigned int size) { const struct qstr *name = &dentry->d_name; struct gfs2_holder ghs[2]; struct inode *inode = NULL; struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 }; int error; u64 generation; struct buffer_head *bh = NULL; if (!name->len || name->len > GFS2_FNAMESIZE) return -ENAMETOOLONG; error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); if (error) goto fail; error = create_ok(dip, name, mode); if (error) goto fail_gunlock; error = alloc_dinode(dip, &inum.no_addr, &generation); if (error) goto fail_gunlock; inum.no_formal_ino = generation; error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1); if (error) goto fail_gunlock; error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, symname, size, &bh); if (error) goto fail_gunlock2; inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr, inum.no_formal_ino, 0); if (IS_ERR(inode)) goto fail_gunlock2; error = gfs2_inode_refresh(GFS2_I(inode)); if (error) goto fail_gunlock2; error = gfs2_acl_create(dip, inode); if (error) goto fail_gunlock2; error = gfs2_security_init(dip, GFS2_I(inode), name); if (error) goto fail_gunlock2; error = link_dinode(dip, name, GFS2_I(inode)); if (error) goto fail_gunlock2; if (bh) brelse(bh); gfs2_trans_end(sdp); if (dip->i_alloc->al_rgd) gfs2_inplace_release(dip); gfs2_quota_unlock(dip); gfs2_alloc_put(dip); gfs2_glock_dq_uninit_m(2, ghs); mark_inode_dirty(inode); d_instantiate(dentry, inode); return 0; fail_gunlock2: gfs2_glock_dq_uninit(ghs + 1); if (inode && !IS_ERR(inode)) iput(inode); fail_gunlock: gfs2_glock_dq_uninit(ghs); fail: if (bh) brelse(bh); return error; } /** * gfs2_create - Create a file * @dir: The directory in which to create the file * @dentry: The dentry of the new file * @mode: The mode of the new file * * Returns: errno */ static int gfs2_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { struct inode *inode; int ret; for (;;) { ret = gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0); if (ret != -EEXIST || (nd && (nd->flags & LOOKUP_EXCL))) return ret; inode = gfs2_lookupi(dir, &dentry->d_name, 0); if (inode) { if (!IS_ERR(inode)) break; return PTR_ERR(inode); } } d_instantiate(dentry, inode); return 0; } /** * gfs2_lookup - Look up a filename in a directory and return its inode * @dir: The directory inode * @dentry: The dentry of the new inode * @nd: passed from Linux VFS, ignored by us * * Called by the VFS layer. Lock dir and call gfs2_lookupi() * * Returns: errno */ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; inode = gfs2_lookupi(dir, &dentry->d_name, 0); if (inode && IS_ERR(inode)) return ERR_CAST(inode); if (inode) { struct gfs2_glock *gl = GFS2_I(inode)->i_gl; struct gfs2_holder gh; int error; error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); if (error) { iput(inode); return ERR_PTR(error); } gfs2_glock_dq_uninit(&gh); return d_splice_alias(inode, dentry); } d_add(dentry, inode); return NULL; } /** * gfs2_link - Link to a file * @old_dentry: The inode to link * @dir: Add link to this directory * @dentry: The name of the link * * Link the inode in "old_dentry" into the directory "dir" with the * name in "dentry". 
* * Returns: errno */ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); struct inode *inode = old_dentry->d_inode; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder ghs[2]; struct buffer_head *dibh; int alloc_required; int error; if (S_ISDIR(inode->i_mode)) return -EPERM; gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); error = gfs2_glock_nq(ghs); /* parent */ if (error) goto out_parent; error = gfs2_glock_nq(ghs + 1); /* child */ if (error) goto out_child; error = -ENOENT; if (inode->i_nlink == 0) goto out_gunlock; error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC, 0); if (error) goto out_gunlock; error = gfs2_dir_check(dir, &dentry->d_name, NULL); switch (error) { case -ENOENT: break; case 0: error = -EEXIST; default: goto out_gunlock; } error = -EINVAL; if (!dip->i_inode.i_nlink) goto out_gunlock; error = -EFBIG; if (dip->i_entries == (u32)-1) goto out_gunlock; error = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out_gunlock; error = -EINVAL; if (!ip->i_inode.i_nlink) goto out_gunlock; error = -EMLINK; if (ip->i_inode.i_nlink == (u32)-1) goto out_gunlock; alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name); if (error < 0) goto out_gunlock; error = 0; if (alloc_required) { struct gfs2_alloc *al = gfs2_alloc_get(dip); if (!al) { error = -ENOMEM; goto out_gunlock; } error = gfs2_quota_lock_check(dip); if (error) goto out_alloc; al->al_requested = sdp->sd_max_dirres; error = gfs2_inplace_reserve(dip); if (error) goto out_gunlock_q; error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + gfs2_rg_blocks(al) + 2 * RES_DINODE + RES_STATFS + RES_QUOTA, 0); if (error) goto out_ipres; } else { error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0); if (error) goto out_ipres; } error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out_end_trans; error = gfs2_dir_add(dir, &dentry->d_name, ip); if (error) goto out_brelse; gfs2_trans_add_bh(ip->i_gl, dibh, 1); inc_nlink(&ip->i_inode); ip->i_inode.i_ctime = CURRENT_TIME; gfs2_dinode_out(ip, dibh->b_data); mark_inode_dirty(&ip->i_inode); out_brelse: brelse(dibh); out_end_trans: gfs2_trans_end(sdp); out_ipres: if (alloc_required) gfs2_inplace_release(dip); out_gunlock_q: if (alloc_required) gfs2_quota_unlock(dip); out_alloc: if (alloc_required) gfs2_alloc_put(dip); out_gunlock: gfs2_glock_dq(ghs + 1); out_child: gfs2_glock_dq(ghs); out_parent: gfs2_holder_uninit(ghs); gfs2_holder_uninit(ghs + 1); if (!error) { ihold(inode); d_instantiate(dentry, inode); mark_inode_dirty(inode); } return error; } /* * gfs2_unlink_ok - check to see that a inode is still in a directory * @dip: the directory * @name: the name of the file * @ip: the inode * * Assumes that the lock on (at least) @dip is held. * * Returns: 0 if the parent/child relationship is correct, errno if it isn't */ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, const struct gfs2_inode *ip) { int error; if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode)) return -EPERM; if ((dip->i_inode.i_mode & S_ISVTX) && dip->i_inode.i_uid != current_fsuid() && ip->i_inode.i_uid != current_fsuid() && !capable(CAP_FOWNER)) return -EPERM; if (IS_APPEND(&dip->i_inode)) return -EPERM; error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0); if (error) return error; error = gfs2_dir_check(&dip->i_inode, name, ip); if (error) return error; return 0; }
static int gfs2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); unsigned int data_blocks = 0, ind_blocks = 0, rblocks; unsigned requested = 0; int alloc_required; int error = 0; pgoff_t index = pos >> PAGE_SHIFT; unsigned from = pos & (PAGE_SIZE - 1); struct page *page; gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); error = gfs2_glock_nq(&ip->i_gh); if (unlikely(error)) goto out_uninit; if (&ip->i_inode == sdp->sd_rindex) { error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &m_ip->i_gh); if (unlikely(error)) { gfs2_glock_dq(&ip->i_gh); goto out_uninit; } } alloc_required = gfs2_write_alloc_required(ip, pos, len); if (alloc_required || gfs2_is_jdata(ip)) gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); if (alloc_required) { struct gfs2_alloc_parms ap = { .aflags = 0, }; requested = data_blocks + ind_blocks; ap.target = requested; error = gfs2_quota_lock_check(ip, &ap); if (error) goto out_unlock; error = gfs2_inplace_reserve(ip, &ap); if (error) goto out_qunlock; } rblocks = RES_DINODE + ind_blocks; if (gfs2_is_jdata(ip)) rblocks += data_blocks ? data_blocks : 1; if (ind_blocks || data_blocks) rblocks += RES_STATFS + RES_QUOTA; if (&ip->i_inode == sdp->sd_rindex) rblocks += 2 * RES_STATFS; if (alloc_required) rblocks += gfs2_rg_blocks(ip, requested); error = gfs2_trans_begin(sdp, rblocks, PAGE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; error = -ENOMEM; flags |= AOP_FLAG_NOFS; page = grab_cache_page_write_begin(mapping, index, flags); *pagep = page; if (unlikely(!page)) goto out_endtrans; if (gfs2_is_stuffed(ip)) { error = 0; if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { error = gfs2_unstuff_dinode(ip, page); if (error == 0) goto prepare_write; } else if (!PageUptodate(page)) { error = stuffed_readpage(ip, page); } goto out; } prepare_write: error = __block_write_begin(page, from, len, gfs2_block_map); out: if (error == 0) return 0; unlock_page(page); put_page(page); gfs2_trans_end(sdp); if (pos + len > ip->i_inode.i_size) gfs2_trim_blocks(&ip->i_inode); goto out_trans_fail; out_endtrans: gfs2_trans_end(sdp); out_trans_fail: if (alloc_required) { gfs2_inplace_release(ip); out_qunlock: gfs2_quota_unlock(ip); } out_unlock: if (&ip->i_inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); out_uninit: gfs2_holder_uninit(&ip->i_gh); return error; } /** * adjust_fs_space - Adjusts the free space available due to gfs2_grow * @inode: the rindex inode */ static void adjust_fs_space(struct inode *inode) { struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; struct buffer_head *m_bh, *l_bh; u64 fs_total, new_free; /* Total up the file system space, according to the latest rindex. 
*/ fs_total = gfs2_ri_total(sdp); if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0) return; spin_lock(&sdp->sd_statfs_spin); gfs2_statfs_change_in(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); if (fs_total > (m_sc->sc_total + l_sc->sc_total)) new_free = fs_total - (m_sc->sc_total + l_sc->sc_total); else new_free = 0; spin_unlock(&sdp->sd_statfs_spin); fs_warn(sdp, "File system extended by %llu blocks.\n", (unsigned long long)new_free); gfs2_statfs_change(sdp, new_free, new_free, 0); if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0) goto out; update_statfs(sdp, m_bh, l_bh); brelse(l_bh); out: brelse(m_bh); } /** * gfs2_stuffed_write_end - Write end for stuffed files * @inode: The inode * @dibh: The buffer_head containing the on-disk inode * @pos: The file position * @len: The length of the write * @copied: How much was actually copied by the VFS * @page: The page * * This copies the data from the page into the inode block after * the inode data structure itself. * * Returns: errno */ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, loff_t pos, unsigned len, unsigned copied, struct page *page) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); u64 to = pos + copied; void *kaddr; unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode))); kaddr = kmap_atomic(page); memcpy(buf + pos, kaddr + pos, copied); memset(kaddr + pos + copied, 0, len - copied); flush_dcache_page(page); kunmap_atomic(kaddr); if (!PageUptodate(page)) SetPageUptodate(page); unlock_page(page); put_page(page); if (copied) { if (inode->i_size < to) i_size_write(inode, to); mark_inode_dirty(inode); } if (inode == sdp->sd_rindex) { adjust_fs_space(inode); sdp->sd_rindex_uptodate = 0; } brelse(dibh); gfs2_trans_end(sdp); if (inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); gfs2_holder_uninit(&ip->i_gh); return copied; } /** * gfs2_write_end * @file: The file to write to * @mapping: The address space to write to * @pos: The file position * @len: The length of the data * @copied: How much was actually copied by the VFS * @page: The page that has been written * @fsdata: The fsdata (unused in GFS2) * * The main write_end function for GFS2. We have a separate one for * stuffed files as they are slightly different, otherwise we just * put our locking around the VFS provided functions. 
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!tr);
	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		put_page(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
static int gfs2_write_jdata_pagevec(struct address_space *mapping, struct writeback_control *wbc, struct pagevec *pvec, int nr_pages, pgoff_t end) { struct inode *inode = mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); loff_t i_size = i_size_read(inode); pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset = i_size & (PAGE_CACHE_SIZE-1); unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); struct backing_dev_info *bdi = mapping->backing_dev_info; int i; int ret; ret = gfs2_trans_begin(sdp, nrblocks, nrblocks); if (ret < 0) return ret; for(i = 0; i < nr_pages; i++) { struct page *page = pvec->pages[i]; lock_page(page); if (unlikely(page->mapping != mapping)) { unlock_page(page); continue; } if (!wbc->range_cyclic && page->index > end) { ret = 1; unlock_page(page); continue; } if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); if (PageWriteback(page) || !clear_page_dirty_for_io(page)) { unlock_page(page); continue; } /* Is the page fully outside i_size? (truncate in progress) */ if (page->index > end_index || (page->index == end_index && !offset)) { page->mapping->a_ops->invalidatepage(page, 0); unlock_page(page); continue; } ret = __gfs2_jdata_writepage(page, wbc); if (ret || (--(wbc->nr_to_write) <= 0)) ret = 1; if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; ret = 1; } } gfs2_trans_end(sdp); return ret; }
static int gfs2_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); struct inode *inode = old_dentry->d_inode; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder ghs[2]; int alloc_required; int error; if (S_ISDIR(inode->i_mode)) return -EPERM; gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); error = gfs2_glock_nq(ghs); /* parent */ if (error) goto out_parent; error = gfs2_glock_nq(ghs + 1); /* child */ if (error) goto out_child; error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC); if (error) goto out_gunlock; error = gfs2_dir_check(dir, &dentry->d_name, NULL); switch (error) { case -ENOENT: break; case 0: error = -EEXIST; default: goto out_gunlock; } error = -EINVAL; if (!dip->i_inode.i_nlink) goto out_gunlock; error = -EFBIG; if (dip->i_entries == (u32)-1) goto out_gunlock; error = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out_gunlock; error = -EINVAL; if (!ip->i_inode.i_nlink) goto out_gunlock; error = -EMLINK; if (ip->i_inode.i_nlink == (u32)-1) goto out_gunlock; alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name); if (error < 0) goto out_gunlock; error = 0; if (alloc_required) { struct gfs2_alloc *al = gfs2_alloc_get(dip); if (!al) { error = -ENOMEM; goto out_gunlock; } error = gfs2_quota_lock_check(dip); if (error) goto out_alloc; al->al_requested = sdp->sd_max_dirres; error = gfs2_inplace_reserve(dip); if (error) goto out_gunlock_q; error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + al->al_rgd->rd_length + 2 * RES_DINODE + RES_STATFS + RES_QUOTA, 0); if (error) goto out_ipres; } else { error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0); if (error) goto out_ipres; } error = gfs2_dir_add(dir, &dentry->d_name, ip, IF2DT(inode->i_mode)); if (error) goto out_end_trans; error = gfs2_change_nlink(ip, +1); out_end_trans: gfs2_trans_end(sdp); out_ipres: if (alloc_required) gfs2_inplace_release(dip); out_gunlock_q: if (alloc_required) gfs2_quota_unlock(dip); out_alloc: if (alloc_required) gfs2_alloc_put(dip); out_gunlock: gfs2_glock_dq(ghs + 1); out_child: gfs2_glock_dq(ghs); out_parent: gfs2_holder_uninit(ghs); gfs2_holder_uninit(ghs + 1); if (!error) { atomic_inc(&inode->i_count); d_instantiate(dentry, inode); mark_inode_dirty(inode); } return error; }
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) { struct gfs2_inode *dip = GFS2_I(dir), *ip; struct gfs2_sbd *sdp = GFS2_SB(dir); struct gfs2_holder ghs[2]; struct inode *inode; struct buffer_head *dibh; int error; gfs2_holder_init(dip->i_gl, 0, 0, ghs); inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0); if (IS_ERR(inode)) { gfs2_holder_uninit(ghs); return PTR_ERR(inode); } ip = ghs[1].gh_gl->gl_object; ip->i_inode.i_nlink = 2; ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); ip->i_diskflags |= GFS2_DIF_JDATA; ip->i_entries = 2; error = gfs2_meta_inode_buffer(ip, &dibh); if (!gfs2_assert_withdraw(sdp, !error)) { struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1); struct qstr str; gfs2_str2qstr(&str, "."); gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent); dent->de_inum = di->di_num; /* already GFS2 endian */ dent->de_type = cpu_to_be16(DT_DIR); di->di_entries = cpu_to_be32(1); gfs2_str2qstr(&str, ".."); dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1)); gfs2_qstr2dirent(&str, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent); gfs2_inum_out(dip, dent); dent->de_type = cpu_to_be16(DT_DIR); gfs2_dinode_out(ip, di); brelse(dibh); } error = gfs2_change_nlink(dip, +1); gfs2_assert_withdraw(sdp, !error); /* dip already pinned */ gfs2_trans_end(sdp); if (dip->i_alloc->al_rgd) gfs2_inplace_release(dip); gfs2_quota_unlock(dip); gfs2_alloc_put(dip); gfs2_glock_dq_uninit_m(2, ghs); d_instantiate(dentry, inode); mark_inode_dirty(inode); return 0; }
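/*
 * Illustrative userspace sketch (hypothetical path): a freshly created
 * directory carries two links ("." plus its entry in the parent), which
 * is why gfs2_mkdir() above sets i_nlink and i_entries to 2 and bumps
 * the parent's link count for the new "..".
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
	struct stat st;

	if (mkdir("/mnt/gfs2/newdir", 0755) != 0)	/* hypothetical path */
		return 1;
	if (stat("/mnt/gfs2/newdir", &st) == 0)
		printf("nlink=%lu\n", (unsigned long)st.st_nlink);	/* prints 2 */
	return 0;
}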
static int gfs2_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); struct inode *inode = old_dentry->d_inode; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder ghs[2]; struct buffer_head *dibh; int alloc_required; int error; if (S_ISDIR(inode->i_mode)) return -EPERM; error = gfs2_rs_alloc(dip); if (error) return error; gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); error = gfs2_glock_nq(ghs); /* parent */ if (error) goto out_parent; error = gfs2_glock_nq(ghs + 1); /* child */ if (error) goto out_child; error = -ENOENT; if (inode->i_nlink == 0) goto out_gunlock; error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC); if (error) goto out_gunlock; error = gfs2_dir_check(dir, &dentry->d_name, NULL); switch (error) { case -ENOENT: break; case 0: error = -EEXIST; default: goto out_gunlock; } error = -EINVAL; if (!dip->i_inode.i_nlink) goto out_gunlock; error = -EFBIG; if (dip->i_entries == (u32)-1) goto out_gunlock; error = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out_gunlock; error = -EINVAL; if (!ip->i_inode.i_nlink) goto out_gunlock; error = -EMLINK; if (ip->i_inode.i_nlink == (u32)-1) goto out_gunlock; alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name); if (error < 0) goto out_gunlock; error = 0; if (alloc_required) { error = gfs2_quota_lock_check(dip); if (error) goto out_gunlock; error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0); if (error) goto out_gunlock_q; error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + gfs2_rg_blocks(dip, sdp->sd_max_dirres) + 2 * RES_DINODE + RES_STATFS + RES_QUOTA, 0); if (error) goto out_ipres; } else { error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0); if (error) goto out_ipres; } error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out_end_trans; error = gfs2_dir_add(dir, &dentry->d_name, ip); if (error) goto out_brelse; gfs2_trans_add_meta(ip->i_gl, dibh); inc_nlink(&ip->i_inode); ip->i_inode.i_ctime = CURRENT_TIME; ihold(inode); d_instantiate(dentry, inode); mark_inode_dirty(inode); out_brelse: brelse(dibh); out_end_trans: gfs2_trans_end(sdp); out_ipres: if (alloc_required) gfs2_inplace_release(dip); out_gunlock_q: if (alloc_required) gfs2_quota_unlock(dip); out_gunlock: gfs2_glock_dq(ghs + 1); out_child: gfs2_glock_dq(ghs); out_parent: gfs2_holder_uninit(ghs); gfs2_holder_uninit(ghs + 1); return error; }
static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino) { struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode); struct gfs2_holder gh; struct buffer_head *bh; struct gfs2_inum_range_host ir; int error; error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); if (error) return error; error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0); if (error) goto out; mutex_lock(&sdp->sd_inum_mutex); error = gfs2_meta_inode_buffer(ip, &bh); if (error) goto out_end_trans; gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode)); if (!ir.ir_length) { struct buffer_head *m_bh; u64 x, y; __be64 z; error = gfs2_meta_inode_buffer(m_ip, &m_bh); if (error) goto out_brelse; z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)); x = y = be64_to_cpu(z); ir.ir_start = x; ir.ir_length = GFS2_INUM_QUANTUM; x += GFS2_INUM_QUANTUM; if (x < y) gfs2_consist_inode(m_ip); z = cpu_to_be64(x); gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z; brelse(m_bh); } *formal_ino = ir.ir_start++; ir.ir_length--; gfs2_trans_add_bh(ip->i_gl, bh, 1); gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode)); out_brelse: brelse(bh); out_end_trans: mutex_unlock(&sdp->sd_inum_mutex); gfs2_trans_end(sdp); out: gfs2_glock_dq_uninit(&gh); return error; }
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name, struct gfs2_inode *ip) { struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_alloc *al; int alloc_required; struct buffer_head *dibh; int error; al = gfs2_alloc_get(dip); error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); if (error) goto fail; error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name); if (alloc_required < 0) goto fail_quota_locks; if (alloc_required) { error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid); if (error) goto fail_quota_locks; al->al_requested = sdp->sd_max_dirres; error = gfs2_inplace_reserve(dip); if (error) goto fail_quota_locks; error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + al->al_rgd->rd_length + 2 * RES_DINODE + RES_STATFS + RES_QUOTA, 0); if (error) goto fail_ipreserv; } else { error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0); if (error) goto fail_quota_locks; } error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode)); if (error) goto fail_end_trans; error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto fail_end_trans; ip->i_inode.i_nlink = 1; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); return 0; fail_end_trans: gfs2_trans_end(sdp); fail_ipreserv: if (dip->i_alloc->al_rgd) gfs2_inplace_release(dip); fail_quota_locks: gfs2_quota_unlock(dip); fail: gfs2_alloc_put(dip); return error; }
static int gfs2_rename(struct inode *odir, struct dentry *odentry, struct inode *ndir, struct dentry *ndentry) { struct gfs2_inode *odip = GFS2_I(odir); struct gfs2_inode *ndip = GFS2_I(ndir); struct gfs2_inode *ip = GFS2_I(odentry->d_inode); struct gfs2_inode *nip = NULL; struct gfs2_sbd *sdp = GFS2_SB(odir); struct gfs2_holder ghs[5], r_gh; struct gfs2_rgrpd *nrgd; unsigned int num_gh; int dir_rename = 0; int alloc_required; unsigned int x; int error; if (ndentry->d_inode) { nip = GFS2_I(ndentry->d_inode); if (ip == nip) return 0; } /* Make sure we aren't trying to move a dirctory into it's subdir */ if (S_ISDIR(ip->i_inode.i_mode) && odip != ndip) { dir_rename = 1; error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 0, &r_gh); if (error) goto out; error = gfs2_ok_to_move(ip, ndip); if (error) goto out_gunlock_r; } num_gh = 1; gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); if (odip != ndip) { gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); num_gh++; } gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); num_gh++; if (nip) { gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); num_gh++; /* grab the resource lock for unlink flag twiddling * this is the case of the target file already existing * so we unlink before doing the rename */ nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr); if (nrgd) gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh++); } error = gfs2_glock_nq_m(num_gh, ghs); if (error) goto out_uninit; /* Check out the old directory */ error = gfs2_unlink_ok(odip, &odentry->d_name, ip); if (error) goto out_gunlock; /* Check out the new directory */ if (nip) { error = gfs2_unlink_ok(ndip, &ndentry->d_name, nip); if (error) goto out_gunlock; if (S_ISDIR(nip->i_inode.i_mode)) { if (nip->i_di.di_entries < 2) { if (gfs2_consist_inode(nip)) gfs2_dinode_print(nip); error = -EIO; goto out_gunlock; } if (nip->i_di.di_entries > 2) { error = -ENOTEMPTY; goto out_gunlock; } } } else { error = permission(ndir, MAY_WRITE | MAY_EXEC, NULL); if (error) goto out_gunlock; error = gfs2_dir_check(ndir, &ndentry->d_name, NULL); switch (error) { case -ENOENT: error = 0; break; case 0: error = -EEXIST; default: goto out_gunlock; }; if (odip != ndip) { if (!ndip->i_inode.i_nlink) { error = -EINVAL; goto out_gunlock; } if (ndip->i_di.di_entries == (u32)-1) { error = -EFBIG; goto out_gunlock; } if (S_ISDIR(ip->i_inode.i_mode) && ndip->i_inode.i_nlink == (u32)-1) { error = -EMLINK; goto out_gunlock; } } } /* Check out the dir to be renamed */ if (dir_rename) { error = permission(odentry->d_inode, MAY_WRITE, NULL); if (error) goto out_gunlock; } alloc_required = error = gfs2_diradd_alloc_required(ndir, &ndentry->d_name); if (error < 0) goto out_gunlock; error = 0; if (alloc_required) { struct gfs2_alloc *al = gfs2_alloc_get(ndip); error = gfs2_quota_lock(ndip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); if (error) goto out_alloc; error = gfs2_quota_check(ndip, ndip->i_inode.i_uid, ndip->i_inode.i_gid); if (error) goto out_gunlock_q; al->al_requested = sdp->sd_max_dirres; error = gfs2_inplace_reserve(ndip); if (error) goto out_gunlock_q; error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + al->al_rgd->rd_length + 4 * RES_DINODE + 4 * RES_LEAF + RES_STATFS + RES_QUOTA + 4, 0); if (error) goto out_ipreserv; } else { error = gfs2_trans_begin(sdp, 4 * RES_DINODE + 5 * RES_LEAF + 4, 0); if (error) goto out_gunlock; } /* Remove the target file, if it exists */ if (nip) { if (S_ISDIR(nip->i_inode.i_mode)) error = gfs2_rmdiri(ndip, &ndentry->d_name, nip); else { 
error = gfs2_dir_del(ndip, &ndentry->d_name); if (error) goto out_end_trans; error = gfs2_change_nlink(nip, -1); } if (error) goto out_end_trans; } if (dir_rename) { struct qstr name; gfs2_str2qstr(&name, ".."); error = gfs2_change_nlink(ndip, +1); if (error) goto out_end_trans; error = gfs2_change_nlink(odip, -1); if (error) goto out_end_trans; error = gfs2_dir_mvino(ip, &name, ndip, DT_DIR); if (error) goto out_end_trans; } else { struct buffer_head *dibh; error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out_end_trans; ip->i_inode.i_ctime = CURRENT_TIME; gfs2_trans_add_bh(ip->i_gl, dibh, 1); gfs2_dinode_out(ip, dibh->b_data); brelse(dibh); } error = gfs2_dir_del(odip, &odentry->d_name); if (error) goto out_end_trans; error = gfs2_dir_add(ndir, &ndentry->d_name, ip, IF2DT(ip->i_inode.i_mode)); if (error) goto out_end_trans; out_end_trans: gfs2_trans_end(sdp); out_ipreserv: if (alloc_required) gfs2_inplace_release(ndip); out_gunlock_q: if (alloc_required) gfs2_quota_unlock(ndip); out_alloc: if (alloc_required) gfs2_alloc_put(ndip); out_gunlock: gfs2_glock_dq_m(num_gh, ghs); out_uninit: for (x = 0; x < num_gh; x++) gfs2_holder_uninit(ghs + x); out_gunlock_r: if (dir_rename) gfs2_glock_dq_uninit(&r_gh); out: return error; }
static void gfs2_delete_inode(struct inode *inode) { struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int error; if (!test_bit(GIF_USER, &ip->i_flags)) goto out; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); if (unlikely(error)) { gfs2_glock_dq_uninit(&ip->i_iopen_gh); goto out; } gfs2_glock_dq_wait(&ip->i_iopen_gh); gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh); error = gfs2_glock_nq(&ip->i_iopen_gh); if (error) goto out_truncate; if (S_ISDIR(inode->i_mode) && (ip->i_diskflags & GFS2_DIF_EXHASH)) { error = gfs2_dir_exhash_dealloc(ip); if (error) goto out_unlock; } if (ip->i_eattr) { error = gfs2_ea_dealloc(ip); if (error) goto out_unlock; } if (!gfs2_is_stuffed(ip)) { error = gfs2_file_dealloc(ip); if (error) goto out_unlock; } error = gfs2_dinode_dealloc(ip); if (error) goto out_unlock; out_truncate: error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); if (error) goto out_unlock; /* Needs to be done before glock release & also in a transaction */ truncate_inode_pages(&inode->i_data, 0); gfs2_trans_end(sdp); out_unlock: if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) gfs2_glock_dq(&ip->i_iopen_gh); gfs2_holder_uninit(&ip->i_iopen_gh); gfs2_glock_dq_uninit(&gh); if (error && error != GLR_TRYFAILED) fs_warn(sdp, "gfs2_delete_inode: %d\n", error); out: truncate_inode_pages(&inode->i_data, 0); clear_inode(inode); }
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = vma->vm_file->f_path.dentry->d_inode; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); unsigned long last_index; u64 pos = page->index << PAGE_CACHE_SHIFT; unsigned int data_blocks, ind_blocks, rblocks; int alloc_required = 0; struct gfs2_holder gh; struct gfs2_alloc *al; int ret; gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); if (ret) goto out; set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GIF_SW_PAGED, &ip->i_flags); ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required); if (ret || !alloc_required) goto out_unlock; ret = -ENOMEM; al = gfs2_alloc_get(ip); if (al == NULL) goto out_unlock; ret = gfs2_quota_lock_check(ip); if (ret) goto out_alloc_put; gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); al->al_requested = data_blocks + ind_blocks; ret = gfs2_inplace_reserve(ip); if (ret) goto out_quota_unlock; rblocks = RES_DINODE + ind_blocks; if (gfs2_is_jdata(ip)) rblocks += data_blocks ? data_blocks : 1; if (ind_blocks || data_blocks) rblocks += RES_STATFS + RES_QUOTA; ret = gfs2_trans_begin(sdp, rblocks, 0); if (ret) goto out_trans_fail; lock_page(page); ret = -EINVAL; last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT; if (page->index > last_index) goto out_unlock_page; ret = 0; if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping) goto out_unlock_page; if (gfs2_is_stuffed(ip)) { ret = gfs2_unstuff_dinode(ip, page); if (ret) goto out_unlock_page; } ret = gfs2_allocate_page_backing(page); out_unlock_page: unlock_page(page); gfs2_trans_end(sdp); out_trans_fail: gfs2_inplace_release(ip); out_quota_unlock: gfs2_quota_unlock(ip); out_alloc_put: gfs2_alloc_put(ip); out_unlock: gfs2_glock_dq(&gh); out: gfs2_holder_uninit(&gh); if (ret == -ENOMEM) ret = VM_FAULT_OOM; else if (ret) ret = VM_FAULT_SIGBUS; return ret; }
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, int mode) { struct gfs2_inode *ip = GFS2_I(inode); struct buffer_head *dibh; int error; unsigned int nr_blks; sector_t lblock = offset >> inode->i_blkbits; error = gfs2_meta_inode_buffer(ip, &dibh); if (unlikely(error)) return error; gfs2_trans_add_bh(ip->i_gl, dibh, 1); if (gfs2_is_stuffed(ip)) { error = gfs2_unstuff_dinode(ip, NULL); if (unlikely(error)) goto out; } while (len) { struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 }; bh_map.b_size = len; set_buffer_zeronew(&bh_map); error = gfs2_block_map(inode, lblock, &bh_map, 1); if (unlikely(error)) goto out; len -= bh_map.b_size; nr_blks = bh_map.b_size >> inode->i_blkbits; lblock += nr_blks; if (!buffer_new(&bh_map)) continue; if (unlikely(!buffer_zeronew(&bh_map))) { error = -EIO; goto out; } } if (offset + len > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE)) i_size_write(inode, offset + len); mark_inode_dirty(inode); out: brelse(dibh); return error; } static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len, unsigned int *data_blocks, unsigned int *ind_blocks) { const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); unsigned int max_blocks = ip->i_rgd->rd_free_clone; unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); for (tmp = max_data; tmp > sdp->sd_diptrs;) { tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); max_data -= tmp; } /* This calculation isn't the exact reverse of gfs2_write_calc_reserve, so it might end up with fewer data blocks */ if (max_data <= *data_blocks) return; *data_blocks = max_data; *ind_blocks = max_blocks - max_data; *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift; if (*len > max) { *len = max; gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks); } } static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file->f_path.dentry->d_inode; struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *ip = GFS2_I(inode); unsigned int data_blocks = 0, ind_blocks = 0, rblocks; loff_t bytes, max_bytes; struct gfs2_qadata *qa; int error; const loff_t pos = offset; const loff_t count = len; loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1); loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; loff_t max_chunk_size = UINT_MAX & bsize_mask; next = (next + 1) << sdp->sd_sb.sb_bsize_shift; /* We only support the FALLOC_FL_KEEP_SIZE mode */ if (mode & ~FALLOC_FL_KEEP_SIZE) return -EOPNOTSUPP; offset &= bsize_mask; len = next - offset; bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2; if (!bytes) bytes = UINT_MAX; bytes &= bsize_mask; if (bytes == 0) bytes = sdp->sd_sb.sb_bsize; gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); error = gfs2_glock_nq(&ip->i_gh); if (unlikely(error)) goto out_uninit; if (!gfs2_write_alloc_required(ip, offset, len)) goto out_unlock; while (len > 0) { if (len < bytes) bytes = len; qa = gfs2_qadata_get(ip); if (!qa) { error = -ENOMEM; goto out_unlock; } error = gfs2_quota_lock_check(ip); if (error) goto out_alloc_put; retry: gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks); if (error) { if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) { bytes >>= 1; bytes &= bsize_mask; if (bytes == 0) bytes = sdp->sd_sb.sb_bsize; goto retry; } goto out_qunlock; } max_bytes = bytes; calc_max_reserv(ip, (len > max_chunk_size)? 
max_chunk_size: len, &max_bytes, &data_blocks, &ind_blocks); rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + RES_RG_HDR + gfs2_rg_blocks(ip); if (gfs2_is_jdata(ip)) rblocks += data_blocks ? data_blocks : 1; error = gfs2_trans_begin(sdp, rblocks, PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; error = fallocate_chunk(inode, offset, max_bytes, mode); gfs2_trans_end(sdp); if (error) goto out_trans_fail; len -= max_bytes; offset += max_bytes; gfs2_inplace_release(ip); gfs2_quota_unlock(ip); gfs2_qadata_put(ip); } if (error == 0) error = generic_write_sync(file, pos, count); goto out_unlock; out_trans_fail: gfs2_inplace_release(ip); out_qunlock: gfs2_quota_unlock(ip); out_alloc_put: gfs2_qadata_put(ip); out_unlock: gfs2_glock_dq(&ip->i_gh); out_uninit: gfs2_holder_uninit(&ip->i_gh); return error; }
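/*
 * Illustrative userspace sketch (hypothetical path): this version of
 * gfs2_fallocate() rejects every mode flag except FALLOC_FL_KEEP_SIZE,
 * so preallocation either extends i_size (mode 0) or, as below, reserves
 * blocks without changing the reported file size.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/gfs2/prealloc", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* Reserve 16 MiB of blocks but keep the current file size. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) != 0)
		perror("fallocate");
	close(fd);
	return 0;
}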
static int gfs2_rmdir(struct inode *dir, struct dentry *dentry) { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); struct gfs2_inode *ip = GFS2_I(dentry->d_inode); struct gfs2_holder ghs[3]; struct gfs2_rgrpd *rgd; struct gfs2_holder ri_gh; int error; error = gfs2_rindex_hold(sdp, &ri_gh); if (error) return error; gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2); error = gfs2_glock_nq(ghs); /* parent */ if (error) goto out_parent; error = gfs2_glock_nq(ghs + 1); /* child */ if (error) goto out_child; error = gfs2_glock_nq(ghs + 2); /* rgrp */ if (error) goto out_rgrp; error = gfs2_unlink_ok(dip, &dentry->d_name, ip); if (error) goto out_gunlock; if (ip->i_entries < 2) { if (gfs2_consist_inode(ip)) gfs2_dinode_print(ip); error = -EIO; goto out_gunlock; } if (ip->i_entries > 2) { error = -ENOTEMPTY; goto out_gunlock; } error = gfs2_trans_begin(sdp, 2 * RES_DINODE + 3 * RES_LEAF + RES_RG_BIT, 0); if (error) goto out_gunlock; error = gfs2_rmdiri(dip, &dentry->d_name, ip); gfs2_trans_end(sdp); out_gunlock: gfs2_glock_dq(ghs + 2); out_rgrp: gfs2_holder_uninit(ghs + 2); gfs2_glock_dq(ghs + 1); out_child: gfs2_holder_uninit(ghs + 1); gfs2_glock_dq(ghs); out_parent: gfs2_holder_uninit(ghs); gfs2_glock_dq_uninit(&ri_gh); return error; }
static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev, const char *symname, unsigned int size, int excl) { const struct qstr *name = &dentry->d_name; struct gfs2_holder ghs[2]; struct inode *inode = NULL; struct gfs2_inode *dip = GFS2_I(dir), *ip; struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_glock *io_gl; int error; u32 aflags = 0; int arq; if (!name->len || name->len > GFS2_FNAMESIZE) return -ENAMETOOLONG; error = gfs2_rs_alloc(dip); if (error) return error; error = gfs2_rindex_update(sdp); if (error) return error; error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); if (error) goto fail; error = create_ok(dip, name, mode); if ((error == -EEXIST) && S_ISREG(mode) && !excl) { inode = gfs2_lookupi(dir, &dentry->d_name, 0); gfs2_glock_dq_uninit(ghs); d_instantiate(dentry, inode); return IS_ERR(inode) ? PTR_ERR(inode) : 0; } if (error) goto fail_gunlock; arq = error = gfs2_diradd_alloc_required(dir, name); if (error < 0) goto fail_gunlock; inode = new_inode(sdp->sd_vfs); error = -ENOMEM; if (!inode) goto fail_gunlock; ip = GFS2_I(inode); error = gfs2_rs_alloc(ip); if (error) goto fail_free_inode; inode->i_mode = mode; set_nlink(inode, S_ISDIR(mode) ? 2 : 1); inode->i_rdev = dev; inode->i_size = size; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; gfs2_set_inode_blocks(inode, 1); munge_mode_uid_gid(dip, inode); ip->i_goal = dip->i_goal; ip->i_diskflags = 0; ip->i_eattr = 0; ip->i_height = 0; ip->i_depth = 0; ip->i_entries = 0; switch(mode & S_IFMT) { case S_IFREG: if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) || gfs2_tune_get(sdp, gt_new_files_jdata)) ip->i_diskflags |= GFS2_DIF_JDATA; gfs2_set_aops(inode); break; case S_IFDIR: ip->i_diskflags |= (dip->i_diskflags & GFS2_DIF_INHERIT_JDATA); ip->i_diskflags |= GFS2_DIF_JDATA; ip->i_entries = 2; break; } gfs2_set_inode_flags(inode); if ((GFS2_I(sdp->sd_root_dir->d_inode) == dip) || (dip->i_diskflags & GFS2_DIF_TOPDIR)) aflags |= GFS2_AF_ORLOV; error = alloc_dinode(ip, aflags); if (error) goto fail_free_inode; error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (error) goto fail_free_inode; ip->i_gl->gl_object = ip; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1); if (error) goto fail_free_inode; error = gfs2_trans_begin(sdp, RES_DINODE, 0); if (error) goto fail_gunlock2; init_dinode(dip, ip, symname); gfs2_trans_end(sdp); error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) goto fail_gunlock2; error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); if (error) goto fail_gunlock2; ip->i_iopen_gh.gh_gl->gl_object = ip; gfs2_glock_put(io_gl); gfs2_set_iop(inode); insert_inode_hash(inode); error = gfs2_acl_create(dip, inode); if (error) goto fail_gunlock3; error = gfs2_security_init(dip, ip, name); if (error) goto fail_gunlock3; error = link_dinode(dip, name, ip, arq); if (error) goto fail_gunlock3; mark_inode_dirty(inode); gfs2_glock_dq_uninit(ghs); gfs2_glock_dq_uninit(ghs + 1); d_instantiate(dentry, inode); return 0; fail_gunlock3: gfs2_glock_dq_uninit(ghs + 1); if (ip->i_gl) gfs2_glock_put(ip->i_gl); goto fail_gunlock; fail_gunlock2: gfs2_glock_dq_uninit(ghs + 1); fail_free_inode: if (ip->i_gl) gfs2_glock_put(ip->i_gl); gfs2_rs_delete(ip); free_inode_nonrcu(inode); inode = NULL; fail_gunlock: gfs2_glock_dq_uninit(ghs); if (inode && !IS_ERR(inode)) { clear_nlink(inode); mark_inode_dirty(inode); set_bit(GIF_ALLOC_FAILED, &GFS2_I(inode)->i_flags); 
iput(inode); } fail: return error; }
static int gfs2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct buffer_head *dibh; struct gfs2_qadata *qa = ip->i_qadata; unsigned int from = pos & (PAGE_CACHE_SIZE - 1); unsigned int to = from + len; int ret; int i_size_changed = 0; BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL); ret = gfs2_meta_inode_buffer(ip, &dibh); if (unlikely(ret)) { unlock_page(page); page_cache_release(page); goto failed; } gfs2_trans_add_bh(ip->i_gl, dibh, 1); if (gfs2_is_stuffed(ip)) return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page); if (!gfs2_is_writeback(ip)) gfs2_page_add_databufs(ip, page, from, to); /* inlined bits of generic_write_end to avoid marking the inode dirty a second time: */ ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); if (pos + ret > ip->i_disksize) { ip->i_disksize = pos + ret; i_size_write(inode, pos + ret); i_size_changed = 1; } unlock_page(page); page_cache_release(page); if (i_size_changed) { gfs2_dinode_out(ip, dibh->b_data); mark_inode_dirty(inode); } if (inode == sdp->sd_rindex) { adjust_fs_space(inode); sdp->sd_rindex_uptodate = 0; } brelse(dibh); gfs2_trans_end(sdp); failed: if (ip->i_res) gfs2_inplace_release(ip); if (qa) { gfs2_quota_unlock(ip); gfs2_qadata_put(ip); } if (inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); gfs2_holder_uninit(&ip->i_gh); return ret; }
static int gfs2_unlink(struct inode *dir, struct dentry *dentry) { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); struct inode *inode = dentry->d_inode; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder ghs[3]; struct gfs2_rgrpd *rgd; int error; error = gfs2_rindex_update(sdp); if (error) return error; error = -EROFS; gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); if (!rgd) goto out_inodes; gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2); error = gfs2_glock_nq(ghs); /* parent */ if (error) goto out_parent; error = gfs2_glock_nq(ghs + 1); /* child */ if (error) goto out_child; error = -ENOENT; if (inode->i_nlink == 0) goto out_rgrp; if (S_ISDIR(inode->i_mode)) { error = -ENOTEMPTY; if (ip->i_entries > 2 || inode->i_nlink > 2) goto out_rgrp; } error = gfs2_glock_nq(ghs + 2); /* rgrp */ if (error) goto out_rgrp; error = gfs2_unlink_ok(dip, &dentry->d_name, ip); if (error) goto out_gunlock; error = gfs2_trans_begin(sdp, 2*RES_DINODE + 3*RES_LEAF + RES_RG_BIT, 0); if (error) goto out_end_trans; error = gfs2_unlink_inode(dip, dentry); out_end_trans: gfs2_trans_end(sdp); out_gunlock: gfs2_glock_dq(ghs + 2); out_rgrp: gfs2_glock_dq(ghs + 1); out_child: gfs2_glock_dq(ghs); out_parent: gfs2_holder_uninit(ghs + 2); out_inodes: gfs2_holder_uninit(ghs + 1); gfs2_holder_uninit(ghs); return error; }
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = file_inode(vma->vm_file); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_alloc_parms ap = { .aflags = 0, }; unsigned long last_index; u64 pos = page->index << PAGE_CACHE_SHIFT; unsigned int data_blocks, ind_blocks, rblocks; struct gfs2_holder gh; loff_t size; int ret; sb_start_pagefault(inode->i_sb); /* Update file times before taking page lock */ file_update_time(vma->vm_file); ret = get_write_access(inode); if (ret) goto out; ret = gfs2_rs_alloc(ip); if (ret) goto out_write_access; gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); if (ret) goto out_uninit; set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GIF_SW_PAGED, &ip->i_flags); if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { lock_page(page); if (!PageUptodate(page) || page->mapping != inode->i_mapping) { ret = -EAGAIN; unlock_page(page); } goto out_unlock; } ret = gfs2_rindex_update(sdp); if (ret) goto out_unlock; gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); ap.target = data_blocks + ind_blocks; ret = gfs2_quota_lock_check(ip, &ap); if (ret) goto out_unlock; ret = gfs2_inplace_reserve(ip, &ap); if (ret) goto out_quota_unlock; rblocks = RES_DINODE + ind_blocks; if (gfs2_is_jdata(ip)) rblocks += data_blocks ? data_blocks : 1; if (ind_blocks || data_blocks) { rblocks += RES_STATFS + RES_QUOTA; rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks); } ret = gfs2_trans_begin(sdp, rblocks, 0); if (ret) goto out_trans_fail; lock_page(page); ret = -EINVAL; size = i_size_read(inode); last_index = (size - 1) >> PAGE_CACHE_SHIFT; /* Check page index against inode size */ if (size == 0 || (page->index > last_index)) goto out_trans_end; ret = -EAGAIN; /* If truncated, we must retry the operation, we may have raced * with the glock demotion code. */ if (!PageUptodate(page) || page->mapping != inode->i_mapping) goto out_trans_end; /* Unstuff, if required, and allocate backing blocks for page */ ret = 0; if (gfs2_is_stuffed(ip)) ret = gfs2_unstuff_dinode(ip, page); if (ret == 0) ret = gfs2_allocate_page_backing(page); out_trans_end: if (ret) unlock_page(page); gfs2_trans_end(sdp); out_trans_fail: gfs2_inplace_release(ip); out_quota_unlock: gfs2_quota_unlock(ip); out_unlock: gfs2_glock_dq(&gh); out_uninit: gfs2_holder_uninit(&gh); if (ret == 0) { set_page_dirty(page); wait_for_stable_page(page); } out_write_access: put_write_access(inode); out: sb_end_pagefault(inode->i_sb); return block_page_mkwrite_return(ret); } static const struct vm_operations_struct gfs2_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = gfs2_page_mkwrite, }; /** * gfs2_mmap - * @file: The file to map * @vma: The VMA which described the mapping * * There is no need to get a lock here unless we should be updating * atime. We ignore any locking errors since the only consequence is * a missed atime update (which will just be deferred until later). 
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) && !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called with or without a glock held, depending on how it
 * is reached. For regular files it must always be called under a glock;
 * for other file types it does not matter whether we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rs_delete(ip, &inode->i_writecount);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
* * Returns: errno */ static int gfs2_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; int sync_state = inode->i_state & I_DIRTY_ALL; struct gfs2_inode *ip = GFS2_I(inode); int ret = 0, ret1 = 0; if (mapping->nrpages) { ret1 = filemap_fdatawrite_range(mapping, start, end); if (ret1 == -EIO) return ret1; } if (!gfs2_is_jdata(ip)) sync_state &= ~I_DIRTY_PAGES; if (datasync) sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME); if (sync_state) { ret = sync_inode_metadata(inode, 1); if (ret) return ret; if (gfs2_is_jdata(ip)) filemap_write_and_wait(mapping); gfs2_ail_flush(ip->i_gl, 1); } if (mapping->nrpages) ret = filemap_fdatawait_range(mapping, start, end); return ret ? ret : ret1; } /** * gfs2_file_write_iter - Perform a write to a file * @iocb: The io context * @from: The data to write * * We have to do a lock/unlock here to refresh the inode size for * O_APPEND writes, otherwise we can land up writing at the wrong * offset. There is still a race, but provided the app is using its * own file locking, this will make O_APPEND work as expected. * */ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct gfs2_inode *ip = GFS2_I(file_inode(file)); int ret; ret = gfs2_rs_alloc(ip); if (ret) return ret; gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from)); if (iocb->ki_flags & IOCB_APPEND) { struct gfs2_holder gh; ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); if (ret) return ret; gfs2_glock_dq_uninit(&gh); } return generic_file_write_iter(iocb, from); } static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, int mode) { struct gfs2_inode *ip = GFS2_I(inode); struct buffer_head *dibh; int error; unsigned int nr_blks; sector_t lblock = offset >> inode->i_blkbits; error = gfs2_meta_inode_buffer(ip, &dibh); if (unlikely(error)) return error; gfs2_trans_add_meta(ip->i_gl, dibh); if (gfs2_is_stuffed(ip)) { error = gfs2_unstuff_dinode(ip, NULL); if (unlikely(error)) goto out; } while (len) { struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 }; bh_map.b_size = len; set_buffer_zeronew(&bh_map); error = gfs2_block_map(inode, lblock, &bh_map, 1); if (unlikely(error)) goto out; len -= bh_map.b_size; nr_blks = bh_map.b_size >> inode->i_blkbits; lblock += nr_blks; if (!buffer_new(&bh_map)) continue; if (unlikely(!buffer_zeronew(&bh_map))) { error = -EIO; goto out; } } out: brelse(dibh); return error; } /** * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of * blocks, determine how many bytes can be written. * @ip: The inode in question. * @len: Max cap of bytes. What we return in *len must be <= this. * @data_blocks: Compute and return the number of data blocks needed * @ind_blocks: Compute and return the number of indirect blocks needed * @max_blocks: The total blocks available to work with. * * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
*/ static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len, unsigned int *data_blocks, unsigned int *ind_blocks, unsigned int max_blocks) { loff_t max = *len; const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); for (tmp = max_data; tmp > sdp->sd_diptrs;) { tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); max_data -= tmp; } *data_blocks = max_data; *ind_blocks = max_blocks - max_data; *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift; if (*len > max) { *len = max; gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks); } } static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_alloc_parms ap = { .aflags = 0, }; unsigned int data_blocks = 0, ind_blocks = 0, rblocks; loff_t bytes, max_bytes, max_blks = UINT_MAX; int error; const loff_t pos = offset; const loff_t count = len; loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1); loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; loff_t max_chunk_size = UINT_MAX & bsize_mask; next = (next + 1) << sdp->sd_sb.sb_bsize_shift; offset &= bsize_mask; len = next - offset; bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2; if (!bytes) bytes = UINT_MAX; bytes &= bsize_mask; if (bytes == 0) bytes = sdp->sd_sb.sb_bsize; gfs2_size_hint(file, offset, len); gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks); ap.min_target = data_blocks + ind_blocks; while (len > 0) { if (len < bytes) bytes = len; if (!gfs2_write_alloc_required(ip, offset, bytes)) { len -= bytes; offset += bytes; continue; } /* We need to determine how many bytes we can actually * fallocate without exceeding quota or going over the * end of the fs. We start off optimistically by assuming * we can write max_bytes */ max_bytes = (len > max_chunk_size) ? max_chunk_size : len; /* Since max_bytes is most likely a theoretical max, we * calculate a more realistic 'bytes' to serve as a good * starting point for the number of bytes we may be able * to write */ gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); ap.target = data_blocks + ind_blocks; error = gfs2_quota_lock_check(ip, &ap); if (error) return error; /* ap.allowed tells us how many blocks quota will allow * us to write. Check if this reduces max_blks */ if (ap.allowed && ap.allowed < max_blks) max_blks = ap.allowed; error = gfs2_inplace_reserve(ip, &ap); if (error) goto out_qunlock; /* check if the selected rgrp limits our max_blks further */ if (ap.allowed && ap.allowed < max_blks) max_blks = ap.allowed; /* Almost done. Calculate bytes that can be written using * max_blks. We also recompute max_bytes, data_blocks and * ind_blocks */ calc_max_reserv(ip, &max_bytes, &data_blocks, &ind_blocks, max_blks); rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks); if (gfs2_is_jdata(ip)) rblocks += data_blocks ? 
data_blocks : 1; error = gfs2_trans_begin(sdp, rblocks, PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; error = fallocate_chunk(inode, offset, max_bytes, mode); gfs2_trans_end(sdp); if (error) goto out_trans_fail; len -= max_bytes; offset += max_bytes; gfs2_inplace_release(ip); gfs2_quota_unlock(ip); } if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) { i_size_write(inode, pos + count); file_update_time(file); mark_inode_dirty(inode); } return generic_write_sync(file, pos, count); out_trans_fail: gfs2_inplace_release(ip); out_qunlock: gfs2_quota_unlock(ip); return error; } static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; int ret; if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip)) return -EOPNOTSUPP; mutex_lock(&inode->i_mutex); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); if (ret) goto out_uninit; if (!(mode & FALLOC_FL_KEEP_SIZE) && (offset + len) > inode->i_size) { ret = inode_newsize_ok(inode, offset + len); if (ret) goto out_unlock; } ret = get_write_access(inode); if (ret) goto out_unlock; ret = gfs2_rs_alloc(ip); if (ret) goto out_putw; ret = __gfs2_fallocate(file, mode, offset, len); if (ret) gfs2_rs_deltree(ip->i_res); out_putw: put_write_access(inode); out_unlock: gfs2_glock_dq(&gh); out_uninit: gfs2_holder_uninit(&gh); mutex_unlock(&inode->i_mutex); return ret; } static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { int error; struct gfs2_inode *ip = GFS2_I(out->f_mapping->host); error = gfs2_rs_alloc(ip); if (error) return (ssize_t)error; gfs2_size_hint(out, *ppos, len); return iter_file_splice_write(pipe, out, ppos, len, flags); } #ifdef CONFIG_GFS2_FS_LOCKING_DLM /** * gfs2_lock - acquire/release a posix lock on a file * @file: the file pointer * @cmd: either modify or retrieve lock state, possibly wait * @fl: type and range of lock * * Returns: errno */ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) { struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host); struct lm_lockstruct *ls = &sdp->sd_lockstruct; if (!(fl->fl_flags & FL_POSIX)) return -ENOLCK; if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK) return -ENOLCK; if (cmd == F_CANCELLK) { /* Hack: */ cmd = F_SETLK; fl->fl_type = F_UNLCK; } if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { if (fl->fl_type == F_UNLCK) locks_lock_file_wait(file, fl); return -EIO; } if (IS_GETLK(cmd)) return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); else if (fl->fl_type == F_UNLCK) return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl); else return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl); } static int do_flock(struct file *file, int cmd, struct file_lock *fl) { struct gfs2_file *fp = file->private_data; struct gfs2_holder *fl_gh = &fp->f_fl_gh; struct gfs2_inode *ip = GFS2_I(file_inode(file)); struct gfs2_glock *gl; unsigned int state; int flags; int error = 0; int sleeptime; state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; flags = (IS_SETLKW(cmd) ? 
0 : LM_FLAG_TRY_1CB) | GL_EXACT; mutex_lock(&fp->f_fl_mutex); gl = fl_gh->gh_gl; if (gl) { if (fl_gh->gh_state == state) goto out; locks_lock_file_wait(file, &(struct file_lock){.fl_type = F_UNLCK, .fl_flags = FL_FLOCK}); gfs2_glock_dq(fl_gh); gfs2_holder_reinit(state, flags, fl_gh); } else {
static int gfs2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct gfs2_inode *ip = GFS2_I(mapping->host); struct gfs2_sbd *sdp = GFS2_SB(mapping->host); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); unsigned int data_blocks = 0, ind_blocks = 0, rblocks; int alloc_required; int error = 0; struct gfs2_alloc *al; pgoff_t index = pos >> PAGE_CACHE_SHIFT; unsigned from = pos & (PAGE_CACHE_SIZE - 1); unsigned to = from + len; struct page *page; gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); error = gfs2_glock_nq(&ip->i_gh); if (unlikely(error)) goto out_uninit; if (&ip->i_inode == sdp->sd_rindex) { error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &m_ip->i_gh); if (unlikely(error)) { gfs2_glock_dq(&ip->i_gh); goto out_uninit; } } error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); if (error) goto out_unlock; if (alloc_required || gfs2_is_jdata(ip)) gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); if (alloc_required) { al = gfs2_alloc_get(ip); if (!al) { error = -ENOMEM; goto out_unlock; } error = gfs2_quota_lock_check(ip); if (error) goto out_alloc_put; al->al_requested = data_blocks + ind_blocks; error = gfs2_inplace_reserve(ip); if (error) goto out_qunlock; } rblocks = RES_DINODE + ind_blocks; if (gfs2_is_jdata(ip)) rblocks += data_blocks ? data_blocks : 1; if (ind_blocks || data_blocks) rblocks += RES_STATFS + RES_QUOTA; if (&ip->i_inode == sdp->sd_rindex) rblocks += 2 * RES_STATFS; error = gfs2_trans_begin(sdp, rblocks, PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; error = -ENOMEM; flags |= AOP_FLAG_NOFS; page = grab_cache_page_write_begin(mapping, index, flags); *pagep = page; if (unlikely(!page)) goto out_endtrans; if (gfs2_is_stuffed(ip)) { error = 0; if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) { error = gfs2_unstuff_dinode(ip, page); if (error == 0) goto prepare_write; } else if (!PageUptodate(page)) { error = stuffed_readpage(ip, page); } goto out; } prepare_write: error = block_prepare_write(page, from, to, gfs2_block_map); out: if (error == 0) return 0; page_cache_release(page); if (pos + len > ip->i_inode.i_size) vmtruncate(&ip->i_inode, ip->i_inode.i_size); out_endtrans: gfs2_trans_end(sdp); out_trans_fail: if (alloc_required) { gfs2_inplace_release(ip); out_qunlock: gfs2_quota_unlock(ip); out_alloc_put: gfs2_alloc_put(ip); } out_unlock: if (&ip->i_inode == sdp->sd_rindex) { gfs2_glock_dq(&m_ip->i_gh); gfs2_holder_uninit(&m_ip->i_gh); } gfs2_glock_dq(&ip->i_gh); out_uninit: gfs2_holder_uninit(&ip->i_gh); return error; }
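/*
 * Editor's sketch (not part of GFS2): gfs2_write_begin() above and
 * gfs2_page_mkwrite() earlier size their transactions the same way -
 * one block for the dinode, the indirect blocks needed for the mapping,
 * the data blocks themselves only when data journaling is in effect,
 * and statfs/quota/rgrp overhead only when an allocation is required.
 * The helper below merely restates the gfs2_page_mkwrite() variant of
 * that calculation; it assumes the RES_* constants, gfs2_is_jdata()
 * and the two-argument gfs2_rg_blocks() used there.
 */
static unsigned int write_rblocks_sketch(struct gfs2_inode *ip,
					 unsigned int data_blocks,
					 unsigned int ind_blocks)
{
	unsigned int rblocks = RES_DINODE + ind_blocks;

	if (gfs2_is_jdata(ip))		/* journaled data blocks are logged too */
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {	/* allocation => statfs/quota/rgrp */
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	return rblocks;
}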
static int gfs2_write_jdata_pagevec(struct address_space *mapping, struct writeback_control *wbc, struct pagevec *pvec, int nr_pages, pgoff_t end, pgoff_t *done_index) { struct inode *inode = mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize); int i; int ret; ret = gfs2_trans_begin(sdp, nrblocks, nrblocks); if (ret < 0) return ret; for(i = 0; i < nr_pages; i++) { struct page *page = pvec->pages[i]; /* * At this point, the page may be truncated or * invalidated (changing page->mapping to NULL), or * even swizzled back from swapper_space to tmpfs file * mapping. However, page->index will not change * because we have a reference on the page. */ if (page->index > end) { /* * can't be range_cyclic (1st pass) because * end == -1 in that case. */ ret = 1; break; } *done_index = page->index; lock_page(page); if (unlikely(page->mapping != mapping)) { continue_unlock: unlock_page(page); continue; } if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; } if (PageWriteback(page)) { if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); else goto continue_unlock; } BUG_ON(PageWriteback(page)); if (!clear_page_dirty_for_io(page)) goto continue_unlock; trace_wbc_writepage(wbc, inode_to_bdi(inode)); ret = __gfs2_jdata_writepage(page, wbc); if (unlikely(ret)) { if (ret == AOP_WRITEPAGE_ACTIVATE) { unlock_page(page); ret = 0; } else { /* * done_index is set past this page, * so media errors will not choke * background writeout for the entire * file. This has consequences for * range_cyclic semantics (ie. it may * not be suitable for data integrity * writeout). */ *done_index = page->index + 1; ret = 1; break; } } /* * We stop writing back only if we are not doing * integrity sync. In case of integrity sync we have to * keep going until we have written all the pages * we tagged for writeback prior to entering this loop. */ if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { ret = 1; break; } } gfs2_trans_end(sdp); return ret; }
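/*
 * Editor's note (illustrative, not upstream code): the transaction opened
 * by gfs2_write_jdata_pagevec() above reserves one journal block per
 * filesystem block covered by the pages in the pagevec. The sketch below
 * restates that calculation; with 4096-byte pages and a 1024-byte block
 * size, an 8-page pagevec would reserve 8 * (4096 / 1024) = 32 blocks.
 */
static inline unsigned int jdata_pagevec_rblocks_sketch(struct inode *inode,
							int nr_pages)
{
	return nr_pages * (PAGE_SIZE / inode->i_sb->s_blocksize);
}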
static int gfs2_unlink(struct inode *dir, struct dentry *dentry) { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); struct gfs2_inode *ip = GFS2_I(dentry->d_inode); struct gfs2_holder ghs[3]; struct gfs2_rgrpd *rgd; struct gfs2_holder ri_gh; int error; error = gfs2_rindex_hold(sdp, &ri_gh); if (error) return error; gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2); error = gfs2_glock_nq(ghs); /* parent */ if (error) goto out_parent; error = gfs2_glock_nq(ghs + 1); /* child */ if (error) goto out_child; error = gfs2_glock_nq(ghs + 2); /* rgrp */ if (error) goto out_rgrp; error = gfs2_unlink_ok(dip, &dentry->d_name, ip); if (error) goto out_gunlock; error = gfs2_trans_begin(sdp, 2*RES_DINODE + RES_LEAF + RES_RG_BIT, 0); if (error) goto out_gunlock; error = gfs2_dir_del(dip, &dentry->d_name); if (error) goto out_end_trans; error = gfs2_change_nlink(ip, -1); out_end_trans: gfs2_trans_end(sdp); out_gunlock: gfs2_glock_dq(ghs + 2); out_rgrp: gfs2_holder_uninit(ghs + 2); gfs2_glock_dq(ghs + 1); out_child: gfs2_holder_uninit(ghs + 1); gfs2_glock_dq(ghs); out_parent: gfs2_holder_uninit(ghs); gfs2_glock_dq_uninit(&ri_gh); return error; }
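/*
 * Editor's note on gfs2_unlink() above: the glocks are acquired in a
 * fixed order - the parent directory, then the inode being unlinked,
 * then the resource group that contains that inode - and the error
 * paths release them in the reverse order. The rindex glock is held
 * across the whole operation so that the rgrp looked up with
 * gfs2_blk2rgrpd() remains valid until the holders are released.
 */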
/** * do_gfs2_set_flags - set flags on an inode * @filp: The file whose inode's flags are being changed * @reqflags: The flags to set * @mask: Indicates which flags are valid * */ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) { struct inode *inode = filp->f_path.dentry->d_inode; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct buffer_head *bh; struct gfs2_holder gh; int error; u32 new_flags, flags; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); if (error) return error; flags = ip->i_di.di_flags; new_flags = (flags & ~mask) | (reqflags & mask); if ((new_flags ^ flags) == 0) goto out; if (S_ISDIR(inode->i_mode)) { if ((new_flags ^ flags) & GFS2_DIF_JDATA) new_flags ^= (GFS2_DIF_JDATA|GFS2_DIF_INHERIT_JDATA); if ((new_flags ^ flags) & GFS2_DIF_DIRECTIO) new_flags ^= (GFS2_DIF_DIRECTIO|GFS2_DIF_INHERIT_DIRECTIO); } error = -EINVAL; if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET) goto out; error = -EPERM; if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE)) goto out; if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY)) goto out; if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) && !capable(CAP_LINUX_IMMUTABLE)) goto out; if (!IS_IMMUTABLE(inode)) { error = permission(inode, MAY_WRITE, NULL); if (error) goto out; } error = gfs2_trans_begin(sdp, RES_DINODE, 0); if (error) goto out; error = gfs2_meta_inode_buffer(ip, &bh); if (error) goto out_trans_end; gfs2_trans_add_bh(ip->i_gl, bh, 1); ip->i_di.di_flags = new_flags; gfs2_dinode_out(ip, bh->b_data); brelse(bh); gfs2_set_inode_flags(inode); out_trans_end: gfs2_trans_end(sdp); out: gfs2_glock_dq_uninit(&gh); return error; }
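/*
 * Editor's sketch (not part of GFS2): do_gfs2_set_flags() merges the
 * requested flags into the on-disk flags as
 * new_flags = (flags & ~mask) | (reqflags & mask), so bits outside the
 * caller-supplied mask are preserved and bits inside it are taken from
 * the request. For example, with flags = GFS2_DIF_JDATA | GFS2_DIF_IMMUTABLE,
 * mask = GFS2_DIF_JDATA and reqflags = 0, the result keeps
 * GFS2_DIF_IMMUTABLE and clears GFS2_DIF_JDATA.
 */
static inline u32 gfs2_merge_flags_sketch(u32 flags, u32 reqflags, u32 mask)
{
	return (flags & ~mask) | (reqflags & mask);
}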