/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
 *
 */
ssize_t
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
    struct address_space *mapping = out->f_mapping;
    struct inode *inode = mapping->host;
    ssize_t ret;
    int err;

    err = remove_suid(out->f_path.dentry);
    if (unlikely(err))
        return err;

    ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
    if (ret > 0) {
        unsigned long nr_pages;

        *ppos += ret;
        nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        /*
         * If file or inode is SYNC and we actually wrote some data,
         * sync it.
         */
        if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
            err = generic_osync_inode(inode, mapping,
                                      OSYNC_METADATA|OSYNC_DATA);
            if (err)
                ret = err;
        }
        balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
    }

    return ret;
}
/**
 * generic_write_sync - perform syncing after a write if file / inode is sync
 * @file: file to which the write happened
 * @pos: offset where the write started
 * @count: length of the write
 *
 * This is just a simple wrapper around our general syncing function.
 */
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
    if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
        return 0;
    return vfs_fsync_range(file, pos, pos + count - 1,
                           (file->f_flags & __O_SYNC) ? 0 : 1);
}
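/*
 * Illustrative caller sketch (not taken from the sources above): buffered
 * write paths of this kernel generation typically invoke generic_write_sync()
 * only when the write actually transferred bytes, and fold a sync failure
 * into the return value. The function name example_file_write is an invented
 * placeholder; generic_file_aio_write() is assumed as the underlying write.
 */
static ssize_t example_file_write(struct kiocb *iocb, const struct iovec *iov,
                                  unsigned long nr_segs, loff_t pos)
{
    ssize_t ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

    if (ret > 0) {
        /* sync only what this write covered: [pos, pos + ret) */
        int err = generic_write_sync(iocb->ki_filp, pos, ret);
        if (err < 0)
            ret = err;
    }
    return ret;
}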
static VALUE method_get_children(VALUE self, VALUE reqid, VALUE path, VALUE async, VALUE watch)
{
    STANDARD_PREAMBLE(self, zk, reqid, path, async, watch, data_ctx, watch_ctx, call_type);

    struct String_vector strings;
    struct Stat stat;

    int rc;
    switch (call_type) {
    case SYNC:
        rc = zoo_get_children2(zk->zh, RSTRING_PTR(path), 0, &strings, &stat);
        break;

    case SYNC_WATCH:
        rc = zoo_wget_children2(zk->zh, RSTRING_PTR(path), zkrb_state_callback,
                                watch_ctx, &strings, &stat);
        break;

    case ASYNC:
        rc = zoo_aget_children2(zk->zh, RSTRING_PTR(path), 0,
                                zkrb_strings_stat_callback, data_ctx);
        break;

    case ASYNC_WATCH:
        rc = zoo_awget_children2(zk->zh, RSTRING_PTR(path), zkrb_state_callback,
                                 watch_ctx, zkrb_strings_stat_callback, data_ctx);
        break;
    }

    VALUE output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc));
    if (IS_SYNC(call_type) && rc == ZOK) {
        rb_ary_push(output, zkrb_string_vector_to_ruby(&strings));
        rb_ary_push(output, zkrb_stat_to_rarray(&stat));
    }
    return output;
}
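/*
 * Hypothetical refactoring sketch, not part of the binding above: every
 * method_* function builds its result the same way -- an array whose first
 * element is the ZooKeeper return code, with payload elements appended only
 * when IS_SYNC(call_type) and rc == ZOK. A shared constructor would make the
 * convention explicit; zkrb_new_result() is an invented name.
 */
static VALUE zkrb_new_result(int rc)
{
    VALUE output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc)); /* [rc]; sync callers append payload */
    return output;
}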
static VALUE method_set(VALUE self, VALUE reqid, VALUE path, VALUE data, VALUE async, VALUE version)
{
    VALUE watch = Qfalse;
    struct Stat stat;
    const char *data_ptr;
    size_t data_len;
    int rc;
    VALUE output;

    STANDARD_PREAMBLE(self, zk, reqid, path, async, watch, data_ctx, watch_ctx, call_type);

    if (data != Qnil)
        Check_Type(data, T_STRING);

    data_ptr = (data == Qnil) ? NULL : RSTRING_PTR(data);
    data_len = (data == Qnil) ? -1 : RSTRING_LEN(data);

    switch (call_type) {
    case SYNC:
        rc = zoo_set2(zk->zh, RSTRING_PTR(path), data_ptr, data_len,
                      FIX2INT(version), &stat);
        break;

    case ASYNC:
        rc = zoo_aset(zk->zh, RSTRING_PTR(path), data_ptr, data_len,
                      FIX2INT(version), zkrb_stat_callback, data_ctx);
        break;

    default:
        /* TODO(wickman) raise proper argument error */
        return Qnil;
        break;
    }

    output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc));
    if (IS_SYNC(call_type) && rc == ZOK) {
        rb_ary_push(output, zkrb_stat_to_rarray(&stat));
    }
    return output;
}
static VALUE method_get_acl(VALUE self, VALUE reqid, VALUE path, VALUE async)
{
    VALUE watch = Qfalse;
    struct ACL_vector acls;
    struct Stat stat;
    int rc;
    VALUE output;

    STANDARD_PREAMBLE(self, zk, reqid, path, async, watch, data_ctx, watch_ctx, call_type);

    switch (call_type) {
    case SYNC:
        rc = zoo_get_acl(zk->zh, RSTRING_PTR(path), &acls, &stat);
        break;

    case ASYNC:
        rc = zoo_aget_acl(zk->zh, RSTRING_PTR(path), zkrb_acl_callback, data_ctx);
        break;

    default:
        /* TODO(wickman) raise proper argument error */
        return Qnil;
        break;
    }

    output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc));
    if (IS_SYNC(call_type) && rc == ZOK) {
        rb_ary_push(output, zkrb_acl_vector_to_ruby(&acls));
        rb_ary_push(output, zkrb_stat_to_rarray(&stat));
        deallocate_ACL_vector(&acls);
    }
    return output;
}
void nilfs_evict_inode(struct inode *inode)
{
    struct nilfs_transaction_info ti;
    struct super_block *sb = inode->i_sb;
    struct nilfs_inode_info *ii = NILFS_I(inode);
    int ret;

    if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        nilfs_clear_inode(inode);
        return;
    }
    nilfs_transaction_begin(sb, &ti, 0); /* never fails */

    truncate_inode_pages_final(&inode->i_data);

    /* TODO: some of the following operations may fail. */
    nilfs_truncate_bmap(ii, 0);
    nilfs_mark_inode_dirty(inode);
    clear_inode(inode);

    ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
    if (!ret)
        atomic64_dec(&ii->i_root->inodes_count);

    nilfs_clear_inode(inode);

    if (IS_SYNC(inode))
        nilfs_set_transaction_flag(NILFS_TI_SYNC);
    nilfs_transaction_commit(sb);
    /*
     * May construct a logical segment and may fail in sync mode.
     * But delete_inode has no return value.
     */
}
static VALUE method_exists(VALUE self, VALUE reqid, VALUE path, VALUE async, VALUE watch)
{
    struct Stat stat;
    int rc;
    VALUE output;

    STANDARD_PREAMBLE(self, zk, reqid, path, async, watch, data_ctx, watch_ctx, call_type);

    switch (call_type) {
    case SYNC:
        rc = zoo_exists(zk->zh, RSTRING_PTR(path), 0, &stat);
        break;

    case SYNC_WATCH:
        rc = zoo_wexists(zk->zh, RSTRING_PTR(path), zkrb_state_callback,
                         watch_ctx, &stat);
        break;

    case ASYNC:
        rc = zoo_aexists(zk->zh, RSTRING_PTR(path), 0, zkrb_stat_callback, data_ctx);
        break;

    case ASYNC_WATCH:
        rc = zoo_awexists(zk->zh, RSTRING_PTR(path), zkrb_state_callback,
                          watch_ctx, zkrb_stat_callback, data_ctx);
        break;
    }

    output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc));
    if (IS_SYNC(call_type) && rc == ZOK) {
        rb_ary_push(output, zkrb_stat_to_rarray(&stat));
    }
    return output;
}
/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
 *
 */
ssize_t
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
    struct address_space *mapping = out->f_mapping;
    struct inode *inode = mapping->host;
    ssize_t ret;
    int err;

    err = remove_suid(out->f_path.dentry);
    if (unlikely(err))
        return err;

    ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
    if (ret > 0) {
        *ppos += ret;

        /*
         * If file or inode is SYNC and we actually wrote some data,
         * sync it.
         */
        if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
            err = generic_osync_inode(inode, mapping,
                                      OSYNC_METADATA|OSYNC_DATA);
            if (err)
                ret = err;
        }
    }

    return ret;
}
void nilfs_evict_inode(struct inode *inode)
{
    struct nilfs_transaction_info ti;
    struct super_block *sb = inode->i_sb;
    struct nilfs_inode_info *ii = NILFS_I(inode);
    int ret;

    if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
        if (inode->i_data.nrpages)
            truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);
        nilfs_clear_inode(inode);
        return;
    }
    nilfs_transaction_begin(sb, &ti, 0);

    if (inode->i_data.nrpages)
        truncate_inode_pages(&inode->i_data, 0);

    nilfs_truncate_bmap(ii, 0);
    nilfs_mark_inode_dirty(inode);
    end_writeback(inode);

    ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
    if (!ret)
        atomic_dec(&ii->i_root->inodes_count);

    nilfs_clear_inode(inode);

    if (IS_SYNC(inode))
        nilfs_set_transaction_flag(NILFS_TI_SYNC);
    nilfs_transaction_commit(sb);
}
static VALUE method_get_acl(VALUE self, VALUE reqid, VALUE path, VALUE async)
{
    STANDARD_PREAMBLE(self, zk, reqid, path, async, Qfalse, call_type);
    VALUE output = Qnil;

    struct ACL_vector acls;
    struct Stat stat;

    int rc = ZOK;
    switch (call_type) {
#ifdef THREADED
    case SYNC:
        rc = zkrb_call_zoo_get_acl(zk->zh, RSTRING_PTR(path), &acls, &stat);
        break;
#endif
    case ASYNC:
        rc = zkrb_call_zoo_aget_acl(zk->zh, RSTRING_PTR(path),
                                    zkrb_acl_callback, CTX_ALLOC(zk, reqid));
        break;

    default:
        raise_invalid_call_type_err(call_type);
        break;
    }

    output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc));
    if (IS_SYNC(call_type) && rc == ZOK) {
        rb_ary_push(output, zkrb_acl_vector_to_ruby(&acls));
        rb_ary_push(output, zkrb_stat_to_rarray(&stat));
        deallocate_ACL_vector(&acls);
    }
    return output;
}
void nilfs_truncate(struct inode *inode)
{
    unsigned long blkoff;
    unsigned int blocksize;
    struct nilfs_transaction_info ti;
    struct super_block *sb = inode->i_sb;
    struct nilfs_inode_info *ii = NILFS_I(inode);

    inode_debug(2, "called. (ino=%lu)\n", inode->i_ino);
    if (!test_bit(NILFS_I_BMAP, &ii->i_state))
        return;
    if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
        return;

    blocksize = sb->s_blocksize;
    blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;

    nilfs_transaction_begin(sb, &ti, 0); /* never fails */

    block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

    nilfs_truncate_bmap(ii, blkoff);

    inode->i_mtime = inode->i_ctime = CURRENT_TIME;
    if (IS_SYNC(inode))
        nilfs_set_transaction_flag(NILFS_TI_SYNC);

    nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
    nilfs_transaction_commit(sb);
    /*
     * May construct a logical segment and may fail in sync mode.
     * But truncate has no return value.
     */
}
void nilfs_delete_inode(struct inode *inode)
{
    struct nilfs_transaction_info ti;
    struct super_block *sb = inode->i_sb;
    struct nilfs_inode_info *ii = NILFS_I(inode);

    if (unlikely(is_bad_inode(inode))) {
        if (inode->i_data.nrpages)
            truncate_inode_pages(&inode->i_data, 0);
        clear_inode(inode);
        return;
    }
    nilfs_transaction_begin(sb, &ti, 0); /* never fails */

    if (inode->i_data.nrpages)
        truncate_inode_pages(&inode->i_data, 0);

    nilfs_truncate_bmap(ii, 0);
    nilfs_free_inode(inode);
    /* nilfs_free_inode() marks inode buffer dirty */
    if (IS_SYNC(inode))
        nilfs_set_transaction_flag(NILFS_TI_SYNC);
    nilfs_transaction_commit(sb);
    /*
     * May construct a logical segment and may fail in sync mode.
     * But delete_inode has no return value.
     */
}
static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
    mark_buffer_dirty_inode(bh, inode);
    if (IS_SYNC(inode)) {
        ll_rw_block(WRITE, 1, &bh);
        wait_on_buffer(bh);
    }
}
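/*
 * Roughly equivalent sketch using the consolidated buffer-cache helper:
 * sync_dirty_buffer() performs the same submit-and-wait sequence as
 * ll_rw_block(WRITE, 1, &bh) followed by wait_on_buffer(bh). The name
 * dirty_indirect_sync is illustrative only.
 */
static inline void dirty_indirect_sync(struct buffer_head *bh, struct inode *inode)
{
    mark_buffer_dirty_inode(bh, inode);
    if (IS_SYNC(inode))
        sync_dirty_buffer(bh); /* submit the write and wait for completion */
}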
static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file->f_path.dentry->d_inode;
    ssize_t ret;
    int err;

    ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

    /*
     * Skip flushing if there was an error, or if nothing was written.
     */
    if (ret <= 0)
        return ret;

    /*
     * If the inode is IS_SYNC, or is O_SYNC and we are doing data
     * journalling then we need to make sure that we force the transaction
     * to disk to keep all metadata uptodate synchronously.
     */
    if (file->f_flags & O_SYNC) {
        /*
         * If we are non-data-journaled, then the dirty data has
         * already been flushed to backing store by generic_osync_inode,
         * and the inode has been flushed too if there have been any
         * modifications other than mere timestamp updates.
         *
         * Open question --- do we care about flushing timestamps too
         * if the inode is IS_SYNC?
         */
        if (!ext4_should_journal_data(inode))
            return ret;

        goto force_commit;
    }

    /*
     * So we know that there has been no forced data flush. If the inode
     * is marked IS_SYNC, we need to force one ourselves.
     */
    if (!IS_SYNC(inode))
        return ret;

    /*
     * Open question #2 --- should we force data to disk here too? If we
     * don't, the only impact is that data=writeback filesystems won't
     * flush data to disk automatically on IS_SYNC, only metadata (but
     * historically, that is what ext2 has done.)
     */
force_commit:
    err = ext4_force_commit(inode->i_sb);
    if (err)
        return err;
    return ret;
}
static int ufs_trunc_tindirect(struct inode *inode)
{
    struct super_block *sb = inode->i_sb;
    struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
    struct ufs_inode_info *ufsi = UFS_I(inode);
    struct ufs_buffer_head *tind_bh;
    u64 tindirect_block, tmp, i;
    void *tind, *p;
    int retry;

    UFSD("ENTER: ino %lu\n", inode->i_ino);

    retry = 0;

    tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
        ? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift)
        : 0;

    p = ufs_get_direct_data_ptr(uspi, ufsi, UFS_TIND_BLOCK);
    if (!(tmp = ufs_data_ptr_to_cpu(sb, p)))
        return 0;
    tind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
    if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
        ubh_brelse(tind_bh);
        return 1;
    }
    if (!tind_bh) {
        ufs_data_ptr_clear(uspi, p);
        return 0;
    }

    for (i = tindirect_block; i < uspi->s_apb; i++) {
        tind = ubh_get_data_ptr(uspi, tind_bh, i);
        retry |= ufs_trunc_dindirect(inode, UFS_NDADDR + uspi->s_apb +
                                     ((i + 1) << uspi->s_2apbshift), tind);
        ubh_mark_buffer_dirty(tind_bh);
    }
    for (i = 0; i < uspi->s_apb; i++)
        if (!ufs_is_data_ptr_zero(uspi, ubh_get_data_ptr(uspi, tind_bh, i)))
            break;
    if (i >= uspi->s_apb) {
        tmp = ufs_data_ptr_to_cpu(sb, p);
        ufs_data_ptr_clear(uspi, p);

        ufs_free_blocks(inode, tmp, uspi->s_fpb);
        mark_inode_dirty(inode);
        ubh_bforget(tind_bh);
        tind_bh = NULL;
    }
    if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
        ubh_ll_rw_block(SWRITE, tind_bh);
        ubh_wait_on_buffer(tind_bh);
    }
    ubh_brelse(tind_bh);

    UFSD("EXIT: ino %lu\n", inode->i_ino);
    return retry;
}
static int ufs_trunc_tindirect(struct inode *inode)
{
    struct ufs_inode_info *ufsi = UFS_I(inode);
    struct super_block *sb;
    struct ufs_sb_private_info *uspi;
    struct ufs_buffer_head *tind_bh;
    unsigned tindirect_block, tmp, i;
    __fs32 *tind, *p;
    int retry;

    UFSD("ENTER\n");

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;

    retry = 0;

    tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
        ? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift)
        : 0;
    p = ufsi->i_u1.i_data + UFS_TIND_BLOCK;
    if (!(tmp = fs32_to_cpu(sb, *p)))
        return 0;
    tind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
    if (tmp != fs32_to_cpu(sb, *p)) {
        ubh_brelse(tind_bh);
        return 1;
    }
    if (!tind_bh) {
        *p = 0;
        return 0;
    }

    for (i = tindirect_block; i < uspi->s_apb; i++) {
        tind = ubh_get_addr32(tind_bh, i);
        retry |= ufs_trunc_dindirect(inode, UFS_NDADDR + uspi->s_apb +
                                     ((i + 1) << uspi->s_2apbshift), tind);
        ubh_mark_buffer_dirty(tind_bh);
    }
    for (i = 0; i < uspi->s_apb; i++)
        if (*ubh_get_addr32(tind_bh, i))
            break;
    if (i >= uspi->s_apb) {
        tmp = fs32_to_cpu(sb, *p);
        *p = 0;

        ufs_free_blocks(inode, tmp, uspi->s_fpb);
        mark_inode_dirty(inode);
        ubh_bforget(tind_bh);
        tind_bh = NULL;
    }
    if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
        ubh_ll_rw_block(SWRITE, tind_bh);
        ubh_wait_on_buffer(tind_bh);
    }
    ubh_brelse(tind_bh);

    UFSD("EXIT\n");
    return retry;
}
static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
{
    struct inode *dir = (struct inode *)page->mapping->host;
    int err = 0;

    page->mapping->a_ops->commit_write(NULL, page, from, to);
    if (IS_SYNC(dir))
        err = waitfor_one_page(page);
    return err;
}
/**
 * generic_write_sync - perform syncing after a write if file / inode is sync
 * @file: file to which the write happened
 * @pos: offset where the write started
 * @count: length of the write
 *
 * This is just a simple wrapper around our general syncing function.
 */
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
    /* conditional fsync disable */
#ifdef CONFIG_FSYNC_OFF
    return 0;
#endif

    if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
        return 0;
    return vfs_fsync_range(file, pos, pos + count - 1,
                           (file->f_flags & __O_SYNC) ? 0 : 1);
}
/**
 * generic_write_sync - perform syncing after a write if file / inode is sync
 * @file: file to which the write happened
 * @pos: offset where the write started
 * @count: length of the write
 *
 * This is just a simple wrapper around our general syncing function.
 */
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
#ifdef CONFIG_FSYNC_CONTROL
    if (!fsynccontrol_fsync_enabled())
        return 0;
#endif

    if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
        return 0;
    return vfs_fsync_range(file, pos, pos + count - 1,
                           (file->f_flags & __O_SYNC) ? 0 : 1);
}
/**
 * generic_write_sync - perform syncing after a write if file / inode is sync
 * @file: file to which the write happened
 * @pos: offset where the write started
 * @count: length of the write
 *
 * This is just a simple wrapper around our general syncing function.
 */
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
#ifdef CONFIG_DYNAMIC_FSYNC
    if (!early_suspend_active)
        return 0;
#endif

    if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
        return 0;
    return vfs_fsync_range(file, pos, pos + count - 1,
                           (file->f_flags & __O_SYNC) ? 0 : 1);
}
static int nfs_need_sync_write(struct file *filp, struct inode *inode)
{
    struct nfs_open_context *ctx;

    if (IS_SYNC(inode) || (filp->f_flags & O_SYNC))
        return 1;
    ctx = nfs_file_open_context(filp);
    if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags))
        return 1;
    return 0;
}
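/*
 * Usage sketch: how a write path might consult nfs_need_sync_write() after a
 * successful buffered write. The wrapper name nfs_write_finish is invented,
 * and nfs_do_fsync() is assumed to exist as the flush helper of this NFS
 * vintage; treat both as assumptions rather than the actual call chain.
 */
static ssize_t nfs_write_finish(struct kiocb *iocb, struct inode *inode, ssize_t ret)
{
    if (ret >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
        int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
        if (err < 0)
            ret = err; /* surface the flush error to the writer */
    }
    return ret;
}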
/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page)
{
    struct inode *inode = page->mapping->host;
    unsigned long end_index;
    unsigned offset = PAGE_CACHE_SIZE;
    int inode_referenced = 0;
    int err;

    /*
     * Note: We need to ensure that we have a reference to the inode
     *       if we are to do asynchronous writes. If not, waiting
     *       in nfs_wait_on_request() may deadlock with clear_inode().
     *
     *       If igrab() fails here, then it is in any case safe to
     *       call nfs_wb_page(), since there will be no pending writes.
     */
    if (igrab(inode) != 0)
        inode_referenced = 1;
    end_index = inode->i_size >> PAGE_CACHE_SHIFT;

    /* Ensure we've flushed out any previous writes */
    nfs_wb_page(inode, page);

    /* easy case */
    if (page->index < end_index)
        goto do_it;
    /* things got complicated... */
    offset = inode->i_size & (PAGE_CACHE_SIZE - 1);

    /* OK, are we completely out? */
    err = -EIO;
    if (page->index >= end_index + 1 || !offset)
        goto out;
do_it:
    lock_kernel();
    if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !IS_SYNC(inode) &&
        inode_referenced) {
        err = nfs_writepage_async(NULL, inode, page, 0, offset);
        if (err >= 0)
            err = 0;
    } else {
        err = nfs_writepage_sync(NULL, inode, page, 0, offset);
        if (err == offset)
            err = 0;
    }
    unlock_kernel();
out:
    UnlockPage(page);
    if (inode_referenced)
        iput(inode);
    return err;
}
int ufs_truncate(struct inode *inode, loff_t old_i_size)
{
    struct ufs_inode_info *ufsi = UFS_I(inode);
    struct super_block *sb = inode->i_sb;
    struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
    int retry, err = 0;

    UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
         inode->i_ino, (unsigned long long)i_size_read(inode),
         (unsigned long long)old_i_size);

    if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
          S_ISLNK(inode->i_mode)))
        return -EINVAL;
    if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
        return -EPERM;

    err = ufs_alloc_lastblock(inode);
    if (err) {
        i_size_write(inode, old_i_size);
        goto out;
    }

    block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);

    lock_kernel();
    while (1) {
        retry = ufs_trunc_direct(inode);
        retry |= ufs_trunc_indirect(inode, UFS_IND_BLOCK,
                                    ufs_get_direct_data_ptr(uspi, ufsi,
                                                            UFS_IND_BLOCK));
        retry |= ufs_trunc_dindirect(inode, UFS_IND_BLOCK + uspi->s_apb,
                                     ufs_get_direct_data_ptr(uspi, ufsi,
                                                             UFS_DIND_BLOCK));
        retry |= ufs_trunc_tindirect(inode);
        if (!retry)
            break;
        if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
            ufs_sync_inode(inode);
        blk_run_address_space(inode->i_mapping);
        yield();
    }

    inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
    ufsi->i_lastfrag = DIRECT_FRAGMENT;
    unlock_kernel();
    mark_inode_dirty(inode);
out:
    UFSD("EXIT: err %d\n", err);
    return err;
}
static VALUE method_create(VALUE self, VALUE reqid, VALUE path, VALUE data, VALUE async, VALUE acls, VALUE flags)
{
    STANDARD_PREAMBLE(self, zk, reqid, path, async, Qfalse, call_type);
    VALUE output = Qnil;

    if (data != Qnil) Check_Type(data, T_STRING);
    Check_Type(flags, T_FIXNUM);

    const char *data_ptr = (data == Qnil) ? NULL : RSTRING_PTR(data);
    ssize_t data_len = (data == Qnil) ? -1 : RSTRING_LEN(data);

    struct ACL_vector *aclptr = NULL;
    if (acls != Qnil) { aclptr = zkrb_ruby_to_aclvector(acls); }
    char realpath[16384];

    int invalid_call_type = 0;

    int rc;
    switch (call_type) {
#ifdef THREADED
    case SYNC:
        // casting data_len to int is OK as you can only store 1MB in zookeeper
        rc = zkrb_call_zoo_create(zk->zh, RSTRING_PTR(path), data_ptr, (int)data_len,
                                  aclptr, FIX2INT(flags), realpath, sizeof(realpath));
        break;
#endif
    case ASYNC:
        rc = zkrb_call_zoo_acreate(zk->zh, RSTRING_PTR(path), data_ptr, (int)data_len,
                                   aclptr, FIX2INT(flags), zkrb_string_callback,
                                   CTX_ALLOC(zk, reqid));
        break;

    default:
        invalid_call_type = 1;
        break;
    }

    if (aclptr) {
        deallocate_ACL_vector(aclptr);
        free(aclptr);
    }

    if (invalid_call_type) raise_invalid_call_type_err(call_type);

    output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc));
    if (IS_SYNC(call_type) && rc == ZOK) {
        return rb_ary_push(output, rb_str_new2(realpath));
    }
    return output;
}
static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
    struct dentry *dentry = iocb->ki_filp->f_path.dentry;
    struct inode *inode = dentry->d_inode;
    ssize_t result;
    size_t count = iov_length(iov, nr_segs);

#ifdef CONFIG_NFS_DIRECTIO
    if (iocb->ki_filp->f_flags & O_DIRECT)
        return nfs_file_direct_write(iocb, iov, nr_segs, pos);
#endif

    dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%Ld)\n",
             dentry->d_parent->d_name.name, dentry->d_name.name,
             inode->i_ino, (unsigned long) count, (long long) pos);

    result = -EBUSY;
    if (IS_SWAPFILE(inode))
        goto out_swapfile;
    /*
     * O_APPEND implies that we must revalidate the file length.
     */
    if (iocb->ki_filp->f_flags & O_APPEND) {
        result = nfs_revalidate_file_size(inode, iocb->ki_filp);
        if (result)
            goto out;
    }

    result = count;
    if (!count)
        goto out;

    nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
    result = generic_file_aio_write(iocb, iov, nr_segs, pos);
    /* Return error values for O_SYNC and IS_SYNC() */
    if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) {
        int err = nfs_fsync(iocb->ki_filp, dentry, 1);
        if (err < 0)
            result = err;
    }
out:
    return result;

out_swapfile:
    printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
    goto out;
}
static VALUE method_get_children(VALUE self, VALUE reqid, VALUE path, VALUE async, VALUE watch)
{
    STANDARD_PREAMBLE(self, zk, reqid, path, async, watch, call_type);
    VALUE output = Qnil;

    struct String_vector strings;
    struct Stat stat;

    int rc;
    switch (call_type) {
#ifdef THREADED
    case SYNC:
        rc = zkrb_call_zoo_get_children2(zk->zh, RSTRING_PTR(path), 0,
                                         &strings, &stat);
        break;

    case SYNC_WATCH:
        rc = zkrb_call_zoo_wget_children2(zk->zh, RSTRING_PTR(path),
                                          zkrb_state_callback, CTX_ALLOC(zk, reqid),
                                          &strings, &stat);
        break;
#endif
    case ASYNC:
        rc = zkrb_call_zoo_aget_children2(zk->zh, RSTRING_PTR(path), 0,
                                          zkrb_strings_stat_callback,
                                          CTX_ALLOC(zk, reqid));
        break;

    case ASYNC_WATCH:
        rc = zkrb_call_zoo_awget_children2(zk->zh, RSTRING_PTR(path),
                                           zkrb_state_callback, CTX_ALLOC(zk, reqid),
                                           zkrb_strings_stat_callback,
                                           CTX_ALLOC(zk, reqid));
        break;

    default:
        raise_invalid_call_type_err(call_type);
        break;
    }

    output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc));
    if (IS_SYNC(call_type) && rc == ZOK) {
        rb_ary_push(output, zkrb_string_vector_to_ruby(&strings));
        rb_ary_push(output, zkrb_stat_to_rarray(&stat));
    }
    return output;
}
/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page)
{
    struct inode *inode;
    unsigned long end_index;
    unsigned offset = PAGE_CACHE_SIZE;
    int err;
    struct address_space *mapping = page->mapping;

    if (!mapping)
        BUG();
    inode = mapping->host;
    if (!inode)
        BUG();
    end_index = inode->i_size >> PAGE_CACHE_SHIFT;

    /* Ensure we've flushed out any previous writes */
    nfs_wb_page(inode, page);

    /* easy case */
    if (page->index < end_index)
        goto do_it;
    /* things got complicated... */
    offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
    /* OK, are we completely out? */
    err = -EIO;
    if (page->index >= end_index + 1 || !offset)
        goto out;
do_it:
    lock_kernel();
    if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !IS_SYNC(inode)) {
        err = nfs_writepage_async(NULL, inode, page, 0, offset);
        if (err >= 0)
            err = 0;
    } else {
        err = nfs_writepage_sync(NULL, inode, page, 0, offset);
        if (err == offset)
            err = 0;
    }
    unlock_kernel();
out:
    UnlockPage(page);
    return err;
}
static int fat_cont_expand(struct inode *inode, loff_t size)
{
    struct address_space *mapping = inode->i_mapping;
    loff_t start = inode->i_size, count = size - inode->i_size;
    int err;

    err = generic_cont_expand_simple(inode, size);
    if (err)
        goto out;

    inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
    mark_inode_dirty(inode);
    if (IS_SYNC(inode))
        err = sync_page_range_nolock(inode, mapping, start, count);
out:
    return err;
}
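/*
 * Alternative sketch for kernels where sync_page_range_nolock() is not
 * available: a plain data writeback-and-wait over the newly expanded range.
 * This is only an approximation -- filemap_write_and_wait_range() covers data
 * pages, not the inode metadata that sync_page_range_nolock() also pushed
 * out. The helper name fat_sync_expanded_range is invented.
 */
static int fat_sync_expanded_range(struct inode *inode, loff_t start, loff_t count)
{
    if (!IS_SYNC(inode))
        return 0;
    return filemap_write_and_wait_range(inode->i_mapping,
                                        start, start + count - 1);
}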
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
                            int sync)
{
    struct buffer_head *bh;
    sector_t end = beg + n;

    for (; beg < end; ++beg) {
        bh = sb_getblk(inode->i_sb, beg);
        lock_buffer(bh);
        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        if (IS_SYNC(inode) || sync)
            sync_dirty_buffer(bh);
        brelse(bh);
    }
}
static VALUE method_create(VALUE self, VALUE reqid, VALUE path, VALUE data, VALUE async, VALUE acls, VALUE flags)
{
    VALUE watch = Qfalse;
    STANDARD_PREAMBLE(self, zk, reqid, path, async, watch, data_ctx, watch_ctx, call_type);

    if (data != Qnil) Check_Type(data, T_STRING);
    Check_Type(flags, T_FIXNUM);

    const char *data_ptr = (data == Qnil) ? NULL : RSTRING_PTR(data);
    size_t data_len = (data == Qnil) ? -1 : RSTRING_LEN(data);

    struct ACL_vector *aclptr = NULL;
    if (acls != Qnil) { aclptr = zkrb_ruby_to_aclvector(acls); }
    char realpath[16384];

    int rc;
    switch (call_type) {
    case SYNC:
        // casting data_len to int is OK as you can only store 1MB in zookeeper
        rc = zkrb_call_zoo_create(zk->zh, RSTRING_PTR(path), data_ptr, (int)data_len,
                                  aclptr, FIX2INT(flags), realpath, sizeof(realpath));
        break;

    case ASYNC:
        rc = zkrb_call_zoo_acreate(zk->zh, RSTRING_PTR(path), data_ptr, (int)data_len,
                                   aclptr, FIX2INT(flags), zkrb_string_callback, data_ctx);
        break;

    default:
        /* TODO(wickman) raise proper argument error */
        return Qnil;
        break;
    }

    if (aclptr) {
        deallocate_ACL_vector(aclptr);
        free(aclptr);
    }

    VALUE output = rb_ary_new();
    rb_ary_push(output, INT2FIX(rc));
    if (IS_SYNC(call_type) && rc == ZOK) {
        return rb_ary_push(output, rb_str_new2(realpath));
    }
    return output;
}