static int
nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		  struct iattr *sattr)
{
	struct inode *inode = dentry->d_inode;
	struct nfs3_sattrargs arg = {
		.fh	= NFS_FH(inode),
		.sattr	= sattr,
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs3_procedures[NFS3PROC_SETATTR],
		.rpc_argp	= &arg,
		.rpc_resp	= fattr,
	};
	int status;

	dprintk("NFS call setattr\n");
	if (sattr->ia_valid & ATTR_FILE)
		msg.rpc_cred = nfs_file_cred(sattr->ia_file);
	nfs_fattr_init(fattr);
	zql_control_test(NFS_SERVER(inode));
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
	if (status == 0)
		nfs_setattr_update_inode(inode, sattr, fattr);
	dprintk("NFS reply setattr: %d\n", status);
	return status;
}
static int
nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		 struct iattr *sattr)
{
	struct inode *inode = dentry->d_inode;
	struct nfs_sattrargs arg = {
		.fh	= NFS_FH(inode),
		.sattr	= sattr
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_SETATTR],
		.rpc_argp	= &arg,
		.rpc_resp	= fattr,
	};
	int status;

	/* Mask out the non-modebit related stuff from attr->ia_mode */
	sattr->ia_mode &= S_IALLUGO;

	dprintk("NFS call setattr\n");
	if (sattr->ia_valid & ATTR_FILE)
		msg.rpc_cred = nfs_file_cred(sattr->ia_file);
	nfs_fattr_init(fattr);
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
	if (status == 0)
		nfs_setattr_update_inode(inode, sattr);
	dprintk("NFS reply setattr: %d\n", status);
	return status;
}
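/*
 * Aside: the sattr->ia_mode &= S_IALLUGO line above strips everything but
 * the permission, setuid/setgid and sticky bits (S_IALLUGO == 07777) before
 * the mode goes out in the SETATTR call.  A minimal user-space sketch of the
 * same masking, with made-up values purely for illustration:
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* A mode as it might arrive in iattr->ia_mode: regular file, setuid, 0755 */
	mode_t mode = S_IFREG | S_ISUID | 0755;

	/* Equivalent of the kernel's S_IALLUGO (07777) mask */
	mode_t wire_mode = mode & (S_ISUID | S_ISGID | S_ISVTX |
				   S_IRWXU | S_IRWXG | S_IRWXO);

	/* Prints "original: 104755, masked: 4755" */
	printf("original: %o, masked: %o\n", (unsigned)mode, (unsigned)wire_mode);
	return 0;
}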
static inline int
nfs_direct_read_rpc(struct file *file, struct nfs_readargs *arg)
{
	int result;
	struct inode *inode = file->f_dentry->d_inode;
	struct nfs_fattr fattr;
	struct rpc_message msg;
	struct nfs_readres res = { &fattr, arg->count, 0 };

#ifdef CONFIG_NFS_V3
	msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ?
						NFS3PROC_READ : NFSPROC_READ;
#else
	msg.rpc_proc = NFSPROC_READ;
#endif
	msg.rpc_argp = arg;
	msg.rpc_resp = &res;

	lock_kernel();
	msg.rpc_cred = nfs_file_cred(file);
	fattr.valid = 0;
	result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
	nfs_refresh_inode(inode, &fattr);
	unlock_kernel();

	return result;
}
static inline int
nfs_direct_write_rpc(struct file *file, struct nfs_writeargs *arg,
		     struct nfs_writeverf *verf)
{
	int result;
	struct inode *inode = file->f_dentry->d_inode;
	struct nfs_fattr fattr;
	struct rpc_message msg;
	struct nfs_writeres res = { &fattr, verf, 0 };

#ifdef CONFIG_NFS_V3
	msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ?
						NFS3PROC_WRITE : NFSPROC_WRITE;
#else
	msg.rpc_proc = NFSPROC_WRITE;
#endif
	msg.rpc_argp = arg;
	msg.rpc_resp = &res;

	lock_kernel();
	msg.rpc_cred = get_rpccred(nfs_file_cred(file));
	fattr.valid = 0;
	result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
	nfs_write_attributes(inode, &fattr);
	put_rpccred(msg.rpc_cred);
	unlock_kernel();

#ifdef CONFIG_NFS_V3
	if (NFS_PROTO(inode)->version == 3) {
		if (result > 0) {
			if ((arg->stable == NFS_FILE_SYNC) &&
			    (verf->committed != NFS_FILE_SYNC)) {
				printk(KERN_ERR
				       "%s: server didn't sync stable write request\n",
				       __FUNCTION__);
				return -EIO;
			}
			if (result != arg->count) {
				printk(KERN_INFO
				       "%s: short write, count=%u, result=%d\n",
				       __FUNCTION__, arg->count, result);
			}
		}
		return result;
	} else {
#endif
		verf->committed = NFS_FILE_SYNC;	/* NFSv2 always syncs data */
		if (result == 0)
			return arg->count;
		return result;
#ifdef CONFIG_NFS_V3
	}
#endif
}
/*
 * If we cannot find a cookie in our cache, we suspect that this is
 * because it points to a deleted file, so we ask the server to return
 * whatever it thinks is the next entry. We then feed this to filldir.
 * If all goes well, we should then be able to find our way round the
 * cache on the next call to readdir_search_pagecache();
 *
 * NOTE: we cannot add the anonymous page to the pagecache because
 *	 the data it contains might not be page aligned. Besides,
 *	 we should already have a complete representation of the
 *	 directory in the page cache by the time we get here.
 */
static inline
int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
		     filldir_t filldir)
{
	struct file	*file = desc->file;
	struct inode	*inode = file->f_dentry->d_inode;
	struct rpc_cred	*cred = nfs_file_cred(file);
	struct page	*page = NULL;
	int		status;

	dfprintk(VFS, "NFS: uncached_readdir() searching for cookie %Lu\n",
			(long long)desc->target);

	page = alloc_page(GFP_HIGHUSER);
	if (!page) {
		status = -ENOMEM;
		goto out;
	}
	desc->error = NFS_PROTO(inode)->readdir(file->f_dentry, cred,
						desc->target, page,
						NFS_SERVER(inode)->dtsize,
						desc->plus);
	NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
	desc->page = page;
	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
	if (desc->error >= 0) {
		if ((status = dir_decode(desc)) == 0)
			desc->entry->prev_cookie = desc->target;
	} else
		status = -EIO;
	if (status < 0)
		goto out_release;

	status = nfs_do_filldir(desc, dirent, filldir);

	/* Reset read descriptor so it searches the page cache from
	 * the start upon the next call to readdir_search_pagecache() */
	desc->page_index = 0;
	desc->entry->cookie = desc->entry->prev_cookie = 0;
	desc->entry->eof = 0;
 out:
	dfprintk(VFS, "NFS: uncached_readdir() returns %d\n", status);
	return status;
 out_release:
	dir_page_release(desc);
	goto out;
}
/* Now we cache directories properly, by stuffing the dirent
 * data directly in the page cache.
 *
 * Inode invalidation due to refresh etc. takes care of
 * _everything_, no sloppy entry flushing logic, no extraneous
 * copying, network direct to page cache, the way it was meant
 * to be.
 *
 * NOTE: Dirent information verification is done always by the
 *	 page-in of the RPC reply, nowhere else, this simplifies
 *	 things substantially.
 */
static
int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
{
	struct file	*file = desc->file;
	struct inode	*inode = file->f_dentry->d_inode;
	struct rpc_cred	*cred = nfs_file_cred(file);
	unsigned long	timestamp;
	int		error;

	dfprintk(VFS, "NFS: nfs_readdir_filler() reading cookie %Lu into page %lu.\n",
			(long long)desc->entry->cookie, page->index);

 again:
	timestamp = jiffies;
	error = NFS_PROTO(inode)->readdir(file->f_dentry, cred,
					  desc->entry->cookie, page,
					  NFS_SERVER(inode)->dtsize,
					  desc->plus);
	if (error < 0) {
		/* We requested READDIRPLUS, but the server doesn't grok it */
		if (error == -ENOTSUPP && desc->plus) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
			NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS;
			desc->plus = 0;
			goto again;
		}
		goto error;
	}
	SetPageUptodate(page);
	NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
	/* Ensure consistent page alignment of the data.
	 * Note: assumes we have exclusive access to this mapping either
	 *	 through inode->i_sem or some other mechanism.
	 */
	if (page->index == 0) {
		invalidate_inode_pages(inode->i_mapping);
		NFS_I(inode)->readdir_timestamp = timestamp;
	}
	unlock_page(page);
	return 0;
 error:
	SetPageError(page);
	unlock_page(page);
	nfs_zap_caches(inode);
	desc->error = error;
	return -EIO;
}
/* Now we cache directories properly, by stuffing the dirent
 * data directly in the page cache.
 *
 * Inode invalidation due to refresh etc. takes care of
 * _everything_, no sloppy entry flushing logic, no extraneous
 * copying, network direct to page cache, the way it was meant
 * to be.
 *
 * NOTE: Dirent information verification is done always by the
 *	 page-in of the RPC reply, nowhere else, this simplifies
 *	 things substantially.
 */
static
int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
{
	struct file	*file = desc->file;
	struct inode	*inode = file->f_dentry->d_inode;
	struct rpc_cred	*cred = nfs_file_cred(file);
	void		*buffer = kmap(page);
	int		error;

	dfprintk(VFS, "NFS: nfs_readdir_filler() reading cookie %Lu into page %lu.\n",
			(long long)desc->entry->cookie, page->index);

 again:
	error = NFS_PROTO(inode)->readdir(inode, cred, desc->entry->cookie,
					  buffer, NFS_SERVER(inode)->dtsize,
					  desc->plus);
	/* We requested READDIRPLUS, but the server doesn't grok it */
	if (desc->plus && error == -ENOTSUPP) {
		NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS;
		desc->plus = 0;
		goto again;
	}
	if (error < 0)
		goto error;
	SetPageUptodate(page);
	kunmap(page);
	/* Ensure consistent page alignment of the data.
	 * Note: assumes we have exclusive access to this mapping either
	 *	 through inode->i_sem or some other mechanism.
	 */
	if (page->index == 0)
		invalidate_inode_pages(inode);
	UnlockPage(page);
	return 0;
 error:
	SetPageError(page);
	kunmap(page);
	UnlockPage(page);
	invalidate_inode_pages(inode);
	desc->error = error;
	return -EIO;
}
static int
nfs_proc_lookup(struct inode *dir, struct qstr *name,
		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs_diropargs	arg = {
		.fh	= NFS_FH(dir),
		.name	= name->name,
		.len	= name->len
	};
	struct nfs_diropok	res = {
		.fh	= fhandle,
		.fattr	= fattr
	};
	int status;

	dprintk("NFS call lookup %s\n", name->name);
	fattr->valid = 0;
	status = rpc_call(NFS_CLIENT(dir), NFSPROC_LOOKUP, &arg, &res, 0);
	dprintk("NFS reply lookup: %d\n", status);
	return status;
}

static int nfs_proc_readlink(struct inode *inode, struct page *page)
{
	struct nfs_readlinkargs	args = {
		.fh	= NFS_FH(inode),
		.count	= PAGE_CACHE_SIZE,
		.pages	= &page
	};
	int status;

	dprintk("NFS call readlink\n");
	status = rpc_call(NFS_CLIENT(inode), NFSPROC_READLINK, &args, NULL, 0);
	dprintk("NFS reply readlink: %d\n", status);
	return status;
}

static int nfs_proc_read(struct nfs_read_data *rdata, struct file *filp)
{
	int			flags = rdata->flags;
	struct inode		*inode = rdata->inode;
	struct nfs_fattr	*fattr = rdata->res.fattr;
	struct rpc_message	msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_READ],
		.rpc_argp	= &rdata->args,
		.rpc_resp	= &rdata->res,
	};
	int status;

	dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
			(long long) rdata->args.offset);
	fattr->valid = 0;
	msg.rpc_cred = nfs_cred(inode, filp);
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);

	if (status >= 0) {
		nfs_refresh_inode(inode, fattr);
		/* Emulate the eof flag, which isn't normally needed in NFSv2
		 * as it is guaranteed to always return the file attributes
		 */
		if (rdata->args.offset + rdata->args.count >= fattr->size)
			rdata->res.eof = 1;
	}
	dprintk("NFS reply read: %d\n", status);
	return status;
}

static int nfs_proc_write(struct nfs_write_data *wdata, struct file *filp)
{
	int			flags = wdata->flags;
	struct inode		*inode = wdata->inode;
	struct nfs_fattr	*fattr = wdata->res.fattr;
	struct rpc_message	msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_WRITE],
		.rpc_argp	= &wdata->args,
		.rpc_resp	= &wdata->res,
	};
	int status;

	dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
			(long long) wdata->args.offset);
	fattr->valid = 0;
	msg.rpc_cred = nfs_cred(inode, filp);
	status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
	if (status >= 0) {
		nfs_refresh_inode(inode, fattr);
		wdata->res.count = wdata->args.count;
		wdata->verf.committed = NFS_FILE_SYNC;
	}
	dprintk("NFS reply write: %d\n", status);
	return status < 0 ? status : wdata->res.count;
}

static struct inode *
nfs_proc_create(struct inode *dir, struct qstr *name, struct iattr *sattr,
		int flags)
{
	struct nfs_fh		fhandle;
	struct nfs_fattr	fattr;
	struct nfs_createargs	arg = {
		.fh	= NFS_FH(dir),
		.name	= name->name,
		.len	= name->len,
		.sattr	= sattr
	};
	struct nfs_diropok	res = {
		.fh	= &fhandle,
		.fattr	= &fattr
	};
	int status;

	fattr.valid = 0;
	dprintk("NFS call create %s\n", name->name);
	status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
	dprintk("NFS reply create: %d\n", status);
	if (status == 0) {
		struct inode *inode;
		inode = nfs_fhget(dir->i_sb, &fhandle, &fattr);
		if (inode)
			return inode;
		status = -ENOMEM;
	}
	return ERR_PTR(status);
}

/*
 * In NFSv2, mknod is grafted onto the create call.
 */
static int
nfs_proc_mknod(struct inode *dir, struct qstr *name, struct iattr *sattr,
	       dev_t rdev, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs_createargs	arg = {
		.fh	= NFS_FH(dir),
		.name	= name->name,
		.len	= name->len,
		.sattr	= sattr
	};
	struct nfs_diropok	res = {
		.fh	= fhandle,
		.fattr	= fattr
	};
	int status, mode;

	dprintk("NFS call mknod %s\n", name->name);

	mode = sattr->ia_mode;
	if (S_ISFIFO(mode)) {
		sattr->ia_mode = (mode & ~S_IFMT) | S_IFCHR;
		sattr->ia_valid &= ~ATTR_SIZE;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		sattr->ia_valid |= ATTR_SIZE;
		sattr->ia_size = new_encode_dev(rdev);	/* get out your barf bag */
	}

	fattr->valid = 0;
	status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);

	if (status == -EINVAL && S_ISFIFO(mode)) {
		sattr->ia_mode = mode;
		fattr->valid = 0;
		status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0);
	}
	dprintk("NFS reply mknod: %d\n", status);
	return status;
}

static int
nfs_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs_diropargs	arg = {
		.fh	= NFS_FH(dir),
		.name	= name->name,
		.len	= name->len
	};
	struct rpc_message	msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_REMOVE],
		.rpc_argp	= &arg,
		.rpc_resp	= NULL,
		.rpc_cred	= NULL
	};
	int status;

	dprintk("NFS call remove %s\n", name->name);
	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
	dprintk("NFS reply remove: %d\n", status);
	return status;
}

static int
nfs_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *name)
{
	struct nfs_diropargs	*arg;

	arg = (struct nfs_diropargs *)kmalloc(sizeof(*arg), GFP_KERNEL);
	if (!arg)
		return -ENOMEM;
	arg->fh = NFS_FH(dir->d_inode);
	arg->name = name->name;
	arg->len = name->len;
	msg->rpc_proc = &nfs_procedures[NFSPROC_REMOVE];
	msg->rpc_argp = arg;
	return 0;
}

static int
nfs_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
{
	struct rpc_message *msg = &task->tk_msg;

	if (msg->rpc_argp)
		kfree(msg->rpc_argp);
	return 0;
}

static int
nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_renameargs	arg = {
		.fromfh		= NFS_FH(old_dir),
		.fromname	= old_name->name,
		.fromlen	= old_name->len,
		.tofh		= NFS_FH(new_dir),
		.toname		= new_name->name,
		.tolen		= new_name->len
	};
	int status;

	dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
	status = rpc_call(NFS_CLIENT(old_dir), NFSPROC_RENAME, &arg, NULL, 0);
	dprintk("NFS reply rename: %d\n", status);
	return status;
}

static int
nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs_linkargs	arg = {
		.fromfh		= NFS_FH(inode),
		.tofh		= NFS_FH(dir),
		.toname		= name->name,
		.tolen		= name->len
	};
	int status;

	dprintk("NFS call link %s\n", name->name);
	status = rpc_call(NFS_CLIENT(inode), NFSPROC_LINK, &arg, NULL, 0);
	dprintk("NFS reply link: %d\n", status);
	return status;
}

static int
nfs_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
		 struct iattr *sattr, struct nfs_fh *fhandle,
		 struct nfs_fattr *fattr)
{
	struct nfs_symlinkargs	arg = {
		.fromfh		= NFS_FH(dir),
		.fromname	= name->name,
		.fromlen	= name->len,
		.topath		= path->name,
		.tolen		= path->len,
		.sattr		= sattr
	};
	int status;

	dprintk("NFS call symlink %s -> %s\n", name->name, path->name);
	fattr->valid = 0;
	status = rpc_call(NFS_CLIENT(dir), NFSPROC_SYMLINK, &arg, NULL, 0);
	dprintk("NFS reply symlink: %d\n", status);
	return status;
}

static int
nfs_proc_mkdir(struct inode *dir, struct qstr *name, struct iattr *sattr,
	       struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs_createargs	arg = {
		.fh	= NFS_FH(dir),
		.name	= name->name,
		.len	= name->len,
		.sattr	= sattr
	};
	struct nfs_diropok	res = {
		.fh	= fhandle,
		.fattr	= fattr
	};
	int status;

	dprintk("NFS call mkdir %s\n", name->name);
	fattr->valid = 0;
	status = rpc_call(NFS_CLIENT(dir), NFSPROC_MKDIR, &arg, &res, 0);
	dprintk("NFS reply mkdir: %d\n", status);
	return status;
}

static int
nfs_proc_rmdir(struct inode *dir, struct qstr *name)
{
	struct nfs_diropargs	arg = {
		.fh	= NFS_FH(dir),
		.name	= name->name,
		.len	= name->len
	};
	int status;

	dprintk("NFS call rmdir %s\n", name->name);
	status = rpc_call(NFS_CLIENT(dir), NFSPROC_RMDIR, &arg, NULL, 0);
	dprintk("NFS reply rmdir: %d\n", status);
	return status;
}

/*
 * The READDIR implementation is somewhat hackish - we pass a temporary
 * buffer to the encode function, which installs it in the receive iovec.
 * The decode function just parses the reply to make sure it is
 * syntactically correct; the entries themselves are decoded from
 * nfs_readdir by calling the decode_entry function directly.
 */
static int
nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		 u64 cookie, struct page *page, unsigned int count, int plus)
{
	struct inode		*dir = dentry->d_inode;
	struct nfs_readdirargs	arg = {
		.fh		= NFS_FH(dir),
		.cookie		= cookie,
		.count		= count,
		.pages		= &page
	};
	struct rpc_message	msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_READDIR],
		.rpc_argp	= &arg,
		.rpc_resp	= NULL,
		.rpc_cred	= cred
	};
	int status;

	lock_kernel();

	dprintk("NFS call readdir %d\n", (unsigned int)cookie);
	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);

	dprintk("NFS reply readdir: %d\n", status);
	unlock_kernel();
	return status;
}

static int
nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsstat *stat)
{
	struct nfs2_fsstat fsinfo;
	int status;

	dprintk("NFS call statfs\n");
	stat->fattr->valid = 0;
	status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
	dprintk("NFS reply statfs: %d\n", status);
	if (status)
		goto out;
	stat->tbytes = (u64)fsinfo.blocks * fsinfo.bsize;
	stat->fbytes = (u64)fsinfo.bfree * fsinfo.bsize;
	stat->abytes = (u64)fsinfo.bavail * fsinfo.bsize;
	stat->tfiles = 0;
	stat->ffiles = 0;
	stat->afiles = 0;
out:
	return status;
}

static int
nfs_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs2_fsstat fsinfo;
	int status;

	dprintk("NFS call fsinfo\n");
	info->fattr->valid = 0;
	status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0);
	dprintk("NFS reply fsinfo: %d\n", status);
	if (status)
		goto out;
	info->rtmax  = NFS_MAXDATA;
	info->rtpref = fsinfo.tsize;
	info->rtmult = fsinfo.bsize;
	info->wtmax  = NFS_MAXDATA;
	info->wtpref = fsinfo.tsize;
	info->wtmult = fsinfo.bsize;
	info->dtpref = fsinfo.tsize;
	info->maxfilesize = 0x7FFFFFFF;
	info->lease_time = 0;
out:
	return status;
}

static int
nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		  struct nfs_pathconf *info)
{
	info->max_link = 0;
	info->max_namelen = NFS2_MAXNAMLEN;
	return 0;
}

extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int);

static void
nfs_read_done(struct rpc_task *task)
{
	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;

	if (task->tk_status >= 0) {
		nfs_refresh_inode(data->inode, data->res.fattr);
		/* Emulate the eof flag, which isn't normally needed in NFSv2
		 * as it is guaranteed to always return the file attributes
		 */
		if (data->args.offset + data->args.count >= data->res.fattr->size)
			data->res.eof = 1;
	}
	nfs_readpage_result(task);
}

static void
nfs_proc_read_setup(struct nfs_read_data *data)
{
	struct rpc_task		*task = &data->task;
	struct inode		*inode = data->inode;
	int			flags;
	struct rpc_message	msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_READ],
		.rpc_argp	= &data->args,
		.rpc_resp	= &data->res,
		.rpc_cred	= data->cred,
	};

	/* N.B. Do we need to test? Never called for swapfile inode */
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0);

	/* Finalize the task. */
	rpc_init_task(task, NFS_CLIENT(inode), nfs_read_done, flags);
	rpc_call_setup(task, &msg, 0);
}

static void
nfs_write_done(struct rpc_task *task)
{
	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;

	if (task->tk_status >= 0)
		nfs_refresh_inode(data->inode, data->res.fattr);
	nfs_writeback_done(task);
}

static void
nfs_proc_write_setup(struct nfs_write_data *data, int how)
{
	struct rpc_task		*task = &data->task;
	struct inode		*inode = data->inode;
	int			flags;
	struct rpc_message	msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_WRITE],
		.rpc_argp	= &data->args,
		.rpc_resp	= &data->res,
		.rpc_cred	= data->cred,
	};

	/* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */
	data->args.stable = NFS_FILE_SYNC;

	/* Set the initial flags for the task. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;

	/* Finalize the task. */
	rpc_init_task(task, NFS_CLIENT(inode), nfs_write_done, flags);
	rpc_call_setup(task, &msg, 0);
}

static void
nfs_proc_commit_setup(struct nfs_write_data *data, int how)
{
	BUG();
}

/*
 * Set up the nfspage struct with the right credentials
 */
static void
nfs_request_init(struct nfs_page *req, struct file *filp)
{
	req->wb_cred = get_rpccred(nfs_cred(req->wb_inode, filp));
}

static int
nfs_request_compatible(struct nfs_page *req, struct file *filp, struct page *page)
{
	if (req->wb_file != filp)
		return 0;
	if (req->wb_page != page)
		return 0;
	if (req->wb_cred != nfs_file_cred(filp))
		return 0;
	return 1;
}

static int
nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	return nlmclnt_proc(filp->f_dentry->d_inode, cmd, fl);
}

struct nfs_rpc_ops	nfs_v2_clientops = {
	.version	= 2,		       /* protocol version */
	.dentry_ops	= &nfs_dentry_operations,
	.dir_inode_ops	= &nfs_dir_inode_operations,
	.getroot	= nfs_proc_get_root,
	.getattr	= nfs_proc_getattr,
	.setattr	= nfs_proc_setattr,
	.lookup		= nfs_proc_lookup,
	.access		= NULL,		       /* access */
	.readlink	= nfs_proc_readlink,
	.read		= nfs_proc_read,
	.write		= nfs_proc_write,
	.commit		= NULL,		       /* commit */
	.create		= nfs_proc_create,
	.remove		= nfs_proc_remove,
	.unlink_setup	= nfs_proc_unlink_setup,
	.unlink_done	= nfs_proc_unlink_done,
	.rename		= nfs_proc_rename,
	.link		= nfs_proc_link,
	.symlink	= nfs_proc_symlink,
	.mkdir		= nfs_proc_mkdir,
	.rmdir		= nfs_proc_rmdir,
	.readdir	= nfs_proc_readdir,
	.mknod		= nfs_proc_mknod,
	.statfs		= nfs_proc_statfs,
	.fsinfo		= nfs_proc_fsinfo,
	.pathconf	= nfs_proc_pathconf,
	.decode_dirent	= nfs_decode_dirent,
	.read_setup	= nfs_proc_read_setup,
	.write_setup	= nfs_proc_write_setup,
	.commit_setup	= nfs_proc_commit_setup,
	.file_open	= nfs_open,
	.file_release	= nfs_release,
	.request_init	= nfs_request_init,
	.request_compatible = nfs_request_compatible,
	.lock		= nfs_proc_lock,
};
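/*
 * Aside: everything registered in nfs_v2_clientops above is reached through
 * the per-mount ops table rather than by name -- callers go through
 * NFS_PROTO(inode), as the readdir paths earlier in this section do with
 * NFS_PROTO(inode)->readdir(...).  A stripped-down user-space model of that
 * dispatch pattern (all names here are hypothetical stand-ins, not kernel API):
 */
#include <stdio.h>

/* Miniature analogue of struct nfs_rpc_ops: one table per protocol version */
struct demo_rpc_ops {
	int	version;
	int	(*readdir)(const char *name);
};

static int demo_v2_readdir(const char *name)
{
	printf("v2 READDIR on %s\n", name);
	return 0;
}

static const struct demo_rpc_ops demo_v2_clientops = {
	.version	= 2,
	.readdir	= demo_v2_readdir,
};

/* Stand-in for NFS_PROTO(inode): returns the ops table for the mount's version */
static const struct demo_rpc_ops *demo_proto(void)
{
	return &demo_v2_clientops;
}

int main(void)
{
	/* Dispatch through the table, like NFS_PROTO(inode)->readdir(...) */
	return demo_proto()->readdir("/export/home");
}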
/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int
nfs_writepage_sync(struct file *file, struct inode *inode, struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct rpc_cred	*cred = NULL;
	loff_t		base;
	unsigned int	wsize = NFS_SERVER(inode)->wsize;
	int		result, refresh = 0, written = 0, flags;
	u8		*buffer;
	struct nfs_fattr fattr;
	struct nfs_writeverf verf;

	if (file)
		cred = get_rpccred(nfs_file_cred(file));
	if (!cred)
		cred = get_rpccred(NFS_I(inode)->mm_cred);

	dprintk("NFS: nfs_writepage_sync(%x/%Ld %d@%Ld)\n",
		inode->i_dev, (long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	buffer = kmap(page) + offset;
	base = page_offset(page) + offset;

	flags = ((IS_SWAPFILE(inode)) ? NFS_RW_SWAP : 0) | NFS_RW_SYNC;

	do {
		if (count < wsize && !IS_SWAPFILE(inode))
			wsize = count;

		result = NFS_PROTO(inode)->write(inode, cred, &fattr, flags,
						 base, wsize, buffer, &verf);
		nfs_write_attributes(inode, &fattr);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result != wsize)
			printk("NFS: short write, wsize=%u, result=%d\n",
			       wsize, result);
		refresh = 1;
		buffer  += wsize;
		base    += wsize;
		written += wsize;
		count   -= wsize;
		/*
		 * If we've extended the file, update the inode
		 * now so we don't invalidate the cache.
		 */
		if (base > inode->i_size)
			inode->i_size = base;
	} while (count);

	if (PageError(page))
		ClearPageError(page);

io_error:
	kunmap(page);
	if (cred)
		put_rpccred(cred);

	return written ? written : result;
}
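/*
 * Aside: nfs_writepage_sync() above issues one WRITE RPC per wsize-sized
 * chunk, walking buffer, base, written and count forward together.  A short
 * user-space sketch of the same chunking arithmetic; write_chunk() is a
 * hypothetical stand-in for the synchronous RPC:
 */
#include <stdio.h>

/* Hypothetical stand-in for one synchronous WRITE RPC; returns bytes written */
static int write_chunk(unsigned long base, unsigned int len)
{
	printf("WRITE %u bytes @ offset %lu\n", len, base);
	return (int)len;
}

int main(void)
{
	unsigned int	wsize = 8192;	/* server's preferred write size */
	unsigned int	count = 20000;	/* bytes remaining in the request */
	unsigned long	base = 0;	/* file offset of the next chunk */
	unsigned int	written = 0;

	do {
		unsigned int len = count < wsize ? count : wsize;
		int result = write_chunk(base, len);

		if (result < 0)
			return 1;	/* I/O error: bail out, like goto io_error above */
		base    += len;
		written += len;
		count   -= len;
	} while (count);

	printf("total written: %u\n", written);
	return 0;
}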
/*
 * Generic NLM call, async version.
 */
int
nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	int status;

	dprintk("lockd: call procedure %d on %s (async)\n",
		(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	if ((clnt = nlm_bind_host(host)) == NULL)
		return -ENOLCK;
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* bootstrap and kick off the async RPC call */
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);

	return status;
}

int
nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct file	*file = argp->lock.fl.fl_file;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int status;

	dprintk("lockd: call procedure %d on %s (async)\n",
		(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	if ((clnt = nlm_bind_host(host)) == NULL)
		return -ENOLCK;
	msg.rpc_proc = &clnt->cl_procinfo[proc];

	/* bootstrap and kick off the async RPC call */
	if (file)
		msg.rpc_cred = nfs_file_cred(file);
	/* Increment host refcount */
	nlm_get_host(host);
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
	if (status < 0)
		nlm_release_host(host);
	return status;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	if ((status = nlmclnt_call(req, NLMPROC_TEST)) < 0)
		return status;

	status = req->a_res.status;
	if (status == NLM_LCK_GRANTED) {
		fl->fl_type = F_UNLCK;
	} if (status == NLM_LCK_DENIED) {
		/*
		 * Report the conflicting lock back to the application.
		 * FIXME: Is it OK to report the pid back as well?
		 */
		locks_copy_lock(fl, &req->a_res.lock.fl);
		/* fl->fl_pid = 0; */
	} else {
		return nlm_stat_to_errno(req->a_res.status);
	}

	return 0;
}
/*
 * Generic NLM call
 */
int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct file	*filp = argp->lock.fl.fl_file;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	if (filp)
		msg.rpc_cred = nfs_file_cred(filp);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
					"lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}
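/*
 * Aside: the loop in nlmclnt_call() above retries for as long as the server
 * answers NLM_LCK_DENIED_GRACE_PERIOD and the request is not itself a reclaim.
 * A condensed user-space model of that back-off-and-retry behaviour; the
 * status codes, demo_rpc_lock() and the sleep() are stand-ins, not lockd API:
 */
#include <stdio.h>
#include <unistd.h>

enum { DEMO_LCK_GRANTED = 0, DEMO_LCK_DENIED_GRACE_PERIOD = 4 };

static int demo_calls;

/* Pretend server: still in its grace period for the first two calls */
static int demo_rpc_lock(void)
{
	return ++demo_calls <= 2 ? DEMO_LCK_DENIED_GRACE_PERIOD : DEMO_LCK_GRANTED;
}

int main(void)
{
	int status;

	for (;;) {
		status = demo_rpc_lock();
		if (status != DEMO_LCK_DENIED_GRACE_PERIOD)
			break;			/* granted (or a real error): done */
		fprintf(stderr, "server in grace period, backing off\n");
		sleep(1);			/* stands in for nlm_wait_on_grace() */
	}

	printf("lock result: %d\n", status);
	return 0;
}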