/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned int    bufsiz;
        kxdrproc_t      encode;
        int             status;
        u32             *p;

        dprintk("RPC: %4d call_encode (status %d)\n",
                task->tk_pid, task->tk_status);

        task->tk_action = call_transmit;

        /* Default buffer setup */
        bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE;
        req->rq_svec[0].iov_base = task->tk_buffer;
        req->rq_svec[0].iov_len  = bufsiz;
        req->rq_slen             = 0;
        req->rq_snr              = 1;
        req->rq_rvec[0].iov_base = task->tk_buffer;
        req->rq_rvec[0].iov_len  = bufsiz;
        req->rq_rlen             = bufsiz;
        req->rq_rnr              = 1;

        if (task->tk_proc > clnt->cl_maxproc) {
                printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
                       clnt->cl_protname, clnt->cl_vers, task->tk_proc);
                rpc_exit(task, -EIO);
                return;
        }

        /* Zero buffer so we have automatic zero-padding of opaque & string */
        memset(task->tk_buffer, 0, bufsiz);

        /* Encode header and provided arguments */
        encode = rpcproc_encode(clnt, task->tk_proc);
        if (!(p = call_header(task))) {
                printk("RPC: call_header failed, exit EIO\n");
                rpc_exit(task, -EIO);
        } else if ((status = encode(req, p, task->tk_argp)) < 0) {
                printk(KERN_WARNING "%s: can't encode arguments: %d\n",
                       clnt->cl_protname, -status);
                rpc_exit(task, status);
        }
}
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        unsigned int    bufsiz;

        dprintk("RPC: %4d call_allocate (status %d)\n",
                task->tk_pid, task->tk_status);

        task->tk_action = call_encode;
        if (task->tk_buffer)
                return;

        /* FIXME: compute buffer requirements more exactly using
         * auth->au_wslack */
        bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE;

        if ((task->tk_buffer = rpc_malloc(task, bufsiz)) != NULL)
                return;
        printk("RPC: buffer allocation failed for task %p\n", task);

        if (!signalled()) {
                xprt_release(task);
                task->tk_action = call_reserve;
                rpc_delay(task, HZ);
                return;
        }

        rpc_exit(task, -ERESTARTSYS);
}
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        dprintk("RPC: %4d call_reserve\n", task->tk_pid);

        if (!clnt->cl_port) {
                printk(KERN_NOTICE "%s: couldn't bind to server %s - %s.\n",
                       clnt->cl_protname, clnt->cl_server,
                       clnt->cl_softrtry ? "giving up" : "retrying");
                if (!clnt->cl_softrtry) {
                        rpc_delay(task, 5*HZ);
                        return;
                }
                rpc_exit(task, -EIO);
                return;
        }

        if (!rpcauth_uptodatecred(task)) {
                task->tk_action = call_refresh;
                return;
        }

        task->tk_action  = call_reserveresult;
        task->tk_timeout = clnt->cl_timeout.to_resrvval;
        task->tk_status  = 0;
        clnt->cl_stats->rpccnt++;
        xprt_reserve(task);
}
/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_header *hdr = calldata;
        int err;

        err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
        if (err)
                rpc_exit(task, err);
}
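/*
 * nfs_pgio_prepare is called through the rpc_call_prepare hook of a
 * struct rpc_call_ops (the nfs42_layoutstat_ops table further below
 * shows the same pattern in full). A minimal sketch of such a wiring;
 * example_pgio_ops is an illustrative name, not a declaration from
 * this code:
 */
static const struct rpc_call_ops example_pgio_ops = {
        .rpc_call_prepare = nfs_pgio_prepare,   /* runs before transmit */
        /* .rpc_call_done and .rpc_release omitted in this sketch */
};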
/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;

        if (req) {
                struct rpc_timeout *to = &req->rq_timeout;

                if (xprt_adjust_timeout(to)) {
                        dprintk("RPC: %4d call_timeout (minor timeo)\n",
                                task->tk_pid);
                        goto minor_timeout;
                }
                to->to_initval <<= 1;
                if (to->to_initval > to->to_maxval)
                        to->to_initval = to->to_maxval;
        }

        dprintk("RPC: %4d call_timeout (major timeo)\n", task->tk_pid);
        if (clnt->cl_softrtry) {
                if (clnt->cl_chatty && !task->tk_exit)
                        printk("%s: server %s not responding, timed out\n",
                               clnt->cl_protname, clnt->cl_server);
                rpc_exit(task, -EIO);
                return;
        }
        if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
                task->tk_flags |= RPC_CALL_MAJORSEEN;
                if (req)
                        printk("%s: server %s not responding, still trying\n",
                               clnt->cl_protname, clnt->cl_server);
                else
                        printk("%s: task %d can't get a request slot\n",
                               clnt->cl_protname, task->tk_pid);
        }
        if (clnt->cl_autobind)
                clnt->cl_port = 0;

minor_timeout:
        if (!clnt->cl_port) {
                task->tk_action = call_bind;
        } else if (!req) {
                task->tk_action = call_reserve;
        } else if (req->rq_damaged) {
                task->tk_action = call_encode;
                clnt->cl_stats->rpcretrans++;
        } else {
                task->tk_action = call_transmit;
                clnt->cl_stats->rpcretrans++;
        }
        task->tk_status = 0;
}
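/*
 * On a major timeout, call_timeout above doubles to_initval and clamps
 * it at to_maxval. A minimal standalone sketch of that backoff
 * arithmetic; the function name below is illustrative, not kernel API:
 */
static inline unsigned long
backoff_double_clamped(unsigned long cur, unsigned long max)
{
        cur <<= 1;              /* exponential backoff: double the timeout */
        if (cur > max)
                cur = max;      /* never exceed the configured ceiling */
        return cur;
}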
/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
        int status = task->tk_status;

        dprintk("RPC: %4d call_reserveresult (status %d)\n",
                task->tk_pid, task->tk_status);

        /*
         * After a call to xprt_reserve(), we must have either
         * a request slot or else an error status.
         */
        if ((task->tk_status >= 0 && !task->tk_rqstp) ||
            (task->tk_status < 0 && task->tk_rqstp))
                printk(KERN_ERR "call_reserveresult: status=%d, request=%p??\n",
                       task->tk_status, task->tk_rqstp);

        if (task->tk_status >= 0) {
                task->tk_action = call_allocate;
                return;
        }

        task->tk_status = 0;
        switch (status) {
        case -EAGAIN:
        case -ENOBUFS:
                task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
                task->tk_action = call_reserve;
                break;
        case -ETIMEDOUT:
                dprintk("RPC: task timed out\n");
                task->tk_action = call_timeout;
                break;
        default:
                if (!task->tk_rqstp) {
                        printk(KERN_INFO "RPC: task has no request, exit EIO\n");
                        rpc_exit(task, -EIO);
                } else
                        rpc_exit(task, status);
        }
}
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        kxdrproc_t      decode = rpcproc_decode(clnt, task->tk_proc);
        u32             *p;

        dprintk("RPC: %4d call_decode (status %d)\n",
                task->tk_pid, task->tk_status);

        if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
                printk("%s: server %s OK\n",
                       clnt->cl_protname, clnt->cl_server);
                task->tk_flags &= ~RPC_CALL_MAJORSEEN;
        }

        if (task->tk_status < 12) {
                printk("%s: too small RPC reply size (%d bytes)\n",
                       clnt->cl_protname, task->tk_status);
                rpc_exit(task, -EIO);
                return;
        }

        /* Verify the RPC header */
        if (!(p = call_verify(task)))
                return;

        /*
         * The following is an NFS-specific hack to cater for setuid
         * processes whose uid is mapped to nobody on the server.
         */
        if (task->tk_client->cl_prog == 100003 &&
            (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
                if (RPC_IS_SETUID(task) && (task->tk_suid_retry)--) {
                        dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
                        task->tk_flags ^= RPC_CALL_REALUID;
                        task->tk_action = call_encode;
                        return;
                }
        }

        task->tk_action = NULL;
        task->tk_status = decode(req, p, task->tk_resp);
        dprintk("RPC: %4d call_decode result %d\n",
                task->tk_pid, task->tk_status);
}
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void
rpc_killall_tasks(struct rpc_clnt *clnt)
{
        struct rpc_task **q, *rovr;

        dprintk("RPC: killing all tasks for client %p\n", clnt);

        /* N.B. Why bother to inhibit? Nothing blocks here ... */
        rpc_inhibit++;
        for (q = &all_tasks; (rovr = *q); q = &rovr->tk_next_task) {
                if (!clnt || rovr->tk_client == clnt) {
                        rovr->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(rovr, -EIO);
                        rpc_wake_up_task(rovr);
                }
        }
        rpc_inhibit--;
}
void
nfs_reqlist_exit(struct nfs_server *server)
{
        struct nfs_reqlist *cache;

        lock_kernel();
        cache = server->rw_requests;
        if (!cache)
                goto out;

        dprintk("NFS: reqlist_exit (ptr %p rpc %p)\n", cache, cache->task);
        while (cache->task) {
                rpc_exit(cache->task, 0);
                rpc_wake_up_task(cache->task);

                interruptible_sleep_on_timeout(&cache->request_wait, 1 * HZ);
        }
out:
        unlock_kernel();
}
/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
        dprintk("RPC: %4d call_reserveresult (status %d)\n",
                task->tk_pid, task->tk_status);
        /*
         * After a call to xprt_reserve(), we must have either
         * a request slot or else an error status.
         */
        if ((task->tk_status >= 0 && !task->tk_rqstp) ||
            (task->tk_status < 0 && task->tk_rqstp))
                printk("call_reserveresult: status=%d, request=%p??\n",
                       task->tk_status, task->tk_rqstp);

        if (task->tk_status >= 0) {
                task->tk_action = call_allocate;
                goto out;
        } else if (task->tk_status == -EAGAIN) {
                task->tk_timeout = task->tk_client->cl_timeout.to_resrvval;
                task->tk_status = 0;
                xprt_reserve(task);
                goto out;
        } else if (task->tk_status == -ETIMEDOUT) {
                dprintk("RPC: task timed out\n");
                task->tk_action = call_timeout;
                goto out;
        } else {
                task->tk_action = NULL;
        }

        if (!task->tk_rqstp) {
                printk("RPC: task has no request, exit EIO\n");
                rpc_exit(task, -EIO);
        }
out:
        return;
}
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        if (task->tk_msg.rpc_proc > clnt->cl_maxproc) {
                printk(KERN_WARNING "%s (vers %d): bad procedure number %d\n",
                       clnt->cl_protname, clnt->cl_vers,
                       task->tk_msg.rpc_proc);
                rpc_exit(task, -EIO);
                return;
        }

        dprintk("RPC: %4d call_reserve\n", task->tk_pid);

        if (!rpcauth_uptodatecred(task)) {
                task->tk_action = call_refresh;
                return;
        }

        task->tk_status  = 0;
        task->tk_action  = call_reserveresult;
        task->tk_timeout = clnt->cl_timeout.to_resrvval;
        clnt->cl_stats->rpccnt++;
        xprt_reserve(task);
}
/*
 * Obtain the port for a given RPC service on a given host. This one can
 * be called for an ongoing RPC request.
 */
void
rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
{
        struct rpc_portmap *map = clnt->cl_pmap;
        struct sockaddr_in *sap = &clnt->cl_xprt->addr;
        struct rpc_message msg = {
                .rpc_proc       = &pmap_procedures[PMAP_GETPORT],
                .rpc_argp       = map,
                .rpc_resp       = &clnt->cl_port,
                .rpc_cred       = NULL
        };
        struct rpc_clnt *pmap_clnt;
        struct rpc_task *child;

        dprintk("RPC: %4d rpc_getport(%s, %d, %d, %d)\n",
                task->tk_pid, clnt->cl_server,
                map->pm_prog, map->pm_vers, map->pm_prot);

        /* Autobind on cloned rpc clients is discouraged */
        BUG_ON(clnt->cl_parent != clnt);

        spin_lock(&pmap_lock);
        if (map->pm_binding) {
                rpc_sleep_on(&map->pm_bindwait, task, NULL, NULL);
                spin_unlock(&pmap_lock);
                return;
        }
        map->pm_binding = 1;
        spin_unlock(&pmap_lock);

        pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0);
        if (IS_ERR(pmap_clnt)) {
                task->tk_status = PTR_ERR(pmap_clnt);
                goto bailout;
        }
        task->tk_status = 0;

        /*
         * Note: rpc_new_child will release client after a failure.
         */
        if (!(child = rpc_new_child(pmap_clnt, task)))
                goto bailout;

        /* Setup the call info struct */
        rpc_call_setup(child, &msg, 0);

        /* ... and run the child task */
        rpc_run_child(task, child, pmap_getport_done);
        return;

bailout:
        spin_lock(&pmap_lock);
        map->pm_binding = 0;
        rpc_wake_up(&map->pm_bindwait);
        spin_unlock(&pmap_lock);
        rpc_exit(task, -EIO);
}

#ifdef CONFIG_ROOT_NFS
int
rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
{
        struct rpc_portmap map = {
                .pm_prog        = prog,
                .pm_vers        = vers,
                .pm_prot        = prot,
                .pm_port        = 0
        };
        struct rpc_clnt *pmap_clnt;
        char            hostname[32];
        int             status;

        dprintk("RPC: rpc_getport_external(%u.%u.%u.%u, %d, %d, %d)\n",
                NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot);

        sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr));
        pmap_clnt = pmap_create(hostname, sin, prot, 0);
        if (IS_ERR(pmap_clnt))
                return PTR_ERR(pmap_clnt);

        /* Setup the call info struct */
        status = rpc_call(pmap_clnt, PMAP_GETPORT, &map, &map.pm_port, 0);

        if (status >= 0) {
                if (map.pm_port != 0)
                        return map.pm_port;
                status = -EACCES;
        }
        return status;
}
#endif

static void
pmap_getport_done(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_portmap *map = clnt->cl_pmap;

        dprintk("RPC: %4d pmap_getport_done(status %d, port %d)\n",
                task->tk_pid, task->tk_status, clnt->cl_port);

        xprt->ops->set_port(xprt, 0);
        if (task->tk_status < 0) {
                /* Make the calling task exit with an error */
                task->tk_action = rpc_exit_task;
        } else if (clnt->cl_port == 0) {
                /* Program not registered */
                rpc_exit(task, -EACCES);
        } else {
                xprt->ops->set_port(xprt, clnt->cl_port);
                clnt->cl_port = htons(clnt->cl_port);
        }
        spin_lock(&pmap_lock);
        map->pm_binding = 0;
        rpc_wake_up(&map->pm_bindwait);
        spin_unlock(&pmap_lock);
}

/*
 * Set or unset a port registration with the local portmapper.
 * port == 0 means unregister, port != 0 means register.
 */
int
rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
{
        struct sockaddr_in sin;
        struct rpc_portmap map;
        struct rpc_clnt *pmap_clnt;
        int error = 0;

        dprintk("RPC: registering (%d, %d, %d, %d) with portmapper.\n",
                prog, vers, prot, port);

        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
        if (IS_ERR(pmap_clnt)) {
                error = PTR_ERR(pmap_clnt);
                dprintk("RPC: couldn't create pmap client. Error = %d\n",
                        error);
                return error;
        }

        map.pm_prog = prog;
        map.pm_vers = vers;
        map.pm_prot = prot;
        map.pm_port = port;

        error = rpc_call(pmap_clnt, port ? PMAP_SET : PMAP_UNSET,
                         &map, okay, 0);

        if (error < 0) {
                printk(KERN_WARNING
                       "RPC: failed to contact portmap (errno %d).\n", error);
        }
        dprintk("RPC: registration status %d/%d\n", error, *okay);

        /* Client deleted automatically because cl_oneshot == 1 */
        return error;
}

static struct rpc_clnt *
pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged)
{
        struct rpc_xprt *xprt;
        struct rpc_clnt *clnt;

        /* printk("pmap: create xprt\n"); */
        xprt = xprt_create_proto(proto, srvaddr, NULL);
        if (IS_ERR(xprt))
                return (struct rpc_clnt *)xprt;
        xprt->ops->set_port(xprt, RPC_PMAP_PORT);
        if (!privileged)
                xprt->resvport = 0;

        /* printk("pmap: create clnt\n"); */
        clnt = rpc_new_client(xprt, hostname,
                              &pmap_program, RPC_PMAP_VERSION,
                              RPC_AUTH_UNIX);
        if (!IS_ERR(clnt)) {
                clnt->cl_softrtry = 1;
                clnt->cl_oneshot  = 1;
        }
        return clnt;
}

/*
 * XDR encode/decode functions for PMAP
 */
static int
xdr_encode_mapping(struct rpc_rqst *req, u32 *p, struct rpc_portmap *map)
{
        dprintk("RPC: xdr_encode_mapping(%d, %d, %d, %d)\n",
                map->pm_prog, map->pm_vers, map->pm_prot, map->pm_port);
        *p++ = htonl(map->pm_prog);
        *p++ = htonl(map->pm_vers);
        *p++ = htonl(map->pm_prot);
        *p++ = htonl(map->pm_port);

        req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
        return 0;
}

static int
xdr_decode_port(struct rpc_rqst *req, u32 *p, unsigned short *portp)
{
        *portp = (unsigned short) ntohl(*p++);
        return 0;
}
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int
__rpc_execute(struct rpc_task *task)
{
        unsigned long   oldflags;
        int             status = 0;

        dprintk("RPC: %4d rpc_execute flgs %x\n",
                task->tk_pid, task->tk_flags);

        if (!RPC_IS_RUNNING(task)) {
                printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n");
                return 0;
        }

        while (1) {
                /*
                 * Execute any pending callback.
                 */
                if (task->tk_flags & RPC_TASK_CALLBACK) {
                        /* Define a callback save pointer */
                        void (*save_callback)(struct rpc_task *);

                        task->tk_flags &= ~RPC_TASK_CALLBACK;

                        /*
                         * If a callback exists, save it, reset it,
                         * call it.
                         * The save is needed to stop from resetting
                         * another callback set within the callback handler
                         *   - Dave
                         */
                        if (task->tk_callback) {
                                save_callback = task->tk_callback;
                                task->tk_callback = NULL;
                                save_callback(task);
                        }
                }

                /*
                 * No handler for next step means exit.
                 */
                if (!task->tk_action)
                        break;

                /*
                 * Perform the next FSM step.
                 * tk_action may be NULL when the task has been killed
                 * by someone else.
                 */
                if (RPC_IS_RUNNING(task) && task->tk_action)
                        task->tk_action(task);

                /*
                 * Check whether task is sleeping.
                 * Note that if the task may go to sleep in tk_action,
                 * and the RPC reply arrives before we get here, it will
                 * have state RUNNING, but will still be on schedq.
                 */
                save_flags(oldflags);
                cli();
                if (RPC_IS_RUNNING(task)) {
                        if (task->tk_rpcwait == &schedq)
                                rpc_remove_wait_queue(task);
                } else while (!RPC_IS_RUNNING(task)) {
                        if (RPC_IS_ASYNC(task)) {
                                restore_flags(oldflags);
                                return 0;
                        }

                        /* sync task: sleep here */
                        dprintk("RPC: %4d sync task going to sleep\n",
                                task->tk_pid);
                        if (current->pid == rpciod_pid)
                                printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");
                        sleep_on(&task->tk_wait);

                        /*
                         * When the task received a signal, remove from
                         * any queues etc, and make runnable again.
                         */
                        if (signalled())
                                __rpc_wake_up(task);

                        dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
                }
                restore_flags(oldflags);

                /*
                 * When a sync task receives a signal, it exits with
                 * -ERESTARTSYS. In order to catch any callbacks that
                 * clean up after sleeping on some queue, we don't
                 * break the loop here, but go around once more.
                 */
                if (!RPC_IS_ASYNC(task) && signalled()) {
                        dprintk("RPC: %4d got signal\n", task->tk_pid);
                        rpc_exit(task, -ERESTARTSYS);
                }
        }

        dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
        if (task->tk_exit) {
                status = task->tk_status;
                task->tk_exit(task);
        }

        return status;
}
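/*
 * For orientation: the state handlers earlier in this file chain
 * through tk_action (call_reserve -> call_reserveresult ->
 * call_allocate -> call_encode -> call_transmit -> call_decode, with
 * call_timeout rerouting on retransmit), and __rpc_execute just keeps
 * invoking the current handler until one of them clears tk_action.
 * A minimal user-space sketch of that driving pattern; all "toy_"
 * names are illustrative, not kernel API:
 */
struct toy_task {
        void (*action)(struct toy_task *);      /* like tk_action */
        int  status;                            /* like tk_status */
};

static void toy_decode(struct toy_task *t)  { t->status = 0; t->action = NULL; }
static void toy_encode(struct toy_task *t)  { t->action = toy_decode; }
static void toy_reserve(struct toy_task *t) { t->action = toy_encode; }

static int toy_execute(struct toy_task *t)
{
        /* loop until a handler clears the action pointer, as above */
        while (t->action)
                t->action(t);
        return t->status;
}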
void vcos_llthread_exit(void)
{
        /* vcos_llthread_delete might have been called by this point! */
        vcos_assert(rtos_get_cpu_number() == 1);
        rpc_exit(0);
}
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
        u32     *p = task->tk_buffer, n;

        p += 1;         /* skip XID */

        if ((n = ntohl(*p++)) != RPC_REPLY) {
                printk("call_verify: not an RPC reply: %x\n", n);
                goto garbage;
        }
        if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
                int     error = -EACCES;

                if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
                        printk("call_verify: RPC call rejected: %x\n", n);
                } else
                switch ((n = ntohl(*p++))) {
                case RPC_AUTH_REJECTEDCRED:
                case RPC_AUTH_REJECTEDVERF:
                        if (!task->tk_cred_retry--)
                                break;
                        dprintk("RPC: %4d call_verify: retry stale creds\n",
                                task->tk_pid);
                        rpcauth_invalcred(task);
                        task->tk_action = call_refresh;
                        return NULL;
                case RPC_AUTH_BADCRED:
                case RPC_AUTH_BADVERF:
                        /* possibly garbled cred/verf? */
                        if (!task->tk_garb_retry--)
                                break;
                        dprintk("RPC: %4d call_verify: retry garbled creds\n",
                                task->tk_pid);
                        task->tk_action = call_encode;
                        return NULL;
                case RPC_AUTH_TOOWEAK:
                        printk("call_verify: server requires stronger "
                               "authentication.\n");
                default:
                        printk("call_verify: unknown auth error: %x\n", n);
                        error = -EIO;
                }
                dprintk("RPC: %4d call_verify: call rejected %d\n",
                        task->tk_pid, n);
                rpc_exit(task, error);
                return NULL;
        }
        if (!(p = rpcauth_checkverf(task, p))) {
                printk("call_verify: auth check failed\n");
                goto garbage;           /* bad verifier, retry */
        }
        switch ((n = ntohl(*p++))) {
        case RPC_SUCCESS:
                return p;
        case RPC_GARBAGE_ARGS:
                break;                  /* retry */
        default:
                printk("call_verify: server accept status: %x\n", n);
                /* Also retry */
        }

garbage:
        dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
        task->tk_client->cl_stats->rpcgarbage++;
        if (task->tk_garb_retry--) {
                printk("RPC: garbage, retrying %4d\n", task->tk_pid);
                task->tk_action = call_encode;
                return NULL;
        }
        printk("RPC: garbage, exit EIO\n");
        rpc_exit(task, -EIO);
        return NULL;
}
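/*
 * A hedged sketch of the fixed ONC RPC reply prefix (RFC 1057) that
 * call_verify walks word by word above. This struct is illustrative
 * only, not a kernel declaration; variable-length fields (the
 * verifier, then accept_stat, or reject_stat/auth_stat on denial)
 * follow reply_stat on the wire. The 12-byte minimum checked in
 * call_decode corresponds to these three words.
 */
struct rpc_reply_prefix {
        u32 xid;        /* transaction id, skipped by call_verify */
        u32 mtype;      /* must be RPC_REPLY (1) */
        u32 reply_stat; /* RPC_MSG_ACCEPTED (0) or denied (1) */
        /* accepted: opaque_auth verifier (checked by rpcauth_checkverf),
         *           then an accept_stat word (RPC_SUCCESS, ...);
         * denied:   reject_stat word, then auth_stat on RPC_AUTH_ERROR */
};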
static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
                                 struct nfs_lock_context *lock, loff_t offset,
                                 loff_t len)
{
        struct inode *inode = file_inode(filep);
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs42_falloc_args args = {
                .falloc_fh      = NFS_FH(inode),
                .falloc_offset  = offset,
                .falloc_length  = len,
                .falloc_bitmask = server->cache_consistency_bitmask,
        };
        struct nfs42_falloc_res res = {
                .falloc_server  = server,
        };
        int status;

        msg->rpc_argp = &args;
        msg->rpc_resp = &res;

        status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
                                     lock, FMODE_WRITE);
        if (status)
                return status;

        res.falloc_fattr = nfs_alloc_fattr();
        if (!res.falloc_fattr)
                return -ENOMEM;

        status = nfs4_call_sync(server->client, server, msg,
                                &args.seq_args, &res.seq_res, 0);
        if (status == 0)
                status = nfs_post_op_update_inode(inode, res.falloc_fattr);

        kfree(res.falloc_fattr);
        return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
                                loff_t offset, loff_t len)
{
        struct nfs_server *server = NFS_SERVER(file_inode(filep));
        struct nfs4_exception exception = { };
        struct nfs_lock_context *lock;
        int err;

        lock = nfs_get_lock_context(nfs_file_open_context(filep));
        if (IS_ERR(lock))
                return PTR_ERR(lock);

        exception.inode = file_inode(filep);
        exception.state = lock->open_context->state;

        do {
                err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
                if (err == -ENOTSUPP) {
                        err = -EOPNOTSUPP;
                        break;
                }
                err = nfs4_handle_exception(server, err, &exception);
        } while (exception.retry);

        nfs_put_lock_context(lock);
        return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
        };
        struct inode *inode = file_inode(filep);
        int err;

        if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
                return -EOPNOTSUPP;

        inode_lock(inode);

        err = nfs42_proc_fallocate(&msg, filep, offset, len);
        if (err == -EOPNOTSUPP)
                NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

        inode_unlock(inode);
        return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
        };
        struct inode *inode = file_inode(filep);
        int err;

        if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
                return -EOPNOTSUPP;

        inode_lock(inode);
        err = nfs_sync_inode(inode);
        if (err)
                goto out_unlock;

        err = nfs42_proc_fallocate(&msg, filep, offset, len);
        if (err == 0)
                truncate_pagecache_range(inode, offset, (offset + len) - 1);
        if (err == -EOPNOTSUPP)
                NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
out_unlock:
        inode_unlock(inode);
        return err;
}

static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
                                struct nfs_lock_context *src_lock,
                                struct file *dst, loff_t pos_dst,
                                struct nfs_lock_context *dst_lock,
                                size_t count)
{
        struct nfs42_copy_args args = {
                .src_fh         = NFS_FH(file_inode(src)),
                .src_pos        = pos_src,
                .dst_fh         = NFS_FH(file_inode(dst)),
                .dst_pos        = pos_dst,
                .count          = count,
        };
        struct nfs42_copy_res res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
        struct inode *dst_inode = file_inode(dst);
        struct nfs_server *server = NFS_SERVER(dst_inode);
        int status;

        status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
                                     src_lock, FMODE_READ);
        if (status)
                return status;

        status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
                        pos_src, pos_src + (loff_t)count - 1);
        if (status)
                return status;

        status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
                                     dst_lock, FMODE_WRITE);
        if (status)
                return status;

        status = nfs_sync_inode(dst_inode);
        if (status)
                return status;

        status = nfs4_call_sync(server->client, server, &msg,
                                &args.seq_args, &res.seq_res, 0);
        if (status == -ENOTSUPP)
                server->caps &= ~NFS_CAP_COPY;
        if (status)
                return status;

        if (res.write_res.verifier.committed != NFS_FILE_SYNC) {
                status = nfs_commit_file(dst, &res.write_res.verifier.verifier);
                if (status)
                        return status;
        }

        truncate_pagecache_range(dst_inode, pos_dst,
                                 pos_dst + res.write_res.count);

        return res.write_res.count;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
                        struct file *dst, loff_t pos_dst,
                        size_t count)
{
        struct nfs_server *server = NFS_SERVER(file_inode(dst));
        struct nfs_lock_context *src_lock;
        struct nfs_lock_context *dst_lock;
        struct nfs4_exception src_exception = { };
        struct nfs4_exception dst_exception = { };
        ssize_t err, err2;

        if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
                return -EOPNOTSUPP;

        src_lock = nfs_get_lock_context(nfs_file_open_context(src));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);

        src_exception.inode = file_inode(src);
        src_exception.state = src_lock->open_context->state;

        dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
        if (IS_ERR(dst_lock)) {
                err = PTR_ERR(dst_lock);
                goto out_put_src_lock;
        }

        dst_exception.inode = file_inode(dst);
        dst_exception.state = dst_lock->open_context->state;

        do {
                inode_lock(file_inode(dst));
                err = _nfs42_proc_copy(src, pos_src, src_lock,
                                       dst, pos_dst, dst_lock, count);
                inode_unlock(file_inode(dst));

                if (err == -ENOTSUPP) {
                        err = -EOPNOTSUPP;
                        break;
                }

                err2 = nfs4_handle_exception(server, err, &src_exception);
                err = nfs4_handle_exception(server, err, &dst_exception);
                if (!err)
                        err = err2;
        } while (src_exception.retry || dst_exception.retry);

        nfs_put_lock_context(dst_lock);
out_put_src_lock:
        nfs_put_lock_context(src_lock);
        return err;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
                                 struct nfs_lock_context *lock,
                                 loff_t offset, int whence)
{
        struct inode *inode = file_inode(filep);
        struct nfs42_seek_args args = {
                .sa_fh          = NFS_FH(inode),
                .sa_offset      = offset,
                .sa_what        = (whence == SEEK_HOLE) ?
                                        NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
        };
        struct nfs42_seek_res res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
        struct nfs_server *server = NFS_SERVER(inode);
        int status;

        if (!nfs_server_capable(inode, NFS_CAP_SEEK))
                return -ENOTSUPP;

        status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
                                     lock, FMODE_READ);
        if (status)
                return status;

        status = nfs_filemap_write_and_wait_range(inode->i_mapping,
                        offset, LLONG_MAX);
        if (status)
                return status;

        status = nfs4_call_sync(server->client, server, &msg,
                                &args.seq_args, &res.seq_res, 0);
        if (status == -ENOTSUPP)
                server->caps &= ~NFS_CAP_SEEK;
        if (status)
                return status;

        return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
        struct nfs_server *server = NFS_SERVER(file_inode(filep));
        struct nfs4_exception exception = { };
        struct nfs_lock_context *lock;
        loff_t err;

        lock = nfs_get_lock_context(nfs_file_open_context(filep));
        if (IS_ERR(lock))
                return PTR_ERR(lock);

        exception.inode = file_inode(filep);
        exception.state = lock->open_context->state;

        do {
                err = _nfs42_proc_llseek(filep, lock, offset, whence);
                if (err >= 0)
                        break;
                if (err == -ENOTSUPP) {
                        err = -EOPNOTSUPP;
                        break;
                }
                err = nfs4_handle_exception(server, err, &exception);
        } while (exception.retry);

        nfs_put_lock_context(lock);
        return err;
}

static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct inode *inode = data->inode;
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layout_hdr *lo;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (!pnfs_layout_is_valid(lo)) {
                spin_unlock(&inode->i_lock);
                rpc_exit(task, 0);
                return;
        }
        nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
        spin_unlock(&inode->i_lock);
        nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
                             &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct inode *inode = data->inode;
        struct pnfs_layout_hdr *lo;

        if (!nfs4_sequence_done(task, &data->res.seq_res))
                return;

        switch (task->tk_status) {
        case 0:
                break;
        case -NFS4ERR_EXPIRED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match(&data->args.stateid,
                                       &lo->plh_stateid)) {
                        LIST_HEAD(head);

                        /*
                         * Mark the bad layout state as invalid, then retry
                         * with the current stateid.
                         */
                        pnfs_mark_layout_stateid_invalid(lo, &head);
                        spin_unlock(&inode->i_lock);
                        pnfs_free_lseg_list(&head);
                } else
                        spin_unlock(&inode->i_lock);
                break;
        case -NFS4ERR_OLD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match_other(&data->args.stateid,
                                             &lo->plh_stateid)) {
                        /* Do we need to delay before resending? */
                        if (!nfs4_stateid_is_newer(&lo->plh_stateid,
                                                   &data->args.stateid))
                                rpc_delay(task, HZ);
                        rpc_restart_call_prepare(task);
                }
                spin_unlock(&inode->i_lock);
                break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
        }

        dprintk("%s server returns %d\n", __func__, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct nfs_server *nfss = NFS_SERVER(data->args.inode);

        if (nfss->pnfs_curr_ld->cleanup_layoutstats)
                nfss->pnfs_curr_ld->cleanup_layoutstats(data);

        pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
        smp_mb__before_atomic();
        clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
        smp_mb__after_atomic();
        nfs_iput_and_deactive(data->inode);
        kfree(data->args.devinfo);
        kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
        .rpc_call_prepare = nfs42_layoutstat_prepare,
        .rpc_call_done = nfs42_layoutstat_done,
        .rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
                                   struct nfs42_layoutstat_data *data)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
        };
        struct rpc_task_setup task_setup = {
                .rpc_client = server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs42_layoutstat_ops,
                .callback_data = data,
                .flags = RPC_TASK_ASYNC,
        };
        struct rpc_task *task;

        data->inode = nfs_igrab_and_active(data->args.inode);
        if (!data->inode) {
                nfs42_layoutstat_release(data);
                return -EAGAIN;
        }
        nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
        task = rpc_run_task(&task_setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
                             struct file *dst_f,
                             struct nfs_lock_context *src_lock,
                             struct nfs_lock_context *dst_lock,
                             loff_t src_offset,
                             loff_t dst_offset, loff_t count)
{
        struct inode *src_inode = file_inode(src_f);
        struct inode *dst_inode = file_inode(dst_f);
        struct nfs_server *server = NFS_SERVER(dst_inode);
        struct nfs42_clone_args args = {
                .src_fh = NFS_FH(src_inode),
                .dst_fh = NFS_FH(dst_inode),
                .src_offset = src_offset,
                .dst_offset = dst_offset,
                .count = count,
                .dst_bitmask = server->cache_consistency_bitmask,
        };
        struct nfs42_clone_res res = {
                .server = server,
        };
        int status;

        msg->rpc_argp = &args;
        msg->rpc_resp = &res;

        status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
                                     src_lock, FMODE_READ);
        if (status)
                return status;

        status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
                                     dst_lock, FMODE_WRITE);
        if (status)
                return status;

        res.dst_fattr = nfs_alloc_fattr();
        if (!res.dst_fattr)
                return -ENOMEM;

        status = nfs4_call_sync(server->client, server, msg,
                                &args.seq_args, &res.seq_res, 0);
        if (status == 0)
                status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);

        kfree(res.dst_fattr);
        return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
                     loff_t src_offset, loff_t dst_offset, loff_t count)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
        };
        struct inode *inode = file_inode(src_f);
        struct nfs_server *server = NFS_SERVER(file_inode(src_f));
        struct nfs_lock_context *src_lock;
        struct nfs_lock_context *dst_lock;
        struct nfs4_exception src_exception = { };
        struct nfs4_exception dst_exception = { };
        int err, err2;

        if (!nfs_server_capable(inode, NFS_CAP_CLONE))
                return -EOPNOTSUPP;

        src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);

        src_exception.inode = file_inode(src_f);
        src_exception.state = src_lock->open_context->state;

        dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
        if (IS_ERR(dst_lock)) {
                err = PTR_ERR(dst_lock);
                goto out_put_src_lock;
        }

        dst_exception.inode = file_inode(dst_f);
        dst_exception.state = dst_lock->open_context->state;

        do {
                err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock,
                                        dst_lock, src_offset, dst_offset,
                                        count);
                if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
                        NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
                        err = -EOPNOTSUPP;
                        break;
                }

                err2 = nfs4_handle_exception(server, err, &src_exception);
                err = nfs4_handle_exception(server, err, &dst_exception);
                if (!err)
                        err = err2;
        } while (src_exception.retry || dst_exception.retry);

        nfs_put_lock_context(dst_lock);
out_put_src_lock:
        nfs_put_lock_context(src_lock);
        return err;
}
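/*
 * The nfs42_proc_* wrappers above all share one retry idiom: run the
 * raw operation, let nfs4_handle_exception() classify the NFSv4 error
 * (possibly sleeping before flagging a retry), and loop while it asks
 * for one. A minimal sketch of that shape; nfs42_retry_loop and
 * nfs42_op_fn are illustrative names, not kernel declarations:
 */
typedef int (*nfs42_op_fn)(struct file *filep, struct nfs_lock_context *lock);

static int nfs42_retry_loop(struct nfs_server *server,
                            struct nfs4_exception *exception,
                            nfs42_op_fn op, struct file *filep,
                            struct nfs_lock_context *lock)
{
        int err;

        do {
                err = op(filep, lock);
                if (err == -ENOTSUPP)           /* server lacks the op */
                        return -EOPNOTSUPP;
                err = nfs4_handle_exception(server, err, exception);
        } while (exception->retry);             /* set by the handler */
        return err;
}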
int
main (int argc, char *argv[])
{
        if (rpc_test_get_connection_path)
                g_connection_path = rpc_test_get_connection_path ();
        else
                g_connection_path = NPW_CONNECTION_PATH "/Test.RPC";

#ifdef BUILD_CLIENT
        gchar **child_args;

        if (argc < 2)
                g_error ("no server program provided on command line");

        signal (SIGSEGV, urgent_exit_sig);
        signal (SIGBUS, urgent_exit_sig);
        signal (SIGINT, urgent_exit_sig);
        signal (SIGABRT, urgent_exit_sig);

        if ((child_args = clone_args (argv)) == NULL)
                g_error ("could not create server program arguments\n");
        g_free (child_args[0]);
        child_args[0] = g_strdup (argv[1]);
        if (!g_spawn_async (NULL, child_args, NULL, G_SPAWN_DO_NOT_REAP_CHILD,
                            NULL, NULL, &g_child_pid, NULL))
                g_error ("could not start server program '%s'", child_args[0]);
        g_strfreev (child_args);

        if ((g_connection = rpc_init_client (g_connection_path)) == NULL)
                g_error ("failed to initialize RPC client connection");
#endif
#ifdef BUILD_SERVER
        if ((g_connection = rpc_init_server (g_connection_path)) == NULL)
                g_error ("failed to initialize RPC server connection");
#endif

        int fd = -1;
        GSource *rpc_source = NULL;
        guint rpc_source_id = 0;
        GPollFD rpc_event_poll_fd;

#ifdef BUILD_CLIENT
        fd = rpc_socket (g_connection);
#endif
#ifdef BUILD_SERVER
        fd = rpc_listen_socket (g_connection);
#endif
        RPC_TEST_ENSURE (fd >= 0);

        if ((rpc_source = g_source_new (&rpc_event_funcs, sizeof (GSource))) == NULL)
                g_error ("failed to initialize RPC source");
        rpc_source_id = g_source_attach (rpc_source, NULL);
        memset (&rpc_event_poll_fd, 0, sizeof (rpc_event_poll_fd));
        rpc_event_poll_fd.fd = fd;
        rpc_event_poll_fd.events = G_IO_IN;
        rpc_event_poll_fd.revents = 0;
        g_source_add_poll (rpc_source, &rpc_event_poll_fd);

        static const rpc_method_descriptor_t vtable[] = {
                { RPC_TEST_METHOD_EXIT, handle_rpc_test_exit }
        };
        if (rpc_connection_add_method_descriptor (g_connection, &vtable[0]) < 0)
                g_error ("could not add method descriptor for TEST_RPC_METHOD_EXIT");

        g_main_loop = g_main_loop_new (NULL, TRUE);

#ifdef BUILD_CLIENT
        g_child_watch_id = g_child_watch_add (g_child_pid, child_exited_cb, NULL);
#endif

        rpc_test_init_invoke (argv);
        g_main_loop_run (g_main_loop);

        if (rpc_source_id)
                g_source_remove (rpc_source_id);
        if (g_connection)
                rpc_exit (g_connection);
        return g_exit_status;
}