/*
 * Encode arguments to readdir call.
 *
 * Also registers the caller's pages as the receive buffer so the
 * READDIR reply data lands directly in args->pages.
 */
static int
nfs_xdr_readdirargs(struct rpc_rqst *req, u32 *p, struct nfs_readdirargs *args)
{
	struct rpc_task	*task = req->rq_task;
	struct rpc_auth	*auth = task->tk_auth;
	unsigned int	replen;
	u32		count = args->count;

	/*
	 * Some servers (e.g. HP OS 9.5) seem to expect the buffer size
	 * to be in longwords ... check whether to convert the size.
	 * This conversion affects only the value sent on the wire.
	 */
	if (task->tk_client->cl_flags & NFS_CLNTF_BUFSIZE)
		count = count >> 2;

	p = xdr_encode_fhandle(p, args->fh);
	*p++ = htonl(args->cookie & 0xFFFFFFFF);
	*p++ = htonl(count); /* see above */
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);

	/*
	 * Inline the page array.  Size the receive pages with the real
	 * byte count (args->count): the longword conversion above only
	 * changes the wire representation, not how many bytes of
	 * directory data the server may return.  Passing the shifted
	 * value here would undersize the reply buffer by a factor of
	 * four whenever NFS_CLNTF_BUFSIZE is set.
	 */
	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, args->count);
	return 0;
}
/*
 * Encode a GSS-proxy ACCEPT_SEC_CONTEXT request.
 *
 * The optional arguments (context_handle, cred_handle, input_cb) are
 * XDR optionals: when the pointer is NULL, a FALSE discriminant is
 * encoded in their place via gssx_enc_bool(xdr, 0).
 *
 * Any encoding error jumps (or falls through) to the "done" label,
 * where it is only logged via dprintk(); this encoder has no way to
 * return the error to its caller.
 */
void gssx_enc_accept_sec_context(struct rpc_rqst *req,
				 struct xdr_stream *xdr,
				 struct gssx_arg_accept_sec_context *arg)
{
	int err;

	err = gssx_enc_call_ctx(xdr, &arg->call_ctx);
	if (err)
		goto done;

	/* arg->context_handle (optional) */
	if (arg->context_handle) {
		err = gssx_enc_ctx(xdr, arg->context_handle);
		if (err)
			goto done;
	} else {
		err = gssx_enc_bool(xdr, 0);
	}

	/* arg->cred_handle (optional) */
	if (arg->cred_handle) {
		err = gssx_enc_cred(xdr, arg->cred_handle);
		if (err)
			goto done;
	} else {
		err = gssx_enc_bool(xdr, 0);
	}

	/* arg->input_token */
	err = gssx_enc_in_token(xdr, &arg->input_token);
	if (err)
		goto done;

	/* arg->input_cb (optional) */
	if (arg->input_cb) {
		err = gssx_enc_cb(xdr, arg->input_cb);
		if (err)
			goto done;
	} else {
		err = gssx_enc_bool(xdr, 0);
	}

	err = gssx_enc_bool(xdr, arg->ret_deleg_cred);
	if (err)
		goto done;

	/* leave options empty for now, will add once we have any options
	 * to pass up at all */
	/* arg->options */
	err = dummy_enc_opt_array(xdr, &arg->options);

	/*
	 * NOTE(review): err from dummy_enc_opt_array is not checked
	 * before the reply pages are registered below, so the pages are
	 * inlined even when the final encode step failed.  Confirm this
	 * is intentional (the error is still logged at "done").
	 */
	xdr_inline_pages(&req->rq_rcv_buf,
		PAGE_SIZE/2 /* pretty arbitrary */,
		arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
done:
	if (err)
		dprintk("RPC: gssx_enc_accept_sec_context: %d\n", err);
}
/*
 * While encoding arguments, set up the reply buffer in advance so
 * that reply data is received directly into the page cache.
 */
static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
				 unsigned int base, unsigned int len,
				 unsigned int bufsize)
{
	unsigned int hdrwords;

	/* Words of reply preceding the page data: RPC reply header,
	 * auth verifier slack, plus the caller's fixed-size result area. */
	hdrwords = RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_rslack + bufsize;

	/* xdr_inline_pages() wants the offset in bytes, hence << 2 */
	xdr_inline_pages(&req->rq_rcv_buf, hdrwords << 2, pages, base, len);
}
/*
 * Encode READLINK args: only the file handle goes on the wire; the
 * caller's pages are registered to receive the returned link text.
 */
static int
nfs_xdr_readlinkargs(struct rpc_rqst *req, u32 *p, struct nfs_readlinkargs *args)
{
	struct rpc_auth	*auth = req->rq_task->tk_auth;
	unsigned int	hdrlen;

	p = xdr_encode_fhandle(p, args->fh);
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);

	/* Bytes of the reply that precede the inlined page data */
	hdrlen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
	xdr_inline_pages(&req->rq_rcv_buf, hdrlen,
			 args->pages, args->pgbase, args->pglen);
	return 0;
}
/*
 * Encode arguments to the readdir call, and register the caller's
 * pages so the directory entries are received straight into them.
 */
static int
nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
{
	struct rpc_auth	*auth = req->rq_cred->cr_auth;
	u32		dircount = args->count;
	unsigned int	hdrlen;

	p = xdr_encode_fhandle(p, args->fh);
	*p++ = htonl(args->cookie);
	*p++ = htonl(dircount);
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);

	/* Bytes of the reply that precede the inlined page data */
	hdrlen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
	xdr_inline_pages(&req->rq_rcv_buf, hdrlen, args->pages, 0, dircount);
	return 0;
}
/*
 * Arguments to a READ call.  Since the data is read directly into the
 * page cache, the reply iovec is also set up here so that iov[1]
 * points exactly at the pages we want to fill.
 */
static int
nfs3_xdr_readargs(struct rpc_rqst *req, u32 *p, struct nfs_readargs *args)
{
	struct rpc_auth	*auth = req->rq_task->tk_auth;
	u32		nbytes = args->count;
	unsigned int	hdrlen;

	p = xdr_encode_fhandle(p, args->fh);
	p = xdr_encode_hyper(p, args->offset);	/* 64-bit offset */
	*p++ = htonl(nbytes);
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);

	/* Bytes of the reply that precede the inlined page data */
	hdrlen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2;
	xdr_inline_pages(&req->rq_rcv_buf, hdrlen,
			 args->pages, args->pgbase, nbytes);
	return 0;
}
/*
 * Arguments to a READ call. Since we read data directly into the page
 * cache, we also set up the reply iovec here so that iov[1] points
 * exactly to the page we want to fetch.
 *
 * NFSv2 READ arguments on the wire: fhandle, offset, count,
 * totalcount — hence count is encoded twice below (see RFC 1094:
 * totalcount is unused by servers and traditionally set to count).
 */
static int
nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
{
	struct rpc_auth	*auth = req->rq_cred->cr_auth;
	unsigned int	replen;
	/* NFSv2 offsets are 32-bit on the wire; truncation is inherent */
	u32		offset = (u32)args->offset;
	u32		count = args->count;

	p = xdr_encode_fhandle(p, args->fh);
	*p++ = htonl(offset);
	*p++ = htonl(count);
	/* the unused "totalcount" field — see comment above */
	*p++ = htonl(count);
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);

	/* Inline the page array: replen is the byte count of reply data
	 * (RPC header + auth slack + fixed READ result) preceding it */
	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
	xdr_inline_pages(&req->rq_rcv_buf, replen,
			 args->pages, args->pgbase, count);
	/* mark the buffer so the transport knows reply data goes to pages */
	req->rq_rcv_buf.flags |= XDRBUF_READ;
	return 0;
}